feat: add Claude-Gemini collaboration and chat capabilities
- Add collaboration demo showing dynamic context requests
- Implement chat tool for general conversations and brainstorming
- Add tool selection guide with clear boundaries
- Introduce models configuration system
- Update prompts for better tool descriptions
- Refactor server to remove redundant functionality
- Add comprehensive tests for collaboration features
- Enhance base tool with collaborative features

This enables Claude to request additional context from Gemini during tool execution, improving analysis quality and accuracy.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
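For illustration, the round-trip works by having a tool emit a structured JSON payload instead of a plain answer. A hypothetical payload (the field names follow ClarificationRequest in tools/models.py, added in this commit; the values are made up):

    # Hypothetical clarification payload; _parse_response in tools/base.py
    # routes on the "status" key before validating the remaining fields.
    payload = {
        "status": "requires_clarification",
        "question": "Which module does the failing test exercise?",
        "files_needed": ["tests/test_collaboration.py"],
    }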
tools/__init__.py

@@ -3,6 +3,7 @@ Tool implementations for Gemini MCP Server
 """

 from .analyze import AnalyzeTool
+from .chat import ChatTool
 from .debug_issue import DebugIssueTool
 from .review_code import ReviewCodeTool
 from .think_deeper import ThinkDeeperTool
@@ -12,4 +13,5 @@ __all__ = [
     "ReviewCodeTool",
     "DebugIssueTool",
     "AnalyzeTool",
+    "ChatTool",
 ]
tools/base.py

@@ -5,12 +5,15 @@ Base class for all Gemini MCP tools
 from abc import ABC, abstractmethod
 from typing import Any, Dict, List, Optional, Literal
 import os
+import json

 from google import genai
 from google.genai import types
 from mcp.types import TextContent
 from pydantic import BaseModel, Field

+from .models import ToolOutput, ClarificationRequest
+

 class ToolRequest(BaseModel):
     """Base request model for all tools"""
@@ -95,25 +98,83 @@ class BaseTool(ABC):
             # Generate response
             response = model.generate_content(prompt)

-            # Handle response
+            # Handle response and create standardized output
             if response.candidates and response.candidates[0].content.parts:
-                text = response.candidates[0].content.parts[0].text
+                raw_text = response.candidates[0].content.parts[0].text
+
+                # Check if this is a clarification request
+                tool_output = self._parse_response(raw_text, request)
             else:
                 finish_reason = (
                     response.candidates[0].finish_reason
                     if response.candidates
                     else "Unknown"
                 )
-                text = f"Response blocked or incomplete. Finish reason: {finish_reason}"
+                tool_output = ToolOutput(
+                    status="error",
+                    content=f"Response blocked or incomplete. Finish reason: {finish_reason}",
+                    content_type="text",
+                )

-            # Format response
-            formatted_response = self.format_response(text, request)
-
-            return [TextContent(type="text", text=formatted_response)]
+            # Serialize the standardized output as JSON
+            return [TextContent(type="text", text=tool_output.model_dump_json())]

         except Exception as e:
-            error_msg = f"Error in {self.name}: {str(e)}"
-            return [TextContent(type="text", text=error_msg)]
+            error_output = ToolOutput(
+                status="error",
+                content=f"Error in {self.name}: {str(e)}",
+                content_type="text",
+            )
+            return [TextContent(type="text", text=error_output.model_dump_json())]
+
+    def _parse_response(self, raw_text: str, request) -> ToolOutput:
+        """Parse the raw response and determine if it's a clarification request"""
+        try:
+            # Try to parse as JSON to check for clarification requests
+            potential_json = json.loads(raw_text.strip())
+
+            if (
+                isinstance(potential_json, dict)
+                and potential_json.get("status") == "requires_clarification"
+            ):
+                # Validate the clarification request structure
+                clarification = ClarificationRequest(**potential_json)
+                return ToolOutput(
+                    status="requires_clarification",
+                    content=clarification.model_dump_json(),
+                    content_type="json",
+                    metadata={
+                        "original_request": (
+                            request.model_dump()
+                            if hasattr(request, "model_dump")
+                            else str(request)
+                        )
+                    },
+                )
+
+        except (json.JSONDecodeError, ValueError, TypeError):
+            # Not a JSON clarification request, treat as normal response
+            pass
+
+        # Normal text response - format using tool-specific formatting
+        formatted_content = self.format_response(raw_text, request)
+
+        # Determine content type based on the formatted content
+        content_type = (
+            "markdown"
+            if any(
+                marker in formatted_content for marker in ["##", "**", "`", "- ", "1. "]
+            )
+            else "text"
+        )
+
+        return ToolOutput(
+            status="success",
+            content=formatted_content,
+            content_type=content_type,
+            metadata={"tool_name": self.name},
+        )

     @abstractmethod
     async def prepare_prompt(self, request) -> str:
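A minimal standalone sketch of the clarification path above, assuming the package is importable as `tools` (the import path and payload values are assumptions for illustration):

    import json

    from tools.models import ClarificationRequest, ToolOutput

    # Simulate a model reply that asks for more context instead of answering.
    raw_text = (
        '{"status": "requires_clarification", '
        '"question": "Please share the DB schema", '
        '"files_needed": ["schema.sql"]}'
    )

    parsed = json.loads(raw_text.strip())
    if isinstance(parsed, dict) and parsed.get("status") == "requires_clarification":
        # Pydantic ignores the extra "status" key by default, so this validates.
        clarification = ClarificationRequest(**parsed)
        output = ToolOutput(
            status="requires_clarification",
            content=clarification.model_dump_json(),
            content_type="json",
        )
        print(output.model_dump_json())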
tools/chat.py (new file, 111 lines)

@@ -0,0 +1,111 @@
+"""
+Chat tool - General development chat and collaborative thinking
+"""
+
+from typing import Any, Dict, List, Optional
+
+from pydantic import Field
+
+from config import MAX_CONTEXT_TOKENS, TEMPERATURE_BALANCED
+from prompts import CHAT_PROMPT
+from utils import check_token_limit, read_files
+
+from .base import BaseTool, ToolRequest
+
+
+class ChatRequest(ToolRequest):
+    """Request model for chat tool"""
+
+    prompt: str = Field(
+        ...,
+        description="Your question, topic, or current thinking to discuss with Gemini",
+    )
+    context_files: Optional[List[str]] = Field(
+        default_factory=list, description="Optional files for context"
+    )
+
+
+class ChatTool(BaseTool):
+    """General development chat and collaborative thinking tool"""
+
+    def get_name(self) -> str:
+        return "chat"
+
+    def get_description(self) -> str:
+        return (
+            "GENERAL CHAT & COLLABORATIVE THINKING - Use Gemini as your thinking partner! "
+            "Perfect for: bouncing ideas during your own analysis, getting second opinions on your plans, "
+            "collaborative brainstorming, validating your checklists and approaches, exploring alternatives. "
+            "Also great for: explanations, comparisons, general development questions. "
+            "Triggers: 'ask gemini', 'brainstorm with gemini', 'get gemini's opinion', 'discuss with gemini', "
+            "'share my thinking with gemini', 'explain', 'what is', 'how do I'."
+        )
+
+    def get_input_schema(self) -> Dict[str, Any]:
+        return {
+            "type": "object",
+            "properties": {
+                "prompt": {
+                    "type": "string",
+                    "description": "Your question, topic, or current thinking to discuss with Gemini",
+                },
+                "context_files": {
+                    "type": "array",
+                    "items": {"type": "string"},
+                    "description": "Optional files for context",
+                },
+                "temperature": {
+                    "type": "number",
+                    "description": "Response creativity (0-1, default 0.5)",
+                    "minimum": 0,
+                    "maximum": 1,
+                },
+                "thinking_mode": {
+                    "type": "string",
+                    "enum": ["minimal", "low", "medium", "high", "max"],
+                    "description": "Thinking depth: minimal (128), low (2048), medium (8192), high (16384), max (32768)",
+                },
+            },
+            "required": ["prompt"],
+        }
+
+    def get_system_prompt(self) -> str:
+        return CHAT_PROMPT
+
+    def get_default_temperature(self) -> float:
+        return TEMPERATURE_BALANCED
+
+    def get_request_model(self):
+        return ChatRequest
+
+    async def prepare_prompt(self, request: ChatRequest) -> str:
+        """Prepare the chat prompt with optional context files"""
+        user_content = request.prompt
+
+        # Add context files if provided
+        if request.context_files:
+            file_content, _ = read_files(request.context_files)
+            user_content = f"{request.prompt}\n\n=== CONTEXT FILES ===\n{file_content}\n=== END CONTEXT ==="
+
+        # Check token limits
+        within_limit, estimated_tokens = check_token_limit(user_content)
+        if not within_limit:
+            raise ValueError(
+                f"Content too large (~{estimated_tokens:,} tokens). "
+                f"Maximum is {MAX_CONTEXT_TOKENS:,} tokens."
+            )
+
+        # Combine system prompt with user content
+        full_prompt = f"""{self.get_system_prompt()}
+
+=== USER REQUEST ===
+{user_content}
+=== END REQUEST ===
+
+Please provide a thoughtful, comprehensive response:"""
+
+        return full_prompt
+
+    def format_response(self, response: str, request: ChatRequest) -> str:
+        """Format the chat response (no special formatting needed)"""
+        return response
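A quick usage sketch for the new tool, assuming a configured environment and that ChatTool is directly instantiable (no Gemini call is made; this only exercises prepare_prompt):

    import asyncio

    from tools.chat import ChatRequest, ChatTool

    async def main() -> None:
        tool = ChatTool()
        request = ChatRequest(prompt="Compare asyncio and threading for I/O-bound work")
        # Builds system prompt + framed user request; raises if over the token limit.
        prompt = await tool.prepare_prompt(request)
        print(prompt[:200])

    asyncio.run(main())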
tools/models.py (new file, 59 lines)

@@ -0,0 +1,59 @@
+"""
+Data models for tool responses and interactions
+"""
+
+from pydantic import BaseModel, Field
+from typing import Literal, Optional, Dict, Any, List
+
+
+class ToolOutput(BaseModel):
+    """Standardized output format for all tools"""
+
+    status: Literal["success", "error", "requires_clarification"] = "success"
+    content: str = Field(..., description="The main content/response from the tool")
+    content_type: Literal["text", "markdown", "json"] = "text"
+    metadata: Optional[Dict[str, Any]] = Field(default_factory=dict)
+
+
+class ClarificationRequest(BaseModel):
+    """Request for additional context or clarification"""
+
+    question: str = Field(..., description="Question to ask Claude for more context")
+    files_needed: Optional[List[str]] = Field(
+        default_factory=list, description="Specific files that are needed for analysis"
+    )
+    suggested_next_action: Optional[Dict[str, Any]] = Field(
+        None,
+        description="Suggested tool call with parameters after getting clarification",
+    )
+
+
+class DiagnosticHypothesis(BaseModel):
+    """A debugging hypothesis with context and next steps"""
+
+    rank: int = Field(..., description="Ranking of this hypothesis (1 = most likely)")
+    confidence: Literal["high", "medium", "low"] = Field(
+        ..., description="Confidence level"
+    )
+    hypothesis: str = Field(..., description="Description of the potential root cause")
+    reasoning: str = Field(..., description="Why this hypothesis is plausible")
+    next_step: str = Field(
+        ..., description="Suggested action to test/validate this hypothesis"
+    )
+
+
+class StructuredDebugResponse(BaseModel):
+    """Enhanced debug response with multiple hypotheses"""
+
+    summary: str = Field(..., description="Brief summary of the issue")
+    hypotheses: List[DiagnosticHypothesis] = Field(
+        ..., description="Ranked list of potential causes"
+    )
+    immediate_actions: List[str] = Field(
+        default_factory=list,
+        description="Immediate steps to take regardless of root cause",
+    )
+    additional_context_needed: Optional[List[str]] = Field(
+        default_factory=list,
+        description="Additional files or information that would help with analysis",
+    )
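For reference, constructing one of the new models directly (values are illustrative only, not from a real debugging session):

    from tools.models import DiagnosticHypothesis, StructuredDebugResponse

    # Illustrative values for a hypothetical timeout investigation.
    response = StructuredDebugResponse(
        summary="Requests intermittently time out under load",
        hypotheses=[
            DiagnosticHypothesis(
                rank=1,
                confidence="high",
                hypothesis="Connection pool exhaustion",
                reasoning="Timeouts correlate with peak concurrency",
                next_step="Log pool utilization and raise the pool size",
            )
        ],
        immediate_actions=["Enable connection pool metrics"],
    )
    print(response.model_dump_json(indent=2))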