feat: complete redesign to v2.4.0 - Claude's ultimate development partner

Major redesign of Gemini MCP Server with modular architecture:

- Removed all emoji characters from tool outputs for clean terminal display
- Kept review category emojis (🔴🟠🟡🟢) per user preference
- Added 4 specialized tools:
  - think_deeper: Extended reasoning and problem-solving (temp 0.7)
  - review_code: Professional code review with severity levels (temp 0.2)
  - debug_issue: Root cause analysis and debugging (temp 0.2)
  - analyze: General-purpose file analysis (temp 0.2)
- Modular architecture with base tool class and Pydantic models
- Verbose tool descriptions with natural language triggers
- Updated README with comprehensive examples and real-world use cases
- All 25 tests passing; type checking and critical lint checks clean

BREAKING CHANGE: Removed analyze_code tool in favor of specialized tools

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
Author: Fahad
Date: 2025-06-08 22:30:45 +04:00
Commit: 1aa19548d1 (parent: 8754f3c544)
25 changed files with 2059 additions and 1828 deletions
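How the four tools plug into the MCP server is not shown in this diff; the following is a minimal sketch, assuming the mcp package's low-level Server API (the server.py module name and the TOOLS registry are hypothetical):

# server.py (hypothetical) - registers and dispatches the four tools
from mcp.server import Server
from mcp.types import TextContent, Tool

from tools import AnalyzeTool, DebugIssueTool, ReviewCodeTool, ThinkDeeperTool

server = Server("gemini-mcp-server")

# Name -> instance registry built from the classes exported by tools/__init__.py
TOOLS = {
    tool.name: tool
    for tool in (ThinkDeeperTool(), ReviewCodeTool(), DebugIssueTool(), AnalyzeTool())
}

@server.list_tools()
async def list_tools() -> list[Tool]:
    return [
        Tool(name=t.name, description=t.description, inputSchema=t.get_input_schema())
        for t in TOOLS.values()
    ]

@server.call_tool()
async def call_tool(name: str, arguments: dict) -> list[TextContent]:
    if name not in TOOLS:
        return [TextContent(type="text", text=f"Unknown tool: {name}")]
    return await TOOLS[name].execute(arguments)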

tools/__init__.py (new file, 15 lines)

@@ -0,0 +1,15 @@
"""
Tool implementations for Gemini MCP Server
"""
from .think_deeper import ThinkDeeperTool
from .review_code import ReviewCodeTool
from .debug_issue import DebugIssueTool
from .analyze import AnalyzeTool
__all__ = [
"ThinkDeeperTool",
"ReviewCodeTool",
"DebugIssueTool",
"AnalyzeTool",
]

tools/analyze.py (new file, 151 lines)

@@ -0,0 +1,151 @@
"""
Analyze tool - General-purpose code and file analysis
"""
from typing import Dict, Any, List, Optional
from pydantic import Field
from .base import BaseTool, ToolRequest
from prompts import ANALYZE_PROMPT
from utils import read_files, check_token_limit
from config import TEMPERATURE_ANALYTICAL, MAX_CONTEXT_TOKENS
class AnalyzeRequest(ToolRequest):
"""Request model for analyze tool"""
files: List[str] = Field(..., description="Files to analyze")
question: str = Field(..., description="What to analyze or look for")
analysis_type: Optional[str] = Field(
None,
description="Type of analysis: architecture|performance|security|quality|general",
)
output_format: Optional[str] = Field(
"detailed", description="Output format: summary|detailed|actionable"
)
class AnalyzeTool(BaseTool):
"""General-purpose file and code analysis tool"""
def get_name(self) -> str:
return "analyze"
def get_description(self) -> str:
return (
"ANALYZE FILES & CODE - General-purpose analysis for understanding code. "
"Use this for examining files, understanding architecture, or investigating specific aspects. "
"Triggers: 'analyze these files', 'examine this code', 'understand this'. "
"Perfect for: codebase exploration, dependency analysis, pattern detection. "
"Always uses file paths for clean terminal output."
)
def get_input_schema(self) -> Dict[str, Any]:
return {
"type": "object",
"properties": {
"files": {
"type": "array",
"items": {"type": "string"},
"description": "Files to analyze",
},
"question": {
"type": "string",
"description": "What to analyze or look for",
},
"analysis_type": {
"type": "string",
"enum": [
"architecture",
"performance",
"security",
"quality",
"general",
],
"description": "Type of analysis to perform",
},
"output_format": {
"type": "string",
"enum": ["summary", "detailed", "actionable"],
"default": "detailed",
"description": "How to format the output",
},
"temperature": {
"type": "number",
"description": "Temperature (0-1, default 0.2)",
"minimum": 0,
"maximum": 1,
},
},
"required": ["files", "question"],
}
def get_system_prompt(self) -> str:
return ANALYZE_PROMPT
def get_default_temperature(self) -> float:
return TEMPERATURE_ANALYTICAL
def get_request_model(self):
return AnalyzeRequest
async def prepare_prompt(self, request: AnalyzeRequest) -> str:
"""Prepare the analysis prompt"""
# Read all files
file_content, summary = read_files(request.files)
# Check token limits
within_limit, estimated_tokens = check_token_limit(file_content)
if not within_limit:
raise ValueError(
f"Files too large (~{estimated_tokens:,} tokens). "
f"Maximum is {MAX_CONTEXT_TOKENS:,} tokens."
)
# Build analysis instructions
analysis_focus = []
if request.analysis_type:
type_focus = {
"architecture": "Focus on architectural patterns, structure, and design decisions",
"performance": "Focus on performance characteristics and optimization opportunities",
"security": "Focus on security implications and potential vulnerabilities",
"quality": "Focus on code quality, maintainability, and best practices",
"general": "Provide a comprehensive general analysis",
}
analysis_focus.append(type_focus.get(request.analysis_type, ""))
if request.output_format == "summary":
analysis_focus.append("Provide a concise summary of key findings")
elif request.output_format == "actionable":
analysis_focus.append(
"Focus on actionable insights and specific recommendations"
)
focus_instruction = "\n".join(analysis_focus) if analysis_focus else ""
# Combine everything
full_prompt = f"""{self.get_system_prompt()}
{focus_instruction}
=== USER QUESTION ===
{request.question}
=== END QUESTION ===
=== FILES TO ANALYZE ===
{file_content}
=== END FILES ===
Please analyze these files to answer the user's question."""
return full_prompt
def format_response(self, response: str, request: AnalyzeRequest) -> str:
"""Format the analysis response"""
header = f"Analysis: {request.question[:50]}..."
if request.analysis_type:
header = f"{request.analysis_type.upper()} Analysis"
summary_text = f"Analyzed {len(request.files)} file(s)"
return f"{header}\n{summary_text}\n{'=' * 50}\n\n{response}"

tools/base.py (new file, 128 lines)

@@ -0,0 +1,128 @@
"""
Base class for all Gemini MCP tools
"""
from abc import ABC, abstractmethod
from typing import Dict, Any, List, Optional
from pydantic import BaseModel, Field
import google.generativeai as genai
from mcp.types import TextContent
class ToolRequest(BaseModel):
"""Base request model for all tools"""
model: Optional[str] = Field(
None, description="Model to use (defaults to Gemini 2.5 Pro)"
)
max_tokens: Optional[int] = Field(
8192, description="Maximum number of tokens in response"
)
temperature: Optional[float] = Field(
None, description="Temperature for response (tool-specific defaults)"
)
class BaseTool(ABC):
"""Base class for all Gemini tools"""
def __init__(self):
self.name = self.get_name()
self.description = self.get_description()
self.default_temperature = self.get_default_temperature()
@abstractmethod
def get_name(self) -> str:
"""Return the tool name"""
pass
@abstractmethod
def get_description(self) -> str:
"""Return the verbose tool description for Claude"""
pass
@abstractmethod
def get_input_schema(self) -> Dict[str, Any]:
"""Return the JSON schema for tool inputs"""
pass
@abstractmethod
def get_system_prompt(self) -> str:
"""Return the system prompt for this tool"""
pass
def get_default_temperature(self) -> float:
"""Return default temperature for this tool"""
return 0.5
@abstractmethod
def get_request_model(self):
"""Return the Pydantic model for request validation"""
pass
async def execute(self, arguments: Dict[str, Any]) -> List[TextContent]:
"""Execute the tool with given arguments"""
try:
# Validate request
request_model = self.get_request_model()
request = request_model(**arguments)
# Prepare the prompt
prompt = await self.prepare_prompt(request)
# Get model configuration
from config import DEFAULT_MODEL
model_name = getattr(request, "model", None) or DEFAULT_MODEL
temperature = getattr(request, "temperature", None)
if temperature is None:
temperature = self.get_default_temperature()
            # Fall back to 8192 even if the caller explicitly passed null
            max_tokens = getattr(request, "max_tokens", None) or 8192
# Create and configure model
model = self.create_model(model_name, temperature, max_tokens)
# Generate response
response = model.generate_content(prompt)
# Handle response
if response.candidates and response.candidates[0].content.parts:
text = response.candidates[0].content.parts[0].text
else:
finish_reason = (
response.candidates[0].finish_reason
if response.candidates
else "Unknown"
)
text = f"Response blocked or incomplete. Finish reason: {finish_reason}"
# Format response
formatted_response = self.format_response(text, request)
return [TextContent(type="text", text=formatted_response)]
except Exception as e:
error_msg = f"Error in {self.name}: {str(e)}"
return [TextContent(type="text", text=error_msg)]
@abstractmethod
async def prepare_prompt(self, request) -> str:
"""Prepare the full prompt for Gemini"""
pass
def format_response(self, response: str, request) -> str:
"""Format the response for display (can be overridden)"""
return response
def create_model(
self, model_name: str, temperature: float, max_tokens: int
) -> genai.GenerativeModel:
"""Create a configured Gemini model"""
return genai.GenerativeModel(
model_name=model_name,
generation_config={
"temperature": temperature,
"max_output_tokens": max_tokens,
"candidate_count": 1,
},
)
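BaseTool is a template: a subclass supplies name, description, schema, system prompt, and request model, and inherits validation, model configuration, and error handling from execute. A minimal illustrative subclass (hypothetical, not part of this commit):

# Hypothetical subclass showing the contract; everything below is illustrative.
from typing import Any, Dict
from pydantic import Field

from tools.base import BaseTool, ToolRequest

class SummarizeRequest(ToolRequest):
    text: str = Field(..., description="Text to summarize")

class SummarizeTool(BaseTool):
    def get_name(self) -> str:
        return "summarize"

    def get_description(self) -> str:
        return "SUMMARIZE TEXT - condense input into key points."

    def get_input_schema(self) -> Dict[str, Any]:
        return {
            "type": "object",
            "properties": {"text": {"type": "string"}},
            "required": ["text"],
        }

    def get_system_prompt(self) -> str:
        return "You are a precise summarizer. Reply with 3-5 bullet points."

    def get_request_model(self):
        return SummarizeRequest

    async def prepare_prompt(self, request: SummarizeRequest) -> str:
        return f"{self.get_system_prompt()}\n\n=== TEXT ===\n{request.text}\n=== END TEXT ==="

# get_default_temperature() is not overridden, so the 0.5 base default applies.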

tools/debug_issue.py (new file, 145 lines)

@@ -0,0 +1,145 @@
"""
Debug Issue tool - Root cause analysis and debugging assistance
"""
from typing import Dict, Any, List, Optional
from pydantic import Field
from .base import BaseTool, ToolRequest
from prompts import DEBUG_ISSUE_PROMPT
from utils import read_files, check_token_limit
from config import TEMPERATURE_ANALYTICAL, MAX_CONTEXT_TOKENS
class DebugIssueRequest(ToolRequest):
"""Request model for debug_issue tool"""
error_description: str = Field(
..., description="Error message, symptoms, or issue description"
)
error_context: Optional[str] = Field(
None, description="Stack trace, logs, or additional error context"
)
relevant_files: Optional[List[str]] = Field(
None, description="Files that might be related to the issue"
)
runtime_info: Optional[str] = Field(
None, description="Environment, versions, or runtime information"
)
previous_attempts: Optional[str] = Field(
None, description="What has been tried already"
)
class DebugIssueTool(BaseTool):
"""Advanced debugging and root cause analysis tool"""
def get_name(self) -> str:
return "debug_issue"
def get_description(self) -> str:
return (
"DEBUG & ROOT CAUSE ANALYSIS - Expert debugging for complex issues. "
"Use this when you need help tracking down bugs or understanding errors. "
"Triggers: 'debug this', 'why is this failing', 'root cause', 'trace error'. "
"I'll analyze the issue, find root causes, and provide step-by-step solutions. "
"Include error messages, stack traces, and relevant code for best results."
)
def get_input_schema(self) -> Dict[str, Any]:
return {
"type": "object",
"properties": {
"error_description": {
"type": "string",
"description": "Error message, symptoms, or issue description",
},
"error_context": {
"type": "string",
"description": "Stack trace, logs, or additional error context",
},
"relevant_files": {
"type": "array",
"items": {"type": "string"},
"description": "Files that might be related to the issue",
},
"runtime_info": {
"type": "string",
"description": "Environment, versions, or runtime information",
},
"previous_attempts": {
"type": "string",
"description": "What has been tried already",
},
"temperature": {
"type": "number",
"description": "Temperature (0-1, default 0.2 for accuracy)",
"minimum": 0,
"maximum": 1,
},
},
"required": ["error_description"],
}
def get_system_prompt(self) -> str:
return DEBUG_ISSUE_PROMPT
def get_default_temperature(self) -> float:
return TEMPERATURE_ANALYTICAL
def get_request_model(self):
return DebugIssueRequest
async def prepare_prompt(self, request: DebugIssueRequest) -> str:
"""Prepare the debugging prompt"""
# Build context sections
context_parts = [
f"=== ISSUE DESCRIPTION ===\n{request.error_description}\n=== END DESCRIPTION ==="
]
if request.error_context:
context_parts.append(
f"\n=== ERROR CONTEXT/STACK TRACE ===\n{request.error_context}\n=== END CONTEXT ==="
)
if request.runtime_info:
context_parts.append(
f"\n=== RUNTIME INFORMATION ===\n{request.runtime_info}\n=== END RUNTIME ==="
)
if request.previous_attempts:
context_parts.append(
f"\n=== PREVIOUS ATTEMPTS ===\n{request.previous_attempts}\n=== END ATTEMPTS ==="
)
# Add relevant files if provided
if request.relevant_files:
file_content, _ = read_files(request.relevant_files)
context_parts.append(
f"\n=== RELEVANT CODE ===\n{file_content}\n=== END CODE ==="
)
full_context = "\n".join(context_parts)
# Check token limits
within_limit, estimated_tokens = check_token_limit(full_context)
if not within_limit:
raise ValueError(
f"Context too large (~{estimated_tokens:,} tokens). "
f"Maximum is {MAX_CONTEXT_TOKENS:,} tokens."
)
# Combine everything
full_prompt = f"""{self.get_system_prompt()}
{full_context}
Please debug this issue following the structured format in the system prompt.
Focus on finding the root cause and providing actionable solutions."""
return full_prompt
def format_response(
self, response: str, request: DebugIssueRequest
) -> str:
"""Format the debugging response"""
return f"Debug Analysis\n{'=' * 50}\n\n{response}"

tools/review_code.py (new file, 160 lines)

@@ -0,0 +1,160 @@
"""
Code Review tool - Comprehensive code analysis and review
"""
from typing import Dict, Any, List, Optional
from pydantic import Field
from .base import BaseTool, ToolRequest
from prompts import REVIEW_CODE_PROMPT
from utils import read_files, check_token_limit
from config import TEMPERATURE_ANALYTICAL, MAX_CONTEXT_TOKENS
class ReviewCodeRequest(ToolRequest):
"""Request model for review_code tool"""
files: List[str] = Field(..., description="Code files to review")
review_type: str = Field(
"full", description="Type of review: full|security|performance|quick"
)
focus_on: Optional[str] = Field(
None, description="Specific aspects to focus on during review"
)
standards: Optional[str] = Field(
None, description="Coding standards or guidelines to enforce"
)
severity_filter: str = Field(
"all",
description="Minimum severity to report: critical|high|medium|all",
)
class ReviewCodeTool(BaseTool):
"""Professional code review tool"""
def get_name(self) -> str:
return "review_code"
def get_description(self) -> str:
return (
"PROFESSIONAL CODE REVIEW - Comprehensive analysis for bugs, security, and quality. "
"Use this for thorough code review with actionable feedback. "
"Triggers: 'review this code', 'check for issues', 'find bugs', 'security audit'. "
"I'll identify issues by severity (Critical→High→Medium→Low) with specific fixes. "
"Supports focused reviews: security, performance, or quick checks."
)
def get_input_schema(self) -> Dict[str, Any]:
return {
"type": "object",
"properties": {
"files": {
"type": "array",
"items": {"type": "string"},
"description": "Code files to review",
},
"review_type": {
"type": "string",
"enum": ["full", "security", "performance", "quick"],
"default": "full",
"description": "Type of review to perform",
},
"focus_on": {
"type": "string",
"description": "Specific aspects to focus on",
},
"standards": {
"type": "string",
"description": "Coding standards to enforce",
},
"severity_filter": {
"type": "string",
"enum": ["critical", "high", "medium", "all"],
"default": "all",
"description": "Minimum severity level to report",
},
"temperature": {
"type": "number",
"description": "Temperature (0-1, default 0.2 for consistency)",
"minimum": 0,
"maximum": 1,
},
},
"required": ["files"],
}
def get_system_prompt(self) -> str:
return REVIEW_CODE_PROMPT
def get_default_temperature(self) -> float:
return TEMPERATURE_ANALYTICAL
def get_request_model(self):
return ReviewCodeRequest
async def prepare_prompt(self, request: ReviewCodeRequest) -> str:
"""Prepare the code review prompt"""
# Read all files
file_content, summary = read_files(request.files)
# Check token limits
within_limit, estimated_tokens = check_token_limit(file_content)
if not within_limit:
raise ValueError(
f"Code too large (~{estimated_tokens:,} tokens). "
f"Maximum is {MAX_CONTEXT_TOKENS:,} tokens."
)
# Build review instructions
review_focus = []
if request.review_type == "security":
review_focus.append(
"Focus on security vulnerabilities and authentication issues"
)
elif request.review_type == "performance":
review_focus.append(
"Focus on performance bottlenecks and optimization opportunities"
)
elif request.review_type == "quick":
review_focus.append(
"Provide a quick review focusing on critical issues only"
)
if request.focus_on:
review_focus.append(
f"Pay special attention to: {request.focus_on}"
)
if request.standards:
review_focus.append(
f"Enforce these standards: {request.standards}"
)
if request.severity_filter != "all":
review_focus.append(
f"Only report issues of {request.severity_filter} severity or higher"
)
focus_instruction = "\n".join(review_focus) if review_focus else ""
# Combine everything
full_prompt = f"""{self.get_system_prompt()}
{focus_instruction}
=== CODE TO REVIEW ===
{file_content}
=== END CODE ===
Please provide a comprehensive code review following the format specified in the system prompt."""
return full_prompt
def format_response(
self, response: str, request: ReviewCodeRequest
) -> str:
"""Format the review response"""
header = f"Code Review ({request.review_type.upper()})"
if request.focus_on:
header += f" - Focus: {request.focus_on}"
return f"{header}\n{'=' * 50}\n\n{response}"

tools/think_deeper.py (new file, 145 lines)

@@ -0,0 +1,145 @@
"""
Think Deeper tool - Extended reasoning and problem-solving
"""
from typing import Dict, Any, List, Optional
from pydantic import Field
from .base import BaseTool, ToolRequest
from prompts import THINK_DEEPER_PROMPT
from utils import read_files, check_token_limit
from config import TEMPERATURE_CREATIVE, MAX_CONTEXT_TOKENS
class ThinkDeeperRequest(ToolRequest):
"""Request model for think_deeper tool"""
current_analysis: str = Field(
..., description="Claude's current thinking/analysis to extend"
)
problem_context: Optional[str] = Field(
None, description="Additional context about the problem or goal"
)
focus_areas: Optional[List[str]] = Field(
None,
description="Specific aspects to focus on (architecture, performance, security, etc.)",
)
reference_files: Optional[List[str]] = Field(
None, description="Optional file paths for additional context"
)
class ThinkDeeperTool(BaseTool):
"""Extended thinking and reasoning tool"""
def get_name(self) -> str:
return "think_deeper"
def get_description(self) -> str:
return (
"EXTENDED THINKING & REASONING - Your deep thinking partner for complex problems. "
"Use this when you need to extend your analysis, explore alternatives, or validate approaches. "
"Perfect for: architecture decisions, complex bugs, performance challenges, security analysis. "
"Triggers: 'think deeper', 'ultrathink', 'extend my analysis', 'explore alternatives'. "
"I'll challenge assumptions, find edge cases, and provide alternative solutions."
)
def get_input_schema(self) -> Dict[str, Any]:
return {
"type": "object",
"properties": {
"current_analysis": {
"type": "string",
"description": "Your current thinking/analysis to extend and validate",
},
"problem_context": {
"type": "string",
"description": "Additional context about the problem or goal",
},
"focus_areas": {
"type": "array",
"items": {"type": "string"},
"description": "Specific aspects to focus on (architecture, performance, security, etc.)",
},
"reference_files": {
"type": "array",
"items": {"type": "string"},
"description": "Optional file paths for additional context",
},
"temperature": {
"type": "number",
"description": "Temperature for creative thinking (0-1, default 0.7)",
"minimum": 0,
"maximum": 1,
},
"max_tokens": {
"type": "integer",
"description": "Maximum tokens in response",
"default": 8192,
},
},
"required": ["current_analysis"],
}
def get_system_prompt(self) -> str:
return THINK_DEEPER_PROMPT
def get_default_temperature(self) -> float:
return TEMPERATURE_CREATIVE
def get_request_model(self):
return ThinkDeeperRequest
async def prepare_prompt(self, request: ThinkDeeperRequest) -> str:
"""Prepare the full prompt for extended thinking"""
# Build context parts
context_parts = [
f"=== CLAUDE'S CURRENT ANALYSIS ===\n{request.current_analysis}\n=== END ANALYSIS ==="
]
if request.problem_context:
context_parts.append(
f"\n=== PROBLEM CONTEXT ===\n{request.problem_context}\n=== END CONTEXT ==="
)
# Add reference files if provided
if request.reference_files:
file_content, _ = read_files(request.reference_files)
context_parts.append(
f"\n=== REFERENCE FILES ===\n{file_content}\n=== END FILES ==="
)
full_context = "\n".join(context_parts)
# Check token limits
within_limit, estimated_tokens = check_token_limit(full_context)
if not within_limit:
raise ValueError(
f"Context too large (~{estimated_tokens:,} tokens). "
f"Maximum is {MAX_CONTEXT_TOKENS:,} tokens."
)
# Add focus areas instruction if specified
focus_instruction = ""
if request.focus_areas:
areas = ", ".join(request.focus_areas)
focus_instruction = f"\n\nFOCUS AREAS: Please pay special attention to {areas} aspects."
# Combine system prompt with context
full_prompt = f"""{self.get_system_prompt()}{focus_instruction}
{full_context}
Please provide deep analysis that extends Claude's thinking with:
1. Alternative approaches and solutions
2. Edge cases and potential failure modes
3. Critical evaluation of assumptions
4. Concrete implementation suggestions
5. Risk assessment and mitigation strategies"""
return full_prompt
def format_response(
self, response: str, request: ThinkDeeperRequest
) -> str:
"""Format the response with clear attribution"""
return f"Extended Analysis by Gemini:\n\n{response}"