fix: Docker path translation for review_changes and code deduplication
- Fixed review_changes tool to properly translate host paths to container paths in Docker
- Prevents "No such file or directory" errors when running in Docker containers
- Added proper error handling with clear messages when paths are inaccessible

refactor: Centralized token limit validation across all tools
- Added _validate_token_limit method to BaseTool to eliminate code duplication
- Reduced ~25 lines of duplicated code across 5 tools (analyze, chat, debug_issue, review_code, think_deeper)
- Maintains exact same error messages and behavior

feat: Enhanced large prompt handling
- Added support for prompts >50K chars by requesting file-based input
- Preserves MCP's ~25K token capacity for responses
- All tools now check prompt size before processing

test: Added comprehensive Docker path integration tests
- Tests for path translation, security validation, and error handling
- Tests for review_changes tool specifically with Docker paths
- Fixed failing think_deeper test (updated default from "max" to "high")

chore: Code quality improvements
- Applied black formatting across all files
- Fixed import sorting with isort
- All tests passing (96 tests)
- Standardized error handling follows MCP TextContent format

The changes ensure consistent behavior across all environments while reducing code duplication and improving maintainability.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
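For orientation, the large-prompt handshake described above works roughly as sketched below. This is a minimal standalone illustration, not the project's actual module: the MCP_PROMPT_SIZE_LIMIT value, the message text, and the direct file read are simplified stand-ins for config.py and utils.file_utils, whose contents are not part of this diff. The real implementations (check_prompt_size and handle_prompt_file on BaseTool) appear in full in the tools/base.py hunks further down.

```python
# Minimal standalone sketch of the large-prompt handshake; names and values
# are simplified stand-ins, not the project's actual config or utilities.
import os
from typing import List, Optional

MCP_PROMPT_SIZE_LIMIT = 50_000  # characters (~10-12K tokens), assumed for this sketch


def check_prompt_size(text: str) -> Optional[dict]:
    """Return a 'requires_file_prompt' payload when the prompt is too large."""
    if text and len(text) > MCP_PROMPT_SIZE_LIMIT:
        return {
            "status": "requires_file_prompt",
            "content": "Prompt too large; save it to 'prompt.txt' and resend "
                       "with the file's absolute path in the files parameter.",
            "metadata": {"prompt_size": len(text), "limit": MCP_PROMPT_SIZE_LIMIT},
        }
    return None


def handle_prompt_file(files: Optional[List[str]]) -> tuple[Optional[str], Optional[List[str]]]:
    """Pull prompt.txt out of the file list and return its content separately."""
    if not files:
        return None, files
    prompt_content, remaining = None, []
    for path in files:
        if os.path.basename(path) == "prompt.txt":
            try:
                with open(path, "r", encoding="utf-8") as f:
                    prompt_content = f.read()
            except OSError:
                pass  # an unreadable prompt.txt is simply skipped in this sketch
        else:
            remaining.append(path)
    return prompt_content, remaining or None


# Usage: an oversized prompt triggers the file-based fallback instead of failing.
assert check_prompt_size("x" * 60_000)["status"] == "requires_file_prompt"
assert check_prompt_size("short prompt") is None
```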
tools/__init__.py
@@ -5,8 +5,8 @@ Tool implementations for Gemini MCP Server

 from .analyze import AnalyzeTool
 from .chat import ChatTool
 from .debug_issue import DebugIssueTool
-from .review_code import ReviewCodeTool
 from .review_changes import ReviewChanges
+from .review_code import ReviewCodeTool
 from .think_deeper import ThinkDeeperTool

 __all__ = [
tools/analyze.py
@@ -4,13 +4,15 @@ Analyze tool - General-purpose code and file analysis

 from typing import Any, Dict, List, Optional

 from mcp.types import TextContent
 from pydantic import Field

-from config import MAX_CONTEXT_TOKENS, TEMPERATURE_ANALYTICAL
+from config import TEMPERATURE_ANALYTICAL
 from prompts import ANALYZE_PROMPT
-from utils import check_token_limit, read_files
+from utils import read_files

 from .base import BaseTool, ToolRequest
+from .models import ToolOutput


 class AnalyzeRequest(ToolRequest):
@@ -99,18 +101,42 @@ class AnalyzeTool(BaseTool):
     def get_request_model(self):
         return AnalyzeRequest

+    async def execute(self, arguments: Dict[str, Any]) -> List[TextContent]:
+        """Override execute to check question size before processing"""
+        # First validate request
+        request_model = self.get_request_model()
+        request = request_model(**arguments)
+
+        # Check question size
+        size_check = self.check_prompt_size(request.question)
+        if size_check:
+            return [
+                TextContent(
+                    type="text", text=ToolOutput(**size_check).model_dump_json()
+                )
+            ]
+
+        # Continue with normal execution
+        return await super().execute(arguments)
+
     async def prepare_prompt(self, request: AnalyzeRequest) -> str:
         """Prepare the analysis prompt"""
+        # Check for prompt.txt in files
+        prompt_content, updated_files = self.handle_prompt_file(request.files)
+
+        # If prompt.txt was found, use it as the question
+        if prompt_content:
+            request.question = prompt_content
+
+        # Update request files list
+        if updated_files is not None:
+            request.files = updated_files
+
         # Read all files
         file_content, summary = read_files(request.files)

         # Check token limits
-        within_limit, estimated_tokens = check_token_limit(file_content)
-        if not within_limit:
-            raise ValueError(
-                f"Files too large (~{estimated_tokens:,} tokens). "
-                f"Maximum is {MAX_CONTEXT_TOKENS:,} tokens."
-            )
+        self._validate_token_limit(file_content, "Files")

         # Build analysis instructions
         analysis_focus = []
tools/base.py
@@ -13,17 +13,20 @@ Key responsibilities:
 - Support for clarification requests when more information is needed
 """

-from abc import ABC, abstractmethod
-from typing import Any, Dict, List, Optional, Literal
-import os
 import json
+import os
+from abc import ABC, abstractmethod
+from typing import Any, Dict, List, Literal, Optional

 from google import genai
 from google.genai import types
 from mcp.types import TextContent
 from pydantic import BaseModel, Field

-from .models import ToolOutput, ClarificationRequest
+from config import MCP_PROMPT_SIZE_LIMIT
+from utils.file_utils import read_file_content
+
+from .models import ClarificationRequest, ToolOutput


 class ToolRequest(BaseModel):
@@ -194,6 +197,80 @@ class BaseTool(ABC):

         return None

+    def check_prompt_size(self, text: str) -> Optional[Dict[str, Any]]:
+        """
+        Check if a text field is too large for MCP's token limits.
+
+        The MCP protocol has a combined request+response limit of ~25K tokens.
+        To ensure adequate space for responses, we limit prompt input to a
+        configurable character limit (default 50K chars ~= 10-12K tokens).
+        Larger prompts are handled by having Claude save them to a file,
+        bypassing MCP's token constraints while preserving response capacity.
+
+        Args:
+            text: The text to check
+
+        Returns:
+            Optional[Dict[str, Any]]: Response asking for file handling if too large, None otherwise
+        """
+        if text and len(text) > MCP_PROMPT_SIZE_LIMIT:
+            return {
+                "status": "requires_file_prompt",
+                "content": (
+                    f"The prompt is too large for MCP's token limits (>{MCP_PROMPT_SIZE_LIMIT:,} characters). "
+                    "Please save the prompt text to a temporary file named 'prompt.txt' and "
+                    "resend the request with an empty prompt string and the absolute file path included "
+                    "in the files parameter, along with any other files you wish to share as context."
+                ),
+                "content_type": "text",
+                "metadata": {
+                    "prompt_size": len(text),
+                    "limit": MCP_PROMPT_SIZE_LIMIT,
+                    "instructions": "Save prompt to 'prompt.txt' and include absolute path in files parameter",
+                },
+            }
+        return None
+
+    def handle_prompt_file(
+        self, files: Optional[List[str]]
+    ) -> tuple[Optional[str], Optional[List[str]]]:
+        """
+        Check for and handle prompt.txt in the files list.
+
+        If prompt.txt is found, reads its content and removes it from the files list.
+        This file is treated specially as the main prompt, not as an embedded file.
+
+        This mechanism allows us to work around MCP's ~25K token limit by having
+        Claude save large prompts to a file, effectively using the file transfer
+        mechanism to bypass token constraints while preserving response capacity.
+
+        Args:
+            files: List of file paths
+
+        Returns:
+            tuple: (prompt_content, updated_files_list)
+        """
+        if not files:
+            return None, files
+
+        prompt_content = None
+        updated_files = []
+
+        for file_path in files:
+            # Check if the filename is exactly "prompt.txt"
+            # This ensures we don't match files like "myprompt.txt" or "prompt.txt.bak"
+            if os.path.basename(file_path) == "prompt.txt":
+                try:
+                    prompt_content = read_file_content(file_path)
+                except Exception:
+                    # If we can't read the file, we'll just skip it
+                    # The error will be handled elsewhere
+                    pass
+            else:
+                updated_files.append(file_path)
+
+        return prompt_content, updated_files if updated_files else None
+
     async def execute(self, arguments: Dict[str, Any]) -> List[TextContent]:
         """
         Execute the tool with the provided arguments.
@@ -378,6 +455,30 @@
         """
         return response

+    def _validate_token_limit(self, text: str, context_type: str = "Context") -> None:
+        """
+        Validate token limit and raise ValueError if exceeded.
+
+        This centralizes the token limit check that was previously duplicated
+        in all prepare_prompt methods across tools.
+
+        Args:
+            text: The text to check
+            context_type: Description of what's being checked (for error message)
+
+        Raises:
+            ValueError: If text exceeds MAX_CONTEXT_TOKENS
+        """
+        from config import MAX_CONTEXT_TOKENS
+        from utils import check_token_limit
+
+        within_limit, estimated_tokens = check_token_limit(text)
+        if not within_limit:
+            raise ValueError(
+                f"{context_type} too large (~{estimated_tokens:,} tokens). "
+                f"Maximum is {MAX_CONTEXT_TOKENS:,} tokens."
+            )
+
     def create_model(
         self, model_name: str, temperature: float, thinking_mode: str = "medium"
     ):
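Each per-tool hunk below then collapses the previously duplicated size check into a single call on the base class. As a rough, self-contained illustration of what that centralized check amounts to (the MAX_CONTEXT_TOKENS value and the 4-characters-per-token estimate here are assumptions for the sketch, not values taken from config.py or utils):

```python
# Rough sketch of the centralized check the hunks below swap in; the limit
# and the token estimate are illustrative assumptions, not project values.
MAX_CONTEXT_TOKENS = 1_000_000


def validate_token_limit(text: str, context_type: str = "Context") -> None:
    estimated_tokens = len(text) // 4  # crude chars-per-token estimate for the sketch
    if estimated_tokens > MAX_CONTEXT_TOKENS:
        raise ValueError(
            f"{context_type} too large (~{estimated_tokens:,} tokens). "
            f"Maximum is {MAX_CONTEXT_TOKENS:,} tokens."
        )


validate_token_limit("small input", "Files")  # passes silently; large input raises
```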
tools/chat.py
@@ -4,13 +4,15 @@ Chat tool - General development chat and collaborative thinking

 from typing import Any, Dict, List, Optional

 from mcp.types import TextContent
 from pydantic import Field

-from config import MAX_CONTEXT_TOKENS, TEMPERATURE_BALANCED
+from config import TEMPERATURE_BALANCED
 from prompts import CHAT_PROMPT
-from utils import check_token_limit, read_files
+from utils import read_files

 from .base import BaseTool, ToolRequest
+from .models import ToolOutput


 class ChatRequest(ToolRequest):
@@ -79,22 +81,43 @@ class ChatTool(BaseTool):
     def get_request_model(self):
         return ChatRequest

+    async def execute(self, arguments: Dict[str, Any]) -> List[TextContent]:
+        """Override execute to check prompt size before processing"""
+        # First validate request
+        request_model = self.get_request_model()
+        request = request_model(**arguments)
+
+        # Check prompt size
+        size_check = self.check_prompt_size(request.prompt)
+        if size_check:
+            return [
+                TextContent(
+                    type="text", text=ToolOutput(**size_check).model_dump_json()
+                )
+            ]
+
+        # Continue with normal execution
+        return await super().execute(arguments)
+
     async def prepare_prompt(self, request: ChatRequest) -> str:
         """Prepare the chat prompt with optional context files"""
-        user_content = request.prompt
+        # Check for prompt.txt in files
+        prompt_content, updated_files = self.handle_prompt_file(request.files)
+
+        # Use prompt.txt content if available, otherwise use the prompt field
+        user_content = prompt_content if prompt_content else request.prompt
+
+        # Update request files list
+        if updated_files is not None:
+            request.files = updated_files

         # Add context files if provided
         if request.files:
             file_content, _ = read_files(request.files)
-            user_content = f"{request.prompt}\n\n=== CONTEXT FILES ===\n{file_content}\n=== END CONTEXT ==="
+            user_content = f"{user_content}\n\n=== CONTEXT FILES ===\n{file_content}\n=== END CONTEXT ===="

         # Check token limits
-        within_limit, estimated_tokens = check_token_limit(user_content)
-        if not within_limit:
-            raise ValueError(
-                f"Content too large (~{estimated_tokens:,} tokens). "
-                f"Maximum is {MAX_CONTEXT_TOKENS:,} tokens."
-            )
+        self._validate_token_limit(user_content, "Content")

         # Combine system prompt with user content
         full_prompt = f"""{self.get_system_prompt()}
tools/debug_issue.py
@@ -4,13 +4,15 @@ Debug Issue tool - Root cause analysis and debugging assistance

 from typing import Any, Dict, List, Optional

 from mcp.types import TextContent
 from pydantic import Field

-from config import MAX_CONTEXT_TOKENS, TEMPERATURE_ANALYTICAL
+from config import TEMPERATURE_ANALYTICAL
 from prompts import DEBUG_ISSUE_PROMPT
-from utils import check_token_limit, read_files
+from utils import read_files

 from .base import BaseTool, ToolRequest
+from .models import ToolOutput


 class DebugIssueRequest(ToolRequest):
@@ -98,8 +100,51 @@ class DebugIssueTool(BaseTool):
     def get_request_model(self):
         return DebugIssueRequest

+    async def execute(self, arguments: Dict[str, Any]) -> List[TextContent]:
+        """Override execute to check error_description and error_context size before processing"""
+        # First validate request
+        request_model = self.get_request_model()
+        request = request_model(**arguments)
+
+        # Check error_description size
+        size_check = self.check_prompt_size(request.error_description)
+        if size_check:
+            return [
+                TextContent(
+                    type="text", text=ToolOutput(**size_check).model_dump_json()
+                )
+            ]
+
+        # Check error_context size if provided
+        if request.error_context:
+            size_check = self.check_prompt_size(request.error_context)
+            if size_check:
+                return [
+                    TextContent(
+                        type="text", text=ToolOutput(**size_check).model_dump_json()
+                    )
+                ]
+
+        # Continue with normal execution
+        return await super().execute(arguments)
+
     async def prepare_prompt(self, request: DebugIssueRequest) -> str:
         """Prepare the debugging prompt"""
+        # Check for prompt.txt in files
+        prompt_content, updated_files = self.handle_prompt_file(request.files)
+
+        # If prompt.txt was found, use it as error_description or error_context
+        # Priority: if error_description is empty, use it there, otherwise use as error_context
+        if prompt_content:
+            if not request.error_description or request.error_description == "":
+                request.error_description = prompt_content
+            else:
+                request.error_context = prompt_content
+
+        # Update request files list
+        if updated_files is not None:
+            request.files = updated_files
+
         # Build context sections
         context_parts = [
             f"=== ISSUE DESCRIPTION ===\n{request.error_description}\n=== END DESCRIPTION ==="
@@ -130,12 +175,7 @@ class DebugIssueTool(BaseTool):
         full_context = "\n".join(context_parts)

         # Check token limits
-        within_limit, estimated_tokens = check_token_limit(full_context)
-        if not within_limit:
-            raise ValueError(
-                f"Context too large (~{estimated_tokens:,} tokens). "
-                f"Maximum is {MAX_CONTEXT_TOKENS:,} tokens."
-            )
+        self._validate_token_limit(full_context, "Context")

         # Combine everything
         full_prompt = f"""{self.get_system_prompt()}
tools/models.py
@@ -2,14 +2,17 @@
 Data models for tool responses and interactions
 """

+from typing import Any, Dict, List, Literal, Optional
+
 from pydantic import BaseModel, Field
-from typing import Literal, Optional, Dict, Any, List


 class ToolOutput(BaseModel):
     """Standardized output format for all tools"""

-    status: Literal["success", "error", "requires_clarification"] = "success"
+    status: Literal[
+        "success", "error", "requires_clarification", "requires_file_prompt"
+    ] = "success"
     content: str = Field(..., description="The main content/response from the tool")
     content_type: Literal["text", "markdown", "json"] = "text"
     metadata: Optional[Dict[str, Any]] = Field(default_factory=dict)
tools/review_changes.py
@@ -6,15 +6,18 @@ import os
 import re
 from typing import Any, Dict, List, Literal, Optional

 from mcp.types import TextContent
 from pydantic import Field

 from config import MAX_CONTEXT_TOKENS
 from prompts.tool_prompts import REVIEW_CHANGES_PROMPT
-from utils.file_utils import read_files
-from utils.git_utils import find_git_repositories, get_git_status, run_git_command
+from utils.file_utils import _get_secure_container_path, read_files
+from utils.git_utils import (find_git_repositories, get_git_status,
                              run_git_command)
 from utils.token_utils import estimate_tokens

 from .base import BaseTool, ToolRequest
+from .models import ToolOutput


 class ReviewChangesRequest(ToolRequest):
@@ -111,10 +114,51 @@ class ReviewChanges(BaseTool):
         # Limit length to avoid filesystem issues
         return name[:100]

+    async def execute(self, arguments: Dict[str, Any]) -> List[TextContent]:
+        """Override execute to check original_request size before processing"""
+        # First validate request
+        request_model = self.get_request_model()
+        request = request_model(**arguments)
+
+        # Check original_request size if provided
+        if request.original_request:
+            size_check = self.check_prompt_size(request.original_request)
+            if size_check:
+                return [
+                    TextContent(
+                        type="text", text=ToolOutput(**size_check).model_dump_json()
+                    )
+                ]
+
+        # Continue with normal execution
+        return await super().execute(arguments)
+
     async def prepare_prompt(self, request: ReviewChangesRequest) -> str:
         """Prepare the prompt with git diff information."""
+        # Check for prompt.txt in files
+        prompt_content, updated_files = self.handle_prompt_file(request.files)
+
+        # If prompt.txt was found, use it as original_request
+        if prompt_content:
+            request.original_request = prompt_content
+
+        # Update request files list
+        if updated_files is not None:
+            request.files = updated_files
+
+        # Translate the path if running in Docker
+        translated_path = _get_secure_container_path(request.path)
+
+        # Check if the path translation resulted in an error path
+        if translated_path.startswith("/inaccessible/"):
+            raise ValueError(
+                f"The path '{request.path}' is not accessible from within the Docker container. "
+                f"The Docker container can only access files within the mounted workspace. "
+                f"Please ensure the path is within the mounted directory or adjust your Docker volume mounts."
+            )
+
         # Find all git repositories
-        repositories = find_git_repositories(request.path, request.max_depth)
+        repositories = find_git_repositories(translated_path, request.max_depth)

         if not repositories:
             return "No git repositories found in the specified path."
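The Docker fix above hinges on translating a host path into the path visible inside the container before any git discovery runs. Below is a hypothetical sketch of that idea for illustration only: the real logic is _get_secure_container_path in utils/file_utils.py, which this diff imports but does not show, and the workspace paths are invented for the example.

```python
# Hypothetical host -> container path translation; NOT the project's actual
# _get_secure_container_path. HOST_WORKSPACE and CONTAINER_WORKSPACE are
# assumed mount points chosen only to illustrate the "/inaccessible/" check.
import os

HOST_WORKSPACE = "/Users/dev/project"   # directory mounted into the container
CONTAINER_WORKSPACE = "/workspace"      # mount point inside the container


def translate_host_path(host_path: str) -> str:
    """Map a host path to its container equivalent, or flag it as inaccessible."""
    normalized = os.path.normpath(host_path)
    if normalized == HOST_WORKSPACE or normalized.startswith(HOST_WORKSPACE + os.sep):
        relative = os.path.relpath(normalized, HOST_WORKSPACE)
        return os.path.normpath(os.path.join(CONTAINER_WORKSPACE, relative))
    # Anything outside the mounted workspace cannot be read from the container.
    return "/inaccessible/" + normalized.lstrip("/")


# A path under the mount translates cleanly; one outside it is rejected,
# which is the condition prepare_prompt() above turns into a clear error.
assert translate_host_path("/Users/dev/project/src/main.py") == "/workspace/src/main.py"
assert translate_host_path("/etc/passwd").startswith("/inaccessible/")
```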
tools/review_code.py
@@ -16,13 +16,15 @@ Key Features:

 from typing import Any, Dict, List, Optional

 from mcp.types import TextContent
 from pydantic import Field

-from config import MAX_CONTEXT_TOKENS, TEMPERATURE_ANALYTICAL
+from config import TEMPERATURE_ANALYTICAL
 from prompts import REVIEW_CODE_PROMPT
-from utils import check_token_limit, read_files
+from utils import read_files

 from .base import BaseTool, ToolRequest
+from .models import ToolOutput


 class ReviewCodeRequest(ToolRequest):
@@ -128,6 +130,25 @@ class ReviewCodeTool(BaseTool):
     def get_request_model(self):
         return ReviewCodeRequest

+    async def execute(self, arguments: Dict[str, Any]) -> List[TextContent]:
+        """Override execute to check focus_on size before processing"""
+        # First validate request
+        request_model = self.get_request_model()
+        request = request_model(**arguments)
+
+        # Check focus_on size if provided
+        if request.focus_on:
+            size_check = self.check_prompt_size(request.focus_on)
+            if size_check:
+                return [
+                    TextContent(
+                        type="text", text=ToolOutput(**size_check).model_dump_json()
+                    )
+                ]
+
+        # Continue with normal execution
+        return await super().execute(arguments)
+
     async def prepare_prompt(self, request: ReviewCodeRequest) -> str:
         """
         Prepare the code review prompt with customized instructions.
@@ -144,16 +165,22 @@ class ReviewCodeTool(BaseTool):
         Raises:
             ValueError: If the code exceeds token limits
         """
+        # Check for prompt.txt in files
+        prompt_content, updated_files = self.handle_prompt_file(request.files)
+
+        # If prompt.txt was found, use it as focus_on
+        if prompt_content:
+            request.focus_on = prompt_content
+
+        # Update request files list
+        if updated_files is not None:
+            request.files = updated_files
+
         # Read all requested files, expanding directories as needed
         file_content, summary = read_files(request.files)

         # Validate that the code fits within model context limits
-        within_limit, estimated_tokens = check_token_limit(file_content)
-        if not within_limit:
-            raise ValueError(
-                f"Code too large (~{estimated_tokens:,} tokens). "
-                f"Maximum is {MAX_CONTEXT_TOKENS:,} tokens."
-            )
+        self._validate_token_limit(file_content, "Code")

         # Build customized review instructions based on review type
         review_focus = []
tools/think_deeper.py
@@ -4,13 +4,15 @@ Think Deeper tool - Extended reasoning and problem-solving

 from typing import Any, Dict, List, Optional

 from mcp.types import TextContent
 from pydantic import Field

-from config import MAX_CONTEXT_TOKENS, TEMPERATURE_CREATIVE
+from config import TEMPERATURE_CREATIVE
 from prompts import THINK_DEEPER_PROMPT
-from utils import check_token_limit, read_files
+from utils import read_files

 from .base import BaseTool, ToolRequest
+from .models import ToolOutput


 class ThinkDeeperRequest(ToolRequest):
@@ -44,7 +46,11 @@ class ThinkDeeperTool(BaseTool):
             "Use this when you need to extend your analysis, explore alternatives, or validate approaches. "
             "Perfect for: architecture decisions, complex bugs, performance challenges, security analysis. "
             "Triggers: 'think deeper', 'ultrathink', 'extend my analysis', 'explore alternatives'. "
-            "I'll challenge assumptions, find edge cases, and provide alternative solutions."
+            "I'll challenge assumptions, find edge cases, and provide alternative solutions. "
+            "IMPORTANT: Choose the appropriate thinking_mode based on task complexity - "
+            "'low' for quick analysis, 'medium' for standard problems, 'high' for complex issues (default), "
+            "'max' for extremely complex challenges requiring deepest analysis. "
+            "When in doubt, err on the side of a higher mode for truly deep thought and evaluation."
         )

     def get_input_schema(self) -> Dict[str, Any]:
@@ -79,7 +85,7 @@ class ThinkDeeperTool(BaseTool):
                     "type": "string",
                     "enum": ["minimal", "low", "medium", "high", "max"],
                     "description": "Thinking depth: minimal (128), low (2048), medium (8192), high (16384), max (32768)",
-                    "default": "max",
+                    "default": "high",
                 },
             },
             "required": ["current_analysis"],
@@ -92,17 +98,47 @@ class ThinkDeeperTool(BaseTool):
         return TEMPERATURE_CREATIVE

     def get_default_thinking_mode(self) -> str:
-        """ThinkDeeper uses maximum thinking by default"""
-        return "max"
+        """ThinkDeeper uses high thinking by default"""
+        return "high"

     def get_request_model(self):
         return ThinkDeeperRequest

+    async def execute(self, arguments: Dict[str, Any]) -> List[TextContent]:
+        """Override execute to check current_analysis size before processing"""
+        # First validate request
+        request_model = self.get_request_model()
+        request = request_model(**arguments)
+
+        # Check current_analysis size
+        size_check = self.check_prompt_size(request.current_analysis)
+        if size_check:
+            return [
+                TextContent(
+                    type="text", text=ToolOutput(**size_check).model_dump_json()
+                )
+            ]
+
+        # Continue with normal execution
+        return await super().execute(arguments)
+
     async def prepare_prompt(self, request: ThinkDeeperRequest) -> str:
         """Prepare the full prompt for extended thinking"""
+        # Check for prompt.txt in files
+        prompt_content, updated_files = self.handle_prompt_file(request.files)
+
+        # Use prompt.txt content if available, otherwise use the current_analysis field
+        current_analysis = (
+            prompt_content if prompt_content else request.current_analysis
+        )
+
+        # Update request files list
+        if updated_files is not None:
+            request.files = updated_files
+
         # Build context parts
         context_parts = [
-            f"=== CLAUDE'S CURRENT ANALYSIS ===\n{request.current_analysis}\n=== END ANALYSIS ==="
+            f"=== CLAUDE'S CURRENT ANALYSIS ===\n{current_analysis}\n=== END ANALYSIS ==="
         ]

         if request.problem_context:
@@ -120,12 +156,7 @@ class ThinkDeeperTool(BaseTool):
         full_context = "\n".join(context_parts)

         # Check token limits
-        within_limit, estimated_tokens = check_token_limit(full_context)
-        if not within_limit:
-            raise ValueError(
-                f"Context too large (~{estimated_tokens:,} tokens). "
-                f"Maximum is {MAX_CONTEXT_TOKENS:,} tokens."
-            )
+        self._validate_token_limit(full_context, "Context")

         # Add focus areas instruction if specified
         focus_instruction = ""
@@ -150,5 +181,33 @@ Please provide deep analysis that extends Claude's thinking with:
         return full_prompt

     def format_response(self, response: str, request: ThinkDeeperRequest) -> str:
-        """Format the response with clear attribution"""
-        return f"Extended Analysis by Gemini:\n\n{response}"
+        """Format the response with clear attribution and critical thinking prompt"""
+        return f"""## Extended Analysis by Gemini
+
+{response}
+
+---
+
+## Critical Evaluation Required
+
+Claude, now that you've received Gemini's extended analysis, please:
+
+1. **Critically evaluate each suggestion** - Which points are truly valuable? Which might have limitations or trade-offs?
+
+2. **Consider technical constraints** - How do these suggestions fit with:
+   - Existing codebase patterns and conventions
+   - Performance and scalability requirements
+   - Security implications and best practices
+   - Architecture and design principles
+
+3. **Identify potential risks** - What could go wrong with each approach? Are there hidden complexities or edge cases?
+
+4. **Synthesize your final recommendation** - Based on:
+   - Your original analysis
+   - Gemini's suggestions and critiques
+   - Technical feasibility and correctness
+   - A balanced assessment of trade-offs
+
+5. **Formulate your conclusion** - What is the best technical solution considering all perspectives?
+
+Remember: Gemini's analysis is meant to challenge and extend your thinking, not replace it. Use these insights to arrive at a more robust, well-considered solution."""