refactor: rename think_deeper to thinkdeep for brevity

- Renamed the `think_deeper` tool to `thinkdeep` for shorter, cleaner naming
- Renamed the `ThinkDeeperTool` class to `ThinkDeepTool` and updated all imports
- Renamed `THINK_DEEPER_PROMPT` to `THINKDEEP_PROMPT` and updated all references
- Updated the tool registration in server.py
- Updated all test files to use the new naming convention
- Updated the README documentation to reflect the new tool names
- All functionality remains the same; only the naming has changed

This completes the tool renaming refactor for improved clarity and consistency.
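For downstream code, the change is limited to module, class, and constant names. A minimal before/after sketch follows (the calling script itself is hypothetical, assumes the repository root is importable, and is not part of this commit):

```python
# Hypothetical downstream usage, shown only to illustrate the rename.
# Before this commit:
#   from tools.think_deeper import ThinkDeeperTool
#   from prompts import THINK_DEEPER_PROMPT
# After this commit:
from prompts import THINKDEEP_PROMPT
from tools.thinkdeep import ThinkDeepTool

tool = ThinkDeepTool()
assert tool.get_name() == "thinkdeep"                # registered tool name (was "think_deeper")
assert tool.get_system_prompt() == THINKDEEP_PROMPT  # prompt constant (was THINK_DEEPER_PROMPT)
```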

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
Author: Fahad
Date:   2025-06-10 12:38:38 +04:00
Parent: 5f8ed3aae8
Commit: ba8f7192c3
12 changed files with 49 additions and 49 deletions

View File

@@ -323,7 +323,7 @@ Replace `/path/to/gemini-mcp-server` with the actual path where you cloned the r
 ### 6. Start Using It!
 Just ask Claude naturally:
-- "Use gemini to think deeper about this architecture design" → `think_deeper`
+- "Use gemini to think deeper about this architecture design" → `thinkdeep`
 - "Get gemini to review this code for security issues" → `review_code`
 - "Get gemini to debug why this test is failing" → `debug`
 - "Use gemini to analyze these files to understand the data flow" → `analyze`
@@ -335,9 +335,9 @@ Just ask Claude naturally:
 **Quick Tool Selection Guide:**
 - **Need a thinking partner?** → `chat` (brainstorm ideas, get second opinions, validate approaches)
-- **Need deeper thinking?** → `think_deeper` (extends Claude's analysis, finds edge cases)
-- **Code needs review?** → `review_code` (bugs, security, performance issues)
-- **Pre-commit validation?** → `review_changes` (validate git changes before committing)
+- **Need deeper thinking?** → `thinkdeep` (extends Claude's analysis, finds edge cases)
+- **Code needs review?** → `codereview` (bugs, security, performance issues)
+- **Pre-commit validation?** → `precommit` (validate git changes before committing)
 - **Something's broken?** → `debug` (root cause analysis, error tracing)
 - **Want to understand code?** → `analyze` (architecture, patterns, dependencies)
 - **Server info?** → `get_version` (version and configuration details)

View File

@@ -7,11 +7,11 @@ from .tool_prompts import (
     CHAT_PROMPT,
     CODEREVIEW_PROMPT,
     DEBUG_ISSUE_PROMPT,
-    THINK_DEEPER_PROMPT,
+    THINKDEEP_PROMPT,
 )
 __all__ = [
-    "THINK_DEEPER_PROMPT",
+    "THINKDEEP_PROMPT",
     "CODEREVIEW_PROMPT",
     "DEBUG_ISSUE_PROMPT",
     "ANALYZE_PROMPT",

View File

@@ -2,7 +2,7 @@
 System prompts for each tool
 """
-THINK_DEEPER_PROMPT = """You are a senior development partner collaborating with Claude Code on complex problems.
+THINKDEEP_PROMPT = """You are a senior development partner collaborating with Claude Code on complex problems.
 Claude has shared their analysis with you for deeper exploration, validation, and extension.
 IMPORTANT: If you need additional context (e.g., related files, system architecture, requirements)

View File

@@ -43,7 +43,7 @@ from tools import (
     CodeReviewTool,
     DebugIssueTool,
     Precommit,
-    ThinkDeeperTool,
+    ThinkDeepTool,
 )
 # Configure logging for server operations
@@ -59,7 +59,7 @@ server: Server = Server("gemini-server")
 # Each tool provides specialized functionality for different development tasks
 # Tools are instantiated once and reused across requests (stateless design)
 TOOLS = {
-    "think_deeper": ThinkDeeperTool(),  # Extended reasoning for complex problems
+    "thinkdeep": ThinkDeepTool(),  # Extended reasoning for complex problems
     "codereview": CodeReviewTool(),  # Comprehensive code review and quality analysis
     "debug": DebugIssueTool(),  # Root cause analysis and debugging assistance
     "analyze": AnalyzeTool(),  # General-purpose file and code analysis

View File

@@ -21,7 +21,7 @@ from tools.chat import ChatTool
 from tools.codereview import CodeReviewTool
 from tools.debug import DebugIssueTool
 from tools.precommit import Precommit
-from tools.think_deeper import ThinkDeeperTool
+from tools.thinkdeep import ThinkDeepTool
 class TestLargePromptHandling:
@@ -131,9 +131,9 @@ class TestLargePromptHandling:
         shutil.rmtree(temp_dir)
     @pytest.mark.asyncio
-    async def test_think_deeper_large_analysis(self, large_prompt):
-        """Test that think_deeper tool detects large current_analysis."""
-        tool = ThinkDeeperTool()
+    async def test_thinkdeep_large_analysis(self, large_prompt):
+        """Test that thinkdeep tool detects large current_analysis."""
+        tool = ThinkDeepTool()
         result = await tool.execute({"current_analysis": large_prompt})
         assert len(result) == 1

View File

@@ -21,7 +21,7 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 import json
 from tools.analyze import AnalyzeTool
-from tools.think_deeper import ThinkDeeperTool
+from tools.thinkdeep import ThinkDeepTool
 async def run_manual_live_tests():
@@ -60,8 +60,8 @@ async def run_manual_live_tests():
print("❌ AnalyzeTool live test failed") print("❌ AnalyzeTool live test failed")
return False return False
# Test ThinkDeeperTool # Test ThinkDeepTool
think_tool = ThinkDeeperTool() think_tool = ThinkDeepTool()
result = await think_tool.execute( result = await think_tool.execute(
{ {
"current_analysis": "Testing live integration", "current_analysis": "Testing live integration",
@@ -70,9 +70,9 @@ async def run_manual_live_tests():
     )
     if result and result[0].text and "Extended Analysis" in result[0].text:
-        print("✅ ThinkDeeperTool live test successful")
+        print("✅ ThinkDeepTool live test successful")
     else:
-        print("❌ ThinkDeeperTool live test failed")
+        print("❌ ThinkDeepTool live test failed")
         return False
     # Test collaboration/clarification request

View File

@@ -15,7 +15,7 @@ from tools.chat import ChatTool
 from tools.codereview import CodeReviewTool
 from tools.debug import DebugIssueTool
 from tools.precommit import Precommit
-from tools.think_deeper import ThinkDeeperTool
+from tools.thinkdeep import ThinkDeepTool
 class TestPromptRegression:
@@ -79,9 +79,9 @@ class TestPromptRegression:
         mock_read_files.assert_called_once_with(["/path/to/file.py"])
     @pytest.mark.asyncio
-    async def test_think_deeper_normal_analysis(self, mock_model_response):
-        """Test think_deeper tool with normal analysis."""
-        tool = ThinkDeeperTool()
+    async def test_thinkdeep_normal_analysis(self, mock_model_response):
+        """Test thinkdeep tool with normal analysis."""
+        tool = ThinkDeepTool()
         with patch.object(tool, "create_model") as mock_create_model:
             mock_model = MagicMock()

View File

@@ -19,7 +19,7 @@ class TestServerTools:
         tool_names = [tool.name for tool in tools]
         # Check all core tools are present
-        assert "think_deeper" in tool_names
+        assert "thinkdeep" in tool_names
         assert "codereview" in tool_names
         assert "debug" in tool_names
         assert "analyze" in tool_names
@@ -77,4 +77,4 @@ class TestServerTools:
         response = result[0].text
         assert "Gemini MCP Server v" in response  # Version agnostic check
         assert "Available Tools:" in response
-        assert "think_deeper" in response
+        assert "thinkdeep" in response

View File

@@ -9,7 +9,7 @@ import pytest
 from tools.analyze import AnalyzeTool
 from tools.codereview import CodeReviewTool
 from tools.debug import DebugIssueTool
-from tools.think_deeper import ThinkDeeperTool
+from tools.thinkdeep import ThinkDeepTool
 @pytest.fixture(autouse=True)
@@ -25,7 +25,7 @@ class TestThinkingModes:
     def test_default_thinking_modes(self):
         """Test that tools have correct default thinking modes"""
         tools = [
-            (ThinkDeeperTool(), "high"),
+            (ThinkDeepTool(), "high"),
             (AnalyzeTool(), "medium"),
             (CodeReviewTool(), "medium"),
             (DebugIssueTool(), "medium"),
@@ -145,14 +145,14 @@ class TestThinkingModes:
     @pytest.mark.asyncio
     @patch("tools.base.BaseTool.create_model")
     async def test_thinking_mode_max(self, mock_create_model):
-        """Test max thinking mode (default for think_deeper)"""
+        """Test max thinking mode (default for thinkdeep)"""
         mock_model = Mock()
         mock_model.generate_content.return_value = Mock(
             candidates=[Mock(content=Mock(parts=[Mock(text="Max thinking response")]))]
         )
         mock_create_model.return_value = mock_model
-        tool = ThinkDeeperTool()
+        tool = ThinkDeepTool()
         result = await tool.execute(
             {
                 "current_analysis": "Initial analysis",

View File

@@ -7,19 +7,19 @@ from unittest.mock import Mock, patch
 import pytest
-from tools import AnalyzeTool, ChatTool, CodeReviewTool, DebugIssueTool, ThinkDeeperTool
+from tools import AnalyzeTool, ChatTool, CodeReviewTool, DebugIssueTool, ThinkDeepTool
-class TestThinkDeeperTool:
-    """Test the think_deeper tool"""
+class TestThinkDeepTool:
+    """Test the thinkdeep tool"""
     @pytest.fixture
     def tool(self):
-        return ThinkDeeperTool()
+        return ThinkDeepTool()
     def test_tool_metadata(self, tool):
         """Test tool metadata"""
-        assert tool.get_name() == "think_deeper"
+        assert tool.get_name() == "thinkdeep"
         assert "EXTENDED THINKING" in tool.get_description()
         assert tool.get_default_temperature() == 0.7
@@ -249,9 +249,9 @@ class TestAbsolutePathValidation:
assert "src/main.py" in response["content"] assert "src/main.py" in response["content"]
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_think_deeper_tool_relative_path_rejected(self): async def test_thinkdeep_tool_relative_path_rejected(self):
"""Test that think_deeper tool rejects relative paths""" """Test that thinkdeep tool rejects relative paths"""
tool = ThinkDeeperTool() tool = ThinkDeepTool()
result = await tool.execute({"current_analysis": "My analysis", "files": ["./local/file.py"]}) result = await tool.execute({"current_analysis": "My analysis", "files": ["./local/file.py"]})
assert len(result) == 1 assert len(result) == 1

View File

@@ -7,10 +7,10 @@ from .chat import ChatTool
 from .codereview import CodeReviewTool
 from .debug import DebugIssueTool
 from .precommit import Precommit
-from .think_deeper import ThinkDeeperTool
+from .thinkdeep import ThinkDeepTool
 __all__ = [
-    "ThinkDeeperTool",
+    "ThinkDeepTool",
     "CodeReviewTool",
     "DebugIssueTool",
     "AnalyzeTool",

View File

@@ -1,5 +1,5 @@
""" """
Think Deeper tool - Extended reasoning and problem-solving ThinkDeep tool - Extended reasoning and problem-solving
""" """
from typing import Any, Optional from typing import Any, Optional
@@ -8,15 +8,15 @@ from mcp.types import TextContent
 from pydantic import Field
 from config import TEMPERATURE_CREATIVE
-from prompts import THINK_DEEPER_PROMPT
+from prompts import THINKDEEP_PROMPT
 from utils import read_files
 from .base import BaseTool, ToolRequest
 from .models import ToolOutput
-class ThinkDeeperRequest(ToolRequest):
-    """Request model for think_deeper tool"""
+class ThinkDeepRequest(ToolRequest):
+    """Request model for thinkdeep tool"""
     current_analysis: str = Field(..., description="Claude's current thinking/analysis to extend")
     problem_context: Optional[str] = Field(None, description="Additional context about the problem or goal")
@@ -30,11 +30,11 @@ class ThinkDeeperRequest(ToolRequest):
     )
-class ThinkDeeperTool(BaseTool):
+class ThinkDeepTool(BaseTool):
     """Extended thinking and reasoning tool"""
     def get_name(self) -> str:
-        return "think_deeper"
+        return "thinkdeep"
     def get_description(self) -> str:
         return (
@@ -92,17 +92,17 @@ class ThinkDeeperTool(BaseTool):
         }
     def get_system_prompt(self) -> str:
-        return THINK_DEEPER_PROMPT
+        return THINKDEEP_PROMPT
     def get_default_temperature(self) -> float:
         return TEMPERATURE_CREATIVE
     def get_default_thinking_mode(self) -> str:
-        """ThinkDeeper uses high thinking by default"""
+        """ThinkDeep uses high thinking by default"""
         return "high"
     def get_request_model(self):
-        return ThinkDeeperRequest
+        return ThinkDeepRequest
     async def execute(self, arguments: dict[str, Any]) -> list[TextContent]:
         """Override execute to check current_analysis size before processing"""
@@ -118,7 +118,7 @@ class ThinkDeeperTool(BaseTool):
         # Continue with normal execution
         return await super().execute(arguments)
-    async def prepare_prompt(self, request: ThinkDeeperRequest) -> str:
+    async def prepare_prompt(self, request: ThinkDeepRequest) -> str:
         """Prepare the full prompt for extended thinking"""
         # Check for prompt.txt in files
         prompt_content, updated_files = self.handle_prompt_file(request.files)
@@ -176,7 +176,7 @@ Please provide deep analysis that extends Claude's thinking with:
         return full_prompt
-    def format_response(self, response: str, request: ThinkDeeperRequest) -> str:
+    def format_response(self, response: str, request: ThinkDeepRequest) -> str:
         """Format the response with clear attribution and critical thinking prompt"""
         return f"""## Extended Analysis by Gemini