refactor: rename think_deeper to thinkdeep for brevity

- Renamed `think_deeper` tool to `thinkdeep` for shorter, cleaner naming
- Updated all imports from ThinkDeeperTool to ThinkDeepTool
- Updated all references from THINK_DEEPER_PROMPT to THINKDEEP_PROMPT
- Updated tool registration in server.py (sketched below)
- Updated all test files to use new naming convention
- Updated README documentation to reflect new tool names
- All functionality remains the same; only the naming has changed

This completes the tool renaming refactor for improved clarity and consistency.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
commit ba8f7192c3
parent 5f8ed3aae8
Author: Fahad
Date: 2025-06-10 12:38:38 +04:00

12 changed files with 49 additions and 49 deletions
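The server.py and prompt-constant changes mentioned in the commit message are not among the hunks shown below (only the test files are). As a minimal, hypothetical sketch of what that side of the rename likely looks like — assuming server.py keeps a TOOLS registry dict keyed by tool name, which is not confirmed by this page; the tool names themselves match the assertions in the tests below:

# Hypothetical sketch of the server.py side of the rename; the TOOLS
# dict shape is an assumption, not shown in this diff.
from tools.thinkdeep import ThinkDeepTool  # was: from tools.think_deeper import ThinkDeeperTool

TOOLS = {
    "thinkdeep": ThinkDeepTool(),  # was: "think_deeper": ThinkDeeperTool()
    # ... chat, codereview, debug, analyze, precommit entries unchanged
}

# Likewise for the system prompt constant (assumed module-level):
# THINK_DEEPER_PROMPT -> THINKDEEP_PROMPT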

View File

@@ -21,7 +21,7 @@ from tools.chat import ChatTool
from tools.codereview import CodeReviewTool
from tools.debug import DebugIssueTool
from tools.precommit import Precommit
-from tools.think_deeper import ThinkDeeperTool
+from tools.thinkdeep import ThinkDeepTool
class TestLargePromptHandling:
@@ -131,9 +131,9 @@ class TestLargePromptHandling:
shutil.rmtree(temp_dir)
@pytest.mark.asyncio
-async def test_think_deeper_large_analysis(self, large_prompt):
-"""Test that think_deeper tool detects large current_analysis."""
-tool = ThinkDeeperTool()
+async def test_thinkdeep_large_analysis(self, large_prompt):
+"""Test that thinkdeep tool detects large current_analysis."""
+tool = ThinkDeepTool()
result = await tool.execute({"current_analysis": large_prompt})
assert len(result) == 1

View File

@@ -21,7 +21,7 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import json
from tools.analyze import AnalyzeTool
-from tools.think_deeper import ThinkDeeperTool
+from tools.thinkdeep import ThinkDeepTool
async def run_manual_live_tests():
@@ -60,8 +60,8 @@ async def run_manual_live_tests():
print("❌ AnalyzeTool live test failed")
return False
-# Test ThinkDeeperTool
-think_tool = ThinkDeeperTool()
+# Test ThinkDeepTool
+think_tool = ThinkDeepTool()
result = await think_tool.execute(
{
"current_analysis": "Testing live integration",
@@ -70,9 +70,9 @@ async def run_manual_live_tests():
)
if result and result[0].text and "Extended Analysis" in result[0].text:
print("✅ ThinkDeeperTool live test successful")
print("✅ ThinkDeepTool live test successful")
else:
print("❌ ThinkDeeperTool live test failed")
print("❌ ThinkDeepTool live test failed")
return False
# Test collaboration/clarification request

View File

@@ -15,7 +15,7 @@ from tools.chat import ChatTool
from tools.codereview import CodeReviewTool
from tools.debug import DebugIssueTool
from tools.precommit import Precommit
-from tools.think_deeper import ThinkDeeperTool
+from tools.thinkdeep import ThinkDeepTool
class TestPromptRegression:
@@ -79,9 +79,9 @@ class TestPromptRegression:
mock_read_files.assert_called_once_with(["/path/to/file.py"])
@pytest.mark.asyncio
-async def test_think_deeper_normal_analysis(self, mock_model_response):
-"""Test think_deeper tool with normal analysis."""
-tool = ThinkDeeperTool()
+async def test_thinkdeep_normal_analysis(self, mock_model_response):
+"""Test thinkdeep tool with normal analysis."""
+tool = ThinkDeepTool()
with patch.object(tool, "create_model") as mock_create_model:
mock_model = MagicMock()

View File

@@ -19,7 +19,7 @@ class TestServerTools:
tool_names = [tool.name for tool in tools]
# Check all core tools are present
assert "think_deeper" in tool_names
assert "thinkdeep" in tool_names
assert "codereview" in tool_names
assert "debug" in tool_names
assert "analyze" in tool_names
@@ -77,4 +77,4 @@ class TestServerTools:
response = result[0].text
assert "Gemini MCP Server v" in response # Version agnostic check
assert "Available Tools:" in response
assert "think_deeper" in response
assert "thinkdeep" in response

View File

@@ -9,7 +9,7 @@ import pytest
from tools.analyze import AnalyzeTool
from tools.codereview import CodeReviewTool
from tools.debug import DebugIssueTool
-from tools.think_deeper import ThinkDeeperTool
+from tools.thinkdeep import ThinkDeepTool
@pytest.fixture(autouse=True)
@@ -25,7 +25,7 @@ class TestThinkingModes:
def test_default_thinking_modes(self):
"""Test that tools have correct default thinking modes"""
tools = [
(ThinkDeeperTool(), "high"),
(ThinkDeepTool(), "high"),
(AnalyzeTool(), "medium"),
(CodeReviewTool(), "medium"),
(DebugIssueTool(), "medium"),
@@ -145,14 +145,14 @@ class TestThinkingModes:
@pytest.mark.asyncio
@patch("tools.base.BaseTool.create_model")
async def test_thinking_mode_max(self, mock_create_model):
"""Test max thinking mode (default for think_deeper)"""
"""Test max thinking mode (default for thinkdeep)"""
mock_model = Mock()
mock_model.generate_content.return_value = Mock(
candidates=[Mock(content=Mock(parts=[Mock(text="Max thinking response")]))]
)
mock_create_model.return_value = mock_model
-tool = ThinkDeeperTool()
+tool = ThinkDeepTool()
result = await tool.execute(
{
"current_analysis": "Initial analysis",

View File

@@ -7,19 +7,19 @@ from unittest.mock import Mock, patch
import pytest
-from tools import AnalyzeTool, ChatTool, CodeReviewTool, DebugIssueTool, ThinkDeeperTool
+from tools import AnalyzeTool, ChatTool, CodeReviewTool, DebugIssueTool, ThinkDeepTool
-class TestThinkDeeperTool:
-"""Test the think_deeper tool"""
+class TestThinkDeepTool:
+"""Test the thinkdeep tool"""
@pytest.fixture
def tool(self):
-return ThinkDeeperTool()
+return ThinkDeepTool()
def test_tool_metadata(self, tool):
"""Test tool metadata"""
assert tool.get_name() == "think_deeper"
assert tool.get_name() == "thinkdeep"
assert "EXTENDED THINKING" in tool.get_description()
assert tool.get_default_temperature() == 0.7
@@ -249,9 +249,9 @@ class TestAbsolutePathValidation:
assert "src/main.py" in response["content"]
@pytest.mark.asyncio
-async def test_think_deeper_tool_relative_path_rejected(self):
-"""Test that think_deeper tool rejects relative paths"""
-tool = ThinkDeeperTool()
+async def test_thinkdeep_tool_relative_path_rejected(self):
+"""Test that thinkdeep tool rejects relative paths"""
+tool = ThinkDeepTool()
result = await tool.execute({"current_analysis": "My analysis", "files": ["./local/file.py"]})
assert len(result) == 1