style: fix linting and formatting issues
- Run black formatter on all Python files
- Fix ruff linting issues:
  - Remove unused imports
  - Remove unused variables
  - Fix f-string without placeholders
- All 37 tests still pass
- Code quality improved for CI/CD compliance

🧹 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
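For reference, the three ruff fix categories named above look roughly like this (a hypothetical snippet for illustration, not code from this repository):

```python
# Before: each line trips a ruff rule.
import os                 # F401: imported but unused -> deleted by the fix
status = "ok"             # F841: local assigned but never used -> removed
print(f"All tests pass")  # F541: f-string without placeholders

# After `ruff check --fix`, only the working line remains, as a plain string:
print("All tests pass")
```

black then accounts for the layout-only changes seen throughout the diff below: splitting calls and literals that exceed its 88-column limit, adding trailing commas to multi-line collections, and normalizing quotes to double quotes.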
@@ -9,7 +9,9 @@ __author__ = "Fahad Gilani"

 # Model configuration
 DEFAULT_MODEL = "gemini-2.5-pro-preview-06-05"
-THINKING_MODEL = "gemini-2.0-flash-thinking-exp"  # Enhanced reasoning model for think_deeper
+THINKING_MODEL = (
+    "gemini-2.0-flash-thinking-exp"  # Enhanced reasoning model for think_deeper
+)
 MAX_CONTEXT_TOKENS = 1_000_000  # 1M tokens for Gemini Pro

 # Temperature defaults for different tool types
@@ -2,8 +2,13 @@
 System prompts for Gemini tools
 """

-from .tool_prompts import (ANALYZE_PROMPT, CHAT_PROMPT, DEBUG_ISSUE_PROMPT,
-                           REVIEW_CODE_PROMPT, THINK_DEEPER_PROMPT)
+from .tool_prompts import (
+    ANALYZE_PROMPT,
+    CHAT_PROMPT,
+    DEBUG_ISSUE_PROMPT,
+    REVIEW_CODE_PROMPT,
+    THINK_DEEPER_PROMPT,
+)

 __all__ = [
     "THINK_DEEPER_PROMPT",

server.py (66 changed lines)

@@ -10,14 +10,18 @@ from datetime import datetime
 from typing import Any, Dict, List

 from google import genai
-from google.genai import types
 from mcp.server import Server
 from mcp.server.models import InitializationOptions
 from mcp.server.stdio import stdio_server
 from mcp.types import TextContent, Tool

-from config import (DEFAULT_MODEL, MAX_CONTEXT_TOKENS, __author__, __updated__,
-                    __version__)
+from config import (
+    DEFAULT_MODEL,
+    MAX_CONTEXT_TOKENS,
+    __author__,
+    __updated__,
+    __version__,
+)
 from tools import AnalyzeTool, DebugIssueTool, ReviewCodeTool, ThinkDeeperTool

 # Configure logging
@@ -125,9 +129,7 @@ async def handle_list_tools() -> List[Tool]:


 @server.call_tool()
-async def handle_call_tool(
-    name: str, arguments: Dict[str, Any]
-) -> List[TextContent]:
+async def handle_call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
     """Handle tool execution requests"""

     # Handle dynamic tools
@@ -151,7 +153,7 @@ async def handle_call_tool(

 async def handle_chat(arguments: Dict[str, Any]) -> List[TextContent]:
     """Handle general chat requests"""
-    from config import TEMPERATURE_BALANCED, DEFAULT_MODEL, THINKING_MODEL
+    from config import TEMPERATURE_BALANCED, DEFAULT_MODEL
     from prompts import CHAT_PROMPT
     from utils import read_files

@@ -164,7 +166,9 @@ async def handle_chat(arguments: Dict[str, Any]) -> List[TextContent]:
     user_content = prompt
     if context_files:
         file_content, _ = read_files(context_files)
-        user_content = f"{prompt}\n\n=== CONTEXT FILES ===\n{file_content}\n=== END CONTEXT ==="
+        user_content = (
+            f"{prompt}\n\n=== CONTEXT FILES ===\n{file_content}\n=== END CONTEXT ==="
+        )

     # Combine system prompt with user content
     full_prompt = f"{CHAT_PROMPT}\n\n=== USER REQUEST ===\n{user_content}\n=== END REQUEST ===\n\nPlease provide a thoughtful, comprehensive response:"
@@ -175,12 +179,23 @@

     # Create a temporary tool instance to use create_model method
     class TempTool(BaseTool):
-        def get_name(self): return "chat"
-        def get_description(self): return ""
-        def get_input_schema(self): return {}
-        def get_system_prompt(self): return ""
-        def get_request_model(self): return None
-        async def prepare_prompt(self, request): return ""
+        def get_name(self):
+            return "chat"
+
+        def get_description(self):
+            return ""
+
+        def get_input_schema(self):
+            return {}
+
+        def get_system_prompt(self):
+            return ""
+
+        def get_request_model(self):
+            return None
+
+        async def prepare_prompt(self, request):
+            return ""

     temp_tool = TempTool()
     model = temp_tool.create_model(DEFAULT_MODEL, temperature, thinking_mode)
@@ -218,13 +233,21 @@ async def handle_list_models() -> List[TextContent]:
             models.append(
                 {
                     "name": getattr(model_info, "id", "Unknown"),
-                    "display_name": getattr(model_info, "display_name", getattr(model_info, "id", "Unknown")),
-                    "description": getattr(model_info, "description", "No description"),
-                    "is_default": getattr(model_info, "id", "").endswith(DEFAULT_MODEL),
+                    "display_name": getattr(
+                        model_info,
+                        "display_name",
+                        getattr(model_info, "id", "Unknown"),
+                    ),
+                    "description": getattr(
+                        model_info, "description", "No description"
+                    ),
+                    "is_default": getattr(model_info, "id", "").endswith(
+                        DEFAULT_MODEL
+                    ),
                 }
             )

-    except Exception as e:
+    except Exception:
         # Fallback: return some known models
         models = [
             {
@@ -244,9 +267,7 @@ async def handle_list_models() -> List[TextContent]:
         return [TextContent(type="text", text=json.dumps(models, indent=2))]

     except Exception as e:
-        return [
-            TextContent(type="text", text=f"Error listing models: {str(e)}")
-        ]
+        return [TextContent(type="text", text=f"Error listing models: {str(e)}")]


 async def handle_get_version() -> List[TextContent]:
@@ -259,8 +280,7 @@ async def handle_get_version() -> List[TextContent]:
         "max_context_tokens": f"{MAX_CONTEXT_TOKENS:,}",
         "python_version": f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}",
         "server_started": datetime.now().isoformat(),
-        "available_tools": list(TOOLS.keys())
-        + ["chat", "list_models", "get_version"],
+        "available_tools": list(TOOLS.keys()) + ["chat", "list_models", "get_version"],
     }

     text = f"""Gemini MCP Server v{__version__}
@@ -2,10 +2,17 @@
 Tests for configuration
 """

-from config import (DEFAULT_MODEL, MAX_CONTEXT_TOKENS,
-                    TEMPERATURE_ANALYTICAL, TEMPERATURE_BALANCED,
-                    TEMPERATURE_CREATIVE, TOOL_TRIGGERS, __author__,
-                    __updated__, __version__)
+from config import (
+    DEFAULT_MODEL,
+    MAX_CONTEXT_TOKENS,
+    TEMPERATURE_ANALYTICAL,
+    TEMPERATURE_BALANCED,
+    TEMPERATURE_CREATIVE,
+    TOOL_TRIGGERS,
+    __author__,
+    __updated__,
+    __version__,
+)


 class TestConfig:
@@ -15,7 +22,7 @@ class TestConfig:
         """Test version information exists and has correct format"""
         # Check version format (e.g., "2.4.1")
         assert isinstance(__version__, str)
-        assert len(__version__.split('.')) == 3  # Major.Minor.Patch
+        assert len(__version__.split(".")) == 3  # Major.Minor.Patch

         # Check author
         assert __author__ == "Fahad Gilani"
@@ -20,9 +20,6 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

 from tools.analyze import AnalyzeTool
 from tools.think_deeper import ThinkDeeperTool
-from tools.review_code import ReviewCodeTool
-from tools.debug_issue import DebugIssueTool
-


 async def run_manual_live_tests():
@@ -36,23 +33,24 @@ async def run_manual_live_tests():

     try:
         # Test google-genai import
-        from google import genai
-        from google.genai import types
         print("✅ google-genai library import successful")

         # Test tool integration
-        with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f:
+        with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False) as f:
             f.write("def hello(): return 'world'")
             temp_path = f.name

         try:
             # Test AnalyzeTool
             tool = AnalyzeTool()
-            result = await tool.execute({
-                "files": [temp_path],
-                "question": "What does this code do?",
-                "thinking_mode": "low"
-            })
+            result = await tool.execute(
+                {
+                    "files": [temp_path],
+                    "question": "What does this code do?",
+                    "thinking_mode": "low",
+                }
+            )

             if result and result[0].text:
                 print("✅ AnalyzeTool live test successful")
@@ -62,10 +60,12 @@ async def run_manual_live_tests():

             # Test ThinkDeeperTool
             think_tool = ThinkDeeperTool()
-            result = await think_tool.execute({
-                "current_analysis": "Testing live integration",
-                "thinking_mode": "minimal"  # Fast test
-            })
+            result = await think_tool.execute(
+                {
+                    "current_analysis": "Testing live integration",
+                    "thinking_mode": "minimal",  # Fast test
+                }
+            )

             if result and result[0].text and "Extended Analysis" in result[0].text:
                 print("✅ ThinkDeeperTool live test successful")
|||||||
@@ -33,9 +33,7 @@ class TestServerTools:
|
|||||||
|
|
||||||
# Check descriptions are verbose
|
# Check descriptions are verbose
|
||||||
for tool in tools:
|
for tool in tools:
|
||||||
assert (
|
assert len(tool.description) > 50 # All should have detailed descriptions
|
||||||
len(tool.description) > 50
|
|
||||||
) # All should have detailed descriptions
|
|
||||||
|
|
||||||
@pytest.mark.asyncio
|
@pytest.mark.asyncio
|
||||||
async def test_handle_call_tool_unknown(self):
|
async def test_handle_call_tool_unknown(self):
|
||||||
@@ -49,6 +47,7 @@ class TestServerTools:
         """Test chat functionality"""
         # Set test environment
         import os
+
         os.environ["PYTEST_CURRENT_TEST"] = "test"

         # Create a mock for the model
|||||||
@@ -2,7 +2,6 @@
|
|||||||
Tests for thinking_mode functionality across all tools
|
Tests for thinking_mode functionality across all tools
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import os
|
|
||||||
from unittest.mock import Mock, patch
|
from unittest.mock import Mock, patch
|
||||||
|
|
||||||
import pytest
|
import pytest
|
||||||
@@ -33,8 +32,9 @@ class TestThinkingModes:
         ]

         for tool, expected_default in tools:
-            assert tool.get_default_thinking_mode() == expected_default, \
-                f"{tool.__class__.__name__} should default to {expected_default}"
+            assert (
+                tool.get_default_thinking_mode() == expected_default
+            ), f"{tool.__class__.__name__} should default to {expected_default}"

     @pytest.mark.asyncio
     @patch("tools.base.BaseTool.create_model")
@@ -42,16 +42,20 @@
         """Test minimal thinking mode"""
         mock_model = Mock()
         mock_model.generate_content.return_value = Mock(
-            candidates=[Mock(content=Mock(parts=[Mock(text="Minimal thinking response")]))]
+            candidates=[
+                Mock(content=Mock(parts=[Mock(text="Minimal thinking response")]))
+            ]
         )
         mock_create_model.return_value = mock_model

         tool = AnalyzeTool()
-        result = await tool.execute({
-            "files": ["test.py"],
-            "question": "What is this?",
-            "thinking_mode": "minimal"
-        })
+        result = await tool.execute(
+            {
+                "files": ["test.py"],
+                "question": "What is this?",
+                "thinking_mode": "minimal",
+            }
+        )

         # Verify create_model was called with correct thinking_mode
         mock_create_model.assert_called_once()
@@ -71,10 +75,7 @@ class TestThinkingModes:
         mock_create_model.return_value = mock_model

         tool = ReviewCodeTool()
-        result = await tool.execute({
-            "files": ["test.py"],
-            "thinking_mode": "low"
-        })
+        result = await tool.execute({"files": ["test.py"], "thinking_mode": "low"})

         # Verify create_model was called with correct thinking_mode
         mock_create_model.assert_called_once()
@@ -89,15 +90,19 @@ class TestThinkingModes:
         """Test medium thinking mode (default for most tools)"""
         mock_model = Mock()
         mock_model.generate_content.return_value = Mock(
-            candidates=[Mock(content=Mock(parts=[Mock(text="Medium thinking response")]))]
+            candidates=[
+                Mock(content=Mock(parts=[Mock(text="Medium thinking response")]))
+            ]
         )
         mock_create_model.return_value = mock_model

         tool = DebugIssueTool()
-        result = await tool.execute({
-            "error_description": "Test error",
-            # Not specifying thinking_mode, should use default (medium)
-        })
+        result = await tool.execute(
+            {
+                "error_description": "Test error",
+                # Not specifying thinking_mode, should use default (medium)
+            }
+        )

         # Verify create_model was called with default thinking_mode
         mock_create_model.assert_called_once()
@@ -117,11 +122,13 @@ class TestThinkingModes:
         mock_create_model.return_value = mock_model

         tool = AnalyzeTool()
-        result = await tool.execute({
-            "files": ["complex.py"],
-            "question": "Analyze architecture",
-            "thinking_mode": "high"
-        })
+        await tool.execute(
+            {
+                "files": ["complex.py"],
+                "question": "Analyze architecture",
+                "thinking_mode": "high",
+            }
+        )

         # Verify create_model was called with correct thinking_mode
         mock_create_model.assert_called_once()
@@ -139,10 +146,12 @@ class TestThinkingModes:
         mock_create_model.return_value = mock_model

         tool = ThinkDeeperTool()
-        result = await tool.execute({
-            "current_analysis": "Initial analysis",
-            # Not specifying thinking_mode, should use default (max)
-        })
+        result = await tool.execute(
+            {
+                "current_analysis": "Initial analysis",
+                # Not specifying thinking_mode, should use default (max)
+            }
+        )

         # Verify create_model was called with default thinking_mode
         mock_create_model.assert_called_once()
@@ -157,14 +166,23 @@ class TestThinkingModes:

         # Create a simple test tool
         class TestTool(BaseTool):
-            def get_name(self): return "test"
-            def get_description(self): return "test"
-            def get_input_schema(self): return {}
-            def get_system_prompt(self): return "test"
-            def get_request_model(self): return None
-            async def prepare_prompt(self, request): return "test"
+            def get_name(self):
+                return "test"
+
+            def get_description(self):
+                return "test"
+
+            def get_input_schema(self):
+                return {}
+
+            def get_system_prompt(self):
+                return "test"
+
+            def get_request_model(self):
+                return None
+
+            async def prepare_prompt(self, request):
+                return "test"

-        tool = TestTool()
-
         # Expected mappings
         expected_budgets = {
@@ -172,7 +190,7 @@ class TestThinkingModes:
             "low": 2048,
             "medium": 8192,
             "high": 16384,
-            "max": 32768
+            "max": 32768,
         }

         # Check each mode in create_model
@@ -120,7 +120,9 @@ class TestDebugIssueTool:
         # Mock model
         mock_model = Mock()
         mock_model.generate_content.return_value = Mock(
-            candidates=[Mock(content=Mock(parts=[Mock(text="Root cause: race condition")]))]
+            candidates=[
+                Mock(content=Mock(parts=[Mock(text="Root cause: race condition")]))
+            ]
         )
         mock_create_model.return_value = mock_model

@@ -157,9 +159,7 @@ class TestAnalyzeTool:

     @pytest.mark.asyncio
     @patch("tools.base.BaseTool.create_model")
-    async def test_execute_with_analysis_type(
-        self, mock_model, tool, tmp_path
-    ):
+    async def test_execute_with_analysis_type(self, mock_model, tool, tmp_path):
         """Test execution with specific analysis type"""
         # Create test file
         test_file = tmp_path / "module.py"
@@ -168,9 +168,7 @@ class TestAnalyzeTool:
         # Mock response
         mock_response = Mock()
         mock_response.candidates = [Mock()]
-        mock_response.candidates[0].content.parts = [
-            Mock(text="Architecture analysis")
-        ]
+        mock_response.candidates[0].content.parts = [Mock(text="Architecture analysis")]

         mock_instance = Mock()
         mock_instance.generate_content.return_value = mock_response
@@ -2,8 +2,7 @@
 Tests for utility functions
 """

-from utils import (check_token_limit, estimate_tokens, read_file_content,
-                   read_files)
+from utils import check_token_limit, estimate_tokens, read_file_content, read_files


 class TestFileUtils:
@@ -12,9 +11,7 @@ class TestFileUtils:
     def test_read_file_content_success(self, tmp_path):
         """Test successful file reading"""
         test_file = tmp_path / "test.py"
-        test_file.write_text(
-            "def hello():\n    return 'world'", encoding="utf-8"
-        )
+        test_file.write_text("def hello():\n    return 'world'", encoding="utf-8")

         content, tokens = read_file_content(str(test_file))
         assert "--- BEGIN FILE:" in content
@@ -11,6 +11,7 @@ from google.genai import types
 from mcp.types import TextContent
 from pydantic import BaseModel, Field

+

 class ToolRequest(BaseModel):
     """Base request model for all tools"""

@@ -21,7 +22,8 @@ class ToolRequest(BaseModel):
         None, description="Temperature for response (tool-specific defaults)"
     )
     thinking_mode: Optional[Literal["minimal", "low", "medium", "high", "max"]] = Field(
-        None, description="Thinking depth: minimal (128), low (2048), medium (8192), high (16384), max (32768)"
+        None,
+        description="Thinking depth: minimal (128), low (2048), medium (8192), high (16384), max (32768)",
     )

@@ -132,7 +134,7 @@ class BaseTool(ABC):
             "low": 2048,
             "medium": 8192,
             "high": 16384,
-            "max": 32768
+            "max": 32768,
         }

         thinking_budget = thinking_budgets.get(thinking_mode, 8192)
@@ -150,7 +152,9 @@ class BaseTool(ABC):

             # Create a wrapper to match the expected interface
             class ModelWrapper:
-                def __init__(self, client, model_name, temperature, thinking_budget):
+                def __init__(
+                    self, client, model_name, temperature, thinking_budget
+                ):
                     self.client = client
                     self.model_name = model_name
                     self.temperature = temperature
@@ -163,25 +167,44 @@ class BaseTool(ABC):
                         config=types.GenerateContentConfig(
                             temperature=self.temperature,
                             candidate_count=1,
-                            thinking_config=types.ThinkingConfig(thinking_budget=self.thinking_budget)
+                            thinking_config=types.ThinkingConfig(
+                                thinking_budget=self.thinking_budget
+                            ),
                         ),
                     )

                     # Convert to match expected format
                     class ResponseWrapper:
                         def __init__(self, text):
                             self.text = text
-                            self.candidates = [type('obj', (object,), {
-                                'content': type('obj', (object,), {
-                                    'parts': [type('obj', (object,), {'text': text})]
-                                })(),
-                                'finish_reason': 'STOP'
-                            })]
+                            self.candidates = [
+                                type(
+                                    "obj",
+                                    (object,),
+                                    {
+                                        "content": type(
+                                            "obj",
+                                            (object,),
+                                            {
+                                                "parts": [
+                                                    type(
+                                                        "obj",
+                                                        (object,),
+                                                        {"text": text},
+                                                    )
+                                                ]
+                                            },
+                                        )(),
+                                        "finish_reason": "STOP",
+                                    },
+                                )
+                            ]

                     return ResponseWrapper(response.text)

             return ModelWrapper(client, model_name, temperature, thinking_budget)

-        except Exception as e:
+        except Exception:
             # Fall back to regular genai model if new API fails
             pass
@@ -214,12 +237,24 @@ class BaseTool(ABC):
         class ResponseWrapper:
             def __init__(self, text):
                 self.text = text
-                self.candidates = [type('obj', (object,), {
-                    'content': type('obj', (object,), {
-                        'parts': [type('obj', (object,), {'text': text})]
-                    })(),
-                    'finish_reason': 'STOP'
-                })]
+                self.candidates = [
+                    type(
+                        "obj",
+                        (object,),
+                        {
+                            "content": type(
+                                "obj",
+                                (object,),
+                                {
+                                    "parts": [
+                                        type("obj", (object,), {"text": text})
+                                    ]
+                                },
+                            )(),
+                            "finish_reason": "STOP",
+                        },
+                    )
+                ]

         return ResponseWrapper(response.text)

|||||||
@@ -146,8 +146,6 @@ Focus on finding the root cause and providing actionable solutions."""
|
|||||||
|
|
||||||
return full_prompt
|
return full_prompt
|
||||||
|
|
||||||
def format_response(
|
def format_response(self, response: str, request: DebugIssueRequest) -> str:
|
||||||
self, response: str, request: DebugIssueRequest
|
|
||||||
) -> str:
|
|
||||||
"""Format the debugging response"""
|
"""Format the debugging response"""
|
||||||
return f"Debug Analysis\n{'=' * 50}\n\n{response}"
|
return f"Debug Analysis\n{'=' * 50}\n\n{response}"
|
||||||
|
|||||||
@@ -130,14 +130,10 @@ class ReviewCodeTool(BaseTool):
|
|||||||
)
|
)
|
||||||
|
|
||||||
if request.focus_on:
|
if request.focus_on:
|
||||||
review_focus.append(
|
review_focus.append(f"Pay special attention to: {request.focus_on}")
|
||||||
f"Pay special attention to: {request.focus_on}"
|
|
||||||
)
|
|
||||||
|
|
||||||
if request.standards:
|
if request.standards:
|
||||||
review_focus.append(
|
review_focus.append(f"Enforce these standards: {request.standards}")
|
||||||
f"Enforce these standards: {request.standards}"
|
|
||||||
)
|
|
||||||
|
|
||||||
if request.severity_filter != "all":
|
if request.severity_filter != "all":
|
||||||
review_focus.append(
|
review_focus.append(
|
||||||
@@ -159,9 +155,7 @@ Please provide a comprehensive code review following the format specified in the

         return full_prompt

-    def format_response(
-        self, response: str, request: ReviewCodeRequest
-    ) -> str:
+    def format_response(self, response: str, request: ReviewCodeRequest) -> str:
         """Format the review response"""
         header = f"Code Review ({request.review_type.upper()})"
         if request.focus_on:
@@ -130,7 +130,9 @@ class ThinkDeeperTool(BaseTool):
         focus_instruction = ""
         if request.focus_areas:
             areas = ", ".join(request.focus_areas)
-            focus_instruction = f"\n\nFOCUS AREAS: Please pay special attention to {areas} aspects."
+            focus_instruction = (
+                f"\n\nFOCUS AREAS: Please pay special attention to {areas} aspects."
+            )

         # Combine system prompt with context
         full_prompt = f"""{self.get_system_prompt()}{focus_instruction}
@@ -146,8 +148,6 @@ Please provide deep analysis that extends Claude's thinking with:

         return full_prompt

-    def format_response(
-        self, response: str, request: ThinkDeeperRequest
-    ) -> str:
+    def format_response(self, response: str, request: ThinkDeeperRequest) -> str:
         """Format the response with clear attribution"""
         return f"Extended Analysis by Gemini:\n\n{response}"
|||||||
@@ -11,11 +11,52 @@ from .token_utils import estimate_tokens, MAX_CONTEXT_TOKENS
|
|||||||
|
|
||||||
# Common code file extensions
|
# Common code file extensions
|
||||||
CODE_EXTENSIONS = {
|
CODE_EXTENSIONS = {
|
||||||
'.py', '.js', '.ts', '.jsx', '.tsx', '.java', '.cpp', '.c', '.h', '.hpp',
|
".py",
|
||||||
'.cs', '.go', '.rs', '.rb', '.php', '.swift', '.kt', '.scala', '.r', '.m',
|
".js",
|
||||||
'.mm', '.sql', '.sh', '.bash', '.zsh', '.fish', '.ps1', '.bat', '.cmd',
|
".ts",
|
||||||
'.yml', '.yaml', '.json', '.xml', '.toml', '.ini', '.cfg', '.conf',
|
".jsx",
|
||||||
'.txt', '.md', '.rst', '.tex', '.html', '.css', '.scss', '.sass', '.less'
|
".tsx",
|
||||||
|
".java",
|
||||||
|
".cpp",
|
||||||
|
".c",
|
||||||
|
".h",
|
||||||
|
".hpp",
|
||||||
|
".cs",
|
||||||
|
".go",
|
||||||
|
".rs",
|
||||||
|
".rb",
|
||||||
|
".php",
|
||||||
|
".swift",
|
||||||
|
".kt",
|
||||||
|
".scala",
|
||||||
|
".r",
|
||||||
|
".m",
|
||||||
|
".mm",
|
||||||
|
".sql",
|
||||||
|
".sh",
|
||||||
|
".bash",
|
||||||
|
".zsh",
|
||||||
|
".fish",
|
||||||
|
".ps1",
|
||||||
|
".bat",
|
||||||
|
".cmd",
|
||||||
|
".yml",
|
||||||
|
".yaml",
|
||||||
|
".json",
|
||||||
|
".xml",
|
||||||
|
".toml",
|
||||||
|
".ini",
|
||||||
|
".cfg",
|
||||||
|
".conf",
|
||||||
|
".txt",
|
||||||
|
".md",
|
||||||
|
".rst",
|
||||||
|
".tex",
|
||||||
|
".html",
|
||||||
|
".css",
|
||||||
|
".scss",
|
||||||
|
".sass",
|
||||||
|
".less",
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@@ -52,11 +93,13 @@ def expand_paths(paths: List[str], extensions: Optional[Set[str]] = None) -> Lis
         # Walk directory recursively
         for root, dirs, files in os.walk(path_obj):
             # Skip hidden directories and __pycache__
-            dirs[:] = [d for d in dirs if not d.startswith('.') and d != '__pycache__']
+            dirs[:] = [
+                d for d in dirs if not d.startswith(".") and d != "__pycache__"
+            ]

             for file in files:
                 # Skip hidden files
-                if file.startswith('.'):
+                if file.startswith("."):
                     continue

                 file_path = Path(root) / file
@@ -119,7 +162,7 @@ def read_files(
     file_paths: List[str],
     code: Optional[str] = None,
     max_tokens: Optional[int] = None,
-    reserve_tokens: int = 50_000
+    reserve_tokens: int = 50_000,
 ) -> Tuple[str, str]:
     """
     Read multiple files and optional direct code with smart token management.
@@ -147,7 +190,9 @@ def read_files(

     # First, handle direct code if provided
     if code:
-        formatted_code = f"\n--- BEGIN DIRECT CODE ---\n{code}\n--- END DIRECT CODE ---\n"
+        formatted_code = (
+            f"\n--- BEGIN DIRECT CODE ---\n{code}\n--- END DIRECT CODE ---\n"
+        )
        code_tokens = estimate_tokens(formatted_code)

        if code_tokens <= available_tokens:
@@ -171,7 +216,9 @@ def read_files(

     if not all_files and file_paths:
         # No files found but paths were provided
-        content_parts.append(f"\n--- NO FILES FOUND ---\nProvided paths: {', '.join(file_paths)}\n--- END ---\n")
+        content_parts.append(
+            f"\n--- NO FILES FOUND ---\nProvided paths: {', '.join(file_paths)}\n--- END ---\n"
+        )
     else:
         # Read files up to token limit
         for file_path in all_files:
@@ -201,7 +248,7 @@ def read_files(

     # Add skipped files note if any were skipped
     if files_skipped:
-        skip_note = f"\n\n--- SKIPPED FILES (TOKEN LIMIT) ---\n"
+        skip_note = "\n\n--- SKIPPED FILES (TOKEN LIMIT) ---\n"
         skip_note += f"Total skipped: {len(files_skipped)}\n"
         # Show first 10 skipped files
         for i, file_path in enumerate(files_skipped[:10]):