style: fix linting and formatting issues
- Run black formatter on all Python files
- Fix ruff linting issues:
  - Remove unused imports
  - Remove unused variables
  - Fix f-string without placeholders
- All 37 tests still pass
- Code quality improved for CI/CD compliance

🧹 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
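The commit message names the tools but not the exact invocations; a plausible reproduction of this pass (the precise flags are an assumption, not recorded in the commit) would be:

    black .                 # reformat all Python files in place
    ruff check . --fix      # drop unused imports/variables, fix f-strings without placeholders
    python -m pytest        # confirm all 37 tests still pass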
@@ -9,7 +9,9 @@ __author__ = "Fahad Gilani"
 
 # Model configuration
 DEFAULT_MODEL = "gemini-2.5-pro-preview-06-05"
-THINKING_MODEL = "gemini-2.0-flash-thinking-exp"  # Enhanced reasoning model for think_deeper
+THINKING_MODEL = (
+    "gemini-2.0-flash-thinking-exp"  # Enhanced reasoning model for think_deeper
+)
 MAX_CONTEXT_TOKENS = 1_000_000  # 1M tokens for Gemini Pro
 
 # Temperature defaults for different tool types
@@ -2,8 +2,13 @@
 System prompts for Gemini tools
 """
 
-from .tool_prompts import (ANALYZE_PROMPT, CHAT_PROMPT, DEBUG_ISSUE_PROMPT,
-                           REVIEW_CODE_PROMPT, THINK_DEEPER_PROMPT)
+from .tool_prompts import (
+    ANALYZE_PROMPT,
+    CHAT_PROMPT,
+    DEBUG_ISSUE_PROMPT,
+    REVIEW_CODE_PROMPT,
+    THINK_DEEPER_PROMPT,
+)
 
 __all__ = [
     "THINK_DEEPER_PROMPT",
server.py (66 changed lines)
@@ -10,14 +10,18 @@ from datetime import datetime
 from typing import Any, Dict, List
 
 from google import genai
 from google.genai import types
 from mcp.server import Server
 from mcp.server.models import InitializationOptions
 from mcp.server.stdio import stdio_server
 from mcp.types import TextContent, Tool
 
-from config import (DEFAULT_MODEL, MAX_CONTEXT_TOKENS, __author__, __updated__,
-                    __version__)
+from config import (
+    DEFAULT_MODEL,
+    MAX_CONTEXT_TOKENS,
+    __author__,
+    __updated__,
+    __version__,
+)
 from tools import AnalyzeTool, DebugIssueTool, ReviewCodeTool, ThinkDeeperTool
 
 # Configure logging
@@ -125,9 +129,7 @@ async def handle_list_tools() -> List[Tool]:
 
 
 @server.call_tool()
-async def handle_call_tool(
-    name: str, arguments: Dict[str, Any]
-) -> List[TextContent]:
+async def handle_call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
     """Handle tool execution requests"""
 
     # Handle dynamic tools
@@ -151,7 +153,7 @@ async def handle_call_tool(
 
 async def handle_chat(arguments: Dict[str, Any]) -> List[TextContent]:
     """Handle general chat requests"""
-    from config import TEMPERATURE_BALANCED, DEFAULT_MODEL, THINKING_MODEL
+    from config import TEMPERATURE_BALANCED, DEFAULT_MODEL
     from prompts import CHAT_PROMPT
     from utils import read_files
 
@@ -164,7 +166,9 @@ async def handle_chat(arguments: Dict[str, Any]) -> List[TextContent]:
     user_content = prompt
     if context_files:
         file_content, _ = read_files(context_files)
-        user_content = f"{prompt}\n\n=== CONTEXT FILES ===\n{file_content}\n=== END CONTEXT ==="
+        user_content = (
+            f"{prompt}\n\n=== CONTEXT FILES ===\n{file_content}\n=== END CONTEXT ==="
+        )
 
     # Combine system prompt with user content
     full_prompt = f"{CHAT_PROMPT}\n\n=== USER REQUEST ===\n{user_content}\n=== END REQUEST ===\n\nPlease provide a thoughtful, comprehensive response:"
@@ -175,12 +179,23 @@ async def handle_chat(arguments: Dict[str, Any]) -> List[TextContent]:
 
     # Create a temporary tool instance to use create_model method
     class TempTool(BaseTool):
-        def get_name(self): return "chat"
-        def get_description(self): return ""
-        def get_input_schema(self): return {}
-        def get_system_prompt(self): return ""
-        def get_request_model(self): return None
-        async def prepare_prompt(self, request): return ""
+        def get_name(self):
+            return "chat"
+
+        def get_description(self):
+            return ""
+
+        def get_input_schema(self):
+            return {}
+
+        def get_system_prompt(self):
+            return ""
+
+        def get_request_model(self):
+            return None
+
+        async def prepare_prompt(self, request):
+            return ""
 
     temp_tool = TempTool()
     model = temp_tool.create_model(DEFAULT_MODEL, temperature, thinking_mode)
@@ -218,13 +233,21 @@ async def handle_list_models() -> List[TextContent]:
             models.append(
                 {
                     "name": getattr(model_info, "id", "Unknown"),
-                    "display_name": getattr(model_info, "display_name", getattr(model_info, "id", "Unknown")),
-                    "description": getattr(model_info, "description", "No description"),
-                    "is_default": getattr(model_info, "id", "").endswith(DEFAULT_MODEL),
+                    "display_name": getattr(
+                        model_info,
+                        "display_name",
+                        getattr(model_info, "id", "Unknown"),
+                    ),
+                    "description": getattr(
+                        model_info, "description", "No description"
+                    ),
+                    "is_default": getattr(model_info, "id", "").endswith(
+                        DEFAULT_MODEL
+                    ),
                 }
             )
 
-    except Exception as e:
+    except Exception:
         # Fallback: return some known models
         models = [
             {
@@ -244,9 +267,7 @@ async def handle_list_models() -> List[TextContent]:
         return [TextContent(type="text", text=json.dumps(models, indent=2))]
 
     except Exception as e:
-        return [
-            TextContent(type="text", text=f"Error listing models: {str(e)}")
-        ]
+        return [TextContent(type="text", text=f"Error listing models: {str(e)}")]
 
 
 async def handle_get_version() -> List[TextContent]:
@@ -259,8 +280,7 @@ async def handle_get_version() -> List[TextContent]:
         "max_context_tokens": f"{MAX_CONTEXT_TOKENS:,}",
         "python_version": f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}",
         "server_started": datetime.now().isoformat(),
-        "available_tools": list(TOOLS.keys())
-        + ["chat", "list_models", "get_version"],
+        "available_tools": list(TOOLS.keys()) + ["chat", "list_models", "get_version"],
     }
 
     text = f"""Gemini MCP Server v{__version__}
@@ -2,10 +2,17 @@
 Tests for configuration
 """
 
-from config import (DEFAULT_MODEL, MAX_CONTEXT_TOKENS,
-                    TEMPERATURE_ANALYTICAL, TEMPERATURE_BALANCED,
-                    TEMPERATURE_CREATIVE, TOOL_TRIGGERS, __author__,
-                    __updated__, __version__)
+from config import (
+    DEFAULT_MODEL,
+    MAX_CONTEXT_TOKENS,
+    TEMPERATURE_ANALYTICAL,
+    TEMPERATURE_BALANCED,
+    TEMPERATURE_CREATIVE,
+    TOOL_TRIGGERS,
+    __author__,
+    __updated__,
+    __version__,
+)
 
 
 class TestConfig:
@@ -15,7 +22,7 @@ class TestConfig:
         """Test version information exists and has correct format"""
         # Check version format (e.g., "2.4.1")
         assert isinstance(__version__, str)
-        assert len(__version__.split('.')) == 3  # Major.Minor.Patch
+        assert len(__version__.split(".")) == 3  # Major.Minor.Patch
 
         # Check author
         assert __author__ == "Fahad Gilani"
@@ -20,9 +20,6 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 from tools.analyze import AnalyzeTool
-from tools.think_deeper import ThinkDeeperTool
-from tools.review_code import ReviewCodeTool
-from tools.debug_issue import DebugIssueTool
 
 
 async def run_manual_live_tests():
@@ -36,23 +33,24 @@ async def run_manual_live_tests():
 
     try:
         # Test google-genai import
        from google import genai
         from google.genai import types
 
         print("✅ google-genai library import successful")
 
         # Test tool integration
-        with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f:
+        with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False) as f:
             f.write("def hello(): return 'world'")
             temp_path = f.name
 
         try:
             # Test AnalyzeTool
             tool = AnalyzeTool()
-            result = await tool.execute({
-                "files": [temp_path],
-                "question": "What does this code do?",
-                "thinking_mode": "low"
-            })
+            result = await tool.execute(
+                {
+                    "files": [temp_path],
+                    "question": "What does this code do?",
+                    "thinking_mode": "low",
+                }
+            )
 
             if result and result[0].text:
                 print("✅ AnalyzeTool live test successful")
@@ -62,10 +60,12 @@ async def run_manual_live_tests():
 
             # Test ThinkDeeperTool
             think_tool = ThinkDeeperTool()
-            result = await think_tool.execute({
-                "current_analysis": "Testing live integration",
-                "thinking_mode": "minimal"  # Fast test
-            })
+            result = await think_tool.execute(
+                {
+                    "current_analysis": "Testing live integration",
+                    "thinking_mode": "minimal",  # Fast test
+                }
+            )
 
             if result and result[0].text and "Extended Analysis" in result[0].text:
                 print("✅ ThinkDeeperTool live test successful")
@@ -33,9 +33,7 @@ class TestServerTools:
 
         # Check descriptions are verbose
         for tool in tools:
-            assert (
-                len(tool.description) > 50
-            )  # All should have detailed descriptions
+            assert len(tool.description) > 50  # All should have detailed descriptions
 
     @pytest.mark.asyncio
     async def test_handle_call_tool_unknown(self):
@@ -49,6 +47,7 @@ class TestServerTools:
         """Test chat functionality"""
         # Set test environment
         import os
+
         os.environ["PYTEST_CURRENT_TEST"] = "test"
 
         # Create a mock for the model
@@ -2,7 +2,6 @@
 Tests for thinking_mode functionality across all tools
 """
 
-import os
 from unittest.mock import Mock, patch
 
 import pytest
@@ -33,8 +32,9 @@ class TestThinkingModes:
         ]
 
         for tool, expected_default in tools:
-            assert tool.get_default_thinking_mode() == expected_default, \
-                f"{tool.__class__.__name__} should default to {expected_default}"
+            assert (
+                tool.get_default_thinking_mode() == expected_default
+            ), f"{tool.__class__.__name__} should default to {expected_default}"
 
     @pytest.mark.asyncio
     @patch("tools.base.BaseTool.create_model")
@@ -42,16 +42,20 @@ class TestThinkingModes:
         """Test minimal thinking mode"""
         mock_model = Mock()
         mock_model.generate_content.return_value = Mock(
-            candidates=[Mock(content=Mock(parts=[Mock(text="Minimal thinking response")]))]
+            candidates=[
+                Mock(content=Mock(parts=[Mock(text="Minimal thinking response")]))
+            ]
         )
         mock_create_model.return_value = mock_model
 
         tool = AnalyzeTool()
-        result = await tool.execute({
-            "files": ["test.py"],
-            "question": "What is this?",
-            "thinking_mode": "minimal"
-        })
+        result = await tool.execute(
+            {
+                "files": ["test.py"],
+                "question": "What is this?",
+                "thinking_mode": "minimal",
+            }
+        )
 
         # Verify create_model was called with correct thinking_mode
         mock_create_model.assert_called_once()
@@ -71,10 +75,7 @@ class TestThinkingModes:
         mock_create_model.return_value = mock_model
 
         tool = ReviewCodeTool()
-        result = await tool.execute({
-            "files": ["test.py"],
-            "thinking_mode": "low"
-        })
+        result = await tool.execute({"files": ["test.py"], "thinking_mode": "low"})
 
         # Verify create_model was called with correct thinking_mode
         mock_create_model.assert_called_once()
@@ -89,15 +90,19 @@ class TestThinkingModes:
         """Test medium thinking mode (default for most tools)"""
         mock_model = Mock()
         mock_model.generate_content.return_value = Mock(
-            candidates=[Mock(content=Mock(parts=[Mock(text="Medium thinking response")]))]
+            candidates=[
+                Mock(content=Mock(parts=[Mock(text="Medium thinking response")]))
+            ]
         )
         mock_create_model.return_value = mock_model
 
         tool = DebugIssueTool()
-        result = await tool.execute({
-            "error_description": "Test error",
-            # Not specifying thinking_mode, should use default (medium)
-        })
+        result = await tool.execute(
+            {
+                "error_description": "Test error",
+                # Not specifying thinking_mode, should use default (medium)
+            }
+        )
 
         # Verify create_model was called with default thinking_mode
         mock_create_model.assert_called_once()
@@ -117,11 +122,13 @@ class TestThinkingModes:
         mock_create_model.return_value = mock_model
 
         tool = AnalyzeTool()
-        result = await tool.execute({
-            "files": ["complex.py"],
-            "question": "Analyze architecture",
-            "thinking_mode": "high"
-        })
+        await tool.execute(
+            {
+                "files": ["complex.py"],
+                "question": "Analyze architecture",
+                "thinking_mode": "high",
+            }
+        )
 
         # Verify create_model was called with correct thinking_mode
         mock_create_model.assert_called_once()
@@ -139,10 +146,12 @@ class TestThinkingModes:
         mock_create_model.return_value = mock_model
 
         tool = ThinkDeeperTool()
-        result = await tool.execute({
-            "current_analysis": "Initial analysis",
-            # Not specifying thinking_mode, should use default (max)
-        })
+        result = await tool.execute(
+            {
+                "current_analysis": "Initial analysis",
+                # Not specifying thinking_mode, should use default (max)
+            }
+        )
 
         # Verify create_model was called with default thinking_mode
         mock_create_model.assert_called_once()
@@ -157,14 +166,23 @@ class TestThinkingModes:
 
         # Create a simple test tool
         class TestTool(BaseTool):
-            def get_name(self): return "test"
-            def get_description(self): return "test"
-            def get_input_schema(self): return {}
-            def get_system_prompt(self): return "test"
-            def get_request_model(self): return None
-            async def prepare_prompt(self, request): return "test"
+            def get_name(self):
+                return "test"
+
+            def get_description(self):
+                return "test"
+
+            def get_input_schema(self):
+                return {}
+
+            def get_system_prompt(self):
+                return "test"
+
+            def get_request_model(self):
+                return None
+
+            async def prepare_prompt(self, request):
+                return "test"
 
         tool = TestTool()
 
         # Expected mappings
         expected_budgets = {
@@ -172,7 +190,7 @@ class TestThinkingModes:
             "low": 2048,
             "medium": 8192,
             "high": 16384,
-            "max": 32768
+            "max": 32768,
         }
 
         # Check each mode in create_model
@@ -120,7 +120,9 @@ class TestDebugIssueTool:
         # Mock model
         mock_model = Mock()
         mock_model.generate_content.return_value = Mock(
-            candidates=[Mock(content=Mock(parts=[Mock(text="Root cause: race condition")]))]
+            candidates=[
+                Mock(content=Mock(parts=[Mock(text="Root cause: race condition")]))
+            ]
         )
         mock_create_model.return_value = mock_model
 
@@ -157,9 +159,7 @@ class TestAnalyzeTool:
 
     @pytest.mark.asyncio
     @patch("tools.base.BaseTool.create_model")
-    async def test_execute_with_analysis_type(
-        self, mock_model, tool, tmp_path
-    ):
+    async def test_execute_with_analysis_type(self, mock_model, tool, tmp_path):
         """Test execution with specific analysis type"""
         # Create test file
         test_file = tmp_path / "module.py"
@@ -168,9 +168,7 @@ class TestAnalyzeTool:
         # Mock response
         mock_response = Mock()
         mock_response.candidates = [Mock()]
-        mock_response.candidates[0].content.parts = [
-            Mock(text="Architecture analysis")
-        ]
+        mock_response.candidates[0].content.parts = [Mock(text="Architecture analysis")]
 
         mock_instance = Mock()
         mock_instance.generate_content.return_value = mock_response
@@ -2,8 +2,7 @@
 Tests for utility functions
 """
 
-from utils import (check_token_limit, estimate_tokens, read_file_content,
-                   read_files)
+from utils import check_token_limit, estimate_tokens, read_file_content, read_files
 
 
 class TestFileUtils:
@@ -12,9 +11,7 @@ class TestFileUtils:
     def test_read_file_content_success(self, tmp_path):
         """Test successful file reading"""
         test_file = tmp_path / "test.py"
-        test_file.write_text(
-            "def hello():\n    return 'world'", encoding="utf-8"
-        )
+        test_file.write_text("def hello():\n    return 'world'", encoding="utf-8")
 
         content, tokens = read_file_content(str(test_file))
         assert "--- BEGIN FILE:" in content
@@ -11,6 +11,7 @@ from google.genai import types
 from mcp.types import TextContent
 from pydantic import BaseModel, Field
 
+
 class ToolRequest(BaseModel):
     """Base request model for all tools"""
 
@@ -21,7 +22,8 @@ class ToolRequest(BaseModel):
         None, description="Temperature for response (tool-specific defaults)"
     )
     thinking_mode: Optional[Literal["minimal", "low", "medium", "high", "max"]] = Field(
-        None, description="Thinking depth: minimal (128), low (2048), medium (8192), high (16384), max (32768)"
+        None,
+        description="Thinking depth: minimal (128), low (2048), medium (8192), high (16384), max (32768)",
     )
 
 
@@ -128,11 +130,11 @@ class BaseTool(ABC):
         """Create a configured Gemini model with thinking configuration"""
         # Map thinking modes to budget values
         thinking_budgets = {
-            "minimal": 128,   # Minimum for 2.5 Pro
+            "minimal": 128,  # Minimum for 2.5 Pro
             "low": 2048,
             "medium": 8192,
             "high": 16384,
-            "max": 32768
+            "max": 32768,
         }
 
         thinking_budget = thinking_budgets.get(thinking_mode, 8192)
@@ -150,7 +152,9 @@ class BaseTool(ABC):
 
             # Create a wrapper to match the expected interface
             class ModelWrapper:
-                def __init__(self, client, model_name, temperature, thinking_budget):
+                def __init__(
+                    self, client, model_name, temperature, thinking_budget
+                ):
                     self.client = client
                     self.model_name = model_name
                     self.temperature = temperature
@@ -163,25 +167,44 @@ class BaseTool(ABC):
                         config=types.GenerateContentConfig(
                             temperature=self.temperature,
                             candidate_count=1,
-                            thinking_config=types.ThinkingConfig(thinking_budget=self.thinking_budget)
+                            thinking_config=types.ThinkingConfig(
+                                thinking_budget=self.thinking_budget
+                            ),
                         ),
                     )
 
                     # Convert to match expected format
                     class ResponseWrapper:
                         def __init__(self, text):
                             self.text = text
-                            self.candidates = [type('obj', (object,), {
-                                'content': type('obj', (object,), {
-                                    'parts': [type('obj', (object,), {'text': text})]
-                                })(),
-                                'finish_reason': 'STOP'
-                            })]
+                            self.candidates = [
+                                type(
+                                    "obj",
+                                    (object,),
+                                    {
+                                        "content": type(
+                                            "obj",
+                                            (object,),
+                                            {
+                                                "parts": [
+                                                    type(
+                                                        "obj",
+                                                        (object,),
+                                                        {"text": text},
+                                                    )
+                                                ]
+                                            },
+                                        )(),
+                                        "finish_reason": "STOP",
+                                    },
+                                )
+                            ]
 
                     return ResponseWrapper(response.text)
 
             return ModelWrapper(client, model_name, temperature, thinking_budget)
 
-        except Exception as e:
+        except Exception:
             # Fall back to regular genai model if new API fails
             pass
@@ -214,12 +237,24 @@ class BaseTool(ABC):
         class ResponseWrapper:
             def __init__(self, text):
                 self.text = text
-                self.candidates = [type('obj', (object,), {
-                    'content': type('obj', (object,), {
-                        'parts': [type('obj', (object,), {'text': text})]
-                    })(),
-                    'finish_reason': 'STOP'
-                })]
+                self.candidates = [
+                    type(
+                        "obj",
+                        (object,),
+                        {
+                            "content": type(
+                                "obj",
+                                (object,),
+                                {
+                                    "parts": [
+                                        type("obj", (object,), {"text": text})
+                                    ]
+                                },
+                            )(),
+                            "finish_reason": "STOP",
+                        },
+                    )
+                ]
 
         return ResponseWrapper(response.text)
 
@@ -146,8 +146,6 @@ Focus on finding the root cause and providing actionable solutions."""
 
         return full_prompt
 
-    def format_response(
-        self, response: str, request: DebugIssueRequest
-    ) -> str:
+    def format_response(self, response: str, request: DebugIssueRequest) -> str:
         """Format the debugging response"""
         return f"Debug Analysis\n{'=' * 50}\n\n{response}"
@@ -130,14 +130,10 @@ class ReviewCodeTool(BaseTool):
             )
 
         if request.focus_on:
-            review_focus.append(
-                f"Pay special attention to: {request.focus_on}"
-            )
+            review_focus.append(f"Pay special attention to: {request.focus_on}")
 
         if request.standards:
-            review_focus.append(
-                f"Enforce these standards: {request.standards}"
-            )
+            review_focus.append(f"Enforce these standards: {request.standards}")
 
         if request.severity_filter != "all":
             review_focus.append(
@@ -159,9 +155,7 @@ Please provide a comprehensive code review following the format specified in the
 
         return full_prompt
 
-    def format_response(
-        self, response: str, request: ReviewCodeRequest
-    ) -> str:
+    def format_response(self, response: str, request: ReviewCodeRequest) -> str:
         """Format the review response"""
         header = f"Code Review ({request.review_type.upper()})"
         if request.focus_on:
@@ -130,7 +130,9 @@ class ThinkDeeperTool(BaseTool):
         focus_instruction = ""
         if request.focus_areas:
             areas = ", ".join(request.focus_areas)
-            focus_instruction = f"\n\nFOCUS AREAS: Please pay special attention to {areas} aspects."
+            focus_instruction = (
+                f"\n\nFOCUS AREAS: Please pay special attention to {areas} aspects."
+            )
 
         # Combine system prompt with context
         full_prompt = f"""{self.get_system_prompt()}{focus_instruction}
@@ -146,8 +148,6 @@ Please provide deep analysis that extends Claude's thinking with:
 
         return full_prompt
 
-    def format_response(
-        self, response: str, request: ThinkDeeperRequest
-    ) -> str:
+    def format_response(self, response: str, request: ThinkDeeperRequest) -> str:
         """Format the response with clear attribution"""
         return f"Extended Analysis by Gemini:\n\n{response}"
@@ -11,11 +11,52 @@ from .token_utils import estimate_tokens, MAX_CONTEXT_TOKENS
 
 # Common code file extensions
 CODE_EXTENSIONS = {
-    '.py', '.js', '.ts', '.jsx', '.tsx', '.java', '.cpp', '.c', '.h', '.hpp',
-    '.cs', '.go', '.rs', '.rb', '.php', '.swift', '.kt', '.scala', '.r', '.m',
-    '.mm', '.sql', '.sh', '.bash', '.zsh', '.fish', '.ps1', '.bat', '.cmd',
-    '.yml', '.yaml', '.json', '.xml', '.toml', '.ini', '.cfg', '.conf',
-    '.txt', '.md', '.rst', '.tex', '.html', '.css', '.scss', '.sass', '.less'
+    ".py",
+    ".js",
+    ".ts",
+    ".jsx",
+    ".tsx",
+    ".java",
+    ".cpp",
+    ".c",
+    ".h",
+    ".hpp",
+    ".cs",
+    ".go",
+    ".rs",
+    ".rb",
+    ".php",
+    ".swift",
+    ".kt",
+    ".scala",
+    ".r",
+    ".m",
+    ".mm",
+    ".sql",
+    ".sh",
+    ".bash",
+    ".zsh",
+    ".fish",
+    ".ps1",
+    ".bat",
+    ".cmd",
+    ".yml",
+    ".yaml",
+    ".json",
+    ".xml",
+    ".toml",
+    ".ini",
+    ".cfg",
+    ".conf",
+    ".txt",
+    ".md",
+    ".rst",
+    ".tex",
+    ".html",
+    ".css",
+    ".scss",
+    ".sass",
+    ".less",
 }
 
 
@@ -52,11 +93,13 @@ def expand_paths(paths: List[str], extensions: Optional[Set[str]] = None) -> Lis
         # Walk directory recursively
         for root, dirs, files in os.walk(path_obj):
             # Skip hidden directories and __pycache__
-            dirs[:] = [d for d in dirs if not d.startswith('.') and d != '__pycache__']
+            dirs[:] = [
+                d for d in dirs if not d.startswith(".") and d != "__pycache__"
+            ]
 
             for file in files:
                 # Skip hidden files
-                if file.startswith('.'):
+                if file.startswith("."):
                     continue
 
                 file_path = Path(root) / file
@@ -119,7 +162,7 @@ def read_files(
     file_paths: List[str],
     code: Optional[str] = None,
     max_tokens: Optional[int] = None,
-    reserve_tokens: int = 50_000
+    reserve_tokens: int = 50_000,
 ) -> Tuple[str, str]:
     """
     Read multiple files and optional direct code with smart token management.
@@ -147,7 +190,9 @@ def read_files(
 
     # First, handle direct code if provided
     if code:
-        formatted_code = f"\n--- BEGIN DIRECT CODE ---\n{code}\n--- END DIRECT CODE ---\n"
+        formatted_code = (
+            f"\n--- BEGIN DIRECT CODE ---\n{code}\n--- END DIRECT CODE ---\n"
+        )
         code_tokens = estimate_tokens(formatted_code)
 
         if code_tokens <= available_tokens:
@@ -171,7 +216,9 @@ def read_files(
 
     if not all_files and file_paths:
         # No files found but paths were provided
-        content_parts.append(f"\n--- NO FILES FOUND ---\nProvided paths: {', '.join(file_paths)}\n--- END ---\n")
+        content_parts.append(
+            f"\n--- NO FILES FOUND ---\nProvided paths: {', '.join(file_paths)}\n--- END ---\n"
+        )
     else:
         # Read files up to token limit
         for file_path in all_files:
@@ -201,7 +248,7 @@ def read_files(
 
     # Add skipped files note if any were skipped
     if files_skipped:
-        skip_note = f"\n\n--- SKIPPED FILES (TOKEN LIMIT) ---\n"
+        skip_note = "\n\n--- SKIPPED FILES (TOKEN LIMIT) ---\n"
         skip_note += f"Total skipped: {len(files_skipped)}\n"
         # Show first 10 skipped files
         for i, file_path in enumerate(files_skipped[:10]):