feat: complete redesign to v2.4.0 - Claude's ultimate development partner
Major redesign of Gemini MCP Server with modular architecture:

- Removed all emoji characters from tool outputs for clean terminal display
- Kept review category emojis (🔴🟠🟡🟢) per user preference
- Added 4 specialized tools:
  - think_deeper: Extended reasoning and problem-solving (temp 0.7)
  - review_code: Professional code review with severity levels (temp 0.2)
  - debug_issue: Root cause analysis and debugging (temp 0.2)
  - analyze: General-purpose file analysis (temp 0.2)
- Modular architecture with base tool class and Pydantic models
- Verbose tool descriptions with natural-language triggers
- Updated README with comprehensive examples and real-world use cases
- All 25 tests passing, type checking clean, critical linting clean

BREAKING CHANGE: Removed analyze_code tool in favor of specialized tools

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
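For context, here is a minimal sketch of what the config.py module described above might contain. It is not part of this commit's diff; the names and values are taken from the assertions in tests/test_config.py below, while the module layout and any trigger phrases beyond the asserted ones are assumptions.

# config.py (hypothetical sketch, inferred from tests/test_config.py)
"""Shared configuration constants for the Gemini MCP Server."""

__version__ = "2.4.0"
__updated__ = "2025-06-08"
__author__ = "Fahad Gilani"

# Model settings
DEFAULT_MODEL = "gemini-2.5-pro-preview-06-05"
MAX_CONTEXT_TOKENS = 1_000_000

# Default temperatures per tool category
TEMPERATURE_ANALYTICAL = 0.2  # review_code, debug_issue, analyze
TEMPERATURE_BALANCED = 0.5    # chat
TEMPERATURE_CREATIVE = 0.7    # think_deeper

# Natural-language phrases that route a request to each tool.
# Only the phrases asserted in the tests are listed here.
TOOL_TRIGGERS = {
    "think_deeper": ["ultrathink", "extended thinking"],
    "review_code": ["find bugs"],
    "debug_issue": ["root cause"],
    "analyze": [],  # trigger phrases not asserted in the tests
}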
tests/test_config.py (new file, 49 lines)
@@ -0,0 +1,49 @@
"""
Tests for configuration
"""

from config import (
    __version__,
    __updated__,
    __author__,
    DEFAULT_MODEL,
    MAX_CONTEXT_TOKENS,
    TEMPERATURE_ANALYTICAL,
    TEMPERATURE_BALANCED,
    TEMPERATURE_CREATIVE,
    TOOL_TRIGGERS,
)


class TestConfig:
    """Test configuration values"""

    def test_version_info(self):
        """Test version information"""
        assert __version__ == "2.4.0"
        assert __author__ == "Fahad Gilani"
        assert __updated__ == "2025-06-08"

    def test_model_config(self):
        """Test model configuration"""
        assert DEFAULT_MODEL == "gemini-2.5-pro-preview-06-05"
        assert MAX_CONTEXT_TOKENS == 1_000_000

    def test_temperature_defaults(self):
        """Test temperature constants"""
        assert TEMPERATURE_ANALYTICAL == 0.2
        assert TEMPERATURE_BALANCED == 0.5
        assert TEMPERATURE_CREATIVE == 0.7

    def test_tool_triggers(self):
        """Test tool trigger phrases"""
        assert "think_deeper" in TOOL_TRIGGERS
        assert "review_code" in TOOL_TRIGGERS
        assert "debug_issue" in TOOL_TRIGGERS
        assert "analyze" in TOOL_TRIGGERS

        # Check some specific triggers
        assert "ultrathink" in TOOL_TRIGGERS["think_deeper"]
        assert "extended thinking" in TOOL_TRIGGERS["think_deeper"]
        assert "find bugs" in TOOL_TRIGGERS["review_code"]
        assert "root cause" in TOOL_TRIGGERS["debug_issue"]
@@ -1,352 +0,0 @@ (deleted file)
"""
Unit tests for Gemini MCP Server
"""

import pytest
import json
from unittest.mock import Mock, patch, AsyncMock
from pathlib import Path
import sys
import os

# Add parent directory to path for imports in a cross-platform way
parent_dir = Path(__file__).resolve().parent.parent
if str(parent_dir) not in sys.path:
    sys.path.insert(0, str(parent_dir))

from gemini_server import (
    GeminiChatRequest,
    CodeAnalysisRequest,
    read_file_content,
    prepare_code_context,
    handle_list_tools,
    handle_call_tool,
    DEVELOPER_SYSTEM_PROMPT,
    DEFAULT_MODEL,
)


class TestModels:
    """Test request models"""

    def test_gemini_chat_request_defaults(self):
        """Test GeminiChatRequest with default values"""
        request = GeminiChatRequest(prompt="Test prompt")
        assert request.prompt == "Test prompt"
        assert request.system_prompt is None
        assert request.max_tokens == 8192
        assert request.temperature == 0.5
        assert request.model == DEFAULT_MODEL

    def test_gemini_chat_request_custom(self):
        """Test GeminiChatRequest with custom values"""
        request = GeminiChatRequest(
            prompt="Test prompt",
            system_prompt="Custom system",
            max_tokens=4096,
            temperature=0.8,
            model="custom-model",
        )
        assert request.system_prompt == "Custom system"
        assert request.max_tokens == 4096
        assert request.temperature == 0.8
        assert request.model == "custom-model"

    def test_code_analysis_request_defaults(self):
        """Test CodeAnalysisRequest with default values"""
        request = CodeAnalysisRequest(question="Analyze this")
        assert request.question == "Analyze this"
        assert request.files is None
        assert request.code is None
        assert request.max_tokens == 8192
        assert request.temperature == 0.2
        assert request.model == DEFAULT_MODEL


class TestFileOperations:
    """Test file reading and context preparation"""

    def test_read_file_content_success(self, tmp_path):
        """Test successful file reading"""
        test_file = tmp_path / "test.py"
        test_file.write_text("def hello():\n return 'world'", encoding="utf-8")

        content = read_file_content(str(test_file))
        assert "--- BEGIN FILE:" in content
        assert "--- END FILE:" in content
        assert "def hello():" in content
        assert "return 'world'" in content

    def test_read_file_content_not_found(self):
        """Test reading non-existent file"""
        # Use a path that's guaranteed not to exist on any platform
        nonexistent_path = os.path.join(
            os.path.sep, "nonexistent_dir_12345", "nonexistent_file.py"
        )
        content = read_file_content(nonexistent_path)
        assert "--- FILE NOT FOUND:" in content
        assert "Error: File does not exist" in content

    def test_read_file_content_directory(self, tmp_path):
        """Test reading a directory instead of file"""
        content = read_file_content(str(tmp_path))
        assert "--- NOT A FILE:" in content
        assert "Error: Path is not a file" in content

    def test_prepare_code_context_with_files(self, tmp_path):
        """Test preparing context from files"""
        file1 = tmp_path / "file1.py"
        file1.write_text("print('file1')", encoding="utf-8")
        file2 = tmp_path / "file2.py"
        file2.write_text("print('file2')", encoding="utf-8")

        context, summary = prepare_code_context([str(file1), str(file2)], None)
        assert "--- BEGIN FILE:" in context
        assert "file1.py" in context
        assert "file2.py" in context
        assert "print('file1')" in context
        assert "print('file2')" in context
        assert "--- END FILE:" in context
        assert "Analyzing 2 file(s)" in summary
        assert "bytes)" in summary

    def test_prepare_code_context_with_code(self):
        """Test preparing context from direct code"""
        code = "def test():\n pass"
        context, summary = prepare_code_context(None, code)
        assert "--- BEGIN DIRECT CODE ---" in context
        assert "--- END DIRECT CODE ---" in context
        assert code in context
        assert "Direct code provided" in summary

    def test_prepare_code_context_mixed(self, tmp_path):
        """Test preparing context from both files and code"""
        test_file = tmp_path / "test.py"
        test_file.write_text("# From file", encoding="utf-8")
        code = "# Direct code"

        context, summary = prepare_code_context([str(test_file)], code)
        assert "# From file" in context
        assert "# Direct code" in context
        assert "Analyzing 1 file(s)" in summary
        assert "Direct code provided" in summary


class TestToolHandlers:
    """Test MCP tool handlers"""

    @pytest.mark.asyncio
    async def test_handle_list_tools(self):
        """Test listing available tools"""
        tools = await handle_list_tools()
        assert len(tools) == 6

        tool_names = [tool.name for tool in tools]
        assert "chat" in tool_names
        assert "analyze_code" in tool_names
        assert "list_models" in tool_names
        assert "get_version" in tool_names
        assert "analyze_file" in tool_names
        assert "extended_think" in tool_names

    @pytest.mark.asyncio
    async def test_handle_call_tool_unknown(self):
        """Test calling unknown tool"""
        result = await handle_call_tool("unknown_tool", {})
        assert len(result) == 1
        assert "Unknown tool" in result[0].text

    @pytest.mark.asyncio
    @patch("google.generativeai.GenerativeModel")
    async def test_handle_call_tool_chat_success(self, mock_model):
        """Test successful chat tool call"""
        # Mock the response
        mock_response = Mock()
        mock_response.candidates = [Mock()]
        mock_response.candidates[0].content.parts = [Mock(text="Test response")]

        mock_instance = Mock()
        mock_instance.generate_content.return_value = mock_response
        mock_model.return_value = mock_instance

        result = await handle_call_tool(
            "chat", {"prompt": "Test prompt", "temperature": 0.5}
        )

        assert len(result) == 1
        assert result[0].text == "Test response"

        # Verify model was called with correct parameters
        mock_model.assert_called_once()
        call_args = mock_model.call_args[1]
        assert call_args["model_name"] == DEFAULT_MODEL
        assert call_args["generation_config"]["temperature"] == 0.5

    @pytest.mark.asyncio
    @patch("google.generativeai.GenerativeModel")
    async def test_handle_call_tool_chat_with_developer_prompt(self, mock_model):
        """Test chat tool uses developer prompt when no system prompt provided"""
        mock_response = Mock()
        mock_response.candidates = [Mock()]
        mock_response.candidates[0].content.parts = [Mock(text="Response")]

        mock_instance = Mock()
        mock_instance.generate_content.return_value = mock_response
        mock_model.return_value = mock_instance

        await handle_call_tool("chat", {"prompt": "Test"})

        # Check that developer prompt was included
        call_args = mock_instance.generate_content.call_args[0][0]
        assert DEVELOPER_SYSTEM_PROMPT in call_args

    @pytest.mark.asyncio
    async def test_handle_call_tool_analyze_code_no_input(self):
        """Test analyze_code with no files or code"""
        result = await handle_call_tool("analyze_code", {"question": "Analyze what?"})
        assert len(result) == 1
        assert "Must provide either 'files' or 'code'" in result[0].text

    @pytest.mark.asyncio
    @patch("google.generativeai.GenerativeModel")
    async def test_handle_call_tool_analyze_code_success(self, mock_model, tmp_path):
        """Test successful code analysis"""
        # Create test file
        test_file = tmp_path / "test.py"
        test_file.write_text("def hello(): pass", encoding="utf-8")

        # Mock response
        mock_response = Mock()
        mock_response.candidates = [Mock()]
        mock_response.candidates[0].content.parts = [Mock(text="Analysis result")]

        mock_instance = Mock()
        mock_instance.generate_content.return_value = mock_response
        mock_model.return_value = mock_instance

        result = await handle_call_tool(
            "analyze_code", {"files": [str(test_file)], "question": "Analyze this"}
        )

        assert len(result) == 1
        # Check that the response contains both summary and Gemini's response
        response_text = result[0].text
        assert "Analyzing 1 file(s)" in response_text
        assert "Gemini's Analysis:" in response_text
        assert "Analysis result" in response_text

    @pytest.mark.asyncio
    @patch("google.generativeai.list_models")
    async def test_handle_call_tool_list_models(self, mock_list_models):
        """Test listing models"""
        # Mock model data
        mock_model = Mock()
        mock_model.name = "test-model"
        mock_model.display_name = "Test Model"
        mock_model.description = "A test model"
        mock_model.supported_generation_methods = ["generateContent"]

        mock_list_models.return_value = [mock_model]

        result = await handle_call_tool("list_models", {})
        assert len(result) == 1

        models = json.loads(result[0].text)
        assert len(models) == 1
        assert models[0]["name"] == "test-model"
        assert models[0]["is_default"] == False

    @pytest.mark.asyncio
    @patch("google.generativeai.GenerativeModel")
    async def test_handle_call_tool_analyze_file_success(self, mock_model, tmp_path):
        """Test successful file analysis with analyze_file tool"""
        # Create test file
        test_file = tmp_path / "test.py"
        test_file.write_text("def hello(): pass", encoding="utf-8")

        # Mock response
        mock_response = Mock()
        mock_response.candidates = [Mock()]
        mock_response.candidates[0].content.parts = [Mock(text="File analysis result")]

        mock_instance = Mock()
        mock_instance.generate_content.return_value = mock_response
        mock_model.return_value = mock_instance

        result = await handle_call_tool(
            "analyze_file", {"files": [str(test_file)], "question": "Analyze this file"}
        )

        assert len(result) == 1
        response_text = result[0].text
        assert "Analyzing 1 file(s)" in response_text
        assert "Gemini's Analysis:" in response_text
        assert "File analysis result" in response_text

    @pytest.mark.asyncio
    @patch("google.generativeai.GenerativeModel")
    async def test_handle_call_tool_extended_think_success(self, mock_model):
        """Test successful extended thinking"""
        # Mock response
        mock_response = Mock()
        mock_response.candidates = [Mock()]
        mock_response.candidates[0].content.parts = [
            Mock(text="Extended thinking result")
        ]

        mock_instance = Mock()
        mock_instance.generate_content.return_value = mock_response
        mock_model.return_value = mock_instance

        result = await handle_call_tool(
            "extended_think",
            {
                "thought_process": "Claude's analysis of the problem...",
                "context": "Building a distributed system",
                "focus": "performance",
            },
        )

        assert len(result) == 1
        response_text = result[0].text
        assert "Extended Analysis by Gemini:" in response_text
        assert "Extended thinking result" in response_text


class TestErrorHandling:
    """Test error handling scenarios"""

    @pytest.mark.asyncio
    @patch("google.generativeai.GenerativeModel")
    async def test_handle_call_tool_chat_api_error(self, mock_model):
        """Test handling API errors in chat"""
        mock_instance = Mock()
        mock_instance.generate_content.side_effect = Exception("API Error")
        mock_model.return_value = mock_instance

        result = await handle_call_tool("chat", {"prompt": "Test"})
        assert len(result) == 1
        assert "Error calling Gemini API" in result[0].text
        assert "API Error" in result[0].text

    @pytest.mark.asyncio
    @patch("google.generativeai.GenerativeModel")
    async def test_handle_call_tool_chat_blocked_response(self, mock_model):
        """Test handling blocked responses"""
        mock_response = Mock()
        mock_response.candidates = [Mock()]
        mock_response.candidates[0].content.parts = []
        mock_response.candidates[0].finish_reason = 2

        mock_instance = Mock()
        mock_instance.generate_content.return_value = mock_response
        mock_model.return_value = mock_instance

        result = await handle_call_tool("chat", {"prompt": "Test"})
        assert len(result) == 1
        assert "Response blocked or incomplete" in result[0].text
        assert "Finish reason: 2" in result[0].text


if __name__ == "__main__":
    pytest.main([__file__, "-v"])
@@ -1,48 +0,0 @@ (deleted file)
"""
Test that imports work correctly when package is installed
This helps verify CI setup is correct
"""

import pytest


def test_direct_import():
    """Test that gemini_server can be imported directly"""
    try:
        import gemini_server

        assert hasattr(gemini_server, "GeminiChatRequest")
        assert hasattr(gemini_server, "CodeAnalysisRequest")
        assert hasattr(gemini_server, "handle_list_tools")
        assert hasattr(gemini_server, "handle_call_tool")
    except ImportError as e:
        pytest.fail(f"Failed to import gemini_server: {e}")


def test_from_import():
    """Test that specific items can be imported from gemini_server"""
    try:
        from gemini_server import (
            GeminiChatRequest,
            CodeAnalysisRequest,
            DEFAULT_MODEL,
            DEVELOPER_SYSTEM_PROMPT,
        )

        assert GeminiChatRequest is not None
        assert CodeAnalysisRequest is not None
        assert isinstance(DEFAULT_MODEL, str)
        assert isinstance(DEVELOPER_SYSTEM_PROMPT, str)
    except ImportError as e:
        pytest.fail(f"Failed to import from gemini_server: {e}")


def test_google_generativeai_import():
    """Test that google.generativeai can be imported"""
    try:
        import google.generativeai as genai

        assert hasattr(genai, "GenerativeModel")
        assert hasattr(genai, "configure")
    except ImportError as e:
        pytest.fail(f"Failed to import google.generativeai: {e}")
tests/test_server.py (new file, 96 lines)
@@ -0,0 +1,96 @@
"""
Tests for the main server functionality
"""

import pytest
import json
from unittest.mock import Mock, patch

from server import handle_list_tools, handle_call_tool


class TestServerTools:
    """Test server tool handling"""

    @pytest.mark.asyncio
    async def test_handle_list_tools(self):
        """Test listing all available tools"""
        tools = await handle_list_tools()
        tool_names = [tool.name for tool in tools]

        # Check all core tools are present
        assert "think_deeper" in tool_names
        assert "review_code" in tool_names
        assert "debug_issue" in tool_names
        assert "analyze" in tool_names
        assert "chat" in tool_names
        assert "list_models" in tool_names
        assert "get_version" in tool_names

        # Should have exactly 7 tools
        assert len(tools) == 7

        # Check descriptions are verbose
        for tool in tools:
            assert (
                len(tool.description) > 50
            )  # All should have detailed descriptions

    @pytest.mark.asyncio
    async def test_handle_call_tool_unknown(self):
        """Test calling an unknown tool"""
        result = await handle_call_tool("unknown_tool", {})
        assert len(result) == 1
        assert "Unknown tool: unknown_tool" in result[0].text

    @pytest.mark.asyncio
    @patch("google.generativeai.GenerativeModel")
    async def test_handle_chat(self, mock_model):
        """Test chat functionality"""
        # Mock response
        mock_response = Mock()
        mock_response.candidates = [Mock()]
        mock_response.candidates[0].content.parts = [
            Mock(text="Chat response")
        ]

        mock_instance = Mock()
        mock_instance.generate_content.return_value = mock_response
        mock_model.return_value = mock_instance

        result = await handle_call_tool("chat", {"prompt": "Hello Gemini"})

        assert len(result) == 1
        assert result[0].text == "Chat response"

    @pytest.mark.asyncio
    @patch("google.generativeai.list_models")
    async def test_handle_list_models(self, mock_list_models):
        """Test listing models"""
        # Mock model data
        mock_model = Mock()
        mock_model.name = "models/gemini-2.5-pro-preview-06-05"
        mock_model.display_name = "Gemini 2.5 Pro"
        mock_model.description = "Latest Gemini model"
        mock_model.supported_generation_methods = ["generateContent"]

        mock_list_models.return_value = [mock_model]

        result = await handle_call_tool("list_models", {})
        assert len(result) == 1

        models = json.loads(result[0].text)
        assert len(models) == 1
        assert models[0]["name"] == "models/gemini-2.5-pro-preview-06-05"
        assert models[0]["is_default"] is True

    @pytest.mark.asyncio
    async def test_handle_get_version(self):
        """Test getting version info"""
        result = await handle_call_tool("get_version", {})
        assert len(result) == 1

        response = result[0].text
        assert "Gemini MCP Server v2.4.0" in response
        assert "Available Tools:" in response
        assert "think_deeper" in response
tests/test_tools.py (new file, 202 lines)
@@ -0,0 +1,202 @@
"""
Tests for individual tool implementations
"""

import pytest
from unittest.mock import Mock, patch

from tools import ThinkDeeperTool, ReviewCodeTool, DebugIssueTool, AnalyzeTool


class TestThinkDeeperTool:
    """Test the think_deeper tool"""

    @pytest.fixture
    def tool(self):
        return ThinkDeeperTool()

    def test_tool_metadata(self, tool):
        """Test tool metadata"""
        assert tool.get_name() == "think_deeper"
        assert "EXTENDED THINKING" in tool.get_description()
        assert tool.get_default_temperature() == 0.7

        schema = tool.get_input_schema()
        assert "current_analysis" in schema["properties"]
        assert schema["required"] == ["current_analysis"]

    @pytest.mark.asyncio
    @patch("google.generativeai.GenerativeModel")
    async def test_execute_success(self, mock_model, tool):
        """Test successful execution"""
        # Mock response
        mock_response = Mock()
        mock_response.candidates = [Mock()]
        mock_response.candidates[0].content.parts = [
            Mock(text="Extended analysis")
        ]

        mock_instance = Mock()
        mock_instance.generate_content.return_value = mock_response
        mock_model.return_value = mock_instance

        result = await tool.execute(
            {
                "current_analysis": "Initial analysis",
                "problem_context": "Building a cache",
                "focus_areas": ["performance", "scalability"],
            }
        )

        assert len(result) == 1
        assert "Extended Analysis by Gemini:" in result[0].text
        assert "Extended analysis" in result[0].text


class TestReviewCodeTool:
    """Test the review_code tool"""

    @pytest.fixture
    def tool(self):
        return ReviewCodeTool()

    def test_tool_metadata(self, tool):
        """Test tool metadata"""
        assert tool.get_name() == "review_code"
        assert "PROFESSIONAL CODE REVIEW" in tool.get_description()
        assert tool.get_default_temperature() == 0.2

        schema = tool.get_input_schema()
        assert "files" in schema["properties"]
        assert schema["required"] == ["files"]

    @pytest.mark.asyncio
    @patch("google.generativeai.GenerativeModel")
    async def test_execute_with_review_type(self, mock_model, tool, tmp_path):
        """Test execution with specific review type"""
        # Create test file
        test_file = tmp_path / "test.py"
        test_file.write_text("def insecure(): pass", encoding="utf-8")

        # Mock response
        mock_response = Mock()
        mock_response.candidates = [Mock()]
        mock_response.candidates[0].content.parts = [
            Mock(text="Security issues found")
        ]

        mock_instance = Mock()
        mock_instance.generate_content.return_value = mock_response
        mock_model.return_value = mock_instance

        result = await tool.execute(
            {
                "files": [str(test_file)],
                "review_type": "security",
                "focus_on": "authentication",
            }
        )

        assert len(result) == 1
        assert "Code Review (SECURITY)" in result[0].text
        assert "Focus: authentication" in result[0].text
        assert "Security issues found" in result[0].text


class TestDebugIssueTool:
    """Test the debug_issue tool"""

    @pytest.fixture
    def tool(self):
        return DebugIssueTool()

    def test_tool_metadata(self, tool):
        """Test tool metadata"""
        assert tool.get_name() == "debug_issue"
        assert "DEBUG & ROOT CAUSE ANALYSIS" in tool.get_description()
        assert tool.get_default_temperature() == 0.2

        schema = tool.get_input_schema()
        assert "error_description" in schema["properties"]
        assert schema["required"] == ["error_description"]

    @pytest.mark.asyncio
    @patch("google.generativeai.GenerativeModel")
    async def test_execute_with_context(self, mock_model, tool):
        """Test execution with error context"""
        # Mock response
        mock_response = Mock()
        mock_response.candidates = [Mock()]
        mock_response.candidates[0].content.parts = [
            Mock(text="Root cause: race condition")
        ]

        mock_instance = Mock()
        mock_instance.generate_content.return_value = mock_response
        mock_model.return_value = mock_instance

        result = await tool.execute(
            {
                "error_description": "Test fails intermittently",
                "error_context": "AssertionError in test_async",
                "previous_attempts": "Added sleep, still fails",
            }
        )

        assert len(result) == 1
        assert "Debug Analysis" in result[0].text
        assert "Root cause: race condition" in result[0].text


class TestAnalyzeTool:
    """Test the analyze tool"""

    @pytest.fixture
    def tool(self):
        return AnalyzeTool()

    def test_tool_metadata(self, tool):
        """Test tool metadata"""
        assert tool.get_name() == "analyze"
        assert "ANALYZE FILES & CODE" in tool.get_description()
        assert tool.get_default_temperature() == 0.2

        schema = tool.get_input_schema()
        assert "files" in schema["properties"]
        assert "question" in schema["properties"]
        assert set(schema["required"]) == {"files", "question"}

    @pytest.mark.asyncio
    @patch("google.generativeai.GenerativeModel")
    async def test_execute_with_analysis_type(
        self, mock_model, tool, tmp_path
    ):
        """Test execution with specific analysis type"""
        # Create test file
        test_file = tmp_path / "module.py"
        test_file.write_text("class Service: pass", encoding="utf-8")

        # Mock response
        mock_response = Mock()
        mock_response.candidates = [Mock()]
        mock_response.candidates[0].content.parts = [
            Mock(text="Architecture analysis")
        ]

        mock_instance = Mock()
        mock_instance.generate_content.return_value = mock_response
        mock_model.return_value = mock_instance

        result = await tool.execute(
            {
                "files": [str(test_file)],
                "question": "What's the structure?",
                "analysis_type": "architecture",
                "output_format": "summary",
            }
        )

        assert len(result) == 1
        assert "ARCHITECTURE Analysis" in result[0].text
        assert "Analyzed 1 file(s)" in result[0].text
        assert "Architecture analysis" in result[0].text
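The tool tests above all drive their targets through the same small surface: get_name, get_description, get_default_temperature, get_input_schema, and an async execute. Below is a minimal sketch of a base class consistent with that usage. Only the method names and return shapes are taken from the tests; the class name, module path, and type annotations are assumptions.

# tools/base.py (hypothetical sketch inferred from tests/test_tools.py)
from abc import ABC, abstractmethod
from typing import Any

from mcp.types import TextContent


class BaseTool(ABC):
    """Interface shared by think_deeper, review_code, debug_issue and analyze."""

    @abstractmethod
    def get_name(self) -> str:
        """Tool name as registered with the MCP server."""

    @abstractmethod
    def get_description(self) -> str:
        """Verbose description including natural-language trigger hints."""

    @abstractmethod
    def get_default_temperature(self) -> float:
        """Per the tests: 0.2 for analytical tools, 0.7 for think_deeper."""

    @abstractmethod
    def get_input_schema(self) -> dict[str, Any]:
        """JSON schema describing the tool's arguments."""

    @abstractmethod
    async def execute(self, arguments: dict[str, Any]) -> list[TextContent]:
        """Run the tool and return a list of MCP text content blocks."""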
tests/test_utils.py (new file, 91 lines)
@@ -0,0 +1,91 @@
"""
Tests for utility functions
"""

from utils import (
    read_file_content,
    read_files,
    estimate_tokens,
    check_token_limit,
)


class TestFileUtils:
    """Test file reading utilities"""

    def test_read_file_content_success(self, tmp_path):
        """Test successful file reading"""
        test_file = tmp_path / "test.py"
        test_file.write_text(
            "def hello():\n return 'world'", encoding="utf-8"
        )

        content = read_file_content(str(test_file))
        assert "--- BEGIN FILE:" in content
        assert "--- END FILE:" in content
        assert "def hello():" in content
        assert "return 'world'" in content

    def test_read_file_content_not_found(self):
        """Test reading non-existent file"""
        content = read_file_content("/nonexistent/file.py")
        assert "--- FILE NOT FOUND:" in content
        assert "Error: File does not exist" in content

    def test_read_file_content_directory(self, tmp_path):
        """Test reading a directory"""
        content = read_file_content(str(tmp_path))
        assert "--- NOT A FILE:" in content
        assert "Error: Path is not a file" in content

    def test_read_files_multiple(self, tmp_path):
        """Test reading multiple files"""
        file1 = tmp_path / "file1.py"
        file1.write_text("print('file1')", encoding="utf-8")
        file2 = tmp_path / "file2.py"
        file2.write_text("print('file2')", encoding="utf-8")

        content, summary = read_files([str(file1), str(file2)])

        assert "--- BEGIN FILE:" in content
        assert "file1.py" in content
        assert "file2.py" in content
        assert "print('file1')" in content
        assert "print('file2')" in content

        assert "Reading 2 file(s)" in summary

    def test_read_files_with_code(self):
        """Test reading with direct code"""
        code = "def test():\n pass"
        content, summary = read_files([], code)

        assert "--- BEGIN DIRECT CODE ---" in content
        assert "--- END DIRECT CODE ---" in content
        assert code in content

        assert "Direct code:" in summary


class TestTokenUtils:
    """Test token counting utilities"""

    def test_estimate_tokens(self):
        """Test token estimation"""
        # Rough estimate: 1 token ≈ 4 characters
        text = "a" * 400  # 400 characters
        assert estimate_tokens(text) == 100

    def test_check_token_limit_within(self):
        """Test token limit check - within limit"""
        text = "a" * 4000  # 1000 tokens
        within_limit, tokens = check_token_limit(text)
        assert within_limit is True
        assert tokens == 1000

    def test_check_token_limit_exceeded(self):
        """Test token limit check - exceeded"""
        text = "a" * 5_000_000  # 1.25M tokens
        within_limit, tokens = check_token_limit(text)
        assert within_limit is False
        assert tokens == 1_250_000
@@ -1,105 +0,0 @@ (deleted file)
"""
Test verbose output functionality
"""

import pytest
from pathlib import Path
import sys

# Add parent directory to path for imports
parent_dir = Path(__file__).resolve().parent.parent
if str(parent_dir) not in sys.path:
    sys.path.insert(0, str(parent_dir))

from gemini_server import prepare_code_context


class TestNewFormattingBehavior:
    """Test the improved formatting behavior"""

    def test_file_formatting_for_gemini(self, tmp_path):
        """Test that files are properly formatted for Gemini"""
        test_file = tmp_path / "test.py"
        content = "def hello():\n return 'world'"
        test_file.write_text(content, encoding="utf-8")

        context, summary = prepare_code_context([str(test_file)], None)

        # Context should have clear markers for Gemini
        assert "--- BEGIN FILE:" in context
        assert "--- END FILE:" in context
        assert str(test_file) in context
        assert content in context

        # Summary should be concise for terminal
        assert "Analyzing 1 file(s)" in summary
        assert "bytes)" in summary
        assert len(summary) < len(context)  # Summary much smaller than full context

    def test_terminal_summary_shows_preview(self, tmp_path):
        """Test that terminal summary shows small preview"""
        test_file = tmp_path / "large_file.py"
        content = "# This is a large file\n" + "x = 1\n" * 1000
        test_file.write_text(content, encoding="utf-8")

        context, summary = prepare_code_context([str(test_file)], None)

        # Summary should show preview but not full content
        assert "Analyzing 1 file(s)" in summary
        assert str(test_file) in summary
        assert "bytes)" in summary
        assert "Preview:" in summary
        # Full content should not be in summary
        assert "x = 1" not in summary or summary.count("x = 1") < 5

    def test_multiple_files_summary(self, tmp_path):
        """Test summary with multiple files"""
        files = []
        for i in range(3):
            file = tmp_path / f"file{i}.py"
            file.write_text(f"# File {i}\nprint({i})", encoding="utf-8")
            files.append(str(file))

        context, summary = prepare_code_context(files, None)

        assert "Analyzing 3 file(s)" in summary
        for file in files:
            assert file in summary
        assert "bytes)" in summary
        # Should have clear delimiters in context
        assert context.count("--- BEGIN FILE:") == 3
        assert context.count("--- END FILE:") == 3

    def test_direct_code_formatting(self):
        """Test direct code formatting"""
        direct_code = "# Direct code\nprint('hello')"

        context, summary = prepare_code_context(None, direct_code)

        # Context should have clear markers
        assert "--- BEGIN DIRECT CODE ---" in context
        assert "--- END DIRECT CODE ---" in context
        assert direct_code in context

        # Summary should show preview
        assert "Direct code provided" in summary
        assert f"({len(direct_code)} characters)" in summary
        assert "Preview:" in summary

    def test_mixed_content_formatting(self, tmp_path):
        """Test formatting with both files and direct code"""
        test_file = tmp_path / "test.py"
        test_file.write_text("# Test file", encoding="utf-8")
        direct_code = "# Direct code\nprint('hello')"

        context, summary = prepare_code_context([str(test_file)], direct_code)

        # Context should have both with clear separation
        assert "--- BEGIN FILE:" in context
        assert "--- END FILE:" in context
        assert "--- BEGIN DIRECT CODE ---" in context
        assert "--- END DIRECT CODE ---" in context

        # Summary should mention both
        assert "Analyzing 1 file(s)" in summary
        assert "Direct code provided" in summary
@@ -1,89 +0,0 @@ (deleted file)
"""
Test version functionality
"""

import pytest
import json
from pathlib import Path
import sys

# Add parent directory to path for imports
parent_dir = Path(__file__).resolve().parent.parent
if str(parent_dir) not in sys.path:
    sys.path.insert(0, str(parent_dir))

from gemini_server import (
    __version__,
    __updated__,
    __author__,
    handle_list_tools,
    handle_call_tool,
)


class TestVersionFunctionality:
    """Test version-related functionality"""

    @pytest.mark.asyncio
    async def test_version_constants_exist(self):
        """Test that version constants are defined"""
        assert __version__ is not None
        assert isinstance(__version__, str)
        assert __updated__ is not None
        assert isinstance(__updated__, str)
        assert __author__ is not None
        assert isinstance(__author__, str)

    @pytest.mark.asyncio
    async def test_version_tool_in_list(self):
        """Test that get_version tool appears in tool list"""
        tools = await handle_list_tools()
        tool_names = [tool.name for tool in tools]
        assert "get_version" in tool_names

        # Find the version tool
        version_tool = next(t for t in tools if t.name == "get_version")
        assert (
            version_tool.description
            == "Get the version and metadata of the Gemini MCP Server"
        )

    @pytest.mark.asyncio
    async def test_get_version_tool_execution(self):
        """Test executing the get_version tool"""
        result = await handle_call_tool("get_version", {})

        assert len(result) == 1
        assert result[0].type == "text"

        # Check the response contains expected information
        response_text = result[0].text
        assert __version__ in response_text
        assert __updated__ in response_text
        assert __author__ in response_text
        assert "Gemini MCP Server" in response_text
        assert "Default Model:" in response_text
        assert "Max Context:" in response_text
        assert "Python:" in response_text
        assert "Started:" in response_text
        assert "github.com/BeehiveInnovations/gemini-mcp-server" in response_text

    @pytest.mark.asyncio
    async def test_version_format(self):
        """Test that version follows semantic versioning"""
        parts = __version__.split(".")
        assert len(parts) == 3  # Major.Minor.Patch
        for part in parts:
            assert part.isdigit()  # Each part should be numeric

    @pytest.mark.asyncio
    async def test_date_format(self):
        """Test that updated date follows expected format"""
        # Expected format: YYYY-MM-DD
        parts = __updated__.split("-")
        assert len(parts) == 3
        assert len(parts[0]) == 4  # Year
        assert len(parts[1]) == 2  # Month
        assert len(parts[2]) == 2  # Day
        for part in parts:
            assert part.isdigit()