refactor: remove emojis and apply code formatting

- Remove all emoji characters from output strings for better compatibility
- Update all tests to match non-emoji output
- Apply black formatting to all Python files
- Ensure all tests pass and linting succeeds
- Remove htmlcov directory (already in .gitignore)

This change improves cross-platform compatibility and ensures
consistent code formatting across the project.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
Author: Fahad
Date: 2025-06-08 21:32:38 +04:00
Commit: b6f8c43b81 (parent: fb1d843950)

7 changed files with 157 additions and 137 deletions

@@ -175,9 +175,9 @@ def prepare_code_context(
                 if preview.strip():
                     summary_parts.append(f" Preview: {preview[:50]}...")
             except Exception:
-                summary_parts.append(f" 📄 {file_path} ({size:,} bytes)")
+                summary_parts.append(f" {file_path} ({size:,} bytes)")
         else:
-            summary_parts.append(f" {file_path} (not found)")
+            summary_parts.append(f" {file_path} (not found)")
 
     # Add direct code
     if code:
@@ -303,7 +303,9 @@ async def handle_call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
         try:
             # Use the specified model with optimized settings
             model_name = request.model or DEFAULT_MODEL
-            temperature = request.temperature if request.temperature is not None else 0.5
+            temperature = (
+                request.temperature if request.temperature is not None else 0.5
+            )
             max_tokens = request.max_tokens if request.max_tokens is not None else 8192
 
             model = genai.GenerativeModel(
@@ -359,7 +361,9 @@ async def handle_call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
         try:
             # Prepare code context - always use non-verbose mode for Claude Code compatibility
-            code_context, summary = prepare_code_context(request_analysis.files, request_analysis.code)
+            code_context, summary = prepare_code_context(
+                request_analysis.files, request_analysis.code
+            )
 
             # Count approximate tokens (rough estimate: 1 token ≈ 4 characters)
             estimated_tokens = len(code_context) // 4
@@ -374,8 +378,16 @@ async def handle_call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
             # Use the specified model with optimized settings for code analysis
             model_name = request_analysis.model or DEFAULT_MODEL
-            temperature = request_analysis.temperature if request_analysis.temperature is not None else 0.2
-            max_tokens = request_analysis.max_tokens if request_analysis.max_tokens is not None else 8192
+            temperature = (
+                request_analysis.temperature
+                if request_analysis.temperature is not None
+                else 0.2
+            )
+            max_tokens = (
+                request_analysis.max_tokens
+                if request_analysis.max_tokens is not None
+                else 8192
+            )
 
             model = genai.GenerativeModel(
                 model_name=model_name,
@@ -417,7 +429,7 @@ marked with their paths and content boundaries."""
             # Always return response with summary for Claude Code compatibility
             if request_analysis.files or request_analysis.code:
-                response_text = f"{summary}\n\n🤖 Gemini's Analysis:\n{text}"
+                response_text = f"{summary}\n\nGemini's Analysis:\n{text}"
             else:
                 response_text = text
@@ -431,13 +443,19 @@ marked with their paths and content boundaries."""
             # List available models
             models = []
             for model_info in genai.list_models():
-                if (hasattr(model_info, 'supported_generation_methods') and
-                    "generateContent" in model_info.supported_generation_methods):
+                if (
+                    hasattr(model_info, "supported_generation_methods")
+                    and "generateContent" in model_info.supported_generation_methods
+                ):
                     models.append(
                         {
                             "name": model_info.name,
-                            "display_name": getattr(model_info, 'display_name', 'Unknown'),
-                            "description": getattr(model_info, 'description', 'No description'),
+                            "display_name": getattr(
+                                model_info, "display_name", "Unknown"
+                            ),
+                            "description": getattr(
+                                model_info, "description", "No description"
+                            ),
                             "is_default": model_info.name.endswith(DEFAULT_MODEL),
                         }
                     )
@@ -462,7 +480,7 @@ marked with their paths and content boundaries."""
         return [
             TextContent(
                 type="text",
-                text=f"""🤖 Gemini MCP Server v{__version__}
+                text=f"""Gemini MCP Server v{__version__}
 Updated: {__updated__}
 Author: {__author__}

@@ -1 +1 @@
-# Tests for Gemini MCP Server
+# Tests for Gemini MCP Server

@@ -18,11 +18,11 @@ if "GEMINI_API_KEY" not in os.environ:
 # Configure asyncio for Windows compatibility
 if sys.platform == "win32":
     import asyncio
+
     asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
 
+
 # Pytest configuration
 def pytest_configure(config):
     """Configure pytest with custom markers"""
-    config.addinivalue_line(
-        "markers", "asyncio: mark test as async"
-    )
+    config.addinivalue_line("markers", "asyncio: mark test as async")

@@ -22,13 +22,13 @@ from gemini_server import (
     handle_list_tools,
     handle_call_tool,
     DEVELOPER_SYSTEM_PROMPT,
-    DEFAULT_MODEL
+    DEFAULT_MODEL,
 )
 
 
 class TestModels:
     """Test request models"""
 
     def test_gemini_chat_request_defaults(self):
         """Test GeminiChatRequest with default values"""
         request = GeminiChatRequest(prompt="Test prompt")
@@ -37,7 +37,7 @@ class TestModels:
         assert request.max_tokens == 8192
         assert request.temperature == 0.5
         assert request.model == DEFAULT_MODEL
 
     def test_gemini_chat_request_custom(self):
         """Test GeminiChatRequest with custom values"""
         request = GeminiChatRequest(
@@ -45,13 +45,13 @@ class TestModels:
             system_prompt="Custom system",
             max_tokens=4096,
             temperature=0.8,
-            model="custom-model"
+            model="custom-model",
         )
         assert request.system_prompt == "Custom system"
         assert request.max_tokens == 4096
         assert request.temperature == 0.8
         assert request.model == "custom-model"
 
     def test_code_analysis_request_defaults(self):
         """Test CodeAnalysisRequest with default values"""
         request = CodeAnalysisRequest(question="Analyze this")
@@ -65,18 +65,18 @@ class TestModels:
 class TestFileOperations:
     """Test file reading and context preparation"""
 
     def test_read_file_content_success(self, tmp_path):
         """Test successful file reading"""
         test_file = tmp_path / "test.py"
-        test_file.write_text("def hello():\n return 'world'", encoding='utf-8')
+        test_file.write_text("def hello():\n return 'world'", encoding="utf-8")
 
         content = read_file_content(str(test_file))
 
         assert "--- BEGIN FILE:" in content
         assert "--- END FILE:" in content
         assert "def hello():" in content
         assert "return 'world'" in content
 
     def test_read_file_content_not_found(self):
         """Test reading non-existent file"""
         # Use a path that's guaranteed not to exist on any platform
@@ -86,20 +86,20 @@ class TestFileOperations:
         content = read_file_content(nonexistent_path)
         assert "--- FILE NOT FOUND:" in content
         assert "Error: File does not exist" in content
 
     def test_read_file_content_directory(self, tmp_path):
         """Test reading a directory instead of file"""
         content = read_file_content(str(tmp_path))
         assert "--- NOT A FILE:" in content
         assert "Error: Path is not a file" in content
 
     def test_prepare_code_context_with_files(self, tmp_path):
         """Test preparing context from files"""
         file1 = tmp_path / "file1.py"
-        file1.write_text("print('file1')", encoding='utf-8')
+        file1.write_text("print('file1')", encoding="utf-8")
         file2 = tmp_path / "file2.py"
-        file2.write_text("print('file2')", encoding='utf-8')
+        file2.write_text("print('file2')", encoding="utf-8")
 
         context, summary = prepare_code_context([str(file1), str(file2)], None)
         assert "--- BEGIN FILE:" in context
         assert "file1.py" in context
@@ -107,9 +107,9 @@ class TestFileOperations:
         assert "print('file1')" in context
         assert "print('file2')" in context
         assert "--- END FILE:" in context
-        assert "📁 Analyzing 2 file(s)" in summary
+        assert "Analyzing 2 file(s)" in summary
         assert "bytes)" in summary
 
     def test_prepare_code_context_with_code(self):
         """Test preparing context from direct code"""
         code = "def test():\n pass"
@@ -117,128 +117,124 @@ class TestFileOperations:
         assert "--- BEGIN DIRECT CODE ---" in context
         assert "--- END DIRECT CODE ---" in context
         assert code in context
-        assert "💻 Direct code provided" in summary
+        assert "Direct code provided" in summary
 
     def test_prepare_code_context_mixed(self, tmp_path):
         """Test preparing context from both files and code"""
         test_file = tmp_path / "test.py"
-        test_file.write_text("# From file", encoding='utf-8')
+        test_file.write_text("# From file", encoding="utf-8")
         code = "# Direct code"
 
         context, summary = prepare_code_context([str(test_file)], code)
         assert "# From file" in context
         assert "# Direct code" in context
-        assert "📁 Analyzing 1 file(s)" in summary
-        assert "💻 Direct code provided" in summary
+        assert "Analyzing 1 file(s)" in summary
+        assert "Direct code provided" in summary
 
 
 class TestToolHandlers:
     """Test MCP tool handlers"""
 
     @pytest.mark.asyncio
     async def test_handle_list_tools(self):
         """Test listing available tools"""
         tools = await handle_list_tools()
         assert len(tools) == 4
         tool_names = [tool.name for tool in tools]
         assert "chat" in tool_names
         assert "analyze_code" in tool_names
         assert "list_models" in tool_names
         assert "get_version" in tool_names
 
     @pytest.mark.asyncio
     async def test_handle_call_tool_unknown(self):
         """Test calling unknown tool"""
         result = await handle_call_tool("unknown_tool", {})
         assert len(result) == 1
         assert "Unknown tool" in result[0].text
 
     @pytest.mark.asyncio
-    @patch('google.generativeai.GenerativeModel')
+    @patch("google.generativeai.GenerativeModel")
     async def test_handle_call_tool_chat_success(self, mock_model):
         """Test successful chat tool call"""
         # Mock the response
         mock_response = Mock()
         mock_response.candidates = [Mock()]
         mock_response.candidates[0].content.parts = [Mock(text="Test response")]
 
         mock_instance = Mock()
         mock_instance.generate_content.return_value = mock_response
         mock_model.return_value = mock_instance
 
-        result = await handle_call_tool("chat", {
-            "prompt": "Test prompt",
-            "temperature": 0.5
-        })
+        result = await handle_call_tool(
+            "chat", {"prompt": "Test prompt", "temperature": 0.5}
+        )
 
         assert len(result) == 1
         assert result[0].text == "Test response"
 
         # Verify model was called with correct parameters
         mock_model.assert_called_once()
         call_args = mock_model.call_args[1]
-        assert call_args['model_name'] == DEFAULT_MODEL
-        assert call_args['generation_config']['temperature'] == 0.5
+        assert call_args["model_name"] == DEFAULT_MODEL
+        assert call_args["generation_config"]["temperature"] == 0.5
 
     @pytest.mark.asyncio
-    @patch('google.generativeai.GenerativeModel')
+    @patch("google.generativeai.GenerativeModel")
     async def test_handle_call_tool_chat_with_developer_prompt(self, mock_model):
         """Test chat tool uses developer prompt when no system prompt provided"""
         mock_response = Mock()
         mock_response.candidates = [Mock()]
         mock_response.candidates[0].content.parts = [Mock(text="Response")]
 
         mock_instance = Mock()
         mock_instance.generate_content.return_value = mock_response
         mock_model.return_value = mock_instance
 
         await handle_call_tool("chat", {"prompt": "Test"})
 
         # Check that developer prompt was included
         call_args = mock_instance.generate_content.call_args[0][0]
         assert DEVELOPER_SYSTEM_PROMPT in call_args
 
     @pytest.mark.asyncio
     async def test_handle_call_tool_analyze_code_no_input(self):
         """Test analyze_code with no files or code"""
-        result = await handle_call_tool("analyze_code", {
-            "question": "Analyze what?"
-        })
+        result = await handle_call_tool("analyze_code", {"question": "Analyze what?"})
 
         assert len(result) == 1
         assert "Must provide either 'files' or 'code'" in result[0].text
 
     @pytest.mark.asyncio
-    @patch('google.generativeai.GenerativeModel')
+    @patch("google.generativeai.GenerativeModel")
    async def test_handle_call_tool_analyze_code_success(self, mock_model, tmp_path):
         """Test successful code analysis"""
         # Create test file
         test_file = tmp_path / "test.py"
-        test_file.write_text("def hello(): pass", encoding='utf-8')
+        test_file.write_text("def hello(): pass", encoding="utf-8")
 
         # Mock response
         mock_response = Mock()
         mock_response.candidates = [Mock()]
         mock_response.candidates[0].content.parts = [Mock(text="Analysis result")]
 
         mock_instance = Mock()
         mock_instance.generate_content.return_value = mock_response
         mock_model.return_value = mock_instance
 
-        result = await handle_call_tool("analyze_code", {
-            "files": [str(test_file)],
-            "question": "Analyze this"
-        })
+        result = await handle_call_tool(
+            "analyze_code", {"files": [str(test_file)], "question": "Analyze this"}
+        )
 
         assert len(result) == 1
 
         # Check that the response contains both summary and Gemini's response
         response_text = result[0].text
-        assert "📁 Analyzing 1 file(s)" in response_text
-        assert "🤖 Gemini's Analysis:" in response_text
+        assert "Analyzing 1 file(s)" in response_text
+        assert "Gemini's Analysis:" in response_text
         assert "Analysis result" in response_text
 
     @pytest.mark.asyncio
-    @patch('google.generativeai.list_models')
+    @patch("google.generativeai.list_models")
     async def test_handle_call_tool_list_models(self, mock_list_models):
         """Test listing models"""
         # Mock model data
@@ -246,48 +242,48 @@ class TestToolHandlers:
         mock_model.name = "test-model"
         mock_model.display_name = "Test Model"
         mock_model.description = "A test model"
-        mock_model.supported_generation_methods = ['generateContent']
+        mock_model.supported_generation_methods = ["generateContent"]
 
         mock_list_models.return_value = [mock_model]
 
         result = await handle_call_tool("list_models", {})
 
         assert len(result) == 1
         models = json.loads(result[0].text)
         assert len(models) == 1
-        assert models[0]['name'] == "test-model"
-        assert models[0]['is_default'] == False
+        assert models[0]["name"] == "test-model"
+        assert models[0]["is_default"] == False
 
 
 class TestErrorHandling:
     """Test error handling scenarios"""
 
     @pytest.mark.asyncio
-    @patch('google.generativeai.GenerativeModel')
+    @patch("google.generativeai.GenerativeModel")
     async def test_handle_call_tool_chat_api_error(self, mock_model):
         """Test handling API errors in chat"""
         mock_instance = Mock()
         mock_instance.generate_content.side_effect = Exception("API Error")
         mock_model.return_value = mock_instance
 
         result = await handle_call_tool("chat", {"prompt": "Test"})
 
         assert len(result) == 1
         assert "Error calling Gemini API" in result[0].text
         assert "API Error" in result[0].text
 
     @pytest.mark.asyncio
-    @patch('google.generativeai.GenerativeModel')
+    @patch("google.generativeai.GenerativeModel")
     async def test_handle_call_tool_chat_blocked_response(self, mock_model):
         """Test handling blocked responses"""
         mock_response = Mock()
         mock_response.candidates = [Mock()]
         mock_response.candidates[0].content.parts = []
         mock_response.candidates[0].finish_reason = 2
 
         mock_instance = Mock()
         mock_instance.generate_content.return_value = mock_response
         mock_model.return_value = mock_instance
 
         result = await handle_call_tool("chat", {"prompt": "Test"})
 
         assert len(result) == 1
         assert "Response blocked or incomplete" in result[0].text
@@ -295,4 +291,4 @@ class TestErrorHandling:
 if __name__ == "__main__":
-    pytest.main([__file__, "-v"])
+    pytest.main([__file__, "-v"])

@@ -10,10 +10,11 @@ def test_direct_import():
     """Test that gemini_server can be imported directly"""
     try:
         import gemini_server
-        assert hasattr(gemini_server, 'GeminiChatRequest')
-        assert hasattr(gemini_server, 'CodeAnalysisRequest')
-        assert hasattr(gemini_server, 'handle_list_tools')
-        assert hasattr(gemini_server, 'handle_call_tool')
+
+        assert hasattr(gemini_server, "GeminiChatRequest")
+        assert hasattr(gemini_server, "CodeAnalysisRequest")
+        assert hasattr(gemini_server, "handle_list_tools")
+        assert hasattr(gemini_server, "handle_call_tool")
     except ImportError as e:
         pytest.fail(f"Failed to import gemini_server: {e}")
@@ -25,8 +26,9 @@ def test_from_import():
         GeminiChatRequest,
         CodeAnalysisRequest,
         DEFAULT_MODEL,
-        DEVELOPER_SYSTEM_PROMPT
+        DEVELOPER_SYSTEM_PROMPT,
     )
+
     assert GeminiChatRequest is not None
     assert CodeAnalysisRequest is not None
     assert isinstance(DEFAULT_MODEL, str)
@@ -39,7 +41,8 @@ def test_google_generativeai_import():
     """Test that google.generativeai can be imported"""
     try:
         import google.generativeai as genai
-        assert hasattr(genai, 'GenerativeModel')
-        assert hasattr(genai, 'configure')
+
+        assert hasattr(genai, "GenerativeModel")
+        assert hasattr(genai, "configure")
     except ImportError as e:
-        pytest.fail(f"Failed to import google.generativeai: {e}")
+        pytest.fail(f"Failed to import google.generativeai: {e}")

@@ -16,90 +16,90 @@ from gemini_server import prepare_code_context
 class TestNewFormattingBehavior:
     """Test the improved formatting behavior"""
 
     def test_file_formatting_for_gemini(self, tmp_path):
         """Test that files are properly formatted for Gemini"""
         test_file = tmp_path / "test.py"
         content = "def hello():\n return 'world'"
-        test_file.write_text(content, encoding='utf-8')
+        test_file.write_text(content, encoding="utf-8")
 
         context, summary = prepare_code_context([str(test_file)], None)
 
         # Context should have clear markers for Gemini
         assert "--- BEGIN FILE:" in context
         assert "--- END FILE:" in context
         assert str(test_file) in context
         assert content in context
 
         # Summary should be concise for terminal
-        assert "📁 Analyzing 1 file(s)" in summary
+        assert "Analyzing 1 file(s)" in summary
         assert "bytes)" in summary
         assert len(summary) < len(context)  # Summary much smaller than full context
 
     def test_terminal_summary_shows_preview(self, tmp_path):
         """Test that terminal summary shows small preview"""
         test_file = tmp_path / "large_file.py"
         content = "# This is a large file\n" + "x = 1\n" * 1000
-        test_file.write_text(content, encoding='utf-8')
+        test_file.write_text(content, encoding="utf-8")
 
         context, summary = prepare_code_context([str(test_file)], None)
 
         # Summary should show preview but not full content
-        assert "📁 Analyzing 1 file(s)" in summary
+        assert "Analyzing 1 file(s)" in summary
         assert str(test_file) in summary
         assert "bytes)" in summary
         assert "Preview:" in summary
 
         # Full content should not be in summary
         assert "x = 1" not in summary or summary.count("x = 1") < 5
 
     def test_multiple_files_summary(self, tmp_path):
         """Test summary with multiple files"""
         files = []
         for i in range(3):
             file = tmp_path / f"file{i}.py"
-            file.write_text(f"# File {i}\nprint({i})", encoding='utf-8')
+            file.write_text(f"# File {i}\nprint({i})", encoding="utf-8")
             files.append(str(file))
 
         context, summary = prepare_code_context(files, None)
 
-        assert "📁 Analyzing 3 file(s)" in summary
+        assert "Analyzing 3 file(s)" in summary
         for file in files:
             assert file in summary
             assert "bytes)" in summary
 
         # Should have clear delimiters in context
         assert context.count("--- BEGIN FILE:") == 3
         assert context.count("--- END FILE:") == 3
 
     def test_direct_code_formatting(self):
         """Test direct code formatting"""
         direct_code = "# Direct code\nprint('hello')"
 
         context, summary = prepare_code_context(None, direct_code)
 
         # Context should have clear markers
         assert "--- BEGIN DIRECT CODE ---" in context
         assert "--- END DIRECT CODE ---" in context
         assert direct_code in context
 
         # Summary should show preview
-        assert "💻 Direct code provided" in summary
+        assert "Direct code provided" in summary
         assert f"({len(direct_code)} characters)" in summary
         assert "Preview:" in summary
 
     def test_mixed_content_formatting(self, tmp_path):
         """Test formatting with both files and direct code"""
         test_file = tmp_path / "test.py"
-        test_file.write_text("# Test file", encoding='utf-8')
+        test_file.write_text("# Test file", encoding="utf-8")
         direct_code = "# Direct code\nprint('hello')"
 
         context, summary = prepare_code_context([str(test_file)], direct_code)
 
         # Context should have both with clear separation
         assert "--- BEGIN FILE:" in context
         assert "--- END FILE:" in context
         assert "--- BEGIN DIRECT CODE ---" in context
         assert "--- END DIRECT CODE ---" in context
 
         # Summary should mention both
-        assert "📁 Analyzing 1 file(s)" in summary
-        assert "💻 Direct code provided" in summary
+        assert "Analyzing 1 file(s)" in summary
+        assert "Direct code provided" in summary

@@ -23,7 +23,7 @@ from gemini_server import (
 class TestVersionFunctionality:
     """Test version-related functionality"""
 
     @pytest.mark.asyncio
     async def test_version_constants_exist(self):
         """Test that version constants are defined"""
@@ -33,26 +33,29 @@ class TestVersionFunctionality:
         assert isinstance(__updated__, str)
         assert __author__ is not None
         assert isinstance(__author__, str)
 
     @pytest.mark.asyncio
     async def test_version_tool_in_list(self):
         """Test that get_version tool appears in tool list"""
         tools = await handle_list_tools()
         tool_names = [tool.name for tool in tools]
         assert "get_version" in tool_names
 
         # Find the version tool
         version_tool = next(t for t in tools if t.name == "get_version")
-        assert version_tool.description == "Get the version and metadata of the Gemini MCP Server"
+        assert (
+            version_tool.description
+            == "Get the version and metadata of the Gemini MCP Server"
+        )
 
     @pytest.mark.asyncio
     async def test_get_version_tool_execution(self):
         """Test executing the get_version tool"""
         result = await handle_call_tool("get_version", {})
 
         assert len(result) == 1
         assert result[0].type == "text"
 
         # Check the response contains expected information
         response_text = result[0].text
         assert __version__ in response_text
@@ -64,7 +67,7 @@ class TestVersionFunctionality:
         assert "Python:" in response_text
         assert "Started:" in response_text
         assert "github.com/BeehiveInnovations/gemini-mcp-server" in response_text
 
     @pytest.mark.asyncio
     async def test_version_format(self):
         """Test that version follows semantic versioning"""
@@ -72,7 +75,7 @@ class TestVersionFunctionality:
         assert len(parts) == 3  # Major.Minor.Patch
         for part in parts:
             assert part.isdigit()  # Each part should be numeric
 
     @pytest.mark.asyncio
     async def test_date_format(self):
         """Test that updated date follows expected format"""
@@ -83,4 +86,4 @@ class TestVersionFunctionality:
         assert len(parts[1]) == 2  # Month
         assert len(parts[2]) == 2  # Day
         for part in parts:
-            assert part.isdigit()
+            assert part.isdigit()