refactor: remove emojis and apply code formatting
- Remove all emoji characters from output strings for better compatibility
- Update all tests to match non-emoji output
- Apply black formatting to all Python files
- Ensure all tests pass and linting succeeds
- Remove htmlcov directory (already in .gitignore)

This change improves cross-platform compatibility and ensures consistent code formatting across the project.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
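Two patterns account for most of the diff that follows: black normalizes string literals to double quotes, and it wraps any expression that overflows its default 88-character line limit in parentheses. A minimal sketch of the wrapping pattern; the SimpleNamespace `request` object is a hypothetical stand-in for the server's real request model:

# Hypothetical stand-in for the server's request model, for illustration only.
from types import SimpleNamespace

request = SimpleNamespace(temperature=None, max_tokens=None)

# Before black (a single line longer than the 88-character default):
# temperature = request.temperature if request.temperature is not None else 0.5

# After black: the right-hand side is wrapped in parentheses and split.
temperature = (
    request.temperature if request.temperature is not None else 0.5
)

# Lines that already fit are left alone.
max_tokens = request.max_tokens if request.max_tokens is not None else 8192

print(temperature, max_tokens)  # -> 0.5 8192
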
@@ -175,9 +175,9 @@ def prepare_code_context(
                 if preview.strip():
                     summary_parts.append(f" Preview: {preview[:50]}...")
             except Exception:
-                summary_parts.append(f" 📄 {file_path} ({size:,} bytes)")
+                summary_parts.append(f" {file_path} ({size:,} bytes)")
         else:
-            summary_parts.append(f" ❌ {file_path} (not found)")
+            summary_parts.append(f" {file_path} (not found)")
 
     # Add direct code
     if code:
@@ -303,7 +303,9 @@ async def handle_call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
         try:
             # Use the specified model with optimized settings
             model_name = request.model or DEFAULT_MODEL
-            temperature = request.temperature if request.temperature is not None else 0.5
+            temperature = (
+                request.temperature if request.temperature is not None else 0.5
+            )
             max_tokens = request.max_tokens if request.max_tokens is not None else 8192
 
             model = genai.GenerativeModel(
@@ -359,7 +361,9 @@ async def handle_call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
 
         try:
             # Prepare code context - always use non-verbose mode for Claude Code compatibility
-            code_context, summary = prepare_code_context(request_analysis.files, request_analysis.code)
+            code_context, summary = prepare_code_context(
+                request_analysis.files, request_analysis.code
+            )
 
             # Count approximate tokens (rough estimate: 1 token ≈ 4 characters)
             estimated_tokens = len(code_context) // 4
@@ -374,8 +378,16 @@ async def handle_call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
 
             # Use the specified model with optimized settings for code analysis
             model_name = request_analysis.model or DEFAULT_MODEL
-            temperature = request_analysis.temperature if request_analysis.temperature is not None else 0.2
-            max_tokens = request_analysis.max_tokens if request_analysis.max_tokens is not None else 8192
+            temperature = (
+                request_analysis.temperature
+                if request_analysis.temperature is not None
+                else 0.2
+            )
+            max_tokens = (
+                request_analysis.max_tokens
+                if request_analysis.max_tokens is not None
+                else 8192
+            )
 
             model = genai.GenerativeModel(
                 model_name=model_name,
@@ -417,7 +429,7 @@ marked with their paths and content boundaries."""
 
             # Always return response with summary for Claude Code compatibility
             if request_analysis.files or request_analysis.code:
-                response_text = f"{summary}\n\n🤖 Gemini's Analysis:\n{text}"
+                response_text = f"{summary}\n\nGemini's Analysis:\n{text}"
             else:
                 response_text = text
 
@@ -431,13 +443,19 @@ marked with their paths and content boundaries."""
         # List available models
         models = []
         for model_info in genai.list_models():
-            if (hasattr(model_info, 'supported_generation_methods') and
-                    "generateContent" in model_info.supported_generation_methods):
+            if (
+                hasattr(model_info, "supported_generation_methods")
+                and "generateContent" in model_info.supported_generation_methods
+            ):
                 models.append(
                     {
                         "name": model_info.name,
-                        "display_name": getattr(model_info, 'display_name', 'Unknown'),
-                        "description": getattr(model_info, 'description', 'No description'),
+                        "display_name": getattr(
+                            model_info, "display_name", "Unknown"
+                        ),
+                        "description": getattr(
+                            model_info, "description", "No description"
+                        ),
                         "is_default": model_info.name.endswith(DEFAULT_MODEL),
                     }
                 )
@@ -462,7 +480,7 @@ marked with their paths and content boundaries."""
         return [
             TextContent(
                 type="text",
-                text=f"""🤖 Gemini MCP Server v{__version__}
+                text=f"""Gemini MCP Server v{__version__}
 Updated: {__updated__}
 Author: {__author__}
 
@@ -18,11 +18,11 @@ if "GEMINI_API_KEY" not in os.environ:
 # Configure asyncio for Windows compatibility
 if sys.platform == "win32":
     import asyncio
 
     asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
 
 
 # Pytest configuration
 def pytest_configure(config):
     """Configure pytest with custom markers"""
-    config.addinivalue_line(
-        "markers", "asyncio: mark test as async"
-    )
+    config.addinivalue_line("markers", "asyncio: mark test as async")
@@ -22,7 +22,7 @@ from gemini_server import (
     handle_list_tools,
     handle_call_tool,
     DEVELOPER_SYSTEM_PROMPT,
-    DEFAULT_MODEL
+    DEFAULT_MODEL,
 )
 
 
@@ -45,7 +45,7 @@ class TestModels:
             system_prompt="Custom system",
             max_tokens=4096,
             temperature=0.8,
-            model="custom-model"
+            model="custom-model",
         )
         assert request.system_prompt == "Custom system"
         assert request.max_tokens == 4096
@@ -69,7 +69,7 @@ class TestFileOperations:
     def test_read_file_content_success(self, tmp_path):
         """Test successful file reading"""
         test_file = tmp_path / "test.py"
-        test_file.write_text("def hello():\n return 'world'", encoding='utf-8')
+        test_file.write_text("def hello():\n return 'world'", encoding="utf-8")
 
         content = read_file_content(str(test_file))
         assert "--- BEGIN FILE:" in content
@@ -96,9 +96,9 @@ class TestFileOperations:
     def test_prepare_code_context_with_files(self, tmp_path):
         """Test preparing context from files"""
         file1 = tmp_path / "file1.py"
-        file1.write_text("print('file1')", encoding='utf-8')
+        file1.write_text("print('file1')", encoding="utf-8")
         file2 = tmp_path / "file2.py"
-        file2.write_text("print('file2')", encoding='utf-8')
+        file2.write_text("print('file2')", encoding="utf-8")
 
         context, summary = prepare_code_context([str(file1), str(file2)], None)
         assert "--- BEGIN FILE:" in context
@@ -107,7 +107,7 @@ class TestFileOperations:
         assert "print('file1')" in context
         assert "print('file2')" in context
         assert "--- END FILE:" in context
-        assert "📁 Analyzing 2 file(s)" in summary
+        assert "Analyzing 2 file(s)" in summary
         assert "bytes)" in summary
 
     def test_prepare_code_context_with_code(self):
@@ -117,19 +117,19 @@ class TestFileOperations:
         assert "--- BEGIN DIRECT CODE ---" in context
         assert "--- END DIRECT CODE ---" in context
         assert code in context
-        assert "💻 Direct code provided" in summary
+        assert "Direct code provided" in summary
 
     def test_prepare_code_context_mixed(self, tmp_path):
         """Test preparing context from both files and code"""
         test_file = tmp_path / "test.py"
-        test_file.write_text("# From file", encoding='utf-8')
+        test_file.write_text("# From file", encoding="utf-8")
         code = "# Direct code"
 
         context, summary = prepare_code_context([str(test_file)], code)
         assert "# From file" in context
         assert "# Direct code" in context
-        assert "📁 Analyzing 1 file(s)" in summary
-        assert "💻 Direct code provided" in summary
+        assert "Analyzing 1 file(s)" in summary
+        assert "Direct code provided" in summary
 
 
 class TestToolHandlers:
@@ -155,7 +155,7 @@ class TestToolHandlers:
         assert "Unknown tool" in result[0].text
 
     @pytest.mark.asyncio
-    @patch('google.generativeai.GenerativeModel')
+    @patch("google.generativeai.GenerativeModel")
     async def test_handle_call_tool_chat_success(self, mock_model):
         """Test successful chat tool call"""
         # Mock the response
@@ -167,10 +167,9 @@ class TestToolHandlers:
         mock_instance.generate_content.return_value = mock_response
         mock_model.return_value = mock_instance
 
-        result = await handle_call_tool("chat", {
-            "prompt": "Test prompt",
-            "temperature": 0.5
-        })
+        result = await handle_call_tool(
+            "chat", {"prompt": "Test prompt", "temperature": 0.5}
+        )
 
         assert len(result) == 1
         assert result[0].text == "Test response"
@@ -178,11 +177,11 @@ class TestToolHandlers:
         # Verify model was called with correct parameters
         mock_model.assert_called_once()
         call_args = mock_model.call_args[1]
-        assert call_args['model_name'] == DEFAULT_MODEL
-        assert call_args['generation_config']['temperature'] == 0.5
+        assert call_args["model_name"] == DEFAULT_MODEL
+        assert call_args["generation_config"]["temperature"] == 0.5
 
     @pytest.mark.asyncio
-    @patch('google.generativeai.GenerativeModel')
+    @patch("google.generativeai.GenerativeModel")
     async def test_handle_call_tool_chat_with_developer_prompt(self, mock_model):
         """Test chat tool uses developer prompt when no system prompt provided"""
         mock_response = Mock()
@@ -202,19 +201,17 @@ class TestToolHandlers:
     @pytest.mark.asyncio
     async def test_handle_call_tool_analyze_code_no_input(self):
         """Test analyze_code with no files or code"""
-        result = await handle_call_tool("analyze_code", {
-            "question": "Analyze what?"
-        })
+        result = await handle_call_tool("analyze_code", {"question": "Analyze what?"})
         assert len(result) == 1
         assert "Must provide either 'files' or 'code'" in result[0].text
 
     @pytest.mark.asyncio
-    @patch('google.generativeai.GenerativeModel')
+    @patch("google.generativeai.GenerativeModel")
     async def test_handle_call_tool_analyze_code_success(self, mock_model, tmp_path):
         """Test successful code analysis"""
         # Create test file
         test_file = tmp_path / "test.py"
-        test_file.write_text("def hello(): pass", encoding='utf-8')
+        test_file.write_text("def hello(): pass", encoding="utf-8")
 
         # Mock response
         mock_response = Mock()
@@ -225,20 +222,19 @@ class TestToolHandlers:
         mock_instance.generate_content.return_value = mock_response
         mock_model.return_value = mock_instance
 
-        result = await handle_call_tool("analyze_code", {
-            "files": [str(test_file)],
-            "question": "Analyze this"
-        })
+        result = await handle_call_tool(
+            "analyze_code", {"files": [str(test_file)], "question": "Analyze this"}
+        )
 
         assert len(result) == 1
         # Check that the response contains both summary and Gemini's response
         response_text = result[0].text
-        assert "📁 Analyzing 1 file(s)" in response_text
-        assert "🤖 Gemini's Analysis:" in response_text
+        assert "Analyzing 1 file(s)" in response_text
+        assert "Gemini's Analysis:" in response_text
         assert "Analysis result" in response_text
 
     @pytest.mark.asyncio
-    @patch('google.generativeai.list_models')
+    @patch("google.generativeai.list_models")
     async def test_handle_call_tool_list_models(self, mock_list_models):
         """Test listing models"""
         # Mock model data
@@ -246,7 +242,7 @@ class TestToolHandlers:
         mock_model.name = "test-model"
         mock_model.display_name = "Test Model"
         mock_model.description = "A test model"
-        mock_model.supported_generation_methods = ['generateContent']
+        mock_model.supported_generation_methods = ["generateContent"]
 
         mock_list_models.return_value = [mock_model]
 
@@ -255,15 +251,15 @@ class TestToolHandlers:
 
         models = json.loads(result[0].text)
         assert len(models) == 1
-        assert models[0]['name'] == "test-model"
-        assert models[0]['is_default'] == False
+        assert models[0]["name"] == "test-model"
+        assert models[0]["is_default"] == False
 
 
 class TestErrorHandling:
     """Test error handling scenarios"""
 
     @pytest.mark.asyncio
-    @patch('google.generativeai.GenerativeModel')
+    @patch("google.generativeai.GenerativeModel")
     async def test_handle_call_tool_chat_api_error(self, mock_model):
         """Test handling API errors in chat"""
         mock_instance = Mock()
@@ -276,7 +272,7 @@ class TestErrorHandling:
         assert "API Error" in result[0].text
 
     @pytest.mark.asyncio
-    @patch('google.generativeai.GenerativeModel')
+    @patch("google.generativeai.GenerativeModel")
     async def test_handle_call_tool_chat_blocked_response(self, mock_model):
         """Test handling blocked responses"""
         mock_response = Mock()
@@ -10,10 +10,11 @@ def test_direct_import():
     """Test that gemini_server can be imported directly"""
     try:
         import gemini_server
-        assert hasattr(gemini_server, 'GeminiChatRequest')
-        assert hasattr(gemini_server, 'CodeAnalysisRequest')
-        assert hasattr(gemini_server, 'handle_list_tools')
-        assert hasattr(gemini_server, 'handle_call_tool')
+
+        assert hasattr(gemini_server, "GeminiChatRequest")
+        assert hasattr(gemini_server, "CodeAnalysisRequest")
+        assert hasattr(gemini_server, "handle_list_tools")
+        assert hasattr(gemini_server, "handle_call_tool")
     except ImportError as e:
         pytest.fail(f"Failed to import gemini_server: {e}")
 
@@ -25,8 +26,9 @@ def test_from_import():
         GeminiChatRequest,
         CodeAnalysisRequest,
         DEFAULT_MODEL,
-        DEVELOPER_SYSTEM_PROMPT
+        DEVELOPER_SYSTEM_PROMPT,
     )
+
     assert GeminiChatRequest is not None
     assert CodeAnalysisRequest is not None
     assert isinstance(DEFAULT_MODEL, str)
@@ -39,7 +41,8 @@ def test_google_generativeai_import():
     """Test that google.generativeai can be imported"""
     try:
         import google.generativeai as genai
-        assert hasattr(genai, 'GenerativeModel')
-        assert hasattr(genai, 'configure')
+
+        assert hasattr(genai, "GenerativeModel")
+        assert hasattr(genai, "configure")
     except ImportError as e:
         pytest.fail(f"Failed to import google.generativeai: {e}")
@@ -21,7 +21,7 @@ class TestNewFormattingBehavior:
         """Test that files are properly formatted for Gemini"""
         test_file = tmp_path / "test.py"
         content = "def hello():\n return 'world'"
-        test_file.write_text(content, encoding='utf-8')
+        test_file.write_text(content, encoding="utf-8")
 
         context, summary = prepare_code_context([str(test_file)], None)
 
@@ -32,7 +32,7 @@ class TestNewFormattingBehavior:
         assert content in context
 
         # Summary should be concise for terminal
-        assert "📁 Analyzing 1 file(s)" in summary
+        assert "Analyzing 1 file(s)" in summary
         assert "bytes)" in summary
         assert len(summary) < len(context)  # Summary much smaller than full context
 
@@ -40,12 +40,12 @@ class TestNewFormattingBehavior:
         """Test that terminal summary shows small preview"""
         test_file = tmp_path / "large_file.py"
         content = "# This is a large file\n" + "x = 1\n" * 1000
-        test_file.write_text(content, encoding='utf-8')
+        test_file.write_text(content, encoding="utf-8")
 
         context, summary = prepare_code_context([str(test_file)], None)
 
         # Summary should show preview but not full content
-        assert "📁 Analyzing 1 file(s)" in summary
+        assert "Analyzing 1 file(s)" in summary
         assert str(test_file) in summary
         assert "bytes)" in summary
         assert "Preview:" in summary
@@ -57,12 +57,12 @@ class TestNewFormattingBehavior:
         files = []
         for i in range(3):
             file = tmp_path / f"file{i}.py"
-            file.write_text(f"# File {i}\nprint({i})", encoding='utf-8')
+            file.write_text(f"# File {i}\nprint({i})", encoding="utf-8")
             files.append(str(file))
 
         context, summary = prepare_code_context(files, None)
 
-        assert "📁 Analyzing 3 file(s)" in summary
+        assert "Analyzing 3 file(s)" in summary
         for file in files:
             assert file in summary
         assert "bytes)" in summary
@@ -82,14 +82,14 @@ class TestNewFormattingBehavior:
         assert direct_code in context
 
         # Summary should show preview
-        assert "💻 Direct code provided" in summary
+        assert "Direct code provided" in summary
         assert f"({len(direct_code)} characters)" in summary
         assert "Preview:" in summary
 
     def test_mixed_content_formatting(self, tmp_path):
         """Test formatting with both files and direct code"""
         test_file = tmp_path / "test.py"
-        test_file.write_text("# Test file", encoding='utf-8')
+        test_file.write_text("# Test file", encoding="utf-8")
         direct_code = "# Direct code\nprint('hello')"
 
         context, summary = prepare_code_context([str(test_file)], direct_code)
@@ -101,5 +101,5 @@ class TestNewFormattingBehavior:
         assert "--- END DIRECT CODE ---" in context
 
         # Summary should mention both
-        assert "📁 Analyzing 1 file(s)" in summary
-        assert "💻 Direct code provided" in summary
+        assert "Analyzing 1 file(s)" in summary
+        assert "Direct code provided" in summary
@@ -43,7 +43,10 @@ class TestVersionFunctionality:
 
         # Find the version tool
         version_tool = next(t for t in tools if t.name == "get_version")
-        assert version_tool.description == "Get the version and metadata of the Gemini MCP Server"
+        assert (
+            version_tool.description
+            == "Get the version and metadata of the Gemini MCP Server"
+        )
 
     @pytest.mark.asyncio
     async def test_get_version_tool_execution(self):
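For reference, the emoji-free summary format that the updated assertions agree on can be reconstructed as a sketch. The summarize helper below is hypothetical, not the project's prepare_code_context; only the literal strings are taken from the tests above:

import os


def summarize(file_paths, code=None):
    """Hypothetical reconstruction of the summary format asserted in the tests."""
    parts = []
    if file_paths:
        parts.append(f"Analyzing {len(file_paths)} file(s)")  # was "📁 Analyzing ..."
        for path in file_paths:
            if os.path.exists(path):
                size = os.path.getsize(path)
                parts.append(f" {path} ({size:,} bytes)")  # was "📄 {path} ..."
            else:
                parts.append(f" {path} (not found)")  # was "❌ {path} ..."
    if code:
        parts.append(f"Direct code provided ({len(code)} characters)")  # was "💻 ..."
    return "\n".join(parts)


print(summarize(["missing.py"], "print('hi')"))
# Analyzing 1 file(s)
#  missing.py (not found)
# Direct code provided (11 characters)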