feat: improve analyze_code terminal output with non-verbose mode
- Add verbose_output parameter (default: False) to CodeAnalysisRequest
- Modify prepare_code_context to return both full context and summary
- Show only file paths and sizes in terminal by default, not full content
- Full file content is still sent to Gemini for analysis
- Add comprehensive tests for verbose output functionality

This prevents terminal hangs when analyzing large files while still providing Gemini with complete file contents for analysis.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
@@ -64,6 +64,7 @@ class CodeAnalysisRequest(BaseModel):
|
||||
max_tokens: Optional[int] = Field(8192, description="Maximum number of tokens in response")
|
||||
temperature: Optional[float] = Field(0.2, description="Temperature for code analysis (0-1, default 0.2 for high accuracy)")
|
||||
model: Optional[str] = Field(DEFAULT_MODEL, description=f"Model to use (defaults to {DEFAULT_MODEL})")
|
||||
verbose_output: Optional[bool] = Field(False, description="Show file contents in terminal output")
|
||||
|
||||
|
||||
# Create the MCP server instance
|
||||
@@ -97,20 +98,37 @@ def read_file_content(file_path: str) -> str:
|
||||
return f"Error reading {file_path}: {str(e)}"
|
||||
|
||||
|
||||
def prepare_code_context(files: Optional[List[str]], code: Optional[str], verbose: bool = False) -> tuple[str, str]:
    """Prepare code context from files and/or direct code.

    Builds two views of the same input: the full context (every file's
    content plus any direct code) that is sent to Gemini, and a compact
    summary (file paths and sizes only) intended for terminal display so
    large files do not flood the user's screen.

    Args:
        files: Optional list of file paths to read and include.
        code: Optional code snippet supplied directly by the caller.
        verbose: When True, the summary is the full context itself
            (i.e. file contents are shown in the terminal too).

    Returns:
        A ``(context_for_gemini, summary_for_terminal)`` tuple. Both
        elements are empty strings when neither files nor code is given.
    """
    context_parts = []
    summary_parts = []

    # Add file contents
    if files:
        summary_parts.append(f"Analyzing {len(files)} file(s):")
        for file_path in files:
            content = read_file_content(file_path)
            context_parts.append(content)

            # For summary, just show file path and size
            path = Path(file_path)
            if path.exists() and path.is_file():
                size = path.stat().st_size
                summary_parts.append(f" - {file_path} ({size:,} bytes)")
            else:
                # read_file_content already embeds the error in the context;
                # the summary just flags the path as missing.
                summary_parts.append(f" - {file_path} (not found)")

    # Add direct code
    if code:
        context_parts.append("=== Direct Code ===\n" + code + "\n")
        summary_parts.append(f"Direct code provided ({len(code):,} characters)")

    full_context = "\n".join(context_parts)
    # In verbose mode the terminal summary is simply the full context.
    summary = "\n".join(summary_parts) if not verbose else full_context

    return full_context, summary
|
||||
|
||||
|
||||
@server.list_tools()
|
||||
@@ -268,7 +286,7 @@ async def handle_call_tool(name: str, arguments: Dict[str, Any]) -> List[TextCon
|
||||
|
||||
try:
|
||||
# Prepare code context
|
||||
code_context = prepare_code_context(request.files, request.code)
|
||||
code_context, summary = prepare_code_context(request.files, request.code, request.verbose_output)
|
||||
|
||||
# Count approximate tokens (rough estimate: 1 token ≈ 4 characters)
|
||||
estimated_tokens = len(code_context) // 4
|
||||
@@ -302,9 +320,15 @@ async def handle_call_tool(name: str, arguments: Dict[str, Any]) -> List[TextCon
|
||||
finish_reason = response.candidates[0].finish_reason if response.candidates else "Unknown"
|
||||
text = f"Response blocked or incomplete. Finish reason: {finish_reason}"
|
||||
|
||||
# Return response with summary if not verbose
|
||||
if not request.verbose_output and request.files:
|
||||
response_text = f"{summary}\n\nGemini's response:\n{text}"
|
||||
else:
|
||||
response_text = text
|
||||
|
||||
return [TextContent(
|
||||
type="text",
|
||||
text=text
|
||||
text=response_text
|
||||
)]
|
||||
|
||||
except Exception as e:
|
||||
|
||||
@@ -61,6 +61,7 @@ class TestModels:
|
||||
assert request.max_tokens == 8192
|
||||
assert request.temperature == 0.2
|
||||
assert request.model == DEFAULT_MODEL
|
||||
assert request.verbose_output == False
|
||||
|
||||
|
||||
class TestFileOperations:
|
||||
@@ -97,18 +98,21 @@ class TestFileOperations:
|
||||
file2 = tmp_path / "file2.py"
|
||||
file2.write_text("print('file2')", encoding='utf-8')
|
||||
|
||||
context = prepare_code_context([str(file1), str(file2)], None)
|
||||
context, summary = prepare_code_context([str(file1), str(file2)], None)
|
||||
assert "file1.py" in context
|
||||
assert "file2.py" in context
|
||||
assert "print('file1')" in context
|
||||
assert "print('file2')" in context
|
||||
assert "Analyzing 2 file(s)" in summary
|
||||
assert "bytes)" in summary
|
||||
|
||||
def test_prepare_code_context_with_code(self):
|
||||
"""Test preparing context from direct code"""
|
||||
code = "def test():\n pass"
|
||||
context = prepare_code_context(None, code)
|
||||
context, summary = prepare_code_context(None, code)
|
||||
assert "=== Direct Code ===" in context
|
||||
assert code in context
|
||||
assert "Direct code provided" in summary
|
||||
|
||||
def test_prepare_code_context_mixed(self, tmp_path):
|
||||
"""Test preparing context from both files and code"""
|
||||
@@ -116,9 +120,11 @@ class TestFileOperations:
|
||||
test_file.write_text("# From file", encoding='utf-8')
|
||||
code = "# Direct code"
|
||||
|
||||
context = prepare_code_context([str(test_file)], code)
|
||||
context, summary = prepare_code_context([str(test_file)], code)
|
||||
assert "# From file" in context
|
||||
assert "# Direct code" in context
|
||||
assert "Analyzing 1 file(s)" in summary
|
||||
assert "Direct code provided" in summary
|
||||
|
||||
|
||||
class TestToolHandlers:
|
||||
@@ -219,7 +225,11 @@ class TestToolHandlers:
|
||||
})
|
||||
|
||||
assert len(result) == 1
|
||||
assert result[0].text == "Analysis result"
|
||||
# Check that the response contains both summary and Gemini's response
|
||||
response_text = result[0].text
|
||||
assert "Analyzing 1 file(s)" in response_text
|
||||
assert "Gemini's response:" in response_text
|
||||
assert "Analysis result" in response_text
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@patch('google.generativeai.list_models')
|
||||
|
||||
74
tests/test_verbose_output.py
Normal file
74
tests/test_verbose_output.py
Normal file
@@ -0,0 +1,74 @@
|
||||
"""
|
||||
Test verbose output functionality
|
||||
"""
|
||||
|
||||
import pytest
|
||||
from pathlib import Path
|
||||
import sys
|
||||
|
||||
# Add parent directory to path for imports
|
||||
parent_dir = Path(__file__).resolve().parent.parent
|
||||
if str(parent_dir) not in sys.path:
|
||||
sys.path.insert(0, str(parent_dir))
|
||||
|
||||
from gemini_server import prepare_code_context
|
||||
|
||||
|
||||
class TestVerboseOutput:
    """Test verbose output functionality of prepare_code_context."""

    def test_verbose_true_shows_full_content(self, tmp_path):
        """Test that verbose=True shows full file content"""
        test_file = tmp_path / "test.py"
        content = "def hello():\n return 'world'"
        test_file.write_text(content, encoding='utf-8')

        context, summary = prepare_code_context([str(test_file)], None, verbose=True)

        # With verbose=True, summary should equal context
        assert summary == context
        assert content in summary

    def test_verbose_false_shows_summary(self, tmp_path):
        """Test that verbose=False shows only summary"""
        test_file = tmp_path / "large_file.py"
        content = "x = 1\n" * 1000  # Large content
        test_file.write_text(content, encoding='utf-8')

        context, summary = prepare_code_context([str(test_file)], None, verbose=False)

        # Summary should be much smaller than context
        assert len(summary) < len(context)
        assert "Analyzing 1 file(s)" in summary
        assert str(test_file) in summary
        assert "bytes)" in summary
        # Content should not be in summary
        assert content not in summary

    def test_multiple_files_summary(self, tmp_path):
        """Test summary with multiple files"""
        files = []
        for i in range(3):
            file = tmp_path / f"file{i}.py"
            file.write_text(f"# File {i}\nprint({i})", encoding='utf-8')
            files.append(str(file))

        context, summary = prepare_code_context(files, None, verbose=False)

        assert "Analyzing 3 file(s)" in summary
        for file in files:
            assert file in summary
        assert "bytes)" in summary

    def test_code_and_files_summary(self, tmp_path):
        """Test summary with both files and direct code"""
        test_file = tmp_path / "test.py"
        test_file.write_text("# Test file", encoding='utf-8')
        direct_code = "# Direct code\nprint('hello')"

        context, summary = prepare_code_context([str(test_file)], direct_code, verbose=False)

        assert "Analyzing 1 file(s)" in summary
        assert str(test_file) in summary
        assert "Direct code provided" in summary
        assert f"({len(direct_code):,} characters)" in summary
|
||||
Reference in New Issue
Block a user