feat: optimize analyze_code for Claude Code with improved formatting

BREAKING CHANGES:
- Remove verbose_output from the tool schema (so Claude Code can't accidentally use it)
- Always show minimal terminal output with file previews
- Improve file content formatting for Gemini with clear delimiters

Key improvements:
- Files formatted as "--- BEGIN FILE: path --- content --- END FILE: path ---"
- Direct code formatted as "--- BEGIN DIRECT CODE --- code --- END DIRECT CODE ---"
- Terminal shows file paths, sizes, and small previews (not full content)
- Clear prompt structure for Gemini: USER REQUEST | CODE TO ANALYZE sections
- Prevents terminal hangs/glitches with large files in Claude Code
- All tests updated and passing

This ensures Claude Code stays responsive while Gemini gets properly formatted content.
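
For illustration, a minimal usage sketch of the new behaviour (the example path, size, and preview are hypothetical; the function, delimiters, and summary shape follow the diff below):

    from gemini_server import prepare_code_context

    # context is sent to Gemini in full; summary is the small terminal-safe report
    context, summary = prepare_code_context(["src/example.py"], None)

    # context wraps each file in explicit delimiters:
    #   --- BEGIN FILE: src/example.py ---
    #   ...full file content...
    #   --- END FILE: src/example.py ---
    #
    # summary stays small, roughly:
    #   📁 Analyzing 1 file(s):
    #    📄 src/example.py (1,234 bytes)
    #    Preview: ...first few lines, truncated...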

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
Author: Fahad
Date: 2025-06-08 20:59:35 +04:00
Parent: c2c80dd828
Commit: 341022a35b

3 changed files with 131 additions and 68 deletions

View File

@@ -109,25 +109,31 @@ def configure_gemini():
 def read_file_content(file_path: str) -> str:
-    """Read content from a file with error handling"""
+    """Read content from a file with error handling - for backward compatibility"""
+    return read_file_content_for_gemini(file_path)
+
+
+def read_file_content_for_gemini(file_path: str) -> str:
+    """Read content from a file with proper formatting for Gemini"""
     try:
         path = Path(file_path)
         if not path.exists():
-            return f"Error: File not found: {file_path}"
+            return f"\n--- FILE NOT FOUND: {file_path} ---\nError: File does not exist\n--- END FILE ---\n"
         if not path.is_file():
-            return f"Error: Not a file: {file_path}"
+            return f"\n--- NOT A FILE: {file_path} ---\nError: Path is not a file\n--- END FILE ---\n"

         # Read the file
         with open(path, "r", encoding="utf-8") as f:
             content = f.read()

-        return f"=== File: {file_path} ===\n{content}\n"
+        # Format with clear delimiters for Gemini
+        return f"\n--- BEGIN FILE: {file_path} ---\n{content}\n--- END FILE: {file_path} ---\n"

     except Exception as e:
-        return f"Error reading {file_path}: {str(e)}"
+        return f"\n--- ERROR READING FILE: {file_path} ---\nError: {str(e)}\n--- END FILE ---\n"


 def prepare_code_context(
-    files: Optional[List[str]], code: Optional[str], verbose: bool = False
+    files: Optional[List[str]], code: Optional[str]
 ) -> Tuple[str, str]:
     """Prepare code context from files and/or direct code

     Returns: (context_for_gemini, summary_for_terminal)
@@ -137,26 +143,45 @@ def prepare_code_context(
     # Add file contents
     if files:
-        summary_parts.append(f"Analyzing {len(files)} file(s):")
+        summary_parts.append(f"📁 Analyzing {len(files)} file(s):")
         for file_path in files:
-            content = read_file_content(file_path)
-            context_parts.append(content)
+            # Get file content for Gemini
+            file_content = read_file_content_for_gemini(file_path)
+            context_parts.append(file_content)

-            # For summary, just show file path and size
+            # Create summary with small excerpt for terminal
             path = Path(file_path)
             if path.exists() and path.is_file():
                 size = path.stat().st_size
-                summary_parts.append(f" - {file_path} ({size:,} bytes)")
+                try:
+                    with open(path, "r", encoding="utf-8") as f:
+                        # Read first few lines for preview
+                        preview_lines = []
+                        for i, line in enumerate(f):
+                            if i >= 3: # Show max 3 lines
+                                break
+                            preview_lines.append(line.rstrip())
+                        preview = "\n".join(preview_lines)
+                        if len(preview) > 100:
+                            preview = preview[:100] + "..."
+                    summary_parts.append(f" 📄 {file_path} ({size:,} bytes)")
+                    if preview.strip():
+                        summary_parts.append(f" Preview: {preview[:50]}...")
+                except Exception:
+                    summary_parts.append(f" 📄 {file_path} ({size:,} bytes)")
             else:
-                summary_parts.append(f" - {file_path} (not found)")
+                summary_parts.append(f" {file_path} (not found)")

     # Add direct code
     if code:
-        context_parts.append("=== Direct Code ===\n" + code + "\n")
-        summary_parts.append(f"Direct code provided ({len(code):,} characters)")
+        formatted_code = f"\n--- BEGIN DIRECT CODE ---\n{code}\n--- END DIRECT CODE ---\n"
+        context_parts.append(formatted_code)
+        preview = code[:100] + "..." if len(code) > 100 else code
+        summary_parts.append(f"💻 Direct code provided ({len(code):,} characters)")
+        summary_parts.append(f" Preview: {preview}")

-    full_context = "\n".join(context_parts)
-    summary = "\n".join(summary_parts) if not verbose else full_context
+    full_context = "\n\n".join(context_parts)
+    summary = "\n".join(summary_parts)

     return full_context, summary
@@ -241,11 +266,6 @@ async def handle_list_tools() -> List[Tool]:
"description": f"Model to use (defaults to {DEFAULT_MODEL})",
"default": DEFAULT_MODEL,
},
"verbose_output": {
"type": "boolean",
"description": "Show file contents in terminal output",
"default": False,
},
},
"required": ["question"],
},
@@ -320,9 +340,9 @@ async def handle_call_tool(name: str, arguments: Dict[str, Any]) -> List[TextCon
         ]

     try:
-        # Prepare code context
+        # Prepare code context - always use non-verbose mode for Claude Code compatibility
         code_context, summary = prepare_code_context(
-            request.files, request.code, request.verbose_output
+            request.files, request.code
         )

         # Count approximate tokens (rough estimate: 1 token ≈ 4 characters)
@@ -346,12 +366,19 @@ async def handle_call_tool(name: str, arguments: Dict[str, Any]) -> List[TextCon
                 },
             )

-        # Prepare the full prompt with enhanced developer context
+        # Prepare the full prompt with enhanced developer context and clear structure
         system_prompt = request.system_prompt or DEVELOPER_SYSTEM_PROMPT
-        full_prompt = (
-            f"{system_prompt}\n\nCode to analyze:\n\n{code_context}\n\n"
-            f"Question/Request: {request.question}"
-        )
+        full_prompt = f"""{system_prompt}
+
+=== USER REQUEST ===
+{request.question}
+=== END USER REQUEST ===
+
+=== CODE TO ANALYZE ===
+{code_context}
+=== END CODE TO ANALYZE ===
+
+Please analyze the code above and respond to the user's request. The code files are clearly marked with their paths and content boundaries."""

         # Generate response
         response = model.generate_content(full_prompt)
@@ -367,9 +394,9 @@ async def handle_call_tool(name: str, arguments: Dict[str, Any]) -> List[TextCon
             )
             text = f"Response blocked or incomplete. Finish reason: {finish_reason}"

-        # Return response with summary if not verbose
-        if not request.verbose_output and request.files:
-            response_text = f"{summary}\n\nGemini's response:\n{text}"
+        # Always return response with summary for Claude Code compatibility
+        if request.files or request.code:
+            response_text = f"{summary}\n\n🤖 Gemini's Analysis:\n{text}"
         else:
             response_text = text

View File

@@ -61,7 +61,6 @@ class TestModels:
         assert request.max_tokens == 8192
         assert request.temperature == 0.2
         assert request.model == DEFAULT_MODEL
-        assert request.verbose_output == False


 class TestFileOperations:
@@ -73,7 +72,8 @@ class TestFileOperations:
test_file.write_text("def hello():\n return 'world'", encoding='utf-8')
content = read_file_content(str(test_file))
assert "=== File:" in content
assert "--- BEGIN FILE:" in content
assert "--- END FILE:" in content
assert "def hello():" in content
assert "return 'world'" in content
@@ -84,12 +84,14 @@ class TestFileOperations:
             os.path.sep, "nonexistent_dir_12345", "nonexistent_file.py"
         )
         content = read_file_content(nonexistent_path)
-        assert "Error: File not found" in content
+        assert "--- FILE NOT FOUND:" in content
+        assert "Error: File does not exist" in content

     def test_read_file_content_directory(self, tmp_path):
         """Test reading a directory instead of file"""
         content = read_file_content(str(tmp_path))
-        assert "Error: Not a file" in content
+        assert "--- NOT A FILE:" in content
+        assert "Error: Path is not a file" in content

     def test_prepare_code_context_with_files(self, tmp_path):
         """Test preparing context from files"""
@@ -99,20 +101,23 @@ class TestFileOperations:
         file2.write_text("print('file2')", encoding='utf-8')

         context, summary = prepare_code_context([str(file1), str(file2)], None)

+        assert "--- BEGIN FILE:" in context
         assert "file1.py" in context
         assert "file2.py" in context
         assert "print('file1')" in context
         assert "print('file2')" in context
-        assert "Analyzing 2 file(s)" in summary
+        assert "--- END FILE:" in context
+        assert "📁 Analyzing 2 file(s)" in summary
+        assert "bytes)" in summary

     def test_prepare_code_context_with_code(self):
         """Test preparing context from direct code"""
         code = "def test():\n pass"

         context, summary = prepare_code_context(None, code)

-        assert "=== Direct Code ===" in context
+        assert "--- BEGIN DIRECT CODE ---" in context
+        assert "--- END DIRECT CODE ---" in context
         assert code in context
-        assert "Direct code provided" in summary
+        assert "💻 Direct code provided" in summary

     def test_prepare_code_context_mixed(self, tmp_path):
         """Test preparing context from both files and code"""
@@ -123,8 +128,8 @@ class TestFileOperations:
         context, summary = prepare_code_context([str(test_file)], code)

         assert "# From file" in context
         assert "# Direct code" in context
-        assert "Analyzing 1 file(s)" in summary
-        assert "Direct code provided" in summary
+        assert "📁 Analyzing 1 file(s)" in summary
+        assert "💻 Direct code provided" in summary


 class TestToolHandlers:
@@ -227,8 +232,8 @@ class TestToolHandlers:
         assert len(result) == 1

         # Check that the response contains both summary and Gemini's response
         response_text = result[0].text
-        assert "Analyzing 1 file(s)" in response_text
-        assert "Gemini's response:" in response_text
+        assert "📁 Analyzing 1 file(s)" in response_text
+        assert "🤖 Gemini's Analysis:" in response_text
         assert "Analysis result" in response_text

     @pytest.mark.asyncio

View File

@@ -14,36 +14,43 @@ if str(parent_dir) not in sys.path:
 from gemini_server import prepare_code_context


-class TestVerboseOutput:
-    """Test verbose output functionality"""
+class TestNewFormattingBehavior:
+    """Test the improved formatting behavior"""

-    def test_verbose_true_shows_full_content(self, tmp_path):
-        """Test that verbose=True shows full file content"""
+    def test_file_formatting_for_gemini(self, tmp_path):
+        """Test that files are properly formatted for Gemini"""
         test_file = tmp_path / "test.py"
         content = "def hello():\n return 'world'"
         test_file.write_text(content, encoding='utf-8')

-        context, summary = prepare_code_context([str(test_file)], None, verbose=True)
+        context, summary = prepare_code_context([str(test_file)], None)

-        # With verbose=True, summary should equal context
-        assert summary == context
-        assert content in summary
+        # Context should have clear markers for Gemini
+        assert "--- BEGIN FILE:" in context
+        assert "--- END FILE:" in context
+        assert str(test_file) in context
+        assert content in context

-    def test_verbose_false_shows_summary(self, tmp_path):
-        """Test that verbose=False shows only summary"""
+        # Summary should be concise for terminal
+        assert "📁 Analyzing 1 file(s)" in summary
+        assert "bytes)" in summary
+        assert len(summary) < len(context) # Summary much smaller than full context
+
+    def test_terminal_summary_shows_preview(self, tmp_path):
+        """Test that terminal summary shows small preview"""
         test_file = tmp_path / "large_file.py"
-        content = "x = 1\n" * 1000 # Large content
+        content = "# This is a large file\n" + "x = 1\n" * 1000
         test_file.write_text(content, encoding='utf-8')

-        context, summary = prepare_code_context([str(test_file)], None, verbose=False)
+        context, summary = prepare_code_context([str(test_file)], None)

-        # Summary should be much smaller than context
         assert len(summary) < len(context)
-        assert "Analyzing 1 file(s)" in summary
+        # Summary should show preview but not full content
+        assert "📁 Analyzing 1 file(s)" in summary
         assert str(test_file) in summary
         assert "bytes)" in summary
-        # Content should not be in summary
-        assert content not in summary
+        assert "Preview:" in summary
+        # Full content should not be in summary
+        assert "x = 1" not in summary or summary.count("x = 1") < 5

     def test_multiple_files_summary(self, tmp_path):
         """Test summary with multiple files"""
@@ -53,22 +60,46 @@ class TestVerboseOutput:
file.write_text(f"# File {i}\nprint({i})", encoding='utf-8')
files.append(str(file))
context, summary = prepare_code_context(files, None, verbose=False)
context, summary = prepare_code_context(files, None)
assert "Analyzing 3 file(s)" in summary
assert "📁 Analyzing 3 file(s)" in summary
for file in files:
assert file in summary
assert "bytes)" in summary
# Should have clear delimiters in context
assert context.count("--- BEGIN FILE:") == 3
assert context.count("--- END FILE:") == 3
def test_code_and_files_summary(self, tmp_path):
"""Test summary with both files and direct code"""
def test_direct_code_formatting(self):
"""Test direct code formatting"""
direct_code = "# Direct code\nprint('hello')"
context, summary = prepare_code_context(None, direct_code)
# Context should have clear markers
assert "--- BEGIN DIRECT CODE ---" in context
assert "--- END DIRECT CODE ---" in context
assert direct_code in context
# Summary should show preview
assert "💻 Direct code provided" in summary
assert f"({len(direct_code)} characters)" in summary
assert "Preview:" in summary
def test_mixed_content_formatting(self, tmp_path):
"""Test formatting with both files and direct code"""
test_file = tmp_path / "test.py"
test_file.write_text("# Test file", encoding='utf-8')
direct_code = "# Direct code\nprint('hello')"
context, summary = prepare_code_context([str(test_file)], direct_code, verbose=False)
context, summary = prepare_code_context([str(test_file)], direct_code)
assert "Analyzing 1 file(s)" in summary
assert str(test_file) in summary
assert "Direct code provided" in summary
assert f"({len(direct_code):,} characters)" in summary
# Context should have both with clear separation
assert "--- BEGIN FILE:" in context
assert "--- END FILE:" in context
assert "--- BEGIN DIRECT CODE ---" in context
assert "--- END DIRECT CODE ---" in context
# Summary should mention both
assert "📁 Analyzing 1 file(s)" in summary
assert "💻 Direct code provided" in summary