docs: Add practical usage tips and clean up test files
- Added comprehensive usage tips section to README
  - Effective command examples
  - Common workflows for Claude Code users
  - Best practices for combining Claude and Gemini
  - Specific examples for different use cases
- Removed all test files (test_*.py) that were only for development
  - test_server.py
  - test_enhanced.py
  - test_developer_context.py
  - test_optimized.py

The repository now contains only the essential files needed for users to run the Gemini MCP server with Claude Code.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
README.md (+53 lines)
@@ -171,6 +171,59 @@ You: "Use Gemini to analyze all files in /src/core/ and create an architecture d
You: "Have Gemini profile this codebase and suggest the top 5 performance improvements"
```

## 💡 Practical Usage Tips

### Effective Commands

Be specific about what you want from Gemini:

- ✅ "Ask Gemini to identify memory leaks in this code"
- ❌ "Ask Gemini about this"

### Common Workflows

#### 1. **Extended Code Review**

```
You: "Implement feature X"
Claude: [implements]
You: "Use Gemini to review this implementation for scalability issues"
Gemini: [provides detailed feedback]
You: "Based on Gemini's feedback, improve the implementation"
Claude: [refines based on feedback]
```

#### 2. **Large File Analysis**

```
"Use Gemini to analyze /path/to/large/file.py and summarize its architecture"
"Have Gemini trace all function calls in this module"
"Ask Gemini to identify unused code in this file"
```

#### 3. **Multi-File Context**

```
"Use Gemini to analyze how auth.py, users.py, and permissions.py work together"
"Have Gemini map the data flow between these components"
"Ask Gemini to find all circular dependencies in /src"
```

#### 4. **Second Opinion & Validation**

```
"I'm planning to refactor using pattern X. Ask Gemini for potential issues"
"Use Gemini to validate my database schema design"
"Have Gemini suggest alternative approaches to this algorithm"
```

#### 5. **Security & Performance Audits**

```
"Use Gemini to security-audit this authentication flow"
"Have Gemini identify performance bottlenecks in this codebase"
"Ask Gemini to check for common security vulnerabilities"
```

### Best Practices

- Use Gemini when you need analysis beyond Claude's context window
- Leverage Gemini's 1M-token context window for whole-codebase analysis
- Combine both assistants: Claude for implementation, Gemini for review
- Be specific in your requests for more accurate responses

## 📝 Notes

- Gemini 2.5 Pro Preview may occasionally block certain prompts due to safety filters
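For context on that safety-filter note: the server code itself is not part of this commit, but in the google-generativeai Python SDK a blocked prompt surfaces through `prompt_feedback` rather than as response text. A minimal sketch of how a caller can detect this, assuming that SDK is what the server uses (the model id below is a placeholder, not taken from this repo):

```
import os

import google.generativeai as genai

genai.configure(api_key=os.environ["GEMINI_API_KEY"])
# Placeholder model id; the server's actual default is not shown in this diff.
model = genai.GenerativeModel("gemini-2.5-pro-preview-05-06")

response = model.generate_content("Explain this authentication flow")
if response.prompt_feedback.block_reason:
    # The prompt was blocked by safety filters; accessing response.text would raise.
    print(f"Blocked: {response.prompt_feedback.block_reason}")
else:
    print(response.text)
```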
test_developer_context.py (deleted, −87 lines)
@@ -1,87 +0,0 @@
#!/usr/bin/env python3
"""
Test script to verify developer context is properly injected
"""

import os
import asyncio

from gemini_server import configure_gemini, handle_call_tool


async def test_developer_context():
    """Test the developer context system prompt"""
    print("Testing Developer Context in Gemini MCP Server...")
    print("-" * 50)

    # Test configuration
    try:
        configure_gemini()
        print("✓ Gemini API configured successfully")
    except Exception as e:
        print(f"✗ Failed to configure Gemini API: {e}")
        return

    # Test 1: Chat without explicit system prompt (should use developer prompt)
    print("\n1. Testing chat WITHOUT system prompt (should auto-inject developer context)...")
    result = await handle_call_tool("chat", {
        "prompt": "Write a Python function to reverse a linked list. Include comments.",
        "temperature": 0.3,
        "max_tokens": 500
    })
    print("Response preview:")
    print(result[0].text[:400] + "..." if len(result[0].text) > 400 else result[0].text)

    # Test 2: Chat WITH explicit system prompt (should use provided prompt)
    print("\n2. Testing chat WITH custom system prompt...")
    result = await handle_call_tool("chat", {
        "prompt": "Write a haiku about coding",
        "system_prompt": "You are a creative poet who writes about technology.",
        "temperature": 0.9,
        "max_tokens": 100
    })
    print("Response:")
    print(result[0].text)

    # Test 3: Code analysis without system prompt (should use developer prompt)
    print("\n3. Testing analyze_code WITHOUT system prompt...")
    test_code = '''
def bubble_sort(arr):
    n = len(arr)
    for i in range(n):
        for j in range(0, n - i - 1):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
    return arr
'''

    result = await handle_call_tool("analyze_code", {
        "code": test_code,
        "question": "Review this code and suggest improvements",
        "temperature": 0.3
    })
    print("Response preview:")
    print(result[0].text[:500] + "..." if len(result[0].text) > 500 else result[0].text)

    # Test 4: Code analysis WITH custom system prompt
    print("\n4. Testing analyze_code WITH custom system prompt...")
    result = await handle_call_tool("analyze_code", {
        "code": test_code,
        "question": "Is this code correct?",
        "system_prompt": "You are a beginner-friendly tutor. Explain things simply.",
        "temperature": 0.5
    })
    print("Response preview:")
    print(result[0].text[:400] + "..." if len(result[0].text) > 400 else result[0].text)

    print("\n" + "-" * 50)
    print("Developer context tests completed!")


if __name__ == "__main__":
    # Check for API key
    if not os.getenv("GEMINI_API_KEY"):
        print("Error: GEMINI_API_KEY environment variable is not set")
        print("Please set it with: export GEMINI_API_KEY='your-api-key'")
        exit(1)

    asyncio.run(test_developer_context())
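All four tests above rely on gemini_server.py injecting a built-in developer system prompt whenever the caller omits `system_prompt`. The server source is not part of this diff, so the following is only a sketch of the fallback pattern being exercised, with the prompt text and function name invented for illustration:

```
# Hypothetical sketch; the real prompt text and helper live in gemini_server.py.
DEVELOPER_SYSTEM_PROMPT = (
    "You are an expert software developer. Give precise, technical answers "
    "with concrete code examples."
)

def resolve_system_prompt(arguments: dict) -> str:
    """Use the caller's system_prompt if provided, else the developer context."""
    return arguments.get("system_prompt") or DEVELOPER_SYSTEM_PROMPT
```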
test_enhanced.py (deleted, −111 lines)
@@ -1,111 +0,0 @@
#!/usr/bin/env python3
"""
Enhanced test script for Gemini MCP Server with code analysis features
"""

import os
import asyncio
import json
from pathlib import Path

from gemini_server import configure_gemini, handle_call_tool, handle_list_tools


async def test_enhanced_features():
    """Test the enhanced server functionality"""
    print("Testing Enhanced Gemini MCP Server...")
    print("-" * 50)

    # Test configuration
    try:
        configure_gemini()
        print("✓ Gemini API configured successfully")
    except Exception as e:
        print(f"✗ Failed to configure Gemini API: {e}")
        return

    # Test listing tools (should now include analyze_code)
    print("\n1. Testing list_tools...")
    tools = await handle_list_tools()
    print(f"✓ Found {len(tools)} tools:")
    for tool in tools:
        print(f"  - {tool.name}: {tool.description}")

    # Test chat with 2.5 Pro Preview default
    print("\n2. Testing chat with default 2.5 Pro Preview...")
    chat_result = await handle_call_tool("chat", {
        "prompt": "What model are you? Please confirm you're Gemini 2.5 Pro Preview.",
        "temperature": 0.3,
        "max_tokens": 200
    })
    print("✓ Chat response:")
    print(chat_result[0].text[:200] + "..." if len(chat_result[0].text) > 200 else chat_result[0].text)

    # Create a test file for code analysis
    test_file = Path("test_sample.py")
    test_code = '''def fibonacci(n):
    """Calculate fibonacci number at position n"""
    if n <= 1:
        return n
    return fibonacci(n - 1) + fibonacci(n - 2)


def factorial(n):
    """Calculate factorial of n"""
    if n <= 1:
        return 1
    return n * factorial(n - 1)


# Test the functions
print(f"Fibonacci(10): {fibonacci(10)}")
print(f"Factorial(5): {factorial(5)}")
'''

    with open(test_file, 'w') as f:
        f.write(test_code)

    # Test analyze_code with file
    print("\n3. Testing analyze_code with file...")
    analysis_result = await handle_call_tool("analyze_code", {
        "files": [str(test_file)],
        "question": "What are the time complexities of these functions? Can you suggest optimizations?",
        "temperature": 0.3,
        "max_tokens": 500
    })
    print("✓ Code analysis response:")
    print(analysis_result[0].text[:400] + "..." if len(analysis_result[0].text) > 400 else analysis_result[0].text)

    # Test analyze_code with direct code
    print("\n4. Testing analyze_code with direct code...")
    analysis_result = await handle_call_tool("analyze_code", {
        "code": "class Stack:\n    def __init__(self):\n        self.items = []\n    def push(self, item):\n        self.items.append(item)\n    def pop(self):\n        return self.items.pop() if self.items else None",
        "question": "Is this a good implementation of a stack? What improvements would you suggest?",
        "temperature": 0.3
    })
    print("✓ Direct code analysis response:")
    print(analysis_result[0].text[:400] + "..." if len(analysis_result[0].text) > 400 else analysis_result[0].text)

    # Test large context (simulate)
    print("\n5. Testing context size estimation...")
    large_code = "x = 1\n" * 100000  # ~600K characters, ~150K tokens
    analysis_result = await handle_call_tool("analyze_code", {
        "code": large_code,
        "question": "How many assignment statements are in this code?",
        "temperature": 0.1
    })
    print("✓ Large context test:")
    print(analysis_result[0].text[:200] + "..." if len(analysis_result[0].text) > 200 else analysis_result[0].text)

    # Clean up test file
    test_file.unlink()

    print("\n" + "-" * 50)
    print("All enhanced tests completed!")


if __name__ == "__main__":
    # Check for API key
    if not os.getenv("GEMINI_API_KEY"):
        print("Error: GEMINI_API_KEY environment variable is not set")
        print("Please set it with: export GEMINI_API_KEY='your-api-key'")
        exit(1)

    asyncio.run(test_enhanced_features())
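The comment in test 5 leans on the common rule of thumb of roughly four characters per token for English text and code. As a sanity check of that arithmetic (a heuristic only, not the tokenizer Gemini actually uses):

```
def estimate_tokens(text: str) -> int:
    """Rough heuristic: ~4 characters per token for English text and code."""
    return len(text) // 4

large_code = "x = 1\n" * 100000     # 6 chars per line -> 600,000 chars
print(estimate_tokens(large_code))  # 150000, well under a 1M-token window
```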
test_optimized.py (deleted, −84 lines)
@@ -1,84 +0,0 @@
#!/usr/bin/env python3
"""
Test script for optimized Claude Code settings
"""

import os
import asyncio

from gemini_server import configure_gemini, handle_call_tool


async def test_optimized_settings():
    """Test the optimized temperature and developer settings"""
    print("Testing Optimized Claude Code Settings...")
    print("-" * 50)

    # Test configuration
    try:
        configure_gemini()
        print("✓ Gemini API configured successfully")
    except Exception as e:
        print(f"✗ Failed to configure Gemini API: {e}")
        return

    # Test 1: Default chat temperature (should be 0.5)
    print("\n1. Testing chat with default temperature (0.5)...")
    result = await handle_call_tool("chat", {
        "prompt": "Explain the concept of dependency injection in one paragraph. Be concise but thorough."
    })
    print("Response preview (should be balanced - accurate but not robotic):")
    print(result[0].text[:300] + "..." if len(result[0].text) > 300 else result[0].text)

    # Test 2: Code analysis with low temperature (0.2)
    print("\n2. Testing code analysis with default low temperature (0.2)...")
    code = '''
async def fetch_user_data(user_id: str, cache=None):
    if cache and user_id in cache:
        return cache[user_id]

    response = await http_client.get(f"/api/users/{user_id}")
    user_data = response.json()

    if cache:
        cache[user_id] = user_data

    return user_data
'''

    result = await handle_call_tool("analyze_code", {
        "code": code,
        "question": "Identify potential issues and suggest improvements"
    })
    print("Response preview (should be precise and technical):")
    print(result[0].text[:400] + "..." if len(result[0].text) > 400 else result[0].text)

    # Test 3: Creative task with higher temperature
    print("\n3. Testing creative task with custom higher temperature...")
    result = await handle_call_tool("chat", {
        "prompt": "Suggest 3 innovative ways to implement a rate limiter",
        "temperature": 0.8
    })
    print("Response preview (should be more creative):")
    print(result[0].text[:400] + "..." if len(result[0].text) > 400 else result[0].text)

    # Test 4: Verify developer context is applied
    print("\n4. Testing developer context (no system prompt)...")
    result = await handle_call_tool("chat", {
        "prompt": "What's the time complexity of quicksort?",
        "temperature": 0.3
    })
    print("Response (should be technical and developer-focused):")
    print(result[0].text[:300] + "..." if len(result[0].text) > 300 else result[0].text)

    print("\n" + "-" * 50)
    print("Optimized settings test completed!")


if __name__ == "__main__":
    # Check for API key
    if not os.getenv("GEMINI_API_KEY"):
        print("Error: GEMINI_API_KEY environment variable is not set")
        print("Please set it with: export GEMINI_API_KEY='your-api-key'")
        exit(1)

    asyncio.run(test_optimized_settings())
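Tests 1 and 2 assume per-tool temperature defaults (0.5 for chat, 0.2 for analyze_code) that apply when the caller passes no `temperature`. A minimal sketch of that lookup, with names invented for illustration since the server code is not in this diff:

```
# Hypothetical defaults table mirroring what the tests above expect.
DEFAULT_TEMPERATURES = {
    "chat": 0.5,          # balanced: accurate but conversational
    "analyze_code": 0.2,  # low: precise, reproducible code review
}

def resolve_temperature(tool_name: str, arguments: dict) -> float:
    """Use the caller's temperature if given, else the per-tool default."""
    return arguments.get("temperature", DEFAULT_TEMPERATURES.get(tool_name, 0.5))
```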
test_server.py (deleted, −70 lines)
@@ -1,70 +0,0 @@
#!/usr/bin/env python3
"""
Test script for Gemini MCP Server
"""

import os
import asyncio
import json

from gemini_server import configure_gemini, handle_call_tool, handle_list_tools


async def test_server():
    """Test the server functionality"""
    print("Testing Gemini MCP Server...")
    print("-" * 50)

    # Test configuration
    try:
        configure_gemini()
        print("✓ Gemini API configured successfully")
    except Exception as e:
        print(f"✗ Failed to configure Gemini API: {e}")
        return

    # Test listing tools
    print("\n1. Testing list_tools...")
    tools = await handle_list_tools()
    print(f"✓ Found {len(tools)} tools:")
    for tool in tools:
        print(f"  - {tool.name}: {tool.description}")

    # Test list_models
    print("\n2. Testing list_models tool...")
    models_result = await handle_call_tool("list_models", {})
    print("✓ Available models:")
    print(models_result[0].text)

    # Test chat
    print("\n3. Testing chat tool...")
    chat_result = await handle_call_tool("chat", {
        "prompt": "What is the capital of France?",
        "temperature": 0.3,
        "max_tokens": 50
    })
    print("✓ Chat response:")
    print(chat_result[0].text)

    # Test chat with system prompt
    print("\n4. Testing chat with system prompt...")
    chat_result = await handle_call_tool("chat", {
        "prompt": "What's 2+2?",
        "system_prompt": "You are a helpful math tutor. Always explain your reasoning step by step.",
        "temperature": 0.3,
        "max_tokens": 200
    })
    print("✓ Chat response with system prompt:")
    print(chat_result[0].text)

    print("\n" + "-" * 50)
    print("All tests completed!")


if __name__ == "__main__":
    # Check for API key
    if not os.getenv("GEMINI_API_KEY"):
        print("Error: GEMINI_API_KEY environment variable is not set")
        print("Please set it with: export GEMINI_API_KEY='your-api-key'")
        exit(1)

    asyncio.run(test_server())
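Every assertion in these removed tests reads `result[0].text`, which assumes the MCP convention that a tool call returns a list of content blocks. In the Python MCP SDK those are typically `mcp.types.TextContent` objects; a minimal sketch of the handler shape that would satisfy the tests (the Gemini dispatch body is elided):

```
from mcp.types import TextContent

async def handle_call_tool(name: str, arguments: dict) -> list[TextContent]:
    # ...dispatch `name`/`arguments` to Gemini and collect its reply...
    reply = "example response"
    return [TextContent(type="text", text=reply)]
```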