feat: Add automatic developer-focused system prompt

When the Gemini MCP server is used from Claude Code, it now automatically
injects a developer-focused system prompt that mirrors Claude Code's own
behavior. This ensures Gemini responds with the same developer mindset:

- Expert software development knowledge
- Clean code practices
- Debugging and problem-solving focus
- Clear technical explanations
- Architecture and design understanding
- Performance optimization expertise

The system prompt is automatically applied when no custom system prompt
is provided, making the integration seamless for Claude Code users.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
Author: Fahad
Date: 2025-06-08 19:54:03 +04:00
Parent: b86c42cf3a
Commit: 4d2ad48638
3 changed files with 109 additions and 4 deletions

README.md

@@ -19,6 +19,7 @@ See [MCP_DISCOVERY.md](MCP_DISCOVERY.md) for detailed information about how Clau
- **List Models**: View all available Gemini models
- **Configurable Parameters**: Adjust temperature, max tokens, and model selection
- **System Prompts**: Support for system prompts to set context
- **Developer Context**: Automatically uses developer-focused system prompt for Claude Code integration

## Installation

@@ -146,6 +147,7 @@ Other available models:
- If a model returns a blocked response, the server will indicate the finish reason
- The server estimates tokens as ~4 characters per token
- Maximum context window is 1 million tokens (~4 million characters)
- When no system prompt is provided, the server automatically uses a developer-focused prompt similar to Claude Code

## Tips for Claude Code Users
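The token note in the hunk above is a heuristic, not a real tokenizer. A minimal sketch of how the ~4-characters-per-token estimate maps onto the 1M-token window; the helper names are illustrative and are not part of this commit:

# Illustrative sketch of the "~4 characters per token" estimate mentioned in the
# README notes above; function names are assumptions, not the server's own code.
MAX_CONTEXT_TOKENS = 1_000_000  # 1M tokens, roughly 4M characters

def estimate_tokens(text: str) -> int:
    """Approximate token count using the ~4 characters-per-token rule of thumb."""
    return max(1, len(text) // 4)

def fits_in_context(prompt: str) -> bool:
    """True if the estimated token count stays within the 1M-token window."""
    return estimate_tokens(prompt) <= MAX_CONTEXT_TOKENS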

gemini_server.py

@@ -21,6 +21,20 @@ import google.generativeai as genai

DEFAULT_MODEL = "gemini-2.5-pro-preview-06-05"
MAX_CONTEXT_TOKENS = 1000000  # 1M tokens

# Developer-focused system prompt for Claude Code usage
DEVELOPER_SYSTEM_PROMPT = """You are an expert software developer and code analyst, similar to Claude Code.

You excel at:
- Writing clean, efficient, and well-documented code
- Debugging and solving complex programming problems
- Explaining technical concepts clearly
- Following best practices and design patterns
- Providing thoughtful code reviews and suggestions
- Understanding system architecture and design
- Helping with testing strategies and implementation
- Optimizing performance and identifying bottlenecks

You should be direct, helpful, and focused on practical solutions. When analyzing code, provide actionable insights and concrete improvements. Always consider the broader context and long-term maintainability."""


class GeminiChatRequest(BaseModel):
    """Request model for Gemini chat"""
@@ -202,10 +216,12 @@ async def handle_call_tool(name: str, arguments: Dict[str, Any]) -> List[TextCon
                }
            )

        # Prepare the prompt
        full_prompt = request.prompt
        # Prepare the prompt with automatic developer context if no system prompt provided
        if request.system_prompt:
            full_prompt = f"{request.system_prompt}\n\n{request.prompt}"
        else:
            # Auto-inject developer system prompt for better Claude Code integration
            full_prompt = f"{DEVELOPER_SYSTEM_PROMPT}\n\n{request.prompt}"

        # Generate response
        response = model.generate_content(full_prompt)
@@ -262,8 +278,8 @@ async def handle_call_tool(name: str, arguments: Dict[str, Any]) -> List[TextCon
                }
            )

        # Prepare the full prompt
        system_prompt = request.system_prompt or "You are an expert code analyst. Provide detailed, accurate analysis of the provided code."
        # Prepare the full prompt with enhanced developer context
        system_prompt = request.system_prompt or DEVELOPER_SYSTEM_PROMPT
        full_prompt = f"{system_prompt}\n\nCode to analyze:\n\n{code_context}\n\nQuestion/Request: {request.question}"

        # Generate response
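Both handlers above implement the same fallback: use request.system_prompt when the caller supplies one, otherwise fall back to DEVELOPER_SYSTEM_PROMPT. A minimal sketch of how that shared logic could be factored into one helper; the helper name is hypothetical and is not part of this commit:

# Hypothetical helper consolidating the fallback used by both tool handlers;
# shown only to make the shared pattern explicit, not included in this commit.
from gemini_server import DEVELOPER_SYSTEM_PROMPT  # constant added in the hunk above

def resolve_system_prompt(custom_prompt: str | None) -> str:
    """Return the caller-supplied system prompt, or the developer-focused default."""
    return custom_prompt or DEVELOPER_SYSTEM_PROMPT

# chat:         full_prompt = f"{resolve_system_prompt(request.system_prompt)}\n\n{request.prompt}"
# analyze_code: system_prompt = resolve_system_prompt(request.system_prompt)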

test_developer_context.py (new file, 87 lines)

@@ -0,0 +1,87 @@
#!/usr/bin/env python3
"""
Test script to verify developer context is properly injected
"""

import os
import asyncio
from gemini_server import configure_gemini, handle_call_tool


async def test_developer_context():
    """Test the developer context system prompt"""

    print("Testing Developer Context in Gemini MCP Server...")
    print("-" * 50)

    # Test configuration
    try:
        configure_gemini()
        print("✓ Gemini API configured successfully")
    except Exception as e:
        print(f"✗ Failed to configure Gemini API: {e}")
        return

    # Test 1: Chat without explicit system prompt (should use developer prompt)
    print("\n1. Testing chat WITHOUT system prompt (should auto-inject developer context)...")
    result = await handle_call_tool("chat", {
        "prompt": "Write a Python function to reverse a linked list. Include comments.",
        "temperature": 0.3,
        "max_tokens": 500
    })
    print("Response preview:")
    print(result[0].text[:400] + "..." if len(result[0].text) > 400 else result[0].text)

    # Test 2: Chat WITH explicit system prompt (should use provided prompt)
    print("\n2. Testing chat WITH custom system prompt...")
    result = await handle_call_tool("chat", {
        "prompt": "Write a haiku about coding",
        "system_prompt": "You are a creative poet who writes about technology.",
        "temperature": 0.9,
        "max_tokens": 100
    })
    print("Response:")
    print(result[0].text)

    # Test 3: Code analysis without system prompt (should use developer prompt)
    print("\n3. Testing analyze_code WITHOUT system prompt...")
    test_code = '''
def bubble_sort(arr):
    n = len(arr)
    for i in range(n):
        for j in range(0, n-i-1):
            if arr[j] > arr[j+1]:
                arr[j], arr[j+1] = arr[j+1], arr[j]
    return arr
'''

    result = await handle_call_tool("analyze_code", {
        "code": test_code,
        "question": "Review this code and suggest improvements",
        "temperature": 0.3
    })
    print("Response preview:")
    print(result[0].text[:500] + "..." if len(result[0].text) > 500 else result[0].text)

    # Test 4: Code analysis WITH custom system prompt
    print("\n4. Testing analyze_code WITH custom system prompt...")
    result = await handle_call_tool("analyze_code", {
        "code": test_code,
        "question": "Is this code correct?",
        "system_prompt": "You are a beginner-friendly tutor. Explain things simply.",
        "temperature": 0.5
    })
    print("Response preview:")
    print(result[0].text[:400] + "..." if len(result[0].text) > 400 else result[0].text)

    print("\n" + "-" * 50)
    print("Developer context tests completed!")


if __name__ == "__main__":
    # Check for API key
    if not os.getenv("GEMINI_API_KEY"):
        print("Error: GEMINI_API_KEY environment variable is not set")
        print("Please set it with: export GEMINI_API_KEY='your-api-key'")
        exit(1)

    asyncio.run(test_developer_context())
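The script above prints responses for manual inspection rather than asserting on them. A hedged sketch of how one of these checks could be turned into an automated test, assuming pytest with the pytest-asyncio plugin is available (neither is added by this commit):

# Hypothetical pytest-style variant of check #2; assumes pytest and
# pytest-asyncio are installed, which this commit does not provide.
import pytest
from gemini_server import configure_gemini, handle_call_tool

@pytest.mark.asyncio
async def test_custom_system_prompt_returns_text():
    configure_gemini()
    result = await handle_call_tool("chat", {
        "prompt": "Write a haiku about coding",
        "system_prompt": "You are a creative poet who writes about technology.",
        "max_tokens": 100,
    })
    # handle_call_tool returns a list of TextContent items; require non-empty text back.
    assert result and result[0].text.strip()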