Initial commit: Gemini MCP Server
- MCP server implementation for Google Gemini models
- Support for multiple Gemini models including 1.5 Pro and 2.5 Pro preview
- Chat tool with configurable parameters (temperature, max_tokens, model)
- List models tool to view available Gemini models
- System prompt support
- Comprehensive error handling for blocked responses
- Test suite included
- Documentation and examples

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
.gitignore (vendored, new file, 157 lines)
@@ -0,0 +1,157 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
Pipfile.lock

# poetry
poetry.lock

# pdm
.pdm.toml
.pdm-python
pdm.lock

# PEP 582
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
.idea/

# VS Code
.vscode/

# macOS
.DS_Store

# API Keys and secrets
*.key
*.pem
.env.local
.env.*.local

# Test outputs
test_output/
*.test.log
README.md (new file, 92 lines)
@@ -0,0 +1,92 @@
# Gemini MCP Server

A Model Context Protocol (MCP) server that enables integration with Google's Gemini models, including Gemini 1.5 Pro and Gemini 2.5 Pro preview.

## Features

- **Chat with Gemini**: Send prompts to any available Gemini model
- **List Models**: View all available Gemini models
- **Configurable Parameters**: Adjust temperature, max tokens, and model selection
- **System Prompts**: Support for system prompts to set context

## Installation

1. Clone this repository
2. Create a virtual environment:
```bash
python3 -m venv venv
source venv/bin/activate
```
3. Install dependencies:
```bash
pip install -r requirements.txt
```

## Configuration

Set your Gemini API key as an environment variable:

```bash
export GEMINI_API_KEY="your-api-key-here"
```
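`requirements.txt` also pins `python-dotenv`, though the committed server only reads the key via `os.getenv`. If you prefer keeping the key in a local `.env` file, a small hypothetical launcher along these lines would work — the launcher file and the `load_dotenv()` step are assumptions, not part of the server itself:

```python
#!/usr/bin/env python3
"""Hypothetical launcher: load GEMINI_API_KEY from .env, then start the server."""
import asyncio

from dotenv import load_dotenv

# Assumption: a ./.env file containing GEMINI_API_KEY=... ; load_dotenv()
# copies it into os.environ before the server's configure_gemini() runs.
load_dotenv()

import gemini_server  # noqa: E402  (imported after the environment is populated)

asyncio.run(gemini_server.main())
```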
## Usage

### For Claude Desktop

Add this configuration to your Claude Desktop config file:

```json
{
  "mcpServers": {
    "gemini": {
      "command": "/path/to/venv/bin/python",
      "args": ["/path/to/gemini_server.py"],
      "env": {
        "GEMINI_API_KEY": "your-api-key-here"
      }
    }
  }
}
```

### Direct Usage

Run the server:

```bash
source venv/bin/activate
export GEMINI_API_KEY="your-api-key-here"
python gemini_server.py
```
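For a quick end-to-end check without Claude Desktop, the `mcp` package's client helpers can drive the server over stdio. This is a minimal sketch, assuming the SDK's `stdio_client`/`ClientSession` API and that it is run from the repository root:

```python
#!/usr/bin/env python3
"""Sketch: call the chat tool over stdio (not part of this repository)."""
import asyncio
import os

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client


async def main():
    params = StdioServerParameters(
        command="python",
        args=["gemini_server.py"],
        # Assumption: when env is given, the child gets only these variables,
        # so pass PATH along with the API key.
        env={
            "GEMINI_API_KEY": os.environ["GEMINI_API_KEY"],
            "PATH": os.environ["PATH"],
        },
    )
    async with stdio_client(params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            result = await session.call_tool(
                "chat", arguments={"prompt": "What is the capital of France?"}
            )
            print(result.content[0].text)


asyncio.run(main())
```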
## Available Tools

### chat
Send a prompt to Gemini and receive a response.

Parameters:
- `prompt` (required): The prompt to send to Gemini
- `system_prompt` (optional): System prompt for context
- `max_tokens` (optional): Maximum tokens in response (default: 4096)
- `temperature` (optional): Temperature for randomness 0-1 (default: 0.7)
- `model` (optional): Model to use (default: gemini-1.5-pro-latest)

Available models include:
- `gemini-1.5-pro-latest` - Latest stable Gemini 1.5 Pro
- `gemini-1.5-flash` - Fast Gemini 1.5 Flash model
- `gemini-2.5-pro-preview-06-05` - Gemini 2.5 Pro preview (may have restrictions)
- `gemini-2.0-flash` - Gemini 2.0 Flash
- And many more (use `list_models` to see all available)
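On the wire, a `chat` invocation is a standard MCP `tools/call` JSON-RPC request; the argument values below are illustrative:

```json
{
  "jsonrpc": "2.0",
  "id": 1,
  "method": "tools/call",
  "params": {
    "name": "chat",
    "arguments": {
      "prompt": "Summarize the Model Context Protocol in one sentence.",
      "temperature": 0.4,
      "max_tokens": 256,
      "model": "gemini-1.5-flash"
    }
  }
}
```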
### list_models
List all available Gemini models that support content generation.

## Requirements

- Python 3.8+
- Valid Google Gemini API key

## Notes

- The Gemini 2.5 Pro preview models may have safety restrictions that block certain prompts
- If a model returns a blocked response, the server will indicate the finish reason
- For the most reliable results, use `gemini-1.5-pro-latest` or `gemini-1.5-flash`
claude_config_example.json (new file, 11 lines)
@@ -0,0 +1,11 @@
{
  "mcpServers": {
    "gemini": {
      "command": "/Users/fahad/Developer/gemini-mcp-server/venv/bin/python",
      "args": ["/Users/fahad/Developer/gemini-mcp-server/gemini_server.py"],
      "env": {
        "GEMINI_API_KEY": "your-gemini-api-key-here"
      }
    }
  }
}
gemini_server.py (new executable file, 186 lines)
@@ -0,0 +1,186 @@
#!/usr/bin/env python3
"""
Gemini MCP Server - Model Context Protocol server for Google Gemini
"""

import os
import json
import asyncio
from typing import Optional, Dict, Any, List
from mcp.server.models import InitializationOptions
from mcp.server import Server, NotificationOptions
from mcp.server.stdio import stdio_server
from mcp.types import TextContent, Tool
from pydantic import BaseModel, Field
import google.generativeai as genai


class GeminiChatRequest(BaseModel):
    """Request model for Gemini chat"""
    prompt: str = Field(..., description="The prompt to send to Gemini")
    system_prompt: Optional[str] = Field(None, description="Optional system prompt for context")
    max_tokens: Optional[int] = Field(4096, description="Maximum number of tokens in response")
    temperature: Optional[float] = Field(0.7, description="Temperature for response randomness (0-1)")
    model: Optional[str] = Field("gemini-1.5-pro-latest", description="Model to use (defaults to gemini-1.5-pro-latest)")


# Create the MCP server instance
server = Server("gemini-server")


# Configure Gemini API
def configure_gemini():
    """Configure the Gemini API with API key from environment"""
    api_key = os.getenv("GEMINI_API_KEY")
    if not api_key:
        raise ValueError("GEMINI_API_KEY environment variable is not set")
    genai.configure(api_key=api_key)


@server.list_tools()
async def handle_list_tools() -> List[Tool]:
    """List all available tools"""
    return [
        Tool(
            name="chat",
            description="Chat with a Google Gemini model",
            inputSchema={
                "type": "object",
                "properties": {
                    "prompt": {
                        "type": "string",
                        "description": "The prompt to send to Gemini"
                    },
                    "system_prompt": {
                        "type": "string",
                        "description": "Optional system prompt for context"
                    },
                    "max_tokens": {
                        "type": "integer",
                        "description": "Maximum number of tokens in response",
                        "default": 4096
                    },
                    "temperature": {
                        "type": "number",
                        "description": "Temperature for response randomness (0-1)",
                        "default": 0.7,
                        "minimum": 0,
                        "maximum": 1
                    },
                    "model": {
                        "type": "string",
                        "description": "Model to use (e.g., gemini-1.5-pro-latest, gemini-2.5-pro-preview-06-05)",
                        "default": "gemini-1.5-pro-latest"
                    }
                },
                "required": ["prompt"]
            }
        ),
        Tool(
            name="list_models",
            description="List available Gemini models",
            inputSchema={
                "type": "object",
                "properties": {}
            }
        )
    ]


@server.call_tool()
async def handle_call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
    """Handle tool execution requests"""

    if name == "chat":
        # Validate request
        request = GeminiChatRequest(**arguments)

        try:
            # Use the specified model or default to 1.5 Pro
            model = genai.GenerativeModel(
                model_name=request.model,
                generation_config={
                    "temperature": request.temperature,
                    "max_output_tokens": request.max_tokens,
                }
            )

            # Prepare the prompt
            full_prompt = request.prompt
            if request.system_prompt:
                full_prompt = f"{request.system_prompt}\n\n{request.prompt}"

            # Generate response
            response = model.generate_content(full_prompt)

            # Handle response based on finish reason
            if response.candidates and response.candidates[0].content.parts:
                text = response.candidates[0].content.parts[0].text
            else:
                # Handle safety filters or other issues
                finish_reason = response.candidates[0].finish_reason if response.candidates else "Unknown"
                text = f"Response blocked or incomplete. Finish reason: {finish_reason}"

            return [TextContent(
                type="text",
                text=text
            )]

        except Exception as e:
            return [TextContent(
                type="text",
                text=f"Error calling Gemini API: {str(e)}"
            )]

    elif name == "list_models":
        try:
            # List available models
            models = []
            for model in genai.list_models():
                if 'generateContent' in model.supported_generation_methods:
                    models.append({
                        "name": model.name,
                        "display_name": model.display_name,
                        "description": model.description
                    })

            return [TextContent(
                type="text",
                text=json.dumps(models, indent=2)
            )]

        except Exception as e:
            return [TextContent(
                type="text",
                text=f"Error listing models: {str(e)}"
            )]

    else:
        return [TextContent(
            type="text",
            text=f"Unknown tool: {name}"
        )]


async def main():
    """Main entry point for the server"""
    # Configure Gemini API
    configure_gemini()

    # Run the server using stdio transport
    async with stdio_server() as (read_stream, write_stream):
        await server.run(
            read_stream,
            write_stream,
            InitializationOptions(
                server_name="gemini",
                server_version="1.0.0",
                capabilities={
                    "tools": {}
                }
            )
        )


if __name__ == "__main__":
    asyncio.run(main())
requirements.txt (new file, 3 lines)
@@ -0,0 +1,3 @@
mcp>=1.0.0
google-generativeai>=0.8.0
python-dotenv>=1.0.0
test_server.py (new file, 70 lines)
@@ -0,0 +1,70 @@
#!/usr/bin/env python3
"""
Test script for Gemini MCP Server
"""

import os
import asyncio
import json
from gemini_server import configure_gemini, handle_call_tool, handle_list_tools


async def test_server():
    """Test the server functionality"""
    print("Testing Gemini MCP Server...")
    print("-" * 50)

    # Test configuration
    try:
        configure_gemini()
        print("✓ Gemini API configured successfully")
    except Exception as e:
        print(f"✗ Failed to configure Gemini API: {e}")
        return

    # Test listing tools
    print("\n1. Testing list_tools...")
    tools = await handle_list_tools()
    print(f"✓ Found {len(tools)} tools:")
    for tool in tools:
        print(f"  - {tool.name}: {tool.description}")

    # Test list_models
    print("\n2. Testing list_models tool...")
    models_result = await handle_call_tool("list_models", {})
    print("✓ Available models:")
    print(models_result[0].text)

    # Test chat
    print("\n3. Testing chat tool...")
    chat_result = await handle_call_tool("chat", {
        "prompt": "What is the capital of France?",
        "temperature": 0.3,
        "max_tokens": 50
    })
    print("✓ Chat response:")
    print(chat_result[0].text)

    # Test chat with system prompt
    print("\n4. Testing chat with system prompt...")
    chat_result = await handle_call_tool("chat", {
        "prompt": "What's 2+2?",
        "system_prompt": "You are a helpful math tutor. Always explain your reasoning step by step.",
        "temperature": 0.3,
        "max_tokens": 200
    })
    print("✓ Chat response with system prompt:")
    print(chat_result[0].text)

    print("\n" + "-" * 50)
    print("All tests completed!")


if __name__ == "__main__":
    # Check for API key
    if not os.getenv("GEMINI_API_KEY"):
        print("Error: GEMINI_API_KEY environment variable is not set")
        print("Please set it with: export GEMINI_API_KEY='your-api-key'")
        exit(1)

    asyncio.run(test_server())
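A typical invocation of this suite, assuming the virtual environment from the README (the tests call the live Gemini API, so a valid key is required):

```bash
source venv/bin/activate
export GEMINI_API_KEY="your-api-key-here"
python test_server.py
```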