docs: Improve Quick Start instructions for better clarity

- Add explicit step to clone the repository first
- Include instructions for accessing config via Claude Desktop UI
- Clarify that users must replace placeholder paths with actual paths
- Add note to remember the cloned directory path

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
Fahad
2025-06-08 20:39:27 +04:00
parent 29edd06be7
commit 970b73c175
3 changed files with 97 additions and 22 deletions

View File

@@ -13,11 +13,27 @@ This server acts as a developer assistant that augments Claude Code when you nee
## 🚀 Quick Start for Claude Code ## 🚀 Quick Start for Claude Code
### 1. Configure in Claude Desktop ### 1. Clone the Repository
Add to your Claude Desktop configuration file: First, clone this repository to your local machine:
```bash
git clone https://github.com/BeehiveInnovations/gemini-mcp-server.git
cd gemini-mcp-server
```
**macOS**: `~/Library/Application Support/Claude/claude_desktop_config.json` Note the full path to this directory - you'll need it for the configuration.
### 2. Configure in Claude Desktop
You can access the configuration file in two ways:
- **Through Claude Desktop**: Open Claude Desktop → Settings → Developer → Edit Config
- **Direct file access**:
- **macOS**: `~/Library/Application Support/Claude/claude_desktop_config.json`
- **Windows**: `%APPDATA%\Claude\claude_desktop_config.json`
Add the following configuration, replacing the path with your actual directory path:
**macOS**:
```json ```json
{ {
"mcpServers": { "mcpServers": {
@@ -31,7 +47,7 @@ Add to your Claude Desktop configuration file:
} }
``` ```
**Windows**: `%APPDATA%\Claude\claude_desktop_config.json` **Windows**:
```json ```json
{ {
"mcpServers": { "mcpServers": {
@@ -45,18 +61,20 @@ Add to your Claude Desktop configuration file:
} }
``` ```
### 2. Restart Claude Desktop **Important**: Replace `/path/to/gemini-mcp-server` with the actual path where you cloned the repository.
### 3. Restart Claude Desktop
After adding the configuration, restart Claude Desktop. You'll see "gemini" in the MCP servers list. After adding the configuration, restart Claude Desktop. You'll see "gemini" in the MCP servers list.
### 3. Add to Claude Code ### 4. Add to Claude Code
To make the server available in Claude Code, run: To make the server available in Claude Code, run:
```bash ```bash
claude mcp add-from-claude-desktop -s user claude mcp add-from-claude-desktop -s user
``` ```
### 4. Start Using Natural Language ### 5. Start Using Natural Language
Just talk to Claude naturally: Just talk to Claude naturally:
- "Use Gemini to analyze this large file..." - "Use Gemini to analyze this large file..."

28
tests/conftest.py Normal file
View File

@@ -0,0 +1,28 @@
"""
Pytest configuration for Gemini MCP Server tests
"""
import sys
import os
from pathlib import Path
# Make the project root importable so tests can `import gemini_server`
# no matter which directory pytest is launched from.
parent_dir = Path(__file__).resolve().parents[1]
if str(parent_dir) not in sys.path:
    sys.path.insert(0, str(parent_dir))

# Tests never call the real API; supply a placeholder key when none is set.
os.environ.setdefault("GEMINI_API_KEY", "dummy-key-for-tests")

# On Windows, swap the default Proactor loop for the selector-based one,
# which is more compatible with async test tooling.
if sys.platform == "win32":
    import asyncio

    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
# Pytest configuration
def pytest_configure(config):
    """Register the custom ``asyncio`` marker so pytest does not warn on it."""
    config.addinivalue_line("markers", "asyncio: mark test as async")

View File

@@ -7,11 +7,15 @@ import json
from unittest.mock import Mock, patch, AsyncMock from unittest.mock import Mock, patch, AsyncMock
from pathlib import Path from pathlib import Path
import sys import sys
import os
# Add parent directory to path for imports # Add parent directory to path for imports in a cross-platform way
sys.path.append(str(Path(__file__).parent.parent)) parent_dir = Path(__file__).resolve().parent.parent
if str(parent_dir) not in sys.path:
sys.path.insert(0, str(parent_dir))
from gemini_server import ( try:
from gemini_server import (
GeminiChatRequest, GeminiChatRequest,
CodeAnalysisRequest, CodeAnalysisRequest,
read_file_content, read_file_content,
@@ -21,6 +25,27 @@ from gemini_server import (
DEVELOPER_SYSTEM_PROMPT, DEVELOPER_SYSTEM_PROMPT,
DEFAULT_MODEL DEFAULT_MODEL
) )
except ImportError as e:
# If import fails, try alternative import method
import importlib.util
spec = importlib.util.spec_from_file_location(
"gemini_server",
parent_dir / "gemini_server.py"
)
gemini_server = importlib.util.module_from_spec(spec)
sys.modules["gemini_server"] = gemini_server
spec.loader.exec_module(gemini_server)
from gemini_server import (
GeminiChatRequest,
CodeAnalysisRequest,
read_file_content,
prepare_code_context,
handle_list_tools,
handle_call_tool,
DEVELOPER_SYSTEM_PROMPT,
DEFAULT_MODEL
)
class TestModels: class TestModels:
@@ -66,7 +91,7 @@ class TestFileOperations:
def test_read_file_content_success(self, tmp_path): def test_read_file_content_success(self, tmp_path):
"""Test successful file reading""" """Test successful file reading"""
test_file = tmp_path / "test.py" test_file = tmp_path / "test.py"
test_file.write_text("def hello():\n return 'world'") test_file.write_text("def hello():\n return 'world'", encoding='utf-8')
content = read_file_content(str(test_file)) content = read_file_content(str(test_file))
assert "=== File:" in content assert "=== File:" in content
@@ -75,7 +100,11 @@ class TestFileOperations:
def test_read_file_content_not_found(self): def test_read_file_content_not_found(self):
"""Test reading non-existent file""" """Test reading non-existent file"""
content = read_file_content("/nonexistent/file.py") # Use a path that's guaranteed not to exist on any platform
nonexistent_path = os.path.join(
os.path.sep, "nonexistent_dir_12345", "nonexistent_file.py"
)
content = read_file_content(nonexistent_path)
assert "Error: File not found" in content assert "Error: File not found" in content
def test_read_file_content_directory(self, tmp_path): def test_read_file_content_directory(self, tmp_path):
@@ -86,9 +115,9 @@ class TestFileOperations:
def test_prepare_code_context_with_files(self, tmp_path): def test_prepare_code_context_with_files(self, tmp_path):
"""Test preparing context from files""" """Test preparing context from files"""
file1 = tmp_path / "file1.py" file1 = tmp_path / "file1.py"
file1.write_text("print('file1')") file1.write_text("print('file1')", encoding='utf-8')
file2 = tmp_path / "file2.py" file2 = tmp_path / "file2.py"
file2.write_text("print('file2')") file2.write_text("print('file2')", encoding='utf-8')
context = prepare_code_context([str(file1), str(file2)], None) context = prepare_code_context([str(file1), str(file2)], None)
assert "file1.py" in context assert "file1.py" in context
@@ -106,7 +135,7 @@ class TestFileOperations:
def test_prepare_code_context_mixed(self, tmp_path): def test_prepare_code_context_mixed(self, tmp_path):
"""Test preparing context from both files and code""" """Test preparing context from both files and code"""
test_file = tmp_path / "test.py" test_file = tmp_path / "test.py"
test_file.write_text("# From file") test_file.write_text("# From file", encoding='utf-8')
code = "# Direct code" code = "# Direct code"
context = prepare_code_context([str(test_file)], code) context = prepare_code_context([str(test_file)], code)
@@ -136,7 +165,7 @@ class TestToolHandlers:
assert "Unknown tool" in result[0].text assert "Unknown tool" in result[0].text
@pytest.mark.asyncio @pytest.mark.asyncio
@patch('gemini_server.genai.GenerativeModel') @patch('google.generativeai.GenerativeModel')
async def test_handle_call_tool_chat_success(self, mock_model): async def test_handle_call_tool_chat_success(self, mock_model):
"""Test successful chat tool call""" """Test successful chat tool call"""
# Mock the response # Mock the response
@@ -163,7 +192,7 @@ class TestToolHandlers:
assert call_args['generation_config']['temperature'] == 0.5 assert call_args['generation_config']['temperature'] == 0.5
@pytest.mark.asyncio @pytest.mark.asyncio
@patch('gemini_server.genai.GenerativeModel') @patch('google.generativeai.GenerativeModel')
async def test_handle_call_tool_chat_with_developer_prompt(self, mock_model): async def test_handle_call_tool_chat_with_developer_prompt(self, mock_model):
"""Test chat tool uses developer prompt when no system prompt provided""" """Test chat tool uses developer prompt when no system prompt provided"""
mock_response = Mock() mock_response = Mock()
@@ -190,12 +219,12 @@ class TestToolHandlers:
assert "Must provide either 'files' or 'code'" in result[0].text assert "Must provide either 'files' or 'code'" in result[0].text
@pytest.mark.asyncio @pytest.mark.asyncio
@patch('gemini_server.genai.GenerativeModel') @patch('google.generativeai.GenerativeModel')
async def test_handle_call_tool_analyze_code_success(self, mock_model, tmp_path): async def test_handle_call_tool_analyze_code_success(self, mock_model, tmp_path):
"""Test successful code analysis""" """Test successful code analysis"""
# Create test file # Create test file
test_file = tmp_path / "test.py" test_file = tmp_path / "test.py"
test_file.write_text("def hello(): pass") test_file.write_text("def hello(): pass", encoding='utf-8')
# Mock response # Mock response
mock_response = Mock() mock_response = Mock()
@@ -215,7 +244,7 @@ class TestToolHandlers:
assert result[0].text == "Analysis result" assert result[0].text == "Analysis result"
@pytest.mark.asyncio @pytest.mark.asyncio
@patch('gemini_server.genai.list_models') @patch('google.generativeai.list_models')
async def test_handle_call_tool_list_models(self, mock_list_models): async def test_handle_call_tool_list_models(self, mock_list_models):
"""Test listing models""" """Test listing models"""
# Mock model data # Mock model data
@@ -240,7 +269,7 @@ class TestErrorHandling:
"""Test error handling scenarios""" """Test error handling scenarios"""
@pytest.mark.asyncio @pytest.mark.asyncio
@patch('gemini_server.genai.GenerativeModel') @patch('google.generativeai.GenerativeModel')
async def test_handle_call_tool_chat_api_error(self, mock_model): async def test_handle_call_tool_chat_api_error(self, mock_model):
"""Test handling API errors in chat""" """Test handling API errors in chat"""
mock_instance = Mock() mock_instance = Mock()
@@ -253,7 +282,7 @@ class TestErrorHandling:
assert "API Error" in result[0].text assert "API Error" in result[0].text
@pytest.mark.asyncio @pytest.mark.asyncio
@patch('gemini_server.genai.GenerativeModel') @patch('google.generativeai.GenerativeModel')
async def test_handle_call_tool_chat_blocked_response(self, mock_model): async def test_handle_call_tool_chat_blocked_response(self, mock_model):
"""Test handling blocked responses""" """Test handling blocked responses"""
mock_response = Mock() mock_response = Mock()