fix: Docker path translation for review_changes and code deduplication

- Fixed the review_changes tool to translate host paths to container paths when running in Docker
- Prevents "No such file or directory" errors when running in Docker containers
- Added proper error handling with clear messages when paths are inaccessible (sketched below)
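
A minimal sketch of the translation idea (illustrative only — the real helper is `utils.file_utils._get_secure_container_path`, exercised by the new tests; the function name `translate_host_path` here is hypothetical):

```python
import os
from pathlib import Path


def translate_host_path(host_path: str) -> str:
    """Remap a host path onto the container mount, or return it unchanged
    when the Docker environment variables are not set."""
    workspace_root = os.environ.get("WORKSPACE_ROOT")    # host side of the mount
    container_root = os.environ.get("MCP_PROJECT_ROOT")  # container side of the mount
    if not workspace_root or not container_root:
        # Not running in Docker: leave the path untouched
        return host_path
    try:
        relative = Path(host_path).relative_to(workspace_root)
    except ValueError:
        # The path lies outside the mounted workspace, so the container cannot see it
        raise ValueError(
            f"{host_path} is not accessible from within the Docker container; "
            "only files under the mounted workspace are available"
        )
    return str(Path(container_root) / relative)
```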

refactor: Centralized token limit validation across all tools
- Added a _validate_token_limit method to BaseTool to eliminate code duplication (sketched below)
- Removed ~25 lines of duplicated code across 5 tools (analyze, chat, debug_issue, review_code, think_deeper)
- Maintains the exact same error messages and behavior
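
A rough sketch of the shared check (the real BaseTool._validate_token_limit keeps the tools' existing error wording; the use of estimate_tokens and MAX_CONTEXT_TOKENS below is an assumption about its internals):

```python
from config import MAX_CONTEXT_TOKENS
from utils import estimate_tokens


class BaseTool:  # simplified stand-in for the real tools/base.py class
    def _validate_token_limit(self, content: str, content_type: str = "Context") -> None:
        """Raise a clear error when prepared content would exceed the context window."""
        estimated = estimate_tokens(content)  # assumed to return an integer estimate
        if estimated > MAX_CONTEXT_TOKENS:
            raise ValueError(
                f"{content_type} too large: ~{estimated:,} estimated tokens "
                f"exceeds the {MAX_CONTEXT_TOKENS:,}-token limit"
            )
```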

feat: Enhanced large prompt handling
- Added support for prompts >50K characters by requesting file-based input (prompt.txt) instead
- Preserves MCP's ~25K-token capacity for responses
- All tools now check prompt size before processing (sketched below)
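
Roughly, each tool now runs a check of this shape before building its prompt (a sketch assuming the response fields asserted in the new tests; the helper name `check_prompt_size` is hypothetical):

```python
from typing import Optional

from config import MCP_PROMPT_SIZE_LIMIT  # the 50,000-character limit


def check_prompt_size(text: str) -> Optional[dict]:
    """Return a 'requires_file_prompt' payload when text is too large, else None."""
    if text and len(text) > MCP_PROMPT_SIZE_LIMIT:
        return {
            "status": "requires_file_prompt",
            "content": (
                f"The prompt exceeds {MCP_PROMPT_SIZE_LIMIT:,} characters. "
                "Please save it to a file named prompt.txt and resend the request "
                "with that file included in the files parameter."
            ),
            "metadata": {"prompt_size": len(text), "limit": MCP_PROMPT_SIZE_LIMIT},
        }
    return None  # small enough to send inline
```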

test: Added comprehensive Docker path integration tests
- Tests for path translation, security validation, and error handling
- Tests for review_changes tool specifically with Docker paths
- Fixed a failing think_deeper test (updated the default thinking mode from "max" to "high")

chore: Code quality improvements
- Applied black formatting across all files
- Fixed import sorting with isort
- All 96 tests passing
- Standardized error handling to follow the MCP TextContent format

The changes ensure consistent behavior across all environments while reducing code duplication and improving maintainability.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
Author: Fahad
Date: 2025-06-10 07:20:24 +04:00
Parent: ab007fb4b2
Commit: 7ea790ef88
36 changed files with 1540 additions and 176 deletions


@@ -9,7 +9,7 @@ import pytest
 from tools.analyze import AnalyzeTool
 from tools.debug_issue import DebugIssueTool
-from tools.models import ToolOutput, ClarificationRequest
+from tools.models import ClarificationRequest, ToolOutput
 
 class TestDynamicContextRequests:


@@ -2,16 +2,9 @@
 Tests for configuration
 """
 
-from config import (
-    GEMINI_MODEL,
-    MAX_CONTEXT_TOKENS,
-    TEMPERATURE_ANALYTICAL,
-    TEMPERATURE_BALANCED,
-    TEMPERATURE_CREATIVE,
-    __author__,
-    __updated__,
-    __version__,
-)
+from config import (GEMINI_MODEL, MAX_CONTEXT_TOKENS, TEMPERATURE_ANALYTICAL,
+                    TEMPERATURE_BALANCED, TEMPERATURE_CREATIVE, __author__,
+                    __updated__, __version__)
 
 
 class TestConfig:


@@ -0,0 +1,251 @@
"""
Integration tests for Docker path translation
These tests verify the actual behavior when running in a Docker-like environment
by creating temporary directories and testing the path translation logic.
"""
import importlib
import os
import tempfile
from pathlib import Path
import pytest
# We'll reload the module to test different environment configurations
import utils.file_utils
def test_docker_path_translation_integration():
"""Test path translation in a simulated Docker environment"""
with tempfile.TemporaryDirectory() as tmpdir:
# Set up directories
host_workspace = Path(tmpdir) / "host_workspace"
host_workspace.mkdir()
container_workspace = Path(tmpdir) / "container_workspace"
container_workspace.mkdir()
# Create a test file structure
(host_workspace / "src").mkdir()
test_file = host_workspace / "src" / "test.py"
test_file.write_text("# test file")
# Set environment variables and reload the module
original_env = os.environ.copy()
try:
os.environ["WORKSPACE_ROOT"] = str(host_workspace)
os.environ["MCP_PROJECT_ROOT"] = str(container_workspace)
# Reload the module to pick up new environment variables
importlib.reload(utils.file_utils)
# Mock the CONTAINER_WORKSPACE to point to our test directory
utils.file_utils.CONTAINER_WORKSPACE = container_workspace
# Test the translation
from utils.file_utils import _get_secure_container_path
# This should translate the host path to container path
host_path = str(test_file)
result = _get_secure_container_path(host_path)
# Verify the translation worked
expected = str(container_workspace / "src" / "test.py")
assert result == expected
finally:
# Restore original environment
os.environ.clear()
os.environ.update(original_env)
importlib.reload(utils.file_utils)
def test_docker_security_validation():
"""Test that path traversal attempts are properly blocked"""
with tempfile.TemporaryDirectory() as tmpdir:
# Set up directories
host_workspace = Path(tmpdir) / "workspace"
host_workspace.mkdir()
secret_dir = Path(tmpdir) / "secret"
secret_dir.mkdir()
secret_file = secret_dir / "password.txt"
secret_file.write_text("secret")
# Create a symlink inside workspace pointing to secret
symlink = host_workspace / "link_to_secret"
symlink.symlink_to(secret_file)
original_env = os.environ.copy()
try:
os.environ["WORKSPACE_ROOT"] = str(host_workspace)
os.environ["MCP_PROJECT_ROOT"] = str(host_workspace)
# Reload the module
importlib.reload(utils.file_utils)
utils.file_utils.CONTAINER_WORKSPACE = Path("/workspace")
from utils.file_utils import resolve_and_validate_path
# Trying to access the symlink should fail
with pytest.raises(PermissionError):
resolve_and_validate_path(str(symlink))
finally:
os.environ.clear()
os.environ.update(original_env)
importlib.reload(utils.file_utils)
def test_no_docker_environment():
"""Test that paths are unchanged when Docker environment is not set"""
original_env = os.environ.copy()
try:
# Clear Docker-related environment variables
os.environ.pop("WORKSPACE_ROOT", None)
os.environ.pop("MCP_PROJECT_ROOT", None)
# Reload the module
importlib.reload(utils.file_utils)
from utils.file_utils import _get_secure_container_path
# Path should remain unchanged
test_path = "/some/random/path.py"
assert _get_secure_container_path(test_path) == test_path
finally:
os.environ.clear()
os.environ.update(original_env)
importlib.reload(utils.file_utils)
def test_review_changes_docker_path_translation():
"""Test that review_changes tool properly translates Docker paths"""
with tempfile.TemporaryDirectory() as tmpdir:
# Set up directories to simulate Docker mount
host_workspace = Path(tmpdir) / "host_workspace"
host_workspace.mkdir()
container_workspace = Path(tmpdir) / "container_workspace"
container_workspace.mkdir()
# Create a git repository in the container workspace
project_dir = container_workspace / "project"
project_dir.mkdir()
# Initialize git repo
import subprocess
subprocess.run(["git", "init"], cwd=project_dir, capture_output=True)
# Create a test file
test_file = project_dir / "test.py"
test_file.write_text("print('hello')")
# Stage the file
subprocess.run(["git", "add", "test.py"], cwd=project_dir, capture_output=True)
original_env = os.environ.copy()
try:
# Simulate Docker environment
os.environ["WORKSPACE_ROOT"] = str(host_workspace)
os.environ["MCP_PROJECT_ROOT"] = str(container_workspace)
# Reload the module
importlib.reload(utils.file_utils)
utils.file_utils.CONTAINER_WORKSPACE = container_workspace
# Import after reloading to get updated environment
from tools.review_changes import ReviewChanges
# Create tool instance
tool = ReviewChanges()
# Test path translation in prepare_prompt
request = tool.get_request_model()(
path=str(
host_workspace / "project"
), # Host path that needs translation
review_type="quick",
severity_filter="all",
)
# This should translate the path and find the git repository
import asyncio
result = asyncio.run(tool.prepare_prompt(request))
# Should find the repository (not raise an error about inaccessible path)
# If we get here without exception, the path was successfully translated
assert isinstance(result, str)
# The result should contain git diff information or indicate no changes
assert (
"No git repositories found" not in result or "changes" in result.lower()
)
finally:
os.environ.clear()
os.environ.update(original_env)
importlib.reload(utils.file_utils)
def test_review_changes_docker_path_error():
"""Test that review_changes tool raises error for inaccessible paths"""
with tempfile.TemporaryDirectory() as tmpdir:
# Set up directories to simulate Docker mount
host_workspace = Path(tmpdir) / "host_workspace"
host_workspace.mkdir()
container_workspace = Path(tmpdir) / "container_workspace"
container_workspace.mkdir()
# Create a path outside the mounted workspace
outside_path = Path(tmpdir) / "outside_workspace"
outside_path.mkdir()
original_env = os.environ.copy()
try:
# Simulate Docker environment
os.environ["WORKSPACE_ROOT"] = str(host_workspace)
os.environ["MCP_PROJECT_ROOT"] = str(container_workspace)
# Reload the module
importlib.reload(utils.file_utils)
utils.file_utils.CONTAINER_WORKSPACE = container_workspace
# Import after reloading to get updated environment
from tools.review_changes import ReviewChanges
# Create tool instance
tool = ReviewChanges()
# Test path translation with an inaccessible path
request = tool.get_request_model()(
path=str(outside_path), # Path outside the mounted workspace
review_type="quick",
severity_filter="all",
)
# This should raise a ValueError
import asyncio
with pytest.raises(ValueError) as exc_info:
asyncio.run(tool.prepare_prompt(request))
# Check the error message
assert "not accessible from within the Docker container" in str(
exc_info.value
)
assert "mounted workspace" in str(exc_info.value)
finally:
os.environ.clear()
os.environ.update(original_env)
importlib.reload(utils.file_utils)
if __name__ == "__main__":
pytest.main([__file__, "-v"])


@@ -0,0 +1,313 @@
"""
Tests for large prompt handling functionality.
This test module verifies that the MCP server correctly handles
prompts that exceed the 50,000 character limit by requesting
Claude to save them to a file and resend.
"""
import json
import os
import shutil
import tempfile
from unittest.mock import MagicMock, patch
import pytest
from mcp.types import TextContent
from config import MCP_PROMPT_SIZE_LIMIT
from tools.analyze import AnalyzeTool
from tools.chat import ChatTool
from tools.debug_issue import DebugIssueTool
from tools.review_changes import ReviewChanges
from tools.review_code import ReviewCodeTool
from tools.think_deeper import ThinkDeeperTool
class TestLargePromptHandling:
"""Test suite for large prompt handling across all tools."""
@pytest.fixture
def large_prompt(self):
"""Create a prompt larger than MCP_PROMPT_SIZE_LIMIT characters."""
return "x" * (MCP_PROMPT_SIZE_LIMIT + 1000)
@pytest.fixture
def normal_prompt(self):
"""Create a normal-sized prompt."""
return "This is a normal prompt that should work fine."
@pytest.fixture
def temp_prompt_file(self, large_prompt):
"""Create a temporary prompt.txt file with large content."""
# Create temp file with exact name "prompt.txt"
temp_dir = tempfile.mkdtemp()
file_path = os.path.join(temp_dir, "prompt.txt")
with open(file_path, "w") as f:
f.write(large_prompt)
return file_path
@pytest.mark.asyncio
async def test_chat_large_prompt_detection(self, large_prompt):
"""Test that chat tool detects large prompts."""
tool = ChatTool()
result = await tool.execute({"prompt": large_prompt})
assert len(result) == 1
assert isinstance(result[0], TextContent)
output = json.loads(result[0].text)
assert output["status"] == "requires_file_prompt"
assert f"{MCP_PROMPT_SIZE_LIMIT:,} characters" in output["content"]
assert output["metadata"]["prompt_size"] == len(large_prompt)
assert output["metadata"]["limit"] == MCP_PROMPT_SIZE_LIMIT
@pytest.mark.asyncio
async def test_chat_normal_prompt_works(self, normal_prompt):
"""Test that chat tool works normally with regular prompts."""
tool = ChatTool()
# Mock the model to avoid actual API calls
with patch.object(tool, "create_model") as mock_create_model:
mock_model = MagicMock()
mock_response = MagicMock()
mock_response.candidates = [
MagicMock(
content=MagicMock(
parts=[MagicMock(text="This is a test response")]
),
finish_reason="STOP",
)
]
mock_model.generate_content.return_value = mock_response
mock_create_model.return_value = mock_model
result = await tool.execute({"prompt": normal_prompt})
assert len(result) == 1
output = json.loads(result[0].text)
assert output["status"] == "success"
assert "This is a test response" in output["content"]
@pytest.mark.asyncio
async def test_chat_prompt_file_handling(self, temp_prompt_file, large_prompt):
"""Test that chat tool correctly handles prompt.txt files."""
tool = ChatTool()
# Mock the model
with patch.object(tool, "create_model") as mock_create_model:
mock_model = MagicMock()
mock_response = MagicMock()
mock_response.candidates = [
MagicMock(
content=MagicMock(parts=[MagicMock(text="Processed large prompt")]),
finish_reason="STOP",
)
]
mock_model.generate_content.return_value = mock_response
mock_create_model.return_value = mock_model
# Mock read_file_content to avoid security checks
with patch("tools.base.read_file_content") as mock_read_file:
mock_read_file.return_value = large_prompt
# Execute with empty prompt and prompt.txt file
result = await tool.execute({"prompt": "", "files": [temp_prompt_file]})
assert len(result) == 1
output = json.loads(result[0].text)
assert output["status"] == "success"
# Verify read_file_content was called with the prompt file
mock_read_file.assert_called_once_with(temp_prompt_file)
# Verify the large content was used
call_args = mock_model.generate_content.call_args[0][0]
assert large_prompt in call_args
# Cleanup
temp_dir = os.path.dirname(temp_prompt_file)
shutil.rmtree(temp_dir)
@pytest.mark.asyncio
async def test_think_deeper_large_analysis(self, large_prompt):
"""Test that think_deeper tool detects large current_analysis."""
tool = ThinkDeeperTool()
result = await tool.execute({"current_analysis": large_prompt})
assert len(result) == 1
output = json.loads(result[0].text)
assert output["status"] == "requires_file_prompt"
@pytest.mark.asyncio
async def test_review_code_large_focus(self, large_prompt):
"""Test that review_code tool detects large focus_on field."""
tool = ReviewCodeTool()
result = await tool.execute(
{"files": ["/some/file.py"], "focus_on": large_prompt}
)
assert len(result) == 1
output = json.loads(result[0].text)
assert output["status"] == "requires_file_prompt"
@pytest.mark.asyncio
async def test_review_changes_large_original_request(self, large_prompt):
"""Test that review_changes tool detects large original_request."""
tool = ReviewChanges()
result = await tool.execute(
{"path": "/some/path", "original_request": large_prompt}
)
assert len(result) == 1
output = json.loads(result[0].text)
assert output["status"] == "requires_file_prompt"
@pytest.mark.asyncio
async def test_debug_issue_large_error_description(self, large_prompt):
"""Test that debug_issue tool detects large error_description."""
tool = DebugIssueTool()
result = await tool.execute({"error_description": large_prompt})
assert len(result) == 1
output = json.loads(result[0].text)
assert output["status"] == "requires_file_prompt"
@pytest.mark.asyncio
async def test_debug_issue_large_error_context(self, large_prompt, normal_prompt):
"""Test that debug_issue tool detects large error_context."""
tool = DebugIssueTool()
result = await tool.execute(
{"error_description": normal_prompt, "error_context": large_prompt}
)
assert len(result) == 1
output = json.loads(result[0].text)
assert output["status"] == "requires_file_prompt"
@pytest.mark.asyncio
async def test_analyze_large_question(self, large_prompt):
"""Test that analyze tool detects large question."""
tool = AnalyzeTool()
result = await tool.execute(
{"files": ["/some/file.py"], "question": large_prompt}
)
assert len(result) == 1
output = json.loads(result[0].text)
assert output["status"] == "requires_file_prompt"
@pytest.mark.asyncio
async def test_multiple_files_with_prompt_txt(self, temp_prompt_file):
"""Test handling of prompt.txt alongside other files."""
tool = ChatTool()
other_file = "/some/other/file.py"
with patch.object(tool, "create_model") as mock_create_model:
mock_model = MagicMock()
mock_response = MagicMock()
mock_response.candidates = [
MagicMock(
content=MagicMock(parts=[MagicMock(text="Success")]),
finish_reason="STOP",
)
]
mock_model.generate_content.return_value = mock_response
mock_create_model.return_value = mock_model
# Mock read_files to avoid file system access
with patch("tools.chat.read_files") as mock_read_files:
mock_read_files.return_value = ("File content", "Summary")
await tool.execute(
{"prompt": "", "files": [temp_prompt_file, other_file]}
)
# Verify prompt.txt was removed from files list
mock_read_files.assert_called_once()
files_arg = mock_read_files.call_args[0][0]
assert len(files_arg) == 1
assert files_arg[0] == other_file
temp_dir = os.path.dirname(temp_prompt_file)
shutil.rmtree(temp_dir)
@pytest.mark.asyncio
async def test_boundary_case_exactly_at_limit(self):
"""Test prompt exactly at MCP_PROMPT_SIZE_LIMIT characters (should pass)."""
tool = ChatTool()
exact_prompt = "x" * MCP_PROMPT_SIZE_LIMIT
with patch.object(tool, "create_model") as mock_create_model:
mock_model = MagicMock()
mock_response = MagicMock()
mock_response.candidates = [
MagicMock(
content=MagicMock(parts=[MagicMock(text="Success")]),
finish_reason="STOP",
)
]
mock_model.generate_content.return_value = mock_response
mock_create_model.return_value = mock_model
result = await tool.execute({"prompt": exact_prompt})
output = json.loads(result[0].text)
assert output["status"] == "success"
@pytest.mark.asyncio
async def test_boundary_case_just_over_limit(self):
"""Test prompt just over MCP_PROMPT_SIZE_LIMIT characters (should trigger file request)."""
tool = ChatTool()
over_prompt = "x" * (MCP_PROMPT_SIZE_LIMIT + 1)
result = await tool.execute({"prompt": over_prompt})
output = json.loads(result[0].text)
assert output["status"] == "requires_file_prompt"
@pytest.mark.asyncio
async def test_empty_prompt_no_file(self):
"""Test empty prompt without prompt.txt file."""
tool = ChatTool()
with patch.object(tool, "create_model") as mock_create_model:
mock_model = MagicMock()
mock_response = MagicMock()
mock_response.candidates = [
MagicMock(
content=MagicMock(parts=[MagicMock(text="Success")]),
finish_reason="STOP",
)
]
mock_model.generate_content.return_value = mock_response
mock_create_model.return_value = mock_model
result = await tool.execute({"prompt": ""})
output = json.loads(result[0].text)
assert output["status"] == "success"
@pytest.mark.asyncio
async def test_prompt_file_read_error(self):
"""Test handling when prompt.txt can't be read."""
tool = ChatTool()
bad_file = "/nonexistent/prompt.txt"
with patch.object(tool, "create_model") as mock_create_model:
mock_model = MagicMock()
mock_response = MagicMock()
mock_response.candidates = [
MagicMock(
content=MagicMock(parts=[MagicMock(text="Success")]),
finish_reason="STOP",
)
]
mock_model.generate_content.return_value = mock_response
mock_create_model.return_value = mock_model
# Should continue with empty prompt when file can't be read
result = await tool.execute({"prompt": "", "files": [bad_file]})
output = json.loads(result[0].text)
assert output["status"] == "success"
if __name__ == "__main__":
pytest.main([__file__, "-v"])


@@ -9,10 +9,10 @@ Note: These tests are excluded from regular pytest runs to avoid API rate limits
 They confirm that the google-genai library integration works correctly with live data.
 """
 
+import asyncio
 import os
 import sys
 import tempfile
-import asyncio
 from pathlib import Path
 
 # Add parent directory to path to allow imports


@@ -0,0 +1,333 @@
"""
Regression tests to ensure normal prompt handling still works after large prompt changes.
This test module verifies that all tools continue to work correctly with
normal-sized prompts after implementing the large prompt handling feature.
"""
import json
from unittest.mock import MagicMock, patch
import pytest
from tools.analyze import AnalyzeTool
from tools.chat import ChatTool
from tools.debug_issue import DebugIssueTool
from tools.review_changes import ReviewChanges
from tools.review_code import ReviewCodeTool
from tools.think_deeper import ThinkDeeperTool
class TestPromptRegression:
"""Regression test suite for normal prompt handling."""
@pytest.fixture
def mock_model_response(self):
"""Create a mock model response."""
def _create_response(text="Test response"):
mock_response = MagicMock()
mock_response.candidates = [
MagicMock(
content=MagicMock(parts=[MagicMock(text=text)]),
finish_reason="STOP",
)
]
return mock_response
return _create_response
@pytest.mark.asyncio
async def test_chat_normal_prompt(self, mock_model_response):
"""Test chat tool with normal prompt."""
tool = ChatTool()
with patch.object(tool, "create_model") as mock_create_model:
mock_model = MagicMock()
mock_model.generate_content.return_value = mock_model_response(
"This is a helpful response about Python."
)
mock_create_model.return_value = mock_model
result = await tool.execute({"prompt": "Explain Python decorators"})
assert len(result) == 1
output = json.loads(result[0].text)
assert output["status"] == "success"
assert "helpful response about Python" in output["content"]
# Verify model was called
mock_model.generate_content.assert_called_once()
@pytest.mark.asyncio
async def test_chat_with_files(self, mock_model_response):
"""Test chat tool with files parameter."""
tool = ChatTool()
with patch.object(tool, "create_model") as mock_create_model:
mock_model = MagicMock()
mock_model.generate_content.return_value = mock_model_response()
mock_create_model.return_value = mock_model
# Mock file reading
with patch("tools.chat.read_files") as mock_read_files:
mock_read_files.return_value = ("File content here", "Summary")
result = await tool.execute(
{"prompt": "Analyze this code", "files": ["/path/to/file.py"]}
)
assert len(result) == 1
output = json.loads(result[0].text)
assert output["status"] == "success"
mock_read_files.assert_called_once_with(["/path/to/file.py"])
@pytest.mark.asyncio
async def test_think_deeper_normal_analysis(self, mock_model_response):
"""Test think_deeper tool with normal analysis."""
tool = ThinkDeeperTool()
with patch.object(tool, "create_model") as mock_create_model:
mock_model = MagicMock()
mock_model.generate_content.return_value = mock_model_response(
"Here's a deeper analysis with edge cases..."
)
mock_create_model.return_value = mock_model
result = await tool.execute(
{
"current_analysis": "I think we should use a cache for performance",
"problem_context": "Building a high-traffic API",
"focus_areas": ["scalability", "reliability"],
}
)
assert len(result) == 1
output = json.loads(result[0].text)
assert output["status"] == "success"
assert "Extended Analysis by Gemini" in output["content"]
assert "deeper analysis" in output["content"]
@pytest.mark.asyncio
async def test_review_code_normal_review(self, mock_model_response):
"""Test review_code tool with normal inputs."""
tool = ReviewCodeTool()
with patch.object(tool, "create_model") as mock_create_model:
mock_model = MagicMock()
mock_model.generate_content.return_value = mock_model_response(
"Found 3 issues: 1) Missing error handling..."
)
mock_create_model.return_value = mock_model
# Mock file reading
with patch("tools.review_code.read_files") as mock_read_files:
mock_read_files.return_value = ("def main(): pass", "1 file")
result = await tool.execute(
{
"files": ["/path/to/code.py"],
"review_type": "security",
"focus_on": "Look for SQL injection vulnerabilities",
}
)
assert len(result) == 1
output = json.loads(result[0].text)
assert output["status"] == "success"
assert "Found 3 issues" in output["content"]
@pytest.mark.asyncio
async def test_review_changes_normal_request(self, mock_model_response):
"""Test review_changes tool with normal original_request."""
tool = ReviewChanges()
with patch.object(tool, "create_model") as mock_create_model:
mock_model = MagicMock()
mock_model.generate_content.return_value = mock_model_response(
"Changes look good, implementing feature as requested..."
)
mock_create_model.return_value = mock_model
# Mock git operations
with patch("tools.review_changes.find_git_repositories") as mock_find_repos:
with patch("tools.review_changes.get_git_status") as mock_git_status:
mock_find_repos.return_value = ["/path/to/repo"]
mock_git_status.return_value = {
"modified": ["file.py"],
"untracked": [],
}
result = await tool.execute(
{
"path": "/path/to/repo",
"original_request": "Add user authentication feature with JWT tokens",
}
)
assert len(result) == 1
output = json.loads(result[0].text)
assert output["status"] == "success"
@pytest.mark.asyncio
async def test_debug_issue_normal_error(self, mock_model_response):
"""Test debug_issue tool with normal error description."""
tool = DebugIssueTool()
with patch.object(tool, "create_model") as mock_create_model:
mock_model = MagicMock()
mock_model.generate_content.return_value = mock_model_response(
"Root cause: The variable is undefined. Fix: Initialize it..."
)
mock_create_model.return_value = mock_model
result = await tool.execute(
{
"error_description": "TypeError: Cannot read property 'name' of undefined",
"error_context": "at line 42 in user.js\n console.log(user.name)",
"runtime_info": "Node.js v16.14.0",
}
)
assert len(result) == 1
output = json.loads(result[0].text)
assert output["status"] == "success"
assert "Debug Analysis" in output["content"]
assert "Root cause" in output["content"]
@pytest.mark.asyncio
async def test_analyze_normal_question(self, mock_model_response):
"""Test analyze tool with normal question."""
tool = AnalyzeTool()
with patch.object(tool, "create_model") as mock_create_model:
mock_model = MagicMock()
mock_model.generate_content.return_value = mock_model_response(
"The code follows MVC pattern with clear separation..."
)
mock_create_model.return_value = mock_model
# Mock file reading
with patch("tools.analyze.read_files") as mock_read_files:
mock_read_files.return_value = ("class UserController: ...", "3 files")
result = await tool.execute(
{
"files": ["/path/to/project"],
"question": "What design patterns are used in this codebase?",
"analysis_type": "architecture",
}
)
assert len(result) == 1
output = json.loads(result[0].text)
assert output["status"] == "success"
assert "MVC pattern" in output["content"]
@pytest.mark.asyncio
async def test_empty_optional_fields(self, mock_model_response):
"""Test tools work with empty optional fields."""
tool = ChatTool()
with patch.object(tool, "create_model") as mock_create_model:
mock_model = MagicMock()
mock_model.generate_content.return_value = mock_model_response()
mock_create_model.return_value = mock_model
# Test with no files parameter
result = await tool.execute({"prompt": "Hello"})
assert len(result) == 1
output = json.loads(result[0].text)
assert output["status"] == "success"
@pytest.mark.asyncio
async def test_thinking_modes_work(self, mock_model_response):
"""Test that thinking modes are properly passed through."""
tool = ChatTool()
with patch.object(tool, "create_model") as mock_create_model:
mock_model = MagicMock()
mock_model.generate_content.return_value = mock_model_response()
mock_create_model.return_value = mock_model
result = await tool.execute(
{"prompt": "Test", "thinking_mode": "high", "temperature": 0.8}
)
assert len(result) == 1
output = json.loads(result[0].text)
assert output["status"] == "success"
# Verify create_model was called with correct parameters
mock_create_model.assert_called_once()
call_args = mock_create_model.call_args
assert call_args[0][2] == "high" # thinking_mode
assert call_args[0][1] == 0.8 # temperature
@pytest.mark.asyncio
async def test_special_characters_in_prompts(self, mock_model_response):
"""Test prompts with special characters work correctly."""
tool = ChatTool()
with patch.object(tool, "create_model") as mock_create_model:
mock_model = MagicMock()
mock_model.generate_content.return_value = mock_model_response()
mock_create_model.return_value = mock_model
special_prompt = 'Test with "quotes" and\nnewlines\tand tabs'
result = await tool.execute({"prompt": special_prompt})
assert len(result) == 1
output = json.loads(result[0].text)
assert output["status"] == "success"
@pytest.mark.asyncio
async def test_mixed_file_paths(self, mock_model_response):
"""Test handling of various file path formats."""
tool = AnalyzeTool()
with patch.object(tool, "create_model") as mock_create_model:
mock_model = MagicMock()
mock_model.generate_content.return_value = mock_model_response()
mock_create_model.return_value = mock_model
with patch("tools.analyze.read_files") as mock_read_files:
mock_read_files.return_value = ("Content", "Summary")
result = await tool.execute(
{
"files": [
"/absolute/path/file.py",
"/Users/name/project/src/",
"/home/user/code.js",
],
"question": "Analyze these files",
}
)
assert len(result) == 1
output = json.loads(result[0].text)
assert output["status"] == "success"
mock_read_files.assert_called_once()
@pytest.mark.asyncio
async def test_unicode_content(self, mock_model_response):
"""Test handling of unicode content in prompts."""
tool = ChatTool()
with patch.object(tool, "create_model") as mock_create_model:
mock_model = MagicMock()
mock_model.generate_content.return_value = mock_model_response()
mock_create_model.return_value = mock_model
unicode_prompt = "Explain this: 你好世界 مرحبا بالعالم"
result = await tool.execute({"prompt": unicode_prompt})
assert len(result) == 1
output = json.loads(result[0].text)
assert output["status"] == "success"
if __name__ == "__main__":
pytest.main([__file__, "-v"])


@@ -7,10 +7,7 @@ from unittest.mock import Mock, patch
 import pytest
 
-from tools.review_changes import (
-    ReviewChanges,
-    ReviewChangesRequest,
-)
+from tools.review_changes import ReviewChanges, ReviewChangesRequest
 
 class TestReviewChangesTool:


@@ -25,7 +25,7 @@ class TestThinkingModes:
     def test_default_thinking_modes(self):
         """Test that tools have correct default thinking modes"""
         tools = [
-            (ThinkDeeperTool(), "max"),
+            (ThinkDeeperTool(), "high"),
             (AnalyzeTool(), "medium"),
             (ReviewCodeTool(), "medium"),
             (DebugIssueTool(), "medium"),
@@ -156,14 +156,14 @@ class TestThinkingModes:
         result = await tool.execute(
             {
                 "current_analysis": "Initial analysis",
-                # Not specifying thinking_mode, should use default (max)
+                # Not specifying thinking_mode, should use default (high)
             }
         )
 
         # Verify create_model was called with default thinking_mode
         mock_create_model.assert_called_once()
         args = mock_create_model.call_args[0]
-        assert args[2] == "max"
+        assert args[2] == "high"
 
         assert "Extended Analysis by Gemini" in result[0].text


@@ -7,7 +7,8 @@ from unittest.mock import Mock, patch
 import pytest
 
-from tools import AnalyzeTool, DebugIssueTool, ReviewCodeTool, ThinkDeeperTool, ChatTool
+from tools import (AnalyzeTool, ChatTool, DebugIssueTool, ReviewCodeTool,
+                   ThinkDeeperTool)
 
 class TestThinkDeeperTool:
@@ -47,8 +48,11 @@ class TestThinkDeeperTool:
         )
         assert len(result) == 1
-        assert "Extended Analysis by Gemini:" in result[0].text
-        assert "Extended analysis" in result[0].text
+        # Parse the JSON response
+        output = json.loads(result[0].text)
+        assert output["status"] == "success"
+        assert "Extended Analysis by Gemini" in output["content"]
+        assert "Extended analysis" in output["content"]
 
 class TestReviewCodeTool:


@@ -2,7 +2,8 @@
 Tests for utility functions
 """
 
-from utils import check_token_limit, estimate_tokens, read_file_content, read_files
+from utils import (check_token_limit, estimate_tokens, read_file_content,
+                   read_files)
 
 class TestFileUtils: