feat: add review_pending_changes tool and enforce absolute path security

- Add new review_pending_changes tool for comprehensive pre-commit reviews
- Implement filesystem sandboxing with MCP_PROJECT_ROOT
- Enforce absolute paths for all file/directory operations
- Add comprehensive git utilities for repository management
- Update all tools to use centralized path validation
- Add extensive test coverage for new features and security model
- Update documentation with new tool and path requirements
- Remove obsolete demo and guide files

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
Fahad
2025-06-09 12:42:18 +04:00
parent 00b365f108
commit 7ee610938b
23 changed files with 1369 additions and 486 deletions

View File

@@ -15,6 +15,14 @@ if str(parent_dir) not in sys.path:
if "GEMINI_API_KEY" not in os.environ:
os.environ["GEMINI_API_KEY"] = "dummy-key-for-tests"
# Set MCP_PROJECT_ROOT to a temporary directory for tests
# This provides a safe sandbox for file operations during testing
import tempfile
# Create a temporary directory that will be used as the project root for all tests
test_root = tempfile.mkdtemp(prefix="gemini_mcp_test_")
os.environ["MCP_PROJECT_ROOT"] = test_root
# Configure asyncio for Windows compatibility
if sys.platform == "win32":
import asyncio
@@ -22,6 +30,26 @@ if sys.platform == "win32":
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
# Pytest fixtures
import pytest
@pytest.fixture
def project_path(tmp_path):
"""
Provides a temporary directory within the PROJECT_ROOT sandbox for tests.
This ensures all file operations during tests are within the allowed directory.
"""
# Get the test project root
test_root = Path(os.environ.get("MCP_PROJECT_ROOT", "/tmp"))
# Create a subdirectory for this specific test
test_dir = test_root / f"test_{tmp_path.name}"
test_dir.mkdir(parents=True, exist_ok=True)
return test_dir
# Pytest configuration
def pytest_configure(config):
"""Configure pytest with custom markers"""

View File

@@ -28,38 +28,47 @@ class TestDynamicContextRequests:
async def test_clarification_request_parsing(self, mock_create_model, analyze_tool):
"""Test that tools correctly parse clarification requests"""
# Mock model to return a clarification request
clarification_json = json.dumps({
"status": "requires_clarification",
"question": "I need to see the package.json file to understand dependencies",
"files_needed": ["package.json", "package-lock.json"]
})
clarification_json = json.dumps(
{
"status": "requires_clarification",
"question": "I need to see the package.json file to understand dependencies",
"files_needed": ["package.json", "package-lock.json"],
}
)
mock_model = Mock()
mock_model.generate_content.return_value = Mock(
candidates=[Mock(content=Mock(parts=[Mock(text=clarification_json)]))]
)
mock_create_model.return_value = mock_model
result = await analyze_tool.execute({
"files": ["src/index.js"],
"question": "Analyze the dependencies used in this project"
})
result = await analyze_tool.execute(
{
"files": ["/absolute/path/src/index.js"],
"question": "Analyze the dependencies used in this project",
}
)
assert len(result) == 1
# Parse the response
response_data = json.loads(result[0].text)
assert response_data["status"] == "requires_clarification"
assert response_data["content_type"] == "json"
# Parse the clarification request
clarification = json.loads(response_data["content"])
assert clarification["question"] == "I need to see the package.json file to understand dependencies"
assert (
clarification["question"]
== "I need to see the package.json file to understand dependencies"
)
assert clarification["files_needed"] == ["package.json", "package-lock.json"]
@pytest.mark.asyncio
@patch("tools.base.BaseTool.create_model")
async def test_normal_response_not_parsed_as_clarification(self, mock_create_model, debug_tool):
async def test_normal_response_not_parsed_as_clarification(
self, mock_create_model, debug_tool
):
"""Test that normal responses are not mistaken for clarification requests"""
normal_response = """
## Summary
@@ -70,19 +79,19 @@ class TestDynamicContextRequests:
### 1. Missing Import (Confidence: High)
**Root Cause:** The module 'utils' is not imported
"""
mock_model = Mock()
mock_model.generate_content.return_value = Mock(
candidates=[Mock(content=Mock(parts=[Mock(text=normal_response)]))]
)
mock_create_model.return_value = mock_model
result = await debug_tool.execute({
"error_description": "NameError: name 'utils' is not defined"
})
result = await debug_tool.execute(
{"error_description": "NameError: name 'utils' is not defined"}
)
assert len(result) == 1
# Parse the response
response_data = json.loads(result[0].text)
assert response_data["status"] == "success"
@@ -91,23 +100,26 @@ class TestDynamicContextRequests:
@pytest.mark.asyncio
@patch("tools.base.BaseTool.create_model")
async def test_malformed_clarification_request_treated_as_normal(self, mock_create_model, analyze_tool):
async def test_malformed_clarification_request_treated_as_normal(
self, mock_create_model, analyze_tool
):
"""Test that malformed JSON clarification requests are treated as normal responses"""
malformed_json = '{"status": "requires_clarification", "question": "Missing closing brace"'
malformed_json = (
'{"status": "requires_clarification", "question": "Missing closing brace"'
)
mock_model = Mock()
mock_model.generate_content.return_value = Mock(
candidates=[Mock(content=Mock(parts=[Mock(text=malformed_json)]))]
)
mock_create_model.return_value = mock_model
result = await analyze_tool.execute({
"files": ["test.py"],
"question": "What does this do?"
})
result = await analyze_tool.execute(
{"files": ["/absolute/path/test.py"], "question": "What does this do?"}
)
assert len(result) == 1
# Should be treated as normal response due to JSON parse error
response_data = json.loads(result[0].text)
assert response_data["status"] == "success"
@@ -115,37 +127,47 @@ class TestDynamicContextRequests:
@pytest.mark.asyncio
@patch("tools.base.BaseTool.create_model")
async def test_clarification_with_suggested_action(self, mock_create_model, debug_tool):
async def test_clarification_with_suggested_action(
self, mock_create_model, debug_tool
):
"""Test clarification request with suggested next action"""
clarification_json = json.dumps({
"status": "requires_clarification",
"question": "I need to see the database configuration to diagnose the connection error",
"files_needed": ["config/database.yml", "src/db.py"],
"suggested_next_action": {
"tool": "debug_issue",
"args": {
"error_description": "Connection timeout to database",
"files": ["config/database.yml", "src/db.py", "logs/error.log"]
}
clarification_json = json.dumps(
{
"status": "requires_clarification",
"question": "I need to see the database configuration to diagnose the connection error",
"files_needed": ["config/database.yml", "src/db.py"],
"suggested_next_action": {
"tool": "debug_issue",
"args": {
"error_description": "Connection timeout to database",
"files": [
"/config/database.yml",
"/src/db.py",
"/logs/error.log",
],
},
},
}
})
)
mock_model = Mock()
mock_model.generate_content.return_value = Mock(
candidates=[Mock(content=Mock(parts=[Mock(text=clarification_json)]))]
)
mock_create_model.return_value = mock_model
result = await debug_tool.execute({
"error_description": "Connection timeout to database",
"files": ["logs/error.log"]
})
result = await debug_tool.execute(
{
"error_description": "Connection timeout to database",
"files": ["/absolute/logs/error.log"],
}
)
assert len(result) == 1
response_data = json.loads(result[0].text)
assert response_data["status"] == "requires_clarification"
clarification = json.loads(response_data["content"])
assert "suggested_next_action" in clarification
assert clarification["suggested_next_action"]["tool"] == "debug_issue"
@@ -156,12 +178,12 @@ class TestDynamicContextRequests:
status="success",
content="Test content",
content_type="markdown",
metadata={"tool_name": "test", "execution_time": 1.5}
metadata={"tool_name": "test", "execution_time": 1.5},
)
json_str = output.model_dump_json()
parsed = json.loads(json_str)
assert parsed["status"] == "success"
assert parsed["content"] == "Test content"
assert parsed["content_type"] == "markdown"
@@ -172,9 +194,9 @@ class TestDynamicContextRequests:
request = ClarificationRequest(
question="Need more context",
files_needed=["file1.py", "file2.py"],
suggested_next_action={"tool": "analyze", "args": {}}
suggested_next_action={"tool": "analyze", "args": {}},
)
assert request.question == "Need more context"
assert len(request.files_needed) == 2
assert request.suggested_next_action["tool"] == "analyze"
@@ -185,13 +207,12 @@ class TestDynamicContextRequests:
"""Test error response format"""
mock_create_model.side_effect = Exception("API connection failed")
result = await analyze_tool.execute({
"files": ["test.py"],
"question": "Analyze this"
})
result = await analyze_tool.execute(
{"files": ["/absolute/path/test.py"], "question": "Analyze this"}
)
assert len(result) == 1
response_data = json.loads(result[0].text)
assert response_data["status"] == "error"
assert "API connection failed" in response_data["content"]
@@ -206,14 +227,16 @@ class TestCollaborationWorkflow:
async def test_dependency_analysis_triggers_clarification(self, mock_create_model):
"""Test that asking about dependencies without package files triggers clarification"""
tool = AnalyzeTool()
# Mock Gemini to request package.json when asked about dependencies
clarification_json = json.dumps({
"status": "requires_clarification",
"question": "I need to see the package.json file to analyze npm dependencies",
"files_needed": ["package.json", "package-lock.json"]
})
clarification_json = json.dumps(
{
"status": "requires_clarification",
"question": "I need to see the package.json file to analyze npm dependencies",
"files_needed": ["package.json", "package-lock.json"],
}
)
mock_model = Mock()
mock_model.generate_content.return_value = Mock(
candidates=[Mock(content=Mock(parts=[Mock(text=clarification_json)]))]
@@ -221,46 +244,54 @@ class TestCollaborationWorkflow:
mock_create_model.return_value = mock_model
# Ask about dependencies with only source files
result = await tool.execute({
"files": ["src/index.js"],
"question": "What npm packages and versions does this project use?"
})
result = await tool.execute(
{
"files": ["/absolute/path/src/index.js"],
"question": "What npm packages and versions does this project use?",
}
)
response = json.loads(result[0].text)
assert response["status"] == "requires_clarification", \
"Should request clarification when asked about dependencies without package files"
assert (
response["status"] == "requires_clarification"
), "Should request clarification when asked about dependencies without package files"
clarification = json.loads(response["content"])
assert "package.json" in str(clarification["files_needed"]), \
"Should specifically request package.json"
assert "package.json" in str(
clarification["files_needed"]
), "Should specifically request package.json"
@pytest.mark.asyncio
@patch("tools.base.BaseTool.create_model")
async def test_multi_step_collaboration(self, mock_create_model):
"""Test a multi-step collaboration workflow"""
tool = DebugIssueTool()
# Step 1: Initial request returns clarification needed
clarification_json = json.dumps({
"status": "requires_clarification",
"question": "I need to see the configuration file to understand the connection settings",
"files_needed": ["config.py"]
})
clarification_json = json.dumps(
{
"status": "requires_clarification",
"question": "I need to see the configuration file to understand the connection settings",
"files_needed": ["config.py"],
}
)
mock_model = Mock()
mock_model.generate_content.return_value = Mock(
candidates=[Mock(content=Mock(parts=[Mock(text=clarification_json)]))]
)
mock_create_model.return_value = mock_model
result1 = await tool.execute({
"error_description": "Database connection timeout",
"error_context": "Timeout after 30s"
})
result1 = await tool.execute(
{
"error_description": "Database connection timeout",
"error_context": "Timeout after 30s",
}
)
response1 = json.loads(result1[0].text)
assert response1["status"] == "requires_clarification"
# Step 2: Claude would provide additional context and re-invoke
# This simulates the second call with more context
final_response = """
@@ -272,17 +303,19 @@ class TestCollaborationWorkflow:
### 1. Incorrect Database Host (Confidence: High)
**Root Cause:** The config.py file shows the database host is set to 'localhost' but the database is running on a different server.
"""
mock_model.generate_content.return_value = Mock(
candidates=[Mock(content=Mock(parts=[Mock(text=final_response)]))]
)
result2 = await tool.execute({
"error_description": "Database connection timeout",
"error_context": "Timeout after 30s",
"files": ["config.py"] # Additional context provided
})
result2 = await tool.execute(
{
"error_description": "Database connection timeout",
"error_context": "Timeout after 30s",
"files": ["/absolute/path/config.py"], # Additional context provided
}
)
response2 = json.loads(result2[0].text)
assert response2["status"] == "success"
assert "incorrect host configuration" in response2["content"].lower()
assert "incorrect host configuration" in response2["content"].lower()

View File

@@ -77,39 +77,50 @@ async def run_manual_live_tests():
# Test collaboration/clarification request
print("\n🔄 Testing dynamic context request (collaboration)...")
# Create a specific test case designed to trigger clarification
# We'll use analyze tool with a question that requires seeing files
analyze_tool = AnalyzeTool()
# Ask about dependencies without providing package files
result = await analyze_tool.execute({
"files": [temp_path], # Only Python file, no package.json
"question": "What npm packages and their versions does this project depend on? List all dependencies.",
"thinking_mode": "minimal" # Fast test
})
result = await analyze_tool.execute(
{
"files": [temp_path], # Only Python file, no package.json
"question": "What npm packages and their versions does this project depend on? List all dependencies.",
"thinking_mode": "minimal", # Fast test
}
)
if result and result[0].text:
response_data = json.loads(result[0].text)
print(f" Response status: {response_data['status']}")
if response_data['status'] == 'requires_clarification':
if response_data["status"] == "requires_clarification":
print("✅ Dynamic context request successfully triggered!")
clarification = json.loads(response_data['content'])
clarification = json.loads(response_data["content"])
print(f" Gemini asks: {clarification.get('question', 'N/A')}")
if 'files_needed' in clarification:
if "files_needed" in clarification:
print(f" Files requested: {clarification['files_needed']}")
# Verify it's asking for package-related files
expected_files = ['package.json', 'package-lock.json', 'yarn.lock']
if any(f in str(clarification['files_needed']) for f in expected_files):
expected_files = [
"package.json",
"package-lock.json",
"yarn.lock",
]
if any(
f in str(clarification["files_needed"])
for f in expected_files
):
print(" ✅ Correctly identified missing package files!")
else:
print(" ⚠️ Unexpected files requested")
else:
# This is a failure - we specifically designed this to need clarification
print("❌ Expected clarification request but got direct response")
print(" This suggests the dynamic context feature may not be working")
print(" Response:", response_data.get('content', '')[:200])
print(
" This suggests the dynamic context feature may not be working"
)
print(" Response:", response_data.get("content", "")[:200])
return False
else:
print("❌ Collaboration test failed - no response")

View File

@@ -0,0 +1,255 @@
"""
Tests for the review_pending_changes tool
"""
import json
from unittest.mock import MagicMock, Mock, patch
import pytest
from tools.review_pending_changes import (
ReviewPendingChanges,
ReviewPendingChangesRequest,
)
class TestReviewPendingChangesTool:
"""Test the review_pending_changes tool"""
@pytest.fixture
def tool(self):
"""Create tool instance"""
return ReviewPendingChanges()
def test_tool_metadata(self, tool):
"""Test tool metadata"""
assert tool.get_name() == "review_pending_changes"
assert "REVIEW PENDING GIT CHANGES" in tool.get_description()
assert "pre-commit review" in tool.get_description()
# Check schema
schema = tool.get_input_schema()
assert schema["type"] == "object"
assert "path" in schema["properties"]
assert "original_request" in schema["properties"]
assert "compare_to" in schema["properties"]
assert "review_type" in schema["properties"]
def test_request_model_defaults(self):
"""Test request model default values"""
request = ReviewPendingChangesRequest(path="/some/absolute/path")
assert request.path == "/some/absolute/path"
assert request.original_request is None
assert request.compare_to is None
assert request.include_staged is True
assert request.include_unstaged is True
assert request.review_type == "full"
assert request.severity_filter == "all"
assert request.max_depth == 5
def test_sanitize_filename(self, tool):
"""Test filename sanitization"""
# Test path separators
assert tool._sanitize_filename("src/main.py") == "src_main.py"
assert tool._sanitize_filename("src\\main.py") == "src_main.py"
# Test spaces
assert tool._sanitize_filename("my file.py") == "my_file.py"
# Test special characters
assert tool._sanitize_filename("file@#$.py") == "file.py"
# Test length limit
long_name = "a" * 150
sanitized = tool._sanitize_filename(long_name)
assert len(sanitized) == 100
@pytest.mark.asyncio
async def test_relative_path_rejected(self, tool):
"""Test that relative paths are rejected"""
result = await tool.execute(
{"path": "./relative/path", "original_request": "Test"}
)
assert len(result) == 1
response = json.loads(result[0].text)
assert response["status"] == "error"
assert "must be absolute" in response["content"]
assert "./relative/path" in response["content"]
@pytest.mark.asyncio
@patch("tools.review_pending_changes.find_git_repositories")
async def test_no_repositories_found(self, mock_find_repos, tool):
"""Test when no git repositories are found"""
mock_find_repos.return_value = []
request = ReviewPendingChangesRequest(path="/absolute/path/no-git")
result = await tool.prepare_prompt(request)
assert result == "No git repositories found in the specified path."
mock_find_repos.assert_called_once_with("/absolute/path/no-git", 5)
@pytest.mark.asyncio
@patch("tools.review_pending_changes.find_git_repositories")
@patch("tools.review_pending_changes.get_git_status")
@patch("tools.review_pending_changes.run_git_command")
async def test_no_changes_found(
self, mock_run_git, mock_status, mock_find_repos, tool
):
"""Test when repositories have no changes"""
mock_find_repos.return_value = ["/test/repo"]
mock_status.return_value = {
"branch": "main",
"ahead": 0,
"behind": 0,
"staged_files": [],
"unstaged_files": [],
"untracked_files": [],
}
# No staged or unstaged files
mock_run_git.side_effect = [
(True, ""), # staged files (empty)
(True, ""), # unstaged files (empty)
]
request = ReviewPendingChangesRequest(path="/absolute/repo/path")
result = await tool.prepare_prompt(request)
assert result == "No pending changes found in any of the git repositories."
@pytest.mark.asyncio
@patch("tools.review_pending_changes.find_git_repositories")
@patch("tools.review_pending_changes.get_git_status")
@patch("tools.review_pending_changes.run_git_command")
async def test_staged_changes_review(
self,
mock_run_git,
mock_status,
mock_find_repos,
tool,
):
"""Test reviewing staged changes"""
mock_find_repos.return_value = ["/test/repo"]
mock_status.return_value = {
"branch": "feature",
"ahead": 1,
"behind": 0,
"staged_files": ["main.py"],
"unstaged_files": [],
"untracked_files": [],
}
# Mock git commands
mock_run_git.side_effect = [
(True, "main.py\n"), # staged files
(
True,
"diff --git a/main.py b/main.py\n+print('hello')",
), # diff for main.py
(True, ""), # unstaged files (empty)
]
request = ReviewPendingChangesRequest(
path="/absolute/repo/path",
original_request="Add hello message",
review_type="security",
)
result = await tool.prepare_prompt(request)
# Verify result structure
assert "## Original Request/Ticket" in result
assert "Add hello message" in result
assert "## Review Parameters" in result
assert "Review Type: security" in result
assert "## Repository Changes Summary" in result
assert "Branch: feature" in result
assert "## Git Diffs" in result
@pytest.mark.asyncio
@patch("tools.review_pending_changes.find_git_repositories")
@patch("tools.review_pending_changes.get_git_status")
@patch("tools.review_pending_changes.run_git_command")
async def test_compare_to_invalid_ref(
self, mock_run_git, mock_status, mock_find_repos, tool
):
"""Test comparing to an invalid git ref"""
mock_find_repos.return_value = ["/test/repo"]
mock_status.return_value = {"branch": "main"}
# Mock git commands - ref validation fails
mock_run_git.side_effect = [
(False, "fatal: not a valid ref"), # rev-parse fails
]
request = ReviewPendingChangesRequest(
path="/absolute/repo/path", compare_to="invalid-branch"
)
result = await tool.prepare_prompt(request)
# When all repos have errors and no changes, we get this message
assert "No pending changes found in any of the git repositories." in result
@pytest.mark.asyncio
@patch("tools.review_pending_changes.ReviewPendingChanges.execute")
async def test_execute_integration(self, mock_execute, tool):
"""Test execute method integration"""
# Mock the execute to return a standardized response
mock_execute.return_value = [
Mock(
text='{"status": "success", "content": "Review complete", "content_type": "text"}'
)
]
result = await tool.execute({"path": ".", "review_type": "full"})
assert len(result) == 1
mock_execute.assert_called_once()
def test_default_temperature(self, tool):
"""Test default temperature setting"""
from config import TEMPERATURE_ANALYTICAL
assert tool.get_default_temperature() == TEMPERATURE_ANALYTICAL
@pytest.mark.asyncio
@patch("tools.review_pending_changes.find_git_repositories")
@patch("tools.review_pending_changes.get_git_status")
@patch("tools.review_pending_changes.run_git_command")
async def test_mixed_staged_unstaged_changes(
self,
mock_run_git,
mock_status,
mock_find_repos,
tool,
):
"""Test reviewing both staged and unstaged changes"""
mock_find_repos.return_value = ["/test/repo"]
mock_status.return_value = {
"branch": "develop",
"ahead": 2,
"behind": 1,
"staged_files": ["file1.py"],
"unstaged_files": ["file2.py"],
}
# Mock git commands
mock_run_git.side_effect = [
(True, "file1.py\n"), # staged files
(True, "diff --git a/file1.py..."), # diff for file1.py
(True, "file2.py\n"), # unstaged files
(True, "diff --git a/file2.py..."), # diff for file2.py
]
request = ReviewPendingChangesRequest(
path="/absolute/repo/path",
focus_on="error handling",
severity_filter="high",
)
result = await tool.prepare_prompt(request)
# Verify all sections are present
assert "Review Type: full" in result
assert "Severity Filter: high" in result
assert "Focus Areas: error handling" in result
assert "Reviewing: staged and unstaged changes" in result

View File

@@ -25,11 +25,12 @@ class TestServerTools:
assert "debug_issue" in tool_names
assert "analyze" in tool_names
assert "chat" in tool_names
assert "review_pending_changes" in tool_names
assert "list_models" in tool_names
assert "get_version" in tool_names
# Should have exactly 7 tools
assert len(tools) == 7
# Should have exactly 8 tools
assert len(tools) == 8
# Check descriptions are verbose
for tool in tools:

View File

@@ -51,7 +51,7 @@ class TestThinkingModes:
tool = AnalyzeTool()
result = await tool.execute(
{
"files": ["test.py"],
"files": ["/absolute/path/test.py"],
"question": "What is this?",
"thinking_mode": "minimal",
}
@@ -80,7 +80,9 @@ class TestThinkingModes:
mock_create_model.return_value = mock_model
tool = ReviewCodeTool()
result = await tool.execute({"files": ["test.py"], "thinking_mode": "low"})
result = await tool.execute(
{"files": ["/absolute/path/test.py"], "thinking_mode": "low"}
)
# Verify create_model was called with correct thinking_mode
mock_create_model.assert_called_once()
@@ -129,7 +131,7 @@ class TestThinkingModes:
tool = AnalyzeTool()
await tool.execute(
{
"files": ["complex.py"],
"files": ["/absolute/path/complex.py"],
"question": "Analyze architecture",
"thinking_mode": "high",
}

View File

@@ -2,11 +2,12 @@
Tests for individual tool implementations
"""
import json
from unittest.mock import Mock, patch
import pytest
from tools import AnalyzeTool, DebugIssueTool, ReviewCodeTool, ThinkDeeperTool
from tools import AnalyzeTool, DebugIssueTool, ReviewCodeTool, ThinkDeeperTool, ChatTool
class TestThinkDeeperTool:
@@ -187,3 +188,110 @@ class TestAnalyzeTool:
assert "ARCHITECTURE Analysis" in result[0].text
assert "Analyzed 1 file(s)" in result[0].text
assert "Architecture analysis" in result[0].text
class TestAbsolutePathValidation:
"""Test absolute path validation across all tools"""
@pytest.mark.asyncio
async def test_analyze_tool_relative_path_rejected(self):
"""Test that analyze tool rejects relative paths"""
tool = AnalyzeTool()
result = await tool.execute(
{
"files": ["./relative/path.py", "/absolute/path.py"],
"question": "What does this do?",
}
)
assert len(result) == 1
response = json.loads(result[0].text)
assert response["status"] == "error"
assert "must be absolute" in response["content"]
assert "./relative/path.py" in response["content"]
@pytest.mark.asyncio
async def test_review_code_tool_relative_path_rejected(self):
"""Test that review_code tool rejects relative paths"""
tool = ReviewCodeTool()
result = await tool.execute(
{"files": ["../parent/file.py"], "review_type": "full"}
)
assert len(result) == 1
response = json.loads(result[0].text)
assert response["status"] == "error"
assert "must be absolute" in response["content"]
assert "../parent/file.py" in response["content"]
@pytest.mark.asyncio
async def test_debug_issue_tool_relative_path_rejected(self):
"""Test that debug_issue tool rejects relative paths"""
tool = DebugIssueTool()
result = await tool.execute(
{
"error_description": "Something broke",
"files": ["src/main.py"], # relative path
}
)
assert len(result) == 1
response = json.loads(result[0].text)
assert response["status"] == "error"
assert "must be absolute" in response["content"]
assert "src/main.py" in response["content"]
@pytest.mark.asyncio
async def test_think_deeper_tool_relative_path_rejected(self):
"""Test that think_deeper tool rejects relative paths"""
tool = ThinkDeeperTool()
result = await tool.execute(
{"current_analysis": "My analysis", "files": ["./local/file.py"]}
)
assert len(result) == 1
response = json.loads(result[0].text)
assert response["status"] == "error"
assert "must be absolute" in response["content"]
assert "./local/file.py" in response["content"]
@pytest.mark.asyncio
async def test_chat_tool_relative_path_rejected(self):
"""Test that chat tool rejects relative paths"""
tool = ChatTool()
result = await tool.execute(
{
"prompt": "Explain this code",
"files": ["code.py"], # relative path without ./
}
)
assert len(result) == 1
response = json.loads(result[0].text)
assert response["status"] == "error"
assert "must be absolute" in response["content"]
assert "code.py" in response["content"]
@pytest.mark.asyncio
@patch("tools.AnalyzeTool.create_model")
async def test_analyze_tool_accepts_absolute_paths(self, mock_model):
"""Test that analyze tool accepts absolute paths"""
tool = AnalyzeTool()
# Mock the model response
mock_response = Mock()
mock_response.candidates = [Mock()]
mock_response.candidates[0].content.parts = [Mock(text="Analysis complete")]
mock_instance = Mock()
mock_instance.generate_content.return_value = mock_response
mock_model.return_value = mock_instance
result = await tool.execute(
{"files": ["/absolute/path/file.py"], "question": "What does this do?"}
)
assert len(result) == 1
response = json.loads(result[0].text)
assert response["status"] == "success"
assert "Analysis complete" in response["content"]

View File

@@ -8,9 +8,9 @@ from utils import check_token_limit, estimate_tokens, read_file_content, read_fi
class TestFileUtils:
"""Test file reading utilities"""
def test_read_file_content_success(self, tmp_path):
def test_read_file_content_success(self, project_path):
"""Test successful file reading"""
test_file = tmp_path / "test.py"
test_file = project_path / "test.py"
test_file.write_text("def hello():\n return 'world'", encoding="utf-8")
content, tokens = read_file_content(str(test_file))
@@ -20,25 +20,43 @@ class TestFileUtils:
assert "return 'world'" in content
assert tokens > 0 # Should have estimated tokens
def test_read_file_content_not_found(self):
def test_read_file_content_not_found(self, project_path):
"""Test reading non-existent file"""
content, tokens = read_file_content("/nonexistent/file.py")
# Use a non-existent file within the project path
nonexistent = project_path / "nonexistent" / "file.py"
content, tokens = read_file_content(str(nonexistent))
assert "--- FILE NOT FOUND:" in content
assert "Error: File does not exist" in content
assert tokens > 0
def test_read_file_content_directory(self, tmp_path):
def test_read_file_content_outside_project_root(self):
"""Test that paths outside project root are rejected"""
# Try to read a file outside the project root
content, tokens = read_file_content("/etc/passwd")
assert "--- ERROR ACCESSING FILE:" in content
assert "Path outside project root" in content
assert tokens > 0
def test_read_file_content_relative_path_rejected(self):
"""Test that relative paths are rejected"""
# Try to use a relative path
content, tokens = read_file_content("./some/relative/path.py")
assert "--- ERROR ACCESSING FILE:" in content
assert "Relative paths are not supported" in content
assert tokens > 0
def test_read_file_content_directory(self, project_path):
"""Test reading a directory"""
content, tokens = read_file_content(str(tmp_path))
content, tokens = read_file_content(str(project_path))
assert "--- NOT A FILE:" in content
assert "Error: Path is not a file" in content
assert tokens > 0
def test_read_files_multiple(self, tmp_path):
def test_read_files_multiple(self, project_path):
"""Test reading multiple files"""
file1 = tmp_path / "file1.py"
file1 = project_path / "file1.py"
file1.write_text("print('file1')", encoding="utf-8")
file2 = tmp_path / "file2.py"
file2 = project_path / "file2.py"
file2.write_text("print('file2')", encoding="utf-8")
content, summary = read_files([str(file1), str(file2)])
@@ -62,23 +80,23 @@ class TestFileUtils:
assert "Direct code:" in summary
def test_read_files_directory_support(self, tmp_path):
def test_read_files_directory_support(self, project_path):
"""Test reading all files from a directory"""
# Create directory structure
(tmp_path / "file1.py").write_text("print('file1')", encoding="utf-8")
(tmp_path / "file2.js").write_text("console.log('file2')", encoding="utf-8")
(tmp_path / "readme.md").write_text("# README", encoding="utf-8")
(project_path / "file1.py").write_text("print('file1')", encoding="utf-8")
(project_path / "file2.js").write_text("console.log('file2')", encoding="utf-8")
(project_path / "readme.md").write_text("# README", encoding="utf-8")
# Create subdirectory
subdir = tmp_path / "src"
subdir = project_path / "src"
subdir.mkdir()
(subdir / "module.py").write_text("class Module: pass", encoding="utf-8")
# Create hidden file (should be skipped)
(tmp_path / ".hidden").write_text("secret", encoding="utf-8")
(project_path / ".hidden").write_text("secret", encoding="utf-8")
# Read the directory
content, summary = read_files([str(tmp_path)])
content, summary = read_files([str(project_path)])
# Check files are included
assert "file1.py" in content
@@ -102,14 +120,14 @@ class TestFileUtils:
assert "Processed 1 dir(s)" in summary
assert "Read 4 file(s)" in summary
def test_read_files_mixed_paths(self, tmp_path):
def test_read_files_mixed_paths(self, project_path):
"""Test reading mix of files and directories"""
# Create files
file1 = tmp_path / "direct.py"
file1 = project_path / "direct.py"
file1.write_text("# Direct file", encoding="utf-8")
# Create directory with files
subdir = tmp_path / "subdir"
subdir = project_path / "subdir"
subdir.mkdir()
(subdir / "sub1.py").write_text("# Sub file 1", encoding="utf-8")
(subdir / "sub2.py").write_text("# Sub file 2", encoding="utf-8")
@@ -127,19 +145,19 @@ class TestFileUtils:
assert "Processed 1 dir(s)" in summary
assert "Read 3 file(s)" in summary
def test_read_files_token_limit(self, tmp_path):
def test_read_files_token_limit(self, project_path):
"""Test token limit handling"""
# Create files with known token counts
# ~250 tokens each (1000 chars)
large_content = "x" * 1000
for i in range(5):
(tmp_path / f"file{i}.txt").write_text(large_content, encoding="utf-8")
(project_path / f"file{i}.txt").write_text(large_content, encoding="utf-8")
# Read with small token limit (should skip some files)
# Reserve 50k tokens, limit to 51k total = 1k available
# Each file ~250 tokens, so should read ~3-4 files
content, summary = read_files([str(tmp_path)], max_tokens=51_000)
content, summary = read_files([str(project_path)], max_tokens=51_000)
assert "Skipped" in summary
assert "token limit" in summary
@@ -149,10 +167,10 @@ class TestFileUtils:
read_count = content.count("--- BEGIN FILE:")
assert 2 <= read_count <= 4 # Should read some but not all
def test_read_files_large_file(self, tmp_path):
def test_read_files_large_file(self, project_path):
"""Test handling of large files"""
# Create a file larger than max_size (1MB)
large_file = tmp_path / "large.txt"
large_file = project_path / "large.txt"
large_file.write_text("x" * 2_000_000, encoding="utf-8") # 2MB
content, summary = read_files([str(large_file)])
@@ -161,15 +179,15 @@ class TestFileUtils:
assert "2,000,000 bytes" in content
assert "Read 1 file(s)" in summary # File is counted but shows error message
def test_read_files_file_extensions(self, tmp_path):
def test_read_files_file_extensions(self, project_path):
"""Test file extension filtering"""
# Create various file types
(tmp_path / "code.py").write_text("python", encoding="utf-8")
(tmp_path / "style.css").write_text("css", encoding="utf-8")
(tmp_path / "binary.exe").write_text("exe", encoding="utf-8")
(tmp_path / "image.jpg").write_text("jpg", encoding="utf-8")
(project_path / "code.py").write_text("python", encoding="utf-8")
(project_path / "style.css").write_text("css", encoding="utf-8")
(project_path / "binary.exe").write_text("exe", encoding="utf-8")
(project_path / "image.jpg").write_text("jpg", encoding="utf-8")
content, summary = read_files([str(tmp_path)])
content, summary = read_files([str(project_path)])
# Code files should be included
assert "code.py" in content