feat: add review_pending_changes tool and enforce absolute path security

- Add new review_pending_changes tool for comprehensive pre-commit reviews
- Implement filesystem sandboxing with MCP_PROJECT_ROOT
- Enforce absolute paths for all file/directory operations
- Add comprehensive git utilities for repository management
- Update all tools to use centralized path validation
- Add extensive test coverage for new features and security model
- Update documentation with new tool and path requirements
- Remove obsolete demo and guide files

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
Author: Fahad
Date:   2025-06-09 12:42:18 +04:00
Parent: 00b365f108
Commit: 7ee610938b

23 changed files with 1369 additions and 486 deletions
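The diff below covers only the test suite, so the security model itself is not shown. As a minimal sketch of the sandboxing and absolute-path rules the commit message describes, assuming a helper named `validate_path` and plain standard-library behaviour (the helper name, module layout, and error messages are illustrative assumptions, not code from this commit):

```python
import os
from pathlib import Path

# Hypothetical sandbox helper; the commit's actual implementation may differ.
# MCP_PROJECT_ROOT is the environment variable named in the commit message.
PROJECT_ROOT = Path(os.environ.get("MCP_PROJECT_ROOT", os.getcwd())).resolve()


def validate_path(path_str: str) -> Path:
    """Reject relative paths and any path that escapes the project sandbox."""
    path = Path(path_str)
    if not path.is_absolute():
        # All file/directory arguments must be absolute paths.
        raise ValueError(f"Relative paths are not allowed: {path_str}")
    resolved = path.resolve()  # collapse ".." segments and symlinks
    # Path.is_relative_to requires Python 3.9+.
    if not resolved.is_relative_to(PROJECT_ROOT):
        raise PermissionError(f"Path escapes MCP_PROJECT_ROOT: {path_str}")
    return resolved
```

Under a rule like this, relative arguments such as `src/index.js` are rejected, which is why the updated tests below pass absolute paths like `/absolute/path/src/index.js`.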


@@ -28,38 +28,47 @@ class TestDynamicContextRequests:
     async def test_clarification_request_parsing(self, mock_create_model, analyze_tool):
         """Test that tools correctly parse clarification requests"""
         # Mock model to return a clarification request
-        clarification_json = json.dumps({
-            "status": "requires_clarification",
-            "question": "I need to see the package.json file to understand dependencies",
-            "files_needed": ["package.json", "package-lock.json"]
-        })
+        clarification_json = json.dumps(
+            {
+                "status": "requires_clarification",
+                "question": "I need to see the package.json file to understand dependencies",
+                "files_needed": ["package.json", "package-lock.json"],
+            }
+        )

         mock_model = Mock()
         mock_model.generate_content.return_value = Mock(
             candidates=[Mock(content=Mock(parts=[Mock(text=clarification_json)]))]
         )
         mock_create_model.return_value = mock_model

-        result = await analyze_tool.execute({
-            "files": ["src/index.js"],
-            "question": "Analyze the dependencies used in this project"
-        })
+        result = await analyze_tool.execute(
+            {
+                "files": ["/absolute/path/src/index.js"],
+                "question": "Analyze the dependencies used in this project",
+            }
+        )

         assert len(result) == 1

         # Parse the response
         response_data = json.loads(result[0].text)
         assert response_data["status"] == "requires_clarification"
         assert response_data["content_type"] == "json"

         # Parse the clarification request
         clarification = json.loads(response_data["content"])
-        assert clarification["question"] == "I need to see the package.json file to understand dependencies"
+        assert (
+            clarification["question"]
+            == "I need to see the package.json file to understand dependencies"
+        )
         assert clarification["files_needed"] == ["package.json", "package-lock.json"]

     @pytest.mark.asyncio
     @patch("tools.base.BaseTool.create_model")
-    async def test_normal_response_not_parsed_as_clarification(self, mock_create_model, debug_tool):
+    async def test_normal_response_not_parsed_as_clarification(
+        self, mock_create_model, debug_tool
+    ):
         """Test that normal responses are not mistaken for clarification requests"""
         normal_response = """
         ## Summary
@@ -70,19 +79,19 @@ class TestDynamicContextRequests:
         ### 1. Missing Import (Confidence: High)
         **Root Cause:** The module 'utils' is not imported
         """

         mock_model = Mock()
         mock_model.generate_content.return_value = Mock(
             candidates=[Mock(content=Mock(parts=[Mock(text=normal_response)]))]
         )
         mock_create_model.return_value = mock_model

-        result = await debug_tool.execute({
-            "error_description": "NameError: name 'utils' is not defined"
-        })
+        result = await debug_tool.execute(
+            {"error_description": "NameError: name 'utils' is not defined"}
+        )

         assert len(result) == 1

         # Parse the response
         response_data = json.loads(result[0].text)
         assert response_data["status"] == "success"
@@ -91,23 +100,26 @@ class TestDynamicContextRequests:

     @pytest.mark.asyncio
     @patch("tools.base.BaseTool.create_model")
-    async def test_malformed_clarification_request_treated_as_normal(self, mock_create_model, analyze_tool):
+    async def test_malformed_clarification_request_treated_as_normal(
+        self, mock_create_model, analyze_tool
+    ):
         """Test that malformed JSON clarification requests are treated as normal responses"""
-        malformed_json = '{"status": "requires_clarification", "question": "Missing closing brace"'
+        malformed_json = (
+            '{"status": "requires_clarification", "question": "Missing closing brace"'
+        )

         mock_model = Mock()
         mock_model.generate_content.return_value = Mock(
             candidates=[Mock(content=Mock(parts=[Mock(text=malformed_json)]))]
         )
         mock_create_model.return_value = mock_model

-        result = await analyze_tool.execute({
-            "files": ["test.py"],
-            "question": "What does this do?"
-        })
+        result = await analyze_tool.execute(
+            {"files": ["/absolute/path/test.py"], "question": "What does this do?"}
+        )

         assert len(result) == 1

         # Should be treated as normal response due to JSON parse error
         response_data = json.loads(result[0].text)
         assert response_data["status"] == "success"
@@ -115,37 +127,47 @@ class TestDynamicContextRequests:

     @pytest.mark.asyncio
     @patch("tools.base.BaseTool.create_model")
-    async def test_clarification_with_suggested_action(self, mock_create_model, debug_tool):
+    async def test_clarification_with_suggested_action(
+        self, mock_create_model, debug_tool
+    ):
         """Test clarification request with suggested next action"""
-        clarification_json = json.dumps({
-            "status": "requires_clarification",
-            "question": "I need to see the database configuration to diagnose the connection error",
-            "files_needed": ["config/database.yml", "src/db.py"],
-            "suggested_next_action": {
-                "tool": "debug_issue",
-                "args": {
-                    "error_description": "Connection timeout to database",
-                    "files": ["config/database.yml", "src/db.py", "logs/error.log"]
-                }
+        clarification_json = json.dumps(
+            {
+                "status": "requires_clarification",
+                "question": "I need to see the database configuration to diagnose the connection error",
+                "files_needed": ["config/database.yml", "src/db.py"],
+                "suggested_next_action": {
+                    "tool": "debug_issue",
+                    "args": {
+                        "error_description": "Connection timeout to database",
+                        "files": [
+                            "/config/database.yml",
+                            "/src/db.py",
+                            "/logs/error.log",
+                        ],
+                    },
+                },
             }
-        })
+        )

         mock_model = Mock()
         mock_model.generate_content.return_value = Mock(
             candidates=[Mock(content=Mock(parts=[Mock(text=clarification_json)]))]
         )
         mock_create_model.return_value = mock_model

-        result = await debug_tool.execute({
-            "error_description": "Connection timeout to database",
-            "files": ["logs/error.log"]
-        })
+        result = await debug_tool.execute(
+            {
+                "error_description": "Connection timeout to database",
+                "files": ["/absolute/logs/error.log"],
+            }
+        )

         assert len(result) == 1

         response_data = json.loads(result[0].text)
         assert response_data["status"] == "requires_clarification"

         clarification = json.loads(response_data["content"])
         assert "suggested_next_action" in clarification
         assert clarification["suggested_next_action"]["tool"] == "debug_issue"
@@ -156,12 +178,12 @@ class TestDynamicContextRequests:
status="success",
content="Test content",
content_type="markdown",
metadata={"tool_name": "test", "execution_time": 1.5}
metadata={"tool_name": "test", "execution_time": 1.5},
)
json_str = output.model_dump_json()
parsed = json.loads(json_str)
assert parsed["status"] == "success"
assert parsed["content"] == "Test content"
assert parsed["content_type"] == "markdown"
@@ -172,9 +194,9 @@ class TestDynamicContextRequests:
         request = ClarificationRequest(
             question="Need more context",
             files_needed=["file1.py", "file2.py"],
-            suggested_next_action={"tool": "analyze", "args": {}}
+            suggested_next_action={"tool": "analyze", "args": {}},
         )

         assert request.question == "Need more context"
         assert len(request.files_needed) == 2
         assert request.suggested_next_action["tool"] == "analyze"
@@ -185,13 +207,12 @@ class TestDynamicContextRequests:
"""Test error response format"""
mock_create_model.side_effect = Exception("API connection failed")
result = await analyze_tool.execute({
"files": ["test.py"],
"question": "Analyze this"
})
result = await analyze_tool.execute(
{"files": ["/absolute/path/test.py"], "question": "Analyze this"}
)
assert len(result) == 1
response_data = json.loads(result[0].text)
assert response_data["status"] == "error"
assert "API connection failed" in response_data["content"]
@@ -206,14 +227,16 @@ class TestCollaborationWorkflow:
     async def test_dependency_analysis_triggers_clarification(self, mock_create_model):
         """Test that asking about dependencies without package files triggers clarification"""
         tool = AnalyzeTool()

         # Mock Gemini to request package.json when asked about dependencies
-        clarification_json = json.dumps({
-            "status": "requires_clarification",
-            "question": "I need to see the package.json file to analyze npm dependencies",
-            "files_needed": ["package.json", "package-lock.json"]
-        })
+        clarification_json = json.dumps(
+            {
+                "status": "requires_clarification",
+                "question": "I need to see the package.json file to analyze npm dependencies",
+                "files_needed": ["package.json", "package-lock.json"],
+            }
+        )

         mock_model = Mock()
         mock_model.generate_content.return_value = Mock(
             candidates=[Mock(content=Mock(parts=[Mock(text=clarification_json)]))]
@@ -221,46 +244,54 @@ class TestCollaborationWorkflow:
         mock_create_model.return_value = mock_model

         # Ask about dependencies with only source files
-        result = await tool.execute({
-            "files": ["src/index.js"],
-            "question": "What npm packages and versions does this project use?"
-        })
+        result = await tool.execute(
+            {
+                "files": ["/absolute/path/src/index.js"],
+                "question": "What npm packages and versions does this project use?",
+            }
+        )

         response = json.loads(result[0].text)
-        assert response["status"] == "requires_clarification", \
-            "Should request clarification when asked about dependencies without package files"
+        assert (
+            response["status"] == "requires_clarification"
+        ), "Should request clarification when asked about dependencies without package files"

         clarification = json.loads(response["content"])
-        assert "package.json" in str(clarification["files_needed"]), \
-            "Should specifically request package.json"
+        assert "package.json" in str(
+            clarification["files_needed"]
+        ), "Should specifically request package.json"

     @pytest.mark.asyncio
     @patch("tools.base.BaseTool.create_model")
     async def test_multi_step_collaboration(self, mock_create_model):
         """Test a multi-step collaboration workflow"""
         tool = DebugIssueTool()

         # Step 1: Initial request returns clarification needed
-        clarification_json = json.dumps({
-            "status": "requires_clarification",
-            "question": "I need to see the configuration file to understand the connection settings",
-            "files_needed": ["config.py"]
-        })
+        clarification_json = json.dumps(
+            {
+                "status": "requires_clarification",
+                "question": "I need to see the configuration file to understand the connection settings",
+                "files_needed": ["config.py"],
+            }
+        )

         mock_model = Mock()
         mock_model.generate_content.return_value = Mock(
             candidates=[Mock(content=Mock(parts=[Mock(text=clarification_json)]))]
         )
         mock_create_model.return_value = mock_model

-        result1 = await tool.execute({
-            "error_description": "Database connection timeout",
-            "error_context": "Timeout after 30s"
-        })
+        result1 = await tool.execute(
+            {
+                "error_description": "Database connection timeout",
+                "error_context": "Timeout after 30s",
+            }
+        )

         response1 = json.loads(result1[0].text)
         assert response1["status"] == "requires_clarification"

         # Step 2: Claude would provide additional context and re-invoke
         # This simulates the second call with more context
         final_response = """
@@ -272,17 +303,19 @@ class TestCollaborationWorkflow:
         ### 1. Incorrect Database Host (Confidence: High)
         **Root Cause:** The config.py file shows the database host is set to 'localhost' but the database is running on a different server.
         """

         mock_model.generate_content.return_value = Mock(
             candidates=[Mock(content=Mock(parts=[Mock(text=final_response)]))]
         )

-        result2 = await tool.execute({
-            "error_description": "Database connection timeout",
-            "error_context": "Timeout after 30s",
-            "files": ["config.py"]  # Additional context provided
-        })
+        result2 = await tool.execute(
+            {
+                "error_description": "Database connection timeout",
+                "error_context": "Timeout after 30s",
+                "files": ["/absolute/path/config.py"],  # Additional context provided
+            }
+        )

         response2 = json.loads(result2[0].text)
         assert response2["status"] == "success"
         assert "incorrect host configuration" in response2["content"].lower()
assert "incorrect host configuration" in response2["content"].lower()