Re-imagined and re-written Debug tool. Instead of prompting Claude to perform an initial analysis (and hoping it did), the tool now works through the debug process as an 'investigation': it encourages Claude to gather its 'findings' and form a 'hypothesis', step back as needed, and keep track of both the files it has gone through and the files relevant to the issue at hand. This structured investigation is then passed to the other model with far greater insight than the original debug tool could ever provide.
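As a minimal sketch, a single investigation step might look like the payload below. The field names mirror the updated test further down; the initialized debug_tool instance and surrounding async context are assumed.

    # Illustrative only: one investigation step for the new debug tool,
    # using the fields exercised in the updated tests below.
    step_payload = {
        "step": "Investigating NameError: name 'utils' is not defined",
        "step_number": 1,
        "total_steps": 3,
        "next_step_required": True,
        "findings": "The error indicates 'utils' module is not imported or defined",
        "files_checked": ["/code/main.py"],
        "relevant_files": ["/code/main.py"],
        "hypothesis": "Missing import statement for utils module",
        "confidence": "high",
    }
    # result = await debug_tool.execute(step_payload)  # assumes an initialized debug tool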

Improved prompts; guard against overengineering and flag it as an antipattern.
This commit is contained in:
Fahad
2025-06-19 10:22:30 +04:00
parent 2641c78f8d
commit fccfb0d999
16 changed files with 2243 additions and 707 deletions


@@ -70,35 +70,35 @@ class TestDynamicContextRequests:
@pytest.mark.asyncio
@patch("tools.base.BaseTool.get_model_provider")
async def test_normal_response_not_parsed_as_clarification(self, mock_get_provider, debug_tool):
"""Test that normal responses are not mistaken for clarification requests"""
normal_response = """
## Summary
The error is caused by a missing import statement.
## Hypotheses (Ranked by Likelihood)
### 1. Missing Import (Confidence: High)
**Root Cause:** The module 'utils' is not imported
"""
mock_provider = create_mock_provider()
mock_provider.get_provider_type.return_value = Mock(value="google")
mock_provider.supports_thinking_mode.return_value = False
mock_provider.generate_content.return_value = Mock(
content=normal_response, usage={}, model_name="gemini-2.5-flash", metadata={}
@patch("utils.conversation_memory.create_thread", return_value="debug-test-uuid")
@patch("utils.conversation_memory.add_turn")
async def test_normal_response_not_parsed_as_clarification(
self, mock_add_turn, mock_create_thread, mock_get_provider, debug_tool
):
"""Test that normal investigation responses work correctly with new debug tool"""
# The new debug tool uses self-investigation pattern
result = await debug_tool.execute(
{
"step": "Investigating NameError: name 'utils' is not defined",
"step_number": 1,
"total_steps": 3,
"next_step_required": True,
"findings": "The error indicates 'utils' module is not imported or defined",
"files_checked": ["/code/main.py"],
"relevant_files": ["/code/main.py"],
"hypothesis": "Missing import statement for utils module",
"confidence": "high",
}
)
mock_get_provider.return_value = mock_provider
result = await debug_tool.execute({"prompt": "NameError: name 'utils' is not defined"})
assert len(result) == 1
# Parse the response
# Parse the response - new debug tool returns structured JSON
response_data = json.loads(result[0].text)
assert response_data["status"] == "success"
assert response_data["content_type"] in ["text", "markdown"]
assert "Summary" in response_data["content"]
assert response_data["status"] == "investigation_in_progress"
assert response_data["step_number"] == 1
assert response_data["next_step_required"] is True
assert response_data["investigation_status"]["current_confidence"] == "high"
@pytest.mark.asyncio
@patch("tools.base.BaseTool.get_model_provider")
@@ -125,17 +125,17 @@ class TestDynamicContextRequests:
@pytest.mark.asyncio
@patch("tools.base.BaseTool.get_model_provider")
async def test_clarification_with_suggested_action(self, mock_get_provider, debug_tool):
async def test_clarification_with_suggested_action(self, mock_get_provider, analyze_tool):
"""Test clarification request with suggested next action"""
clarification_json = json.dumps(
{
"status": "files_required_to_continue",
"mandatory_instructions": "I need to see the database configuration to diagnose the connection error",
"mandatory_instructions": "I need to see the database configuration to analyze the connection error",
"files_needed": ["config/database.yml", "src/db.py"],
"suggested_next_action": {
"tool": "debug",
"tool": "analyze",
"args": {
"prompt": "Connection timeout to database",
"prompt": "Analyze database connection timeout issue",
"files": [
"/config/database.yml",
"/src/db.py",
@@ -154,9 +154,9 @@ class TestDynamicContextRequests:
)
mock_get_provider.return_value = mock_provider
result = await debug_tool.execute(
result = await analyze_tool.execute(
{
"prompt": "Connection timeout to database",
"prompt": "Analyze database connection timeout issue",
"files": ["/absolute/logs/error.log"],
}
)
@@ -168,7 +168,7 @@ class TestDynamicContextRequests:
clarification = json.loads(response_data["content"])
assert "suggested_next_action" in clarification
assert clarification["suggested_next_action"]["tool"] == "debug"
assert clarification["suggested_next_action"]["tool"] == "analyze"
def test_tool_output_model_serialization(self):
"""Test ToolOutput model serialization"""
@@ -298,7 +298,7 @@ class TestCollaborationWorkflow:
@patch("tools.base.BaseTool.get_model_provider")
async def test_multi_step_collaboration(self, mock_get_provider):
"""Test a multi-step collaboration workflow"""
tool = DebugIssueTool()
tool = AnalyzeTool()
# Step 1: Initial request returns clarification needed
clarification_json = json.dumps(
@@ -319,8 +319,8 @@ class TestCollaborationWorkflow:
result1 = await tool.execute(
{
"prompt": "Database connection timeout",
"error_context": "Timeout after 30s",
"prompt": "Analyze database connection timeout issue",
"files": ["/logs/error.log"],
}
)
@@ -345,9 +345,8 @@ class TestCollaborationWorkflow:
result2 = await tool.execute(
{
"prompt": "Database connection timeout",
"error_context": "Timeout after 30s",
"files": ["/absolute/path/config.py"], # Additional context provided
"prompt": "Analyze database connection timeout issue with config file",
"files": ["/absolute/path/config.py", "/logs/error.log"], # Additional context provided
}
)