my-pal-mcp-server/tests/test_collaboration.py
Fahad 94f542c76a fix: critical conversation history bug and improve Docker integration
This commit fixes several critical issues and adds a number of improvements:

🔧 Critical Fixes:
- Fixed conversation history not being included when using continuation_id in AI-to-AI conversations (see the sketch below)
- Fixed test mock targeting issues preventing proper conversation memory validation
- Fixed Docker debug logging functionality with Gemini tools
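
In essence, the continuation_id fix makes the tools rebuild prior AI-to-AI turns before prompting the model. A minimal sketch of that behaviour (the load_thread callable is a stand-in for whatever utils.conversation_memory actually exposes; only the behaviour described above is taken from the fix):

```python
def build_prompt(arguments: dict, user_prompt: str, load_thread) -> str:
    """Prepend stored conversation history when a continuation_id is supplied."""
    continuation_id = arguments.get("continuation_id")
    if continuation_id:
        # Without this step the model only saw the new prompt, which was the bug.
        prior_turns = load_thread(continuation_id)  # hypothetical loader
        return f"{prior_turns}\n\n{user_prompt}"
    return user_prompt
```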

🐛 Bug Fixes:
- Docker Compose configuration for proper container command execution
- Retargeted test mock imports from utils.conversation_memory.* to tools.base.* (illustrated below)
- Version bump to 3.1.0 reflecting significant improvements
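
The mock retargeting follows the standard unittest.mock rule: patch a name where it is looked up, not where it is defined. An illustrative sketch (add_turn is a made-up name; the repository's real helpers may be called something else):

```python
from unittest.mock import patch

# If tools.base does `from utils.conversation_memory import add_turn`, the test
# must patch the copy that tools.base actually resolves at call time:
with patch("tools.base.add_turn") as mock_add_turn:
    ...  # exercise the tool; mock_add_turn records the calls

# Patching "utils.conversation_memory.add_turn" instead would leave the name
# already imported into tools.base untouched, so the mock would never be hit.
```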

🚀 Improvements:
- Enhanced Docker environment configuration with comprehensive logging setup
- Added cross-tool continuation documentation and examples in README
- Improved error handling and validation across all tools
- Better logging configuration with LOG_LEVEL environment variable support (sketch below)
- Enhanced conversation memory system documentation
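
For the LOG_LEVEL support, the conventional wiring looks roughly like this (a sketch; only the variable name comes from the list above, the rest is illustrative):

```python
import logging
import os

# Honour LOG_LEVEL from the environment, falling back to INFO for unset or unknown values.
level_name = os.getenv("LOG_LEVEL", "INFO").upper()
logging.basicConfig(level=getattr(logging, level_name, logging.INFO))
```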

🧪 Testing:
- Added comprehensive conversation history bug fix tests
- Added cross-tool continuation functionality tests (schematic below)
- All 132 tests now pass with proper conversation history validation
- Improved test coverage for AI-to-AI conversation threading
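
Cross-tool continuation amounts to reusing the thread identifier produced by one tool when invoking another. Schematically (continuation_id comes from the fix description above; the response field and argument names are assumptions, not the verified API):

```python
# One tool starts a thread...
first = json.loads((await analyze_tool.execute({
    "files": ["/absolute/path/src/index.js"],
    "question": "Summarise this module",
}))[0].text)

# ...and a different tool resumes it by passing the same continuation_id.
await debug_tool.execute({
    "error_description": "NameError raised in index.js",
    "continuation_id": first.get("continuation_id"),  # assumed response field
})
```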

Code Quality:
- Applied black, isort, and ruff formatting across entire codebase
- Enhanced inline documentation for conversation memory system
- Cleaned up temporary files and improved repository hygiene
- Better test descriptions and coverage for critical functionality

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-06-11 08:53:45 +04:00


"""
Tests for dynamic context request and collaboration features
"""
import json
from unittest.mock import Mock, patch
import pytest
from tools.analyze import AnalyzeTool
from tools.debug import DebugIssueTool
from tools.models import ClarificationRequest, ToolOutput


class TestDynamicContextRequests:
"""Test the dynamic context request mechanism"""
@pytest.fixture
def analyze_tool(self):
return AnalyzeTool()
@pytest.fixture
def debug_tool(self):
return DebugIssueTool()
@pytest.mark.asyncio
@patch("tools.base.BaseTool.create_model")
async def test_clarification_request_parsing(self, mock_create_model, analyze_tool):
"""Test that tools correctly parse clarification requests"""
# Mock model to return a clarification request
clarification_json = json.dumps(
{
"status": "requires_clarification",
"question": "I need to see the package.json file to understand dependencies",
"files_needed": ["package.json", "package-lock.json"],
}
)
mock_model = Mock()
mock_model.generate_content.return_value = Mock(
candidates=[Mock(content=Mock(parts=[Mock(text=clarification_json)]))]
)
mock_create_model.return_value = mock_model
result = await analyze_tool.execute(
{
"files": ["/absolute/path/src/index.js"],
"question": "Analyze the dependencies used in this project",
}
)
assert len(result) == 1
# Parse the response
response_data = json.loads(result[0].text)
assert response_data["status"] == "requires_clarification"
assert response_data["content_type"] == "json"
# Parse the clarification request
clarification = json.loads(response_data["content"])
assert clarification["question"] == "I need to see the package.json file to understand dependencies"
assert clarification["files_needed"] == ["package.json", "package-lock.json"]
@pytest.mark.asyncio
@patch("tools.base.BaseTool.create_model")
async def test_normal_response_not_parsed_as_clarification(self, mock_create_model, debug_tool):
"""Test that normal responses are not mistaken for clarification requests"""
normal_response = """
## Summary
The error is caused by a missing import statement.
## Hypotheses (Ranked by Likelihood)
### 1. Missing Import (Confidence: High)
**Root Cause:** The module 'utils' is not imported
"""
mock_model = Mock()
mock_model.generate_content.return_value = Mock(
candidates=[Mock(content=Mock(parts=[Mock(text=normal_response)]))]
)
mock_create_model.return_value = mock_model
result = await debug_tool.execute({"error_description": "NameError: name 'utils' is not defined"})
assert len(result) == 1
# Parse the response
response_data = json.loads(result[0].text)
assert response_data["status"] == "success"
assert response_data["content_type"] in ["text", "markdown"]
assert "Summary" in response_data["content"]
@pytest.mark.asyncio
@patch("tools.base.BaseTool.create_model")
async def test_malformed_clarification_request_treated_as_normal(self, mock_create_model, analyze_tool):
"""Test that malformed JSON clarification requests are treated as normal responses"""
malformed_json = '{"status": "requires_clarification", "question": "Missing closing brace"'
mock_model = Mock()
mock_model.generate_content.return_value = Mock(
candidates=[Mock(content=Mock(parts=[Mock(text=malformed_json)]))]
)
mock_create_model.return_value = mock_model
result = await analyze_tool.execute({"files": ["/absolute/path/test.py"], "question": "What does this do?"})
assert len(result) == 1
# Should be treated as normal response due to JSON parse error
response_data = json.loads(result[0].text)
assert response_data["status"] == "success"
assert malformed_json in response_data["content"]
@pytest.mark.asyncio
@patch("tools.base.BaseTool.create_model")
async def test_clarification_with_suggested_action(self, mock_create_model, debug_tool):
"""Test clarification request with suggested next action"""
clarification_json = json.dumps(
{
"status": "requires_clarification",
"question": "I need to see the database configuration to diagnose the connection error",
"files_needed": ["config/database.yml", "src/db.py"],
"suggested_next_action": {
"tool": "debug",
"args": {
"error_description": "Connection timeout to database",
"files": [
"/config/database.yml",
"/src/db.py",
"/logs/error.log",
],
},
},
}
)
mock_model = Mock()
mock_model.generate_content.return_value = Mock(
candidates=[Mock(content=Mock(parts=[Mock(text=clarification_json)]))]
)
mock_create_model.return_value = mock_model
result = await debug_tool.execute(
{
"error_description": "Connection timeout to database",
"files": ["/absolute/logs/error.log"],
}
)
assert len(result) == 1
response_data = json.loads(result[0].text)
assert response_data["status"] == "requires_clarification"
clarification = json.loads(response_data["content"])
assert "suggested_next_action" in clarification
assert clarification["suggested_next_action"]["tool"] == "debug"
def test_tool_output_model_serialization(self):
"""Test ToolOutput model serialization"""
output = ToolOutput(
status="success",
content="Test content",
content_type="markdown",
metadata={"tool_name": "test", "execution_time": 1.5},
)
json_str = output.model_dump_json()
parsed = json.loads(json_str)
assert parsed["status"] == "success"
assert parsed["content"] == "Test content"
assert parsed["content_type"] == "markdown"
assert parsed["metadata"]["tool_name"] == "test"
def test_clarification_request_model(self):
"""Test ClarificationRequest model"""
request = ClarificationRequest(
question="Need more context",
files_needed=["file1.py", "file2.py"],
suggested_next_action={"tool": "analyze", "args": {}},
)
assert request.question == "Need more context"
assert len(request.files_needed) == 2
assert request.suggested_next_action["tool"] == "analyze"
@pytest.mark.asyncio
@patch("tools.base.BaseTool.create_model")
async def test_error_response_format(self, mock_create_model, analyze_tool):
"""Test error response format"""
mock_create_model.side_effect = Exception("API connection failed")
result = await analyze_tool.execute({"files": ["/absolute/path/test.py"], "question": "Analyze this"})
assert len(result) == 1
response_data = json.loads(result[0].text)
assert response_data["status"] == "error"
assert "API connection failed" in response_data["content"]
assert response_data["content_type"] == "text"


class TestCollaborationWorkflow:
"""Test complete collaboration workflows"""
@pytest.mark.asyncio
@patch("tools.base.BaseTool.create_model")
async def test_dependency_analysis_triggers_clarification(self, mock_create_model):
"""Test that asking about dependencies without package files triggers clarification"""
tool = AnalyzeTool()
# Mock Gemini to request package.json when asked about dependencies
clarification_json = json.dumps(
{
"status": "requires_clarification",
"question": "I need to see the package.json file to analyze npm dependencies",
"files_needed": ["package.json", "package-lock.json"],
}
)
mock_model = Mock()
mock_model.generate_content.return_value = Mock(
candidates=[Mock(content=Mock(parts=[Mock(text=clarification_json)]))]
)
mock_create_model.return_value = mock_model
# Ask about dependencies with only source files
result = await tool.execute(
{
"files": ["/absolute/path/src/index.js"],
"question": "What npm packages and versions does this project use?",
}
)
response = json.loads(result[0].text)
assert (
response["status"] == "requires_clarification"
), "Should request clarification when asked about dependencies without package files"
clarification = json.loads(response["content"])
assert "package.json" in str(clarification["files_needed"]), "Should specifically request package.json"
@pytest.mark.asyncio
@patch("tools.base.BaseTool.create_model")
async def test_multi_step_collaboration(self, mock_create_model):
"""Test a multi-step collaboration workflow"""
tool = DebugIssueTool()
# Step 1: Initial request returns clarification needed
clarification_json = json.dumps(
{
"status": "requires_clarification",
"question": "I need to see the configuration file to understand the connection settings",
"files_needed": ["config.py"],
}
)
mock_model = Mock()
mock_model.generate_content.return_value = Mock(
candidates=[Mock(content=Mock(parts=[Mock(text=clarification_json)]))]
)
mock_create_model.return_value = mock_model
result1 = await tool.execute(
{
"error_description": "Database connection timeout",
"error_context": "Timeout after 30s",
}
)
response1 = json.loads(result1[0].text)
assert response1["status"] == "requires_clarification"
# Step 2: Claude would provide additional context and re-invoke
# This simulates the second call with more context
final_response = """
## Summary
The database connection timeout is caused by incorrect host configuration.
## Hypotheses (Ranked by Likelihood)
### 1. Incorrect Database Host (Confidence: High)
**Root Cause:** The config.py file shows the database host is set to 'localhost' but the database is running on a different server.
"""
mock_model.generate_content.return_value = Mock(
candidates=[Mock(content=Mock(parts=[Mock(text=final_response)]))]
)
result2 = await tool.execute(
{
"error_description": "Database connection timeout",
"error_context": "Timeout after 30s",
"files": ["/absolute/path/config.py"], # Additional context provided
}
)
response2 = json.loads(result2[0].text)
assert response2["status"] == "success"
assert "incorrect host configuration" in response2["content"].lower()