Migration from Docker to Standalone Python Server (#73)

* Migration from Docker to standalone server
- Migration handling
- Fixed tests
- Use simpler in-memory storage (see the storage sketch after this list)
- Support for concurrent logging to disk
- Simplified direct connections to localhost
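
A minimal sketch of what "simpler in-memory storage" with concurrent logging to disk could look like. All names here (`InMemoryStore`, `activity.jsonl`) are illustrative assumptions, not the repository's actual implementation:

```python
import json
import threading
from datetime import datetime, timezone


class InMemoryStore:
    """Thread-safe dict-backed store that appends each write to a JSONL log."""

    def __init__(self, log_path="activity.jsonl"):
        self._data = {}
        self._lock = threading.Lock()  # serializes concurrent writers
        self._log_path = log_path

    def set(self, key, value):
        with self._lock:
            self._data[key] = value
            # Append-only log line; holding the lock keeps interleaved
            # writes from corrupting the file.
            entry = {"ts": datetime.now(timezone.utc).isoformat(), "key": key}
            with open(self._log_path, "a") as f:
                f.write(json.dumps(entry) + "\n")

    def get(self, key, default=None):
        with self._lock:
            return self._data.get(key, default)
```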

* Migration from Docker / Redis to standalone script
- Updated tests
- Updated run script
- Fixed requirements
- Use dotenv (see the dotenv sketch after this list)
- Ask once whether the user would like to install the MCP server in Claude Desktop
- Updated docs
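
A hedged sketch of the dotenv startup plus the ask-once Claude Desktop prompt. The marker-file approach and all names are assumptions for illustration, not necessarily what the run script does:

```python
from pathlib import Path

from dotenv import load_dotenv

# Load API keys and settings from a local .env file instead of Docker env vars
load_dotenv()

MARKER = Path.home() / ".mcp_install_prompted"  # hypothetical "asked already" flag


def maybe_offer_claude_desktop_install():
    """Ask once whether to register this MCP server in Claude Desktop."""
    if MARKER.exists():
        return  # already asked on a previous run
    answer = input("Install this MCP server in Claude Desktop? [y/N] ").strip().lower()
    if answer == "y":
        # A real implementation would write the server entry into Claude
        # Desktop's config file here; the path varies by OS.
        print("Registering server in Claude Desktop config (illustrative stub).")
    MARKER.touch()  # remember that we asked, so we only prompt once
```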

* More cleanup; removed remaining Docker references

* Cleanup

* Comments

* Fixed tests

* Fix GitHub Actions workflow for standalone Python architecture

- Install requirements-dev.txt for pytest and testing dependencies
- Remove Docker setup from simulation tests (now standalone)
- Simplify linting job to use requirements-dev.txt
- Update simulation tests to run directly without Docker

Fixes unit test failures in CI due to missing pytest dependency.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>

* Remove simulation tests from GitHub Actions

- Removed the simulation-tests job that makes real API calls
- Kept only unit tests (mocked, no API costs) and linting
- Simulation tests should now be run manually with real API keys
- Reduces CI costs and complexity

GitHub Actions now only runs:
- Unit tests (569 tests, all mocked)
- Code quality checks (ruff, black)

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>

* Fixed tests

* Fixed tests

---------

Co-authored-by: Claude <noreply@anthropic.com>
Author: Beehive Innovations
Date: 2025-06-18 23:41:22 +04:00
Committed by: GitHub
Parent: 9d72545ecd
Commit: 4151c3c3a5
121 changed files with 2842 additions and 3168 deletions


@@ -10,7 +10,7 @@ import pytest
 from tests.mock_helpers import create_mock_provider
 from tools.analyze import AnalyzeTool
 from tools.debug import DebugIssueTool
-from tools.models import ClarificationRequest, ToolOutput
+from tools.models import FilesNeededRequest, ToolOutput
 
 
 class TestDynamicContextRequests:
@@ -31,8 +31,8 @@ class TestDynamicContextRequests:
         # Mock model to return a clarification request
         clarification_json = json.dumps(
             {
-                "status": "clarification_required",
-                "question": "I need to see the package.json file to understand dependencies",
+                "status": "files_required_to_continue",
+                "mandatory_instructions": "I need to see the package.json file to understand dependencies",
                 "files_needed": ["package.json", "package-lock.json"],
             }
         )
@@ -56,12 +56,16 @@ class TestDynamicContextRequests:
         # Parse the response
         response_data = json.loads(result[0].text)
-        assert response_data["status"] == "clarification_required"
+        assert response_data["status"] == "files_required_to_continue"
         assert response_data["content_type"] == "json"
 
         # Parse the clarification request
         clarification = json.loads(response_data["content"])
-        assert clarification["question"] == "I need to see the package.json file to understand dependencies"
+        # Check that the enhanced instructions contain the original message and additional guidance
+        expected_start = "I need to see the package.json file to understand dependencies"
+        assert clarification["mandatory_instructions"].startswith(expected_start)
+        assert "IMPORTANT GUIDANCE:" in clarification["mandatory_instructions"]
+        assert "Use FULL absolute paths" in clarification["mandatory_instructions"]
         assert clarification["files_needed"] == ["package.json", "package-lock.json"]
 
     @pytest.mark.asyncio
@@ -100,7 +104,7 @@ class TestDynamicContextRequests:
     @patch("tools.base.BaseTool.get_model_provider")
     async def test_malformed_clarification_request_treated_as_normal(self, mock_get_provider, analyze_tool):
         """Test that malformed JSON clarification requests are treated as normal responses"""
-        malformed_json = '{"status": "clarification_required", "prompt": "Missing closing brace"'
+        malformed_json = '{"status": "files_required_to_continue", "prompt": "Missing closing brace"'
 
         mock_provider = create_mock_provider()
         mock_provider.get_provider_type.return_value = Mock(value="google")
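
The hunks above pin down the parsing contract: a well-formed JSON object whose status is "files_required_to_continue" becomes a structured files request, while malformed JSON falls through as a normal text response. A minimal sketch of that detection logic, with hypothetical names (the repository's actual dispatch lives elsewhere):

```python
import json

FILES_REQUIRED = "files_required_to_continue"


def classify_model_output(text: str):
    """Return ("files_required", payload) for the sentinel status,
    else ("normal", text) -- including for malformed JSON."""
    try:
        payload = json.loads(text)
    except (json.JSONDecodeError, TypeError):
        return ("normal", text)  # malformed JSON is treated as plain content
    if isinstance(payload, dict) and payload.get("status") == FILES_REQUIRED:
        return ("files_required", payload)
    return ("normal", text)
```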
@@ -125,8 +129,8 @@ class TestDynamicContextRequests:
         """Test clarification request with suggested next action"""
         clarification_json = json.dumps(
             {
-                "status": "clarification_required",
-                "question": "I need to see the database configuration to diagnose the connection error",
+                "status": "files_required_to_continue",
+                "mandatory_instructions": "I need to see the database configuration to diagnose the connection error",
                 "files_needed": ["config/database.yml", "src/db.py"],
                 "suggested_next_action": {
                     "tool": "debug",
@@ -160,7 +164,7 @@ class TestDynamicContextRequests:
         assert len(result) == 1
         response_data = json.loads(result[0].text)
-        assert response_data["status"] == "clarification_required"
+        assert response_data["status"] == "files_required_to_continue"
         clarification = json.loads(response_data["content"])
         assert "suggested_next_action" in clarification
@@ -184,17 +188,54 @@ class TestDynamicContextRequests:
         assert parsed["metadata"]["tool_name"] == "test"
 
     def test_clarification_request_model(self):
-        """Test ClarificationRequest model"""
-        request = ClarificationRequest(
-            question="Need more context",
+        """Test FilesNeededRequest model"""
+        request = FilesNeededRequest(
+            mandatory_instructions="Need more context",
             files_needed=["file1.py", "file2.py"],
             suggested_next_action={"tool": "analyze", "args": {}},
         )
-        assert request.question == "Need more context"
+        assert request.mandatory_instructions == "Need more context"
         assert len(request.files_needed) == 2
         assert request.suggested_next_action["tool"] == "analyze"
 
+    def test_mandatory_instructions_enhancement(self):
+        """Test that mandatory_instructions are enhanced with additional guidance"""
+        from tools.base import BaseTool
+
+        # Create a dummy tool instance for testing
+        class TestTool(BaseTool):
+            def get_name(self):
+                return "test"
+
+            def get_description(self):
+                return "test"
+
+            def get_request_model(self):
+                return None
+
+            def prepare_prompt(self, request):
+                return ""
+
+            def get_system_prompt(self):
+                return ""
+
+            def get_input_schema(self):
+                return {}
+
+        tool = TestTool()
+        original = "I need additional files to proceed"
+        enhanced = tool._enhance_mandatory_instructions(original)
+
+        # Verify the original instructions are preserved
+        assert enhanced.startswith(original)
+
+        # Verify additional guidance is added
+        assert "IMPORTANT GUIDANCE:" in enhanced
+        assert "CRITICAL for providing accurate analysis" in enhanced
+        assert "Use FULL absolute paths" in enhanced
+        assert "continuation_id to continue" in enhanced
+
     @pytest.mark.asyncio
     @patch("tools.base.BaseTool.get_model_provider")
     async def test_error_response_format(self, mock_get_provider, analyze_tool):
@@ -223,8 +264,8 @@ class TestCollaborationWorkflow:
         # Mock Gemini to request package.json when asked about dependencies
         clarification_json = json.dumps(
             {
-                "status": "clarification_required",
-                "question": "I need to see the package.json file to analyze npm dependencies",
+                "status": "files_required_to_continue",
+                "mandatory_instructions": "I need to see the package.json file to analyze npm dependencies",
                 "files_needed": ["package.json", "package-lock.json"],
             }
         )
@@ -247,7 +288,7 @@ class TestCollaborationWorkflow:
         response = json.loads(result[0].text)
         assert (
-            response["status"] == "clarification_required"
+            response["status"] == "files_required_to_continue"
         ), "Should request clarification when asked about dependencies without package files"
 
         clarification = json.loads(response["content"])
@@ -262,8 +303,8 @@ class TestCollaborationWorkflow:
         # Step 1: Initial request returns clarification needed
         clarification_json = json.dumps(
             {
-                "status": "clarification_required",
-                "question": "I need to see the configuration file to understand the connection settings",
+                "status": "files_required_to_continue",
+                "mandatory_instructions": "I need to see the configuration file to understand the connection settings",
                 "files_needed": ["config.py"],
             }
         )
@@ -284,7 +325,7 @@ class TestCollaborationWorkflow:
         )
         response1 = json.loads(result1[0].text)
-        assert response1["status"] == "clarification_required"
+        assert response1["status"] == "files_required_to_continue"
 
         # Step 2: Claude would provide additional context and re-invoke
         # This simulates the second call with more context
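
Taken together, the assertions imply a shape like the following for the renamed model and the instruction-enhancement step. This is a reconstruction from the tests only: pydantic is an assumption (the request models in this repo appear pydantic-based, but that is inferred), the enhancement is shown as a free function rather than the actual BaseTool method, and the guidance wording beyond the asserted fragments is filler:

```python
from typing import Optional

from pydantic import BaseModel


class FilesNeededRequest(BaseModel):
    """Replaces ClarificationRequest; fields sketched from the test assertions."""

    status: str = "files_required_to_continue"
    mandatory_instructions: str
    files_needed: list[str]
    suggested_next_action: Optional[dict] = None


def _enhance_mandatory_instructions(original: str) -> str:
    """Append guidance; only the quoted fragments are asserted by the tests."""
    guidance = (
        "\n\nIMPORTANT GUIDANCE: These files are CRITICAL for providing "
        "accurate analysis. Use FULL absolute paths when re-invoking the "
        "tool, and pass the continuation_id to continue this conversation."
    )
    # Tests require startswith(original) plus the guidance markers above.
    return original + guidance
```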