Fix all failing tests and pytest collection warnings

Fixed MagicMock comparison errors across multiple test suites by:
- Adding proper ModelCapabilities mocks with real values instead of MagicMock objects
- Updating test_auto_mode.py with correct provider mocking for model availability tests
- Updating test_thinking_modes.py with proper capabilities mocking in all thinking mode tests
- Updating test_tools.py with proper capabilities mocking for CodeReview and Analyze tools
- Fixing test_large_prompt_handling.py by adding proper provider mocking to prevent errors before large prompt detection

Fixed pytest collection warnings by:
- Renaming TestGenRequest to TestGenerationRequest to avoid pytest collecting it as a test class
- Renaming TestGenTool to TestGenerationTool to avoid pytest collecting it as a test class
- Updating all imports and references across server.py, tools/__init__.py, and test files

All 459 tests now pass without warnings or MagicMock comparison errors.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
Fahad
2025-06-16 06:02:12 +04:00
parent 8c3efd5676
commit 2cfe0b163a
11 changed files with 312 additions and 193 deletions

View File

@@ -45,7 +45,7 @@ from tools import (
DebugIssueTool,
Precommit,
RefactorTool,
TestGenTool,
TestGenerationTool,
ThinkDeepTool,
TracerTool,
)
@@ -149,7 +149,7 @@ TOOLS = {
"analyze": AnalyzeTool(), # General-purpose file and code analysis
"chat": ChatTool(), # Interactive development chat and brainstorming
"precommit": Precommit(), # Pre-commit validation of git changes
"testgen": TestGenTool(), # Comprehensive test generation with edge case coverage
"testgen": TestGenerationTool(), # Comprehensive test generation with edge case coverage
"refactor": RefactorTool(), # Intelligent code refactoring suggestions with precise line references
"tracer": TracerTool(), # Static call path prediction and control flow analysis
}

View File

@@ -160,6 +160,7 @@ class TestAutoMode:
patch("providers.registry.ModelProviderRegistry.get_provider_for_model") as mock_provider,
patch("providers.registry.ModelProviderRegistry.get_available_models") as mock_available,
patch.object(tool, "_get_available_models") as mock_tool_available,
patch("providers.registry.ModelProviderRegistry.is_model_available") as mock_is_available,
):
# Mock that o3 is not available but actual available models are
@@ -198,6 +199,12 @@ class TestAutoMode:
# Mock the tool's available models method to return the actual available models
mock_tool_available.return_value = available_models
# Mock is_model_available to return False for o3 specifically
def mock_model_available(model_name):
    """Side-effect stand-in for is_model_available: 'o3' is always unavailable."""
    # Reject o3 outright; any other model counts only if the test's
    # available_models list (captured from the enclosing scope) contains it.
    if model_name == "o3":
        return False
    return model_name in available_models
mock_is_available.side_effect = mock_model_available
# Execute with unavailable model
result = await tool.execute(
{"files": ["/tmp/test.py"], "prompt": "Analyze this", "model": "o3"} # This model is not available

View File

@@ -149,7 +149,28 @@ class TestLargePromptHandling:
@pytest.mark.asyncio
async def test_codereview_large_focus(self, large_prompt):
"""Test that codereview tool detects large focus_on field."""
from unittest.mock import MagicMock
from providers.base import ModelCapabilities, ProviderType
tool = CodeReviewTool()
# Mock provider to avoid MagicMock comparison errors that would prevent large prompt detection
with patch.object(tool, "get_model_provider") as mock_get_provider:
mock_provider = MagicMock()
mock_provider.get_provider_type.return_value = MagicMock(value="google")
mock_provider.supports_thinking_mode.return_value = False
# Set up proper capabilities to avoid MagicMock comparison errors
mock_capabilities = ModelCapabilities(
provider=ProviderType.GOOGLE,
model_name="gemini-2.5-flash-preview-05-20",
friendly_name="Test Model",
context_window=1048576,
supports_function_calling=True,
)
mock_provider.get_capabilities.return_value = mock_capabilities
mock_get_provider.return_value = mock_provider
result = await tool.execute(
{
"files": ["/some/file.py"],

View File

@@ -8,7 +8,7 @@ from tools.codereview import CodeReviewTool
from tools.debug import DebugIssueTool
from tools.precommit import Precommit
from tools.refactor import RefactorTool
from tools.testgen import TestGenTool
from tools.testgen import TestGenerationTool
class TestLineNumbersIntegration:
@@ -22,7 +22,7 @@ class TestLineNumbersIntegration:
CodeReviewTool(),
DebugIssueTool(),
RefactorTool(),
TestGenTool(),
TestGenerationTool(),
Precommit(),
]
@@ -38,7 +38,7 @@ class TestLineNumbersIntegration:
CodeReviewTool,
DebugIssueTool,
RefactorTool,
TestGenTool,
TestGenerationTool,
Precommit,
]

View File

@@ -10,7 +10,7 @@ from unittest.mock import Mock, patch
import pytest
from tests.mock_helpers import create_mock_provider
from tools.testgen import TestGenRequest, TestGenTool
from tools.testgen import TestGenerationRequest, TestGenerationTool
class TestTestGenTool:
@@ -18,7 +18,7 @@ class TestTestGenTool:
@pytest.fixture
def tool(self):
return TestGenTool()
return TestGenerationTool()
@pytest.fixture
def temp_files(self):

View File

@@ -39,15 +39,28 @@ class TestThinkingModes:
), f"{tool.__class__.__name__} should default to {expected_default}"
@pytest.mark.asyncio
@patch("tools.base.BaseTool.get_model_provider")
async def test_thinking_mode_minimal(self, mock_get_provider):
async def test_thinking_mode_minimal(self):
"""Test minimal thinking mode"""
from unittest.mock import MagicMock
from providers.base import ModelCapabilities, ProviderType
with patch("tools.base.BaseTool.get_model_provider") as mock_get_provider:
mock_provider = create_mock_provider()
mock_provider.get_provider_type.return_value = Mock(value="google")
mock_provider.supports_thinking_mode.return_value = True
mock_provider.generate_content.return_value = Mock(
content="Minimal thinking response", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
)
# Set up proper capabilities to avoid MagicMock comparison errors
mock_capabilities = ModelCapabilities(
provider=ProviderType.GOOGLE,
model_name="gemini-2.5-flash-preview-05-20",
friendly_name="Test Model",
context_window=1048576,
supports_function_calling=True,
)
mock_provider.get_capabilities.return_value = mock_capabilities
mock_get_provider.return_value = mock_provider
tool = AnalyzeTool()
@@ -76,15 +89,28 @@ class TestThinkingModes:
assert "Minimal thinking response" in response_data["content"] or "Analysis:" in response_data["content"]
@pytest.mark.asyncio
@patch("tools.base.BaseTool.get_model_provider")
async def test_thinking_mode_low(self, mock_get_provider):
async def test_thinking_mode_low(self):
"""Test low thinking mode"""
from unittest.mock import MagicMock
from providers.base import ModelCapabilities, ProviderType
with patch("tools.base.BaseTool.get_model_provider") as mock_get_provider:
mock_provider = create_mock_provider()
mock_provider.get_provider_type.return_value = Mock(value="google")
mock_provider.supports_thinking_mode.return_value = True
mock_provider.generate_content.return_value = Mock(
content="Low thinking response", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
)
# Set up proper capabilities to avoid MagicMock comparison errors
mock_capabilities = ModelCapabilities(
provider=ProviderType.GOOGLE,
model_name="gemini-2.5-flash-preview-05-20",
friendly_name="Test Model",
context_window=1048576,
supports_function_calling=True,
)
mock_provider.get_capabilities.return_value = mock_capabilities
mock_get_provider.return_value = mock_provider
tool = CodeReviewTool()
@@ -108,15 +134,28 @@ class TestThinkingModes:
assert "Low thinking response" in result[0].text or "Code Review" in result[0].text
@pytest.mark.asyncio
@patch("tools.base.BaseTool.get_model_provider")
async def test_thinking_mode_medium(self, mock_get_provider):
async def test_thinking_mode_medium(self):
"""Test medium thinking mode (default for most tools)"""
from unittest.mock import MagicMock
from providers.base import ModelCapabilities, ProviderType
with patch("tools.base.BaseTool.get_model_provider") as mock_get_provider:
mock_provider = create_mock_provider()
mock_provider.get_provider_type.return_value = Mock(value="google")
mock_provider.supports_thinking_mode.return_value = True
mock_provider.generate_content.return_value = Mock(
content="Medium thinking response", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
)
# Set up proper capabilities to avoid MagicMock comparison errors
mock_capabilities = ModelCapabilities(
provider=ProviderType.GOOGLE,
model_name="gemini-2.5-flash-preview-05-20",
friendly_name="Test Model",
context_window=1048576,
supports_function_calling=True,
)
mock_provider.get_capabilities.return_value = mock_capabilities
mock_get_provider.return_value = mock_provider
tool = DebugIssueTool()
@@ -139,15 +178,28 @@ class TestThinkingModes:
assert "Medium thinking response" in result[0].text or "Debug Analysis" in result[0].text
@pytest.mark.asyncio
@patch("tools.base.BaseTool.get_model_provider")
async def test_thinking_mode_high(self, mock_get_provider):
async def test_thinking_mode_high(self):
"""Test high thinking mode"""
from unittest.mock import MagicMock
from providers.base import ModelCapabilities, ProviderType
with patch("tools.base.BaseTool.get_model_provider") as mock_get_provider:
mock_provider = create_mock_provider()
mock_provider.get_provider_type.return_value = Mock(value="google")
mock_provider.supports_thinking_mode.return_value = True
mock_provider.generate_content.return_value = Mock(
content="High thinking response", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
)
# Set up proper capabilities to avoid MagicMock comparison errors
mock_capabilities = ModelCapabilities(
provider=ProviderType.GOOGLE,
model_name="gemini-2.5-flash-preview-05-20",
friendly_name="Test Model",
context_window=1048576,
supports_function_calling=True,
)
mock_provider.get_capabilities.return_value = mock_capabilities
mock_get_provider.return_value = mock_provider
tool = AnalyzeTool()

View File

@@ -76,13 +76,16 @@ class TestCodeReviewTool:
assert schema["required"] == ["files", "prompt"]
@pytest.mark.asyncio
@patch("tools.base.BaseTool.get_model_provider")
async def test_execute_with_review_type(self, mock_get_provider, tool, tmp_path):
async def test_execute_with_review_type(self, tool, tmp_path):
"""Test execution with specific review type"""
from unittest.mock import MagicMock
from providers.base import ModelCapabilities, ProviderType
# Create test file
test_file = tmp_path / "test.py"
test_file.write_text("def insecure(): pass", encoding="utf-8")
with patch("tools.base.BaseTool.get_model_provider") as mock_get_provider:
# Mock provider
mock_provider = create_mock_provider()
mock_provider.get_provider_type.return_value = Mock(value="google")
@@ -90,6 +93,16 @@ class TestCodeReviewTool:
mock_provider.generate_content.return_value = Mock(
content="Security issues found", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
)
# Set up proper capabilities to avoid MagicMock comparison errors
mock_capabilities = ModelCapabilities(
provider=ProviderType.GOOGLE,
model_name="gemini-2.5-flash-preview-05-20",
friendly_name="Test Model",
context_window=1048576,
supports_function_calling=True,
)
mock_provider.get_capabilities.return_value = mock_capabilities
mock_get_provider.return_value = mock_provider
result = await tool.execute(
@@ -169,13 +182,16 @@ class TestAnalyzeTool:
assert set(schema["required"]) == {"files", "prompt"}
@pytest.mark.asyncio
@patch("tools.base.BaseTool.get_model_provider")
async def test_execute_with_analysis_type(self, mock_get_provider, tool, tmp_path):
async def test_execute_with_analysis_type(self, tool, tmp_path):
"""Test execution with specific analysis type"""
from unittest.mock import MagicMock
from providers.base import ModelCapabilities, ProviderType
# Create test file
test_file = tmp_path / "module.py"
test_file.write_text("class Service: pass", encoding="utf-8")
with patch("tools.base.BaseTool.get_model_provider") as mock_get_provider:
# Mock provider
mock_provider = create_mock_provider()
mock_provider.get_provider_type.return_value = Mock(value="google")
@@ -183,6 +199,16 @@ class TestAnalyzeTool:
mock_provider.generate_content.return_value = Mock(
content="Architecture analysis", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
)
# Set up proper capabilities to avoid MagicMock comparison errors
mock_capabilities = ModelCapabilities(
provider=ProviderType.GOOGLE,
model_name="gemini-2.5-flash-preview-05-20",
friendly_name="Test Model",
context_window=1048576,
supports_function_calling=True,
)
mock_provider.get_capabilities.return_value = mock_capabilities
mock_get_provider.return_value = mock_provider
result = await tool.execute(
@@ -287,9 +313,9 @@ class TestAbsolutePathValidation:
@pytest.mark.asyncio
async def test_testgen_tool_relative_path_rejected(self):
"""Test that testgen tool rejects relative paths"""
from tools import TestGenTool
from tools import TestGenerationTool
tool = TestGenTool()
tool = TestGenerationTool()
result = await tool.execute(
{"files": ["src/main.py"], "prompt": "Generate tests for the functions"} # relative path
)
@@ -301,11 +327,14 @@ class TestAbsolutePathValidation:
assert "src/main.py" in response["content"]
@pytest.mark.asyncio
@patch("tools.AnalyzeTool.get_model_provider")
async def test_analyze_tool_accepts_absolute_paths(self, mock_get_provider):
async def test_analyze_tool_accepts_absolute_paths(self):
"""Test that analyze tool accepts absolute paths"""
from unittest.mock import MagicMock
from providers.base import ModelCapabilities, ProviderType
tool = AnalyzeTool()
with patch("tools.AnalyzeTool.get_model_provider") as mock_get_provider:
# Mock provider
mock_provider = create_mock_provider()
mock_provider.get_provider_type.return_value = Mock(value="google")
@@ -313,6 +342,16 @@ class TestAbsolutePathValidation:
mock_provider.generate_content.return_value = Mock(
content="Analysis complete", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
)
# Set up proper capabilities to avoid MagicMock comparison errors
mock_capabilities = ModelCapabilities(
provider=ProviderType.GOOGLE,
model_name="gemini-2.5-flash-preview-05-20",
friendly_name="Test Model",
context_window=1048576,
supports_function_calling=True,
)
mock_provider.get_capabilities.return_value = mock_capabilities
mock_get_provider.return_value = mock_provider
result = await tool.execute({"files": ["/absolute/path/file.py"], "prompt": "What does this do?"})

View File

@@ -8,7 +8,7 @@ from .codereview import CodeReviewTool
from .debug import DebugIssueTool
from .precommit import Precommit
from .refactor import RefactorTool
from .testgen import TestGenTool
from .testgen import TestGenerationTool
from .thinkdeep import ThinkDeepTool
from .tracer import TracerTool
@@ -20,6 +20,6 @@ __all__ = [
"ChatTool",
"Precommit",
"RefactorTool",
"TestGenTool",
"TestGenerationTool",
"TracerTool",
]

View File

@@ -962,7 +962,7 @@ When recommending searches, be specific about what information you need and why
files: List of file paths to check
Returns:
Dict with MCP_CODE_TOO_LARGE response if too large, None if acceptable
Dict with `code_too_large` response if too large, None if acceptable
"""
if not files:
return None

View File

@@ -28,7 +28,7 @@ from .base import BaseTool, ToolRequest
logger = logging.getLogger(__name__)
class TestGenRequest(ToolRequest):
class TestGenerationRequest(ToolRequest):
"""
Request model for the test generation tool.
@@ -56,7 +56,7 @@ class TestGenRequest(ToolRequest):
)
class TestGenTool(BaseTool):
class TestGenerationTool(BaseTool):
"""
Test generation tool implementation.
@@ -141,7 +141,7 @@ class TestGenTool(BaseTool):
return ToolModelCategory.EXTENDED_REASONING
def get_request_model(self):
return TestGenRequest
return TestGenerationRequest
def _process_test_examples(
self, test_examples: list[str], continuation_id: Optional[str], available_tokens: int = None
@@ -246,7 +246,7 @@ class TestGenTool(BaseTool):
logger.error(f"[TESTGEN] Failed to process test examples: {type(e).__name__}: {e}")
return "", f"Warning: Could not process test examples: {str(e)}"
async def prepare_prompt(self, request: TestGenRequest) -> str:
async def prepare_prompt(self, request: TestGenerationRequest) -> str:
"""
Prepare the test generation prompt with code analysis and optional test examples.

View File

@@ -980,7 +980,7 @@ def check_total_file_size(files: list[str], model_name: Optional[str] = None) ->
model_name: Model name for context-aware thresholds, or None for default
Returns:
Dict with MCP_CODE_TOO_LARGE response if too large, None if acceptable
Dict with `code_too_large` response if too large, None if acceptable
"""
if not files:
return None