Fix all failing tests and pytest collection warnings
Fixed MagicMock comparison errors across multiple test suites by:
- Adding proper ModelCapabilities mocks with real values instead of MagicMock objects
- Updating test_auto_mode.py with correct provider mocking for model availability tests
- Updating test_thinking_modes.py with proper capabilities mocking in all thinking mode tests
- Updating test_tools.py with proper capabilities mocking for CodeReview and Analyze tools
- Fixing test_large_prompt_handling.py by adding proper provider mocking to prevent errors before large prompt detection

Fixed pytest collection warnings by:
- Renaming TestGenRequest to TestGenerationRequest to avoid pytest collecting it as a test class
- Renaming TestGenTool to TestGenerationTool to avoid pytest collecting it as a test class
- Updating all imports and references across server.py, tools/__init__.py, and test files

All 459 tests now pass without warnings or MagicMock comparison errors.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
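The core of the MagicMock fix is giving the mocked provider a real ModelCapabilities object so that numeric fields such as context_window can be compared without errors. Below is a minimal sketch of the pattern as it recurs in the test diffs that follow; the helper name make_capabilities_provider is illustrative only and is not part of this commit.

from unittest.mock import MagicMock

from providers.base import ModelCapabilities, ProviderType


def make_capabilities_provider(content: str = "ok") -> MagicMock:
    """Illustrative helper: a provider mock whose capabilities carry real values."""
    provider = MagicMock()
    provider.get_provider_type.return_value = MagicMock(value="google")
    provider.supports_thinking_mode.return_value = False
    # Real values here avoid comparison errors such as "MagicMock > int"
    provider.get_capabilities.return_value = ModelCapabilities(
        provider=ProviderType.GOOGLE,
        model_name="gemini-2.5-flash-preview-05-20",
        friendly_name="Test Model",
        context_window=1048576,
        supports_function_calling=True,
    )
    provider.generate_content.return_value = MagicMock(
        content=content, usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
    )
    return provider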
@@ -45,7 +45,7 @@ from tools import (
     DebugIssueTool,
     Precommit,
     RefactorTool,
-    TestGenTool,
+    TestGenerationTool,
     ThinkDeepTool,
     TracerTool,
 )
@@ -149,7 +149,7 @@ TOOLS = {
     "analyze": AnalyzeTool(),  # General-purpose file and code analysis
     "chat": ChatTool(),  # Interactive development chat and brainstorming
     "precommit": Precommit(),  # Pre-commit validation of git changes
-    "testgen": TestGenTool(),  # Comprehensive test generation with edge case coverage
+    "testgen": TestGenerationTool(),  # Comprehensive test generation with edge case coverage
     "refactor": RefactorTool(),  # Intelligent code refactoring suggestions with precise line references
     "tracer": TracerTool(),  # Static call path prediction and control flow analysis
 }
@@ -160,6 +160,7 @@ class TestAutoMode:
             patch("providers.registry.ModelProviderRegistry.get_provider_for_model") as mock_provider,
             patch("providers.registry.ModelProviderRegistry.get_available_models") as mock_available,
             patch.object(tool, "_get_available_models") as mock_tool_available,
+            patch("providers.registry.ModelProviderRegistry.is_model_available") as mock_is_available,
         ):

             # Mock that o3 is not available but actual available models are
@@ -198,6 +199,12 @@ class TestAutoMode:
             # Mock the tool's available models method to return the actual available models
             mock_tool_available.return_value = available_models

+            # Mock is_model_available to return False for o3 specifically
+            def mock_model_available(model_name):
+                return model_name != "o3" and model_name in available_models
+
+            mock_is_available.side_effect = mock_model_available
+
             # Execute with unavailable model
             result = await tool.execute(
                 {"files": ["/tmp/test.py"], "prompt": "Analyze this", "model": "o3"}  # This model is not available
@@ -149,18 +149,39 @@ class TestLargePromptHandling:
     @pytest.mark.asyncio
     async def test_codereview_large_focus(self, large_prompt):
         """Test that codereview tool detects large focus_on field."""
-        tool = CodeReviewTool()
-        result = await tool.execute(
-            {
-                "files": ["/some/file.py"],
-                "focus_on": large_prompt,
-                "prompt": "Test code review for validation purposes",
-            }
-        )
-
-        assert len(result) == 1
-        output = json.loads(result[0].text)
-        assert output["status"] == "resend_prompt"
+        from unittest.mock import MagicMock
+        from providers.base import ModelCapabilities, ProviderType
+
+        tool = CodeReviewTool()
+
+        # Mock provider to avoid MagicMock comparison errors that would prevent large prompt detection
+        with patch.object(tool, "get_model_provider") as mock_get_provider:
+            mock_provider = MagicMock()
+            mock_provider.get_provider_type.return_value = MagicMock(value="google")
+            mock_provider.supports_thinking_mode.return_value = False
+
+            # Set up proper capabilities to avoid MagicMock comparison errors
+            mock_capabilities = ModelCapabilities(
+                provider=ProviderType.GOOGLE,
+                model_name="gemini-2.5-flash-preview-05-20",
+                friendly_name="Test Model",
+                context_window=1048576,
+                supports_function_calling=True,
+            )
+            mock_provider.get_capabilities.return_value = mock_capabilities
+            mock_get_provider.return_value = mock_provider
+
+            result = await tool.execute(
+                {
+                    "files": ["/some/file.py"],
+                    "focus_on": large_prompt,
+                    "prompt": "Test code review for validation purposes",
+                }
+            )
+
+            assert len(result) == 1
+            output = json.loads(result[0].text)
+            assert output["status"] == "resend_prompt"

     @pytest.mark.asyncio
     async def test_review_changes_large_original_request(self, large_prompt):
@@ -8,7 +8,7 @@ from tools.codereview import CodeReviewTool
 from tools.debug import DebugIssueTool
 from tools.precommit import Precommit
 from tools.refactor import RefactorTool
-from tools.testgen import TestGenTool
+from tools.testgen import TestGenerationTool


 class TestLineNumbersIntegration:
@@ -22,7 +22,7 @@ class TestLineNumbersIntegration:
             CodeReviewTool(),
             DebugIssueTool(),
             RefactorTool(),
-            TestGenTool(),
+            TestGenerationTool(),
             Precommit(),
         ]

@@ -38,7 +38,7 @@ class TestLineNumbersIntegration:
             CodeReviewTool,
             DebugIssueTool,
             RefactorTool,
-            TestGenTool,
+            TestGenerationTool,
             Precommit,
         ]

@@ -10,7 +10,7 @@ from unittest.mock import Mock, patch
 import pytest

 from tests.mock_helpers import create_mock_provider
-from tools.testgen import TestGenRequest, TestGenTool
+from tools.testgen import TestGenerationRequest, TestGenerationTool


 class TestTestGenTool:
@@ -18,7 +18,7 @@ class TestTestGenTool:

     @pytest.fixture
     def tool(self):
-        return TestGenTool()
+        return TestGenerationTool()

     @pytest.fixture
     def temp_files(self):
@@ -39,134 +39,186 @@ class TestThinkingModes:
         ), f"{tool.__class__.__name__} should default to {expected_default}"

     @pytest.mark.asyncio
-    @patch("tools.base.BaseTool.get_model_provider")
-    async def test_thinking_mode_minimal(self, mock_get_provider):
+    async def test_thinking_mode_minimal(self):
         """Test minimal thinking mode"""
-        mock_provider = create_mock_provider()
-        mock_provider.get_provider_type.return_value = Mock(value="google")
-        mock_provider.supports_thinking_mode.return_value = True
-        mock_provider.generate_content.return_value = Mock(
-            content="Minimal thinking response", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
-        )
-        mock_get_provider.return_value = mock_provider
-
-        tool = AnalyzeTool()
-        result = await tool.execute(
-            {
-                "files": ["/absolute/path/test.py"],
-                "prompt": "What is this?",
-                "thinking_mode": "minimal",
-            }
-        )
-
-        # Verify create_model was called with correct thinking_mode
-        assert mock_get_provider.called
-        # Verify generate_content was called with thinking_mode
-        mock_provider.generate_content.assert_called_once()
-        call_kwargs = mock_provider.generate_content.call_args[1]
-        assert call_kwargs.get("thinking_mode") == "minimal" or (
-            not mock_provider.supports_thinking_mode.return_value and call_kwargs.get("thinking_mode") is None
-        )  # thinking_mode parameter
-
-        # Parse JSON response
-        import json
-
-        response_data = json.loads(result[0].text)
-        assert response_data["status"] == "success"
-        assert "Minimal thinking response" in response_data["content"] or "Analysis:" in response_data["content"]
+        from unittest.mock import MagicMock
+        from providers.base import ModelCapabilities, ProviderType
+
+        with patch("tools.base.BaseTool.get_model_provider") as mock_get_provider:
+            mock_provider = create_mock_provider()
+            mock_provider.get_provider_type.return_value = Mock(value="google")
+            mock_provider.supports_thinking_mode.return_value = True
+            mock_provider.generate_content.return_value = Mock(
+                content="Minimal thinking response", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
+            )
+
+            # Set up proper capabilities to avoid MagicMock comparison errors
+            mock_capabilities = ModelCapabilities(
+                provider=ProviderType.GOOGLE,
+                model_name="gemini-2.5-flash-preview-05-20",
+                friendly_name="Test Model",
+                context_window=1048576,
+                supports_function_calling=True,
+            )
+            mock_provider.get_capabilities.return_value = mock_capabilities
+            mock_get_provider.return_value = mock_provider
+
+            tool = AnalyzeTool()
+            result = await tool.execute(
+                {
+                    "files": ["/absolute/path/test.py"],
+                    "prompt": "What is this?",
+                    "thinking_mode": "minimal",
+                }
+            )
+
+            # Verify create_model was called with correct thinking_mode
+            assert mock_get_provider.called
+            # Verify generate_content was called with thinking_mode
+            mock_provider.generate_content.assert_called_once()
+            call_kwargs = mock_provider.generate_content.call_args[1]
+            assert call_kwargs.get("thinking_mode") == "minimal" or (
+                not mock_provider.supports_thinking_mode.return_value and call_kwargs.get("thinking_mode") is None
+            )  # thinking_mode parameter
+
+            # Parse JSON response
+            import json
+
+            response_data = json.loads(result[0].text)
+            assert response_data["status"] == "success"
+            assert "Minimal thinking response" in response_data["content"] or "Analysis:" in response_data["content"]

     @pytest.mark.asyncio
-    @patch("tools.base.BaseTool.get_model_provider")
-    async def test_thinking_mode_low(self, mock_get_provider):
+    async def test_thinking_mode_low(self):
         """Test low thinking mode"""
-        mock_provider = create_mock_provider()
-        mock_provider.get_provider_type.return_value = Mock(value="google")
-        mock_provider.supports_thinking_mode.return_value = True
-        mock_provider.generate_content.return_value = Mock(
-            content="Low thinking response", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
-        )
-        mock_get_provider.return_value = mock_provider
-
-        tool = CodeReviewTool()
-        result = await tool.execute(
-            {
-                "files": ["/absolute/path/test.py"],
-                "thinking_mode": "low",
-                "prompt": "Test code review for validation purposes",
-            }
-        )
-
-        # Verify create_model was called with correct thinking_mode
-        assert mock_get_provider.called
-        # Verify generate_content was called with thinking_mode
-        mock_provider.generate_content.assert_called_once()
-        call_kwargs = mock_provider.generate_content.call_args[1]
-        assert call_kwargs.get("thinking_mode") == "low" or (
-            not mock_provider.supports_thinking_mode.return_value and call_kwargs.get("thinking_mode") is None
-        )
-
-        assert "Low thinking response" in result[0].text or "Code Review" in result[0].text
+        from unittest.mock import MagicMock
+        from providers.base import ModelCapabilities, ProviderType
+
+        with patch("tools.base.BaseTool.get_model_provider") as mock_get_provider:
+            mock_provider = create_mock_provider()
+            mock_provider.get_provider_type.return_value = Mock(value="google")
+            mock_provider.supports_thinking_mode.return_value = True
+            mock_provider.generate_content.return_value = Mock(
+                content="Low thinking response", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
+            )
+
+            # Set up proper capabilities to avoid MagicMock comparison errors
+            mock_capabilities = ModelCapabilities(
+                provider=ProviderType.GOOGLE,
+                model_name="gemini-2.5-flash-preview-05-20",
+                friendly_name="Test Model",
+                context_window=1048576,
+                supports_function_calling=True,
+            )
+            mock_provider.get_capabilities.return_value = mock_capabilities
+            mock_get_provider.return_value = mock_provider
+
+            tool = CodeReviewTool()
+            result = await tool.execute(
+                {
+                    "files": ["/absolute/path/test.py"],
+                    "thinking_mode": "low",
+                    "prompt": "Test code review for validation purposes",
+                }
+            )
+
+            # Verify create_model was called with correct thinking_mode
+            assert mock_get_provider.called
+            # Verify generate_content was called with thinking_mode
+            mock_provider.generate_content.assert_called_once()
+            call_kwargs = mock_provider.generate_content.call_args[1]
+            assert call_kwargs.get("thinking_mode") == "low" or (
+                not mock_provider.supports_thinking_mode.return_value and call_kwargs.get("thinking_mode") is None
+            )
+
+            assert "Low thinking response" in result[0].text or "Code Review" in result[0].text

     @pytest.mark.asyncio
-    @patch("tools.base.BaseTool.get_model_provider")
-    async def test_thinking_mode_medium(self, mock_get_provider):
+    async def test_thinking_mode_medium(self):
         """Test medium thinking mode (default for most tools)"""
-        mock_provider = create_mock_provider()
-        mock_provider.get_provider_type.return_value = Mock(value="google")
-        mock_provider.supports_thinking_mode.return_value = True
-        mock_provider.generate_content.return_value = Mock(
-            content="Medium thinking response", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
-        )
-        mock_get_provider.return_value = mock_provider
-
-        tool = DebugIssueTool()
-        result = await tool.execute(
-            {
-                "prompt": "Test error",
-                # Not specifying thinking_mode, should use default (medium)
-            }
-        )
-
-        # Verify create_model was called with default thinking_mode
-        assert mock_get_provider.called
-        # Verify generate_content was called with thinking_mode
-        mock_provider.generate_content.assert_called_once()
-        call_kwargs = mock_provider.generate_content.call_args[1]
-        assert call_kwargs.get("thinking_mode") == "medium" or (
-            not mock_provider.supports_thinking_mode.return_value and call_kwargs.get("thinking_mode") is None
-        )
-
-        assert "Medium thinking response" in result[0].text or "Debug Analysis" in result[0].text
+        from unittest.mock import MagicMock
+        from providers.base import ModelCapabilities, ProviderType
+
+        with patch("tools.base.BaseTool.get_model_provider") as mock_get_provider:
+            mock_provider = create_mock_provider()
+            mock_provider.get_provider_type.return_value = Mock(value="google")
+            mock_provider.supports_thinking_mode.return_value = True
+            mock_provider.generate_content.return_value = Mock(
+                content="Medium thinking response", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
+            )
+
+            # Set up proper capabilities to avoid MagicMock comparison errors
+            mock_capabilities = ModelCapabilities(
+                provider=ProviderType.GOOGLE,
+                model_name="gemini-2.5-flash-preview-05-20",
+                friendly_name="Test Model",
+                context_window=1048576,
+                supports_function_calling=True,
+            )
+            mock_provider.get_capabilities.return_value = mock_capabilities
+            mock_get_provider.return_value = mock_provider
+
+            tool = DebugIssueTool()
+            result = await tool.execute(
+                {
+                    "prompt": "Test error",
+                    # Not specifying thinking_mode, should use default (medium)
+                }
+            )
+
+            # Verify create_model was called with default thinking_mode
+            assert mock_get_provider.called
+            # Verify generate_content was called with thinking_mode
+            mock_provider.generate_content.assert_called_once()
+            call_kwargs = mock_provider.generate_content.call_args[1]
+            assert call_kwargs.get("thinking_mode") == "medium" or (
+                not mock_provider.supports_thinking_mode.return_value and call_kwargs.get("thinking_mode") is None
+            )
+
+            assert "Medium thinking response" in result[0].text or "Debug Analysis" in result[0].text

     @pytest.mark.asyncio
-    @patch("tools.base.BaseTool.get_model_provider")
-    async def test_thinking_mode_high(self, mock_get_provider):
+    async def test_thinking_mode_high(self):
         """Test high thinking mode"""
-        mock_provider = create_mock_provider()
-        mock_provider.get_provider_type.return_value = Mock(value="google")
-        mock_provider.supports_thinking_mode.return_value = True
-        mock_provider.generate_content.return_value = Mock(
-            content="High thinking response", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
-        )
-        mock_get_provider.return_value = mock_provider
-
-        tool = AnalyzeTool()
-        await tool.execute(
-            {
-                "files": ["/absolute/path/complex.py"],
-                "prompt": "Analyze architecture",
-                "thinking_mode": "high",
-            }
-        )
-
-        # Verify create_model was called with correct thinking_mode
-        assert mock_get_provider.called
-        # Verify generate_content was called with thinking_mode
-        mock_provider.generate_content.assert_called_once()
-        call_kwargs = mock_provider.generate_content.call_args[1]
-        assert call_kwargs.get("thinking_mode") == "high" or (
-            not mock_provider.supports_thinking_mode.return_value and call_kwargs.get("thinking_mode") is None
-        )
+        from unittest.mock import MagicMock
+        from providers.base import ModelCapabilities, ProviderType
+
+        with patch("tools.base.BaseTool.get_model_provider") as mock_get_provider:
+            mock_provider = create_mock_provider()
+            mock_provider.get_provider_type.return_value = Mock(value="google")
+            mock_provider.supports_thinking_mode.return_value = True
+            mock_provider.generate_content.return_value = Mock(
+                content="High thinking response", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
+            )
+
+            # Set up proper capabilities to avoid MagicMock comparison errors
+            mock_capabilities = ModelCapabilities(
+                provider=ProviderType.GOOGLE,
+                model_name="gemini-2.5-flash-preview-05-20",
+                friendly_name="Test Model",
+                context_window=1048576,
+                supports_function_calling=True,
+            )
+            mock_provider.get_capabilities.return_value = mock_capabilities
+            mock_get_provider.return_value = mock_provider
+
+            tool = AnalyzeTool()
+            await tool.execute(
+                {
+                    "files": ["/absolute/path/complex.py"],
+                    "prompt": "Analyze architecture",
+                    "thinking_mode": "high",
+                }
+            )
+
+            # Verify create_model was called with correct thinking_mode
+            assert mock_get_provider.called
+            # Verify generate_content was called with thinking_mode
+            mock_provider.generate_content.assert_called_once()
+            call_kwargs = mock_provider.generate_content.call_args[1]
+            assert call_kwargs.get("thinking_mode") == "high" or (
+                not mock_provider.supports_thinking_mode.return_value and call_kwargs.get("thinking_mode") is None
+            )

     @pytest.mark.asyncio
     @patch("tools.base.BaseTool.get_model_provider")
@@ -76,35 +76,48 @@ class TestCodeReviewTool:
         assert schema["required"] == ["files", "prompt"]

     @pytest.mark.asyncio
-    @patch("tools.base.BaseTool.get_model_provider")
-    async def test_execute_with_review_type(self, mock_get_provider, tool, tmp_path):
+    async def test_execute_with_review_type(self, tool, tmp_path):
         """Test execution with specific review type"""
+        from unittest.mock import MagicMock
+        from providers.base import ModelCapabilities, ProviderType
+
         # Create test file
         test_file = tmp_path / "test.py"
         test_file.write_text("def insecure(): pass", encoding="utf-8")

-        # Mock provider
-        mock_provider = create_mock_provider()
-        mock_provider.get_provider_type.return_value = Mock(value="google")
-        mock_provider.supports_thinking_mode.return_value = False
-        mock_provider.generate_content.return_value = Mock(
-            content="Security issues found", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
-        )
-        mock_get_provider.return_value = mock_provider
-
-        result = await tool.execute(
-            {
-                "files": [str(test_file)],
-                "review_type": "security",
-                "focus_on": "authentication",
-                "prompt": "Test code review for validation purposes",
-            }
-        )
-
-        assert len(result) == 1
-        assert "Security issues found" in result[0].text
-        assert "Claude's Next Steps:" in result[0].text
-        assert "Security issues found" in result[0].text
+        with patch("tools.base.BaseTool.get_model_provider") as mock_get_provider:
+            # Mock provider
+            mock_provider = create_mock_provider()
+            mock_provider.get_provider_type.return_value = Mock(value="google")
+            mock_provider.supports_thinking_mode.return_value = False
+            mock_provider.generate_content.return_value = Mock(
+                content="Security issues found", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
+            )
+
+            # Set up proper capabilities to avoid MagicMock comparison errors
+            mock_capabilities = ModelCapabilities(
+                provider=ProviderType.GOOGLE,
+                model_name="gemini-2.5-flash-preview-05-20",
+                friendly_name="Test Model",
+                context_window=1048576,
+                supports_function_calling=True,
+            )
+            mock_provider.get_capabilities.return_value = mock_capabilities
+            mock_get_provider.return_value = mock_provider
+
+            result = await tool.execute(
+                {
+                    "files": [str(test_file)],
+                    "review_type": "security",
+                    "focus_on": "authentication",
+                    "prompt": "Test code review for validation purposes",
+                }
+            )
+
+            assert len(result) == 1
+            assert "Security issues found" in result[0].text
+            assert "Claude's Next Steps:" in result[0].text
+            assert "Security issues found" in result[0].text


 class TestDebugIssueTool:
@@ -169,35 +182,48 @@ class TestAnalyzeTool:
         assert set(schema["required"]) == {"files", "prompt"}

     @pytest.mark.asyncio
-    @patch("tools.base.BaseTool.get_model_provider")
-    async def test_execute_with_analysis_type(self, mock_get_provider, tool, tmp_path):
+    async def test_execute_with_analysis_type(self, tool, tmp_path):
         """Test execution with specific analysis type"""
+        from unittest.mock import MagicMock
+        from providers.base import ModelCapabilities, ProviderType
+
         # Create test file
         test_file = tmp_path / "module.py"
         test_file.write_text("class Service: pass", encoding="utf-8")

-        # Mock provider
-        mock_provider = create_mock_provider()
-        mock_provider.get_provider_type.return_value = Mock(value="google")
-        mock_provider.supports_thinking_mode.return_value = False
-        mock_provider.generate_content.return_value = Mock(
-            content="Architecture analysis", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
-        )
-        mock_get_provider.return_value = mock_provider
-
-        result = await tool.execute(
-            {
-                "files": [str(test_file)],
-                "prompt": "What's the structure?",
-                "analysis_type": "architecture",
-                "output_format": "summary",
-            }
-        )
-
-        assert len(result) == 1
-        assert "Architecture analysis" in result[0].text
-        assert "Next Steps:" in result[0].text
-        assert "Architecture analysis" in result[0].text
+        with patch("tools.base.BaseTool.get_model_provider") as mock_get_provider:
+            # Mock provider
+            mock_provider = create_mock_provider()
+            mock_provider.get_provider_type.return_value = Mock(value="google")
+            mock_provider.supports_thinking_mode.return_value = False
+            mock_provider.generate_content.return_value = Mock(
+                content="Architecture analysis", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
+            )
+
+            # Set up proper capabilities to avoid MagicMock comparison errors
+            mock_capabilities = ModelCapabilities(
+                provider=ProviderType.GOOGLE,
+                model_name="gemini-2.5-flash-preview-05-20",
+                friendly_name="Test Model",
+                context_window=1048576,
+                supports_function_calling=True,
+            )
+            mock_provider.get_capabilities.return_value = mock_capabilities
+            mock_get_provider.return_value = mock_provider
+
+            result = await tool.execute(
+                {
+                    "files": [str(test_file)],
+                    "prompt": "What's the structure?",
+                    "analysis_type": "architecture",
+                    "output_format": "summary",
+                }
+            )
+
+            assert len(result) == 1
+            assert "Architecture analysis" in result[0].text
+            assert "Next Steps:" in result[0].text
+            assert "Architecture analysis" in result[0].text


 class TestAbsolutePathValidation:
@@ -287,9 +313,9 @@ class TestAbsolutePathValidation:
     @pytest.mark.asyncio
     async def test_testgen_tool_relative_path_rejected(self):
         """Test that testgen tool rejects relative paths"""
-        from tools import TestGenTool
+        from tools import TestGenerationTool

-        tool = TestGenTool()
+        tool = TestGenerationTool()
         result = await tool.execute(
             {"files": ["src/main.py"], "prompt": "Generate tests for the functions"}  # relative path
         )
@@ -301,26 +327,39 @@ class TestAbsolutePathValidation:
         assert "src/main.py" in response["content"]

     @pytest.mark.asyncio
-    @patch("tools.AnalyzeTool.get_model_provider")
-    async def test_analyze_tool_accepts_absolute_paths(self, mock_get_provider):
+    async def test_analyze_tool_accepts_absolute_paths(self):
         """Test that analyze tool accepts absolute paths"""
+        from unittest.mock import MagicMock
+        from providers.base import ModelCapabilities, ProviderType
+
         tool = AnalyzeTool()

-        # Mock provider
-        mock_provider = create_mock_provider()
-        mock_provider.get_provider_type.return_value = Mock(value="google")
-        mock_provider.supports_thinking_mode.return_value = False
-        mock_provider.generate_content.return_value = Mock(
-            content="Analysis complete", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
-        )
-        mock_get_provider.return_value = mock_provider
-
-        result = await tool.execute({"files": ["/absolute/path/file.py"], "prompt": "What does this do?"})
-
-        assert len(result) == 1
-        response = json.loads(result[0].text)
-        assert response["status"] == "success"
-        assert "Analysis complete" in response["content"]
+        with patch("tools.AnalyzeTool.get_model_provider") as mock_get_provider:
+            # Mock provider
+            mock_provider = create_mock_provider()
+            mock_provider.get_provider_type.return_value = Mock(value="google")
+            mock_provider.supports_thinking_mode.return_value = False
+            mock_provider.generate_content.return_value = Mock(
+                content="Analysis complete", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
+            )
+
+            # Set up proper capabilities to avoid MagicMock comparison errors
+            mock_capabilities = ModelCapabilities(
+                provider=ProviderType.GOOGLE,
+                model_name="gemini-2.5-flash-preview-05-20",
+                friendly_name="Test Model",
+                context_window=1048576,
+                supports_function_calling=True,
+            )
+            mock_provider.get_capabilities.return_value = mock_capabilities
+            mock_get_provider.return_value = mock_provider
+
+            result = await tool.execute({"files": ["/absolute/path/file.py"], "prompt": "What does this do?"})
+
+            assert len(result) == 1
+            response = json.loads(result[0].text)
+            assert response["status"] == "success"
+            assert "Analysis complete" in response["content"]


 class TestSpecialStatusModels:
@@ -8,7 +8,7 @@ from .codereview import CodeReviewTool
 from .debug import DebugIssueTool
 from .precommit import Precommit
 from .refactor import RefactorTool
-from .testgen import TestGenTool
+from .testgen import TestGenerationTool
 from .thinkdeep import ThinkDeepTool
 from .tracer import TracerTool

@@ -20,6 +20,6 @@ __all__ = [
     "ChatTool",
     "Precommit",
     "RefactorTool",
-    "TestGenTool",
+    "TestGenerationTool",
     "TracerTool",
 ]
@@ -962,7 +962,7 @@ When recommending searches, be specific about what information you need and why
         files: List of file paths to check

     Returns:
-        Dict with MCP_CODE_TOO_LARGE response if too large, None if acceptable
+        Dict with `code_too_large` response if too large, None if acceptable
     """
     if not files:
         return None
@@ -28,7 +28,7 @@ from .base import BaseTool, ToolRequest
 logger = logging.getLogger(__name__)


-class TestGenRequest(ToolRequest):
+class TestGenerationRequest(ToolRequest):
     """
     Request model for the test generation tool.

@@ -56,7 +56,7 @@ class TestGenRequest(ToolRequest):
     )


-class TestGenTool(BaseTool):
+class TestGenerationTool(BaseTool):
     """
     Test generation tool implementation.

@@ -141,7 +141,7 @@ class TestGenTool(BaseTool):
         return ToolModelCategory.EXTENDED_REASONING

     def get_request_model(self):
-        return TestGenRequest
+        return TestGenerationRequest

     def _process_test_examples(
         self, test_examples: list[str], continuation_id: Optional[str], available_tokens: int = None
@@ -246,7 +246,7 @@ class TestGenTool(BaseTool):
             logger.error(f"[TESTGEN] Failed to process test examples: {type(e).__name__}: {e}")
             return "", f"Warning: Could not process test examples: {str(e)}"

-    async def prepare_prompt(self, request: TestGenRequest) -> str:
+    async def prepare_prompt(self, request: TestGenerationRequest) -> str:
         """
         Prepare the test generation prompt with code analysis and optional test examples.

@@ -980,7 +980,7 @@ def check_total_file_size(files: list[str], model_name: Optional[str] = None) ->
         model_name: Model name for context-aware thresholds, or None for default

     Returns:
-        Dict with MCP_CODE_TOO_LARGE response if too large, None if acceptable
+        Dict with `code_too_large` response if too large, None if acceptable
     """
     if not files:
         return None