Fix tests to work with effective auto mode changes

- Added autouse fixture to mock provider availability in tests
- Updated test expectations to match new auto mode behavior
- Fixed mock provider capabilities to return proper values
- Updated claude continuation tests to set default model
- All 256 tests now passing

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
@@ -72,3 +72,48 @@ def project_path(tmp_path):
|
||||
def pytest_configure(config):
    """Register the custom markers this test suite relies on.

    Declaring markers up front keeps pytest from warning about (or, under
    ``--strict-markers``, rejecting) tests that use them.
    """
    marker_definitions = (
        "asyncio: mark test as async",
        "no_mock_provider: disable automatic provider mocking",
    )
    for definition in marker_definitions:
        config.addinivalue_line("markers", definition)
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
def mock_provider_availability(request, monkeypatch):
    """
    Automatically mock provider availability for all tests to prevent
    effective auto mode from being triggered when DEFAULT_MODEL is unavailable.

    This fixture ensures that when tests run with dummy API keys,
    the tools don't require model selection unless explicitly testing auto mode.
    """
    # Tests that need real provider resolution opt out via the marker.
    if hasattr(request, "node") and request.node.get_closest_marker("no_mock_provider"):
        return

    from unittest.mock import MagicMock

    # Models that should always look unavailable to the registry.
    unavailable_models = ("unavailable-model", "gpt-5-turbo", "o3")
    # Common Gemini-style test models that must resolve to *some* provider.
    gemini_test_models = (
        "gemini-2.5-flash-preview-05-20",
        "gemini-2.5-pro-preview-06-05",
        "pro",
        "flash",
    )

    real_lookup = ModelProviderRegistry.get_provider_for_model

    def patched_lookup(model_name):
        # Tests probing unavailable models get None, as the real registry would return.
        if model_name in unavailable_models:
            return None
        if model_name in gemini_test_models:
            # Prefer a genuinely registered provider when one exists.
            existing = real_lookup(model_name)
            if existing:
                return existing
            # Otherwise hand back a mock whose capabilities carry concrete
            # values so token-budget / cost arithmetic in tools still works.
            fake_provider = MagicMock()
            fake_capabilities = MagicMock()
            fake_capabilities.context_window = 1000000  # 1M tokens for Gemini models
            fake_capabilities.supports_extended_thinking = False
            fake_capabilities.input_cost_per_1k = 0.075
            fake_capabilities.output_cost_per_1k = 0.3
            fake_provider.get_model_capabilities.return_value = fake_capabilities
            return fake_provider
        # Every other model falls through to the untouched registry logic.
        return real_lookup(model_name)

    monkeypatch.setattr(ModelProviderRegistry, "get_provider_for_model", patched_lookup)
|
||||
|
||||
@@ -88,6 +88,7 @@ class TestAutoMode:
|
||||
def test_tool_schema_in_normal_mode(self):
|
||||
"""Test that tool schemas don't require model in normal mode"""
|
||||
# This test uses the default from conftest.py which sets non-auto mode
|
||||
# The conftest.py mock_provider_availability fixture ensures the model is available
|
||||
tool = AnalyzeTool()
|
||||
schema = tool.get_input_schema()
|
||||
|
||||
|
||||
@@ -56,6 +56,8 @@ class TestClaudeContinuationOffers:
|
||||
|
||||
def setup_method(self):
    """Create a fresh continuation tool with a concrete default model."""
    tool = ClaudeContinuationTool()
    # Pin the default model so effective auto mode is never triggered.
    tool.default_model = "gemini-2.5-flash-preview-05-20"
    self.tool = tool
|
||||
|
||||
@patch("utils.conversation_memory.get_redis_client")
|
||||
@patch.dict("os.environ", {"PYTEST_CURRENT_TEST": ""}, clear=False)
|
||||
@@ -331,6 +333,8 @@ class TestContinuationIntegration:
|
||||
|
||||
def setup_method(self):
    """Create a fresh continuation tool with a concrete default model."""
    tool = ClaudeContinuationTool()
    # Pin the default model so effective auto mode is never triggered.
    tool.default_model = "gemini-2.5-flash-preview-05-20"
    self.tool = tool
|
||||
|
||||
@patch("utils.conversation_memory.get_redis_client")
|
||||
@patch.dict("os.environ", {"PYTEST_CURRENT_TEST": ""}, clear=False)
|
||||
|
||||
@@ -130,11 +130,8 @@ async def test_unknown_tool_defaults_to_prompt():
|
||||
with patch("utils.conversation_memory.get_thread", return_value=mock_context):
|
||||
with patch("utils.conversation_memory.add_turn", return_value=True):
|
||||
with patch("utils.conversation_memory.build_conversation_history", return_value=("History", 500)):
|
||||
with patch.dict(os.environ, {"GEMINI_API_KEY": "test-key", "OPENAI_API_KEY": ""}, clear=False):
|
||||
from providers.registry import ModelProviderRegistry
|
||||
|
||||
ModelProviderRegistry.clear_cache()
|
||||
|
||||
# The test uses the conftest fixture which should handle provider mocking
|
||||
# We just need to ensure the arguments are correct
|
||||
arguments = {
|
||||
"continuation_id": "test-thread-456",
|
||||
"prompt": "User input",
|
||||
|
||||
@@ -3,6 +3,8 @@
|
||||
import os
|
||||
from unittest.mock import Mock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from providers import ModelProviderRegistry, ModelResponse
|
||||
from providers.base import ProviderType
|
||||
from providers.gemini import GeminiModelProvider
|
||||
@@ -57,6 +59,7 @@ class TestModelProviderRegistry:
|
||||
assert provider is None
|
||||
|
||||
@patch.dict(os.environ, {"GEMINI_API_KEY": "test-key"})
|
||||
@pytest.mark.no_mock_provider
|
||||
def test_get_provider_for_model(self):
|
||||
"""Test getting provider for a specific model"""
|
||||
ModelProviderRegistry.register_provider(ProviderType.GOOGLE, GeminiModelProvider)
|
||||
|
||||
Reference in New Issue
Block a user