refactor: remove supports_thinking_mode from provider; use model capabilities instead
refactor: clean up temperature factory method (create_temperature_constraint → TemperatureConstraint.create)
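Both refactors follow one pattern: callers stop asking the provider object directly and instead read flags from the `ModelCapabilities` it returns, and the module-level `create_temperature_constraint()` factory becomes the `TemperatureConstraint.create()` classmethod. A minimal sketch of the migration, assuming only the calls visible in this diff (`validate_model_name`, `get_capabilities`, `supports_extended_thinking`, `TemperatureConstraint.create`); the `supports_thinking` helper itself is hypothetical, not part of the codebase:

```python
from providers.shared import TemperatureConstraint

def supports_thinking(provider, model_name: str) -> bool:
    """Hypothetical stand-in for the removed provider.supports_thinking_mode()."""
    # Reject unknown model names up front instead of probing their capabilities.
    if not provider.validate_model_name(model_name):
        return False
    # The flag now lives on the ModelCapabilities object.
    return provider.get_capabilities(model_name).supports_extended_thinking

# Old: temperature_constraint=create_temperature_constraint("fixed")
# New: the factory is a classmethod on the constraint type.
fixed_constraint = TemperatureConstraint.create("fixed")
```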
@@ -25,7 +25,6 @@ def create_mock_provider(model_name="gemini-2.5-flash", context_window=1_048_576
     mock_provider.get_capabilities.return_value = mock_capabilities
     mock_provider.get_provider_type.return_value = ProviderType.GOOGLE
-    mock_provider.supports_thinking_mode.return_value = False
     mock_provider.validate_model_name.return_value = True
 
     # Set up generate_content response
 
@@ -40,7 +40,6 @@ class TestDynamicContextRequests:
 
         mock_provider = create_mock_provider()
         mock_provider.get_provider_type.return_value = Mock(value="google")
-        mock_provider.supports_thinking_mode.return_value = False
         mock_provider.generate_content.return_value = Mock(
             content=clarification_json, usage={}, model_name="gemini-2.5-flash", metadata={}
         )
@@ -122,7 +121,6 @@ class TestDynamicContextRequests:
 
         mock_provider = create_mock_provider()
         mock_provider.get_provider_type.return_value = Mock(value="google")
-        mock_provider.supports_thinking_mode.return_value = False
         mock_provider.generate_content.return_value = Mock(
             content=malformed_json, usage={}, model_name="gemini-2.5-flash", metadata={}
         )
@@ -181,7 +179,6 @@ class TestDynamicContextRequests:
 
         mock_provider = create_mock_provider()
         mock_provider.get_provider_type.return_value = Mock(value="google")
-        mock_provider.supports_thinking_mode.return_value = False
         mock_provider.generate_content.return_value = Mock(
             content=clarification_json, usage={}, model_name="gemini-2.5-flash", metadata={}
         )
@@ -347,7 +344,6 @@ class TestCollaborationWorkflow:
 
         mock_provider = create_mock_provider()
         mock_provider.get_provider_type.return_value = Mock(value="google")
-        mock_provider.supports_thinking_mode.return_value = False
         mock_provider.generate_content.return_value = Mock(
             content=clarification_json, usage={}, model_name="gemini-2.5-flash", metadata={}
         )
@@ -414,7 +410,6 @@ class TestCollaborationWorkflow:
 
         mock_provider = create_mock_provider()
         mock_provider.get_provider_type.return_value = Mock(value="google")
-        mock_provider.supports_thinking_mode.return_value = False
         mock_provider.generate_content.return_value = Mock(
             content=clarification_json, usage={}, model_name="gemini-2.5-flash", metadata={}
         )
@@ -86,7 +86,7 @@ class TestCustomOpenAITemperatureParameterFix:
         mock_registry_class.return_value = mock_registry
 
         # Mock get_model_config to return our test model
-        from providers.shared import ModelCapabilities, ProviderType, create_temperature_constraint
+        from providers.shared import ModelCapabilities, ProviderType, TemperatureConstraint
 
         test_capabilities = ModelCapabilities(
             provider=ProviderType.OPENAI,
@@ -102,7 +102,7 @@ class TestCustomOpenAITemperatureParameterFix:
|
||||
supports_images=True,
|
||||
max_image_size_mb=20.0,
|
||||
supports_temperature=False, # This is the key setting
|
||||
temperature_constraint=create_temperature_constraint("fixed"),
|
||||
temperature_constraint=TemperatureConstraint.create("fixed"),
|
||||
description="Custom OpenAI GPT-5 test model",
|
||||
)
|
||||
|
||||
@@ -170,7 +170,7 @@ class TestCustomOpenAITemperatureParameterFix:
         mock_registry_class.return_value = mock_registry
 
         # Mock get_model_config to return a model that supports temperature
-        from providers.shared import ModelCapabilities, ProviderType, create_temperature_constraint
+        from providers.shared import ModelCapabilities, ProviderType, TemperatureConstraint
 
         test_capabilities = ModelCapabilities(
             provider=ProviderType.OPENAI,
@@ -186,7 +186,7 @@ class TestCustomOpenAITemperatureParameterFix:
             supports_images=True,
             max_image_size_mb=20.0,
             supports_temperature=True,  # This model DOES support temperature
-            temperature_constraint=create_temperature_constraint("range"),
+            temperature_constraint=TemperatureConstraint.create("range"),
             description="Custom OpenAI GPT-4 test model",
         )
 
@@ -227,7 +227,7 @@ class TestCustomOpenAITemperatureParameterFix:
         mock_registry = Mock()
         mock_registry_class.return_value = mock_registry
 
-        from providers.shared import ModelCapabilities, ProviderType, create_temperature_constraint
+        from providers.shared import ModelCapabilities, ProviderType, TemperatureConstraint
 
         test_capabilities = ModelCapabilities(
             provider=ProviderType.OPENAI,
@@ -243,7 +243,7 @@ class TestCustomOpenAITemperatureParameterFix:
             supports_images=True,
             max_image_size_mb=20.0,
             supports_temperature=False,
-            temperature_constraint=create_temperature_constraint("fixed"),
+            temperature_constraint=TemperatureConstraint.create("fixed"),
             description="Custom OpenAI O3 test model",
         )
 
@@ -99,11 +99,11 @@ class TestCustomProvider:
         assert resolved_local == "llama3.2"
 
     def test_no_thinking_mode_support(self):
-        """Test CustomProvider doesn't support thinking mode."""
+        """Custom provider generic capabilities default to no thinking mode."""
         provider = CustomProvider(api_key="test-key", base_url="http://localhost:11434/v1")
 
-        assert not provider.supports_thinking_mode("llama3.2")
-        assert not provider.supports_thinking_mode("any-model")
+        assert not provider.get_capabilities("llama3.2").supports_extended_thinking
+        assert not provider.get_capabilities("any-model").supports_extended_thinking
 
     @patch("providers.custom.OpenAICompatibleProvider.generate_content")
     def test_generate_content_with_alias_resolution(self, mock_generate):
@@ -43,10 +43,6 @@ class MinimalTestProvider(ModelProvider):
         """Not needed for image validation tests."""
         raise NotImplementedError("Not needed for image validation tests")
 
-    def supports_thinking_mode(self, model_name: str) -> bool:
-        """Not needed for image validation tests."""
-        raise NotImplementedError("Not needed for image validation tests")
-
 
 class TestImageValidation:
     """Test suite for image validation functionality."""
@@ -41,7 +41,7 @@ def test_issue_245_custom_openai_temperature_ignored():
     mock_registry = Mock()
     mock_registry_class.return_value = mock_registry
 
-    from providers.shared import ModelCapabilities, ProviderType, create_temperature_constraint
+    from providers.shared import ModelCapabilities, ProviderType, TemperatureConstraint
 
     # This is what the user configured in their custom_models.json
     custom_config = ModelCapabilities(
@@ -56,7 +56,7 @@ def test_issue_245_custom_openai_temperature_ignored():
         supports_streaming=True,
         supports_function_calling=True,
         supports_temperature=False,  # User set this to false!
-        temperature_constraint=create_temperature_constraint("fixed"),
+        temperature_constraint=TemperatureConstraint.create("fixed"),
         supports_images=True,
         max_image_size_mb=20.0,
         description="Custom OpenAI GPT-5",
@@ -244,7 +244,7 @@ class TestLargePromptHandling:
         with patch.object(tool, "get_model_provider") as mock_get_provider:
             mock_provider = MagicMock()
             mock_provider.get_provider_type.return_value = MagicMock(value="google")
-            mock_provider.supports_thinking_mode.return_value = False
+            mock_provider.get_capabilities.return_value = MagicMock(supports_extended_thinking=False)
             mock_provider.generate_content.return_value = MagicMock(
                 content="Success",
                 usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
@@ -287,7 +287,7 @@ class TestLargePromptHandling:
         with patch.object(tool, "get_model_provider") as mock_get_provider:
             mock_provider = MagicMock()
             mock_provider.get_provider_type.return_value = MagicMock(value="google")
-            mock_provider.supports_thinking_mode.return_value = False
+            mock_provider.get_capabilities.return_value = MagicMock(supports_extended_thinking=False)
             mock_provider.generate_content.return_value = MagicMock(
                 content="Response to the large prompt",
                 usage={"input_tokens": 12000, "output_tokens": 10, "total_tokens": 12010},
@@ -319,7 +319,7 @@ class TestLargePromptHandling:
         with patch.object(tool, "get_model_provider") as mock_get_provider:
             mock_provider = MagicMock()
             mock_provider.get_provider_type.return_value = MagicMock(value="google")
-            mock_provider.supports_thinking_mode.return_value = False
+            mock_provider.get_capabilities.return_value = MagicMock(supports_extended_thinking=False)
             mock_provider.generate_content.return_value = MagicMock(
                 content="Success",
                 usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
@@ -252,33 +252,31 @@ class TestOpenAIProvider:
         call_kwargs = mock_client.chat.completions.create.call_args[1]
         assert call_kwargs["model"] == "o3-mini"  # Should be unchanged
 
-    def test_supports_thinking_mode(self):
-        """Test thinking mode support based on model capabilities."""
+    def test_extended_thinking_capabilities(self):
+        """Thinking-mode support should be reflected via ModelCapabilities."""
         provider = OpenAIModelProvider("test-key")
 
-        # GPT-5 models support thinking mode (reasoning tokens) - all variants
-        assert provider.supports_thinking_mode("gpt-5") is True
-        assert provider.supports_thinking_mode("gpt-5-mini") is True
-        assert provider.supports_thinking_mode("gpt-5-nano") is True  # Now included
+        supported_aliases = [
+            "gpt-5",
+            "gpt-5-mini",
+            "gpt-5-nano",
+            "gpt5",
+            "gpt5-mini",
+            "gpt5mini",
+            "gpt5-nano",
+            "gpt5nano",
+            "nano",
+            "mini",  # resolves to gpt-5-mini
+        ]
+        for alias in supported_aliases:
+            assert provider.get_capabilities(alias).supports_extended_thinking is True
 
-        # Test GPT-5 aliases
-        assert provider.supports_thinking_mode("gpt5") is True
-        assert provider.supports_thinking_mode("gpt5-mini") is True
-        assert provider.supports_thinking_mode("gpt5mini") is True
-        assert provider.supports_thinking_mode("gpt5-nano") is True
-        assert provider.supports_thinking_mode("gpt5nano") is True
-        assert provider.supports_thinking_mode("nano") is True  # New alias for gpt-5-nano
+        unsupported_aliases = ["o3", "o3-mini", "o4-mini"]
+        for alias in unsupported_aliases:
+            assert provider.get_capabilities(alias).supports_extended_thinking is False
 
-        # O3/O4 models don't support thinking mode
-        assert provider.supports_thinking_mode("o3") is False
-        assert provider.supports_thinking_mode("o3-mini") is False
-        assert provider.supports_thinking_mode("o4-mini") is False
-        assert (
-            provider.supports_thinking_mode("mini") is True
-        )  # "mini" now resolves to gpt-5-mini which supports thinking
-
-        # Test invalid model name
-        assert provider.supports_thinking_mode("invalid-model") is False
+        # Invalid models should not validate, treat as unsupported
+        assert not provider.validate_model_name("invalid-model")
 
     @patch("providers.openai_compatible.OpenAI")
     def test_o3_pro_routes_to_responses_endpoint(self, mock_openai_class):
@@ -213,7 +213,7 @@ class TestOpenRouterModelRegistry:
 
     def test_model_with_all_capabilities(self):
         """Test model with all capability flags."""
-        from providers.shared import create_temperature_constraint
+        from providers.shared import TemperatureConstraint
 
         caps = ModelCapabilities(
             provider=ProviderType.OPENROUTER,
@@ -228,7 +228,7 @@ class TestOpenRouterModelRegistry:
             supports_function_calling=True,
             supports_json_mode=True,
             description="Fully featured test model",
-            temperature_constraint=create_temperature_constraint("range"),
+            temperature_constraint=TemperatureConstraint.create("range"),
         )
         assert caps.context_window == 128000
         assert caps.supports_extended_thinking
@@ -120,13 +120,6 @@ class TestGeminiProvider:
         capabilities = provider.get_capabilities("flash")
         assert capabilities.model_name == "gemini-2.5-flash"
 
-    def test_supports_thinking_mode(self):
-        """Test thinking mode support detection"""
-        provider = GeminiModelProvider(api_key="test-key")
-
-        assert provider.supports_thinking_mode("gemini-2.5-flash")
-        assert provider.supports_thinking_mode("gemini-2.5-pro")
-
     @patch("google.genai.Client")
     def test_generate_content(self, mock_client_class):
         """Test content generation"""
@@ -219,12 +212,10 @@ class TestOpenAIProvider:
         assert not provider.validate_model_name("gpt-4o")
         assert not provider.validate_model_name("invalid-model")
 
-    def test_no_thinking_mode_support(self):
-        """Test that no OpenAI models support thinking mode"""
+    def test_openai_models_do_not_support_extended_thinking(self):
+        """OpenAI catalogue exposes extended thinking capability via ModelCapabilities."""
         provider = OpenAIModelProvider(api_key="test-key")
 
-        assert not provider.supports_thinking_mode("o3")
-        assert not provider.supports_thinking_mode("o3mini")
-        assert not provider.supports_thinking_mode("o3-mini")
-        assert not provider.supports_thinking_mode("o4-mini")
-        assert not provider.supports_thinking_mode("o4-mini")
+        aliases = ["o3", "o3mini", "o3-mini", "o4-mini", "o4mini"]
+        for alias in aliases:
+            assert not provider.get_capabilities(alias).supports_extended_thinking
@@ -75,7 +75,7 @@ class TestWorkflowToolsUTF8(unittest.IsolatedAsyncioTestCase):
         # Mock provider with more complete setup (same as codereview test)
         mock_provider = Mock()
         mock_provider.get_provider_type.return_value = Mock(value="test")
-        mock_provider.supports_thinking_mode.return_value = False
+        mock_provider.get_capabilities.return_value = Mock(supports_extended_thinking=False)
         mock_provider.generate_content = AsyncMock(
             return_value=Mock(
                 content=json.dumps(
@@ -93,6 +93,7 @@ class TestWorkflowToolsUTF8(unittest.IsolatedAsyncioTestCase):
         # Use the same provider for both contexts
         mock_get_provider.return_value = mock_provider
         mock_context_instance.provider = mock_provider
+        mock_context_instance.capabilities = Mock(supports_extended_thinking=False)
         mock_model_context.return_value = mock_context_instance
 
         # Test the tool
@@ -131,7 +132,7 @@ class TestWorkflowToolsUTF8(unittest.IsolatedAsyncioTestCase):
         # Mock with analysis in French
         mock_provider = Mock()
         mock_provider.get_provider_type.return_value = Mock(value="test")
-        mock_provider.supports_thinking_mode.return_value = False
+        mock_provider.get_capabilities.return_value = Mock(supports_extended_thinking=False)
         mock_provider.generate_content = AsyncMock(
             return_value=Mock(
                 content=json.dumps(
@@ -204,7 +205,7 @@ class TestWorkflowToolsUTF8(unittest.IsolatedAsyncioTestCase):
         # Mock provider
         mock_provider = Mock()
         mock_provider.get_provider_type.return_value = Mock(value="test")
-        mock_provider.supports_thinking_mode.return_value = False
+        mock_provider.get_capabilities.return_value = Mock(supports_extended_thinking=False)
         mock_provider.generate_content = AsyncMock(
             return_value=Mock(
                 content=json.dumps(
@@ -144,21 +144,17 @@ class TestXAIProvider:
         with pytest.raises(ValueError, match="Unsupported X.AI model"):
             provider.get_capabilities("invalid-model")
 
-    def test_thinking_mode_support(self):
-        """Test thinking mode support for X.AI models."""
+    def test_extended_thinking_flags(self):
+        """X.AI capabilities should expose extended thinking support correctly."""
         provider = XAIModelProvider("test-key")
 
-        # Grok-4 supports thinking mode
-        assert provider.supports_thinking_mode("grok-4") is True
-        assert provider.supports_thinking_mode("grok") is True  # Resolves to grok-4
+        thinking_aliases = ["grok-4", "grok", "grok4"]
+        for alias in thinking_aliases:
+            assert provider.get_capabilities(alias).supports_extended_thinking is True
 
-        # Grok-3 models don't support thinking mode
-        assert not provider.supports_thinking_mode("grok-3")
-        assert not provider.supports_thinking_mode("grok-3-fast")
-        assert provider.supports_thinking_mode("grok-4")  # grok-4 supports thinking mode
-        assert provider.supports_thinking_mode("grok")  # resolves to grok-4
-        assert provider.supports_thinking_mode("grok4")  # resolves to grok-4
-        assert not provider.supports_thinking_mode("grokfast")
+        non_thinking_aliases = ["grok-3", "grok-3-fast", "grokfast"]
+        for alias in non_thinking_aliases:
+            assert provider.get_capabilities(alias).supports_extended_thinking is False
 
     def test_provider_type(self):
         """Test provider type identification."""