Further fixes to tests
Pass the O3 simulation test when API keys are not set, and emit a notice
Updated docs on testing, simulation tests, and contributing
Support for OpenAI o4-mini and o4-mini-high
Fahad committed 2025-06-14 09:28:20 +04:00
parent c5f682c7b0
commit 746380eb7f
17 changed files with 324 additions and 53 deletions

View File

@@ -85,8 +85,10 @@ def mock_provider_availability(request, monkeypatch):
     the tools don't require model selection unless explicitly testing auto mode.
     """
     # Skip this fixture for tests that need real providers
-    if hasattr(request, "node") and request.node.get_closest_marker("no_mock_provider"):
-        return
+    if hasattr(request, "node"):
+        marker = request.node.get_closest_marker("no_mock_provider")
+        if marker:
+            return
     from unittest.mock import MagicMock
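
For reference, a test opts out of this autouse fixture by carrying the marker the fixture looks up. A minimal sketch, assuming the no_mock_provider marker is registered in the project's pytest configuration (the test name and body below are illustrative, not part of this commit):

    import pytest

    @pytest.mark.no_mock_provider
    def test_against_real_provider():
        # Runs without the mocked provider availability,
        # so it needs real API keys in the environment.
        ...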

View File

@@ -2,7 +2,6 @@
 Test that conversation history is correctly mapped to tool-specific fields
 """
-import os
 from datetime import datetime
 from unittest.mock import MagicMock, patch
@@ -130,8 +129,7 @@ async def test_unknown_tool_defaults_to_prompt():
         with patch("utils.conversation_memory.get_thread", return_value=mock_context):
             with patch("utils.conversation_memory.add_turn", return_value=True):
                 with patch("utils.conversation_memory.build_conversation_history", return_value=("History", 500)):
-                    # The test uses the conftest fixture which should handle provider mocking
-                    # We just need to ensure the arguments are correct
+                    # The autouse fixture should handle provider mocking
                     arguments = {
                         "continuation_id": "test-thread-456",
                         "prompt": "User input",

View File

@@ -72,7 +72,10 @@ class TestOpenRouterProvider:
         assert provider._resolve_model_name("opus") == "anthropic/claude-3-opus"
         assert provider._resolve_model_name("sonnet") == "anthropic/claude-3-sonnet"
         assert provider._resolve_model_name("o3") == "openai/o3"
-        assert provider._resolve_model_name("o3-mini") == "openai/o3-mini-high"
+        assert provider._resolve_model_name("o3-mini") == "openai/o3-mini"
+        assert provider._resolve_model_name("o3mini") == "openai/o3-mini"
+        assert provider._resolve_model_name("o4-mini") == "openai/o4-mini"
+        assert provider._resolve_model_name("o4-mini-high") == "openai/o4-mini-high"
         assert provider._resolve_model_name("claude") == "anthropic/claude-3-sonnet"
         assert provider._resolve_model_name("mistral") == "mistral/mistral-large"
         assert provider._resolve_model_name("deepseek") == "deepseek/deepseek-r1-0528"

View File

@@ -183,12 +183,31 @@ class TestOpenAIProvider:
         assert capabilities.context_window == 200_000
         assert not capabilities.supports_extended_thinking

+    def test_get_capabilities_o4_mini(self):
+        """Test getting O4-mini model capabilities"""
+        provider = OpenAIModelProvider(api_key="test-key")
+        capabilities = provider.get_capabilities("o4-mini")
+        assert capabilities.provider == ProviderType.OPENAI
+        assert capabilities.model_name == "o4-mini"
+        assert capabilities.context_window == 200_000
+        assert not capabilities.supports_extended_thinking
+        # Check temperature constraint is fixed at 1.0
+        assert capabilities.temperature_constraint.value == 1.0
+
     def test_validate_model_names(self):
         """Test model name validation"""
         provider = OpenAIModelProvider(api_key="test-key")
         assert provider.validate_model_name("o3")
         assert provider.validate_model_name("o3-mini")
         assert provider.validate_model_name("o3mini")
         assert provider.validate_model_name("o3-mini")  # Backwards compatibility
+        assert provider.validate_model_name("o4-mini")
+        assert provider.validate_model_name("o4mini")
+        assert provider.validate_model_name("o4-mini-high")
+        assert provider.validate_model_name("o4minihigh")
+        assert provider.validate_model_name("o4minihi")
         assert not provider.validate_model_name("gpt-4o")
         assert not provider.validate_model_name("invalid-model")
@@ -197,4 +216,7 @@ class TestOpenAIProvider:
         provider = OpenAIModelProvider(api_key="test-key")
         assert not provider.supports_thinking_mode("o3")
         assert not provider.supports_thinking_mode("o3mini")
+        assert not provider.supports_thinking_mode("o3-mini")
+        assert not provider.supports_thinking_mode("o4-mini")
+        assert not provider.supports_thinking_mode("o4-mini-high")