feat: enhance model support by adding GPT-5.1 to .gitignore and updating cassette maintenance documentation for dual-model testing
This commit is contained in:
@@ -98,8 +98,8 @@ class TestModelSelection:
         ModelProviderRegistry.register_provider(ProviderType.OPENAI, OpenAIModelProvider)
 
         model = ModelProviderRegistry.get_preferred_fallback_model(ToolModelCategory.EXTENDED_REASONING)
-        # OpenAI prefers GPT-5-Codex for extended reasoning (coding tasks)
-        assert model == "gpt-5-codex"
+        # OpenAI prefers GPT-5.1-Codex for extended reasoning (coding tasks)
+        assert model == "gpt-5.1-codex"
 
     def test_extended_reasoning_with_gemini_only(self):
         """Test EXTENDED_REASONING prefers pro when only Gemini is available."""
@@ -133,8 +133,8 @@ class TestModelSelection:
         ModelProviderRegistry.register_provider(ProviderType.OPENAI, OpenAIModelProvider)
 
         model = ModelProviderRegistry.get_preferred_fallback_model(ToolModelCategory.FAST_RESPONSE)
-        # OpenAI now prefers gpt-5 for fast response (based on our new preference order)
-        assert model == "gpt-5"
+        # OpenAI now prefers gpt-5.1 for fast response (based on our new preference order)
+        assert model == "gpt-5.1"
 
     def test_fast_response_with_gemini_only(self):
         """Test FAST_RESPONSE prefers flash when only Gemini is available."""
@@ -167,8 +167,8 @@ class TestModelSelection:
         ModelProviderRegistry.register_provider(ProviderType.OPENAI, OpenAIModelProvider)
 
         model = ModelProviderRegistry.get_preferred_fallback_model(ToolModelCategory.BALANCED)
-        # OpenAI prefers gpt-5 for balanced (based on our new preference order)
-        assert model == "gpt-5"
+        # OpenAI prefers gpt-5.1 for balanced (based on our new preference order)
+        assert model == "gpt-5.1"
 
     def test_no_category_uses_balanced_logic(self):
         """Test that no category specified uses balanced logic."""
@@ -195,7 +195,7 @@ class TestFlexibleModelSelection:
                 "env": {"OPENAI_API_KEY": "test-key"},
                 "provider_type": ProviderType.OPENAI,
                 "category": ToolModelCategory.EXTENDED_REASONING,
-                "expected": "gpt-5-codex",  # GPT-5-Codex prioritized for coding tasks
+                "expected": "gpt-5.1-codex",  # GPT-5.1-Codex prioritized for coding tasks
             },
             # Case 2: Gemini provider for fast response
             {
@@ -209,7 +209,7 @@ class TestFlexibleModelSelection:
                 "env": {"OPENAI_API_KEY": "test-key"},
                 "provider_type": ProviderType.OPENAI,
                 "category": ToolModelCategory.FAST_RESPONSE,
-                "expected": "gpt-5",  # Based on new preference order
+                "expected": "gpt-5.1",  # Based on new preference order
             },
         ]
 
Reference in New Issue
Block a user