Merge pull request #268 from Coquinate/feat/gpt5-codex-responses-api

feat: add GPT-5-Codex support with Responses API integration
This commit is contained in:
Beehive Innovations
2025-10-03 21:16:13 +04:00
committed by GitHub
5 changed files with 34 additions and 11 deletions

View File

@@ -93,7 +93,7 @@ class TestAutoModeComprehensive:
"OPENROUTER_API_KEY": None,
},
{
-                "EXTENDED_REASONING": "o3", # O3 for deep reasoning
+                "EXTENDED_REASONING": "gpt-5-codex", # GPT-5-Codex prioritized for coding tasks
"FAST_RESPONSE": "gpt-5", # Prefer gpt-5 for speed
"BALANCED": "gpt-5", # Prefer gpt-5 for balanced
},

View File

@@ -98,7 +98,7 @@ class TestAutoModeProviderSelection:
balanced = ModelProviderRegistry.get_preferred_fallback_model(ToolModelCategory.BALANCED)
# Should select appropriate OpenAI models based on new preference order
-        assert extended_reasoning == "o3" # O3 for extended reasoning
+        assert extended_reasoning == "gpt-5-codex" # GPT-5-Codex prioritized for extended reasoning
assert fast_response == "gpt-5" # gpt-5 comes first in fast response preference
assert balanced == "gpt-5" # gpt-5 for balanced

View File

@@ -95,8 +95,8 @@ class TestModelSelection:
ModelProviderRegistry.register_provider(ProviderType.OPENAI, OpenAIModelProvider)
model = ModelProviderRegistry.get_preferred_fallback_model(ToolModelCategory.EXTENDED_REASONING)
-        # OpenAI prefers o3 for extended reasoning
-        assert model == "o3"
+        # OpenAI prefers GPT-5-Codex for extended reasoning (coding tasks)
+        assert model == "gpt-5-codex"
def test_extended_reasoning_with_gemini_only(self):
"""Test EXTENDED_REASONING prefers pro when only Gemini is available."""
@@ -192,7 +192,7 @@ class TestFlexibleModelSelection:
"env": {"OPENAI_API_KEY": "test-key"},
"provider_type": ProviderType.OPENAI,
"category": ToolModelCategory.EXTENDED_REASONING,
-                "expected": "o3",
+                "expected": "gpt-5-codex", # GPT-5-Codex prioritized for coding tasks
},
# Case 2: Gemini provider for fast response
{