feat: add GPT-5-Codex support with Responses API integration

Adds support for OpenAI's GPT-5-Codex model which uses the new Responses API
endpoint (/v1/responses) instead of the standard Chat Completions API.

Changes:
- Add GPT-5-Codex to MODEL_CAPABILITIES with 400K context, 128K output
- Prioritize GPT-5-Codex for EXTENDED_REASONING tasks
- Add aliases: codex, gpt5-codex, gpt-5-code
- Update tests to expect GPT-5-Codex for extended reasoning

Benefits:
- 40-80% cost savings through Responses API caching
- 3% better performance on coding tasks (SWE-bench)
- Leverages existing dual-API infrastructure
This commit is contained in:
aberemia24
2025-10-03 13:59:44 +03:00
parent 95d98a9bc0
commit f2653427ca
4 changed files with 30 additions and 8 deletions

View File

@@ -174,6 +174,25 @@ class OpenAIModelProvider(OpenAICompatibleProvider):
description="GPT-4.1 (1M context) - Advanced reasoning model with large context window",
aliases=["gpt4.1"],
),
"gpt-5-codex": ModelCapabilities(
provider=ProviderType.OPENAI,
model_name="gpt-5-codex",
friendly_name="OpenAI (GPT-5 Codex)",
intelligence_score=17, # Higher than GPT-5 for coding tasks
context_window=400_000, # 400K tokens (same as GPT-5)
max_output_tokens=128_000, # 128K output tokens
supports_extended_thinking=True, # Responses API supports reasoning tokens
supports_system_prompts=True,
supports_streaming=True,
supports_function_calling=True, # Enhanced for agentic software engineering
supports_json_mode=True,
supports_images=True, # Screenshots, wireframes, diagrams
max_image_size_mb=20.0, # 20MB per OpenAI docs
supports_temperature=True,
temperature_constraint=TemperatureConstraint.create("range"),
description="GPT-5 Codex (400K context) - Uses Responses API for 40-80% cost savings. Specialized for coding, refactoring, and software architecture. 3% better performance on SWE-bench.",
aliases=["gpt5-codex", "codex", "gpt-5-code", "gpt5-code"],
),
}
def __init__(self, api_key: str, **kwargs):
@@ -290,15 +309,18 @@ class OpenAIModelProvider(OpenAICompatibleProvider):
if category == ToolModelCategory.EXTENDED_REASONING:
# Prefer models with extended thinking support
preferred = find_first(["o3", "o3-pro", "gpt-5"])
# GPT-5-Codex first for coding tasks (uses Responses API with 40-80% cost savings)
preferred = find_first(["gpt-5-codex", "o3", "o3-pro", "gpt-5"])
return preferred if preferred else allowed_models[0]
elif category == ToolModelCategory.FAST_RESPONSE:
# Prefer fast, cost-efficient models
preferred = find_first(["gpt-5", "gpt-5-mini", "o4-mini", "o3-mini"])
# GPT-5 models for speed, GPT-5-Codex after (premium pricing but cached)
preferred = find_first(["gpt-5", "gpt-5-mini", "gpt-5-codex", "o4-mini", "o3-mini"])
return preferred if preferred else allowed_models[0]
else: # BALANCED or default
# Prefer balanced performance/cost models
preferred = find_first(["gpt-5", "gpt-5-mini", "o4-mini", "o3-mini"])
# Include GPT-5-Codex for coding workflows
preferred = find_first(["gpt-5", "gpt-5-codex", "gpt-5-mini", "o4-mini", "o3-mini"])
return preferred if preferred else allowed_models[0]