feat: support for GPT-5-Pro highest reasoning model https://github.com/BeehiveInnovations/zen-mcp-server/issues/275
@@ -391,9 +391,10 @@ class OpenAICompatibleProvider(ModelProvider):
         messages: list,
         temperature: float,
         max_output_tokens: Optional[int] = None,
+        capabilities: Optional[ModelCapabilities] = None,
         **kwargs,
     ) -> ModelResponse:
-        """Generate content using the /v1/responses endpoint for o3-pro via OpenAI library."""
+        """Generate content using the /v1/responses endpoint for reasoning models."""
         # Convert messages to the correct format for responses endpoint
         input_messages = []
 
@@ -412,10 +413,14 @@ class OpenAICompatibleProvider(ModelProvider):
 
         # Prepare completion parameters for responses endpoint
         # Based on OpenAI documentation, use nested reasoning object for responses endpoint
+        effort = "medium"
+        if capabilities and capabilities.default_reasoning_effort:
+            effort = capabilities.default_reasoning_effort
+
         completion_params = {
             "model": model_name,
             "input": input_messages,
-            "reasoning": {"effort": "medium"},  # Use nested object for responses endpoint
+            "reasoning": {"effort": effort},
             "store": True,
         }
 
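Note: the nested reasoning object is the shape the OpenAI Responses API expects, in contrast to the flat reasoning_effort parameter on chat completions. A minimal sketch of the equivalent raw request, assuming the standard openai Python client (the prompt text and printed field are illustrative only):

from openai import OpenAI

client = OpenAI()

# "high" mirrors gpt-5-pro's default_reasoning_effort; "medium" is the fallback used above
response = client.responses.create(
    model="gpt-5-pro",
    input=[{"role": "user", "content": "Compare streaming and non-streaming delivery for long reasoning runs."}],
    reasoning={"effort": "high"},  # nested object, not the flat reasoning_effort used by chat completions
    store=True,
)
print(response.output_text)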
@@ -475,11 +480,11 @@ class OpenAICompatibleProvider(ModelProvider):
                 operation=_attempt,
                 max_attempts=max_retries,
                 delays=retry_delays,
-                log_prefix="o3-pro responses endpoint",
+                log_prefix="responses endpoint",
             )
         except Exception as exc:
             attempts = max(attempt_counter["value"], 1)
-            error_msg = f"o3-pro responses endpoint error after {attempts} attempt{'s' if attempts > 1 else ''}: {exc}"
+            error_msg = f"responses endpoint error after {attempts} attempt{'s' if attempts > 1 else ''}: {exc}"
             logging.error(error_msg)
             raise RuntimeError(error_msg) from exc
 
@@ -614,6 +619,7 @@ class OpenAICompatibleProvider(ModelProvider):
                 messages=messages,
                 temperature=temperature,
                 max_output_tokens=max_output_tokens,
+                capabilities=capabilities,
                 **kwargs,
             )
 
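The call above now threads the resolved capabilities into the responses-endpoint helper. A rough sketch of the routing decision, assuming it is keyed off the new use_openai_response_api flag (the Caps stand-in and helper name are illustrative, not the provider's actual identifiers):

from dataclasses import dataclass
from typing import Optional

@dataclass
class Caps:  # stand-in for ModelCapabilities; only the field used for routing
    use_openai_response_api: bool = False

def uses_responses_endpoint(caps: Optional[Caps]) -> bool:
    """True when the model should be served via /v1/responses instead of chat completions."""
    return bool(caps and caps.use_openai_response_api)

assert uses_responses_endpoint(Caps(use_openai_response_api=True))       # e.g. gpt-5-pro, o3-pro
assert not uses_responses_endpoint(Caps(use_openai_response_api=False))  # e.g. gpt-5-mini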
@@ -31,7 +31,7 @@ class OpenAIModelProvider(OpenAICompatibleProvider):
             max_output_tokens=128_000,  # 128K max output tokens
             supports_extended_thinking=True,  # Supports reasoning tokens
             supports_system_prompts=True,
-            supports_streaming=True,
+            supports_streaming=False,
             supports_function_calling=True,
             supports_json_mode=True,
             supports_images=True,  # GPT-5 supports vision
@@ -41,6 +41,27 @@ class OpenAIModelProvider(OpenAICompatibleProvider):
             description="GPT-5 (400K context, 128K output) - Advanced model with reasoning support",
             aliases=["gpt5"],
         ),
+        "gpt-5-pro": ModelCapabilities(
+            provider=ProviderType.OPENAI,
+            model_name="gpt-5-pro",
+            friendly_name="OpenAI (GPT-5 Pro)",
+            intelligence_score=18,
+            use_openai_response_api=True,
+            context_window=400_000,
+            max_output_tokens=272_000,
+            supports_extended_thinking=True,
+            supports_system_prompts=True,
+            supports_streaming=False,
+            supports_function_calling=True,
+            supports_json_mode=True,
+            supports_images=True,
+            max_image_size_mb=20.0,
+            supports_temperature=True,
+            temperature_constraint=TemperatureConstraint.create("fixed"),
+            default_reasoning_effort="high",
+            description="GPT-5 Pro (400K context, 272K output) - Advanced model with reasoning support",
+            aliases=["gpt5pro"],
+        ),
         "gpt-5-mini": ModelCapabilities(
             provider=ProviderType.OPENAI,
             model_name="gpt-5-mini",
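The new entry registers gpt-5-pro as a fixed-temperature, non-streaming model served through the Responses API with a "high" default reasoning effort and a gpt5pro alias. A small stand-alone sketch of how such alias resolution typically works (the mapping and helper below are illustrative, not the provider's actual lookup code):

from typing import Optional

# Illustrative subset of the registry above: canonical name -> aliases
MODEL_ALIASES = {
    "gpt-5": ["gpt5"],
    "gpt-5-pro": ["gpt5pro"],
}

def resolve_model_name(requested: str) -> Optional[str]:
    """Map a canonical name or alias (case-insensitive) to its canonical model name."""
    wanted = requested.lower()
    for canonical, aliases in MODEL_ALIASES.items():
        if wanted == canonical or wanted in aliases:
            return canonical
    return None

assert resolve_model_name("gpt5pro") == "gpt-5-pro"
assert resolve_model_name("GPT-5-Pro") == "gpt-5-pro"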
@@ -50,7 +71,7 @@ class OpenAIModelProvider(OpenAICompatibleProvider):
             max_output_tokens=128_000,  # 128K max output tokens
             supports_extended_thinking=True,  # Supports reasoning tokens
             supports_system_prompts=True,
-            supports_streaming=True,
+            supports_streaming=False,
             supports_function_calling=True,
             supports_json_mode=True,
             supports_images=True,  # GPT-5-mini supports vision
@@ -284,7 +305,7 @@ class OpenAIModelProvider(OpenAICompatibleProvider):
         if category == ToolModelCategory.EXTENDED_REASONING:
             # Prefer models with extended thinking support
             # GPT-5-Codex first for coding tasks
-            preferred = find_first(["gpt-5-codex", "o3", "o3-pro", "gpt-5"])
+            preferred = find_first(["gpt-5-codex", "gpt-5-pro", "o3", "o3-pro", "gpt-5"])
             return preferred if preferred else allowed_models[0]
 
         elif category == ToolModelCategory.FAST_RESPONSE:
@@ -296,5 +317,5 @@ class OpenAIModelProvider(OpenAICompatibleProvider):
         else:  # BALANCED or default
             # Prefer balanced performance/cost models
             # Include GPT-5-Codex for coding workflows
-            preferred = find_first(["gpt-5", "gpt-5-codex", "gpt-5-mini", "o4-mini", "o3-mini"])
+            preferred = find_first(["gpt-5", "gpt-5-codex", "gpt-5-pro", "gpt-5-mini", "o4-mini", "o3-mini"])
             return preferred if preferred else allowed_models[0]
 
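Both preference lists now include gpt-5-pro. find_first presumably returns the first candidate that is actually available to the caller; a minimal stand-in with the allowed list passed explicitly (the real helper is likely a closure over allowed_models, so the two-argument signature here is illustrative):

from typing import Optional

def find_first(preferred: list[str], allowed_models: list[str]) -> Optional[str]:
    """Return the first preferred model that appears in the allowed list, else None."""
    allowed = set(allowed_models)
    return next((name for name in preferred if name in allowed), None)

allowed_models = ["gpt-5-pro", "gpt-5-mini", "o4-mini"]
# EXTENDED_REASONING order from above: gpt-5-codex is not allowed here, so gpt-5-pro wins
assert find_first(["gpt-5-codex", "gpt-5-pro", "o3", "o3-pro", "gpt-5"], allowed_models) == "gpt-5-pro"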
@@ -51,6 +51,7 @@ class ModelCapabilities:
     supports_json_mode: bool = False
     supports_temperature: bool = True
     use_openai_response_api: bool = False
+    default_reasoning_effort: Optional[str] = None
 
     # Additional attributes
     max_image_size_mb: float = 0.0
||||