feat: OpenAI/compatible models (such as Azure OpenAI) can declare if they use the response API instead via use_openai_response_api
This commit is contained in:
@@ -17,6 +17,7 @@
     "max_image_size_mb": "Maximum total size in MB for all images combined (capped at 40MB max for custom models)",
     "supports_temperature": "Whether the model accepts temperature parameter in API calls (set to false for O3/O4 reasoning models)",
     "temperature_constraint": "Type of temperature constraint: 'fixed' (fixed value), 'range' (continuous range), 'discrete' (specific values), or omit for default range",
+    "use_openai_response_api": "Set to true when the deployment must call Azure's /responses endpoint (O-series reasoning models). Leave false/omit for standard chat completions.",
     "description": "Human-readable description of the model",
     "intelligence_score": "1-20 human rating used as the primary signal for auto-mode model ordering"
   }
@@ -37,6 +38,7 @@
     "max_image_size_mb": 0.0,
     "supports_temperature": false,
     "temperature_constraint": "fixed",
+    "use_openai_response_api": false,
     "description": "GPT-4 (128K context, 16K output)",
     "intelligence_score": 10
   }
Reference in New Issue
Block a user