feat: GPT-5.2 support
This commit is contained in:
@@ -55,7 +55,7 @@ OPENROUTER_API_KEY=your_openrouter_api_key_here
# Optional: Default model to use
# Options: 'auto' (Claude picks best model), 'pro', 'flash', 'o3', 'o3-mini', 'o4-mini', 'o4-mini-high',
# 'gpt-5.1', 'gpt-5.1-codex', 'gpt-5.1-codex-mini', 'gpt-5', 'gpt-5-mini', 'grok',
# 'gpt-5.2', 'gpt-5.1-codex', 'gpt-5.1-codex-mini', 'gpt-5', 'gpt-5-mini', 'grok',
# 'opus-4.1', 'sonnet-4.1', or any DIAL model if DIAL is configured
# When set to 'auto', Claude will select the best model for each task
# Defaults to 'auto' if not specified
@@ -80,7 +80,8 @@ DEFAULT_THINKING_MODE_THINKDEEP=high
# If you want to disable a provider entirely, don't set its API key
#
# Supported OpenAI models:
# - gpt-5.1 (400K context, 128K output, reasoning tokens, streaming enabled)
# - gpt-5.2 (400K context, 128K output, reasoning tokens, streaming enabled)
# - gpt-5.2-pro (400K context, 272K output, highest reasoning quality, Responses API only)
# - gpt-5.1-codex (400K context, 128K output, coding specialization, Responses API only)
# - gpt-5.1-codex-mini (400K context, 128K output, cost-efficient Codex with streaming)
# - gpt-5 (400K context, 128K output, reasoning tokens)
@@ -126,7 +127,7 @@ DEFAULT_THINKING_MODE_THINKDEEP=high
#
# Examples:
# OPENAI_ALLOWED_MODELS=o3-mini,o4-mini,mini  # Only allow mini models (cost control)
# OPENAI_ALLOWED_MODELS=gpt-5.1,gpt-5.1-codex # Pin to GPT-5.1 family
# OPENAI_ALLOWED_MODELS=gpt-5.2,gpt-5.1-codex # Pin to flagship GPT-5 family
# GOOGLE_ALLOWED_MODELS=flash                  # Only allow Flash (fast responses)
# XAI_ALLOWED_MODELS=grok-3                    # Only allow standard GROK (not fast variant)
# OPENAI_ALLOWED_MODELS=o4-mini                # Single model standardization