docs: update .env.example to include new GPT-5.1 model options and clarify existing model descriptions

This commit is contained in:
Bjorn Melin
2025-11-14 01:09:59 -07:00
parent 807c9df70e
commit dbbfef292c

View File

@@ -55,7 +55,8 @@ OPENROUTER_API_KEY=your_openrouter_api_key_here
# Optional: Default model to use
# Options: 'auto' (Claude picks best model), 'pro', 'flash', 'o3', 'o3-mini', 'o4-mini', 'o4-mini-high',
# 'gpt-5', 'gpt-5-mini', 'grok', 'opus-4.1', 'sonnet-4.1', or any DIAL model if DIAL is configured # 'gpt-5.1', 'gpt-5.1-codex', 'gpt-5.1-codex-mini', 'gpt-5', 'gpt-5-mini', 'grok',
# 'opus-4.1', 'sonnet-4.1', or any DIAL model if DIAL is configured
# When set to 'auto', Claude will select the best model for each task
# Defaults to 'auto' if not specified
DEFAULT_MODEL=auto
@@ -79,12 +80,15 @@ DEFAULT_THINKING_MODE_THINKDEEP=high
# If you want to disable a provider entirely, don't set its API key
#
# Supported OpenAI models:
# - gpt-5.1 (400K context, 128K output, reasoning tokens, streaming enabled)
# - gpt-5.1-codex (400K context, 128K output, coding specialization, Responses API only)
# - gpt-5.1-codex-mini (400K context, 128K output, cost-efficient Codex with streaming)
# - gpt-5 (400K context, 128K output, reasoning tokens)
# - gpt-5-mini (400K context, 128K output, reasoning tokens)
# - o3 (200K context, high reasoning)
# - o3-mini (200K context, balanced)
# - o4-mini (200K context, latest balanced, temperature=1.0 only)
# - o4-mini-high (200K context, enhanced reasoning, temperature=1.0 only)
# - gpt-5 (400K context, 128K output, reasoning tokens)
# - gpt-5-mini (400K context, 128K output, reasoning tokens)
# - mini (shorthand for o4-mini)
#
# Supported Google/Gemini models:
@@ -122,6 +126,7 @@ DEFAULT_THINKING_MODE_THINKDEEP=high
#
# Examples:
# OPENAI_ALLOWED_MODELS=o3-mini,o4-mini,mini # Only allow mini models (cost control)
# OPENAI_ALLOWED_MODELS=gpt-5.1,gpt-5.1-codex # Pin to GPT-5.1 family
# GOOGLE_ALLOWED_MODELS=flash # Only allow Flash (fast responses)
# XAI_ALLOWED_MODELS=grok-3 # Only allow standard GROK (not fast variant)
# OPENAI_ALLOWED_MODELS=o4-mini # Single model standardization