{
  "_README": {
    "description": "Model metadata for OpenCode Zen curated models.",
    "documentation": "https://opencode.ai/docs/zen",
    "usage": "Models listed here are exposed through OpenCode Zen. Aliases are case-insensitive.",
    "field_notes": "Matches providers/shared/model_capabilities.py.",
    "field_descriptions": {
      "model_name": "The model identifier as returned by the Zen API (e.g., 'gpt-5.1-codex')",
      "aliases": "Array of short names users can type instead of the full model name",
      "context_window": "Total number of tokens the model can process (input + output combined)",
      "max_output_tokens": "Maximum number of tokens the model can generate in a single response",
      "supports_extended_thinking": "Whether the model supports extended reasoning tokens",
      "supports_json_mode": "Whether the model can guarantee valid JSON output",
      "supports_function_calling": "Whether the model supports function/tool calling",
      "supports_images": "Whether the model can process images/visual input",
      "max_image_size_mb": "Maximum total size in MB for all images combined",
      "supports_temperature": "Whether the model accepts the temperature parameter",
      "temperature_constraint": "Type of temperature constraint; omit for the default range",
      "description": "Human-readable description of the model",
      "intelligence_score": "1-20 human rating used for auto-mode model ordering",
      "allow_code_generation": "Whether this model can generate working code"
    }
  },
  "models": [
    {
      "model_name": "claude-opus-4-5",
      "aliases": ["zen-opus", "zen-opus4.5", "zen-claude-opus"],
      "context_window": 200000,
      "max_output_tokens": 64000,
      "supports_extended_thinking": false,
      "supports_json_mode": true,
      "supports_function_calling": true,
      "supports_images": true,
      "max_image_size_mb": 5.0,
      "description": "Claude Opus 4.5 via OpenCode Zen - Anthropic's frontier reasoning model for complex software engineering",
      "intelligence_score": 18,
      "allow_code_generation": true
    },
    {
      "model_name": "claude-sonnet-4-5",
      "aliases": ["zen-sonnet", "zen-sonnet4.5"],
      "context_window": 200000,
      "max_output_tokens": 64000,
      "supports_extended_thinking": false,
      "supports_json_mode": true,
      "supports_function_calling": true,
      "supports_images": true,
      "max_image_size_mb": 5.0,
      "description": "Claude Sonnet 4.5 via OpenCode Zen - Balanced performance for coding and general tasks",
      "intelligence_score": 17,
      "allow_code_generation": true
    },
    {
      "model_name": "claude-haiku-4-5",
      "aliases": ["zen-haiku", "zen-haiku4.5"],
      "context_window": 200000,
      "max_output_tokens": 64000,
      "supports_extended_thinking": false,
      "supports_json_mode": true,
      "supports_function_calling": true,
      "supports_images": true,
      "max_image_size_mb": 5.0,
      "description": "Claude Haiku 4.5 via OpenCode Zen - Fast and efficient for coding tasks",
      "intelligence_score": 16,
      "allow_code_generation": true
    },
    {
      "model_name": "gpt-5.1-codex",
      "aliases": ["zen-gpt-codex", "zen-codex"],
      "context_window": 400000,
      "max_output_tokens": 64000,
      "supports_extended_thinking": false,
      "supports_json_mode": true,
      "supports_function_calling": true,
      "supports_images": false,
      "description": "GPT 5.1 Codex via OpenCode Zen - Specialized for code generation and understanding",
      "intelligence_score": 17,
      "allow_code_generation": true
    },
    {
      "model_name": "gpt-5.1",
      "aliases": ["zen-gpt5.1"],
      "context_window": 400000,
      "max_output_tokens": 64000,
      "supports_extended_thinking": false,
      "supports_json_mode": true,
      "supports_function_calling": true,
      "supports_images": false,
      "description": "GPT 5.1 via OpenCode Zen - Latest GPT model for general AI tasks",
      "intelligence_score": 16,
      "allow_code_generation": true
    },
    {
      "model_name": "gemini-3-pro",
      "aliases": ["zen-gemini", "zen-gemini-pro"],
      "context_window": 1000000,
      "max_output_tokens": 64000,
      "supports_extended_thinking": false,
      "supports_json_mode": true,
      "supports_function_calling": true,
      "supports_images": true,
      "max_image_size_mb": 10.0,
      "description": "Gemini 3 Pro via OpenCode Zen - Google's multimodal model with large context",
      "intelligence_score": 16,
      "allow_code_generation": true
    },
    {
      "model_name": "glm-4.6",
      "aliases": ["zen-glm"],
      "context_window": 205000,
      "max_output_tokens": 32000,
      "supports_extended_thinking": false,
      "supports_json_mode": true,
      "supports_function_calling": true,
      "supports_images": false,
      "description": "GLM 4.6 via OpenCode Zen - High-performance model for coding and reasoning",
      "intelligence_score": 15,
      "allow_code_generation": true
    },
    {
      "model_name": "kimi-k2",
      "aliases": ["zen-kimi"],
      "context_window": 400000,
      "max_output_tokens": 32000,
      "supports_extended_thinking": false,
      "supports_json_mode": true,
      "supports_function_calling": true,
      "supports_images": false,
      "description": "Kimi K2 via OpenCode Zen - Advanced reasoning model",
      "intelligence_score": 15,
      "allow_code_generation": true
    },
    {
      "model_name": "qwen3-coder",
      "aliases": ["zen-qwen", "zen-qwen-coder"],
      "context_window": 480000,
      "max_output_tokens": 32000,
      "supports_extended_thinking": false,
      "supports_json_mode": true,
      "supports_function_calling": true,
      "supports_images": false,
      "description": "Qwen3 Coder via OpenCode Zen - Specialized coding model with large context",
      "intelligence_score": 15,
      "allow_code_generation": true
    },
    {
      "model_name": "grok-code",
      "aliases": ["zen-grok"],
      "context_window": 200000,
      "max_output_tokens": 32000,
      "supports_extended_thinking": false,
      "supports_json_mode": true,
      "supports_function_calling": true,
      "supports_images": false,
      "description": "Grok Code via OpenCode Zen - xAI's coding-focused model",
      "intelligence_score": 14,
      "allow_code_generation": true
    },
    {
      "model_name": "big-pickle",
      "aliases": ["zen-pickle"],
      "context_window": 200000,
      "max_output_tokens": 32000,
      "supports_extended_thinking": false,
      "supports_json_mode": true,
      "supports_function_calling": true,
      "supports_images": false,
      "description": "Big Pickle via OpenCode Zen - Stealth model for coding tasks",
      "intelligence_score": 13,
      "allow_code_generation": true
    },
    {
      "model_name": "gpt-5-nano",
      "aliases": ["zen-nano"],
      "context_window": 400000,
      "max_output_tokens": 32000,
      "supports_extended_thinking": false,
      "supports_json_mode": true,
      "supports_function_calling": true,
      "supports_images": false,
      "description": "GPT 5 Nano via OpenCode Zen - Lightweight GPT model",
      "intelligence_score": 12,
      "allow_code_generation": true
    }
  ]
}