fix: missing "openai/" in name

Fahad
2025-10-01 19:41:17 +04:00
parent 09d6ba4eac
commit 7371ed6487

@@ -52,7 +52,7 @@
"models": [
{
"model_name": "anthropic/claude-opus-4.1",
"aliases": ["opus", "claude-opus", "claude-opus-4.1", "claude-4.1-opus"],
"aliases": ["opus", "claude-opus"],
"context_window": 200000,
"max_output_tokens": 64000,
"supports_extended_thinking": false,
@@ -64,7 +64,7 @@
},
{
"model_name": "anthropic/claude-sonnet-4.1",
"aliases": ["sonnet", "claude-sonnet", "claude-sonnet-4.1", "claude-4.1-sonnet", "claude"],
"aliases": ["sonnet4.1"],
"context_window": 200000,
"max_output_tokens": 64000,
"supports_extended_thinking": false,
@@ -76,7 +76,7 @@
},
{
"model_name": "anthropic/claude-3.5-haiku",
"aliases": ["haiku", "claude-haiku", "claude3-haiku", "claude-3-haiku"],
"aliases": ["haiku"],
"context_window": 200000,
"max_output_tokens": 64000,
"supports_extended_thinking": false,
@@ -100,7 +100,7 @@
},
{
"model_name": "google/gemini-2.5-flash",
"aliases": ["flash","gemini-flash", "flash-openrouter", "flash-2.5"],
"aliases": ["flash","gemini-flash"],
"context_window": 1048576,
"max_output_tokens": 65536,
"supports_extended_thinking": false,
@@ -202,7 +202,7 @@
},
{
"model_name": "openai/o3-pro",
"aliases": ["o3-pro", "o3pro"],
"aliases": ["o3pro"],
"context_window": 200000,
"max_output_tokens": 100000,
"supports_extended_thinking": false,
@@ -229,8 +229,8 @@
"description": "OpenAI's o4-mini model - optimized for shorter contexts with rapid reasoning and vision"
},
{
"model_name": "gpt-5",
"aliases": ["gpt5", "gpt-5"],
"model_name": "openai/gpt-5",
"aliases": ["gpt5"],
"context_window": 400000,
"max_output_tokens": 128000,
"supports_extended_thinking": true,
@@ -243,8 +243,8 @@
"description": "GPT-5 (400K context, 128K output) - Advanced model with reasoning support"
},
{
"model_name": "gpt-5-mini",
"aliases": ["gpt5-mini", "gpt5mini", "mini"],
"model_name": "openai/gpt-5-mini",
"aliases": ["gpt5mini"],
"context_window": 400000,
"max_output_tokens": 128000,
"supports_extended_thinking": true,
@@ -257,8 +257,8 @@
"description": "GPT-5-mini (400K context, 128K output) - Efficient variant with reasoning support"
},
{
"model_name": "gpt-5-nano",
"aliases": ["gpt5nano", "gpt5-nano", "nano"],
"model_name": "openai/gpt-5-nano",
"aliases": ["gpt5nano"],
"context_window": 400000,
"max_output_tokens": 128000,
"supports_extended_thinking": true,
@@ -272,7 +272,7 @@
},
{
"model_name": "llama3.2",
"aliases": ["local-llama", "local", "llama3.2", "ollama-llama"],
"aliases": ["local-llama", "ollama-llama"],
"context_window": 128000,
"max_output_tokens": 64000,
"supports_extended_thinking": false,