diff --git a/providers/openai_provider.py b/providers/openai_provider.py
index 2d3c0cd..e9bbb6e 100644
--- a/providers/openai_provider.py
+++ b/providers/openai_provider.py
@@ -38,7 +38,7 @@ class OpenAIModelProvider(OpenAICompatibleProvider):
             supports_temperature=True,  # Regular models accept temperature parameter
             temperature_constraint=create_temperature_constraint("fixed"),
             description="GPT-5 (400K context, 128K output) - Advanced model with reasoning support",
-            aliases=["gpt5", "gpt-5"],
+            aliases=["gpt5"],
         ),
         "gpt-5-mini": ModelCapabilities(
             provider=ProviderType.OPENAI,
@@ -110,7 +110,7 @@ class OpenAIModelProvider(OpenAICompatibleProvider):
             supports_temperature=False,  # O3 models don't accept temperature parameter
             temperature_constraint=create_temperature_constraint("fixed"),
             description="Fast O3 variant (200K context) - Balanced performance/speed, moderate complexity",
-            aliases=["o3mini", "o3-mini"],
+            aliases=["o3mini"],
         ),
         "o3-pro": ModelCapabilities(
             provider=ProviderType.OPENAI,
@@ -128,7 +128,7 @@ class OpenAIModelProvider(OpenAICompatibleProvider):
             supports_temperature=False,  # O3 models don't accept temperature parameter
             temperature_constraint=create_temperature_constraint("fixed"),
             description="Professional-grade reasoning (200K context) - EXTREMELY EXPENSIVE: Only for the most complex problems requiring universe-scale complexity analysis OR when the user explicitly asks for this model. Use sparingly for critical architectural decisions or exceptionally complex debugging that other models cannot handle.",
-            aliases=["o3-pro"],
+            aliases=["o3pro"],
         ),
         "o4-mini": ModelCapabilities(
             provider=ProviderType.OPENAI,
@@ -146,7 +146,7 @@ class OpenAIModelProvider(OpenAICompatibleProvider):
             supports_temperature=False,  # O4 models don't accept temperature parameter
             temperature_constraint=create_temperature_constraint("fixed"),
             description="Latest reasoning model (200K context) - Optimized for shorter contexts, rapid reasoning",
-            aliases=["o4mini", "o4-mini"],
+            aliases=["o4mini"],
         ),
         "gpt-4.1": ModelCapabilities(
             provider=ProviderType.OPENAI,
@@ -164,7 +164,7 @@ class OpenAIModelProvider(OpenAICompatibleProvider):
             supports_temperature=True,  # Regular models accept temperature parameter
             temperature_constraint=create_temperature_constraint("range"),
             description="GPT-4.1 (1M context) - Advanced reasoning model with large context window",
-            aliases=["gpt4.1", "gpt-4.1"],
+            aliases=["gpt4.1"],
         ),
     }
diff --git a/tests/test_supported_models_aliases.py b/tests/test_supported_models_aliases.py
index 256eaaf..336368b 100644
--- a/tests/test_supported_models_aliases.py
+++ b/tests/test_supported_models_aliases.py
@@ -50,15 +50,15 @@ class TestSupportedModelsAliases:
         # "mini" is now an alias for gpt-5-mini, not o4-mini
         assert "mini" in provider.SUPPORTED_MODELS["gpt-5-mini"].aliases
         assert "o4mini" in provider.SUPPORTED_MODELS["o4-mini"].aliases
-        assert "o4-mini" in provider.SUPPORTED_MODELS["o4-mini"].aliases
+        # o4-mini is no longer in its own aliases (removed self-reference)
         assert "o3mini" in provider.SUPPORTED_MODELS["o3-mini"].aliases
-        assert "o3-pro" in provider.SUPPORTED_MODELS["o3-pro"].aliases
+        assert "o3pro" in provider.SUPPORTED_MODELS["o3-pro"].aliases
         assert "gpt4.1" in provider.SUPPORTED_MODELS["gpt-4.1"].aliases

         # Test alias resolution
         assert provider._resolve_model_name("mini") == "gpt-5-mini"  # mini -> gpt-5-mini now
         assert provider._resolve_model_name("o3mini") == "o3-mini"
-        assert provider._resolve_model_name("o3-pro") == "o3-pro"  # o3-pro is already the base model name
+        assert provider._resolve_model_name("o3pro") == "o3-pro"  # o3pro resolves to o3-pro
         assert provider._resolve_model_name("o4mini") == "o4-mini"
         assert provider._resolve_model_name("gpt4.1") == "gpt-4.1"  # gpt4.1 resolves to gpt-4.1
diff --git a/tools/listmodels.py b/tools/listmodels.py
index 7fa0c90..4d17062 100644
--- a/tools/listmodels.py
+++ b/tools/listmodels.py
@@ -138,7 +138,9 @@ class ListModelsTool(BaseTool):
             for model_name, capabilities in provider.get_model_configurations().items():
                 if capabilities.aliases:
                     for alias in capabilities.aliases:
-                        aliases.append(f"- `{alias}` → `{model_name}`")
+                        # Skip aliases that are the same as the model name to avoid duplicates
+                        if alias != model_name:
+                            aliases.append(f"- `{alias}` → `{model_name}`")

             if aliases:
                 output_lines.append("\n**Aliases**:")
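Removing the self-referencing aliases only works if `_resolve_model_name` already treats canonical model names as resolving to themselves. A minimal sketch of that expected behaviour, assuming a plain dictionary walk (the `resolve_model_name` helper and trimmed `SUPPORTED_MODELS` below are illustrative, not the repository's actual implementation):

```python
# Sketch only: hypothetical, trimmed-down view of the alias table after this change.
SUPPORTED_MODELS = {
    "o3-pro": {"aliases": ["o3pro"]},
    "o4-mini": {"aliases": ["o4mini"]},
    "gpt-4.1": {"aliases": ["gpt4.1"]},
}


def resolve_model_name(name: str) -> str:
    """Return the canonical model name for a base name or a short alias."""
    if name in SUPPORTED_MODELS:
        return name  # canonical names resolve to themselves, no self-alias needed
    for model, caps in SUPPORTED_MODELS.items():
        if name in caps["aliases"]:
            return model  # short aliases map to the canonical name
    return name  # unknown names pass through unchanged


assert resolve_model_name("o3pro") == "o3-pro"
assert resolve_model_name("o3-pro") == "o3-pro"  # base name still works without a self-alias
assert resolve_model_name("gpt4.1") == "gpt-4.1"
```

With that property, dropping `"gpt-5"`, `"o3-mini"`, `"o3-pro"`, `"o4-mini"`, and `"gpt-4.1"` from their own alias lists changes nothing for callers, while the `listmodels` output no longer prints redundant `model → model` entries.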