fix: Remove duplicate OpenAI models from listmodels output
Fixed an issue where OpenAI models appeared twice in the listmodels output by:

- Removing self-referencing aliases from OpenAI model definitions (e.g., "gpt-5" no longer includes "gpt-5" in its own aliases)
- Adding a filter in listmodels.py to skip aliases that match the model name
- Cleaning up inconsistent alias naming (o3-pro -> o3pro)

This ensures each model appears only once in the listing while preserving all useful aliases.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
@@ -38,7 +38,7 @@ class OpenAIModelProvider(OpenAICompatibleProvider):
|
|||||||
supports_temperature=True, # Regular models accept temperature parameter
|
supports_temperature=True, # Regular models accept temperature parameter
|
||||||
temperature_constraint=create_temperature_constraint("fixed"),
|
temperature_constraint=create_temperature_constraint("fixed"),
|
||||||
description="GPT-5 (400K context, 128K output) - Advanced model with reasoning support",
|
description="GPT-5 (400K context, 128K output) - Advanced model with reasoning support",
|
||||||
aliases=["gpt5", "gpt-5"],
|
aliases=["gpt5"],
|
||||||
),
|
),
|
||||||
"gpt-5-mini": ModelCapabilities(
|
"gpt-5-mini": ModelCapabilities(
|
||||||
provider=ProviderType.OPENAI,
|
provider=ProviderType.OPENAI,
|
||||||
@@ -110,7 +110,7 @@ class OpenAIModelProvider(OpenAICompatibleProvider):
|
|||||||
supports_temperature=False, # O3 models don't accept temperature parameter
|
supports_temperature=False, # O3 models don't accept temperature parameter
|
||||||
temperature_constraint=create_temperature_constraint("fixed"),
|
temperature_constraint=create_temperature_constraint("fixed"),
|
||||||
description="Fast O3 variant (200K context) - Balanced performance/speed, moderate complexity",
|
description="Fast O3 variant (200K context) - Balanced performance/speed, moderate complexity",
|
||||||
aliases=["o3mini", "o3-mini"],
|
aliases=["o3mini"],
|
||||||
),
|
),
|
||||||
"o3-pro": ModelCapabilities(
|
"o3-pro": ModelCapabilities(
|
||||||
provider=ProviderType.OPENAI,
|
provider=ProviderType.OPENAI,
|
||||||
@@ -128,7 +128,7 @@ class OpenAIModelProvider(OpenAICompatibleProvider):
|
|||||||
supports_temperature=False, # O3 models don't accept temperature parameter
|
supports_temperature=False, # O3 models don't accept temperature parameter
|
||||||
temperature_constraint=create_temperature_constraint("fixed"),
|
temperature_constraint=create_temperature_constraint("fixed"),
|
||||||
description="Professional-grade reasoning (200K context) - EXTREMELY EXPENSIVE: Only for the most complex problems requiring universe-scale complexity analysis OR when the user explicitly asks for this model. Use sparingly for critical architectural decisions or exceptionally complex debugging that other models cannot handle.",
|
description="Professional-grade reasoning (200K context) - EXTREMELY EXPENSIVE: Only for the most complex problems requiring universe-scale complexity analysis OR when the user explicitly asks for this model. Use sparingly for critical architectural decisions or exceptionally complex debugging that other models cannot handle.",
|
||||||
aliases=["o3-pro"],
|
aliases=["o3pro"],
|
||||||
),
|
),
|
||||||
"o4-mini": ModelCapabilities(
|
"o4-mini": ModelCapabilities(
|
||||||
provider=ProviderType.OPENAI,
|
provider=ProviderType.OPENAI,
|
||||||
@@ -146,7 +146,7 @@ class OpenAIModelProvider(OpenAICompatibleProvider):
|
|||||||
supports_temperature=False, # O4 models don't accept temperature parameter
|
supports_temperature=False, # O4 models don't accept temperature parameter
|
||||||
temperature_constraint=create_temperature_constraint("fixed"),
|
temperature_constraint=create_temperature_constraint("fixed"),
|
||||||
description="Latest reasoning model (200K context) - Optimized for shorter contexts, rapid reasoning",
|
description="Latest reasoning model (200K context) - Optimized for shorter contexts, rapid reasoning",
|
||||||
aliases=["o4mini", "o4-mini"],
|
aliases=["o4mini"],
|
||||||
),
|
),
|
||||||
"gpt-4.1": ModelCapabilities(
|
"gpt-4.1": ModelCapabilities(
|
||||||
provider=ProviderType.OPENAI,
|
provider=ProviderType.OPENAI,
|
||||||
@@ -164,7 +164,7 @@ class OpenAIModelProvider(OpenAICompatibleProvider):
|
|||||||
supports_temperature=True, # Regular models accept temperature parameter
|
supports_temperature=True, # Regular models accept temperature parameter
|
||||||
temperature_constraint=create_temperature_constraint("range"),
|
temperature_constraint=create_temperature_constraint("range"),
|
||||||
description="GPT-4.1 (1M context) - Advanced reasoning model with large context window",
|
description="GPT-4.1 (1M context) - Advanced reasoning model with large context window",
|
||||||
aliases=["gpt4.1", "gpt-4.1"],
|
aliases=["gpt4.1"],
|
||||||
),
|
),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -138,7 +138,9 @@ class ListModelsTool(BaseTool):
|
|||||||
for model_name, capabilities in provider.get_model_configurations().items():
|
for model_name, capabilities in provider.get_model_configurations().items():
|
||||||
if capabilities.aliases:
|
if capabilities.aliases:
|
||||||
for alias in capabilities.aliases:
|
for alias in capabilities.aliases:
|
||||||
aliases.append(f"- `{alias}` → `{model_name}`")
|
# Skip aliases that are the same as the model name to avoid duplicates
|
||||||
|
if alias != model_name:
|
||||||
|
aliases.append(f"- `{alias}` → `{model_name}`")
|
||||||
|
|
||||||
if aliases:
|
if aliases:
|
||||||
output_lines.append("\n**Aliases**:")
|
output_lines.append("\n**Aliases**:")
|
||||||
|
|||||||
Reference in New Issue
Block a user