Fix for: https://github.com/BeehiveInnovations/zen-mcp-server/issues/102

- Removed centralized MODEL_CAPABILITIES_DESC from config.py
- Added model descriptions to individual provider SUPPORTED_MODELS
- Updated _get_available_models() to use ModelProviderRegistry for API key filtering
- Added comprehensive test suite validating bug reproduction and fix
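
For context, each provider now carries its own model metadata instead of a shared MODEL_CAPABILITIES_DESC table. Below is a minimal sketch of the shape the updated tools read; the class name and values are illustrative, not copied from the provider files:

```python
# Illustrative stand-in for a real provider class (e.g. the Gemini provider).
class ExampleModelProvider:
    SUPPORTED_MODELS = {
        # Dict entries carry the metadata read via .get("description") / .get("context_window").
        "gemini-2.5-flash": {
            "context_window": 1_000_000,  # illustrative value
            "description": "Ultra-fast model for quick iterations and rapid feedback",
        },
        # Plain-string entries are aliases that resolve to a full model name.
        "flash": "gemini-2.5-flash",
    }
```

With descriptions attached to the providers, _get_available_models() can simply ask ModelProviderRegistry.get_available_model_names(), so only providers with valid API keys contribute entries to the schema enum.
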
@@ -295,19 +295,19 @@ class BaseTool(ABC):

     def _get_available_models(self) -> list[str]:
         """
-        Get list of all possible models for the schema enum.
+        Get list of models available from enabled providers.

-        In auto mode, we show ALL models from MODEL_CAPABILITIES_DESC so Claude
-        can see all options, even if some require additional API configuration.
-        Runtime validation will handle whether a model is actually available.
+        Only returns models from providers that have valid API keys configured.
+        This fixes the namespace collision bug where models from disabled providers
+        were shown to Claude, causing routing conflicts.

         Returns:
-            List of all model names from config
+            List of model names from enabled providers only
         """
-        from config import MODEL_CAPABILITIES_DESC
+        from providers.registry import ModelProviderRegistry

-        # Start with all models from MODEL_CAPABILITIES_DESC
-        all_models = list(MODEL_CAPABILITIES_DESC.keys())
+        # Get models from enabled providers only (those with valid API keys)
+        all_models = ModelProviderRegistry.get_available_model_names()

         # Add OpenRouter models if OpenRouter is configured
         openrouter_key = os.getenv("OPENROUTER_API_KEY")
@@ -339,9 +339,6 @@ class BaseTool(ABC):

                 logging.debug(f"Failed to add custom models to enum: {e}")

-        # Note: MODEL_CAPABILITIES_DESC already includes both short aliases (e.g., "flash", "o3")
-        # and full model names (e.g., "gemini-2.5-flash") as keys
-
         # Remove duplicates while preserving order
         seen = set()
         unique_models = []
@@ -364,7 +361,7 @@ class BaseTool(ABC):
         """
         import os

-        from config import DEFAULT_MODEL, MODEL_CAPABILITIES_DESC
+        from config import DEFAULT_MODEL

         # Check if OpenRouter is configured
         has_openrouter = bool(
@@ -378,8 +375,39 @@ class BaseTool(ABC):
                 "IMPORTANT: Use the model specified by the user if provided, OR select the most suitable model "
                 "for this specific task based on the requirements and capabilities listed below:"
             ]
-            for model, desc in MODEL_CAPABILITIES_DESC.items():
-                model_desc_parts.append(f"- '{model}': {desc}")
+
+            # Get descriptions from enabled providers
+            from providers.base import ProviderType
+            from providers.registry import ModelProviderRegistry
+
+            # Map provider types to readable names
+            provider_names = {
+                ProviderType.GOOGLE: "Gemini models",
+                ProviderType.OPENAI: "OpenAI models",
+                ProviderType.XAI: "X.AI GROK models",
+                ProviderType.CUSTOM: "Custom models",
+                ProviderType.OPENROUTER: "OpenRouter models",
+            }
+
+            # Check available providers and add their model descriptions
+            for provider_type in [ProviderType.GOOGLE, ProviderType.OPENAI, ProviderType.XAI]:
+                provider = ModelProviderRegistry.get_provider(provider_type)
+                if provider:
+                    provider_section_added = False
+                    for model_name in provider.list_models(respect_restrictions=True):
+                        try:
+                            # Get model config to extract description
+                            model_config = provider.SUPPORTED_MODELS.get(model_name)
+                            if isinstance(model_config, dict) and "description" in model_config:
+                                if not provider_section_added:
+                                    model_desc_parts.append(
+                                        f"\n{provider_names[provider_type]} - Available when {provider_type.value.upper()}_API_KEY is configured:"
+                                    )
+                                    provider_section_added = True
+                                model_desc_parts.append(f"- '{model_name}': {model_config['description']}")
+                        except Exception:
+                            # Skip models without descriptions
+                            continue

             # Add custom models if custom API is configured
             custom_url = os.getenv("CUSTOM_API_URL")
@@ -433,7 +461,7 @@ class BaseTool(ABC):

                     if model_configs:
                         model_desc_parts.append("\nOpenRouter models (use these aliases):")
-                        for alias, config in model_configs[:10]: # Limit to top 10
+                        for alias, config in model_configs: # Show ALL models so Claude can choose
                             # Format context window in human-readable form
                             context_tokens = config.context_window
                             if context_tokens >= 1_000_000:
@@ -450,11 +478,6 @@ class BaseTool(ABC):
                                 # Fallback to showing the model name if no description
                                 desc = f"- '{alias}' ({context_str} context): {config.model_name}"
                             model_desc_parts.append(desc)
-
-                        # Add note about additional models if any were cut off
-                        total_models = len(model_configs)
-                        if total_models > 10:
-                            model_desc_parts.append(f"... and {total_models - 10} more models available")
                 except Exception as e:
                     # Log for debugging but don't fail
                     import logging
@@ -475,10 +498,11 @@ class BaseTool(ABC):
             }
         else:
             # Normal mode - model is optional with default
-            available_models = list(MODEL_CAPABILITIES_DESC.keys())
-            models_str = ", ".join(f"'{m}'" for m in available_models)
+            available_models = self._get_available_models()
+            models_str = ", ".join(f"'{m}'" for m in available_models) # Show ALL models so Claude can choose

-            description = f"Model to use. Available models: {models_str}."
+            description = f"Model to use. Native models: {models_str}."
             if has_openrouter:
                 # Add OpenRouter aliases
                 try:
@@ -489,7 +513,7 @@ class BaseTool(ABC):
                     if aliases:
                         # Show all aliases so Claude knows every option available
                         all_aliases = sorted(aliases)
-                        alias_list = ", ".join(f"'{a}'" for a in all_aliases)
+                        alias_list = ", ".join(f"'{a}'" for a in all_aliases) # Show ALL aliases so Claude can choose
                         description += f" OpenRouter aliases: {alias_list}."
                     else:
                         description += " OpenRouter: Any model available on openrouter.ai."
@@ -72,10 +72,12 @@ CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS = {
         "exploration path."
     ),
     "relevant_files": (
-        "Subset of files_checked (as full absolute paths) that contain code directly relevant to the review or "
-        "contain significant issues, patterns, or examples worth highlighting. Only list those that are directly "
-        "tied to important findings, security concerns, performance issues, or architectural decisions. This could "
-        "include core implementation files, configuration files, or files with notable patterns."
+        "For when this is the first step, please pass absolute file paths of relevant code to review (do not clip "
+        "file paths). When used for the final step, this contains a subset of files_checked (as full absolute paths) "
+        "that contain code directly relevant to the review or contain significant issues, patterns, or examples worth "
+        "highlighting. Only list those that are directly tied to important findings, security concerns, performance "
+        "issues, or architectural decisions. This could include core implementation files, configuration files, or "
+        "files with notable patterns."
     ),
     "relevant_context": (
         "List methods, functions, classes, or modules that are central to the code review findings, in the format "
@@ -72,79 +72,74 @@ class ListModelsTool(BaseTool):
         Returns:
             Formatted list of models by provider
         """
-        from config import MODEL_CAPABILITIES_DESC
         from providers.base import ProviderType
         from providers.openrouter_registry import OpenRouterModelRegistry
+        from providers.registry import ModelProviderRegistry

         output_lines = ["# Available AI Models\n"]

-        # Check native providers
-        native_providers = {
-            "gemini": {
-                "name": "Google Gemini",
-                "env_key": "GEMINI_API_KEY",
-                "models": {
-                    "flash": "gemini-2.5-flash",
-                    "pro": "gemini-2.5-pro",
-                },
-            },
-            "openai": {
-                "name": "OpenAI",
-                "env_key": "OPENAI_API_KEY",
-                "models": {
-                    "o3": "o3",
-                    "o3-mini": "o3-mini",
-                    "o3-pro": "o3-pro",
-                    "o4-mini": "o4-mini",
-                    "o4-mini-high": "o4-mini-high",
-                },
-            },
-            "xai": {
-                "name": "X.AI (Grok)",
-                "env_key": "XAI_API_KEY",
-                "models": {
-                    "grok": "grok-3",
-                    "grok-3": "grok-3",
-                    "grok-3-fast": "grok-3-fast",
-                    "grok3": "grok-3",
-                    "grokfast": "grok-3-fast",
-                },
-            },
+        # Map provider types to friendly names and their models
+        provider_info = {
+            ProviderType.GOOGLE: {"name": "Google Gemini", "env_key": "GEMINI_API_KEY"},
+            ProviderType.OPENAI: {"name": "OpenAI", "env_key": "OPENAI_API_KEY"},
+            ProviderType.XAI: {"name": "X.AI (Grok)", "env_key": "XAI_API_KEY"},
         }

-        # Check each native provider
-        for provider_key, provider_info in native_providers.items():
-            api_key = os.getenv(provider_info["env_key"])
-            is_configured = api_key and api_key != f"your_{provider_key}_api_key_here"
+        # Check each native provider type
+        for provider_type, info in provider_info.items():
+            # Check if provider is enabled
+            provider = ModelProviderRegistry.get_provider(provider_type)
+            is_configured = provider is not None

-            output_lines.append(f"## {provider_info['name']} {'✅' if is_configured else '❌'}")
+            output_lines.append(f"## {info['name']} {'✅' if is_configured else '❌'}")

             if is_configured:
                 output_lines.append("**Status**: Configured and available")
                 output_lines.append("\n**Models**:")

-                for alias, full_name in provider_info["models"].items():
-                    # Get description from MODEL_CAPABILITIES_DESC
-                    desc = MODEL_CAPABILITIES_DESC.get(alias, "")
-                    if isinstance(desc, str):
-                        # Extract context window from description
-                        import re
+                # Get models from the provider's SUPPORTED_MODELS
+                for model_name, config in provider.SUPPORTED_MODELS.items():
+                    # Skip alias entries (string values)
+                    if isinstance(config, str):
+                        continue

-                        context_match = re.search(r"\(([^)]+context)\)", desc)
-                        context_info = context_match.group(1) if context_match else ""
+                    # Get description and context from the model config
+                    description = config.get("description", "No description available")
+                    context_window = config.get("context_window", 0)

-                        output_lines.append(f"- `{alias}` → `{full_name}` - {context_info}")
+                    # Format context window
+                    if context_window >= 1_000_000:
+                        context_str = f"{context_window // 1_000_000}M context"
+                    elif context_window >= 1_000:
+                        context_str = f"{context_window // 1_000}K context"
+                    else:
+                        context_str = f"{context_window} context" if context_window > 0 else "unknown context"

-                        # Extract key capability
-                        if "Ultra-fast" in desc:
-                            output_lines.append(" - Fast processing, quick iterations")
-                        elif "Deep reasoning" in desc:
-                            output_lines.append(" - Extended reasoning with thinking mode")
-                        elif "Strong reasoning" in desc:
-                            output_lines.append(" - Logical problems, systematic analysis")
-                        elif "EXTREMELY EXPENSIVE" in desc:
-                            output_lines.append(" - ⚠️ Professional grade (very expensive)")
+                    output_lines.append(f"- `{model_name}` - {context_str}")
+
+                    # Extract key capability from description
+                    if "Ultra-fast" in description:
+                        output_lines.append(" - Fast processing, quick iterations")
+                    elif "Deep reasoning" in description:
+                        output_lines.append(" - Extended reasoning with thinking mode")
+                    elif "Strong reasoning" in description:
+                        output_lines.append(" - Logical problems, systematic analysis")
+                    elif "EXTREMELY EXPENSIVE" in description:
+                        output_lines.append(" - ⚠️ Professional grade (very expensive)")
+                    elif "Advanced reasoning" in description:
+                        output_lines.append(" - Advanced reasoning and complex analysis")
+
+                # Show aliases for this provider
+                aliases = []
+                for alias_name, target in provider.SUPPORTED_MODELS.items():
+                    if isinstance(target, str): # This is an alias
+                        aliases.append(f"- `{alias_name}` → `{target}`")
+
+                if aliases:
+                    output_lines.append("\n**Aliases**:")
+                    output_lines.extend(aliases)
             else:
-                output_lines.append(f"**Status**: Not configured (set {provider_info['env_key']})")
+                output_lines.append(f"**Status**: Not configured (set {info['env_key']})")

             output_lines.append("")
@@ -171,7 +166,7 @@ class ListModelsTool(BaseTool):

             # Group by provider for better organization
             providers_models = {}
-            for model_name in available_models[:20]: # Limit to first 20 to avoid overwhelming output
+            for model_name in available_models: # Show ALL available models
                 # Try to resolve to get config details
                 config = registry.resolve(model_name)
                 if config:
@@ -187,10 +182,10 @@ class ListModelsTool(BaseTool):
                     providers_models[provider_name] = []
                 providers_models[provider_name].append((model_name, None))

-        output_lines.append("\n**Available Models** (showing top 20):")
+        output_lines.append("\n**Available Models**:")
         for provider_name, models in sorted(providers_models.items()):
             output_lines.append(f"\n*{provider_name.title()}:*")
-            for alias, config in models[:5]: # Limit each provider to 5 models
+            for alias, config in models: # Show ALL models from each provider
                 if config:
                     context_str = f"{config.context_window // 1000}K" if config.context_window else "?"
                     output_lines.append(f"- `{alias}` → `{config.model_name}` ({context_str} context)")
@@ -198,8 +193,7 @@ class ListModelsTool(BaseTool):
                     output_lines.append(f"- `{alias}`")

         total_models = len(available_models)
-        if total_models > 20:
-            output_lines.append(f"\n...and {total_models - 20} more models available")
+        # Show all models - no truncation message needed

         # Check if restrictions are applied
         restriction_service = None
@@ -267,9 +261,8 @@ class ListModelsTool(BaseTool):
         configured_count = sum(
             [
                 1
-                for p in native_providers.values()
-                if os.getenv(p["env_key"])
-                and os.getenv(p["env_key"]) != f"your_{p['env_key'].lower().replace('_api_key', '')}_api_key_here"
+                for provider_type, info in provider_info.items()
+                if ModelProviderRegistry.get_provider(provider_type) is not None
             ]
         )
         if is_openrouter_configured:
@@ -220,19 +220,19 @@ class BaseTool(ABC):

     def _get_available_models(self) -> list[str]:
         """
-        Get list of all possible models for the schema enum.
+        Get list of models available from enabled providers.

-        In auto mode, we show ALL models from MODEL_CAPABILITIES_DESC so Claude
-        can see all options, even if some require additional API configuration.
-        Runtime validation will handle whether a model is actually available.
+        Only returns models from providers that have valid API keys configured.
+        This fixes the namespace collision bug where models from disabled providers
+        were shown to Claude, causing routing conflicts.

         Returns:
-            List of all model names from config
+            List of model names from enabled providers only
         """
-        from config import MODEL_CAPABILITIES_DESC
+        from providers.registry import ModelProviderRegistry

-        # Start with all models from MODEL_CAPABILITIES_DESC
-        all_models = list(MODEL_CAPABILITIES_DESC.keys())
+        # Get models from enabled providers only (those with valid API keys)
+        all_models = ModelProviderRegistry.get_available_model_names()

         # Add OpenRouter models if OpenRouter is configured
         openrouter_key = os.getenv("OPENROUTER_API_KEY")
@@ -286,7 +286,7 @@ class BaseTool(ABC):
         """
         import os

-        from config import DEFAULT_MODEL, MODEL_CAPABILITIES_DESC
+        from config import DEFAULT_MODEL

         # Check if OpenRouter is configured
         has_openrouter = bool(
@@ -300,8 +300,39 @@ class BaseTool(ABC):
                 "IMPORTANT: Use the model specified by the user if provided, OR select the most suitable model "
                 "for this specific task based on the requirements and capabilities listed below:"
             ]
-            for model, desc in MODEL_CAPABILITIES_DESC.items():
-                model_desc_parts.append(f"- '{model}': {desc}")
+
+            # Get descriptions from enabled providers
+            from providers.base import ProviderType
+            from providers.registry import ModelProviderRegistry
+
+            # Map provider types to readable names
+            provider_names = {
+                ProviderType.GOOGLE: "Gemini models",
+                ProviderType.OPENAI: "OpenAI models",
+                ProviderType.XAI: "X.AI GROK models",
+                ProviderType.CUSTOM: "Custom models",
+                ProviderType.OPENROUTER: "OpenRouter models",
+            }
+
+            # Check available providers and add their model descriptions
+            for provider_type in [ProviderType.GOOGLE, ProviderType.OPENAI, ProviderType.XAI]:
+                provider = ModelProviderRegistry.get_provider(provider_type)
+                if provider:
+                    provider_section_added = False
+                    for model_name in provider.list_models(respect_restrictions=True):
+                        try:
+                            # Get model config to extract description
+                            model_config = provider.SUPPORTED_MODELS.get(model_name)
+                            if isinstance(model_config, dict) and "description" in model_config:
+                                if not provider_section_added:
+                                    model_desc_parts.append(
+                                        f"\n{provider_names[provider_type]} - Available when {provider_type.value.upper()}_API_KEY is configured:"
+                                    )
+                                    provider_section_added = True
+                                model_desc_parts.append(f"- '{model_name}': {model_config['description']}")
+                        except Exception:
+                            # Skip models without descriptions
+                            continue

             # Add custom models if custom API is configured
             custom_url = os.getenv("CUSTOM_API_URL")
@@ -355,7 +386,7 @@ class BaseTool(ABC):

                     if model_configs:
                         model_desc_parts.append("\nOpenRouter models (use these aliases):")
-                        for alias, config in model_configs[:10]: # Limit to top 10
+                        for alias, config in model_configs: # Show ALL models so Claude can choose
                             # Format context window in human-readable form
                             context_tokens = config.context_window
                             if context_tokens >= 1_000_000:
@@ -373,10 +404,7 @@ class BaseTool(ABC):
                                 desc = f"- '{alias}' ({context_str} context): {config.model_name}"
                             model_desc_parts.append(desc)

-                        # Add note about additional models if any were cut off
-                        total_models = len(model_configs)
-                        if total_models > 10:
-                            model_desc_parts.append(f"... and {total_models - 10} more models available")
+                        # Show all models - no truncation needed
                 except Exception as e:
                     # Log for debugging but don't fail
                     import logging
@@ -397,8 +425,8 @@ class BaseTool(ABC):
             }
         else:
             # Normal mode - model is optional with default
-            available_models = list(MODEL_CAPABILITIES_DESC.keys())
-            models_str = ", ".join(f"'{m}'" for m in available_models)
+            available_models = self._get_available_models()
+            models_str = ", ".join(f"'{m}'" for m in available_models) # Show ALL models so Claude can choose

             description = f"Model to use. Native models: {models_str}."
             if has_openrouter:
@@ -1099,19 +1127,19 @@ When recommending searches, be specific about what information you need and why

     def _get_available_models(self) -> list[str]:
         """
-        Get list of all possible models for the schema enum.
+        Get list of models available from enabled providers.

-        In auto mode, we show ALL models from MODEL_CAPABILITIES_DESC so Claude
-        can see all options, even if some require additional API configuration.
-        Runtime validation will handle whether a model is actually available.
+        Only returns models from providers that have valid API keys configured.
+        This fixes the namespace collision bug where models from disabled providers
+        were shown to Claude, causing routing conflicts.

         Returns:
-            List of all model names from config
+            List of model names from enabled providers only
         """
-        from config import MODEL_CAPABILITIES_DESC
+        from providers.registry import ModelProviderRegistry

-        # Start with all models from MODEL_CAPABILITIES_DESC
-        all_models = list(MODEL_CAPABILITIES_DESC.keys())
+        # Get models from enabled providers only (those with valid API keys)
+        all_models = ModelProviderRegistry.get_available_model_names()

         # Add OpenRouter models if OpenRouter is configured
         openrouter_key = os.getenv("OPENROUTER_API_KEY")
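
The test suite mentioned in the commit message is not part of this excerpt. As a rough sketch of the regression it guards against (the concrete tool class, its no-argument construction, and the patch target are assumptions for illustration, not taken from this diff):

```python
import os
from unittest.mock import patch


def test_disabled_provider_models_do_not_leak_into_schema():
    # Assumed: ChatTool is a concrete BaseTool subclass that can be constructed without arguments.
    from tools.chat import ChatTool

    enabled = ["gemini-2.5-flash", "gemini-2.5-pro"]
    # Only Gemini is "configured"; OpenRouter/custom URLs are absent, so no extra models get added.
    with patch.dict(os.environ, {"GEMINI_API_KEY": "test-key"}, clear=True), patch(
        "providers.registry.ModelProviderRegistry.get_available_model_names",
        return_value=enabled,
    ):
        models = ChatTool()._get_available_models()

    # Models from providers without API keys (e.g. OpenAI's o3) must not appear in the enum.
    assert "o3" not in models
    assert set(enabled).issubset(set(models))
```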