Merge branch 'main' into main
CHANGELOG.md
@@ -2,6 +2,24 @@
 
 <!-- version list -->
 
+## v5.11.1 (2025-10-01)
+
+### Bug Fixes
+
+- Remove duplicate OpenAI models from listmodels output
+  ([`c29e762`](https://github.com/BeehiveInnovations/zen-mcp-server/commit/c29e7623ace257eb45396cdf8c19e1659e29edb9))
+
+### Chores
+
+- Sync version to config.py [skip ci]
+  ([`1209064`](https://github.com/BeehiveInnovations/zen-mcp-server/commit/12090646ee83f2368311d595d87ae947e46ddacd))
+
+### Testing
+
+- Update OpenAI provider alias tests to match new format
+  ([`d13700c`](https://github.com/BeehiveInnovations/zen-mcp-server/commit/d13700c14c7ee3d092302837cb1726d17bab1ab8))
+
+
 ## v5.11.0 (2025-08-26)
 
 ### Chores
config.py
@@ -14,9 +14,9 @@ import os
 # These values are used in server responses and for tracking releases
 # IMPORTANT: This is the single source of truth for version and author info
 # Semantic versioning: MAJOR.MINOR.PATCH
-__version__ = "5.11.0"
+__version__ = "5.11.1"
 # Last update date in ISO format
-__updated__ = "2025-09-05"
+__updated__ = "2025-10-01"
 # Primary maintainer
 __author__ = "Fahad Gilani"
 
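The "Sync version to config.py" chore above is the release pipeline rewriting `__version__` to match the tagged release. A minimal sketch of that kind of sync step, assuming a simple regex rewrite (illustrative only; the actual release tooling is not part of this diff, and `bump_version_source` is a hypothetical helper):

```python
import re

def bump_version_source(source: str, new_version: str) -> str:
    """Rewrite the __version__ assignment in a config.py-style source string."""
    return re.sub(r'__version__\s*=\s*"[^"]*"', f'__version__ = "{new_version}"', source)

sample = '__version__ = "5.11.0"\n__updated__ = "2025-09-05"\n'
print(bump_version_source(sample, "5.11.1"))
# __version__ = "5.11.1"   (__updated__ would get its own rewrite)
```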
@@ -38,7 +38,7 @@ class OpenAIModelProvider(OpenAICompatibleProvider):
             supports_temperature=True,  # Regular models accept temperature parameter
             temperature_constraint=create_temperature_constraint("fixed"),
             description="GPT-5 (400K context, 128K output) - Advanced model with reasoning support",
-            aliases=["gpt5", "gpt-5"],
+            aliases=["gpt5"],
         ),
         "gpt-5-mini": ModelCapabilities(
             provider=ProviderType.OPENAI,
@@ -110,7 +110,7 @@ class OpenAIModelProvider(OpenAICompatibleProvider):
             supports_temperature=False,  # O3 models don't accept temperature parameter
             temperature_constraint=create_temperature_constraint("fixed"),
             description="Fast O3 variant (200K context) - Balanced performance/speed, moderate complexity",
-            aliases=["o3mini", "o3-mini"],
+            aliases=["o3mini"],
         ),
         "o3-pro": ModelCapabilities(
             provider=ProviderType.OPENAI,
@@ -128,7 +128,7 @@ class OpenAIModelProvider(OpenAICompatibleProvider):
             supports_temperature=False,  # O3 models don't accept temperature parameter
             temperature_constraint=create_temperature_constraint("fixed"),
             description="Professional-grade reasoning (200K context) - EXTREMELY EXPENSIVE: Only for the most complex problems requiring universe-scale complexity analysis OR when the user explicitly asks for this model. Use sparingly for critical architectural decisions or exceptionally complex debugging that other models cannot handle.",
-            aliases=["o3-pro"],
+            aliases=["o3pro"],
         ),
         "o4-mini": ModelCapabilities(
             provider=ProviderType.OPENAI,
@@ -146,7 +146,7 @@ class OpenAIModelProvider(OpenAICompatibleProvider):
             supports_temperature=False,  # O4 models don't accept temperature parameter
             temperature_constraint=create_temperature_constraint("fixed"),
             description="Latest reasoning model (200K context) - Optimized for shorter contexts, rapid reasoning",
-            aliases=["o4mini", "o4-mini"],
+            aliases=["o4mini"],
         ),
         "gpt-4.1": ModelCapabilities(
             provider=ProviderType.OPENAI,
@@ -164,7 +164,7 @@ class OpenAIModelProvider(OpenAICompatibleProvider):
             supports_temperature=True,  # Regular models accept temperature parameter
             temperature_constraint=create_temperature_constraint("range"),
             description="GPT-4.1 (1M context) - Advanced reasoning model with large context window",
-            aliases=["gpt4.1", "gpt-4.1"],
+            aliases=["gpt4.1"],
         ),
     }
 
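The pattern across all five hunks is the same: each model's canonical key is dropped from its own `aliases` list. The self-reference is redundant because a lookup matches the registry key before consulting aliases, roughly like the sketch below (a simplified stand-in for `ModelCapabilities`; the real `_resolve_model_name` may differ):

```python
from dataclasses import dataclass, field

@dataclass
class Caps:  # simplified stand-in for ModelCapabilities
    aliases: list = field(default_factory=list)

SUPPORTED = {
    "gpt-5": Caps(aliases=["gpt5"]),
    "o3-mini": Caps(aliases=["o3mini"]),
    "o3-pro": Caps(aliases=["o3pro"]),
}

def resolve(name: str) -> str:
    if name in SUPPORTED:            # canonical names hit the registry key directly...
        return name
    for canonical, caps in SUPPORTED.items():
        if name in caps.aliases:     # ...so aliases only need to hold the short forms
            return canonical
    raise KeyError(f"unknown model: {name}")

assert resolve("gpt-5") == "gpt-5"   # works without a "gpt-5" self-alias
assert resolve("o3pro") == "o3-pro"
```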
pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "zen-mcp-server"
-version = "5.11.0"
+version = "5.11.1"
 description = "AI-powered MCP server with multiple model providers"
 requires-python = ">=3.9"
 dependencies = [
@@ -50,15 +50,15 @@ class TestSupportedModelsAliases:
         # "mini" is now an alias for gpt-5-mini, not o4-mini
         assert "mini" in provider.SUPPORTED_MODELS["gpt-5-mini"].aliases
         assert "o4mini" in provider.SUPPORTED_MODELS["o4-mini"].aliases
-        assert "o4-mini" in provider.SUPPORTED_MODELS["o4-mini"].aliases
+        # o4-mini is no longer in its own aliases (removed self-reference)
         assert "o3mini" in provider.SUPPORTED_MODELS["o3-mini"].aliases
-        assert "o3-pro" in provider.SUPPORTED_MODELS["o3-pro"].aliases
+        assert "o3pro" in provider.SUPPORTED_MODELS["o3-pro"].aliases
         assert "gpt4.1" in provider.SUPPORTED_MODELS["gpt-4.1"].aliases
 
         # Test alias resolution
         assert provider._resolve_model_name("mini") == "gpt-5-mini"  # mini -> gpt-5-mini now
         assert provider._resolve_model_name("o3mini") == "o3-mini"
-        assert provider._resolve_model_name("o3-pro") == "o3-pro"  # o3-pro is already the base model name
+        assert provider._resolve_model_name("o3pro") == "o3-pro"  # o3pro resolves to o3-pro
         assert provider._resolve_model_name("o4mini") == "o4-mini"
         assert provider._resolve_model_name("gpt4.1") == "gpt-4.1"  # gpt4.1 resolves to gpt-4.1
 
|||||||
@@ -138,7 +138,9 @@ class ListModelsTool(BaseTool):
|
|||||||
for model_name, capabilities in provider.get_model_configurations().items():
|
for model_name, capabilities in provider.get_model_configurations().items():
|
||||||
if capabilities.aliases:
|
if capabilities.aliases:
|
||||||
for alias in capabilities.aliases:
|
for alias in capabilities.aliases:
|
||||||
aliases.append(f"- `{alias}` → `{model_name}`")
|
# Skip aliases that are the same as the model name to avoid duplicates
|
||||||
|
if alias != model_name:
|
||||||
|
aliases.append(f"- `{alias}` → `{model_name}`")
|
||||||
|
|
||||||
if aliases:
|
if aliases:
|
||||||
output_lines.append("\n**Aliases**:")
|
output_lines.append("\n**Aliases**:")
|
||||||
|
|||||||
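The effect of the guard is easiest to see on a self-referential entry like the old `o3-pro` alias list. A standalone before/after sketch (stub objects; not the tool's real data structures):

```python
from collections import namedtuple

Caps = namedtuple("Caps", "aliases")
models = {"o3-pro": Caps(aliases=["o3-pro"])}  # old-style self-referential alias

old_lines, new_lines = [], []
for model_name, capabilities in models.items():
    for alias in capabilities.aliases:
        old_lines.append(f"- `{alias}` → `{model_name}`")  # old behavior
        if alias != model_name:                             # new guard
            new_lines.append(f"- `{alias}` → `{model_name}`")

print(old_lines)  # ['- `o3-pro` → `o3-pro`']  <- the duplicate the fix removes
print(new_lines)  # []                          <- self-alias skipped
```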