Merge pull request #1 from BeehiveInnovations/main

Refreshing this branch
This commit is contained in:
Devon Hillard
2025-10-01 10:33:12 -06:00
committed by GitHub
8 changed files with 144 additions and 10 deletions

View File

@@ -2,6 +2,19 @@
<!-- version list -->
## v5.14.1 (2025-10-01)
### Bug Fixes
- https://github.com/BeehiveInnovations/zen-mcp-server/issues/258
([`696b45f`](https://github.com/BeehiveInnovations/zen-mcp-server/commit/696b45f25e80faccb67034254cf9a8fc4c643dbd))
### Chores
- Sync version to config.py [skip ci]
([`692016c`](https://github.com/BeehiveInnovations/zen-mcp-server/commit/692016c6205ed0a0c3d9e830482d88231aca2e31))
## v5.14.0 (2025-10-01)
### Chores

View File

@@ -9,7 +9,7 @@
<br/>
**AI orchestration for Claude Code** - A Model Context Protocol server that gives your CLI of choice (e.g. [Claude Code](https://www.anthropic.com/claude-code)) access to multiple AI models for enhanced code analysis, problem-solving, and collaborative development. Zen
works with Claude Code, Gemini CLI, Codex CLI as well as others.
works with Claude Code, Gemini CLI, Codex CLI, and IDE clients like [Cursor](https://cursor.com) and the [Claude Dev extension for VS Code](https://marketplace.visualstudio.com/items?itemName=Anthropic.claude-vscode).
**True AI collaboration with conversation continuity** - Claude stays in control but gets perspectives from the best AI for each subtask. Context carries forward seamlessly across tools and models, enabling complex workflows like: code reviews with multiple models → automated planning → implementation → pre-commit validation.
@@ -130,6 +130,7 @@ cd zen-mcp-server
```
👉 **[Complete Setup Guide](docs/getting-started.md)** with detailed installation, configuration for Gemini / Codex, and troubleshooting
👉 **[Cursor & VS Code Setup](docs/getting-started.md#ide-clients)** for IDE integration instructions
## Core Tools

View File

@@ -14,7 +14,7 @@ import os
# These values are used in server responses and for tracking releases
# IMPORTANT: This is the single source of truth for version and author info
# Semantic versioning: MAJOR.MINOR.PATCH
__version__ = "5.14.0"
__version__ = "5.14.1"
# Last update date in ISO format
__updated__ = "2025-10-01"
# Primary maintainer

View File

@@ -147,6 +147,33 @@ PATH = "/usr/local/bin:/usr/bin:/bin:/opt/homebrew/bin:$HOME/.local/bin:$HOME/.c
GEMINI_API_KEY = "your_api_key_here"
```
#### IDE Clients (Cursor & VS Code)
Zen works in GUI IDEs that speak MCP. The configuration mirrors the CLI examples above—point the client at the `uvx` launcher and set any required environment variables.
**Cursor IDE**
1. Open Cursor → `Settings` (`Cmd+,`/`Ctrl+,`) → **Integrations** → **Model Context Protocol (MCP)**.
2. Click **Add MCP Server** and supply the following values:
- Command: `sh`
- Args: `-c` and `for p in $(which uvx 2>/dev/null) $HOME/.local/bin/uvx /opt/homebrew/bin/uvx /usr/local/bin/uvx uvx; do [ -x "$p" ] && exec "$p" --from git+https://github.com/BeehiveInnovations/zen-mcp-server.git zen-mcp-server; done; echo 'uvx not found' >&2; exit 1`
- Environment (example):
- `PATH=/usr/local/bin:/usr/bin:/bin:/opt/homebrew/bin:~/.local/bin`
- `GEMINI_API_KEY=your_api_key_here`
3. Save the configuration—Cursor will launch the MCP server on demand. See the [Cursor MCP guide](https://cursor.com/docs) for screenshots of the UI.
**Visual Studio Code (Claude Dev extension)**
1. Install the [Claude Dev extension](https://marketplace.visualstudio.com/items?itemName=Anthropic.claude-vscode) v0.6.0 or later.
2. Open the Command Palette (`Cmd+Shift+P`/`Ctrl+Shift+P`) → **Claude: Configure MCP Servers** → **Add server**.
3. When prompted, use the same values as above:
- Command: `sh`
- Args: `-c` and the `uvx` bootstrap loop
- Environment: add the API keys you need (e.g. `GEMINI_API_KEY`, `OPENAI_API_KEY`)
4. Save the JSON snippet the extension generates. VS Code will reload the server automatically the next time you interact with Claude.
👉 Pro tip: If you prefer a one-line command, replace the long loop with `uvx --from git+https://github.com/BeehiveInnovations/zen-mcp-server.git zen-mcp-server`—just make sure `uvx` is on your PATH for every client.
**Benefits of uvx method:**
- ✅ Zero manual setup required
- ✅ Always pulls latest version

View File

@@ -1,6 +1,6 @@
[project]
name = "zen-mcp-server"
version = "5.14.0"
version = "5.14.1"
description = "AI-powered MCP server with multiple model providers"
requires-python = ">=3.9"
dependencies = [

View File

@@ -263,6 +263,7 @@ class TestAutoModeComprehensive:
"OPENAI_API_KEY": None,
"XAI_API_KEY": None,
"OPENROUTER_API_KEY": None,
"CUSTOM_API_URL": None,
"DEFAULT_MODEL": "auto",
}

View File

@@ -6,6 +6,7 @@ all expected models based on which providers are configured via environment vari
"""
import importlib
import json
import os
import pytest
@@ -121,6 +122,18 @@ class TestModelEnumeration:
assert found_count == 0, "Custom models should not be included without CUSTOM_API_URL"
def test_custom_models_not_exposed_with_openrouter_only(self):
    """OpenRouter access by itself must not surface custom-only endpoints."""
    # Only the OpenRouter key is configured; CUSTOM_API_URL is left unset,
    # so custom-endpoint aliases must stay out of the enumerated models.
    self._setup_environment({"OPENROUTER_API_KEY": "test-openrouter-key"})

    available = AnalyzeTool()._get_available_models()

    custom_only_aliases = ("local-llama", "llama3.2")
    for alias in custom_only_aliases:
        message = f"Custom model alias '{alias}' should remain hidden without CUSTOM_API_URL"
        assert alias not in available, message
def test_no_duplicates_with_overlapping_providers(self):
"""Test that models aren't duplicated when multiple providers offer the same model."""
self._setup_environment(
@@ -165,6 +178,54 @@ class TestModelEnumeration:
else:
assert model_name not in models, f"Native model {model_name} should not be present without API key"
def test_openrouter_free_model_aliases_available(self, tmp_path, monkeypatch):
    """Free OpenRouter variants should expose both canonical names and aliases."""
    # Project imports are pulled in up front; importing is idempotent, so this
    # does not change when the modules' side effects (if any) run.
    from providers.base import ProviderType
    from providers.openrouter import OpenRouterProvider
    from tools.shared.base_tool import BaseTool

    # OpenRouter is the only configured provider for this scenario.
    self._setup_environment({"OPENROUTER_API_KEY": "test-openrouter-key"})

    # Write a throwaway model config containing a single free-tier variant.
    free_variant_config = {
        "models": [
            {
                "model_name": "deepseek/deepseek-r1:free",
                "aliases": ["deepseek-free", "r1-free"],
                "context_window": 163840,
                "max_output_tokens": 8192,
                "supports_extended_thinking": False,
                "supports_json_mode": True,
                "supports_function_calling": False,
                "supports_images": False,
                "max_image_size_mb": 0.0,
                "description": "DeepSeek R1 free tier variant",
            }
        ]
    }
    config_file = tmp_path / "custom_models.json"
    config_file.write_text(json.dumps(free_variant_config), encoding="utf-8")
    monkeypatch.setenv("CUSTOM_MODELS_CONFIG_PATH", str(config_file))

    # Drop every cached registry so the temporary config above gets loaded.
    monkeypatch.setattr(BaseTool, "_openrouter_registry_cache", None, raising=False)
    monkeypatch.setattr(OpenRouterProvider, "_registry", None, raising=False)

    # Rebuild the provider registry from scratch with OpenRouter registered.
    ModelProviderRegistry._instance = None
    ModelProviderRegistry.register_provider(ProviderType.OPENROUTER, OpenRouterProvider)

    exposed = AnalyzeTool()._get_available_models()

    assert "deepseek/deepseek-r1:free" in exposed, "Canonical free model name should be available"
    assert "deepseek-free" in exposed, "Free model alias should be included for MCP validation"
# DELETED: test_auto_mode_behavior_with_environment_variables
# This test was fundamentally broken due to registry corruption.

View File

@@ -1199,17 +1199,48 @@ When recommending searches, be specific about what information you need and why
# Get models from enabled providers only (those with valid API keys)
all_models = ModelProviderRegistry.get_available_model_names()
# Add OpenRouter models if OpenRouter is configured
# Add OpenRouter models and their aliases when OpenRouter is configured
openrouter_key = os.getenv("OPENROUTER_API_KEY")
if openrouter_key and openrouter_key != "your_openrouter_api_key_here":
try:
from config import OPENROUTER_MODELS
registry = self._get_openrouter_registry()
all_models.extend(OPENROUTER_MODELS)
except ImportError:
pass
# Include every known alias so MCP enum matches registry capabilities
for alias in registry.list_aliases():
config = registry.resolve(alias)
if config and config.is_custom:
# Custom-only models require CUSTOM_API_URL; defer to custom block
continue
if alias not in all_models:
all_models.append(alias)
except Exception as exc: # pragma: no cover - logged for observability
import logging
return sorted(set(all_models))
logging.debug(f"Failed to add OpenRouter models to enum: {exc}")
# Add custom models (and their aliases) when a custom endpoint is available
custom_url = os.getenv("CUSTOM_API_URL")
if custom_url:
try:
registry = self._get_openrouter_registry()
for alias in registry.list_aliases():
config = registry.resolve(alias)
if config and config.is_custom and alias not in all_models:
all_models.append(alias)
except Exception as exc: # pragma: no cover - logged for observability
import logging
logging.debug(f"Failed to add custom models to enum: {exc}")
# Remove duplicates while preserving insertion order
seen: set[str] = set()
unique_models: list[str] = []
for model in all_models:
if model not in seen:
seen.add(model)
unique_models.append(model)
return unique_models
def _resolve_model_context(self, arguments: dict, request) -> tuple[str, Any]:
"""