feat: Add uvx support
@@ -14,7 +14,7 @@ import os
 # These values are used in server responses and for tracking releases
 # IMPORTANT: This is the single source of truth for version and author info
 # Semantic versioning: MAJOR.MINOR.PATCH
-__version__ = "5.7.3"
+__version__ = "5.7.4"
 # Last update date in ISO format
 __updated__ = "2025-06-27"
 # Primary maintainer
@@ -178,6 +178,11 @@ class OpenRouterProvider(OpenAICompatibleProvider):
         # Resolve model alias to actual OpenRouter model name
         resolved_model = self._resolve_model_name(model_name)

+        # Always disable streaming for OpenRouter
+        # MCP doesn't use streaming, and this avoids issues with O3 model access
+        if "stream" not in kwargs:
+            kwargs["stream"] = False
+
         # Call parent method with resolved model name
         return super().generate_content(
             prompt=prompt,
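A note on the streaming guard above: it only supplies a default and never overrides a value the caller passed in, which is the same behaviour as a dict default. A minimal sketch of the equivalent form (assuming kwargs is the plain keyword-argument dict forwarded to the OpenAI-compatible client):

    # Equivalent to the guarded assignment in the hunk above:
    # set a default only when the caller did not choose a value.
    kwargs.setdefault("stream", False)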
@@ -1,3 +1,31 @@
+[project]
+name = "zen-mcp-server"
+version = "0.1.0"
+description = "AI-powered MCP server with multiple model providers"
+requires-python = ">=3.9"
+dependencies = [
+    "mcp>=1.0.0",
+    "google-genai>=1.19.0",
+    "openai>=1.55.2",
+    "pydantic>=2.0.0",
+    "python-dotenv>=1.0.0",
+]
+
+[tool.setuptools.packages.find]
+include = ["tools*", "providers*", "systemprompts*", "utils*"]
+
+[tool.setuptools]
+py-modules = ["server", "config"]
+
+[tool.setuptools.package-data]
+"*" = ["conf/*.json"]
+
+[tool.setuptools.data-files]
+"conf" = ["conf/custom_models.json"]
+
+[project.scripts]
+zen-mcp-server = "server:run"
+
 [tool.black]
 line-length = 120
 target-version = ['py39', 'py310', 'py311', 'py312', 'py313']
@@ -57,4 +85,4 @@ ignore = [

 [build-system]
 requires = ["setuptools>=45", "wheel", "setuptools_scm[toml]>=6.2"]
-build-backend = "setuptools.build_meta"
+build-backend = "setuptools.build_meta"
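The [project.scripts] entry above (zen-mcp-server = "server:run") is the piece that makes uvx invocation possible: installing the package, or running it ephemerally with uvx, produces a zen-mcp-server executable that imports the server module and calls its run() function. Roughly, the generated wrapper behaves like this sketch (not the literal script setuptools writes):

    # Hypothetical stand-in for the generated console-script wrapper.
    import sys

    from server import run

    if __name__ == "__main__":
        sys.exit(run())

Because the new [project] table also declares the runtime dependencies, a launcher such as uvx can resolve them into an isolated environment and invoke the entry point without a prior pip install of the project.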
server.py (46 lines changed)
@@ -28,13 +28,20 @@ from logging.handlers import RotatingFileHandler
 from pathlib import Path
 from typing import Any, Optional

-from dotenv import load_dotenv
-
-# Load environment variables from .env file in the script's directory
-# This ensures .env is loaded regardless of the current working directory
-script_dir = Path(__file__).parent
-env_file = script_dir / ".env"
-load_dotenv(dotenv_path=env_file)
+# Try to load environment variables from .env file if dotenv is available
+# This is optional - environment variables can still be passed directly
+try:
+    from dotenv import load_dotenv
+
+    # Load environment variables from .env file in the script's directory
+    # This ensures .env is loaded regardless of the current working directory
+    script_dir = Path(__file__).parent
+    env_file = script_dir / ".env"
+    load_dotenv(dotenv_path=env_file)
+except ImportError:
+    # dotenv not available - this is fine, environment variables can still be passed directly
+    # This commonly happens when running via uvx or in minimal environments
+    pass

 from mcp.server import Server  # noqa: E402
 from mcp.server.models import InitializationOptions  # noqa: E402
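The try/except import guard turns python-dotenv into a soft dependency: if the module is absent, startup continues and configuration falls back to environment variables passed in by the caller. An equivalent formulation, shown only as a sketch, probes for the module before importing it:

    # Alternative to the try/except guard above; functionally equivalent.
    import importlib.util
    from pathlib import Path

    if importlib.util.find_spec("dotenv") is not None:
        from dotenv import load_dotenv

        env_file = Path(__file__).parent / ".env"
        load_dotenv(dotenv_path=env_file)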
@@ -362,6 +369,12 @@ def configure_providers():
     Raises:
         ValueError: If no valid API keys are found or conflicting configurations detected
     """
+    # Log environment variable status for debugging
+    logger.debug("Checking environment variables for API keys...")
+    api_keys_to_check = ["OPENAI_API_KEY", "OPENROUTER_API_KEY", "GEMINI_API_KEY", "XAI_API_KEY", "CUSTOM_API_URL"]
+    for key in api_keys_to_check:
+        value = os.getenv(key)
+        logger.debug(f" {key}: {'[PRESENT]' if value else '[MISSING]'}")
     from providers import ModelProviderRegistry
     from providers.base import ProviderType
     from providers.custom import CustomProvider
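For a sense of what the new debug block prints, with only GEMINI_API_KEY exported the messages would look roughly like the comments in this sketch (illustrative, derived from the f-strings above, not captured from a real run):

    # Checking environment variables for API keys...
    #  OPENAI_API_KEY: [MISSING]
    #  OPENROUTER_API_KEY: [MISSING]
    #  GEMINI_API_KEY: [PRESENT]
    #  XAI_API_KEY: [MISSING]
    #  CUSTOM_API_URL: [MISSING]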
@@ -386,10 +399,16 @@ def configure_providers():

     # Check for OpenAI API key
     openai_key = os.getenv("OPENAI_API_KEY")
+    logger.debug(f"OpenAI key check: key={'[PRESENT]' if openai_key else '[MISSING]'}")
     if openai_key and openai_key != "your_openai_api_key_here":
         valid_providers.append("OpenAI (o3)")
         has_native_apis = True
         logger.info("OpenAI API key found - o3 model available")
+    else:
+        if not openai_key:
+            logger.debug("OpenAI API key not found in environment")
+        else:
+            logger.debug("OpenAI API key is placeholder value")

     # Check for X.AI API key
     xai_key = os.getenv("XAI_API_KEY")
@@ -407,10 +426,16 @@

     # Check for OpenRouter API key
     openrouter_key = os.getenv("OPENROUTER_API_KEY")
+    logger.debug(f"OpenRouter key check: key={'[PRESENT]' if openrouter_key else '[MISSING]'}")
     if openrouter_key and openrouter_key != "your_openrouter_api_key_here":
         valid_providers.append("OpenRouter")
         has_openrouter = True
         logger.info("OpenRouter API key found - Multiple models available via OpenRouter")
+    else:
+        if not openrouter_key:
+            logger.debug("OpenRouter API key not found in environment")
+        else:
+            logger.debug("OpenRouter API key is placeholder value")

     # Check for custom API endpoint (Ollama, vLLM, etc.)
     custom_url = os.getenv("CUSTOM_API_URL")
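The OpenAI and OpenRouter checks above share the same placeholder-aware shape; if more providers follow, the test could be factored into a small helper. A hypothetical sketch (the helper is illustrative, not part of the diff):

    from typing import Optional

    def _is_real_key(value: Optional[str], placeholder: str) -> bool:
        """True when the key is set and is not the documented placeholder value."""
        return bool(value) and value != placeholder

    # Usage, mirroring the checks above:
    #   _is_real_key(openai_key, "your_openai_api_key_here")
    #   _is_real_key(openrouter_key, "your_openrouter_api_key_here")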
@@ -1285,9 +1310,14 @@ async def main():
     )


-if __name__ == "__main__":
+def run():
+    """Console script entry point for zen-mcp-server."""
     try:
         asyncio.run(main())
     except KeyboardInterrupt:
         # Handle graceful shutdown
         pass
+
+
+if __name__ == "__main__":
+    run()
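With run() exposed at module level, the same entry point backs both direct execution (python server.py) and the zen-mcp-server console script declared in pyproject.toml. Other Python code could also start the server programmatically; a minimal sketch, assuming server.py is importable from the current directory:

    # Starts the MCP server and blocks until shutdown or Ctrl-C.
    from server import run

    run()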