Fix duplicate version tool in MCP tool list (#121)

* fix: remove duplicate version tool registration

The version tool was appearing twice in the MCP tool list due to:
- VersionTool class properly registered in TOOLS dictionary (line 181)
- Hardcoded Tool() registration in handle_list_tools() (lines 451-462)

This duplicate was leftover from the architectural migration:
- June 8, 2025: Original hardcoded "get_version" tool added
- June 14, 2025: Renamed from "get_version" to "version"
- June 21, 2025: VersionTool class added during workflow architecture migration
- The old hardcoded registration was never removed

The hardcoded registration has been removed since VersionTool provides
identical functionality through the proper architecture.

Fixes: BeehiveInnovations/zen-mcp-server#120

* fix: complete removal of legacy version tool code

Following up on the duplicate version tool fix, this commit removes
all remaining dead code identified by Gemini Code Assist:

- Removed dead elif block for version tool (lines 639-643)
  This block was unreachable since version is handled by TOOLS registry

- Removed orphaned handle_version() function (lines 942-1030)
  No longer called after elif block removal

- Fixed imports: removed unused __author__ and __updated__ imports

These were remnants from the June 2025 migration from function-based
to class-based tools. The VersionTool class now handles all version
functionality through the standard tool architecture.

All 546 tests pass - no functional changes.

Related to: BeehiveInnovations/zen-mcp-server#120
This commit is contained in:
Brad Fair
2025-06-22 12:09:58 -05:00
committed by GitHub
parent 132c6ca025
commit 3960835793

114
server.py
View File

@@ -23,7 +23,6 @@ import logging
 import os
 import sys
 import time
-from datetime import datetime
 from logging.handlers import RotatingFileHandler
 from pathlib import Path
 from typing import Any, Optional
@@ -52,8 +51,6 @@ from mcp.types import ( # noqa: E402
 from config import ( # noqa: E402
     DEFAULT_MODEL,
-    __author__,
-    __updated__,
     __version__,
 )
 from tools import ( # noqa: E402
@@ -446,21 +443,6 @@ async def handle_list_tools() -> list[Tool]:
             )
         )

-    # Add utility tools that provide server metadata and configuration info
-    # These tools don't require AI processing but are useful for clients
-    tools.extend(
-        [
-            Tool(
-                name="version",
-                description=(
-                    "VERSION & CONFIGURATION - Get server version, configuration details, "
-                    "and list of available tools. Useful for debugging and understanding capabilities."
-                ),
-                inputSchema={"type": "object", "properties": {}},
-            ),
-        ]
-    )
-
     # Log cache efficiency info
     if os.getenv("OPENROUTER_API_KEY") and os.getenv("OPENROUTER_API_KEY") != "your_openrouter_api_key_here":
         logger.debug("OpenRouter registry cache used efficiently across all tool schemas")
@@ -650,13 +632,6 @@ async def handle_call_tool(name: str, arguments: dict[str, Any]) -> list[TextCon
                 pass
             return result

-        # Route to utility tools that provide server information
-        elif name == "version":
-            logger.info(f"Executing utility tool '{name}'")
-            result = await handle_version()
-            logger.info(f"Utility tool '{name}' execution completed")
-            return result
-
         # Handle unknown tool requests gracefully
         else:
             return [TextContent(type="text", text=f"Unknown tool: {name}")]
@@ -960,95 +935,6 @@ async def reconstruct_thread_context(arguments: dict[str, Any]) -> dict[str, Any
     return enhanced_arguments

-
-async def handle_version() -> list[TextContent]:
-    """
-    Get comprehensive version and configuration information about the server.
-
-    Provides details about the server version, configuration settings,
-    available tools, and runtime environment. Useful for debugging and
-    understanding the server's capabilities.
-
-    Returns:
-        Formatted text with version and configuration details
-    """
-    # Import thinking mode here to avoid circular imports
-    from config import DEFAULT_THINKING_MODE_THINKDEEP
-
-    # Gather comprehensive server information
-    version_info = {
-        "version": __version__,
-        "updated": __updated__,
-        "author": __author__,
-        "default_model": DEFAULT_MODEL,
-        "default_thinking_mode_thinkdeep": DEFAULT_THINKING_MODE_THINKDEEP,
-        "max_context_tokens": "Dynamic (model-specific)",
-        "python_version": f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}",
-        "server_started": datetime.now().isoformat(),
-        "available_tools": list(TOOLS.keys()) + ["version"],
-    }
-
-    # Check configured providers and available models
-    from providers import ModelProviderRegistry
-    from providers.base import ProviderType
-
-    configured_providers = []
-    available_models = ModelProviderRegistry.get_available_models(respect_restrictions=True)
-
-    # Group models by provider
-    models_by_provider = {}
-    for model_name, provider_type in available_models.items():
-        if provider_type not in models_by_provider:
-            models_by_provider[provider_type] = []
-        models_by_provider[provider_type].append(model_name)
-
-    # Format provider information with actual available models
-    if ProviderType.GOOGLE in models_by_provider:
-        gemini_models = ", ".join(sorted(models_by_provider[ProviderType.GOOGLE]))
-        configured_providers.append(f"Gemini ({gemini_models})")
-    if ProviderType.OPENAI in models_by_provider:
-        openai_models = ", ".join(sorted(models_by_provider[ProviderType.OPENAI]))
-        configured_providers.append(f"OpenAI ({openai_models})")
-    if ProviderType.XAI in models_by_provider:
-        xai_models = ", ".join(sorted(models_by_provider[ProviderType.XAI]))
-        configured_providers.append(f"X.AI ({xai_models})")
-    if ProviderType.CUSTOM in models_by_provider:
-        custom_models = ", ".join(sorted(models_by_provider[ProviderType.CUSTOM]))
-        custom_url = os.getenv("CUSTOM_API_URL", "")
-        configured_providers.append(f"Custom API ({custom_url}) - Models: {custom_models}")
-    if ProviderType.OPENROUTER in models_by_provider:
-        # For OpenRouter, show a summary since there could be many models
-        openrouter_count = len(models_by_provider[ProviderType.OPENROUTER])
-        configured_providers.append(f"OpenRouter ({openrouter_count} models via conf/custom_models.json)")
-
-    # Format the information in a human-readable way
-    text = f"""Zen MCP Server v{__version__}
-Updated: {__updated__}
-Author: {__author__}
-
-Configuration:
-- Default Model: {DEFAULT_MODEL}
-- Default Thinking Mode (ThinkDeep): {DEFAULT_THINKING_MODE_THINKDEEP}
-- Max Context: Dynamic (model-specific)
-- Python: {version_info["python_version"]}
-- Started: {version_info["server_started"]}
-
-Configured Providers:
-{chr(10).join(f"  - {provider}" for provider in configured_providers)}
-
-Available Tools:
-{chr(10).join(f"  - {tool}" for tool in version_info["available_tools"])}
-
-All Available Models:
-{chr(10).join(f"  - {model}" for model in sorted(available_models.keys()))}
-
-For updates, visit: https://github.com/BeehiveInnovations/zen-mcp-server"""
-
-    # Create standardized tool output
-    tool_output = ToolOutput(status="success", content=text, content_type="text", metadata={"tool_name": "version"})
-
-    return [TextContent(type="text", text=tool_output.model_dump_json())]
-
-
 @server.list_prompts()
 async def handle_list_prompts() -> list[Prompt]:
     """