Improved tracer workflow tool

Updated 2.5 pro model name
Add metadata to results
Fix for https://github.com/BeehiveInnovations/zen-mcp-server/issues/98
Committed by Fahad on 2025-06-21 09:15:18 +04:00
parent 7d8ab39418
commit 668cb8b052
7 changed files with 856 additions and 20 deletions

View File

@@ -1635,6 +1635,12 @@ When recommending searches, be specific about what information you need and why
model_name = model_info.get("model_name")
if model_name:
metadata["model_used"] = model_name
# FEATURE: Add provider_used metadata (Added for Issue #98)
# This shows which provider (google, openai, openrouter, etc.) handled the request
# TEST COVERAGE: tests/test_provider_routing_bugs.py::TestProviderMetadataBug
provider = model_info.get("provider")
if provider:
metadata["provider_used"] = provider.get_provider_type().value
return ToolOutput(
status=status_key,
@@ -1712,6 +1718,10 @@ When recommending searches, be specific about what information you need and why
model_name = model_info.get("model_name")
if model_name:
metadata["model_used"] = model_name
# FEATURE: Add provider_used metadata (Added for Issue #98)
provider = model_info.get("provider")
if provider:
metadata["provider_used"] = provider.get_provider_type().value
return ToolOutput(
status="success",
@@ -1847,6 +1857,10 @@ When recommending searches, be specific about what information you need and why
model_name = model_info.get("model_name")
if model_name:
metadata["model_used"] = model_name
# FEATURE: Add provider_used metadata (Added for Issue #98)
provider = model_info.get("provider")
if provider:
metadata["provider_used"] = provider.get_provider_type().value
return ToolOutput(
status="continuation_available",
@@ -1866,6 +1880,10 @@ When recommending searches, be specific about what information you need and why
model_name = model_info.get("model_name")
if model_name:
metadata["model_used"] = model_name
# FEATURE: Add provider_used metadata (Added for Issue #98)
provider = model_info.get("provider")
if provider:
metadata["provider_used"] = provider.get_provider_type().value
return ToolOutput(
status="success",
@@ -2059,21 +2077,46 @@ When recommending searches, be specific about what information you need and why
provider = ModelProviderRegistry.get_provider_for_model(model_name)
if not provider:
# Try to determine provider from model name patterns
# =====================================================================================
# CRITICAL FALLBACK LOGIC - HANDLES PROVIDER AUTO-REGISTRATION
# =====================================================================================
#
# This fallback logic auto-registers providers when no provider is found for a model.
#
# CRITICAL BUG PREVENTION (Fixed in Issue #98):
# - Previously, providers were registered without checking API key availability
# - This caused Google provider to be used for "flash" model even when only
# OpenRouter API key was configured
# - The fix below validates API keys BEFORE registering any provider
#
# TEST COVERAGE: tests/test_provider_routing_bugs.py
# - test_fallback_routing_bug_reproduction()
# - test_fallback_should_not_register_without_api_key()
#
# DO NOT REMOVE API KEY VALIDATION - This prevents incorrect provider routing
# =====================================================================================
import os
if "gemini" in model_name.lower() or model_name.lower() in ["flash", "pro"]:
# Register Gemini provider if not already registered
from providers.base import ProviderType
from providers.gemini import GeminiModelProvider
# CRITICAL: Validate API key before registering Google provider
# This prevents auto-registration when user only has OpenRouter configured
gemini_key = os.getenv("GEMINI_API_KEY")
if gemini_key and gemini_key.strip() and gemini_key != "your_gemini_api_key_here":
from providers.base import ProviderType
from providers.gemini import GeminiModelProvider
ModelProviderRegistry.register_provider(ProviderType.GOOGLE, GeminiModelProvider)
provider = ModelProviderRegistry.get_provider(ProviderType.GOOGLE)
ModelProviderRegistry.register_provider(ProviderType.GOOGLE, GeminiModelProvider)
provider = ModelProviderRegistry.get_provider(ProviderType.GOOGLE)
elif "gpt" in model_name.lower() or "o3" in model_name.lower():
# Register OpenAI provider if not already registered
from providers.base import ProviderType
from providers.openai_provider import OpenAIModelProvider
# CRITICAL: Validate API key before registering OpenAI provider
# This prevents auto-registration when user only has OpenRouter configured
openai_key = os.getenv("OPENAI_API_KEY")
if openai_key and openai_key.strip() and openai_key != "your_openai_api_key_here":
from providers.base import ProviderType
from providers.openai_provider import OpenAIModelProvider
ModelProviderRegistry.register_provider(ProviderType.OPENAI, OpenAIModelProvider)
provider = ModelProviderRegistry.get_provider(ProviderType.OPENAI)
ModelProviderRegistry.register_provider(ProviderType.OPENAI, OpenAIModelProvider)
provider = ModelProviderRegistry.get_provider(ProviderType.OPENAI)
if not provider:
raise ValueError(

View File

@@ -657,6 +657,9 @@ class BaseWorkflowMixin(ABC):
# Allow tools to customize the final response
response_data = self.customize_workflow_response(response_data, request)
# Add metadata (provider_used and model_used) to workflow response
self._add_workflow_metadata(response_data, arguments)
# Store in conversation memory
if continuation_id:
self.store_conversation_turn(continuation_id, response_data, request)
@@ -670,6 +673,10 @@ class BaseWorkflowMixin(ABC):
"error": str(e),
"step_number": arguments.get("step_number", 0),
}
# Add metadata to error responses too
self._add_workflow_metadata(error_data, arguments)
return [TextContent(type="text", text=json.dumps(error_data, indent=2))]
# Hook methods for tool customization
@@ -1047,6 +1054,67 @@ class BaseWorkflowMixin(ABC):
images=self.get_request_images(request),
)
def _add_workflow_metadata(self, response_data: dict, arguments: dict[str, Any]) -> None:
    """
    Attach provider/model metadata to a workflow response.

    Mirrors the metadata emitted by regular tools so every tool type
    consistently reports which model and provider produced the response.

    Args:
        response_data: Response dictionary that receives a "metadata" key.
        arguments: Original tool arguments; "_resolved_model_name" and
            "_model_context" are expected to be injected by server.py.
    """
    try:
        tool_name = self.get_name()
        model_used = arguments.get("_resolved_model_name")
        context = arguments.get("_model_context")

        if model_used and context:
            # Normal path: provider comes from the model context set by server.py.
            active_provider = context.provider
            provider_used = active_provider.get_provider_type().value if active_provider else "unknown"
            response_data["metadata"] = {
                "tool_name": tool_name,
                "model_used": model_used,
                "provider_used": provider_used,
            }
            logger.debug(
                f"[WORKFLOW_METADATA] {tool_name}: Added metadata - "
                f"model: {model_used}, provider: {provider_used}"
            )
            return

        # Fallback path: re-parse the request to recover at least the model name;
        # the provider cannot be determined here.
        request = self.get_workflow_request_model()(**arguments)
        fallback_model = self.get_request_model_name(request)
        response_data["metadata"] = {
            "tool_name": tool_name,
            "model_used": fallback_model,
            "provider_used": "unknown",
        }
        logger.debug(
            f"[WORKFLOW_METADATA] {tool_name}: Added fallback metadata - "
            f"model: {fallback_model}, provider: unknown"
        )
    except Exception as e:
        # Metadata is best-effort: never fail the workflow because of it.
        logger.warning(f"[WORKFLOW_METADATA] {self.get_name()}: Failed to add metadata: {e}")
        # Still record which tool produced the response.
        response_data["metadata"] = {"tool_name": self.get_name()}
def _extract_clean_workflow_content_for_history(self, response_data: dict) -> str:
"""
Extract clean content from workflow response suitable for conversation history.
@@ -1393,19 +1461,23 @@ class BaseWorkflowMixin(ABC):
try:
# Common validation
if not arguments:
return [
TextContent(type="text", text=json.dumps({"status": "error", "content": "No arguments provided"}))
]
error_data = {"status": "error", "content": "No arguments provided"}
# Add basic metadata even for validation errors
error_data["metadata"] = {"tool_name": self.get_name()}
return [TextContent(type="text", text=json.dumps(error_data))]
# Delegate to execute_workflow
return await self.execute_workflow(arguments)
except Exception as e:
logger.error(f"Error in {self.get_name()} tool execution: {e}", exc_info=True)
error_data = {"status": "error", "content": f"Error in {self.get_name()}: {str(e)}"}
# Add metadata to error responses
self._add_workflow_metadata(error_data, arguments)
return [
TextContent(
type="text",
text=json.dumps({"status": "error", "content": f"Error in {self.get_name()}: {str(e)}"}),
text=json.dumps(error_data),
)
]