Final cleanup
@@ -46,8 +46,14 @@ class O3ModelSelectionTest(BaseSimulatorTest):
             self.logger.info(" Test: O3 model selection and usage validation")
 
             # Check which API keys are configured
-            check_cmd = ["docker", "exec", self.container_name, "python", "-c",
-                "import os; print(f'OPENAI_KEY:{bool(os.environ.get(\"OPENAI_API_KEY\"))}|OPENROUTER_KEY:{bool(os.environ.get(\"OPENROUTER_API_KEY\"))}')"]
+            check_cmd = [
+                "docker",
+                "exec",
+                self.container_name,
+                "python",
+                "-c",
+                'import os; print(f\'OPENAI_KEY:{bool(os.environ.get("OPENAI_API_KEY"))}|OPENROUTER_KEY:{bool(os.environ.get("OPENROUTER_API_KEY"))}\')',
+            ]
             result = subprocess.run(check_cmd, capture_output=True, text=True)
 
             has_openai = False
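
For reference, the probe prints a single NAME:bool|NAME:bool line, so a caller can split on "|" and ":". A minimal stand-alone sketch of that parsing, using the same docker one-liner as the hunk above; the helper name and free-standing form are illustrative, not part of this commit:

import subprocess

def probe_api_keys(container_name: str) -> dict[str, bool]:
    """Run the env-var probe inside the container and parse its KEY:bool output."""
    check_cmd = [
        "docker",
        "exec",
        container_name,
        "python",
        "-c",
        'import os; print(f\'OPENAI_KEY:{bool(os.environ.get("OPENAI_API_KEY"))}|OPENROUTER_KEY:{bool(os.environ.get("OPENROUTER_API_KEY"))}\')',
    ]
    result = subprocess.run(check_cmd, capture_output=True, text=True)
    keys: dict[str, bool] = {}
    for pair in result.stdout.strip().split("|"):  # e.g. "OPENAI_KEY:True|OPENROUTER_KEY:False"
        name, _, value = pair.partition(":")
        keys[name] = value == "True"
    return keys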
@@ -289,13 +295,21 @@ def multiply(x, y):
             logs = self.get_recent_server_logs()
 
             # Check for OpenRouter API calls
-            openrouter_api_logs = [line for line in logs.split("\n") if "openrouter" in line.lower() and ("API" in line or "request" in line)]
+            openrouter_api_logs = [
+                line
+                for line in logs.split("\n")
+                if "openrouter" in line.lower() and ("API" in line or "request" in line)
+            ]
 
             # Check for model resolution through OpenRouter
-            openrouter_model_logs = [line for line in logs.split("\n") if "openrouter" in line.lower() and ("o3" in line or "model" in line)]
+            openrouter_model_logs = [
+                line for line in logs.split("\n") if "openrouter" in line.lower() and ("o3" in line or "model" in line)
+            ]
 
             # Check for successful responses
-            openrouter_response_logs = [line for line in logs.split("\n") if "openrouter" in line.lower() and "response" in line]
+            openrouter_response_logs = [
+                line for line in logs.split("\n") if "openrouter" in line.lower() and "response" in line
+            ]
 
             self.logger.info(f" OpenRouter API logs: {len(openrouter_api_logs)}")
             self.logger.info(f" OpenRouter model logs: {len(openrouter_model_logs)}")
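
All three checks in this hunk have the same shape: split the captured server logs on newlines and keep lines matching a substring predicate. A hedged sketch of that pattern factored into a helper; the function is an editorial illustration, not something this commit adds:

from collections.abc import Callable

def filter_log_lines(logs: str, predicate: Callable[[str], bool]) -> list[str]:
    """Mirror the comprehensions above: one log line per entry, kept if the predicate matches."""
    return [line for line in logs.split("\n") if predicate(line)]

# Equivalent to the first filter in the hunk, on sample input:
sample_logs = "INFO openrouter API request sent\nDEBUG unrelated"
api_logs = filter_log_lines(
    sample_logs, lambda line: "openrouter" in line.lower() and ("API" in line or "request" in line)
)
print(api_logs)  # ['INFO openrouter API request sent']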
@@ -8,7 +8,6 @@ Tests that verify the system correctly falls back to OpenRouter when:
 - Auto mode correctly selects OpenRouter models
 """
 
-import json
 import subprocess
 
 from .base_test import BaseSimulatorTest
@@ -45,6 +44,22 @@ class OpenRouterFallbackTest(BaseSimulatorTest):
         try:
             self.logger.info("Test: OpenRouter fallback behavior when only provider available")
 
+            # Check if OpenRouter API key is configured
+            check_cmd = [
+                "docker",
+                "exec",
+                self.container_name,
+                "python",
+                "-c",
+                'import os; print("OPENROUTER_KEY:" + str(bool(os.environ.get("OPENROUTER_API_KEY"))))',
+            ]
+            result = subprocess.run(check_cmd, capture_output=True, text=True)
+
+            if result.returncode == 0 and "OPENROUTER_KEY:False" in result.stdout:
+                self.logger.info(" ⚠️ OpenRouter API key not configured - skipping test")
+                self.logger.info(" ℹ️ This test requires OPENROUTER_API_KEY to be set in .env")
+                return True  # Return True to indicate test is skipped, not failed
+
             # Setup test files
             self.setup_test_files()
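
Returning True here signals "skipped" to the harness rather than a failure. The added guard could equally live in a small helper; a hedged sketch with a hypothetical function name but the same docker probe as the hunk:

import subprocess

def openrouter_key_configured(container_name: str) -> bool:
    """True only when the probe ran cleanly and reported a configured key."""
    check_cmd = [
        "docker",
        "exec",
        container_name,
        "python",
        "-c",
        'import os; print("OPENROUTER_KEY:" + str(bool(os.environ.get("OPENROUTER_API_KEY"))))',
    ]
    result = subprocess.run(check_cmd, capture_output=True, text=True)
    return result.returncode == 0 and "OPENROUTER_KEY:True" in result.stdout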
@@ -137,28 +152,29 @@ class OpenRouterFallbackTest(BaseSimulatorTest):
 
             # Check for provider fallback logs
             fallback_logs = [
-                line for line in logs.split("\n")
-                if "No Gemini API key found" in line or
-                "No OpenAI API key found" in line or
-                "Only OpenRouter available" in line or
-                "Using OpenRouter" in line
+                line
+                for line in logs.split("\n")
+                if "No Gemini API key found" in line
+                or "No OpenAI API key found" in line
+                or "Only OpenRouter available" in line
+                or "Using OpenRouter" in line
             ]
 
             # Check for OpenRouter provider initialization
             provider_logs = [
-                line for line in logs.split("\n")
-                if "OpenRouter provider" in line or
-                "OpenRouterProvider" in line or
-                "openrouter.ai/api/v1" in line
+                line
+                for line in logs.split("\n")
+                if "OpenRouter provider" in line or "OpenRouterProvider" in line or "openrouter.ai/api/v1" in line
             ]
 
             # Check for model resolution through OpenRouter
             model_resolution_logs = [
-                line for line in logs.split("\n")
-                if ("Resolved model" in line and "via OpenRouter" in line) or
-                ("Model alias" in line and "resolved to" in line) or
-                ("flash" in line and "gemini-flash" in line) or
-                ("pro" in line and "gemini-pro" in line)
+                line
+                for line in logs.split("\n")
+                if ("Resolved model" in line and "via OpenRouter" in line)
+                or ("Model alias" in line and "resolved to" in line)
+                or ("flash" in line and "gemini-flash" in line)
+                or ("pro" in line and "gemini-pro" in line)
 
             # Log findings
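
The reformatted comprehensions are straight formatter output; an equivalent, arguably tighter form collects the marker strings into a tuple and uses any(). The restructuring below is an editorial suggestion with the markers copied from the hunk and made-up sample input:

FALLBACK_MARKERS = (
    "No Gemini API key found",
    "No OpenAI API key found",
    "Only OpenRouter available",
    "Using OpenRouter",
)

logs = "INFO No Gemini API key found\nINFO Using OpenRouter\nDEBUG noise"  # sample input
fallback_logs = [line for line in logs.split("\n") if any(marker in line for marker in FALLBACK_MARKERS)]
print(fallback_logs)  # the first two lines match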
@@ -9,7 +9,6 @@ Tests that verify OpenRouter functionality including:
 - Error handling when models are not available
 """
 
-import json
 import subprocess
 
 from .base_test import BaseSimulatorTest
@@ -47,6 +46,22 @@ class OpenRouterModelsTest(BaseSimulatorTest):
         try:
             self.logger.info("Test: OpenRouter model functionality and alias mapping")
 
+            # Check if OpenRouter API key is configured
+            check_cmd = [
+                "docker",
+                "exec",
+                self.container_name,
+                "python",
+                "-c",
+                'import os; print("OPENROUTER_KEY:" + str(bool(os.environ.get("OPENROUTER_API_KEY"))))',
+            ]
+            result = subprocess.run(check_cmd, capture_output=True, text=True)
+
+            if result.returncode == 0 and "OPENROUTER_KEY:False" in result.stdout:
+                self.logger.info(" ⚠️ OpenRouter API key not configured - skipping test")
+                self.logger.info(" ℹ️ This test requires OPENROUTER_API_KEY to be set in .env")
+                return True  # Return True to indicate test is skipped, not failed
+
             # Setup test files for later use
             self.setup_test_files()
@@ -189,15 +204,17 @@ class OpenRouterModelsTest(BaseSimulatorTest):
 
             # Check for specific model mappings
             flash_mapping_logs = [
-                line for line in logs.split("\n")
-                if ("flash" in line and "google/gemini-flash" in line) or
-                ("Resolved model" in line and "google/gemini-flash" in line)
+                line
+                for line in logs.split("\n")
+                if ("flash" in line and "google/gemini-flash" in line)
+                or ("Resolved model" in line and "google/gemini-flash" in line)
             ]
 
             pro_mapping_logs = [
-                line for line in logs.split("\n")
-                if ("pro" in line and "google/gemini-pro" in line) or
-                ("Resolved model" in line and "google/gemini-pro" in line)
+                line
+                for line in logs.split("\n")
+                if ("pro" in line and "google/gemini-pro" in line)
+                or ("Resolved model" in line and "google/gemini-pro" in line)
             ]
 
             # Log findings
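
The flash and pro filters differ only in the alias and the target model, so a table-driven form makes the symmetry explicit. An editorial sketch, not part of the commit, with made-up sample log text:

MAPPINGS = {"flash": "google/gemini-flash", "pro": "google/gemini-pro"}
logs = "Resolved model flash to google/gemini-flash-1.5 via OpenRouter"  # sample log text

for alias, target in MAPPINGS.items():
    matches = [
        line
        for line in logs.split("\n")
        if (alias in line and target in line) or ("Resolved model" in line and target in line)
    ]
    print(f"{alias} -> {target}: {len(matches)} matching line(s)")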
@@ -4,29 +4,21 @@ Simple test script to demonstrate model mapping through the MCP server.
 Tests how model aliases (flash, pro, o3) are mapped to OpenRouter models.
 """
 
-import subprocess
 import json
+import subprocess
 import sys
-from typing import Dict, Any
+from typing import Any
 
-def call_mcp_server(model: str, message: str = "Hello, which model are you?") -> Dict[str, Any]:
+
+def call_mcp_server(model: str, message: str = "Hello, which model are you?") -> dict[str, Any]:
     """Call the MCP server with a specific model and return the response."""
 
     # Prepare the request
     request = {
         "jsonrpc": "2.0",
         "method": "completion",
-        "params": {
-            "model": model,
-            "messages": [
-                {
-                    "role": "user",
-                    "content": message
-                }
-            ],
-            "max_tokens": 100
-        },
-        "id": 1
+        "params": {"model": model, "messages": [{"role": "user", "content": message}], "max_tokens": 100},
+        "id": 1,
     }
 
     # Call the server
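
Collapsing params onto one line is purely cosmetic: both spellings build the identical dict and serialize to the same JSON-RPC payload. A runnable sanity check (model and message values chosen for illustration):

import json

request_multiline = {
    "jsonrpc": "2.0",
    "method": "completion",
    "params": {
        "model": "flash",
        "messages": [{"role": "user", "content": "Hello, which model are you?"}],
        "max_tokens": 100,
    },
    "id": 1,
}
request_oneline = {
    "jsonrpc": "2.0",
    "method": "completion",
    "params": {"model": "flash", "messages": [{"role": "user", "content": "Hello, which model are you?"}], "max_tokens": 100},
    "id": 1,
}
assert json.dumps(request_multiline) == json.dumps(request_oneline)  # same wire format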
@@ -35,55 +27,36 @@ def call_mcp_server(model: str, message: str = "Hello, which model are you?") ->
     try:
         # Send request to stdin and capture output
         process = subprocess.Popen(
-            cmd,
-            stdin=subprocess.PIPE,
-            stdout=subprocess.PIPE,
-            stderr=subprocess.PIPE,
-            text=True
+            cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
         )
 
         stdout, stderr = process.communicate(input=json.dumps(request))
 
         if process.returncode != 0:
-            return {
-                "error": f"Server returned non-zero exit code: {process.returncode}",
-                "stderr": stderr
-            }
+            return {"error": f"Server returned non-zero exit code: {process.returncode}", "stderr": stderr}
 
         # Parse the response
         try:
             response = json.loads(stdout)
             return response
         except json.JSONDecodeError:
-            return {
-                "error": "Failed to parse JSON response",
-                "stdout": stdout,
-                "stderr": stderr
-            }
+            return {"error": "Failed to parse JSON response", "stdout": stdout, "stderr": stderr}
 
     except Exception as e:
-        return {
-            "error": f"Failed to call server: {str(e)}"
-        }
+        return {"error": f"Failed to call server: {str(e)}"}
 
 
-def extract_model_info(response: Dict[str, Any]) -> Dict[str, str]:
+def extract_model_info(response: dict[str, Any]) -> dict[str, str]:
     """Extract model information from the response."""
 
     if "error" in response:
-        return {
-            "status": "error",
-            "message": response.get("error", "Unknown error")
-        }
+        return {"status": "error", "message": response.get("error", "Unknown error")}
 
     # Look for result in the response
     result = response.get("result", {})
 
     # Extract relevant information
-    info = {
-        "status": "success",
-        "provider": "unknown",
-        "model": "unknown"
-    }
+    info = {"status": "success", "provider": "unknown", "model": "unknown"}
 
     # Try to find provider and model info in the response
     # This might be in metadata or debug info depending on server implementation
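
Every failure path in call_mcp_server now returns a one-line dict that always carries an "error" key, which is exactly the key extract_model_info branches on. A minimal illustration of consuming that convention; the summarize helper is hypothetical:

from typing import Any

def summarize(response: dict[str, Any]) -> str:
    """Take the same branch extract_model_info takes: an "error" key marks any failure path."""
    if "error" in response:
        return f"failed: {response['error']}"
    return "ok"

print(summarize({"error": "Failed to parse JSON response", "stdout": "", "stderr": ""}))  # failed: ...
print(summarize({"result": {}}))  # ok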
@@ -101,6 +74,7 @@ def extract_model_info(response: Dict[str, Any]) -> Dict[str, str]:
 
     return info
 
+
 def main():
     """Test model mapping for different aliases."""
 
@@ -134,5 +108,6 @@ def main():
     print("\nNote: This test assumes the MCP server is configured with OpenRouter.")
     print("The actual model mappings depend on the server configuration.")
 
+
 if __name__ == "__main__":
     main()
@@ -193,5 +193,5 @@ class TestOpenRouterFunctionality:
         provider = OpenRouterProvider(api_key="test-key")
 
         # Registry should be initialized
-        assert hasattr(provider, '_registry')
+        assert hasattr(provider, "_registry")
         assert provider._registry is not None
@@ -195,9 +195,7 @@ class BaseTool(ABC):
             # Show all aliases so Claude knows every option available
             all_aliases = sorted(aliases)
             alias_list = ", ".join(f"'{a}'" for a in all_aliases)
-            model_desc_parts.append(
-                f"\nOpenRouter models available via aliases: {alias_list}"
-            )
+            model_desc_parts.append(f"\nOpenRouter models available via aliases: {alias_list}")
         else:
             model_desc_parts.append(
                 "\nOpenRouter models: If configured, you can also use ANY model available on OpenRouter."
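
For context, the collapsed append emits a description line like the following; the alias values are made up for illustration:

aliases = ["flash", "pro", "o3"]
alias_list = ", ".join(f"'{a}'" for a in sorted(aliases))
print(f"\nOpenRouter models available via aliases: {alias_list}")
# Output: OpenRouter models available via aliases: 'flash', 'o3', 'pro'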