Fixed restriction checks for OpenRouter
@@ -14,7 +14,7 @@ import os
 # These values are used in server responses and for tracking releases
 # IMPORTANT: This is the single source of truth for version and author info
 # Semantic versioning: MAJOR.MINOR.PATCH
-__version__ = "5.6.1"
+__version__ = "5.6.2"
 # Last update date in ISO format
 __updated__ = "2025-06-23"
 # Primary maintainer
@@ -50,14 +50,6 @@ class OpenRouterProvider(OpenAICompatibleProvider):
         aliases = self._registry.list_aliases()
         logging.info(f"OpenRouter loaded {len(models)} models with {len(aliases)} aliases")

-    def _parse_allowed_models(self) -> None:
-        """Override to disable environment-based allow-list.
-
-        OpenRouter model access is controlled via the OpenRouter dashboard,
-        not through environment variables.
-        """
-        return None
-
     def _resolve_model_name(self, model_name: str) -> str:
         """Resolve model aliases to OpenRouter model names.
@@ -130,16 +122,34 @@ class OpenRouterProvider(OpenAICompatibleProvider):

         As the catch-all provider, OpenRouter accepts any model name that wasn't
         handled by higher-priority providers. OpenRouter will validate based on
-        the API key's permissions.
+        the API key's permissions and local restrictions.

         Args:
             model_name: Model name to validate

         Returns:
-            Always True - OpenRouter is the catch-all provider
+            True if model is allowed, False if restricted
         """
-        # Accept any model name - OpenRouter is the fallback provider
-        # Higher priority providers (native APIs, custom endpoints) get first chance
+        # Check model restrictions if configured
+        from utils.model_restrictions import get_restriction_service
+
+        restriction_service = get_restriction_service()
+        if restriction_service:
+            # Check if model name itself is allowed
+            if restriction_service.is_allowed(self.get_provider_type(), model_name):
+                return True
+
+            # Also check aliases - model_name might be an alias
+            model_config = self._registry.resolve(model_name)
+            if model_config and model_config.aliases:
+                for alias in model_config.aliases:
+                    if restriction_service.is_allowed(self.get_provider_type(), alias):
+                        return True
+
+            # If restrictions are configured and model/alias not in allowed list, reject
+            return False
+
+        # No restrictions configured - accept any model name as the fallback provider
         return True

     def generate_content(
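Taken together, the two hunks above make OpenRouter honour a configured allow-list (including registry aliases) while still acting as the catch-all provider when nothing is restricted. A minimal sketch of the expected behaviour, assuming OPENROUTER_ALLOWED_MODELS is read as a comma-separated list when the restriction service is first created and that ProviderType is importable from providers.base (neither detail is shown in this commit):

import os

# Hypothetical allow-list; the variable name comes from the tests in this commit,
# the comma-separated format is an assumption.
os.environ["OPENROUTER_ALLOWED_MODELS"] = "o3,anthropic/claude-3-opus"

from providers.base import ProviderType                       # assumed import path
from utils.model_restrictions import get_restriction_service  # path taken from the diff

service = get_restriction_service()
print(service.is_allowed(ProviderType.OPENROUTER, "o3"))             # expected: True (listed)
print(service.is_allowed(ProviderType.OPENROUTER, "openai/gpt-4o"))  # expected: False (not listed)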
@@ -129,7 +129,6 @@ class ModelProviderRegistry:
        logging.debug(f"Available providers in registry: {list(instance._providers.keys())}")

        for provider_type in PROVIDER_PRIORITY_ORDER:
            logging.debug(f"Checking provider_type: {provider_type}")
            if provider_type in instance._providers:
                logging.debug(f"Found {provider_type} in registry")
                # Get or create provider instance
server.py (16 changed lines)
@@ -673,6 +673,11 @@ def parse_model_option(model_string: str) -> tuple[str, Optional[str]]:
     """
     Parse model:option format into model name and option.

+    Handles different formats:
+    - OpenRouter models: preserve :free, :beta, :preview suffixes as part of model name
+    - Ollama/Custom models: split on : to extract tags like :latest
+    - Consensus stance: extract options like :for, :against
+
     Args:
         model_string: String that may contain "model:option" format
@@ -680,6 +685,17 @@ def parse_model_option(model_string: str) -> tuple[str, Optional[str]]:
         tuple: (model_name, option) where option may be None
     """
     if ":" in model_string and not model_string.startswith("http"):  # Avoid parsing URLs
+        # Check if this looks like an OpenRouter model (contains /)
+        if "/" in model_string and model_string.count(":") == 1:
+            # Could be openai/gpt-4:something - check what comes after colon
+            parts = model_string.split(":", 1)
+            suffix = parts[1].strip().lower()
+
+            # Known OpenRouter suffixes to preserve
+            if suffix in ["free", "beta", "preview"]:
+                return model_string.strip(), None
+
+        # For other patterns (Ollama tags, consensus stances), split normally
         parts = model_string.split(":", 1)
         model_name = parts[0].strip()
         model_option = parts[1].strip() if len(parts) > 1 else None
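A quick usage sketch of the parsing rules above; the expected results mirror the new tests added later in this commit:

from server import parse_model_option

print(parse_model_option("openai/gpt-3.5-turbo:free"))  # ("openai/gpt-3.5-turbo:free", None) - OpenRouter suffix kept
print(parse_model_option("llama3.2:latest"))            # ("llama3.2", "latest") - Ollama tag split off
print(parse_model_option("o3:for"))                     # ("o3", "for") - consensus stance split off
print(parse_model_option("http://localhost:8080"))      # ("http://localhost:8080", None) - URLs left alone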
@@ -45,18 +45,32 @@ class TestCustomProvider:

     def test_get_capabilities_from_registry(self):
         """Test get_capabilities returns registry capabilities when available."""
-        provider = CustomProvider(api_key="test-key", base_url="http://localhost:11434/v1")
+        # Save original environment
+        original_env = os.environ.get("OPENROUTER_ALLOWED_MODELS")

-        # Test with a model that should be in the registry (OpenRouter model) and is allowed by restrictions
-        capabilities = provider.get_capabilities("o3")  # o3 is in OPENROUTER_ALLOWED_MODELS
+        try:
+            # Clear any restrictions
+            os.environ.pop("OPENROUTER_ALLOWED_MODELS", None)

-        assert capabilities.provider == ProviderType.OPENROUTER  # o3 is an OpenRouter model (is_custom=false)
-        assert capabilities.context_window > 0
+            provider = CustomProvider(api_key="test-key", base_url="http://localhost:11434/v1")

-        # Test with a custom model (is_custom=true)
-        capabilities = provider.get_capabilities("local-llama")
-        assert capabilities.provider == ProviderType.CUSTOM  # local-llama has is_custom=true
-        assert capabilities.context_window > 0
+            # Test with a model that should be in the registry (OpenRouter model)
+            capabilities = provider.get_capabilities("o3")  # o3 is an OpenRouter model
+
+            assert capabilities.provider == ProviderType.OPENROUTER  # o3 is an OpenRouter model (is_custom=false)
+            assert capabilities.context_window > 0
+
+            # Test with a custom model (is_custom=true)
+            capabilities = provider.get_capabilities("local-llama")
+            assert capabilities.provider == ProviderType.CUSTOM  # local-llama has is_custom=true
+            assert capabilities.context_window > 0
+
+        finally:
+            # Restore original environment
+            if original_env is None:
+                os.environ.pop("OPENROUTER_ALLOWED_MODELS", None)
+            else:
+                os.environ["OPENROUTER_ALLOWED_MODELS"] = original_env

     def test_get_capabilities_generic_fallback(self):
         """Test get_capabilities returns generic capabilities for unknown models."""
tests/test_parse_model_option.py (new file, 79 lines)
@@ -0,0 +1,79 @@
+"""Tests for parse_model_option function."""
+
+from server import parse_model_option
+
+
+class TestParseModelOption:
+    """Test cases for model option parsing."""
+
+    def test_openrouter_free_suffix_preserved(self):
+        """Test that OpenRouter :free suffix is preserved as part of model name."""
+        model, option = parse_model_option("openai/gpt-3.5-turbo:free")
+        assert model == "openai/gpt-3.5-turbo:free"
+        assert option is None
+
+    def test_openrouter_beta_suffix_preserved(self):
+        """Test that OpenRouter :beta suffix is preserved as part of model name."""
+        model, option = parse_model_option("anthropic/claude-3-opus:beta")
+        assert model == "anthropic/claude-3-opus:beta"
+        assert option is None
+
+    def test_openrouter_preview_suffix_preserved(self):
+        """Test that OpenRouter :preview suffix is preserved as part of model name."""
+        model, option = parse_model_option("google/gemini-pro:preview")
+        assert model == "google/gemini-pro:preview"
+        assert option is None
+
+    def test_ollama_tag_parsed_as_option(self):
+        """Test that Ollama tags are parsed as options."""
+        model, option = parse_model_option("llama3.2:latest")
+        assert model == "llama3.2"
+        assert option == "latest"
+
+    def test_consensus_stance_parsed_as_option(self):
+        """Test that consensus stances are parsed as options."""
+        model, option = parse_model_option("o3:for")
+        assert model == "o3"
+        assert option == "for"
+
+        model, option = parse_model_option("gemini-2.5-pro:against")
+        assert model == "gemini-2.5-pro"
+        assert option == "against"
+
+    def test_openrouter_unknown_suffix_parsed_as_option(self):
+        """Test that unknown suffixes on OpenRouter models are parsed as options."""
+        model, option = parse_model_option("openai/gpt-4:custom-tag")
+        assert model == "openai/gpt-4"
+        assert option == "custom-tag"
+
+    def test_plain_model_name(self):
+        """Test plain model names without colons."""
+        model, option = parse_model_option("gpt-4")
+        assert model == "gpt-4"
+        assert option is None
+
+    def test_url_not_parsed(self):
+        """Test that URLs are not parsed for options."""
+        model, option = parse_model_option("http://localhost:8080")
+        assert model == "http://localhost:8080"
+        assert option is None
+
+    def test_whitespace_handling(self):
+        """Test that whitespace is properly stripped."""
+        model, option = parse_model_option(" openai/gpt-3.5-turbo:free ")
+        assert model == "openai/gpt-3.5-turbo:free"
+        assert option is None
+
+        model, option = parse_model_option(" llama3.2 : latest ")
+        assert model == "llama3.2"
+        assert option == "latest"
+
+    def test_case_insensitive_suffix_matching(self):
+        """Test that OpenRouter suffix matching is case-insensitive."""
+        model, option = parse_model_option("openai/gpt-3.5-turbo:FREE")
+        assert model == "openai/gpt-3.5-turbo:FREE"  # Original case preserved
+        assert option is None
+
+        model, option = parse_model_option("openai/gpt-3.5-turbo:Free")
+        assert model == "openai/gpt-3.5-turbo:Free"  # Original case preserved
+        assert option is None
@@ -58,7 +58,13 @@ class TestProviderRoutingBugs:
         """
         # Save original environment
         original_env = {}
-        for key in ["GEMINI_API_KEY", "OPENAI_API_KEY", "XAI_API_KEY", "OPENROUTER_API_KEY"]:
+        for key in [
+            "GEMINI_API_KEY",
+            "OPENAI_API_KEY",
+            "XAI_API_KEY",
+            "OPENROUTER_API_KEY",
+            "OPENROUTER_ALLOWED_MODELS",
+        ]:
             original_env[key] = os.environ.get(key)

         try:
@@ -66,6 +72,7 @@ class TestProviderRoutingBugs:
             os.environ.pop("GEMINI_API_KEY", None)  # No Google API key
             os.environ.pop("OPENAI_API_KEY", None)
             os.environ.pop("XAI_API_KEY", None)
+            os.environ.pop("OPENROUTER_ALLOWED_MODELS", None)  # Clear any restrictions
             os.environ["OPENROUTER_API_KEY"] = "test-openrouter-key"

             # Register only OpenRouter provider (like in server.py:configure_providers)
@@ -113,12 +120,24 @@ class TestProviderRoutingBugs:
         """
         # Save original environment
         original_env = {}
-        for key in ["GEMINI_API_KEY", "OPENAI_API_KEY", "XAI_API_KEY", "OPENROUTER_API_KEY"]:
+        for key in [
+            "GEMINI_API_KEY",
+            "OPENAI_API_KEY",
+            "XAI_API_KEY",
+            "OPENROUTER_API_KEY",
+            "OPENROUTER_ALLOWED_MODELS",
+        ]:
             original_env[key] = os.environ.get(key)

         try:
             # Set up scenario: NO API keys at all
-            for key in ["GEMINI_API_KEY", "OPENAI_API_KEY", "XAI_API_KEY", "OPENROUTER_API_KEY"]:
+            for key in [
+                "GEMINI_API_KEY",
+                "OPENAI_API_KEY",
+                "XAI_API_KEY",
+                "OPENROUTER_API_KEY",
+                "OPENROUTER_ALLOWED_MODELS",
+            ]:
                 os.environ.pop(key, None)

             # Create tool to test fallback logic
@@ -151,7 +170,13 @@ class TestProviderRoutingBugs:
         """
         # Save original environment
         original_env = {}
-        for key in ["GEMINI_API_KEY", "OPENAI_API_KEY", "XAI_API_KEY", "OPENROUTER_API_KEY"]:
+        for key in [
+            "GEMINI_API_KEY",
+            "OPENAI_API_KEY",
+            "XAI_API_KEY",
+            "OPENROUTER_API_KEY",
+            "OPENROUTER_ALLOWED_MODELS",
+        ]:
             original_env[key] = os.environ.get(key)

         try:
@@ -160,6 +185,7 @@ class TestProviderRoutingBugs:
             os.environ["OPENAI_API_KEY"] = "test-openai-key"
             os.environ["OPENROUTER_API_KEY"] = "test-openrouter-key"
             os.environ.pop("XAI_API_KEY", None)
+            os.environ.pop("OPENROUTER_ALLOWED_MODELS", None)  # Clear any restrictions

             # Register providers in priority order (like server.py)
             from providers.gemini import GeminiModelProvider
@@ -48,7 +48,13 @@ class TestWorkflowMetadata:
         """
         # Save original environment
         original_env = {}
-        for key in ["GEMINI_API_KEY", "OPENAI_API_KEY", "XAI_API_KEY", "OPENROUTER_API_KEY"]:
+        for key in [
+            "GEMINI_API_KEY",
+            "OPENAI_API_KEY",
+            "XAI_API_KEY",
+            "OPENROUTER_API_KEY",
+            "OPENROUTER_ALLOWED_MODELS",
+        ]:
             original_env[key] = os.environ.get(key)

         try:
@@ -56,6 +62,7 @@ class TestWorkflowMetadata:
             os.environ.pop("GEMINI_API_KEY", None)
             os.environ.pop("OPENAI_API_KEY", None)
             os.environ.pop("XAI_API_KEY", None)
+            os.environ.pop("OPENROUTER_ALLOWED_MODELS", None)  # Clear any restrictions
             os.environ["OPENROUTER_API_KEY"] = "test-openrouter-key"

             # Register OpenRouter provider
@@ -124,7 +131,13 @@ class TestWorkflowMetadata:
         """
         # Save original environment
         original_env = {}
-        for key in ["GEMINI_API_KEY", "OPENAI_API_KEY", "XAI_API_KEY", "OPENROUTER_API_KEY"]:
+        for key in [
+            "GEMINI_API_KEY",
+            "OPENAI_API_KEY",
+            "XAI_API_KEY",
+            "OPENROUTER_API_KEY",
+            "OPENROUTER_ALLOWED_MODELS",
+        ]:
             original_env[key] = os.environ.get(key)

         try:
@@ -132,6 +145,7 @@ class TestWorkflowMetadata:
             os.environ.pop("GEMINI_API_KEY", None)
             os.environ.pop("OPENAI_API_KEY", None)
             os.environ.pop("XAI_API_KEY", None)
+            os.environ.pop("OPENROUTER_ALLOWED_MODELS", None)  # Clear any restrictions
             os.environ["OPENROUTER_API_KEY"] = "test-openrouter-key"

             # Register OpenRouter provider
@@ -182,43 +196,60 @@ class TestWorkflowMetadata:
         """
         Test that workflow tools handle metadata gracefully when model context is missing.
         """
-        # Create debug tool
-        debug_tool = DebugIssueTool()
+        # Save original environment
+        original_env = {}
+        for key in ["OPENROUTER_ALLOWED_MODELS"]:
+            original_env[key] = os.environ.get(key)

-        # Create arguments without model context (fallback scenario)
-        arguments = {
-            "step": "Test step without model context",
-            "step_number": 1,
-            "total_steps": 1,
-            "next_step_required": False,
-            "findings": "Test findings",
-            "model": "flash",
-            "confidence": "low",
-            # No _model_context or _resolved_model_name
-        }
+        try:
+            # Clear any restrictions
+            os.environ.pop("OPENROUTER_ALLOWED_MODELS", None)

-        # Execute the workflow tool
-        import asyncio
+            # Create debug tool
+            debug_tool = DebugIssueTool()

-        result = asyncio.run(debug_tool.execute_workflow(arguments))
+            # Create arguments without model context (fallback scenario)
+            arguments = {
+                "step": "Test step without model context",
+                "step_number": 1,
+                "total_steps": 1,
+                "next_step_required": False,
+                "findings": "Test findings",
+                "model": "flash",
+                "confidence": "low",
+                # No _model_context or _resolved_model_name
+            }

-        # Parse the JSON response
-        assert len(result) == 1
-        response_text = result[0].text
-        response_data = json.loads(response_text)
+            # Execute the workflow tool
+            import asyncio

-        # Verify metadata is still present with fallback values
-        assert "metadata" in response_data, "Workflow response should include metadata even in fallback"
-        metadata = response_data["metadata"]
+            result = asyncio.run(debug_tool.execute_workflow(arguments))

-        # Verify fallback metadata
-        assert "tool_name" in metadata, "Fallback metadata should include tool_name"
-        assert "model_used" in metadata, "Fallback metadata should include model_used"
-        assert "provider_used" in metadata, "Fallback metadata should include provider_used"
+            # Parse the JSON response
+            assert len(result) == 1
+            response_text = result[0].text
+            response_data = json.loads(response_text)

-        assert metadata["tool_name"] == "debug", "tool_name should be 'debug'"
-        assert metadata["model_used"] == "flash", "model_used should be from request"
-        assert metadata["provider_used"] == "unknown", "provider_used should be 'unknown' in fallback"
+            # Verify metadata is still present with fallback values
+            assert "metadata" in response_data, "Workflow response should include metadata even in fallback"
+            metadata = response_data["metadata"]
+
+            # Verify fallback metadata
+            assert "tool_name" in metadata, "Fallback metadata should include tool_name"
+            assert "model_used" in metadata, "Fallback metadata should include model_used"
+            assert "provider_used" in metadata, "Fallback metadata should include provider_used"
+
+            assert metadata["tool_name"] == "debug", "tool_name should be 'debug'"
+            assert metadata["model_used"] == "flash", "model_used should be from request"
+            assert metadata["provider_used"] == "unknown", "provider_used should be 'unknown' in fallback"
+
+        finally:
+            # Restore original environment
+            for key, value in original_env.items():
+                if value is None:
+                    os.environ.pop(key, None)
+                else:
+                    os.environ[key] = value

     @pytest.mark.no_mock_provider
     def test_workflow_metadata_preserves_existing_response_fields(self):
@@ -227,7 +258,13 @@ class TestWorkflowMetadata:
         """
         # Save original environment
         original_env = {}
-        for key in ["GEMINI_API_KEY", "OPENAI_API_KEY", "XAI_API_KEY", "OPENROUTER_API_KEY"]:
+        for key in [
+            "GEMINI_API_KEY",
+            "OPENAI_API_KEY",
+            "XAI_API_KEY",
+            "OPENROUTER_API_KEY",
+            "OPENROUTER_ALLOWED_MODELS",
+        ]:
             original_env[key] = os.environ.get(key)

         try:
@@ -235,6 +272,7 @@ class TestWorkflowMetadata:
             os.environ.pop("GEMINI_API_KEY", None)
             os.environ.pop("OPENAI_API_KEY", None)
             os.environ.pop("XAI_API_KEY", None)
+            os.environ.pop("OPENROUTER_ALLOWED_MODELS", None)  # Clear any restrictions
             os.environ["OPENROUTER_API_KEY"] = "test-openrouter-key"

             # Register OpenRouter provider
@@ -128,6 +128,10 @@ class ModelRestrictionService:

         allowed_set = self.restrictions[provider_type]

+        if len(allowed_set) == 0:
+            # Empty set - allowed
+            return True
+
         # Check both the resolved name and original name (if different)
         names_to_check = {model_name.lower()}
         if original_name and original_name.lower() != model_name.lower():
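The new guard above means an empty allow-list imposes no restriction at all. A minimal sketch, assuming the service builds its restriction sets from the OPENROUTER_ALLOWED_MODELS variable used elsewhere in this commit (construction details are not shown here):

import os

os.environ["OPENROUTER_ALLOWED_MODELS"] = ""   # set but empty - hypothetical "no restriction" case

from providers.base import ProviderType                       # assumed import path
from utils.model_restrictions import get_restriction_service  # path taken from the diff

service = get_restriction_service()
# With an empty set, the new len(allowed_set) == 0 branch returns True,
# so any model name is allowed.
print(service.is_allowed(ProviderType.OPENROUTER, "openai/gpt-4o"))  # expected: True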