feat(openrouter): add live model sync
This commit is contained in:
@@ -3,6 +3,7 @@
|
||||
import json
|
||||
import os
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
@@ -22,6 +23,11 @@ class TestOpenRouterModelRegistry:
|
||||
assert len(registry.list_models()) > 0
|
||||
assert len(registry.list_aliases()) > 0
|
||||
|
||||
def test_registry_initialization_with_live_catalogue_defaults(self):
    """A registry built with no arguments should expose a non-empty model list."""
    default_registry = OpenRouterModelRegistry()

    assert default_registry.list_models()
||||
def test_custom_config_path(self):
|
||||
"""Test registry with custom config path."""
|
||||
# Create temporary config
|
||||
@@ -51,6 +57,8 @@ class TestOpenRouterModelRegistry:
|
||||
|
||||
def test_environment_variable_override(self):
|
||||
"""Test OPENROUTER_MODELS_CONFIG_PATH environment variable."""
|
||||
original_env = os.environ.get("OPENROUTER_MODELS_CONFIG_PATH")
|
||||
|
||||
# Create custom config
|
||||
config_data = {
|
||||
"models": [
|
||||
@@ -64,7 +72,6 @@ class TestOpenRouterModelRegistry:
|
||||
|
||||
try:
|
||||
# Set environment variable
|
||||
original_env = os.environ.get("OPENROUTER_MODELS_CONFIG_PATH")
|
||||
os.environ["OPENROUTER_MODELS_CONFIG_PATH"] = temp_path
|
||||
|
||||
# Create registry without explicit path
|
||||
@@ -214,6 +221,133 @@ class TestOpenRouterModelRegistry:
|
||||
finally:
|
||||
os.unlink(temp_path)
|
||||
|
||||
def test_live_catalogue_adds_unsynced_model_ids(self):
    """A model present only in the live catalogue must still be listed and resolvable."""
    curated_manifest = {
        "models": [
            {
                "model_name": "openai/gpt-5.2",
                "aliases": ["gpt5.2"],
                "context_window": 400000,
                "max_output_tokens": 128000,
            }
        ]
    }
    live_manifest = {
        "models": [
            {
                "model_name": "openai/gpt-5.4",
                "aliases": [],
                "context_window": 400000,
                "max_output_tokens": 128000,
                "supports_extended_thinking": True,
                "supports_json_mode": True,
                "supports_function_calling": False,
                "supports_images": True,
                "max_image_size_mb": 20.0,
                "supports_temperature": True,
                "temperature_constraint": "range",
                "description": "Live-only GPT-5.4 entry",
            }
        ]
    }

    def dump_to_temp(payload):
        # Persist the payload as a throwaway JSON file; caller unlinks it.
        with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as handle:
            json.dump(payload, handle)
            return handle.name

    curated_path = dump_to_temp(curated_manifest)
    live_path = dump_to_temp(live_manifest)

    try:
        registry = OpenRouterModelRegistry(config_path=curated_path, live_config_path=live_path)
        assert "openai/gpt-5.4" in registry.list_models()
        resolved = registry.resolve("openai/gpt-5.4")
        assert resolved is not None
        assert resolved.description == "Live-only GPT-5.4 entry"
    finally:
        os.unlink(curated_path)
        os.unlink(live_path)
|
||||
|
||||
def test_curated_manifest_overrides_live_metadata(self):
    """When both catalogues describe the same model, curated values must win."""
    curated_manifest = {
        "models": [
            {
                "model_name": "openai/gpt-5.4",
                "aliases": ["gpt5.4"],
                "context_window": 400000,
                "max_output_tokens": 128000,
                "supports_extended_thinking": True,
                "supports_json_mode": True,
                "supports_function_calling": True,
                "supports_images": True,
                "max_image_size_mb": 20.0,
                "supports_temperature": False,
                "temperature_constraint": "fixed",
                "description": "Curated override",
                "intelligence_score": 18,
                "allow_code_generation": True,
                "use_openai_response_api": True,
            }
        ]
    }
    # Deliberately conflicting metadata for the same model name.
    live_manifest = {
        "models": [
            {
                "model_name": "openai/gpt-5.4",
                "aliases": [],
                "context_window": 1234,
                "max_output_tokens": 5678,
                "supports_extended_thinking": False,
                "supports_json_mode": False,
                "supports_function_calling": False,
                "supports_images": False,
                "max_image_size_mb": 0.0,
                "supports_temperature": True,
                "temperature_constraint": "range",
                "description": "Live baseline",
                "intelligence_score": 10,
                "allow_code_generation": False,
                "use_openai_response_api": False,
            }
        ]
    }

    def dump_to_temp(payload):
        # Persist the payload as a throwaway JSON file; caller unlinks it.
        with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as handle:
            json.dump(payload, handle)
            return handle.name

    curated_path = dump_to_temp(curated_manifest)
    live_path = dump_to_temp(live_manifest)

    try:
        registry = OpenRouterModelRegistry(config_path=curated_path, live_config_path=live_path)
        resolved = registry.resolve("gpt5.4")
        assert resolved is not None
        assert resolved.model_name == "openai/gpt-5.4"
        assert resolved.description == "Curated override"
        assert resolved.context_window == 400000
        assert resolved.max_output_tokens == 128000
        assert resolved.supports_function_calling is True
        assert resolved.supports_temperature is False
        assert resolved.allow_code_generation is True
        assert resolved.use_openai_response_api is True
    finally:
        os.unlink(curated_path)
        os.unlink(live_path)
|
||||
|
||||
def test_missing_live_catalogue_keeps_curated_models_working(self, monkeypatch):
    """Pointing the live-catalogue env var at a nonexistent file must not break curated models."""
    absent_path = Path(tempfile.gettempdir()) / "pal-missing-openrouter-live.json"
    # Guarantee the file really is absent before the registry looks for it.
    absent_path.unlink(missing_ok=True)

    monkeypatch.setenv("OPENROUTER_LIVE_MODELS_CONFIG_PATH", str(absent_path))

    registry = OpenRouterModelRegistry()

    assert "openai/o3" in registry.list_models()
    assert registry.resolve("o3") is not None
|
||||
|
||||
def test_model_with_all_capabilities(self):
|
||||
"""Test model with all capability flags."""
|
||||
from providers.shared import TemperatureConstraint
|
||||
|
||||
Reference in New Issue
Block a user