feat(openrouter): add live model sync

This commit is contained in:
Torbjørn Lindahl
2026-04-01 11:41:53 +02:00
parent 4585833df1
commit cc8f6380d6
5 changed files with 6013 additions and 4 deletions

View File

@@ -3,6 +3,7 @@
import json
import os
import tempfile
from pathlib import Path
from unittest.mock import patch
import pytest
@@ -22,6 +23,11 @@ class TestOpenRouterModelRegistry:
assert len(registry.list_models()) > 0
assert len(registry.list_aliases()) > 0
def test_registry_initialization_with_live_catalogue_defaults(self):
    """Constructing the registry with no explicit paths yields a non-empty catalogue."""
    default_registry = OpenRouterModelRegistry()
    assert len(default_registry.list_models()) > 0
def test_custom_config_path(self):
"""Test registry with custom config path."""
# Create temporary config
@@ -51,6 +57,8 @@ class TestOpenRouterModelRegistry:
def test_environment_variable_override(self):
"""Test OPENROUTER_MODELS_CONFIG_PATH environment variable."""
original_env = os.environ.get("OPENROUTER_MODELS_CONFIG_PATH")
# Create custom config
config_data = {
"models": [
@@ -64,7 +72,6 @@ class TestOpenRouterModelRegistry:
try:
# Set environment variable
original_env = os.environ.get("OPENROUTER_MODELS_CONFIG_PATH")
os.environ["OPENROUTER_MODELS_CONFIG_PATH"] = temp_path
# Create registry without explicit path
@@ -214,6 +221,133 @@ class TestOpenRouterModelRegistry:
finally:
os.unlink(temp_path)
def test_live_catalogue_adds_unsynced_model_ids(self):
    """A model present only in the live catalogue is merged into the registry.

    ``openai/gpt-5.4`` exists only in the live manifest; it must appear in
    ``list_models()`` and resolve with its live metadata intact.
    """
    curated_data = {
        "models": [
            {
                "model_name": "openai/gpt-5.2",
                "aliases": ["gpt5.2"],
                "context_window": 400000,
                "max_output_tokens": 128000,
            }
        ]
    }
    live_data = {
        "models": [
            {
                "model_name": "openai/gpt-5.4",
                "aliases": [],
                "context_window": 400000,
                "max_output_tokens": 128000,
                "supports_extended_thinking": True,
                "supports_json_mode": True,
                "supports_function_calling": False,
                "supports_images": True,
                "max_image_size_mb": 20.0,
                "supports_temperature": True,
                "temperature_constraint": "range",
                "description": "Live-only GPT-5.4 entry",
            }
        ]
    }
    # Track the created paths so cleanup runs even if the second tempfile
    # creation (or the registry construction) raises part-way through —
    # the previous layout leaked the curated file in that case.
    curated_path = None
    live_path = None
    try:
        with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as curated_file:
            json.dump(curated_data, curated_file)
            curated_path = curated_file.name
        with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as live_file:
            json.dump(live_data, live_file)
            live_path = live_file.name
        registry = OpenRouterModelRegistry(config_path=curated_path, live_config_path=live_path)
        assert "openai/gpt-5.4" in registry.list_models()
        caps = registry.resolve("openai/gpt-5.4")
        assert caps is not None
        assert caps.description == "Live-only GPT-5.4 entry"
    finally:
        for path in (curated_path, live_path):
            if path is not None:
                os.unlink(path)
def test_curated_manifest_overrides_live_metadata(self):
    """Curated metadata wins when a model exists in both manifests.

    Both manifests declare ``openai/gpt-5.4`` with conflicting values; every
    checked capability must come from the curated entry, not the live one.
    """
    curated_data = {
        "models": [
            {
                "model_name": "openai/gpt-5.4",
                "aliases": ["gpt5.4"],
                "context_window": 400000,
                "max_output_tokens": 128000,
                "supports_extended_thinking": True,
                "supports_json_mode": True,
                "supports_function_calling": True,
                "supports_images": True,
                "max_image_size_mb": 20.0,
                "supports_temperature": False,
                "temperature_constraint": "fixed",
                "description": "Curated override",
                "intelligence_score": 18,
                "allow_code_generation": True,
                "use_openai_response_api": True,
            }
        ]
    }
    live_data = {
        "models": [
            {
                "model_name": "openai/gpt-5.4",
                "aliases": [],
                "context_window": 1234,
                "max_output_tokens": 5678,
                "supports_extended_thinking": False,
                "supports_json_mode": False,
                "supports_function_calling": False,
                "supports_images": False,
                "max_image_size_mb": 0.0,
                "supports_temperature": True,
                "temperature_constraint": "range",
                "description": "Live baseline",
                "intelligence_score": 10,
                "allow_code_generation": False,
                "use_openai_response_api": False,
            }
        ]
    }
    # Track the created paths so cleanup runs even if the second tempfile
    # creation (or the registry construction) raises part-way through —
    # the previous layout leaked the curated file in that case.
    curated_path = None
    live_path = None
    try:
        with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as curated_file:
            json.dump(curated_data, curated_file)
            curated_path = curated_file.name
        with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as live_file:
            json.dump(live_data, live_file)
            live_path = live_file.name
        registry = OpenRouterModelRegistry(config_path=curated_path, live_config_path=live_path)
        caps = registry.resolve("gpt5.4")
        assert caps is not None
        assert caps.model_name == "openai/gpt-5.4"
        assert caps.description == "Curated override"
        assert caps.context_window == 400000
        assert caps.max_output_tokens == 128000
        assert caps.supports_function_calling is True
        assert caps.supports_temperature is False
        assert caps.allow_code_generation is True
        assert caps.use_openai_response_api is True
    finally:
        for path in (curated_path, live_path):
            if path is not None:
                os.unlink(path)
def test_missing_live_catalogue_keeps_curated_models_working(self, monkeypatch):
    """Pointing the env var at an absent live catalogue must not break curated lookups."""
    absent_live_path = Path(tempfile.gettempdir()) / "pal-missing-openrouter-live.json"
    # Ensure the file genuinely does not exist before the registry loads.
    absent_live_path.unlink(missing_ok=True)
    monkeypatch.setenv("OPENROUTER_LIVE_MODELS_CONFIG_PATH", str(absent_live_path))

    registry = OpenRouterModelRegistry()
    curated_models = registry.list_models()
    assert "openai/o3" in curated_models
    assert registry.resolve("o3") is not None
def test_model_with_all_capabilities(self):
"""Test model with all capability flags."""
from providers.shared import TemperatureConstraint

View File

@@ -0,0 +1,53 @@
import importlib.util
from pathlib import Path

# Load the sync script directly from scripts/ so its helpers can be unit
# tested without adding the scripts directory to sys.path.
SCRIPT_PATH = Path(__file__).resolve().parents[1] / "scripts" / "sync_openrouter_models.py"
SPEC = importlib.util.spec_from_file_location("sync_openrouter_models", SCRIPT_PATH)
# Fail loudly even under ``python -O`` (where ``assert`` is stripped) if the
# script cannot be located or has no loader.
if SPEC is None or SPEC.loader is None:
    raise ImportError(f"cannot load sync script from {SCRIPT_PATH}")
MODULE = importlib.util.module_from_spec(SPEC)
SPEC.loader.exec_module(MODULE)
def test_convert_model_maps_openrouter_payload_conservatively():
    """convert_model translates an OpenRouter payload field by field.

    Capability flags default conservatively: function calling and code
    generation stay off even though the payload advertises other features.
    """
    payload = {
        "id": "openai/gpt-5.4",
        "description": "GPT-5.4 description",
        "context_length": 400000,
        "top_provider": {"max_completion_tokens": 128000},
        "architecture": {"input_modalities": ["text", "image"]},
        "supported_parameters": ["temperature", "reasoning", "response_format", "structured_outputs"],
    }

    converted = MODULE.convert_model(payload)
    assert converted is not None

    assert converted["model_name"] == "openai/gpt-5.4"
    assert converted["context_window"] == 400000
    assert converted["max_output_tokens"] == 128000
    assert converted["max_image_size_mb"] == 20.0
    assert converted["temperature_constraint"] == "range"

    expected_flags = {
        "supports_extended_thinking": True,
        "supports_json_mode": True,
        "supports_function_calling": False,
        "supports_images": True,
        "supports_temperature": True,
        "allow_code_generation": False,
    }
    for flag, expected in expected_flags.items():
        assert converted[flag] is expected, flag
def test_convert_model_marks_reasoning_models_without_temperature_as_fixed():
    """A reasoning model that omits 'temperature' is converted as fixed-temperature."""
    payload = {
        "id": "openai/o3",
        "description": "o3 description",
        "context_length": 200000,
        "top_provider": {},
        "architecture": {"input_modalities": ["text"]},
        "supported_parameters": ["reasoning"],
    }

    converted = MODULE.convert_model(payload)
    assert converted is not None

    # No "temperature" in supported_parameters -> temperature is pinned.
    assert converted["supports_temperature"] is False
    assert converted["temperature_constraint"] == "fixed"
    # Text-only input modalities -> no image support.
    assert converted["supports_images"] is False