docs/openrouter-sync-operations #1
5578
conf/openrouter_models_live.json
Normal file
5578
conf/openrouter_models_live.json
Normal file
File diff suppressed because it is too large
Load Diff
@@ -2,14 +2,30 @@
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import importlib.resources
|
||||
import json
|
||||
import logging
|
||||
from pathlib import Path
|
||||
|
||||
from utils.env import get_env
|
||||
from utils.file_utils import read_json_file
|
||||
|
||||
from ..shared import ModelCapabilities, ProviderType
|
||||
from .base import CAPABILITY_FIELD_NAMES, CapabilityModelRegistry
|
||||
|
||||
|
||||
class OpenRouterModelRegistry(CapabilityModelRegistry):
|
||||
"""Capability registry backed by ``conf/openrouter_models.json``."""
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class OpenRouterModelRegistry(CapabilityModelRegistry):
|
||||
LIVE_ENV_VAR_NAME = "OPENROUTER_LIVE_MODELS_CONFIG_PATH"
|
||||
LIVE_DEFAULT_FILENAME = "openrouter_models_live.json"
|
||||
|
||||
def __init__(self, config_path: str | None = None, live_config_path: str | None = None) -> None:
|
||||
self._live_use_resources = False
|
||||
self._live_config_path: Path | None = None
|
||||
self._live_default_path = Path(__file__).resolve().parents[3] / "conf" / self.LIVE_DEFAULT_FILENAME
|
||||
|
||||
def __init__(self, config_path: str | None = None) -> None:
|
||||
super().__init__(
|
||||
env_var_name="OPENROUTER_MODELS_CONFIG_PATH",
|
||||
default_filename="openrouter_models.json",
|
||||
@@ -18,6 +34,79 @@ class OpenRouterModelRegistry(CapabilityModelRegistry):
|
||||
config_path=config_path,
|
||||
)
|
||||
|
||||
if live_config_path:
|
||||
self._live_config_path = Path(live_config_path)
|
||||
else:
|
||||
env_path = get_env(self.LIVE_ENV_VAR_NAME)
|
||||
if env_path:
|
||||
self._live_config_path = Path(env_path)
|
||||
else:
|
||||
try:
|
||||
resource = importlib.resources.files("conf").joinpath(self.LIVE_DEFAULT_FILENAME)
|
||||
if hasattr(resource, "read_text"):
|
||||
self._live_use_resources = True
|
||||
else:
|
||||
raise AttributeError("resource accessor not available")
|
||||
except Exception:
|
||||
self._live_config_path = self._live_default_path
|
||||
|
||||
self.reload()
|
||||
|
||||
    def reload(self) -> None:
        """Rebuild the registry from the live catalogue merged with the curated manifest.

        The live (generated) catalogue supplies baseline entries; curated
        manifest entries override them per model (see ``_merge_manifest_data``).
        """
        live_data = self._load_live_config_data()
        curated_data = self._load_config_data()
        merged_data = self._merge_manifest_data(live_data, curated_data)

        # Reset per-reload extras before rebuilding the lookup maps.
        self._extras = {}
        configs = [config for config in self._parse_models(merged_data) if config is not None]
        self._build_maps(configs)
|
||||
|
||||
    def _load_live_config_data(self) -> dict:
        """Load the generated live catalogue, returning ``{"models": []}`` on any failure.

        Two sources are supported, selected during ``__init__``:
        the packaged ``conf`` resource (``_live_use_resources``) or an
        on-disk path (``_live_config_path``). Failures are logged and
        degrade to an empty catalogue rather than raising.
        """
        if self._live_use_resources:
            # Read the catalogue packaged inside the ``conf`` package.
            try:
                resource = importlib.resources.files("conf").joinpath(self.LIVE_DEFAULT_FILENAME)
                if hasattr(resource, "read_text"):
                    config_text = resource.read_text(encoding="utf-8")
                else:
                    # Older Traversable implementations without read_text.
                    with resource.open("r", encoding="utf-8") as handle:
                        config_text = handle.read()
                data = json.loads(config_text)
            except FileNotFoundError:
                # Missing packaged catalogue is expected in some installs.
                logger.debug("Packaged %s not found", self.LIVE_DEFAULT_FILENAME)
                return {"models": []}
            except Exception as exc:
                # Any other read/parse failure: warn but keep the server running.
                logger.warning("Failed to read packaged %s: %s", self.LIVE_DEFAULT_FILENAME, exc)
                return {"models": []}
            return data or {"models": []}

        if not self._live_config_path:
            return {"models": []}

        if not self._live_config_path.exists():
            logger.debug("OpenRouter live registry config not found at %s", self._live_config_path)
            return {"models": []}

        data = read_json_file(str(self._live_config_path))
        return data or {"models": []}
|
||||
|
||||
@staticmethod
|
||||
def _merge_manifest_data(live_data: dict, curated_data: dict) -> dict:
|
||||
merged_models: dict[str, dict] = {}
|
||||
|
||||
for source in (live_data, curated_data):
|
||||
for raw in source.get("models", []):
|
||||
if not isinstance(raw, dict):
|
||||
continue
|
||||
|
||||
model_name = raw.get("model_name")
|
||||
if not model_name:
|
||||
continue
|
||||
|
||||
existing = merged_models.get(model_name, {})
|
||||
merged_models[model_name] = {**existing, **dict(raw)}
|
||||
|
||||
return {"models": list(merged_models.values())}
|
||||
|
||||
def _finalise_entry(self, entry: dict) -> tuple[ModelCapabilities, dict]:
|
||||
provider_override = entry.get("provider")
|
||||
if isinstance(provider_override, str):
|
||||
|
||||
155
scripts/sync_openrouter_models.py
Normal file
155
scripts/sync_openrouter_models.py
Normal file
@@ -0,0 +1,155 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from urllib.error import HTTPError, URLError
|
||||
from urllib.request import Request, urlopen
|
||||
|
||||
from providers.shared.temperature import TemperatureConstraint
|
||||
|
||||
|
||||
# Repository root: this script lives in <root>/scripts/.
ROOT = Path(__file__).resolve().parents[1]
# Default destination for the generated live catalogue.
DEFAULT_OUTPUT = ROOT / "conf" / "openrouter_models_live.json"
# Public OpenRouter model-listing endpoint.
OPENROUTER_MODELS_URL = "https://openrouter.ai/api/v1/models"
|
||||
|
||||
|
||||
def fetch_openrouter_models(url: str) -> dict:
    """Download the OpenRouter model listing and return the decoded JSON object.

    Raises ValueError when the payload is valid JSON but not a JSON object;
    urllib errors (HTTPError/URLError) from the request itself propagate.
    """
    headers = {
        "Accept": "application/json",
        "User-Agent": "pal-mcp-server/openrouter-model-sync",
    }
    req = Request(url, headers=headers)

    with urlopen(req, timeout=30) as resp:
        # Honour the server-declared charset, defaulting to UTF-8.
        encoding = resp.headers.get_content_charset("utf-8")
        body = resp.read().decode(encoding)

    parsed = json.loads(body)
    if isinstance(parsed, dict):
        return parsed
    raise ValueError("OpenRouter models payload must be a JSON object")
|
||||
|
||||
|
||||
def _supports_parameter(model_data: dict, parameter: str) -> bool:
|
||||
supported = model_data.get("supported_parameters")
|
||||
return isinstance(supported, list) and parameter in supported
|
||||
|
||||
|
||||
def _input_modalities(model_data: dict) -> list[str]:
|
||||
architecture = model_data.get("architecture")
|
||||
if not isinstance(architecture, dict):
|
||||
return []
|
||||
|
||||
modalities = architecture.get("input_modalities")
|
||||
if not isinstance(modalities, list):
|
||||
return []
|
||||
|
||||
return [str(item) for item in modalities]
|
||||
|
||||
|
||||
def _infer_temperature_fields(model_name: str, model_data: dict) -> tuple[bool, str]:
    """Decide ``(supports_temperature, temperature_constraint)`` for a model.

    A model that advertises a ``temperature`` parameter gets the full range;
    otherwise the shared TemperatureConstraint heuristics decide.
    """
    if _supports_parameter(model_data, "temperature"):
        return True, "range"

    supported, _constraint, _reason = TemperatureConstraint.resolve_settings(model_name)
    constraint = "range" if supported else "fixed"
    return supported, constraint
|
||||
|
||||
|
||||
def convert_model(model_data: dict) -> dict | None:
    """Map one raw OpenRouter model entry onto the PAL manifest schema.

    Returns None when the entry lacks a usable ``id``. Capability fields are
    derived conservatively from advertised parameters and input modalities;
    curated manifest values are expected to override these at runtime.
    """
    name = model_data.get("id")
    if not isinstance(name, str) or not name:
        return None

    modalities = _input_modalities(model_data)
    has_images = "image" in modalities
    supports_temp, temp_constraint = _infer_temperature_fields(name, model_data)

    # Treat falsy/missing numeric metadata as 0.
    context_length = model_data.get("context_length") or 0
    provider_info = model_data.get("top_provider")
    if not isinstance(provider_info, dict):
        provider_info = {}
    completion_cap = provider_info.get("max_completion_tokens") or 0

    reasoning = _supports_parameter(model_data, "reasoning") or _supports_parameter(model_data, "include_reasoning")
    json_mode = _supports_parameter(model_data, "response_format") or _supports_parameter(model_data, "structured_outputs")

    return {
        "model_name": name,
        "aliases": [],
        "context_window": int(context_length) if context_length else 0,
        "max_output_tokens": int(completion_cap) if completion_cap else 0,
        "supports_extended_thinking": reasoning,
        "supports_json_mode": json_mode,
        "supports_function_calling": _supports_parameter(model_data, "tools"),
        "supports_images": has_images,
        # assumes a flat 20 MB cap for image-capable models — TODO confirm
        "max_image_size_mb": 20.0 if has_images else 0.0,
        "supports_temperature": supports_temp,
        "temperature_constraint": temp_constraint,
        "description": model_data.get("description") or model_data.get("name") or "",
        "intelligence_score": 10,
        "allow_code_generation": False,
    }
|
||||
|
||||
|
||||
def build_output_document(source: dict, source_url: str) -> dict:
    """Assemble the output manifest: README metadata plus name-sorted model entries."""
    converted_models = [
        entry
        for entry in (convert_model(item) for item in source.get("data", []) if isinstance(item, dict))
        if entry
    ]
    converted_models.sort(key=lambda entry: entry["model_name"])

    readme = {
        "description": "Generated baseline OpenRouter catalogue for PAL MCP Server.",
        "source": source_url,
        "usage": "Generated by scripts/sync_openrouter_models.py. Curated overrides belong in conf/openrouter_models.json.",
        "field_notes": "Entries are conservative and intended as discovery data only. Curated manifest values override these at runtime.",
    }

    return {
        "_README": readme,
        "models": converted_models,
    }
|
||||
|
||||
|
||||
def write_output(path: Path, document: dict) -> None:
    """Serialize *document* as UTF-8 JSON at *path* (LF newlines, trailing newline)."""
    path.parent.mkdir(parents=True, exist_ok=True)
    text = json.dumps(document, indent=2, ensure_ascii=False) + "\n"
    with path.open("w", encoding="utf-8", newline="\n") as handle:
        handle.write(text)
|
||||
|
||||
|
||||
def parse_args() -> argparse.Namespace:
    """Parse command-line options: --url (source endpoint) and --output (manifest path)."""
    parser = argparse.ArgumentParser(description="Sync OpenRouter live model catalogue into PAL config.")
    parser.add_argument("--url", default=OPENROUTER_MODELS_URL, help="OpenRouter models endpoint")
    parser.add_argument("--output", default=str(DEFAULT_OUTPUT), help="Path to the generated live OpenRouter manifest")
    return parser.parse_args()
|
||||
|
||||
|
||||
def main() -> int:
    """Fetch the OpenRouter catalogue, convert it, and write the live manifest.

    Returns a process exit code: 0 on success, 1 on any fetch/convert/write
    failure (the error is printed to stderr).
    """
    args = parse_args()
    output_path = Path(args.output)

    try:
        source = fetch_openrouter_models(args.url)
        document = build_output_document(source, args.url)
        write_output(output_path, document)
    except (OSError, ValueError) as exc:
        # OSError covers HTTPError/URLError/TimeoutError (all OSError
        # subclasses) AND filesystem failures from write_output, which the
        # previous tuple let escape as tracebacks. ValueError covers
        # json.JSONDecodeError and the non-object payload check.
        print(f"Failed to sync OpenRouter models: {exc}", file=sys.stderr)
        return 1

    print(f"Wrote {len(document['models'])} OpenRouter models to {output_path}")
    return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Propagate main()'s exit code to the shell.
    raise SystemExit(main())
|
||||
@@ -3,6 +3,7 @@
|
||||
import json
|
||||
import os
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
@@ -22,6 +23,11 @@ class TestOpenRouterModelRegistry:
|
||||
assert len(registry.list_models()) > 0
|
||||
assert len(registry.list_aliases()) > 0
|
||||
|
||||
def test_registry_initialization_with_live_catalogue_defaults(self):
|
||||
registry = OpenRouterModelRegistry()
|
||||
|
||||
assert registry.list_models()
|
||||
|
||||
def test_custom_config_path(self):
|
||||
"""Test registry with custom config path."""
|
||||
# Create temporary config
|
||||
@@ -51,6 +57,8 @@ class TestOpenRouterModelRegistry:
|
||||
|
||||
def test_environment_variable_override(self):
|
||||
"""Test OPENROUTER_MODELS_CONFIG_PATH environment variable."""
|
||||
original_env = os.environ.get("OPENROUTER_MODELS_CONFIG_PATH")
|
||||
|
||||
# Create custom config
|
||||
config_data = {
|
||||
"models": [
|
||||
@@ -64,7 +72,6 @@ class TestOpenRouterModelRegistry:
|
||||
|
||||
try:
|
||||
# Set environment variable
|
||||
original_env = os.environ.get("OPENROUTER_MODELS_CONFIG_PATH")
|
||||
os.environ["OPENROUTER_MODELS_CONFIG_PATH"] = temp_path
|
||||
|
||||
# Create registry without explicit path
|
||||
@@ -214,6 +221,133 @@ class TestOpenRouterModelRegistry:
|
||||
finally:
|
||||
os.unlink(temp_path)
|
||||
|
||||
def test_live_catalogue_adds_unsynced_model_ids(self):
|
||||
curated_data = {
|
||||
"models": [
|
||||
{
|
||||
"model_name": "openai/gpt-5.2",
|
||||
"aliases": ["gpt5.2"],
|
||||
"context_window": 400000,
|
||||
"max_output_tokens": 128000,
|
||||
}
|
||||
]
|
||||
}
|
||||
live_data = {
|
||||
"models": [
|
||||
{
|
||||
"model_name": "openai/gpt-5.4",
|
||||
"aliases": [],
|
||||
"context_window": 400000,
|
||||
"max_output_tokens": 128000,
|
||||
"supports_extended_thinking": True,
|
||||
"supports_json_mode": True,
|
||||
"supports_function_calling": False,
|
||||
"supports_images": True,
|
||||
"max_image_size_mb": 20.0,
|
||||
"supports_temperature": True,
|
||||
"temperature_constraint": "range",
|
||||
"description": "Live-only GPT-5.4 entry",
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as curated_file:
|
||||
json.dump(curated_data, curated_file)
|
||||
curated_path = curated_file.name
|
||||
|
||||
with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as live_file:
|
||||
json.dump(live_data, live_file)
|
||||
live_path = live_file.name
|
||||
|
||||
try:
|
||||
registry = OpenRouterModelRegistry(config_path=curated_path, live_config_path=live_path)
|
||||
assert "openai/gpt-5.4" in registry.list_models()
|
||||
caps = registry.resolve("openai/gpt-5.4")
|
||||
assert caps is not None
|
||||
assert caps.description == "Live-only GPT-5.4 entry"
|
||||
finally:
|
||||
os.unlink(curated_path)
|
||||
os.unlink(live_path)
|
||||
|
||||
def test_curated_manifest_overrides_live_metadata(self):
|
||||
curated_data = {
|
||||
"models": [
|
||||
{
|
||||
"model_name": "openai/gpt-5.4",
|
||||
"aliases": ["gpt5.4"],
|
||||
"context_window": 400000,
|
||||
"max_output_tokens": 128000,
|
||||
"supports_extended_thinking": True,
|
||||
"supports_json_mode": True,
|
||||
"supports_function_calling": True,
|
||||
"supports_images": True,
|
||||
"max_image_size_mb": 20.0,
|
||||
"supports_temperature": False,
|
||||
"temperature_constraint": "fixed",
|
||||
"description": "Curated override",
|
||||
"intelligence_score": 18,
|
||||
"allow_code_generation": True,
|
||||
"use_openai_response_api": True,
|
||||
}
|
||||
]
|
||||
}
|
||||
live_data = {
|
||||
"models": [
|
||||
{
|
||||
"model_name": "openai/gpt-5.4",
|
||||
"aliases": [],
|
||||
"context_window": 1234,
|
||||
"max_output_tokens": 5678,
|
||||
"supports_extended_thinking": False,
|
||||
"supports_json_mode": False,
|
||||
"supports_function_calling": False,
|
||||
"supports_images": False,
|
||||
"max_image_size_mb": 0.0,
|
||||
"supports_temperature": True,
|
||||
"temperature_constraint": "range",
|
||||
"description": "Live baseline",
|
||||
"intelligence_score": 10,
|
||||
"allow_code_generation": False,
|
||||
"use_openai_response_api": False,
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as curated_file:
|
||||
json.dump(curated_data, curated_file)
|
||||
curated_path = curated_file.name
|
||||
|
||||
with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as live_file:
|
||||
json.dump(live_data, live_file)
|
||||
live_path = live_file.name
|
||||
|
||||
try:
|
||||
registry = OpenRouterModelRegistry(config_path=curated_path, live_config_path=live_path)
|
||||
caps = registry.resolve("gpt5.4")
|
||||
assert caps is not None
|
||||
assert caps.model_name == "openai/gpt-5.4"
|
||||
assert caps.description == "Curated override"
|
||||
assert caps.context_window == 400000
|
||||
assert caps.max_output_tokens == 128000
|
||||
assert caps.supports_function_calling is True
|
||||
assert caps.supports_temperature is False
|
||||
assert caps.allow_code_generation is True
|
||||
assert caps.use_openai_response_api is True
|
||||
finally:
|
||||
os.unlink(curated_path)
|
||||
os.unlink(live_path)
|
||||
|
||||
def test_missing_live_catalogue_keeps_curated_models_working(self, monkeypatch):
|
||||
missing_live_path = Path(tempfile.gettempdir()) / "pal-missing-openrouter-live.json"
|
||||
if missing_live_path.exists():
|
||||
missing_live_path.unlink()
|
||||
|
||||
monkeypatch.setenv("OPENROUTER_LIVE_MODELS_CONFIG_PATH", str(missing_live_path))
|
||||
|
||||
registry = OpenRouterModelRegistry()
|
||||
assert "openai/o3" in registry.list_models()
|
||||
assert registry.resolve("o3") is not None
|
||||
|
||||
def test_model_with_all_capabilities(self):
|
||||
"""Test model with all capability flags."""
|
||||
from providers.shared import TemperatureConstraint
|
||||
|
||||
53
tests/test_sync_openrouter_models.py
Normal file
53
tests/test_sync_openrouter_models.py
Normal file
@@ -0,0 +1,53 @@
|
||||
import importlib.util
from pathlib import Path


# The sync script lives outside the package tree, so load it by file path
# instead of a normal import; tests reach its functions through MODULE.
SCRIPT_PATH = Path(__file__).resolve().parents[1] / "scripts" / "sync_openrouter_models.py"
SPEC = importlib.util.spec_from_file_location("sync_openrouter_models", SCRIPT_PATH)
assert SPEC is not None and SPEC.loader is not None
MODULE = importlib.util.module_from_spec(SPEC)
SPEC.loader.exec_module(MODULE)
|
||||
|
||||
|
||||
def test_convert_model_maps_openrouter_payload_conservatively():
    """convert_model maps an advertised-parameter payload onto manifest fields."""
    payload = {
        "id": "openai/gpt-5.4",
        "description": "GPT-5.4 description",
        "context_length": 400000,
        "top_provider": {"max_completion_tokens": 128000},
        "architecture": {"input_modalities": ["text", "image"]},
        "supported_parameters": ["temperature", "reasoning", "response_format", "structured_outputs"],
    }

    result = MODULE.convert_model(payload)

    assert result is not None
    assert result["model_name"] == "openai/gpt-5.4"
    assert result["context_window"] == 400000
    assert result["max_output_tokens"] == 128000
    assert result["supports_extended_thinking"] is True
    assert result["supports_json_mode"] is True
    assert result["supports_function_calling"] is False
    assert result["supports_images"] is True
    assert result["max_image_size_mb"] == 20.0
    assert result["supports_temperature"] is True
    assert result["temperature_constraint"] == "range"
    assert result["allow_code_generation"] is False
||||
|
||||
|
||||
def test_convert_model_marks_reasoning_models_without_temperature_as_fixed():
    """Reasoning-only models (no temperature parameter) get a fixed constraint."""
    payload = {
        "id": "openai/o3",
        "description": "o3 description",
        "context_length": 200000,
        "top_provider": {},
        "architecture": {"input_modalities": ["text"]},
        "supported_parameters": ["reasoning"],
    }

    result = MODULE.convert_model(payload)

    assert result is not None
    assert result["supports_temperature"] is False
    assert result["temperature_constraint"] == "fixed"
    assert result["supports_images"] is False
|
||||
Reference in New Issue
Block a user