Compare commits

...

4 Commits

Author SHA1 Message Date
Torbjørn Lindahl
65567ec40e feat(openrouter): add curated aliases for new models 2026-04-01 23:04:00 +02:00
Torbjørn Lindahl
4c4421b28f fix(openrouter): clean up registry loading 2026-04-01 12:07:53 +02:00
Torbjørn Lindahl
cc8f6380d6 feat(openrouter): add live model sync 2026-04-01 11:41:53 +02:00
Torbjørn Lindahl
4585833df1 docs: document openrouter sync workflow 2026-04-01 11:32:59 +02:00
8 changed files with 6144 additions and 23 deletions

View File

@@ -41,6 +41,15 @@
"description": "Claude Opus 4.5 - Anthropic's frontier reasoning model for complex software engineering and agentic workflows", "description": "Claude Opus 4.5 - Anthropic's frontier reasoning model for complex software engineering and agentic workflows",
"intelligence_score": 18 "intelligence_score": 18
}, },
{
"model_name": "anthropic/claude-opus-4.6",
"aliases": [
"opus-4.6",
"opus4.6"
],
"intelligence_score": 19,
"allow_code_generation": true
},
{ {
"model_name": "anthropic/claude-sonnet-4.5", "model_name": "anthropic/claude-sonnet-4.5",
"aliases": [ "aliases": [
@@ -57,6 +66,15 @@
"description": "Claude Sonnet 4.5 - High-performance model with exceptional reasoning and efficiency", "description": "Claude Sonnet 4.5 - High-performance model with exceptional reasoning and efficiency",
"intelligence_score": 12 "intelligence_score": 12
}, },
{
"model_name": "anthropic/claude-sonnet-4.6",
"aliases": [
"sonnet-4.6",
"sonnet4.6"
],
"intelligence_score": 18,
"allow_code_generation": true
},
{ {
"model_name": "anthropic/claude-opus-4.1", "model_name": "anthropic/claude-opus-4.1",
"aliases": [ "aliases": [
@@ -122,6 +140,15 @@
"description": "Google's Gemini 3.0 Pro via OpenRouter with vision", "description": "Google's Gemini 3.0 Pro via OpenRouter with vision",
"intelligence_score": 18 "intelligence_score": 18
}, },
{
"model_name": "google/gemini-3.1-pro-preview",
"aliases": [
"gemini-3.1-pro",
"gemini3.1-pro"
],
"intelligence_score": 19,
"allow_code_generation": true
},
{ {
"model_name": "google/gemini-2.5-pro", "model_name": "google/gemini-2.5-pro",
"aliases": [ "aliases": [
@@ -352,6 +379,35 @@
"description": "GPT-5.2 Pro - Advanced reasoning model with highest quality responses (text+image input, text output only)", "description": "GPT-5.2 Pro - Advanced reasoning model with highest quality responses (text+image input, text output only)",
"intelligence_score": 18 "intelligence_score": 18
}, },
{
"model_name": "openai/gpt-5.4",
"aliases": [
"gpt-5.4",
"gpt5.4"
],
"intelligence_score": 19,
"allow_code_generation": true
},
{
"model_name": "openai/gpt-5.4-pro",
"aliases": [
"gpt-5.4-pro",
"gpt5.4-pro",
"gpt5.4pro"
],
"intelligence_score": 20,
"allow_code_generation": true
},
{
"model_name": "openai/gpt-5.4-mini",
"aliases": [
"gpt-5.4-mini",
"gpt5.4-mini",
"gpt5.4mini"
],
"intelligence_score": 14,
"allow_code_generation": true
},
{ {
"model_name": "openai/gpt-5-codex", "model_name": "openai/gpt-5-codex",
"aliases": [ "aliases": [

File diff suppressed because it is too large Load Diff

View File

@@ -79,7 +79,8 @@ DEFAULT_MODEL=auto # Claude picks best model for each task (recommended)
- `conf/openai_models.json` OpenAI catalogue (can be overridden with `OPENAI_MODELS_CONFIG_PATH`) - `conf/openai_models.json` OpenAI catalogue (can be overridden with `OPENAI_MODELS_CONFIG_PATH`)
- `conf/gemini_models.json` Gemini catalogue (`GEMINI_MODELS_CONFIG_PATH`) - `conf/gemini_models.json` Gemini catalogue (`GEMINI_MODELS_CONFIG_PATH`)
- `conf/xai_models.json` X.AI / GROK catalogue (`XAI_MODELS_CONFIG_PATH`) - `conf/xai_models.json` X.AI / GROK catalogue (`XAI_MODELS_CONFIG_PATH`)
- `conf/openrouter_models.json` OpenRouter catalogue (`OPENROUTER_MODELS_CONFIG_PATH`) - `conf/openrouter_models.json` Curated OpenRouter overrides (`OPENROUTER_MODELS_CONFIG_PATH`)
- `conf/openrouter_models_live.json` Generated live OpenRouter catalogue (`OPENROUTER_LIVE_MODELS_CONFIG_PATH`)
- `conf/dial_models.json` DIAL aggregation catalogue (`DIAL_MODELS_CONFIG_PATH`) - `conf/dial_models.json` DIAL aggregation catalogue (`DIAL_MODELS_CONFIG_PATH`)
- `conf/custom_models.json` Custom/OpenAI-compatible endpoints (`CUSTOM_MODELS_CONFIG_PATH`) - `conf/custom_models.json` Custom/OpenAI-compatible endpoints (`CUSTOM_MODELS_CONFIG_PATH`)
@@ -92,10 +93,10 @@ DEFAULT_MODEL=auto # Claude picks best model for each task (recommended)
| OpenAI | `gpt-5.2`, `gpt-5.1-codex`, `gpt-5.1-codex-mini`, `gpt-5`, `gpt-5.2-pro`, `gpt-5-mini`, `gpt-5-nano`, `gpt-5-codex`, `gpt-4.1`, `o3`, `o3-mini`, `o3-pro`, `o4-mini` | `gpt5.2`, `gpt-5.2`, `5.2`, `gpt5.1-codex`, `codex-5.1`, `codex-mini`, `gpt5`, `gpt5pro`, `mini`, `nano`, `codex`, `o3mini`, `o3pro`, `o4mini` | | OpenAI | `gpt-5.2`, `gpt-5.1-codex`, `gpt-5.1-codex-mini`, `gpt-5`, `gpt-5.2-pro`, `gpt-5-mini`, `gpt-5-nano`, `gpt-5-codex`, `gpt-4.1`, `o3`, `o3-mini`, `o3-pro`, `o4-mini` | `gpt5.2`, `gpt-5.2`, `5.2`, `gpt5.1-codex`, `codex-5.1`, `codex-mini`, `gpt5`, `gpt5pro`, `mini`, `nano`, `codex`, `o3mini`, `o3pro`, `o4mini` |
| Gemini | `gemini-2.5-pro`, `gemini-2.5-flash`, `gemini-2.0-flash`, `gemini-2.0-flash-lite` | `pro`, `gemini-pro`, `flash`, `flash-2.0`, `flashlite` | | Gemini | `gemini-2.5-pro`, `gemini-2.5-flash`, `gemini-2.0-flash`, `gemini-2.0-flash-lite` | `pro`, `gemini-pro`, `flash`, `flash-2.0`, `flashlite` |
| X.AI | `grok-4`, `grok-4.1-fast` | `grok`, `grok4`, `grok-4.1-fast-reasoning` | | X.AI | `grok-4`, `grok-4.1-fast` | `grok`, `grok4`, `grok-4.1-fast-reasoning` |
| OpenRouter | See `conf/openrouter_models.json` for the continually evolving catalogue | e.g., `opus`, `sonnet`, `flash`, `pro`, `mistral` | | OpenRouter | Generated live catalogue plus curated overrides | e.g., `opus`, `sonnet`, `flash`, `pro`, `mistral` |
| Custom | User-managed entries such as `llama3.2` | Define your own aliases per entry | | Custom | User-managed entries such as `llama3.2` | Define your own aliases per entry |
Latest OpenAI entries (`gpt-5.2`, `gpt-5.1-codex`, `gpt-5.1-codex-mini`, `gpt-5.2-pro`) expose 400K-token contexts with large outputs, reasoning-token support, and multimodal inputs. `gpt-5.1-codex` and `gpt-5.2-pro` are Responses-only with streaming disabled, while the base `gpt-5.2` and Codex mini support streaming along with full code-generation flags. Update your manifests if you run custom deployments so these capability bits stay accurate. Latest OpenAI entries (`gpt-5.2`, `gpt-5.1-codex`, `gpt-5.1-codex-mini`, `gpt-5.2-pro`) expose 400K-token contexts with large outputs, reasoning-token support, and multimodal inputs. `gpt-5.1-codex` and `gpt-5.2-pro` are Responses-only with streaming disabled, while the base `gpt-5.2` and Codex mini support streaming along with full code-generation flags. For OpenRouter, keep PAL-specific metadata in the curated manifest and regenerate the live catalogue when OpenRouter adds or removes models; see [Refreshing the Live OpenRouter Catalogue](custom_models.md#refreshing-the-live-openrouter-catalogue).
> **Tip:** Copy the JSON file you need, customise it, and point the corresponding `*_MODELS_CONFIG_PATH` environment variable to your version. This lets you enable or disable capabilities (JSON mode, function calling, temperature support, code generation) without editing Python. > **Tip:** Copy the JSON file you need, customise it, and point the corresponding `*_MODELS_CONFIG_PATH` environment variable to your version. This lets you enable or disable capabilities (JSON mode, function calling, temperature support, code generation) without editing Python.

View File

@@ -221,7 +221,7 @@ CUSTOM_MODEL_NAME=your-loaded-model
The system automatically routes models to the appropriate provider: The system automatically routes models to the appropriate provider:
1. Entries in `conf/custom_models.json` → Always routed through the Custom API (requires `CUSTOM_API_URL`) 1. Entries in `conf/custom_models.json` → Always routed through the Custom API (requires `CUSTOM_API_URL`)
2. Entries in `conf/openrouter_models.json` → Routed through OpenRouter (requires `OPENROUTER_API_KEY`) 2. Entries in `conf/openrouter_models_live.json` and `conf/openrouter_models.json` → Routed through OpenRouter (requires `OPENROUTER_API_KEY`)
3. **Unknown models** → Fallback logic based on model name patterns 3. **Unknown models** → Fallback logic based on model name patterns
**Provider Priority Order:** **Provider Priority Order:**
@@ -241,7 +241,42 @@ These JSON files define model aliases and capabilities. You can:
### Adding Custom Models ### Adding Custom Models
Edit `conf/openrouter_models.json` to tweak OpenRouter behaviour or `conf/custom_models.json` to add local models. Each entry maps directly onto [`ModelCapabilities`](../providers/shared/model_capabilities.py). Edit `conf/openrouter_models.json` to tweak OpenRouter behaviour or `conf/custom_models.json` to add local models. The generated `conf/openrouter_models_live.json` file is discovery data from OpenRouter's `/api/v1/models` endpoint; curated entries in `conf/openrouter_models.json` override those generated defaults. Each entry maps directly onto [`ModelCapabilities`](../providers/shared/model_capabilities.py).
### Refreshing the Live OpenRouter Catalogue
Run the sync script whenever OpenRouter's catalogue changes and you want `listmodels` and provider enumeration to reflect those changes, or before cutting a release that should include an updated OpenRouter catalogue.
```bash
source .pal_venv/bin/activate
python scripts/sync_openrouter_models.py
```
By default the script:
- fetches `https://openrouter.ai/api/v1/models`
- writes conservative discovery data to `conf/openrouter_models_live.json`
- leaves `conf/openrouter_models.json` untouched
Use the optional flags if you need to test against a different endpoint or write to a different file:
```bash
python scripts/sync_openrouter_models.py --url https://openrouter.ai/api/v1/models --output conf/openrouter_models_live.json
```
Important runtime behavior:
- `conf/openrouter_models_live.json` is the generated baseline catalogue
- `conf/openrouter_models.json` is the curated override layer for aliases and PAL-specific capability flags
- curated entries win when the same `model_name` appears in both files
- models missing from the curated file are still available from the generated catalogue
After refreshing the catalogue:
1. Review the diff in `conf/openrouter_models_live.json`
2. Add or update curated entries in `conf/openrouter_models.json` if a new model needs aliases or PAL-specific capability tweaks
3. Restart the server so the updated manifests are reloaded
4. Commit the generated JSON alongside any curated overrides so other contributors get the same catalogue state
#### Adding an OpenRouter Model #### Adding an OpenRouter Model

View File

@@ -2,14 +2,46 @@
from __future__ import annotations from __future__ import annotations
import importlib.resources
import json
import logging
from pathlib import Path
from utils.env import get_env
from utils.file_utils import read_json_file
from ..shared import ModelCapabilities, ProviderType from ..shared import ModelCapabilities, ProviderType
from .base import CAPABILITY_FIELD_NAMES, CapabilityModelRegistry from .base import CAPABILITY_FIELD_NAMES, CapabilityModelRegistry
class OpenRouterModelRegistry(CapabilityModelRegistry): logger = logging.getLogger(__name__)
"""Capability registry backed by ``conf/openrouter_models.json``."""
class OpenRouterModelRegistry(CapabilityModelRegistry):
LIVE_ENV_VAR_NAME = "OPENROUTER_LIVE_MODELS_CONFIG_PATH"
LIVE_DEFAULT_FILENAME = "openrouter_models_live.json"
def __init__(self, config_path: str | None = None, live_config_path: str | None = None) -> None:
self._live_resource = None
self._live_config_path: Path | None = None
self._live_default_path = Path(__file__).resolve().parents[3] / "conf" / self.LIVE_DEFAULT_FILENAME
if live_config_path:
self._live_config_path = Path(live_config_path)
else:
env_path = get_env(self.LIVE_ENV_VAR_NAME)
if env_path:
self._live_config_path = Path(env_path)
else:
try:
resource = importlib.resources.files("conf").joinpath(self.LIVE_DEFAULT_FILENAME)
if hasattr(resource, "read_text"):
self._live_resource = resource
else:
raise AttributeError("resource accessor not available")
except Exception:
self._live_config_path = self._live_default_path
def __init__(self, config_path: str | None = None) -> None:
super().__init__( super().__init__(
env_var_name="OPENROUTER_MODELS_CONFIG_PATH", env_var_name="OPENROUTER_MODELS_CONFIG_PATH",
default_filename="openrouter_models.json", default_filename="openrouter_models.json",
@@ -18,6 +50,60 @@ class OpenRouterModelRegistry(CapabilityModelRegistry):
config_path=config_path, config_path=config_path,
) )
def reload(self) -> None:
    """Rebuild the registry from the merged live + curated manifests.

    Live (generated) data is loaded first, then curated overrides are
    layered on top before the capability maps are rebuilt.
    """
    merged = self._merge_manifest_data(self._load_live_config_data(), self._load_config_data())
    self._extras = {}
    parsed = self._parse_models(merged)
    self._build_maps([config for config in parsed if config is not None])
def _load_live_config_data(self) -> dict:
    """Load the generated live manifest.

    Returns the parsed manifest dict, or ``{"models": []}`` whenever the
    packaged resource or configured file is missing, unreadable, or empty.
    """
    if self._live_resource is not None:
        try:
            reader = getattr(self._live_resource, "read_text", None)
            if reader is not None:
                text = reader(encoding="utf-8")
            else:
                with self._live_resource.open("r", encoding="utf-8") as handle:
                    text = handle.read()
            parsed = json.loads(text)
        except FileNotFoundError:
            logger.debug("Packaged %s not found", self.LIVE_DEFAULT_FILENAME)
            return {"models": []}
        except Exception as exc:
            logger.warning("Failed to read packaged %s: %s", self.LIVE_DEFAULT_FILENAME, exc)
            return {"models": []}
        return parsed or {"models": []}

    if not self._live_config_path:
        return {"models": []}
    if not self._live_config_path.exists():
        logger.debug("OpenRouter live registry config not found at %s", self._live_config_path)
        return {"models": []}
    return read_json_file(str(self._live_config_path)) or {"models": []}
@staticmethod
def _merge_manifest_data(live_data: dict, curated_data: dict) -> dict:
    """Merge live and curated model lists keyed by ``model_name``.

    Curated entries are applied second, so any field they define wins over
    the live baseline for the same model; malformed or unnamed entries are
    skipped.
    """
    by_name: dict[str, dict] = {}
    for manifest in (live_data, curated_data):
        for entry in manifest.get("models", []):
            if not isinstance(entry, dict):
                continue
            name = entry.get("model_name")
            if not name:
                continue
            by_name[name] = {**by_name.get(name, {}), **dict(entry)}
    return {"models": list(by_name.values())}
def _finalise_entry(self, entry: dict) -> tuple[ModelCapabilities, dict]: def _finalise_entry(self, entry: dict) -> tuple[ModelCapabilities, dict]:
provider_override = entry.get("provider") provider_override = entry.get("provider")
if isinstance(provider_override, str): if isinstance(provider_override, str):

View File

@@ -0,0 +1,155 @@
#!/usr/bin/env python3
"""Sync OpenRouter's public model list into the generated live manifest."""
import argparse
import json
import sys
from pathlib import Path
from urllib.error import HTTPError, URLError
from urllib.request import Request, urlopen
from providers.shared.temperature import TemperatureConstraint
# Repository root (parent of scripts/), used to locate the default output path.
ROOT = Path(__file__).resolve().parents[1]
DEFAULT_OUTPUT = ROOT / "conf" / "openrouter_models_live.json"
OPENROUTER_MODELS_URL = "https://openrouter.ai/api/v1/models"
def fetch_openrouter_models(url: str) -> dict:
    """Download and decode the OpenRouter models payload from *url*.

    Raises ValueError when the decoded payload is not a JSON object; network
    errors (HTTPError/URLError/timeout) propagate to the caller.
    """
    headers = {
        "Accept": "application/json",
        "User-Agent": "pal-mcp-server/openrouter-model-sync",
    }
    with urlopen(Request(url, headers=headers), timeout=30) as response:
        encoding = response.headers.get_content_charset("utf-8")
        body = response.read().decode(encoding)
    parsed = json.loads(body)
    if isinstance(parsed, dict):
        return parsed
    raise ValueError("OpenRouter models payload must be a JSON object")
def _supports_parameter(model_data: dict, parameter: str) -> bool:
supported = model_data.get("supported_parameters")
return isinstance(supported, list) and parameter in supported
def _input_modalities(model_data: dict) -> list[str]:
architecture = model_data.get("architecture")
if not isinstance(architecture, dict):
return []
modalities = architecture.get("input_modalities")
if not isinstance(modalities, list):
return []
return [str(item) for item in modalities]
def _infer_temperature_fields(model_name: str, model_data: dict) -> tuple[bool, str]:
    """Decide ``(supports_temperature, temperature_constraint)`` for a model.

    Trust the API payload first; otherwise fall back to PAL's name-based
    temperature resolver.
    """
    if _supports_parameter(model_data, "temperature"):
        return True, "range"
    supported, _constraint, _reason = TemperatureConstraint.resolve_settings(model_name)
    return supported, "range" if supported else "fixed"
def convert_model(model_data: dict) -> dict | None:
    """Map one OpenRouter API model record onto a conservative PAL manifest entry.

    Returns None when the record lacks a usable string ``id``. Capability
    flags are derived from ``supported_parameters`` and input modalities;
    everything else gets conservative discovery defaults.
    """
    name = model_data.get("id")
    if not (isinstance(name, str) and name):
        return None

    modalities = _input_modalities(model_data)
    has_images = "image" in modalities
    supports_temp, temp_constraint = _infer_temperature_fields(name, model_data)

    context_length = model_data.get("context_length") or 0
    provider_info = model_data.get("top_provider")
    if not isinstance(provider_info, dict):
        provider_info = {}
    completion_cap = provider_info.get("max_completion_tokens") or 0

    reasoning = _supports_parameter(model_data, "reasoning") or _supports_parameter(
        model_data, "include_reasoning"
    )
    json_mode = _supports_parameter(model_data, "response_format") or _supports_parameter(
        model_data, "structured_outputs"
    )

    return {
        "model_name": name,
        "aliases": [],
        "context_window": int(context_length) if context_length else 0,
        "max_output_tokens": int(completion_cap) if completion_cap else 0,
        "supports_extended_thinking": reasoning,
        "supports_json_mode": json_mode,
        "supports_function_calling": _supports_parameter(model_data, "tools"),
        "supports_images": has_images,
        "max_image_size_mb": 20.0 if has_images else 0.0,
        "supports_temperature": supports_temp,
        "temperature_constraint": temp_constraint,
        "description": model_data.get("description") or model_data.get("name") or "",
        "intelligence_score": 10,
        "allow_code_generation": False,
    }
def build_output_document(source: dict, source_url: str) -> dict:
    """Assemble the generated manifest document from a fetched OpenRouter payload.

    Skips malformed records, sorts entries by model name for stable diffs,
    and prepends a ``_README`` block describing provenance.
    """
    converted = [
        entry
        for raw in source.get("data", [])
        if isinstance(raw, dict) and (entry := convert_model(raw))
    ]
    converted.sort(key=lambda entry: entry["model_name"])
    return {
        "_README": {
            "description": "Generated baseline OpenRouter catalogue for PAL MCP Server.",
            "source": source_url,
            "usage": "Generated by scripts/sync_openrouter_models.py. Curated overrides belong in conf/openrouter_models.json.",
            "field_notes": "Entries are conservative and intended as discovery data only. Curated manifest values override these at runtime.",
        },
        "models": converted,
    }
def write_output(path: Path, document: dict) -> None:
    """Serialize *document* as pretty-printed UTF-8 JSON at *path*.

    Creates missing parent directories, forces LF newlines regardless of
    platform, and ends the file with a trailing newline.
    """
    path.parent.mkdir(parents=True, exist_ok=True)
    serialized = json.dumps(document, indent=2, ensure_ascii=False)
    with path.open("w", encoding="utf-8", newline="\n") as handle:
        handle.write(serialized)
        handle.write("\n")
def parse_args() -> argparse.Namespace:
    """Parse CLI options: --url (source endpoint) and --output (manifest path)."""
    parser = argparse.ArgumentParser(description="Sync OpenRouter live model catalogue into PAL config.")
    parser.add_argument("--url", default=OPENROUTER_MODELS_URL, help="OpenRouter models endpoint")
    parser.add_argument(
        "--output",
        default=str(DEFAULT_OUTPUT),
        help="Path to the generated live OpenRouter manifest",
    )
    return parser.parse_args()
def main() -> int:
    """Fetch, convert, and write the live catalogue; return a process exit code."""
    args = parse_args()
    destination = Path(args.output)
    try:
        payload = fetch_openrouter_models(args.url)
        document = build_output_document(payload, args.url)
        write_output(destination, document)
    except (HTTPError, URLError, TimeoutError, ValueError, json.JSONDecodeError) as exc:
        # Report and fail cleanly instead of dumping a traceback for expected errors.
        print(f"Failed to sync OpenRouter models: {exc}", file=sys.stderr)
        return 1
    print(f"Wrote {len(document['models'])} OpenRouter models to {destination}")
    return 0
# Script entry point: exit with main()'s return code.
if __name__ == "__main__":
    raise SystemExit(main())

View File

@@ -3,6 +3,7 @@
import json import json
import os import os
import tempfile import tempfile
from pathlib import Path
from unittest.mock import patch from unittest.mock import patch
import pytest import pytest
@@ -22,6 +23,15 @@ class TestOpenRouterModelRegistry:
assert len(registry.list_models()) > 0 assert len(registry.list_models()) > 0
assert len(registry.list_aliases()) > 0 assert len(registry.list_aliases()) > 0
def test_default_init_resolves_live_only_model(self):
registry = OpenRouterModelRegistry()
config = registry.resolve("x-ai/grok-4")
assert config is not None
assert config.model_name == "x-ai/grok-4"
assert config.context_window == 256000
assert config.supports_extended_thinking is True
def test_custom_config_path(self): def test_custom_config_path(self):
"""Test registry with custom config path.""" """Test registry with custom config path."""
# Create temporary config # Create temporary config
@@ -42,14 +52,14 @@ class TestOpenRouterModelRegistry:
try: try:
registry = OpenRouterModelRegistry(config_path=temp_path) registry = OpenRouterModelRegistry(config_path=temp_path)
assert len(registry.list_models()) == 1
assert "test/model-1" in registry.list_models() assert "test/model-1" in registry.list_models()
assert "test1" in registry.list_aliases() assert "test1" in registry.list_aliases()
assert "t1" in registry.list_aliases() assert "t1" in registry.list_aliases()
assert registry.resolve("x-ai/grok-4") is not None
finally: finally:
os.unlink(temp_path) os.unlink(temp_path)
def test_environment_variable_override(self): def test_environment_variable_override(self, monkeypatch):
"""Test OPENROUTER_MODELS_CONFIG_PATH environment variable.""" """Test OPENROUTER_MODELS_CONFIG_PATH environment variable."""
# Create custom config # Create custom config
config_data = { config_data = {
@@ -64,8 +74,7 @@ class TestOpenRouterModelRegistry:
try: try:
# Set environment variable # Set environment variable
original_env = os.environ.get("OPENROUTER_MODELS_CONFIG_PATH") monkeypatch.setenv("OPENROUTER_MODELS_CONFIG_PATH", temp_path)
os.environ["OPENROUTER_MODELS_CONFIG_PATH"] = temp_path
# Create registry without explicit path # Create registry without explicit path
registry = OpenRouterModelRegistry() registry = OpenRouterModelRegistry()
@@ -75,11 +84,6 @@ class TestOpenRouterModelRegistry:
assert "envtest" in registry.list_aliases() assert "envtest" in registry.list_aliases()
finally: finally:
# Restore environment
if original_env is not None:
os.environ["OPENROUTER_MODELS_CONFIG_PATH"] = original_env
else:
del os.environ["OPENROUTER_MODELS_CONFIG_PATH"]
os.unlink(temp_path) os.unlink(temp_path)
def test_alias_resolution(self): def test_alias_resolution(self):
@@ -195,9 +199,8 @@ class TestOpenRouterModelRegistry:
with patch.dict("os.environ", {}, clear=True): with patch.dict("os.environ", {}, clear=True):
registry = OpenRouterModelRegistry(config_path="/non/existent/path.json") registry = OpenRouterModelRegistry(config_path="/non/existent/path.json")
# Should initialize with empty maps assert len(registry.list_models()) > 0
assert len(registry.list_models()) == 0 assert registry.resolve("x-ai/grok-4") is not None
assert len(registry.list_aliases()) == 0
assert registry.resolve("anything") is None assert registry.resolve("anything") is None
def test_invalid_json_config(self): def test_invalid_json_config(self):
@@ -208,12 +211,166 @@ class TestOpenRouterModelRegistry:
try: try:
registry = OpenRouterModelRegistry(config_path=temp_path) registry = OpenRouterModelRegistry(config_path=temp_path)
# Should handle gracefully and initialize empty assert len(registry.list_models()) > 0
assert len(registry.list_models()) == 0 assert registry.resolve("x-ai/grok-4") is not None
assert len(registry.list_aliases()) == 0
finally: finally:
os.unlink(temp_path) os.unlink(temp_path)
def test_live_catalogue_adds_unsynced_model_ids(self):
    """A model present only in the live manifest is merged into the registry.

    The curated file does not mention openai/gpt-5.4, so the live entry's
    metadata must be used verbatim.
    """
    curated_data = {
        "models": [
            {
                "model_name": "openai/gpt-5.2",
                "aliases": ["gpt5.2"],
                "context_window": 400000,
                "max_output_tokens": 128000,
            }
        ]
    }
    live_data = {
        "models": [
            {
                "model_name": "openai/gpt-5.4",
                "aliases": [],
                "context_window": 400000,
                "max_output_tokens": 128000,
                "supports_extended_thinking": True,
                "supports_json_mode": True,
                "supports_function_calling": False,
                "supports_images": True,
                "max_image_size_mb": 20.0,
                "supports_temperature": True,
                "temperature_constraint": "range",
                "description": "Live-only GPT-5.4 entry",
            }
        ]
    }
    # delete=False so the registry can reopen the files by path after the writer closes.
    with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as curated_file:
        json.dump(curated_data, curated_file)
        curated_path = curated_file.name
    with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as live_file:
        json.dump(live_data, live_file)
        live_path = live_file.name
    try:
        registry = OpenRouterModelRegistry(config_path=curated_path, live_config_path=live_path)
        assert "openai/gpt-5.4" in registry.list_models()
        caps = registry.resolve("openai/gpt-5.4")
        assert caps is not None
        assert caps.description == "Live-only GPT-5.4 entry"
    finally:
        os.unlink(curated_path)
        os.unlink(live_path)
def test_curated_manifest_overrides_live_metadata(self):
    """Curated fields win over live-baseline fields for the same model_name.

    Both manifests define openai/gpt-5.4 with deliberately conflicting
    values; every resolved capability must come from the curated entry.
    """
    curated_data = {
        "models": [
            {
                "model_name": "openai/gpt-5.4",
                "aliases": ["gpt5.4"],
                "context_window": 400000,
                "max_output_tokens": 128000,
                "supports_extended_thinking": True,
                "supports_json_mode": True,
                "supports_function_calling": True,
                "supports_images": True,
                "max_image_size_mb": 20.0,
                "supports_temperature": False,
                "temperature_constraint": "fixed",
                "description": "Curated override",
                "intelligence_score": 18,
                "allow_code_generation": True,
                "use_openai_response_api": True,
            }
        ]
    }
    live_data = {
        "models": [
            {
                "model_name": "openai/gpt-5.4",
                "aliases": [],
                "context_window": 1234,
                "max_output_tokens": 5678,
                "supports_extended_thinking": False,
                "supports_json_mode": False,
                "supports_function_calling": False,
                "supports_images": False,
                "max_image_size_mb": 0.0,
                "supports_temperature": True,
                "temperature_constraint": "range",
                "description": "Live baseline",
                "intelligence_score": 10,
                "allow_code_generation": False,
                "use_openai_response_api": False,
            }
        ]
    }
    # delete=False so the registry can reopen the files by path after the writer closes.
    with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as curated_file:
        json.dump(curated_data, curated_file)
        curated_path = curated_file.name
    with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as live_file:
        json.dump(live_data, live_file)
        live_path = live_file.name
    try:
        registry = OpenRouterModelRegistry(config_path=curated_path, live_config_path=live_path)
        # Resolve via the curated alias to confirm aliases survive the merge too.
        caps = registry.resolve("gpt5.4")
        assert caps is not None
        assert caps.model_name == "openai/gpt-5.4"
        assert caps.description == "Curated override"
        assert caps.context_window == 400000
        assert caps.max_output_tokens == 128000
        assert caps.supports_function_calling is True
        assert caps.supports_temperature is False
        assert caps.allow_code_generation is True
        assert caps.use_openai_response_api is True
    finally:
        os.unlink(curated_path)
        os.unlink(live_path)
def test_missing_live_catalogue_keeps_curated_models_working(self, monkeypatch):
    """A nonexistent live-manifest path must not break curated model resolution."""
    absent = Path(tempfile.gettempdir()) / "pal-missing-openrouter-live.json"
    absent.unlink(missing_ok=True)
    monkeypatch.setenv("OPENROUTER_LIVE_MODELS_CONFIG_PATH", str(absent))
    registry = OpenRouterModelRegistry()
    assert "openai/o3" in registry.list_models()
    assert registry.resolve("o3") is not None
def test_invalid_live_json_keeps_curated_models_working(self):
    """Malformed JSON in the live manifest is ignored; curated entries still resolve."""
    curated = {
        "models": [
            {
                "model_name": "openai/gpt-5.2",
                "aliases": ["gpt5.2"],
                "context_window": 400000,
                "max_output_tokens": 128000,
            }
        ]
    }
    with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as handle:
        json.dump(curated, handle)
        curated_path = handle.name
    with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as handle:
        handle.write("{ invalid json }")
        live_path = handle.name
    try:
        registry = OpenRouterModelRegistry(config_path=curated_path, live_config_path=live_path)
        assert "openai/gpt-5.2" in registry.list_models()
        assert registry.resolve("gpt5.2") is not None
    finally:
        os.unlink(curated_path)
        os.unlink(live_path)
def test_model_with_all_capabilities(self): def test_model_with_all_capabilities(self):
"""Test model with all capability flags.""" """Test model with all capability flags."""
from providers.shared import TemperatureConstraint from providers.shared import TemperatureConstraint

View File

@@ -0,0 +1,53 @@
import importlib.util
from pathlib import Path
# Load scripts/sync_openrouter_models.py dynamically: it is a standalone
# script outside the package tree, so a normal import cannot reach it.
SCRIPT_PATH = Path(__file__).resolve().parents[1] / "scripts" / "sync_openrouter_models.py"
SPEC = importlib.util.spec_from_file_location("sync_openrouter_models", SCRIPT_PATH)
assert SPEC is not None and SPEC.loader is not None
MODULE = importlib.util.module_from_spec(SPEC)
SPEC.loader.exec_module(MODULE)
def test_convert_model_maps_openrouter_payload_conservatively():
    """convert_model maps capability hints while keeping conservative defaults."""
    payload = {
        "id": "openai/gpt-5.4",
        "description": "GPT-5.4 description",
        "context_length": 400000,
        "top_provider": {"max_completion_tokens": 128000},
        "architecture": {"input_modalities": ["text", "image"]},
        "supported_parameters": ["temperature", "reasoning", "response_format", "structured_outputs"],
    }
    entry = MODULE.convert_model(payload)
    assert entry is not None
    assert entry["model_name"] == "openai/gpt-5.4"
    assert entry["context_window"] == 400000
    assert entry["max_output_tokens"] == 128000
    # Flags derived from supported_parameters / modalities.
    for field in ("supports_extended_thinking", "supports_json_mode", "supports_images", "supports_temperature"):
        assert entry[field] is True
    assert entry["supports_function_calling"] is False
    assert entry["max_image_size_mb"] == 20.0
    assert entry["temperature_constraint"] == "range"
    # Conservative default: never enable code generation from discovery data.
    assert entry["allow_code_generation"] is False
def test_convert_model_marks_reasoning_models_without_temperature_as_fixed():
    """Reasoning models lacking a temperature parameter become fixed-temperature."""
    payload = {
        "id": "openai/o3",
        "description": "o3 description",
        "context_length": 200000,
        "top_provider": {},
        "architecture": {"input_modalities": ["text"]},
        "supported_parameters": ["reasoning"],
    }
    entry = MODULE.convert_model(payload)
    assert entry is not None
    assert entry["supports_temperature"] is False
    assert entry["temperature_constraint"] == "fixed"
    assert entry["supports_images"] is False