156 lines
5.4 KiB
Python
156 lines
5.4 KiB
Python
#!/usr/bin/env python3
|
|
|
|
import argparse
|
|
import json
|
|
import sys
|
|
from pathlib import Path
|
|
from urllib.error import HTTPError, URLError
|
|
from urllib.request import Request, urlopen
|
|
|
|
from providers.shared.temperature import TemperatureConstraint
|
|
|
|
|
|
ROOT = Path(__file__).resolve().parents[1]
|
|
DEFAULT_OUTPUT = ROOT / "conf" / "openrouter_models_live.json"
|
|
OPENROUTER_MODELS_URL = "https://openrouter.ai/api/v1/models"
|
|
|
|
|
|
def fetch_openrouter_models(url: str) -> dict:
    """Download the OpenRouter model catalogue and return the decoded payload.

    Args:
        url: Endpoint returning the OpenRouter `/api/v1/models` JSON document.

    Returns:
        The top-level JSON object as a dict.

    Raises:
        ValueError: If the response decodes to JSON that is not an object.
        urllib.error.HTTPError / URLError: On transport failures.
    """
    headers = {
        "Accept": "application/json",
        "User-Agent": "pal-mcp-server/openrouter-model-sync",
    }
    request = Request(url, headers=headers)

    # Honour the charset advertised by the server, defaulting to UTF-8.
    with urlopen(request, timeout=30) as response:
        encoding = response.headers.get_content_charset("utf-8")
        body = response.read().decode(encoding)

    parsed = json.loads(body)
    if isinstance(parsed, dict):
        return parsed
    raise ValueError("OpenRouter models payload must be a JSON object")
|
|
|
|
|
|
def _supports_parameter(model_data: dict, parameter: str) -> bool:
|
|
supported = model_data.get("supported_parameters")
|
|
return isinstance(supported, list) and parameter in supported
|
|
|
|
|
|
def _input_modalities(model_data: dict) -> list[str]:
|
|
architecture = model_data.get("architecture")
|
|
if not isinstance(architecture, dict):
|
|
return []
|
|
|
|
modalities = architecture.get("input_modalities")
|
|
if not isinstance(modalities, list):
|
|
return []
|
|
|
|
return [str(item) for item in modalities]
|
|
|
|
|
|
def _infer_temperature_fields(model_name: str, model_data: dict) -> tuple[bool, str]:
    """Decide (supports_temperature, constraint_kind) for one model.

    An explicit "temperature" entry in the model's supported_parameters is
    authoritative; otherwise fall back to the shared TemperatureConstraint
    name-based heuristics.
    """
    if _supports_parameter(model_data, "temperature"):
        return True, "range"

    supported, _constraint, _reason = TemperatureConstraint.resolve_settings(model_name)
    constraint_kind = "range" if supported else "fixed"
    return supported, constraint_kind
|
|
|
|
|
|
def convert_model(model_data: dict) -> dict | None:
    """Translate one raw OpenRouter API model record into PAL manifest shape.

    Args:
        model_data: A single entry from the API's `data` array.

    Returns:
        A manifest dict, or None when the record lacks a usable string `id`.
    """
    name = model_data.get("id")
    if not (isinstance(name, str) and name):
        return None

    modalities = _input_modalities(model_data)
    has_images = "image" in modalities
    supports_temp, temp_constraint = _infer_temperature_fields(name, model_data)

    # `context_length` may be missing or falsy; normalise to 0 in both cases.
    context = model_data.get("context_length") or 0

    provider = model_data.get("top_provider")
    if not isinstance(provider, dict):
        provider = {}
    max_out = provider.get("max_completion_tokens") or 0

    reasoning = _supports_parameter(model_data, "reasoning") or _supports_parameter(
        model_data, "include_reasoning"
    )
    json_mode = _supports_parameter(model_data, "response_format") or _supports_parameter(
        model_data, "structured_outputs"
    )

    return {
        "model_name": name,
        "aliases": [],
        "context_window": int(context) if context else 0,
        "max_output_tokens": int(max_out) if max_out else 0,
        "supports_extended_thinking": reasoning,
        "supports_json_mode": json_mode,
        "supports_function_calling": _supports_parameter(model_data, "tools"),
        "supports_images": has_images,
        # 20 MB is a conservative blanket cap for image-capable models.
        "max_image_size_mb": 20.0 if has_images else 0.0,
        "supports_temperature": supports_temp,
        "temperature_constraint": temp_constraint,
        "description": model_data.get("description") or model_data.get("name") or "",
        "intelligence_score": 10,
        "allow_code_generation": False,
    }
|
|
|
|
|
|
def build_output_document(source: dict, source_url: str) -> dict:
    """Assemble the generated-manifest document from the raw API payload.

    Args:
        source: Decoded OpenRouter API response (expects a `data` list).
        source_url: Endpoint the payload was fetched from, recorded in _README.

    Returns:
        A dict with a `_README` provenance section and a sorted `models` list.
    """
    # Robustness fix: a non-list "data" value (e.g. an error object or scalar)
    # previously raised TypeError here, which main() does not catch. Treat any
    # non-list as an empty catalogue instead.
    raw_entries = source.get("data")
    if not isinstance(raw_entries, list):
        raw_entries = []

    models = []
    for model_data in raw_entries:
        if not isinstance(model_data, dict):
            continue

        converted = convert_model(model_data)
        if converted:
            models.append(converted)

    # Deterministic ordering keeps regenerated files diff-friendly.
    models.sort(key=lambda item: item["model_name"])

    return {
        "_README": {
            "description": "Generated baseline OpenRouter catalogue for PAL MCP Server.",
            "source": source_url,
            "usage": "Generated by scripts/sync_openrouter_models.py. Curated overrides belong in conf/openrouter_models.json.",
            "field_notes": "Entries are conservative and intended as discovery data only. Curated manifest values override these at runtime.",
        },
        "models": models,
    }
|
|
|
|
|
|
def write_output(path: Path, document: dict) -> None:
    """Serialize *document* as pretty-printed JSON to *path*.

    Creates missing parent directories and forces LF line endings so the
    generated file is identical across platforms.
    """
    path.parent.mkdir(parents=True, exist_ok=True)
    text = json.dumps(document, indent=2, ensure_ascii=False) + "\n"
    with path.open("w", encoding="utf-8", newline="\n") as handle:
        handle.write(text)
|
|
|
|
|
|
def parse_args() -> argparse.Namespace:
    """Build and evaluate the command-line interface for this script."""
    cli = argparse.ArgumentParser(description="Sync OpenRouter live model catalogue into PAL config.")
    cli.add_argument("--url", default=OPENROUTER_MODELS_URL, help="OpenRouter models endpoint")
    cli.add_argument(
        "--output",
        default=str(DEFAULT_OUTPUT),
        help="Path to the generated live OpenRouter manifest",
    )
    return cli.parse_args()
|
|
|
|
|
|
def main() -> int:
    """Fetch, convert, and persist the OpenRouter catalogue.

    Returns:
        0 on success, 1 when fetching/decoding/writing fails (the error is
        reported on stderr).
    """
    args = parse_args()
    destination = Path(args.output)

    try:
        payload = fetch_openrouter_models(args.url)
        document = build_output_document(payload, args.url)
        write_output(destination, document)
    except (HTTPError, URLError, TimeoutError, ValueError, json.JSONDecodeError) as exc:
        print(f"Failed to sync OpenRouter models: {exc}", file=sys.stderr)
        return 1

    print(f"Wrote {len(document['models'])} OpenRouter models to {destination}")
    return 0
|
|
|
|
|
|
if __name__ == "__main__":
    # sys.exit raises SystemExit with main()'s return code, same as the
    # explicit raise form.
    sys.exit(main())
|