feat: added `intelligence_score` to the model capabilities schema; a 1–20 number that can be specified to influence the sort order of models presented to the CLI in auto-selection mode
fix: re-introduced the model definition into the schema, but intelligently; only a summary is generated per tool. Required to ensure the CLI calls and uses the correct model
fix: removed the `model` param from tools where it wasn't needed
fix: fixed adherence to `*_ALLOWED_MODELS` by advertising only the allowed models to the CLI
fix: removed duplicates across providers when passing canonical names back to the CLI; the first enabled provider wins
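The last fix follows a simple rule. A minimal sketch of it (the helper, data shapes, and model names below are illustrative, not the actual implementation):

    def dedupe_canonical_names(providers):
        """providers: iterable of (provider_name, enabled, model_names) tuples."""
        winners: dict[str, str] = {}
        for provider_name, enabled, model_names in providers:
            if not enabled:
                continue  # disabled providers never claim a canonical name
            for name in model_names:
                winners.setdefault(name, provider_name)  # first enabled provider wins
        return winners

    # Both providers advertise "gpt-5"; the one listed (and enabled) first wins.
    providers = [
        ("openai", True, ["gpt-5", "o3"]),
        ("openrouter", True, ["gpt-5", "claude-sonnet"]),
    ]
    assert dedupe_canonical_names(providers)["gpt-5"] == "openai"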
@@ -1,5 +1,6 @@
 """Dataclass describing the feature set of a model exposed by a provider."""

+import math
 from dataclasses import dataclass, field
 from typing import Optional
@@ -32,6 +33,7 @@ class ModelCapabilities:
     provider: ProviderType
     model_name: str
     friendly_name: str
+    intelligence_score: int = 10  # Human-curated 1–20 score reflecting general capability
     description: str = ""
     aliases: list[str] = field(default_factory=list)
@@ -69,6 +71,42 @@ class ModelCapabilities:
         return self.temperature_constraint.get_corrected_value(requested_temperature)

+    def get_effective_capability_rank(self) -> int:
+        """Calculate the runtime capability rank from intelligence + capabilities."""
+
+        # Human signal drives the baseline (1–20 → 5–100 after scaling)
+        base_intelligence = self.intelligence_score if self.intelligence_score else 10
+        base_intelligence = max(1, min(20, base_intelligence))
+        score = base_intelligence * 5
+
+        # Context window bonus with gentle diminishing returns
+        ctx_bonus = 0
+        ctx = max(self.context_window, 0)
+        if ctx > 0:
+            ctx_bonus = int(min(5, max(0.0, math.log10(ctx) - 3)))
+        score += ctx_bonus
+
+        # Output token capacity adds a small bonus
+        if self.max_output_tokens >= 65_000:
+            score += 2
+        elif self.max_output_tokens >= 32_000:
+            score += 1
+
+        # Feature-level boosts
+        if self.supports_extended_thinking:
+            score += 3
+        if self.supports_function_calling:
+            score += 1
+        if self.supports_json_mode:
+            score += 1
+        if self.supports_images:
+            score += 1
+
+        if self.is_custom:
+            score -= 1
+
+        return max(0, min(100, score))
+
     @staticmethod
     def collect_aliases(model_configs: dict[str, "ModelCapabilities"]) -> dict[str, list[str]]:
         """Build a mapping of model name to aliases from capability configs."""
@@ -112,7 +150,13 @@ class ModelCapabilities:
             formatted_names.append(formatted)

-        for base_model, capabilities in model_configs.items():
+        # Sort models by capability rank (descending) then by name for deterministic ordering
+        sorted_items = sorted(
+            model_configs.items(),
+            key=lambda item: (-item[1].get_effective_capability_rank(), item[0]),
+        )
+
+        for base_model, capabilities in sorted_items:
             append_name(base_model)

             if include_aliases and capabilities.aliases:
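A quick sketch of the resulting order (model names and ranks are hypothetical): the key `(-rank, name)` puts higher-ranked models first and breaks ties alphabetically, so the list advertised to the CLI is deterministic across runs.

    ranks = {"model-b": 81, "model-a": 81, "model-c": 60}  # hypothetical ranks
    ordered = sorted(ranks, key=lambda name: (-ranks[name], name))
    assert ordered == ["model-a", "model-b", "model-c"]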