my-pal-mcp-server/providers/shared/model_capabilities.py
"""Dataclass describing the feature set of a model exposed by a provider."""
from dataclasses import dataclass, field
from typing import Optional
from .provider_type import ProviderType
from .temperature import RangeTemperatureConstraint, TemperatureConstraint
__all__ = ["ModelCapabilities"]
@dataclass
class ModelCapabilities:
"""Static description of what a model can do within a provider.
Role
Acts as the canonical record for everything the server needs to know
about a model—its provider, token limits, feature switches, aliases,
and temperature rules. Providers populate these objects so tools and
higher-level services can rely on a consistent schema.
Typical usage
* Provider subclasses declare `MODEL_CAPABILITIES` maps containing these
objects (for example ``OpenAIModelProvider``)
* Helper utilities (e.g. restriction validation, alias expansion) read
these objects to build model lists for tooling and policy enforcement
* Tool selection logic inspects attributes such as
``supports_extended_thinking`` or ``context_window`` to choose an
appropriate model for a task.
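
    Example
        A minimal, illustrative instance; the field values are invented for
        this sketch, and ``ProviderType.OPENAI`` is assumed here purely for
        illustration:

            ModelCapabilities(
                provider=ProviderType.OPENAI,
                model_name="gpt-4.1",
                friendly_name="GPT-4.1",
                context_window=1_000_000,
                supports_function_calling=True,
                aliases=["gpt4.1"],
            )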
"""
provider: ProviderType
model_name: str
friendly_name: str
description: str = ""
aliases: list[str] = field(default_factory=list)
# Capacity limits / resource budgets
context_window: int = 0
max_output_tokens: int = 0
max_thinking_tokens: int = 0
# Capability flags
supports_extended_thinking: bool = False
supports_system_prompts: bool = True
supports_streaming: bool = True
supports_function_calling: bool = False
supports_images: bool = False
supports_json_mode: bool = False
supports_temperature: bool = True
# Additional attributes
max_image_size_mb: float = 0.0
is_custom: bool = False
temperature_constraint: TemperatureConstraint = field(
default_factory=lambda: RangeTemperatureConstraint(0.0, 2.0, 0.3)
)

    def get_effective_temperature(self, requested_temperature: float) -> Optional[float]:
        """Return the temperature that should be sent to the provider.

        Models that do not support temperature return ``None`` so that callers
        can omit the parameter entirely. For supported models, the configured
        constraint clamps the requested value into a provider-safe range.
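
        Example of how a caller might consume the result (a sketch; ``payload``
        and ``requested`` are hypothetical names on the caller's side):

            temperature = capabilities.get_effective_temperature(requested)
            if temperature is not None:
                payload["temperature"] = temperature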
"""
if not self.supports_temperature:
return None
return self.temperature_constraint.get_corrected_value(requested_temperature)

    @staticmethod
    def collect_aliases(model_configs: dict[str, "ModelCapabilities"]) -> dict[str, list[str]]:
        """Build a mapping of model name to aliases from capability configs.
        """
        return {
            base_model: capabilities.aliases
            for base_model, capabilities in model_configs.items()
            if capabilities.aliases
        }

    @staticmethod
    def collect_model_names(
        model_configs: dict[str, "ModelCapabilities"],
        *,
        include_aliases: bool = True,
        lowercase: bool = False,
        unique: bool = False,
    ) -> list[str]:
"""Build an ordered list of model names and aliases.
Args:
model_configs: Mapping of canonical model names to capabilities.
include_aliases: When True, include aliases for each model.
lowercase: When True, normalize names to lowercase.
unique: When True, ensure each returned name appears once (after formatting).
Returns:
Ordered list of model names (and optionally aliases) formatted per options.
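
        Example (illustrative; assumes ``caps`` is a ``ModelCapabilities``
        instance whose ``aliases`` list is ``["gpt4.1"]``):

            ModelCapabilities.collect_model_names({"GPT-4.1": caps}, lowercase=True)
            # -> ["gpt-4.1", "gpt4.1"]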
"""
formatted_names: list[str] = []
seen: set[str] | None = set() if unique else None
def append_name(name: str) -> None:
formatted = name.lower() if lowercase else name
if seen is not None:
if formatted in seen:
return
seen.add(formatted)
formatted_names.append(formatted)

        for base_model, capabilities in model_configs.items():
            append_name(base_model)
            if include_aliases and capabilities.aliases:
                for alias in capabilities.aliases:
                    append_name(alias)

        return formatted_names