Use consistent terminology

commit b16f85979b
parent e2762c4ed0
Author: Fahad
Date:   2025-06-13 09:06:12 +04:00

13 changed files with 38 additions and 52 deletions


@@ -105,7 +105,7 @@ class ModelCapabilities:
     provider: ProviderType
     model_name: str
     friendly_name: str  # Human-friendly name like "Gemini" or "OpenAI"
-    max_tokens: int
+    context_window: int  # Total context window size in tokens
     supports_extended_thinking: bool = False
     supports_system_prompts: bool = True
     supports_streaming: bool = True
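
Downstream token budgeting now reads the same field name everywhere. A minimal sketch of the arithmetic this enables, using a pared-down stand-in for the real dataclass (the reserve_for_response parameter and the helper itself are illustrative, not part of this commit):

from dataclasses import dataclass

# Pared-down stand-in for the real ModelCapabilities (illustration only).
@dataclass
class ModelCapabilities:
    context_window: int  # Total context window size in tokens

def available_input_tokens(caps: ModelCapabilities, reserve_for_response: int = 4_096) -> int:
    # Tokens left for the prompt once a response budget is carved out of
    # the model's total context window.
    return max(caps.context_window - reserve_for_response, 0)

caps = ModelCapabilities(context_window=1_048_576)
print(available_input_tokens(caps))  # 1044480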


@@ -14,12 +14,12 @@ class GeminiModelProvider(ModelProvider):
     # Model configurations
     SUPPORTED_MODELS = {
         "gemini-2.5-flash-preview-05-20": {
-            "max_tokens": 1_048_576,  # 1M tokens
+            "context_window": 1_048_576,  # 1M tokens
             "supports_extended_thinking": True,
             "max_thinking_tokens": 24576,  # Flash 2.5 thinking budget limit
         },
         "gemini-2.5-pro-preview-06-05": {
-            "max_tokens": 1_048_576,  # 1M tokens
+            "context_window": 1_048_576,  # 1M tokens
             "supports_extended_thinking": True,
             "max_thinking_tokens": 32768,  # Pro 2.5 thinking budget limit
         },
@@ -68,7 +68,7 @@ class GeminiModelProvider(ModelProvider):
             provider=ProviderType.GOOGLE,
             model_name=resolved_name,
             friendly_name="Gemini",
-            max_tokens=config["max_tokens"],
+            context_window=config["context_window"],
             supports_extended_thinking=config["supports_extended_thinking"],
             supports_system_prompts=True,
             supports_streaming=True,
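
The provider reads the renamed key straight out of SUPPORTED_MODELS when building capabilities. A quick sketch of that lookup in isolation, with the dictionary shape copied from the hunks above (the helper function is hypothetical):

SUPPORTED_MODELS = {
    "gemini-2.5-flash-preview-05-20": {
        "context_window": 1_048_576,  # 1M tokens
        "supports_extended_thinking": True,
        "max_thinking_tokens": 24576,
    },
}

def context_window_for(model_name: str) -> int:
    # Plain dict lookup; raises KeyError for models the provider doesn't know.
    return SUPPORTED_MODELS[model_name]["context_window"]

assert context_window_for("gemini-2.5-flash-preview-05-20") == 1_048_576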


@@ -15,11 +15,11 @@ class OpenAIModelProvider(OpenAICompatibleProvider):
     # Model configurations
     SUPPORTED_MODELS = {
         "o3": {
-            "max_tokens": 200_000,  # 200K tokens
+            "context_window": 200_000,  # 200K tokens
             "supports_extended_thinking": False,
         },
         "o3-mini": {
-            "max_tokens": 200_000,  # 200K tokens
+            "context_window": 200_000,  # 200K tokens
             "supports_extended_thinking": False,
         },
     }
@@ -49,7 +49,7 @@ class OpenAIModelProvider(OpenAICompatibleProvider):
             provider=ProviderType.OPENAI,
             model_name=model_name,
             friendly_name="OpenAI",
-            max_tokens=config["max_tokens"],
+            context_window=config["context_window"],
             supports_extended_thinking=config["supports_extended_thinking"],
             supports_system_prompts=True,
             supports_streaming=True,


@@ -109,7 +109,7 @@ class OpenRouterProvider(OpenAICompatibleProvider):
             provider=ProviderType.OPENROUTER,
             model_name=resolved_name,
             friendly_name=self.FRIENDLY_NAME,
-            max_tokens=32_768,  # Conservative default context window
+            context_window=32_768,  # Conservative default context window
             supports_extended_thinking=False,
             supports_system_prompts=True,
             supports_streaming=True,
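
For models OpenRouter serves but the provider has no metadata for, the 32_768-token figure above acts as a floor it can trust. A hedged sketch of that fallback pattern (the mapping and function name are assumptions, not this project's code):

DEFAULT_CONTEXT_WINDOW = 32_768  # Conservative default from the hunk above

def resolve_context_window(model_name: str, known_windows: dict[str, int]) -> int:
    # Prefer the published window when the model is known; otherwise stay
    # conservative so prompt budgeting never overshoots the real limit.
    return known_windows.get(model_name, DEFAULT_CONTEXT_WINDOW)

print(resolve_context_window("some/unlisted-model", {}))  # 32768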


@@ -30,7 +30,7 @@ class OpenRouterModelConfig:
             provider=ProviderType.OPENROUTER,
             model_name=self.model_name,
             friendly_name="OpenRouter",
-            max_tokens=self.context_window,  # ModelCapabilities still uses max_tokens
+            context_window=self.context_window,
             supports_extended_thinking=self.supports_extended_thinking,
             supports_system_prompts=self.supports_system_prompts,
             supports_streaming=self.supports_streaming,
@@ -103,10 +103,6 @@ class OpenRouterModelRegistry:
         # Parse models
         configs = []
         for model_data in data.get("models", []):
-            # Handle backwards compatibility - rename max_tokens to context_window
-            if "max_tokens" in model_data and "context_window" not in model_data:
-                model_data["context_window"] = model_data.pop("max_tokens")
             config = OpenRouterModelConfig(**model_data)
             configs.append(config)
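
With the shim above removed, a custom models file that still says "max_tokens" is no longer rewritten on load. If an installation needs a one-off migration, something like this sketch would do it (the script and its path argument are hypothetical, not part of the commit):

import json
from pathlib import Path

def migrate_models_config(path: Path) -> None:
    # Rewrite legacy "max_tokens" keys as "context_window", in place.
    data = json.loads(path.read_text())
    for model in data.get("models", []):
        if "max_tokens" in model and "context_window" not in model:
            model["context_window"] = model.pop("max_tokens")
    path.write_text(json.dumps(data, indent=2))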