refactor: remove method from provider; use model capabilities instead

refactor: clean up temperature factory method
Fahad
2025-10-02 11:08:56 +04:00
parent 9c11ecc4bf
commit a254ff2220
25 changed files with 105 additions and 224 deletions
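The temperature cleanup replaces the module-level create_temperature_constraint(...) helper with a classmethod on TemperatureConstraint itself, as the hunks below show. A minimal sketch of what such a classmethod could look like follows; the dataclass fields, the "fixed" branch, and the numeric defaults are assumptions, not the actual contents of the shared module:

# Hypothetical sketch only: the real TemperatureConstraint in the shared module
# may be structured differently. It just illustrates moving the old
# create_temperature_constraint() factory onto the class as a classmethod.
from dataclasses import dataclass


@dataclass
class TemperatureConstraint:
    min_temp: float = 0.0
    max_temp: float = 2.0
    default_temp: float = 0.3

    @classmethod
    def create(cls, constraint_type: str) -> "TemperatureConstraint":
        # Dispatch on the same string keys the old module-level factory accepted.
        if constraint_type == "range":
            return cls(min_temp=0.0, max_temp=2.0, default_temp=0.3)
        if constraint_type == "fixed":  # assumed key; only "range" appears in this diff
            return cls(min_temp=1.0, max_temp=1.0, default_temp=1.0)
        raise ValueError(f"Unknown temperature constraint type: {constraint_type}")


# Call sites change from create_temperature_constraint("range") to:
constraint = TemperatureConstraint.create("range")

Keeping the factory on the class removes one more free function from the shared module and keeps the constraint types discoverable alongside the class they construct.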


@@ -12,12 +12,7 @@ from google import genai
from google.genai import types
from .base import ModelProvider
-from .shared import (
-    ModelCapabilities,
-    ModelResponse,
-    ProviderType,
-    create_temperature_constraint,
-)
+from .shared import ModelCapabilities, ModelResponse, ProviderType, TemperatureConstraint
logger = logging.getLogger(__name__)
@@ -46,7 +41,7 @@ class GeminiModelProvider(ModelProvider):
supports_images=True, # Vision capability
max_image_size_mb=32.0, # Higher limit for Pro model
supports_temperature=True,
-temperature_constraint=create_temperature_constraint("range"),
+temperature_constraint=TemperatureConstraint.create("range"),
max_thinking_tokens=32768, # Max thinking tokens for Pro model
description="Deep reasoning + thinking mode (1M context) - Complex problems, architecture, deep analysis",
aliases=["pro", "gemini pro", "gemini-pro"],
@@ -65,7 +60,7 @@ class GeminiModelProvider(ModelProvider):
supports_images=True, # Vision capability
max_image_size_mb=20.0, # Conservative 20MB limit for reliability
supports_temperature=True,
-temperature_constraint=create_temperature_constraint("range"),
+temperature_constraint=TemperatureConstraint.create("range"),
max_thinking_tokens=24576, # Same as 2.5 flash for consistency
description="Gemini 2.0 Flash (1M context) - Latest fast model with experimental thinking, supports audio/video input",
aliases=["flash-2.0", "flash2"],
@@ -84,7 +79,7 @@ class GeminiModelProvider(ModelProvider):
supports_images=False, # Does not support images
max_image_size_mb=0.0, # No image support
supports_temperature=True,
-temperature_constraint=create_temperature_constraint("range"),
+temperature_constraint=TemperatureConstraint.create("range"),
description="Gemini 2.0 Flash Lite (1M context) - Lightweight fast model, text-only",
aliases=["flashlite", "flash-lite"],
),
@@ -102,7 +97,7 @@ class GeminiModelProvider(ModelProvider):
supports_images=True, # Vision capability
max_image_size_mb=20.0, # Conservative 20MB limit for reliability
supports_temperature=True,
-temperature_constraint=create_temperature_constraint("range"),
+temperature_constraint=TemperatureConstraint.create("range"),
max_thinking_tokens=24576, # Flash 2.5 thinking budget limit
description="Ultra-fast (1M context) - Quick analysis, simple queries, rapid iterations",
aliases=["flash", "flash2.5"],
@@ -397,11 +392,6 @@ class GeminiModelProvider(ModelProvider):
return True
-    def supports_thinking_mode(self, model_name: str) -> bool:
-        """Check if the model supports extended thinking mode."""
-        capabilities = self.get_capabilities(model_name)
-        return capabilities.supports_extended_thinking

    def get_thinking_budget(self, model_name: str, thinking_mode: str) -> int:
        """Get actual thinking token budget for a model and thinking mode."""
        resolved_name = self._resolve_model_name(model_name)
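The final hunk drops the supports_thinking_mode() wrapper, whose body only forwarded to the capabilities object. Call sites would migrate roughly as follows; the import path, constructor arguments, and the "pro" alias are illustrative placeholders:

# Illustrative migration only; module path and constructor arguments assumed.
from providers.gemini import GeminiModelProvider

provider = GeminiModelProvider(api_key="...")

# Before: callers went through the thin wrapper on the provider.
#     if provider.supports_thinking_mode("pro"):
#         ...

# After: query the model capabilities directly, which is exactly what the
# removed wrapper did internally.
capabilities = provider.get_capabilities("pro")
if capabilities.supports_extended_thinking:
    ...  # enable thinking mode / pick a thinking token budget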