Gemini model rename

This commit is contained in:
Fahad
2025-06-19 05:37:40 +04:00
parent b6ad76b39a
commit d0da6ce9e4
37 changed files with 187 additions and 187 deletions

View File

@@ -19,14 +19,14 @@ class GeminiModelProvider(ModelProvider):
# Model configurations
SUPPORTED_MODELS = {
-        "gemini-2.5-flash-preview-05-20": {
+        "gemini-2.5-flash": {
"context_window": 1_048_576, # 1M tokens
"supports_extended_thinking": True,
"max_thinking_tokens": 24576, # Flash 2.5 thinking budget limit
"supports_images": True, # Vision capability
"max_image_size_mb": 20.0, # Conservative 20MB limit for reliability
},
-        "gemini-2.5-pro-preview-06-05": {
+        "gemini-2.5-pro": {
"context_window": 1_048_576, # 1M tokens
"supports_extended_thinking": True,
"max_thinking_tokens": 32768, # Pro 2.5 thinking budget limit
@@ -34,8 +34,8 @@ class GeminiModelProvider(ModelProvider):
"max_image_size_mb": 32.0, # Higher limit for Pro model
},
# Shorthands
-        "flash": "gemini-2.5-flash-preview-05-20",
-        "pro": "gemini-2.5-pro-preview-06-05",
+        "flash": "gemini-2.5-flash",
+        "pro": "gemini-2.5-pro",
}
# Thinking mode configurations - percentages of model's max_thinking_tokens
@@ -364,8 +364,8 @@ class GeminiModelProvider(ModelProvider):
"""Check if the model supports vision (image processing)."""
# Gemini 2.5 models support vision
vision_models = {
-            "gemini-2.5-flash-preview-05-20",
-            "gemini-2.5-pro-preview-06-05",
+            "gemini-2.5-flash",
+            "gemini-2.5-pro",
"gemini-2.0-flash",
"gemini-1.5-pro",
"gemini-1.5-flash",

View File

@@ -105,7 +105,7 @@ class ModelProviderRegistry:
3. OPENROUTER - Catch-all for cloud models via unified API
Args:
-            model_name: Name of the model (e.g., "gemini-2.5-flash-preview-05-20", "o3-mini")
+            model_name: Name of the model (e.g., "gemini-2.5-flash", "o3-mini")
Returns:
ModelProvider instance that supports this model
@@ -295,7 +295,7 @@ class ModelProviderRegistry:
return custom_models[0]
else:
# Fallback to pro if nothing found
-                return "gemini-2.5-pro-preview-06-05"
+                return "gemini-2.5-pro"
elif tool_category == ToolModelCategory.FAST_RESPONSE:
# Prefer fast, cost-efficient models
@@ -325,7 +325,7 @@ class ModelProviderRegistry:
return custom_models[0]
else:
# Default to flash
-                return "gemini-2.5-flash-preview-05-20"
+                return "gemini-2.5-flash"
# BALANCED or no category specified - use existing balanced logic
if openai_available and "o4-mini" in openai_models:
@@ -353,7 +353,7 @@ class ModelProviderRegistry:
# This might happen if all models are restricted
logging.warning("No models available due to restrictions")
# Return a reasonable default for backward compatibility
-        return "gemini-2.5-flash-preview-05-20"
+        return "gemini-2.5-flash"
@classmethod
def _find_extended_thinking_model(cls) -> Optional[str]:
@@ -383,7 +383,7 @@ class ModelProviderRegistry:
preferred_models = [
"anthropic/claude-3.5-sonnet",
"anthropic/claude-3-opus-20240229",
-            "google/gemini-2.5-pro-preview-06-05",
+            "google/gemini-2.5-pro",
"google/gemini-pro-1.5",
"meta-llama/llama-3.1-70b-instruct",
"mistralai/mixtral-8x7b-instruct",