Fix GOOGLE_ALLOWED_MODELS shorthand restriction validation
- Fixed parameter order in is_allowed() calls to check original model name first
- Fixed validate_parameters() to use original model name instead of resolved name
- Fixed thinking capabilities check to use original model name
- Enables GOOGLE_ALLOWED_MODELS=pro,flash to work correctly with shorthand names

🤖 Generated with Claude Code

Co-Authored-By: Claude <noreply@anthropic.com>
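In short, is_allowed() now receives the original (possibly shorthand) name ahead of the resolved one, so an allow-list made of shorthands can match. A minimal sketch of that behavior, with a hypothetical SHORTHANDS mapping and a simplified stand-in for the restriction service (the real is_allowed also takes a ProviderType, as the first hunk below shows):

# Illustrative sketch only: SHORTHANDS and AllowList are hypothetical
# stand-ins for the real resolution table and restriction service.
SHORTHANDS = {"pro": "gemini-2.5-pro", "flash": "gemini-2.5-flash"}  # assumed mapping

def resolve_model_name(model_name: str) -> str:
    """Map a shorthand such as 'pro' to a full model ID; pass others through."""
    return SHORTHANDS.get(model_name, model_name)

class AllowList:
    """Simplified stand-in for the per-provider restriction check."""

    def __init__(self, allowed: set[str]) -> None:
        self.allowed = {name.lower() for name in allowed}

    def is_allowed(self, model_name: str, resolved_name: str) -> bool:
        # Check the original (possibly shorthand) name first, then the
        # resolved name, so GOOGLE_ALLOWED_MODELS=pro,flash can match 'pro'.
        return model_name.lower() in self.allowed or resolved_name.lower() in self.allowed

service = AllowList({"pro", "flash"})  # GOOGLE_ALLOWED_MODELS=pro,flash
assert service.is_allowed("pro", resolve_model_name("pro"))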
@@ -67,8 +67,8 @@ class GeminiModelProvider(ModelProvider):
         from utils.model_restrictions import get_restriction_service
 
         restriction_service = get_restriction_service()
-        if not restriction_service.is_allowed(ProviderType.GOOGLE, resolved_name, model_name):
-            raise ValueError(f"Gemini model '{model_name}' is not allowed by restriction policy.")
+        if not restriction_service.is_allowed(ProviderType.GOOGLE, model_name, resolved_name):
+            raise ValueError(f"Gemini model '{resolved_name}' is not allowed by restriction policy.")
 
         config = self.SUPPORTED_MODELS[resolved_name]
 
@@ -100,7 +100,7 @@ class GeminiModelProvider(ModelProvider):
         """Generate content using Gemini model."""
         # Validate parameters
         resolved_name = self._resolve_model_name(model_name)
-        self.validate_parameters(resolved_name, temperature)
+        self.validate_parameters(model_name, temperature)
 
         # Combine system prompt with user prompt if provided
         if system_prompt:
@@ -119,7 +119,7 @@ class GeminiModelProvider(ModelProvider):
             generation_config.max_output_tokens = max_output_tokens
 
         # Add thinking configuration for models that support it
-        capabilities = self.get_capabilities(resolved_name)
+        capabilities = self.get_capabilities(model_name)
         if capabilities.supports_extended_thinking and thinking_mode in self.THINKING_BUDGETS:
             # Get model's max thinking tokens and calculate actual budget
             model_config = self.SUPPORTED_MODELS.get(resolved_name)
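The second and third hunks apply the same principle one level down: helpers that re-run the restriction check internally must also see the original name. A hedged sketch of the failure mode, assuming (as the error in the first hunk suggests) that capability lookup re-validates restrictions; the names below are illustrative, not the provider's real API:

# Hypothetical sketch of why validate_parameters()/get_capabilities()
# must receive the ORIGINAL name when the allow-list holds shorthands.
ALLOWED = {"pro", "flash"}               # GOOGLE_ALLOWED_MODELS=pro,flash
SHORTHANDS = {"pro": "gemini-2.5-pro"}   # assumed shorthand mapping

def get_capabilities(model_name: str) -> dict:
    resolved = SHORTHANDS.get(model_name, model_name)
    # The internal restriction check sees both names; with shorthands in
    # the allow-list, only the original name can match.
    if model_name not in ALLOWED and resolved not in ALLOWED:
        raise ValueError(f"Gemini model '{resolved}' is not allowed by restriction policy.")
    return {"supports_extended_thinking": True}

get_capabilities("pro")                  # OK: the original shorthand matches
try:
    get_capabilities("gemini-2.5-pro")   # pre-fix call pattern: resolved name only
except ValueError:
    pass  # rejected when the allow-list holds shorthands, hence the fix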