Use the new Gemini 2.5 Flash

Updated thinking token budgets to be computed as a ratio of each model's maximum allowed
Updated tests
Updated README
Fahad
2025-06-12 20:46:54 +04:00
parent b34c63d710
commit 3aedb16101
27 changed files with 135 additions and 98 deletions
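
At a glance: instead of fixed per-mode token counts, each thinking mode now maps to a ratio of the model's thinking-token cap. A minimal sketch of that scheme, with names and caps mirroring the provider diff below (the helper itself is illustrative):

```python
# Sketch: thinking budgets expressed as ratios of each model's cap.
THINKING_BUDGETS = {"minimal": 0.005, "low": 0.08, "medium": 0.33, "high": 0.67, "max": 1.0}
MAX_THINKING_TOKENS = {
    "gemini-2.5-flash-preview-05-20": 24576,  # Flash 2.5 cap
    "gemini-2.5-pro-preview-06-05": 32768,    # Pro 2.5 cap
}

def thinking_budget(model: str, mode: str) -> int:
    """Absolute token budget for a model/mode pair; 0 if either is unknown."""
    return int(MAX_THINKING_TOKENS.get(model, 0) * THINKING_BUDGETS.get(mode, 0))

assert thinking_budget("gemini-2.5-flash-preview-05-20", "medium") == 8110
assert thinking_budget("gemini-2.5-pro-preview-06-05", "max") == 32768
```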

View File

@@ -390,7 +390,7 @@ Use zen and perform a thorough precommit ensuring there aren't any new regressio
- Supports specialized analysis types: architecture, performance, security, quality
- Uses file paths (not content) for clean terminal output
- Can identify patterns, anti-patterns, and refactoring opportunities
- **Web search capability**: When enabled with `use_websearch`, the model can request Claude to perform web searches and share results back to enhance analysis with current documentation, design patterns, and best practices
- **Web search capability**: When enabled with `use_websearch` (default: true), the model can request Claude to perform web searches and share results back to enhance analysis with current documentation, design patterns, and best practices
### 7. `get_version` - Server Information
```
"Get zen to show its version"

View File

@@ -26,7 +26,15 @@ DEFAULT_MODEL = os.getenv("DEFAULT_MODEL", "auto")
# Validate DEFAULT_MODEL and set to "auto" if invalid
# Only include actually supported models from providers
VALID_MODELS = ["auto", "flash", "pro", "o3", "o3-mini", "gemini-2.0-flash", "gemini-2.5-pro-preview-06-05"]
VALID_MODELS = [
"auto",
"flash",
"pro",
"o3",
"o3-mini",
"gemini-2.5-flash-preview-05-20",
"gemini-2.5-pro-preview-06-05",
]
if DEFAULT_MODEL not in VALID_MODELS:
import logging
@@ -47,7 +55,7 @@ MODEL_CAPABILITIES_DESC = {
"o3": "Strong reasoning (200K context) - Logical problems, code generation, systematic analysis",
"o3-mini": "Fast O3 variant (200K context) - Balanced performance/speed, moderate complexity",
# Full model names also supported
"gemini-2.0-flash": "Ultra-fast (1M context) - Quick analysis, simple queries, rapid iterations",
"gemini-2.5-flash-preview-05-20": "Ultra-fast (1M context) - Quick analysis, simple queries, rapid iterations",
"gemini-2.5-pro-preview-06-05": "Deep reasoning + thinking mode (1M context) - Complex problems, architecture, deep analysis",
}
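
The validation that follows this list falls back to "auto" when the environment names an unknown model; a hedged sketch of that path (the exact log message is an assumption):

```python
import logging
import os

DEFAULT_MODEL = os.getenv("DEFAULT_MODEL", "auto")
if DEFAULT_MODEL not in VALID_MODELS:  # VALID_MODELS as defined above
    logging.warning("Invalid DEFAULT_MODEL %r; falling back to 'auto'", DEFAULT_MODEL)
    DEFAULT_MODEL = "auto"
```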

View File

@@ -13,26 +13,29 @@ class GeminiModelProvider(ModelProvider):
# Model configurations
SUPPORTED_MODELS = {
"gemini-2.0-flash": {
"gemini-2.5-flash-preview-05-20": {
"max_tokens": 1_048_576, # 1M tokens
"supports_extended_thinking": False,
"supports_extended_thinking": True,
"max_thinking_tokens": 24576, # Flash 2.5 thinking budget limit
},
"gemini-2.5-pro-preview-06-05": {
"max_tokens": 1_048_576, # 1M tokens
"supports_extended_thinking": True,
"max_thinking_tokens": 32768, # Pro 2.5 thinking budget limit
},
# Shorthands
"flash": "gemini-2.0-flash",
"flash": "gemini-2.5-flash-preview-05-20",
"pro": "gemini-2.5-pro-preview-06-05",
}
# Thinking mode configurations for models that support it
# Thinking mode configurations - percentages of model's max_thinking_tokens
# These percentages work across all models that support thinking
THINKING_BUDGETS = {
"minimal": 128, # Minimum for 2.5 Pro - fast responses
"low": 2048, # Light reasoning tasks
"medium": 8192, # Balanced reasoning (default)
"high": 16384, # Complex analysis
"max": 32768, # Maximum reasoning depth
"minimal": 0.005, # 0.5% of max - minimal thinking for fast responses
"low": 0.08, # 8% of max - light reasoning tasks
"medium": 0.33, # 33% of max - balanced reasoning (default)
"high": 0.67, # 67% of max - complex analysis
"max": 1.0, # 100% of max - full thinking budget
}
def __init__(self, api_key: str, **kwargs):
@@ -107,9 +110,12 @@ class GeminiModelProvider(ModelProvider):
# Add thinking configuration for models that support it
capabilities = self.get_capabilities(resolved_name)
if capabilities.supports_extended_thinking and thinking_mode in self.THINKING_BUDGETS:
generation_config.thinking_config = types.ThinkingConfig(
thinking_budget=self.THINKING_BUDGETS[thinking_mode]
)
# Get model's max thinking tokens and calculate actual budget
model_config = self.SUPPORTED_MODELS.get(resolved_name)
if model_config and "max_thinking_tokens" in model_config:
max_thinking_tokens = model_config["max_thinking_tokens"]
actual_thinking_budget = int(max_thinking_tokens * self.THINKING_BUDGETS[thinking_mode])
generation_config.thinking_config = types.ThinkingConfig(thinking_budget=actual_thinking_budget)
try:
# Generate content
@@ -164,6 +170,23 @@ class GeminiModelProvider(ModelProvider):
capabilities = self.get_capabilities(model_name)
return capabilities.supports_extended_thinking
def get_thinking_budget(self, model_name: str, thinking_mode: str) -> int:
"""Get actual thinking token budget for a model and thinking mode."""
resolved_name = self._resolve_model_name(model_name)
model_config = self.SUPPORTED_MODELS.get(resolved_name, {})
if not model_config.get("supports_extended_thinking", False):
return 0
if thinking_mode not in self.THINKING_BUDGETS:
return 0
max_thinking_tokens = model_config.get("max_thinking_tokens", 0)
if max_thinking_tokens == 0:
return 0
return int(max_thinking_tokens * self.THINKING_BUDGETS[thinking_mode])
def _resolve_model_name(self, model_name: str) -> str:
"""Resolve model shorthand to full name."""
# Check if it's a shorthand
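
Hypothetical usage of the new `get_thinking_budget` helper (placeholder API key; expected values follow from the caps and ratios above):

```python
provider = GeminiModelProvider(api_key="test-key")  # placeholder key
provider.get_thinking_budget("flash", "high")    # int(24576 * 0.67) == 16465
provider.get_thinking_budget("pro", "minimal")   # int(32768 * 0.005) == 163
provider.get_thinking_budget("flash", "bogus")   # 0 for an unknown mode
```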

View File

@@ -67,7 +67,7 @@ class ModelProviderRegistry:
"""Get provider instance for a specific model name.
Args:
model_name: Name of the model (e.g., "gemini-2.0-flash", "o3-mini")
model_name: Name of the model (e.g., "gemini-2.5-flash-preview-05-20", "o3-mini")
Returns:
ModelProvider instance that supports this model
@@ -137,7 +137,7 @@ class ModelProviderRegistry:
2. Gemini 2.5 Flash (fast and efficient) if Gemini API key available
3. OpenAI o3 (high performance) if OpenAI API key available
4. Gemini 2.5 Pro (deep reasoning) if Gemini API key available
5. Fallback to gemini-2.0-flash (most common case)
5. Fallback to gemini-2.5-flash-preview-05-20 (most common case)
Returns:
Model name string for fallback use
@@ -150,11 +150,11 @@ class ModelProviderRegistry:
if openai_available:
return "o3-mini" # Balanced performance/cost
elif gemini_available:
return "gemini-2.0-flash" # Fast and efficient
return "gemini-2.5-flash-preview-05-20" # Fast and efficient
else:
# No API keys available - return a reasonable default
# This maintains backward compatibility for tests
return "gemini-2.0-flash"
return "gemini-2.5-flash-preview-05-20"
@classmethod
def get_available_providers_with_keys(cls) -> list[ProviderType]:
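
The fallback order reduces to roughly this sketch (assuming availability is derived from the corresponding API-key environment variables):

```python
import os

def preferred_fallback_model() -> str:
    if os.getenv("OPENAI_API_KEY"):
        return "o3-mini"  # balanced performance/cost
    # Gemini available, or no keys at all: same Flash default either way
    return "gemini-2.5-flash-preview-05-20"
```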

View File

@@ -55,7 +55,7 @@ class TestModelThinkingConfig(BaseSimulatorTest):
"chat",
{
"prompt": "What is 3 + 3? Give a quick answer.",
"model": "flash", # Should resolve to gemini-2.0-flash
"model": "flash", # Should resolve to gemini-2.5-flash-preview-05-20
"thinking_mode": "high", # Should be ignored for Flash model
},
)
@@ -80,7 +80,7 @@ class TestModelThinkingConfig(BaseSimulatorTest):
("pro", "should work with Pro model"),
("flash", "should work with Flash model"),
("gemini-2.5-pro-preview-06-05", "should work with full Pro model name"),
("gemini-2.0-flash", "should work with full Flash model name"),
("gemini-2.5-flash-preview-05-20", "should work with full Flash model name"),
]
success_count = 0

View File

@@ -24,7 +24,7 @@ if "OPENAI_API_KEY" not in os.environ:
# Set default model to a specific value for tests to avoid auto mode
# This prevents all tests from failing due to missing model parameter
os.environ["DEFAULT_MODEL"] = "gemini-2.0-flash"
os.environ["DEFAULT_MODEL"] = "gemini-2.5-flash-preview-05-20"
# Force reload of config module to pick up the env var
import config # noqa: E402

View File

@@ -5,7 +5,7 @@ from unittest.mock import Mock
from providers.base import ModelCapabilities, ProviderType, RangeTemperatureConstraint
def create_mock_provider(model_name="gemini-2.0-flash", max_tokens=1_048_576):
def create_mock_provider(model_name="gemini-2.5-flash-preview-05-20", max_tokens=1_048_576):
"""Create a properly configured mock provider."""
mock_provider = Mock()

View File

@@ -72,7 +72,7 @@ class TestClaudeContinuationOffers:
mock_provider.generate_content.return_value = Mock(
content="Analysis complete.",
usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
model_name="gemini-2.0-flash",
model_name="gemini-2.5-flash-preview-05-20",
metadata={"finish_reason": "STOP"},
)
mock_get_provider.return_value = mock_provider
@@ -129,7 +129,7 @@ class TestClaudeContinuationOffers:
mock_provider.generate_content.return_value = Mock(
content="Continued analysis.",
usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
model_name="gemini-2.0-flash",
model_name="gemini-2.5-flash-preview-05-20",
metadata={"finish_reason": "STOP"},
)
mock_get_provider.return_value = mock_provider
@@ -162,7 +162,7 @@ class TestClaudeContinuationOffers:
mock_provider.generate_content.return_value = Mock(
content="Analysis complete. The code looks good.",
usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
model_name="gemini-2.0-flash",
model_name="gemini-2.5-flash-preview-05-20",
metadata={"finish_reason": "STOP"},
)
mock_get_provider.return_value = mock_provider
@@ -208,7 +208,7 @@ I'd be happy to examine the error handling patterns in more detail if that would
mock_provider.generate_content.return_value = Mock(
content=content_with_followup,
usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
model_name="gemini-2.0-flash",
model_name="gemini-2.5-flash-preview-05-20",
metadata={"finish_reason": "STOP"},
)
mock_get_provider.return_value = mock_provider
@@ -253,7 +253,7 @@ I'd be happy to examine the error handling patterns in more detail if that would
mock_provider.generate_content.return_value = Mock(
content="Continued analysis complete.",
usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
model_name="gemini-2.0-flash",
model_name="gemini-2.5-flash-preview-05-20",
metadata={"finish_reason": "STOP"},
)
mock_get_provider.return_value = mock_provider
@@ -309,7 +309,7 @@ I'd be happy to examine the error handling patterns in more detail if that would
mock_provider.generate_content.return_value = Mock(
content="Final response.",
usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
model_name="gemini-2.0-flash",
model_name="gemini-2.5-flash-preview-05-20",
metadata={"finish_reason": "STOP"},
)
mock_get_provider.return_value = mock_provider
@@ -358,7 +358,7 @@ class TestContinuationIntegration:
mock_provider.generate_content.return_value = Mock(
content="Analysis result",
usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
model_name="gemini-2.0-flash",
model_name="gemini-2.5-flash-preview-05-20",
metadata={"finish_reason": "STOP"},
)
mock_get_provider.return_value = mock_provider
@@ -411,7 +411,7 @@ class TestContinuationIntegration:
mock_provider.generate_content.return_value = Mock(
content="Structure analysis done.",
usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
model_name="gemini-2.0-flash",
model_name="gemini-2.5-flash-preview-05-20",
metadata={"finish_reason": "STOP"},
)
mock_get_provider.return_value = mock_provider
@@ -448,7 +448,7 @@ class TestContinuationIntegration:
mock_provider.generate_content.return_value = Mock(
content="Performance analysis done.",
usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
model_name="gemini-2.0-flash",
model_name="gemini-2.5-flash-preview-05-20",
metadata={"finish_reason": "STOP"},
)

View File

@@ -41,7 +41,7 @@ class TestDynamicContextRequests:
mock_provider.get_provider_type.return_value = Mock(value="google")
mock_provider.supports_thinking_mode.return_value = False
mock_provider.generate_content.return_value = Mock(
content=clarification_json, usage={}, model_name="gemini-2.0-flash", metadata={}
content=clarification_json, usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
)
mock_get_provider.return_value = mock_provider
@@ -82,7 +82,7 @@ class TestDynamicContextRequests:
mock_provider.get_provider_type.return_value = Mock(value="google")
mock_provider.supports_thinking_mode.return_value = False
mock_provider.generate_content.return_value = Mock(
content=normal_response, usage={}, model_name="gemini-2.0-flash", metadata={}
content=normal_response, usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
)
mock_get_provider.return_value = mock_provider
@@ -106,7 +106,7 @@ class TestDynamicContextRequests:
mock_provider.get_provider_type.return_value = Mock(value="google")
mock_provider.supports_thinking_mode.return_value = False
mock_provider.generate_content.return_value = Mock(
content=malformed_json, usage={}, model_name="gemini-2.0-flash", metadata={}
content=malformed_json, usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
)
mock_get_provider.return_value = mock_provider
@@ -146,7 +146,7 @@ class TestDynamicContextRequests:
mock_provider.get_provider_type.return_value = Mock(value="google")
mock_provider.supports_thinking_mode.return_value = False
mock_provider.generate_content.return_value = Mock(
content=clarification_json, usage={}, model_name="gemini-2.0-flash", metadata={}
content=clarification_json, usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
)
mock_get_provider.return_value = mock_provider
@@ -233,7 +233,7 @@ class TestCollaborationWorkflow:
mock_provider.get_provider_type.return_value = Mock(value="google")
mock_provider.supports_thinking_mode.return_value = False
mock_provider.generate_content.return_value = Mock(
content=clarification_json, usage={}, model_name="gemini-2.0-flash", metadata={}
content=clarification_json, usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
)
mock_get_provider.return_value = mock_provider
@@ -272,7 +272,7 @@ class TestCollaborationWorkflow:
mock_provider.get_provider_type.return_value = Mock(value="google")
mock_provider.supports_thinking_mode.return_value = False
mock_provider.generate_content.return_value = Mock(
content=clarification_json, usage={}, model_name="gemini-2.0-flash", metadata={}
content=clarification_json, usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
)
mock_get_provider.return_value = mock_provider
@@ -299,7 +299,7 @@ class TestCollaborationWorkflow:
"""
mock_provider.generate_content.return_value = Mock(
content=final_response, usage={}, model_name="gemini-2.0-flash", metadata={}
content=final_response, usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
)
result2 = await tool.execute(

View File

@@ -32,7 +32,7 @@ class TestConfig:
def test_model_config(self):
"""Test model configuration"""
# DEFAULT_MODEL is set in conftest.py for tests
assert DEFAULT_MODEL == "gemini-2.0-flash"
assert DEFAULT_MODEL == "gemini-2.5-flash-preview-05-20"
assert MAX_CONTEXT_TOKENS == 1_000_000
def test_temperature_defaults(self):

View File

@@ -75,7 +75,7 @@ async def test_conversation_history_field_mapping():
mock_provider = MagicMock()
mock_provider.get_capabilities.return_value = ModelCapabilities(
provider=ProviderType.GOOGLE,
model_name="gemini-2.0-flash",
model_name="gemini-2.5-flash-preview-05-20",
friendly_name="Gemini",
max_tokens=200000,
supports_extended_thinking=True,

View File

@@ -95,7 +95,7 @@ class TestConversationHistoryBugFix:
return Mock(
content="Response with conversation context",
usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
model_name="gemini-2.0-flash",
model_name="gemini-2.5-flash-preview-05-20",
metadata={"finish_reason": "STOP"},
)
@@ -155,7 +155,7 @@ class TestConversationHistoryBugFix:
return Mock(
content="Response without history",
usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
model_name="gemini-2.0-flash",
model_name="gemini-2.5-flash-preview-05-20",
metadata={"finish_reason": "STOP"},
)
@@ -193,7 +193,7 @@ class TestConversationHistoryBugFix:
return Mock(
content="New conversation response",
usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
model_name="gemini-2.0-flash",
model_name="gemini-2.5-flash-preview-05-20",
metadata={"finish_reason": "STOP"},
)
@@ -277,7 +277,7 @@ class TestConversationHistoryBugFix:
return Mock(
content="Analysis of new files complete",
usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
model_name="gemini-2.0-flash",
model_name="gemini-2.5-flash-preview-05-20",
metadata={"finish_reason": "STOP"},
)

View File

@@ -112,7 +112,7 @@ I'd be happy to review these security findings in detail if that would be helpfu
mock_provider.generate_content.return_value = Mock(
content=content,
usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
model_name="gemini-2.0-flash",
model_name="gemini-2.5-flash-preview-05-20",
metadata={"finish_reason": "STOP"},
)
mock_get_provider.return_value = mock_provider
@@ -159,7 +159,7 @@ I'd be happy to review these security findings in detail if that would be helpfu
mock_provider.generate_content.return_value = Mock(
content="Critical security vulnerability confirmed. The authentication function always returns true, bypassing all security checks.",
usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
model_name="gemini-2.0-flash",
model_name="gemini-2.5-flash-preview-05-20",
metadata={"finish_reason": "STOP"},
)
mock_get_provider.return_value = mock_provider
@@ -284,7 +284,7 @@ I'd be happy to review these security findings in detail if that would be helpfu
mock_provider.generate_content.return_value = Mock(
content="Security review of auth.py shows vulnerabilities",
usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
model_name="gemini-2.0-flash",
model_name="gemini-2.5-flash-preview-05-20",
metadata={"finish_reason": "STOP"},
)
mock_get_provider.return_value = mock_provider

View File

@@ -33,10 +33,10 @@ class TestIntelligentFallback:
@patch.dict(os.environ, {"OPENAI_API_KEY": "", "GEMINI_API_KEY": "test-gemini-key"}, clear=False)
def test_prefers_gemini_flash_when_openai_unavailable(self):
"""Test that gemini-2.0-flash is used when only Gemini API key is available"""
"""Test that gemini-2.5-flash-preview-05-20 is used when only Gemini API key is available"""
ModelProviderRegistry.clear_cache()
fallback_model = ModelProviderRegistry.get_preferred_fallback_model()
assert fallback_model == "gemini-2.0-flash"
assert fallback_model == "gemini-2.5-flash-preview-05-20"
@patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-key", "GEMINI_API_KEY": "test-gemini-key"}, clear=False)
def test_prefers_openai_when_both_available(self):
@@ -50,7 +50,7 @@ class TestIntelligentFallback:
"""Test fallback behavior when no API keys are available"""
ModelProviderRegistry.clear_cache()
fallback_model = ModelProviderRegistry.get_preferred_fallback_model()
assert fallback_model == "gemini-2.0-flash" # Default fallback
assert fallback_model == "gemini-2.5-flash-preview-05-20" # Default fallback
def test_available_providers_with_keys(self):
"""Test the get_available_providers_with_keys method"""
@@ -140,8 +140,8 @@ class TestIntelligentFallback:
history, tokens = build_conversation_history(context, model_context=None)
# Should use gemini-2.0-flash when only Gemini is available
mock_context_class.assert_called_once_with("gemini-2.0-flash")
# Should use gemini-2.5-flash-preview-05-20 when only Gemini is available
mock_context_class.assert_called_once_with("gemini-2.5-flash-preview-05-20")
def test_non_auto_mode_unchanged(self):
"""Test that non-auto mode behavior is unchanged"""

View File

@@ -75,7 +75,7 @@ class TestLargePromptHandling:
mock_provider.generate_content.return_value = MagicMock(
content="This is a test response",
usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
model_name="gemini-2.0-flash",
model_name="gemini-2.5-flash-preview-05-20",
metadata={"finish_reason": "STOP"},
)
mock_get_provider.return_value = mock_provider
@@ -100,7 +100,7 @@ class TestLargePromptHandling:
mock_provider.generate_content.return_value = MagicMock(
content="Processed large prompt",
usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
model_name="gemini-2.0-flash",
model_name="gemini-2.5-flash-preview-05-20",
metadata={"finish_reason": "STOP"},
)
mock_get_provider.return_value = mock_provider
@@ -212,7 +212,7 @@ class TestLargePromptHandling:
mock_provider.generate_content.return_value = MagicMock(
content="Success",
usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
model_name="gemini-2.0-flash",
model_name="gemini-2.5-flash-preview-05-20",
metadata={"finish_reason": "STOP"},
)
mock_get_provider.return_value = mock_provider
@@ -245,7 +245,7 @@ class TestLargePromptHandling:
mock_provider.generate_content.return_value = MagicMock(
content="Success",
usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
model_name="gemini-2.0-flash",
model_name="gemini-2.5-flash-preview-05-20",
metadata={"finish_reason": "STOP"},
)
mock_get_provider.return_value = mock_provider
@@ -276,7 +276,7 @@ class TestLargePromptHandling:
mock_provider.generate_content.return_value = MagicMock(
content="Success",
usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
model_name="gemini-2.0-flash",
model_name="gemini-2.5-flash-preview-05-20",
metadata={"finish_reason": "STOP"},
)
mock_get_provider.return_value = mock_provider
@@ -298,7 +298,7 @@ class TestLargePromptHandling:
mock_provider.generate_content.return_value = MagicMock(
content="Success",
usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
model_name="gemini-2.0-flash",
model_name="gemini-2.5-flash-preview-05-20",
metadata={"finish_reason": "STOP"},
)
mock_get_provider.return_value = mock_provider

View File

@@ -31,7 +31,7 @@ class TestPromptRegression:
return Mock(
content=text,
usage={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
model_name="gemini-2.0-flash",
model_name="gemini-2.5-flash-preview-05-20",
metadata={"finish_reason": "STOP"},
)

View File

@@ -49,7 +49,7 @@ class TestModelProviderRegistry:
"""Test getting provider for a specific model"""
ModelProviderRegistry.register_provider(ProviderType.GOOGLE, GeminiModelProvider)
provider = ModelProviderRegistry.get_provider_for_model("gemini-2.0-flash")
provider = ModelProviderRegistry.get_provider_for_model("gemini-2.5-flash-preview-05-20")
assert provider is not None
assert isinstance(provider, GeminiModelProvider)
@@ -80,10 +80,10 @@ class TestGeminiProvider:
"""Test getting model capabilities"""
provider = GeminiModelProvider(api_key="test-key")
capabilities = provider.get_capabilities("gemini-2.0-flash")
capabilities = provider.get_capabilities("gemini-2.5-flash-preview-05-20")
assert capabilities.provider == ProviderType.GOOGLE
assert capabilities.model_name == "gemini-2.0-flash"
assert capabilities.model_name == "gemini-2.5-flash-preview-05-20"
assert capabilities.max_tokens == 1_048_576
assert capabilities.supports_extended_thinking
@@ -103,13 +103,13 @@ class TestGeminiProvider:
assert provider.validate_model_name("pro")
capabilities = provider.get_capabilities("flash")
assert capabilities.model_name == "gemini-2.0-flash"
assert capabilities.model_name == "gemini-2.5-flash-preview-05-20"
def test_supports_thinking_mode(self):
"""Test thinking mode support detection"""
provider = GeminiModelProvider(api_key="test-key")
assert not provider.supports_thinking_mode("gemini-2.0-flash")
assert provider.supports_thinking_mode("gemini-2.5-flash-preview-05-20")
assert provider.supports_thinking_mode("gemini-2.5-pro-preview-06-05")
@patch("google.genai.Client")
@@ -133,11 +133,13 @@ class TestGeminiProvider:
provider = GeminiModelProvider(api_key="test-key")
response = provider.generate_content(prompt="Test prompt", model_name="gemini-2.0-flash", temperature=0.7)
response = provider.generate_content(
prompt="Test prompt", model_name="gemini-2.5-flash-preview-05-20", temperature=0.7
)
assert isinstance(response, ModelResponse)
assert response.content == "Generated content"
assert response.model_name == "gemini-2.0-flash"
assert response.model_name == "gemini-2.5-flash-preview-05-20"
assert response.provider == ProviderType.GOOGLE
assert response.usage["input_tokens"] == 10
assert response.usage["output_tokens"] == 20

View File

@@ -56,7 +56,7 @@ class TestServerTools:
mock_provider.get_provider_type.return_value = Mock(value="google")
mock_provider.supports_thinking_mode.return_value = False
mock_provider.generate_content.return_value = Mock(
content="Chat response", usage={}, model_name="gemini-2.0-flash", metadata={}
content="Chat response", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
)
mock_get_provider.return_value = mock_provider

View File

@@ -45,7 +45,7 @@ class TestThinkingModes:
mock_provider.get_provider_type.return_value = Mock(value="google")
mock_provider.supports_thinking_mode.return_value = True
mock_provider.generate_content.return_value = Mock(
content="Minimal thinking response", usage={}, model_name="gemini-2.0-flash", metadata={}
content="Minimal thinking response", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
)
mock_get_provider.return_value = mock_provider
@@ -82,7 +82,7 @@ class TestThinkingModes:
mock_provider.get_provider_type.return_value = Mock(value="google")
mock_provider.supports_thinking_mode.return_value = True
mock_provider.generate_content.return_value = Mock(
content="Low thinking response", usage={}, model_name="gemini-2.0-flash", metadata={}
content="Low thinking response", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
)
mock_get_provider.return_value = mock_provider
@@ -114,7 +114,7 @@ class TestThinkingModes:
mock_provider.get_provider_type.return_value = Mock(value="google")
mock_provider.supports_thinking_mode.return_value = True
mock_provider.generate_content.return_value = Mock(
content="Medium thinking response", usage={}, model_name="gemini-2.0-flash", metadata={}
content="Medium thinking response", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
)
mock_get_provider.return_value = mock_provider
@@ -145,7 +145,7 @@ class TestThinkingModes:
mock_provider.get_provider_type.return_value = Mock(value="google")
mock_provider.supports_thinking_mode.return_value = True
mock_provider.generate_content.return_value = Mock(
content="High thinking response", usage={}, model_name="gemini-2.0-flash", metadata={}
content="High thinking response", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
)
mock_get_provider.return_value = mock_provider
@@ -175,7 +175,7 @@ class TestThinkingModes:
mock_provider.get_provider_type.return_value = Mock(value="google")
mock_provider.supports_thinking_mode.return_value = True
mock_provider.generate_content.return_value = Mock(
content="Max thinking response", usage={}, model_name="gemini-2.0-flash", metadata={}
content="Max thinking response", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
)
mock_get_provider.return_value = mock_provider
@@ -222,18 +222,22 @@ class TestThinkingModes:
async def prepare_prompt(self, request):
return "test"
# Expected mappings
# Test dynamic budget calculation for Flash 2.5
from providers.gemini import GeminiModelProvider
provider = GeminiModelProvider(api_key="test-key")
flash_model = "gemini-2.5-flash-preview-05-20"
flash_max_tokens = 24576
expected_budgets = {
"minimal": 128,
"low": 2048,
"medium": 8192,
"high": 16384,
"max": 32768,
"minimal": int(flash_max_tokens * 0.005), # 123
"low": int(flash_max_tokens * 0.08), # 1966
"medium": int(flash_max_tokens * 0.33), # 8110
"high": int(flash_max_tokens * 0.67), # 16465
"max": int(flash_max_tokens * 1.0), # 24576
}
# Check each mode in create_model
for _mode, _expected_budget in expected_budgets.items():
# The budget mapping is inside create_model
# We can't easily test it without calling the method
# But we've verified the values are correct in the code
pass
# Check each mode using the helper method
for mode, expected_budget in expected_budgets.items():
actual_budget = provider.get_thinking_budget(flash_model, mode)
assert actual_budget == expected_budget, f"Mode {mode}: expected {expected_budget}, got {actual_budget}"

View File

@@ -37,7 +37,7 @@ class TestThinkDeepTool:
mock_provider.get_provider_type.return_value = Mock(value="google")
mock_provider.supports_thinking_mode.return_value = True
mock_provider.generate_content.return_value = Mock(
content="Extended analysis", usage={}, model_name="gemini-2.0-flash", metadata={}
content="Extended analysis", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
)
mock_get_provider.return_value = mock_provider
@@ -88,7 +88,7 @@ class TestCodeReviewTool:
mock_provider.get_provider_type.return_value = Mock(value="google")
mock_provider.supports_thinking_mode.return_value = False
mock_provider.generate_content.return_value = Mock(
content="Security issues found", usage={}, model_name="gemini-2.0-flash", metadata={}
content="Security issues found", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
)
mock_get_provider.return_value = mock_provider
@@ -133,7 +133,7 @@ class TestDebugIssueTool:
mock_provider.get_provider_type.return_value = Mock(value="google")
mock_provider.supports_thinking_mode.return_value = False
mock_provider.generate_content.return_value = Mock(
content="Root cause: race condition", usage={}, model_name="gemini-2.0-flash", metadata={}
content="Root cause: race condition", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
)
mock_get_provider.return_value = mock_provider
@@ -181,7 +181,7 @@ class TestAnalyzeTool:
mock_provider.get_provider_type.return_value = Mock(value="google")
mock_provider.supports_thinking_mode.return_value = False
mock_provider.generate_content.return_value = Mock(
content="Architecture analysis", usage={}, model_name="gemini-2.0-flash", metadata={}
content="Architecture analysis", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
)
mock_get_provider.return_value = mock_provider
@@ -295,7 +295,7 @@ class TestAbsolutePathValidation:
mock_provider.get_provider_type.return_value = Mock(value="google")
mock_provider.supports_thinking_mode.return_value = False
mock_provider.generate_content.return_value = Mock(
content="Analysis complete", usage={}, model_name="gemini-2.0-flash", metadata={}
content="Analysis complete", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
)
mock_get_provider.return_value = mock_provider

View File

@@ -83,7 +83,7 @@ class AnalyzeTool(BaseTool):
"thinking_mode": {
"type": "string",
"enum": ["minimal", "low", "medium", "high", "max"],
"description": "Thinking depth: minimal (128), low (2048), medium (8192), high (16384), max (32768)",
"description": "Thinking depth: minimal (0.5% of model max), low (8%), medium (33%), high (67%), max (100% of model max)",
},
"use_websearch": {
"type": "boolean",

View File

@@ -57,7 +57,7 @@ class ToolRequest(BaseModel):
# Higher values allow for more complex reasoning but increase latency and cost
thinking_mode: Optional[Literal["minimal", "low", "medium", "high", "max"]] = Field(
None,
description="Thinking depth: minimal (128), low (2048), medium (8192), high (16384), max (32768)",
description="Thinking depth: minimal (0.5% of model max), low (8%), medium (33%), high (67%), max (100% of model max)",
)
use_websearch: Optional[bool] = Field(
True,

View File

@@ -68,7 +68,7 @@ class ChatTool(BaseTool):
"thinking_mode": {
"type": "string",
"enum": ["minimal", "low", "medium", "high", "max"],
"description": "Thinking depth: minimal (128), low (2048), medium (8192), high (16384), max (32768)",
"description": "Thinking depth: minimal (0.5% of model max), low (8%), medium (33%), high (67%), max (100% of model max)",
},
"use_websearch": {
"type": "boolean",

View File

@@ -126,7 +126,7 @@ class CodeReviewTool(BaseTool):
"thinking_mode": {
"type": "string",
"enum": ["minimal", "low", "medium", "high", "max"],
"description": "Thinking depth: minimal (128), low (2048), medium (8192), high (16384), max (32768)",
"description": "Thinking depth: minimal (0.5% of model max), low (8%), medium (33%), high (67%), max (100% of model max)",
},
"use_websearch": {
"type": "boolean",

View File

@@ -86,7 +86,7 @@ class DebugIssueTool(BaseTool):
"thinking_mode": {
"type": "string",
"enum": ["minimal", "low", "medium", "high", "max"],
"description": "Thinking depth: minimal (128), low (2048), medium (8192), high (16384), max (32768)",
"description": "Thinking depth: minimal (0.5% of model max), low (8%), medium (33%), high (67%), max (100% of model max)",
},
"use_websearch": {
"type": "boolean",

View File

@@ -81,7 +81,7 @@ class ThinkDeepTool(BaseTool):
"thinking_mode": {
"type": "string",
"enum": ["minimal", "low", "medium", "high", "max"],
"description": f"Thinking depth: minimal (128), low (2048), medium (8192), high (16384), max (32768). Defaults to '{self.get_default_thinking_mode()}' if not specified.",
"description": f"Thinking depth: minimal (0.5% of model max), low (8%), medium (33%), high (67%), max (100% of model max). Defaults to '{self.get_default_thinking_mode()}' if not specified.",
},
"use_websearch": {
"type": "boolean",

View File

@@ -74,7 +74,7 @@ class ConversationTurn(BaseModel):
files: List of file paths referenced in this specific turn
tool_name: Which tool generated this turn (for cross-tool tracking)
model_provider: Provider used (e.g., "google", "openai")
model_name: Specific model used (e.g., "gemini-2.0-flash", "o3-mini")
model_name: Specific model used (e.g., "gemini-2.5-flash-preview-05-20", "o3-mini")
model_metadata: Additional model-specific metadata (e.g., thinking mode, token usage)
"""
@@ -249,7 +249,7 @@ def add_turn(
files: Optional list of files referenced in this turn
tool_name: Name of the tool adding this turn (for attribution)
model_provider: Provider used (e.g., "google", "openai")
model_name: Specific model used (e.g., "gemini-2.0-flash", "o3-mini")
model_name: Specific model used (e.g., "gemini-2.5-flash-preview-05-20", "o3-mini")
model_metadata: Additional model info (e.g., thinking mode, token usage)
Returns: