Use the new Gemini 2.5 Flash

Updated to support specifying the thinking-token budget as a ratio of the maximum allowed
Updated tests
Updated README
Fahad
2025-06-12 20:46:54 +04:00
parent b34c63d710
commit 3aedb16101
27 changed files with 135 additions and 98 deletions


@@ -32,7 +32,7 @@ class TestConfig:
     def test_model_config(self):
         """Test model configuration"""
         # DEFAULT_MODEL is set in conftest.py for tests
-        assert DEFAULT_MODEL == "gemini-2.0-flash"
+        assert DEFAULT_MODEL == "gemini-2.5-flash-preview-05-20"
         assert MAX_CONTEXT_TOKENS == 1_000_000
 
     def test_temperature_defaults(self):
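
The commit message describes expressing the thinking-token budget as a ratio of the maximum allowed rather than as an absolute count. A minimal sketch of that conversion, assuming a per-request ceiling and a 0.0–1.0 ratio convention; the constant MAX_THINKING_TOKENS, its value, and the helper name are illustrative assumptions, not code taken from this repository:

    # Convert a user-supplied ratio into an absolute thinking-token budget.
    MAX_THINKING_TOKENS = 24_576  # assumed ceiling for illustration only

    def thinking_budget_from_ratio(ratio: float,
                                   max_tokens: int = MAX_THINKING_TOKENS) -> int:
        """Clamp the ratio to [0, 1] and scale it to an absolute token budget."""
        ratio = max(0.0, min(1.0, ratio))
        return int(ratio * max_tokens)

    # Example: a ratio of 0.25 yields a 6,144-token thinking budget.
    budget = thinking_budget_from_ratio(0.25)

Keeping the configuration as a ratio means the same setting keeps working if the model's maximum thinking budget changes, since only the ceiling constant needs updating.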