refactor: remove MAX_OUTPUT_TOKENS configuration
Remove the hardcoded 32,768 token output limit to allow Gemini to use its default/dynamic output token allocation. This provides more flexibility for responses without artificial constraints.

- Remove MAX_OUTPUT_TOKENS constant from config
- Remove max_tokens parameter from ToolRequest base model
- Clean up all references in server.py and tools/base.py
- Remove test_output_tokens.py as it's no longer needed
- Update imports to remove MAX_OUTPUT_TOKENS references

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
@@ -2,7 +2,7 @@
 """
 Tests for configuration
 """

-from config import (DEFAULT_MODEL, MAX_CONTEXT_TOKENS, MAX_OUTPUT_TOKENS,
+from config import (DEFAULT_MODEL, MAX_CONTEXT_TOKENS,
                     TEMPERATURE_ANALYTICAL, TEMPERATURE_BALANCED,
                     TEMPERATURE_CREATIVE, TOOL_TRIGGERS, __author__,
                     __updated__, __version__)
@@ -27,7 +27,6 @@ class TestConfig:
         """Test model configuration"""
         assert DEFAULT_MODEL == "gemini-2.5-pro-preview-06-05"
         assert MAX_CONTEXT_TOKENS == 1_000_000
-        assert MAX_OUTPUT_TOKENS == 32_768

     def test_temperature_defaults(self):
         """Test temperature constants"""
Reference in New Issue
Block a user