fix: increase output token limit to prevent response truncation
- Add MAX_OUTPUT_TOKENS constant set to 32,768 (Gemini 2.5 Pro's limit)
- Update all tools and chat handler to use MAX_OUTPUT_TOKENS
- Add comprehensive tests for output token configuration
- Update README with configuration details and system prompt docs

This fixes the issue where Gemini responses were being cut off at 8192 tokens, causing Claude to repeatedly ask for the same analysis.

Fixes #1

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
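For context, the constant referenced in the diff below is imported from config.py. A minimal sketch of what that addition might look like follows; the explanatory comment and exact placement are assumptions, not the repository's actual source:

# Sketch only: assumed shape of the new constant in config.py.
# Gemini 2.5 Pro supports up to 32,768 output tokens per response; the previous
# hard-coded 8192 limit truncated long analyses.
MAX_OUTPUT_TOKENS = 32_768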
@@ -15,7 +15,7 @@ from mcp.server.models import InitializationOptions
 from mcp.server.stdio import stdio_server
 from mcp.types import TextContent, Tool
 
-from config import (DEFAULT_MODEL, MAX_CONTEXT_TOKENS, __author__, __updated__,
+from config import (DEFAULT_MODEL, MAX_CONTEXT_TOKENS, MAX_OUTPUT_TOKENS, __author__, __updated__,
                     __version__)
 from tools import AnalyzeTool, DebugIssueTool, ReviewCodeTool, ThinkDeeperTool
 
@@ -160,7 +160,7 @@ async def handle_chat(arguments: Dict[str, Any]) -> List[TextContent]:
         model_name=DEFAULT_MODEL,
         generation_config={
             "temperature": temperature,
-            "max_output_tokens": 8192,
+            "max_output_tokens": MAX_OUTPUT_TOKENS,
             "candidate_count": 1,
         },
     )
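The commit message also mentions tests for the output token configuration. A hedged sketch of what such a test could look like is below; the test module name and the comparison against MAX_CONTEXT_TOKENS are assumptions, not the actual test suite:

# tests/test_config.py -- illustrative sketch, not the repository's actual tests.
from config import MAX_CONTEXT_TOKENS, MAX_OUTPUT_TOKENS


def test_max_output_tokens_matches_gemini_limit():
    # Gemini 2.5 Pro allows up to 32,768 output tokens per response.
    assert MAX_OUTPUT_TOKENS == 32_768


def test_output_limit_fits_within_context_window():
    # The output budget should stay well inside the overall context window.
    assert MAX_OUTPUT_TOKENS < MAX_CONTEXT_TOKENS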