Major improvements to thinking capabilities and API integration: - Remove all output token limits for future-proof responses - Add 5-level thinking mode system: minimal, low, medium, high, max - Migrate from google-generativeai to google-genai library - Implement native thinkingBudget support for Gemini 2.5 Pro - Set medium thinking as default for all tools, max for think_deeper 🧠 Thinking Modes: - minimal (128 tokens) - simple tasks - low (2048 tokens) - basic reasoning - medium (8192 tokens) - default for most tools - high (16384 tokens) - complex analysis - max (32768 tokens) - default for think_deeper 🔧 Technical Changes: - Complete migration to google-genai>=1.19.0 - Remove google-generativeai dependency - Add ThinkingConfig with thinking_budget parameter - Update all tools to support thinking_mode parameter - Comprehensive test suite with 37 passing unit tests - CI-friendly testing (no API key required for unit tests) - Live integration tests for API verification 🧪 Testing & CI: - Add GitHub Actions workflow with multi-Python support - Unit tests use mocks, no API key required - Live integration tests optional with API key - Contributing guide with development setup - All tests pass without external dependencies 🐛 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude <noreply@anthropic.com>
93 lines
3.0 KiB
Python
93 lines
3.0 KiB
Python
"""
|
|
Live integration tests for google-genai library
|
|
These tests require GEMINI_API_KEY to be set and will make real API calls
|
|
|
|
To run these tests manually:
|
|
python tests/test_live_integration.py
|
|
|
|
Note: These tests are excluded from regular pytest runs to avoid API rate limits.
|
|
They confirm that the google-genai library integration works correctly with live data.
|
|
"""
|
|
|
|
import os
|
|
import sys
|
|
import tempfile
|
|
import asyncio
|
|
from pathlib import Path
|
|
|
|
# Add parent directory to path to allow imports
|
|
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
|
|
|
from tools.analyze import AnalyzeTool
|
|
from tools.think_deeper import ThinkDeeperTool
|
|
from tools.review_code import ReviewCodeTool
|
|
from tools.debug_issue import DebugIssueTool
|
|
|
|
|
|
|
|
async def run_manual_live_tests():
|
|
"""Run live tests manually without pytest"""
|
|
print("🚀 Running manual live integration tests...")
|
|
|
|
# Check API key
|
|
if not os.environ.get("GEMINI_API_KEY"):
|
|
print("❌ GEMINI_API_KEY not found. Set it to run live tests.")
|
|
return False
|
|
|
|
try:
|
|
# Test google-genai import
|
|
from google import genai
|
|
from google.genai import types
|
|
print("✅ google-genai library import successful")
|
|
|
|
# Test tool integration
|
|
with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f:
|
|
f.write("def hello(): return 'world'")
|
|
temp_path = f.name
|
|
|
|
try:
|
|
# Test AnalyzeTool
|
|
tool = AnalyzeTool()
|
|
result = await tool.execute({
|
|
"files": [temp_path],
|
|
"question": "What does this code do?",
|
|
"thinking_mode": "low"
|
|
})
|
|
|
|
if result and result[0].text:
|
|
print("✅ AnalyzeTool live test successful")
|
|
else:
|
|
print("❌ AnalyzeTool live test failed")
|
|
return False
|
|
|
|
# Test ThinkDeeperTool
|
|
think_tool = ThinkDeeperTool()
|
|
result = await think_tool.execute({
|
|
"current_analysis": "Testing live integration",
|
|
"thinking_mode": "minimal" # Fast test
|
|
})
|
|
|
|
if result and result[0].text and "Extended Analysis" in result[0].text:
|
|
print("✅ ThinkDeeperTool live test successful")
|
|
else:
|
|
print("❌ ThinkDeeperTool live test failed")
|
|
return False
|
|
|
|
finally:
|
|
Path(temp_path).unlink(missing_ok=True)
|
|
|
|
print("\n🎉 All manual live tests passed!")
|
|
print("✅ google-genai library working correctly")
|
|
print("✅ All tools can make live API calls")
|
|
print("✅ Thinking modes functioning properly")
|
|
return True
|
|
|
|
except Exception as e:
|
|
print(f"❌ Live test failed: {e}")
|
|
return False
|
|
|
|
|
|
if __name__ == "__main__":
|
|
# Run live tests when script is executed directly
|
|
success = asyncio.run(run_manual_live_tests())
|
|
exit(0 if success else 1) |