feat: implement comprehensive thinking modes and migrate to google-genai

Major improvements to thinking capabilities and API integration:

- Remove all output token limits for future-proof responses
- Add 5-level thinking mode system: minimal, low, medium, high, max
- Migrate from google-generativeai to google-genai library
- Implement native thinkingBudget support for Gemini 2.5 Pro
- Set medium thinking as default for all tools, max for think_deeper

🧠 Thinking Modes:
- minimal (128 tokens) - simple tasks
- low (2048 tokens) - basic reasoning
- medium (8192 tokens) - default for most tools
- high (16384 tokens) - complex analysis
- max (32768 tokens) - default for think_deeper
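
A minimal sketch of the mode-to-budget mapping above (the constant names are illustrative, not taken from this repository):

```python
# Illustrative only: mode names and budgets from the list above;
# the constant names are hypothetical, not the repository's actual code.
THINKING_BUDGETS = {
    "minimal": 128,
    "low": 2048,
    "medium": 8192,
    "high": 16384,
    "max": 32768,
}

DEFAULT_THINKING_MODE = "medium"       # default for most tools
THINK_DEEPER_THINKING_MODE = "max"     # default for think_deeper
```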

🔧 Technical Changes:
- Complete migration to google-genai>=1.19.0
- Remove google-generativeai dependency
- Add ThinkingConfig with thinking_budget parameter (see the sketch after this list)
- Update all tools to support thinking_mode parameter
- Comprehensive test suite with 37 passing unit tests
- CI-friendly testing (no API key required for unit tests)
- Live integration tests for API verification
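
For reference, a thinking budget is passed to the new google-genai client roughly as follows. This is a hedged sketch: the model id and the way the tools wire the budget through create_model are assumptions, not code from this diff.

```python
from google import genai
from google.genai import types

client = genai.Client(api_key="YOUR_API_KEY")  # assumption: key comes from env/config

response = client.models.generate_content(
    model="gemini-2.5-pro",  # assumed model id; the repo may pin a dated preview name
    contents="Summarize the trade-offs of optimistic locking.",
    config=types.GenerateContentConfig(
        # thinking_budget maps to the native thinkingBudget field; 8192 = "medium"
        thinking_config=types.ThinkingConfig(thinking_budget=8192),
    ),
)
print(response.text)
```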

🧪 Testing & CI:
- Add GitHub Actions workflow with a test matrix across multiple Python versions
- Unit tests use mocks, no API key required
- Live integration tests optional with API key (see the sketch after this list)
- Contributing guide with development setup
- All tests pass without external dependencies
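
One common way to keep live integration tests optional is to skip them when no key is configured. A sketch under assumptions: the marker name and the GEMINI_API_KEY variable are illustrative, not necessarily what this repo uses.

```python
import os

import pytest

# Sketch: skip live tests whenever no API key is configured, so unit-test-only
# CI runs pass without external dependencies.
requires_api_key = pytest.mark.skipif(
    not os.environ.get("GEMINI_API_KEY"),
    reason="GEMINI_API_KEY not set; skipping live integration test",
)


@requires_api_key
def test_live_generate_content():
    ...  # real call against the Gemini API goes here
```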

🐛 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
Author: Fahad
Date:   2025-06-09 09:35:21 +04:00
Commit: fb5c04ea60 (parent 9d45207d3f)
17 changed files with 813 additions and 171 deletions


@@ -27,19 +27,15 @@ class TestThinkDeeperTool:
         assert schema["required"] == ["current_analysis"]
     @pytest.mark.asyncio
-    @patch("google.generativeai.GenerativeModel")
-    async def test_execute_success(self, mock_model, tool):
+    @patch("tools.base.BaseTool.create_model")
+    async def test_execute_success(self, mock_create_model, tool):
         """Test successful execution"""
-        # Mock response
-        mock_response = Mock()
-        mock_response.candidates = [Mock()]
-        mock_response.candidates[0].content.parts = [
-            Mock(text="Extended analysis")
-        ]
-        mock_instance = Mock()
-        mock_instance.generate_content.return_value = mock_response
-        mock_model.return_value = mock_instance
+        # Mock model
+        mock_model = Mock()
+        mock_model.generate_content.return_value = Mock(
+            candidates=[Mock(content=Mock(parts=[Mock(text="Extended analysis")]))]
+        )
+        mock_create_model.return_value = mock_model
         result = await tool.execute(
             {
@@ -72,23 +68,19 @@ class TestReviewCodeTool:
         assert schema["required"] == ["files"]
     @pytest.mark.asyncio
-    @patch("google.generativeai.GenerativeModel")
-    async def test_execute_with_review_type(self, mock_model, tool, tmp_path):
+    @patch("tools.base.BaseTool.create_model")
+    async def test_execute_with_review_type(self, mock_create_model, tool, tmp_path):
         """Test execution with specific review type"""
         # Create test file
         test_file = tmp_path / "test.py"
         test_file.write_text("def insecure(): pass", encoding="utf-8")
-        # Mock response
-        mock_response = Mock()
-        mock_response.candidates = [Mock()]
-        mock_response.candidates[0].content.parts = [
-            Mock(text="Security issues found")
-        ]
-        mock_instance = Mock()
-        mock_instance.generate_content.return_value = mock_response
-        mock_model.return_value = mock_instance
+        # Mock model
+        mock_model = Mock()
+        mock_model.generate_content.return_value = Mock(
+            candidates=[Mock(content=Mock(parts=[Mock(text="Security issues found")]))]
+        )
+        mock_create_model.return_value = mock_model
         result = await tool.execute(
             {
@@ -122,19 +114,15 @@ class TestDebugIssueTool:
         assert schema["required"] == ["error_description"]
     @pytest.mark.asyncio
-    @patch("google.generativeai.GenerativeModel")
-    async def test_execute_with_context(self, mock_model, tool):
+    @patch("tools.base.BaseTool.create_model")
+    async def test_execute_with_context(self, mock_create_model, tool):
         """Test execution with error context"""
-        # Mock response
-        mock_response = Mock()
-        mock_response.candidates = [Mock()]
-        mock_response.candidates[0].content.parts = [
-            Mock(text="Root cause: race condition")
-        ]
-        mock_instance = Mock()
-        mock_instance.generate_content.return_value = mock_response
-        mock_model.return_value = mock_instance
+        # Mock model
+        mock_model = Mock()
+        mock_model.generate_content.return_value = Mock(
+            candidates=[Mock(content=Mock(parts=[Mock(text="Root cause: race condition")]))]
+        )
+        mock_create_model.return_value = mock_model
         result = await tool.execute(
             {
@@ -168,7 +156,7 @@ class TestAnalyzeTool:
         assert set(schema["required"]) == {"files", "question"}
     @pytest.mark.asyncio
-    @patch("google.generativeai.GenerativeModel")
+    @patch("tools.base.BaseTool.create_model")
     async def test_execute_with_analysis_type(
         self, mock_model, tool, tmp_path
     ):