diff --git a/config.py b/config.py
index 1683fc8..43500cb 100644
--- a/config.py
+++ b/config.py
@@ -12,7 +12,7 @@ import os
 
 # Version and metadata
 # These values are used in server responses and for tracking releases
-__version__ = "2.11.1" # Semantic versioning: MAJOR.MINOR.PATCH
+__version__ = "3.0.0" # Semantic versioning: MAJOR.MINOR.PATCH
 __updated__ = "2025-06-10" # Last update date in ISO format
 __author__ = "Fahad Gilani" # Primary maintainer
 
diff --git a/server.py b/server.py
index 5fa33f1..bacea30 100644
--- a/server.py
+++ b/server.py
@@ -48,10 +48,7 @@ from tools import (
 
 # Configure logging for server operations
 # Set to DEBUG level to capture detailed operational messages for troubleshooting
-logging.basicConfig(
-    level=logging.DEBUG,
-    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
-)
+logging.basicConfig(level=logging.DEBUG, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
 logger = logging.getLogger(__name__)
 
 # Create the MCP server instance with a unique name identifier
@@ -215,7 +212,7 @@ add this JSON block at the very end of your response:
 
 💡 Good follow-up opportunities:
 - "Would you like me to examine the error handling in more detail?"
-- "Should I analyze the performance implications of this approach?" 
+- "Should I analyze the performance implications of this approach?"
 - "Would it be helpful to review the security aspects of this implementation?"
 - "Should I dive deeper into the architecture patterns used here?"
diff --git a/setup.py b/setup.py
index 9e68a8f..37b6745 100644
--- a/setup.py
+++ b/setup.py
@@ -14,7 +14,7 @@ if readme_path.exists():
 
 setup(
     name="gemini-mcp-server",
-    version="2.11.1",
+    version="3.0.0",
     description="Model Context Protocol server for Google Gemini",
     long_description=long_description,
     long_description_content_type="text/markdown",
diff --git a/tests/test_claude_continuation.py b/tests/test_claude_continuation.py
index a41d278..a88da10 100644
--- a/tests/test_claude_continuation.py
+++ b/tests/test_claude_continuation.py
@@ -342,7 +342,7 @@ class TestContinuationIntegration:
         content = "Analysis result"
         continuation_data = {"remaining_turns": 4, "tool_name": "test_continuation"}
 
-        response = self.tool._create_continuation_offer_response(content, continuation_data, request)
+        self.tool._create_continuation_offer_response(content, continuation_data, request)
 
         # Verify thread creation was called (should be called twice: create_thread + add_turn)
         assert mock_client.setex.call_count == 2
diff --git a/tests/test_conversation_memory.py b/tests/test_conversation_memory.py
index 4719c94..6890127 100644
--- a/tests/test_conversation_memory.py
+++ b/tests/test_conversation_memory.py
@@ -379,13 +379,13 @@ class TestConversationFlow:
 
         # Test early conversation (should allow follow-ups)
         early_instructions = get_follow_up_instructions(0, max_turns)
-        assert "FOLLOW-UP CONVERSATIONS" in early_instructions
-        assert f"{max_turns - 1} more exchange" in early_instructions
+        assert "CONVERSATION THREADING" in early_instructions
+        assert f"({max_turns - 1} exchanges remaining)" in early_instructions
 
         # Test mid conversation
         mid_instructions = get_follow_up_instructions(2, max_turns)
-        assert "FOLLOW-UP CONVERSATIONS" in mid_instructions
-        assert f"{max_turns - 3} more exchange" in mid_instructions
+        assert "CONVERSATION THREADING" in mid_instructions
+        assert f"({max_turns - 3} exchanges remaining)" in mid_instructions
 
         # Test approaching limit (should stop follow-ups)
         limit_instructions = get_follow_up_instructions(max_turns - 1, max_turns)
@@ -399,7 +399,7 @@
         # Test with custom max_turns to ensure dynamic behavior
         custom_max = 3
         custom_early = get_follow_up_instructions(0, custom_max)
-        assert f"{custom_max - 1} more exchange" in custom_early
+        assert f"({custom_max - 1} exchanges remaining)" in custom_early
 
         custom_limit = get_follow_up_instructions(custom_max - 1, custom_max)
         assert "Do NOT include any follow-up questions" in custom_limit
@@ -408,7 +408,7 @@
         """Test that follow-up instructions use MAX_CONVERSATION_TURNS when max_turns not provided"""
         instructions = get_follow_up_instructions(0)  # No max_turns parameter
         expected_remaining = MAX_CONVERSATION_TURNS - 1
-        assert f"{expected_remaining} more exchange" in instructions
+        assert f"({expected_remaining} exchanges remaining)" in instructions
 
     @patch("utils.conversation_memory.get_redis_client")
     def test_complete_conversation_with_dynamic_turns(self, mock_redis):
diff --git a/tools/base.py b/tools/base.py
index 3db6ea1..796b65b 100644
--- a/tools/base.py
+++ b/tools/base.py
@@ -383,20 +383,22 @@ If any of these would strengthen your analysis, specify what Claude should searc
         continuation_id = getattr(request, "continuation_id", None)
         if not continuation_id:
             # Import here to avoid circular imports
-            from server import get_follow_up_instructions
             import logging
+            from server import get_follow_up_instructions
+
             follow_up_instructions = get_follow_up_instructions(0)  # New conversation, turn 0
             prompt = f"{prompt}\n\n{follow_up_instructions}"
             logging.debug(f"Added follow-up instructions for new {self.name} conversation")
 
             # Also log to file for debugging MCP issues
             try:
-                with open('/tmp/gemini_debug.log', 'a') as f:
+                with open("/tmp/gemini_debug.log", "a") as f:
                     f.write(f"[{self.name}] Added follow-up instructions for new conversation\n")
             except Exception:
                 pass
         else:
             import logging
+
             logging.debug(f"Continuing {self.name} conversation with thread {continuation_id}")
 
         # Extract model configuration from request or use defaults
@@ -463,10 +465,13 @@ If any of these would strengthen your analysis, specify what Claude should searc
         """
         # Check for follow-up questions in JSON blocks at the end of the response
         follow_up_question = self._extract_follow_up_question(raw_text)
-
+        import logging
+
         if follow_up_question:
-            logging.debug(f"Found follow-up question in {self.name} response: {follow_up_question.get('follow_up_question', 'N/A')}")
+            logging.debug(
+                f"Found follow-up question in {self.name} response: {follow_up_question.get('follow_up_question', 'N/A')}"
+            )
         else:
             logging.debug(f"No follow-up question found in {self.name} response")
 
@@ -499,10 +504,13 @@ If any of these would strengthen your analysis, specify what Claude should searc
 
         # Check if we should offer Claude a continuation opportunity
         continuation_offer = self._check_continuation_opportunity(request)
-
+        import logging
+
         if continuation_offer:
-            logging.debug(f"Creating continuation offer for {self.name} with {continuation_offer['remaining_turns']} turns remaining")
+            logging.debug(
+                f"Creating continuation offer for {self.name} with {continuation_offer['remaining_turns']} turns remaining"
+            )
             return self._create_continuation_offer_response(formatted_content, continuation_offer, request)
         else:
             logging.debug(f"No continuation offer created for {self.name}")
@@ -610,6 +618,7 @@ If any of these would strengthen your analysis, specify what Claude should searc
         except Exception as e:
             # Threading failed, return normal response
             import logging
+
             logging.warning(f"Follow-up threading failed in {self.name}: {str(e)}")
             return ToolOutput(
                 status="success",
@@ -735,6 +744,7 @@ If any of these would strengthen your analysis, specify what Claude should searc
         except Exception as e:
             # If threading fails, return normal response but log the error
             import logging
+
             logging.warning(f"Conversation threading failed in {self.name}: {str(e)}")
             return ToolOutput(
                 status="success",