Conversation threading test fixes
@@ -12,7 +12,7 @@ import os
 
 # Version and metadata
 # These values are used in server responses and for tracking releases
-__version__ = "2.11.1"  # Semantic versioning: MAJOR.MINOR.PATCH
+__version__ = "3.0.0"  # Semantic versioning: MAJOR.MINOR.PATCH
 __updated__ = "2025-06-10"  # Last update date in ISO format
 __author__ = "Fahad Gilani"  # Primary maintainer
 
@@ -48,10 +48,7 @@ from tools import (
 
 # Configure logging for server operations
 # Set to DEBUG level to capture detailed operational messages for troubleshooting
-logging.basicConfig(
-    level=logging.DEBUG,
-    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
-)
+logging.basicConfig(level=logging.DEBUG, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
 logger = logging.getLogger(__name__)
 
 # Create the MCP server instance with a unique name identifier
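
Note: the one-line logging.basicConfig call above is behaviorally identical to the multi-line call it removes; only the formatting changes. A minimal standalone sketch (logger name and message are illustrative, not from this commit) of what that format string produces:

    import logging

    logging.basicConfig(level=logging.DEBUG, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    logger = logging.getLogger("server")
    logger.debug("starting MCP server")
    # emits something like: 2025-06-10 12:00:00,000 - server - DEBUG - starting MCP server
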
@@ -215,7 +212,7 @@ add this JSON block at the very end of your response:
 
 💡 Good follow-up opportunities:
 - "Would you like me to examine the error handling in more detail?"
 - "Should I analyze the performance implications of this approach?"
 - "Would it be helpful to review the security aspects of this implementation?"
 - "Should I dive deeper into the architecture patterns used here?"
 
setup.py
@@ -14,7 +14,7 @@ if readme_path.exists():
 
 setup(
     name="gemini-mcp-server",
-    version="2.11.1",
+    version="3.0.0",
     description="Model Context Protocol server for Google Gemini",
     long_description=long_description,
     long_description_content_type="text/markdown",
@@ -342,7 +342,7 @@ class TestContinuationIntegration:
         content = "Analysis result"
         continuation_data = {"remaining_turns": 4, "tool_name": "test_continuation"}
 
-        response = self.tool._create_continuation_offer_response(content, continuation_data, request)
+        self.tool._create_continuation_offer_response(content, continuation_data, request)
 
         # Verify thread creation was called (should be called twice: create_thread + add_turn)
         assert mock_client.setex.call_count == 2
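
Note: the two expected setex calls above correspond to the thread being created and the first turn being appended, each of which writes the serialized thread back to Redis. A self-contained illustration of that assertion pattern (toy code, not project code; the key, TTL, and payload are made up):

    import json
    from unittest.mock import MagicMock

    mock_client = MagicMock()

    def persist(key, payload):
        # stand-in for the persistence step performed by create_thread / add_turn
        mock_client.setex(key, 3600, json.dumps(payload))

    persist("thread:123", {"turns": []})                   # create_thread writes the empty thread
    persist("thread:123", {"turns": ["Analysis result"]})  # add_turn writes the updated thread
    assert mock_client.setex.call_count == 2
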
@@ -379,13 +379,13 @@ class TestConversationFlow:
 
         # Test early conversation (should allow follow-ups)
         early_instructions = get_follow_up_instructions(0, max_turns)
-        assert "FOLLOW-UP CONVERSATIONS" in early_instructions
-        assert f"{max_turns - 1} more exchange" in early_instructions
+        assert "CONVERSATION THREADING" in early_instructions
+        assert f"({max_turns - 1} exchanges remaining)" in early_instructions
 
         # Test mid conversation
         mid_instructions = get_follow_up_instructions(2, max_turns)
-        assert "FOLLOW-UP CONVERSATIONS" in mid_instructions
-        assert f"{max_turns - 3} more exchange" in mid_instructions
+        assert "CONVERSATION THREADING" in mid_instructions
+        assert f"({max_turns - 3} exchanges remaining)" in mid_instructions
 
         # Test approaching limit (should stop follow-ups)
         limit_instructions = get_follow_up_instructions(max_turns - 1, max_turns)
@@ -399,7 +399,7 @@ class TestConversationFlow:
         # Test with custom max_turns to ensure dynamic behavior
         custom_max = 3
         custom_early = get_follow_up_instructions(0, custom_max)
-        assert f"{custom_max - 1} more exchange" in custom_early
+        assert f"({custom_max - 1} exchanges remaining)" in custom_early
 
         custom_limit = get_follow_up_instructions(custom_max - 1, custom_max)
         assert "Do NOT include any follow-up questions" in custom_limit
@@ -408,7 +408,7 @@ class TestConversationFlow:
         """Test that follow-up instructions use MAX_CONVERSATION_TURNS when max_turns not provided"""
         instructions = get_follow_up_instructions(0)  # No max_turns parameter
         expected_remaining = MAX_CONVERSATION_TURNS - 1
-        assert f"{expected_remaining} more exchange" in instructions
+        assert f"({expected_remaining} exchanges remaining)" in instructions
 
     @patch("utils.conversation_memory.get_redis_client")
     def test_complete_conversation_with_dynamic_turns(self, mock_redis):
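
Note: taken together, the updated assertions pin down the new contract of get_follow_up_instructions: the heading is "CONVERSATION THREADING", the remaining budget is rendered as "(N exchanges remaining)", follow-ups are suppressed once the turn limit is reached, and max_turns defaults to MAX_CONVERSATION_TURNS. A rough sketch of a function that would satisfy these tests (the real wording in server.py is longer; the import location of MAX_CONVERSATION_TURNS is assumed):

    from typing import Optional

    from utils.conversation_memory import MAX_CONVERSATION_TURNS

    def get_follow_up_instructions(current_turn_count: int, max_turns: Optional[int] = None) -> str:
        if max_turns is None:
            max_turns = MAX_CONVERSATION_TURNS
        if current_turn_count >= max_turns - 1:
            # Turn budget exhausted: tell the model not to offer follow-ups
            return "Do NOT include any follow-up questions in your response."
        remaining = max_turns - current_turn_count - 1
        return f"CONVERSATION THREADING: you may offer a follow-up question ({remaining} exchanges remaining)."
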
@@ -383,20 +383,22 @@ If any of these would strengthen your analysis, specify what Claude should searc
         continuation_id = getattr(request, "continuation_id", None)
         if not continuation_id:
             # Import here to avoid circular imports
-            from server import get_follow_up_instructions
             import logging
 
+            from server import get_follow_up_instructions
+
             follow_up_instructions = get_follow_up_instructions(0)  # New conversation, turn 0
             prompt = f"{prompt}\n\n{follow_up_instructions}"
             logging.debug(f"Added follow-up instructions for new {self.name} conversation")
             # Also log to file for debugging MCP issues
             try:
-                with open('/tmp/gemini_debug.log', 'a') as f:
+                with open("/tmp/gemini_debug.log", "a") as f:
                     f.write(f"[{self.name}] Added follow-up instructions for new conversation\n")
             except Exception:
                 pass
         else:
             import logging
+
             logging.debug(f"Continuing {self.name} conversation with thread {continuation_id}")
 
         # Extract model configuration from request or use defaults
@@ -463,10 +465,13 @@ If any of these would strengthen your analysis, specify what Claude should searc
         """
         # Check for follow-up questions in JSON blocks at the end of the response
         follow_up_question = self._extract_follow_up_question(raw_text)
 
         import logging
+
         if follow_up_question:
-            logging.debug(f"Found follow-up question in {self.name} response: {follow_up_question.get('follow_up_question', 'N/A')}")
+            logging.debug(
+                f"Found follow-up question in {self.name} response: {follow_up_question.get('follow_up_question', 'N/A')}"
+            )
         else:
             logging.debug(f"No follow-up question found in {self.name} response")
 
@@ -499,10 +504,13 @@ If any of these would strengthen your analysis, specify what Claude should searc
 
         # Check if we should offer Claude a continuation opportunity
         continuation_offer = self._check_continuation_opportunity(request)
 
         import logging
+
         if continuation_offer:
-            logging.debug(f"Creating continuation offer for {self.name} with {continuation_offer['remaining_turns']} turns remaining")
+            logging.debug(
+                f"Creating continuation offer for {self.name} with {continuation_offer['remaining_turns']} turns remaining"
+            )
             return self._create_continuation_offer_response(formatted_content, continuation_offer, request)
         else:
             logging.debug(f"No continuation offer created for {self.name}")
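
Note: the continuation check above is expected to return either None or a small dict carrying at least remaining_turns and tool_name (the test earlier in this diff passes {"remaining_turns": 4, "tool_name": "test_continuation"}). A speculative sketch of that return shape only, assuming the budget derives from MAX_CONVERSATION_TURNS; the real gating logic may differ:

    from typing import Optional

    from utils.conversation_memory import MAX_CONVERSATION_TURNS

    def _check_continuation_opportunity(self, request) -> Optional[dict]:
        # Assumption: only conversations that are not already threaded get an offer
        if getattr(request, "continuation_id", None):
            return None
        remaining_turns = MAX_CONVERSATION_TURNS - 1  # this response consumes one turn
        if remaining_turns <= 0:
            return None
        return {"remaining_turns": remaining_turns, "tool_name": self.name}
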
@@ -610,6 +618,7 @@ If any of these would strengthen your analysis, specify what Claude should searc
         except Exception as e:
             # Threading failed, return normal response
             import logging
+
             logging.warning(f"Follow-up threading failed in {self.name}: {str(e)}")
             return ToolOutput(
                 status="success",
@@ -735,6 +744,7 @@ If any of these would strengthen your analysis, specify what Claude should searc
         except Exception as e:
             # If threading fails, return normal response but log the error
             import logging
+
             logging.warning(f"Conversation threading failed in {self.name}: {str(e)}")
             return ToolOutput(
                 status="success",