From 9044b63809113047fe678d659e4fcd175f58e87a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=B0=A2=E6=A0=8B=E6=A2=81?=
Date: Wed, 3 Sep 2025 10:55:22 +0800
Subject: [PATCH] fix: resolve missing model_context parameter in consensus
 tool

Fixed a runtime bug where _prepare_file_content_for_prompt was called
without the required model_context parameter, causing a RuntimeError when
processing requests with relevant_files.

- Create a ModelContext instance with model_name in the _consult_model method
- Pass the model_context parameter to the _prepare_file_content_for_prompt call
- Add a regression test to prevent future occurrences
- Maintain the consensus tool's blinded design with independent model contexts
---
 tests/test_consensus.py | 73 +++++++++++++++++++++++++++++++++++++++++
 tools/consensus.py      |  9 ++++-
 2 files changed, 81 insertions(+), 1 deletion(-)

diff --git a/tests/test_consensus.py b/tests/test_consensus.py
index dc1a12e..7f23ccc 100644
--- a/tests/test_consensus.py
+++ b/tests/test_consensus.py
@@ -331,6 +331,79 @@ class TestConsensusTool:
         result = tool.customize_workflow_response(response_data, request)
         assert result["consensus_workflow_status"] == "ready_for_synthesis"
 
+    async def test_consensus_with_relevant_files_model_context_fix(self):
+        """Test that the consensus tool handles relevant_files without a RuntimeError.
+
+        This is a regression test for the bug where _prepare_file_content_for_prompt
+        was called without the model_context parameter, causing RuntimeError:
+        'Model context not provided for file preparation'
+
+        Bug details:
+        - Occurred when the consensus tool processed requests with relevant_files
+        - The _consult_model method called _prepare_file_content_for_prompt without model_context
+        - The method expected a model_context parameter but received None (the default)
+        - Runtime validation in base_tool.py raised a RuntimeError
+        """
+        from unittest.mock import AsyncMock, Mock, patch
+
+        from utils.model_context import ModelContext
+
+        tool = ConsensusTool()
+
+        # Create a mock request with relevant_files (the trigger condition)
+        mock_request = Mock()
+        mock_request.relevant_files = ["/test/file1.py", "/test/file2.js"]
+        mock_request.continuation_id = None
+
+        # Mock model configuration
+        model_config = {"model": "flash", "stance": "neutral"}
+
+        # Mock the provider and model name resolution
+        with (
+            patch.object(tool, "get_model_provider") as mock_get_provider,
+            patch.object(tool, "_prepare_file_content_for_prompt") as mock_prepare_files,
+            patch.object(tool, "_get_stance_enhanced_prompt") as mock_get_prompt,
+            patch.object(tool, "get_name", return_value="consensus"),
+        ):
+            # Set up mocks
+            mock_provider = Mock()
+            mock_provider.generate_content = AsyncMock(return_value={"response": "test response"})
+            mock_get_provider.return_value = mock_provider
+            mock_prepare_files.return_value = ("file content", [])
+            mock_get_prompt.return_value = "system prompt"
+
+            # Set up the tool's attributes that would be set during normal execution
+            tool.original_proposal = "Test proposal"
+
+            try:
+                # This should not raise RuntimeError after the fix.
+                # The method should create a ModelContext and pass it to
+                # _prepare_file_content_for_prompt.
+                await tool._consult_model(model_config, mock_request)
+
+                # Verify that _prepare_file_content_for_prompt was called with model_context
+                mock_prepare_files.assert_called_once()
+                call_args = mock_prepare_files.call_args
+
+                # Check that model_context was passed as a keyword argument
+                assert "model_context" in call_args.kwargs, "model_context should be passed as a keyword argument"
+
+                # Verify the model_context is a proper ModelContext instance
+                model_context = call_args.kwargs["model_context"]
+                assert isinstance(model_context, ModelContext), "model_context should be a ModelContext instance"
+
+                # Verify the model_context properties are correct
+                assert model_context.model_name == "flash"
+                # Note: provider is resolved lazily; conversation_history and tool_name
+                # are not part of the ModelContext constructor in the current implementation.
+
+            except RuntimeError as e:
+                if "Model context not provided" in str(e):
+                    pytest.fail("The model_context fix is not working. RuntimeError still occurs: " + str(e))
+                else:
+                    # Re-raise if it's a different RuntimeError
+                    raise
+
 
 if __name__ == "__main__":
     import unittest
diff --git a/tools/consensus.py b/tools/consensus.py
index cc88697..87d662c 100644
--- a/tools/consensus.py
+++ b/tools/consensus.py
@@ -29,7 +29,6 @@ from mcp.types import TextContent
 from config import TEMPERATURE_ANALYTICAL
 from systemprompts import CONSENSUS_PROMPT
 from tools.shared.base_models import WorkflowRequest
-from utils.model_context import ModelContext
 
 from .workflow.base import WorkflowTool
 
@@ -534,10 +533,18 @@ of the evidence, even when it strongly points in one direction.""",
         # Steps 2+ contain summaries/notes that must NEVER be sent to other models
         prompt = self.original_proposal if self.original_proposal else self.initial_prompt
         if request.relevant_files:
+            # Create a model context for token allocation
+            from utils.model_context import ModelContext
+
+            model_context = ModelContext(
+                model_name=model_name,
+            )
+
             file_content, _ = self._prepare_file_content_for_prompt(
                 request.relevant_files,
                 None,  # Use None instead of request.continuation_id for blinded consensus
                 "Context files",
+                model_context=model_context,
             )
             if file_content:
                 prompt = f"{prompt}\n\n=== CONTEXT FILES ===\n{file_content}\n=== END CONTEXT ==="