Merge pull request #260 from DragonFSKY/fix/consensus-model-context-issue
fix: resolve missing model_context parameter in consensus tool
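For context on the bug this fixes: _prepare_file_content_for_prompt defaults its model_context argument to None, and the base tool refuses to prepare files without one, so any consensus request that included relevant_files crashed with a RuntimeError. Below is a minimal sketch of the failure mode and the fix, using simplified stand-in definitions rather than the project's real classes (the actual logic lives in base_tool.py and utils/model_context.py).

# Simplified stand-ins -- not the actual base_tool.py / utils/model_context.py code.
class ModelContext:
    def __init__(self, model_name: str):
        self.model_name = model_name  # the real class resolves its provider lazily


def prepare_file_content_for_prompt(files, continuation_id, note, model_context=None):
    # The real base tool performs this runtime validation before reading files.
    if model_context is None:
        raise RuntimeError("Model context not provided for file preparation")
    return f"{len(files)} file(s) for {model_context.model_name}", []


# Before the fix: the consensus tool omitted model_context -> RuntimeError above.
# After the fix: it builds the context once and passes it explicitly.
ctx = ModelContext(model_name="flash")
content, processed = prepare_file_content_for_prompt(
    ["/test/file1.py", "/test/file2.js"], None, "Context files", model_context=ctx
)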
@@ -331,6 +331,79 @@ class TestConsensusTool:

        result = tool.customize_workflow_response(response_data, request)

        assert result["consensus_workflow_status"] == "ready_for_synthesis"

    async def test_consensus_with_relevant_files_model_context_fix(self):
        """Test that consensus tool properly handles relevant_files without RuntimeError.

        This is a regression test for the bug where _prepare_file_content_for_prompt
        was called without model_context parameter, causing RuntimeError:
        'Model context not provided for file preparation'

        Bug details:
        - Occurred when consensus tool processed requests with relevant_files
        - _consult_model method called _prepare_file_content_for_prompt without model_context
        - Method expected model_context parameter but got None (default value)
        - Runtime validation in base_tool.py threw RuntimeError
        """
        from unittest.mock import AsyncMock, Mock, patch

        from utils.model_context import ModelContext

        tool = ConsensusTool()

        # Create a mock request with relevant_files (the trigger condition)
        mock_request = Mock()
        mock_request.relevant_files = ["/test/file1.py", "/test/file2.js"]
        mock_request.continuation_id = None

        # Mock model configuration
        model_config = {"model": "flash", "stance": "neutral"}

        # Mock the provider and model name resolution
        with (
            patch.object(tool, "get_model_provider") as mock_get_provider,
            patch.object(tool, "_prepare_file_content_for_prompt") as mock_prepare_files,
            patch.object(tool, "_get_stance_enhanced_prompt") as mock_get_prompt,
            patch.object(tool, "get_name", return_value="consensus"),
        ):
            # Setup mocks
            mock_provider = Mock()
            mock_provider.generate_content = AsyncMock(return_value={"response": "test response"})
            mock_get_provider.return_value = mock_provider
            mock_prepare_files.return_value = ("file content", [])
            mock_get_prompt.return_value = "system prompt"

            # Set up the tool's attributes that would be set during normal execution
            tool.original_proposal = "Test proposal"

            try:
                # This should not raise RuntimeError after the fix
                # The method should create ModelContext and pass it to _prepare_file_content_for_prompt
                await tool._consult_model(model_config, mock_request)

                # Verify that _prepare_file_content_for_prompt was called with model_context
                mock_prepare_files.assert_called_once()
                call_args = mock_prepare_files.call_args

                # Check that model_context was passed as a keyword argument
                assert "model_context" in call_args.kwargs, "model_context should be passed as keyword argument"

                # Verify the model_context is a proper ModelContext instance
                model_context = call_args.kwargs["model_context"]
                assert isinstance(model_context, ModelContext), "model_context should be ModelContext instance"

                # Verify model_context properties are correct
                assert model_context.model_name == "flash"
                # Note: provider is accessed lazily; conversation_history and tool_name
                # are not part of the ModelContext constructor in the current implementation

            except RuntimeError as e:
                if "Model context not provided" in str(e):
                    pytest.fail("The model_context fix is not working. RuntimeError still occurs: " + str(e))
                else:
                    # Re-raise if it's a different RuntimeError
                    raise


if __name__ == "__main__":
    import unittest
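The assertions above lean on unittest.mock's call-introspection API. Here is a standalone illustration of that pattern, independent of the consensus tool, using a bare Mock in place of the patched method:

from unittest.mock import Mock

helper = Mock(return_value=("file content", []))
helper(["/test/file1.py"], None, "Context files", model_context="ctx")

# call_args records the most recent call; .args and .kwargs split it apart.
assert helper.call_args.args == (["/test/file1.py"], None, "Context files")
assert "model_context" in helper.call_args.kwargs
assert helper.call_args.kwargs["model_context"] == "ctx"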
@@ -29,7 +29,6 @@ from mcp.types import TextContent
from config import TEMPERATURE_ANALYTICAL
from systemprompts import CONSENSUS_PROMPT
from tools.shared.base_models import WorkflowRequest
from utils.model_context import ModelContext

from .workflow.base import WorkflowTool

@@ -521,10 +520,16 @@ of the evidence, even when it strongly points in one direction.""",
    async def _consult_model(self, model_config: dict, request) -> dict:
        """Consult a single model and return its response."""
        try:
            # Import and create ModelContext once at the beginning
            from utils.model_context import ModelContext

            # Get the provider for this model
            model_name = model_config["model"]
            provider = self.get_model_provider(model_name)

            # Create model context once and reuse for both file processing and temperature validation
            model_context = ModelContext(model_name=model_name)

            # Prepare the prompt with any relevant files
            # Use continuation_id=None for blinded consensus - each model should only see
            # original prompt + files, not conversation history or other model responses
@@ -536,6 +541,7 @@ of the evidence, even when it strongly points in one direction.""",
                request.relevant_files,
                None,  # Use None instead of request.continuation_id for blinded consensus
                "Context files",
                model_context=model_context,
            )
            if file_content:
                prompt = f"{prompt}\n\n=== CONTEXT FILES ===\n{file_content}\n=== END CONTEXT ==="
@@ -545,9 +551,6 @@ of the evidence, even when it strongly points in one direction.""",
            stance_prompt = model_config.get("stance_prompt")
            system_prompt = self._get_stance_enhanced_prompt(stance, stance_prompt)

            # Get model context for temperature validation
            model_context = ModelContext(model_name=model_name)

            # Validate temperature against model constraints (respects supports_temperature)
            validated_temperature, temp_warnings = self.validate_and_correct_temperature(
                self.get_default_temperature(), model_context
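One design note from these hunks: the file-preparation call deliberately passes None instead of request.continuation_id so the consensus stays blinded, meaning each consulted model sees only the original proposal plus the context files, never the conversation history or other models' responses. An illustrative sketch of what that prompt assembly amounts to (stand-in function, not the tool's real code; only the === CONTEXT FILES === framing is taken from the diff above):

# Illustrative stand-in showing why continuation_id is forced to None.
def build_prompt(original_proposal, file_content, conversation_history=None):
    # With continuation_id=None no history is threaded in, so each consulted
    # model receives only the original proposal plus the context files.
    prompt = original_proposal
    if file_content:
        prompt = f"{prompt}\n\n=== CONTEXT FILES ===\n{file_content}\n=== END CONTEXT ==="
    if conversation_history:
        prompt = f"{conversation_history}\n\n{prompt}"
    return prompt


print(build_prompt("Test proposal", "contents of file1.py"))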