refactor: optimize ModelContext creation in consensus tool

Address code review feedback by creating the ModelContext instance once
at the beginning of the _consult_model method instead of creating it twice.

- Move the ModelContext import to the top of the method
- Create a single ModelContext instance and reuse it for both file processing
  and temperature validation
- Remove the redundant ModelContext creation on line 558
- Improve code clarity and efficiency as suggested by code review
  (see the condensed sketch after this list)
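
The net effect, as a minimal sketch of the refactored method: only ModelContext, _prepare_file_content_for_prompt, and validate_and_correct_temperature appear in the diff below; the model_context keyword argument and the elided error handling are assumptions added for illustration, not part of this commit.

    async def _consult_model(self, model_config: dict, request) -> dict:
        """Consult a single model and return its response."""
        try:
            from utils.model_context import ModelContext

            model_name = model_config["model"]
            provider = self.get_model_provider(model_name)

            # Single shared instance; previously one was built here and a
            # second one later for temperature validation.
            model_context = ModelContext(model_name=model_name)

            if request.relevant_files:
                # Reuse 1: token allocation while embedding file content
                # (model_context= keyword is an assumption; the diff truncates the call)
                file_content, _ = self._prepare_file_content_for_prompt(
                    request.relevant_files,
                    None,  # blinded consensus: no conversation history
                    model_context=model_context,
                )

            # Reuse 2: temperature validation against the same model's constraints
            validated_temperature, temp_warnings = self.validate_and_correct_temperature(
                self.get_default_temperature(), model_context
            )
            ...
        except Exception:
            ...  # error handling elided
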
谢栋梁
2025-09-03 11:03:04 +08:00
parent 9044b63809
commit 30a8952fbc

@@ -522,10 +522,16 @@ of the evidence, even when it strongly points in one direction.""",
     async def _consult_model(self, model_config: dict, request) -> dict:
         """Consult a single model and return its response."""
         try:
+            # Import and create ModelContext once at the beginning
+            from utils.model_context import ModelContext
+
             # Get the provider for this model
             model_name = model_config["model"]
             provider = self.get_model_provider(model_name)
 
+            # Create model context once and reuse for both file processing and temperature validation
+            model_context = ModelContext(model_name=model_name)
+
             # Prepare the prompt with any relevant files
             # Use continuation_id=None for blinded consensus - each model should only see
             # original prompt + files, not conversation history or other model responses
@@ -533,13 +539,6 @@ of the evidence, even when it strongly points in one direction.""",
             # Steps 2+ contain summaries/notes that must NEVER be sent to other models
             prompt = self.original_proposal if self.original_proposal else self.initial_prompt
             if request.relevant_files:
-                # Create a model context for token allocation
-                from utils.model_context import ModelContext
-
-                model_context = ModelContext(
-                    model_name=model_name,
-                )
-
                 file_content, _ = self._prepare_file_content_for_prompt(
                     request.relevant_files,
                     None,  # Use None instead of request.continuation_id for blinded consensus
@@ -554,9 +553,6 @@ of the evidence, even when it strongly points in one direction.""",
             stance_prompt = model_config.get("stance_prompt")
             system_prompt = self._get_stance_enhanced_prompt(stance, stance_prompt)
 
-            # Get model context for temperature validation
-            model_context = ModelContext(model_name=model_name)
-
             # Validate temperature against model constraints (respects supports_temperature)
             validated_temperature, temp_warnings = self.validate_and_correct_temperature(
                 self.get_default_temperature(), model_context