Clear Python cache when running the script: https://github.com/BeehiveInnovations/zen-mcp-server/issues/96
Improved retry error logging
Cleanup
This commit is contained in:
Fahad
2025-06-21 05:56:50 +04:00
parent 76edd30e9a
commit 6fa2d63eac
14 changed files with 141 additions and 154 deletions

View File

@@ -14,9 +14,9 @@ import os
# These values are used in server responses and for tracking releases
# IMPORTANT: This is the single source of truth for version and author info
# Semantic versioning: MAJOR.MINOR.PATCH
__version__ = "5.5.0"
__version__ = "5.5.1"
# Last update date in ISO format
__updated__ = "2025-06-20"
__updated__ = "2025-06-21"
# Primary maintainer
__author__ = "Fahad Gilani"

View File

@@ -214,7 +214,8 @@ class GeminiModelProvider(ModelProvider):
time.sleep(delay)
# If we get here, all retries failed
error_msg = f"Gemini API error for model {resolved_name} after {max_retries} attempts: {str(last_exception)}"
actual_attempts = attempt + 1 # Convert from 0-based index to human-readable count
error_msg = f"Gemini API error for model {resolved_name} after {actual_attempts} attempt{'s' if actual_attempts > 1 else ''}: {str(last_exception)}"
raise RuntimeError(error_msg) from last_exception
def count_tokens(self, text: str, model_name: str) -> int:

View File

@@ -377,7 +377,8 @@ class OpenAICompatibleProvider(ModelProvider):
break
# If we get here, all retries failed
error_msg = f"o3-pro responses endpoint error after {max_retries} attempts: {str(last_exception)}"
actual_attempts = attempt + 1 # Convert from 0-based index to human-readable count
error_msg = f"o3-pro responses endpoint error after {actual_attempts} attempt{'s' if actual_attempts > 1 else ''}: {str(last_exception)}"
logging.error(error_msg)
raise RuntimeError(error_msg) from last_exception
@@ -541,9 +542,8 @@ class OpenAICompatibleProvider(ModelProvider):
time.sleep(delay)
# If we get here, all retries failed
error_msg = (
f"{self.FRIENDLY_NAME} API error for model {model_name} after {max_retries} attempts: {str(last_exception)}"
)
actual_attempts = attempt + 1 # Convert from 0-based index to human-readable count
error_msg = f"{self.FRIENDLY_NAME} API error for model {model_name} after {actual_attempts} attempt{'s' if actual_attempts > 1 else ''}: {str(last_exception)}"
logging.error(error_msg)
raise RuntimeError(error_msg) from last_exception

View File

@@ -56,6 +56,14 @@ get_version() {
grep -E '^__version__ = ' config.py 2>/dev/null | sed 's/__version__ = "\(.*\)"/\1/' || echo "unknown"
}
# Clear Python bytecode caches under the current directory to prevent
# stale-import issues after an upgrade (see issue #96).
clear_python_cache() {
    print_info "Clearing Python cache files..."
    # Remove compiled bytecode files first, then the (now mostly empty)
    # __pycache__ directories. Errors (e.g. permission denied) are
    # suppressed with `|| true` so setup never aborts at this step.
    find . -name "*.pyc" -delete 2>/dev/null || true
    find . -name "__pycache__" -type d -exec rm -rf {} + 2>/dev/null || true
    print_success "Python cache cleared"
}
# ----------------------------------------------------------------------------
# Platform Detection Functions
# ----------------------------------------------------------------------------
@@ -780,12 +788,14 @@ show_help() {
echo " -v, --version Show version information"
echo " -f, --follow Follow server logs in real-time"
echo " -c, --config Show configuration instructions for Claude clients"
echo " --clear-cache Clear Python cache and exit (helpful for import issues)"
echo ""
echo "Examples:"
echo " $0 Setup and start the MCP server"
echo " $0 -f Setup and follow logs"
echo " $0 -c Show configuration instructions"
echo " $0 --version Show version only"
echo " $0 --clear-cache Clear Python cache (fixes import issues)"
echo ""
echo "For more information, visit:"
echo " https://github.com/BeehiveInnovations/zen-mcp-server"
@@ -844,6 +854,14 @@ main() {
-f|--follow)
# Continue with normal setup then follow logs
;;
--clear-cache)
# Clear cache and exit
clear_python_cache
print_success "Cache cleared successfully"
echo ""
echo "You can now run './run-server.sh' normally"
exit 0
;;
"")
# Normal setup without following logs
;;
@@ -873,6 +891,9 @@ main() {
# Step 1: Docker cleanup
cleanup_docker
# Step 1.5: Clear Python cache to prevent import issues
clear_python_cache
# Step 2: Find Python
local python_cmd
python_cmd=$(find_python) || exit 1

View File

@@ -167,7 +167,7 @@ This happens every time a user tries to log in. The error occurs in the password
return False
response1_data = self._parse_debug_response(response1)
if not self._validate_investigation_response(response1_data, 1, True, "investigation_in_progress"):
if not self._validate_investigation_response(response1_data, 1, True, "pause_for_investigation"):
return False
self.logger.info(f" ✅ Step 1 successful, continuation_id: {continuation_id}")
@@ -184,7 +184,7 @@ This happens every time a user tries to log in. The error occurs in the password
"findings": "Missing 'import hashlib' statement at the top of user_auth.py file. The error occurs because hashlib is used in hash_password() method on line 12 but never imported. Simple one-line fix: add 'import hashlib' after line 2.",
"files_checked": [self.error_log_file, self.missing_import_file],
"relevant_files": [self.missing_import_file],
"relevant_methods": ["UserAuth.hash_password", "UserAuth.verify_password"],
"relevant_context": ["UserAuth.hash_password", "UserAuth.verify_password"],
"hypothesis": "Missing 'import hashlib' statement causes NameError when hash_password method executes",
"confidence": "certain", # Use certain - should skip expert analysis
"continuation_id": continuation_id,
@@ -264,7 +264,7 @@ This happens every time a user tries to log in. The error occurs in the password
"findings": "After thorough investigation, identified that the issue is caused by method name typo in Calculator.calculate_total() - calls self.add_number() instead of self.add_numbers(). Simple fix: change line 14 from 'add_number' to 'add_numbers'.",
"files_checked": [self.typo_bug_file],
"relevant_files": [self.typo_bug_file],
"relevant_methods": ["Calculator.calculate_total", "Calculator.add_numbers"],
"relevant_context": ["Calculator.calculate_total", "Calculator.add_numbers"],
"hypothesis": "Method name typo in calculate_total() calls non-existent add_number() instead of add_numbers()",
"confidence": "certain", # Should always be trusted
"model": "flash",
@@ -318,7 +318,7 @@ This happens every time a user tries to log in. The error occurs in the password
"findings": "IndentationError in data_processor.py line 8 - results.append(processed) is incorrectly indented. Should align with the 'if' statement above it.",
"files_checked": [self.indentation_file],
"relevant_files": [self.indentation_file],
"relevant_methods": ["process_data"],
"relevant_context": ["process_data"],
"hypothesis": "Incorrect indentation causes IndentationError in process_data function",
"confidence": "high", # Regular high confidence, NOT certain
"model": "flash",
@@ -400,7 +400,7 @@ This happens every time a user tries to log in. The error occurs in the password
"findings": "Found the issue: line 8 'results.append(processed)' is indented incorrectly. It should align with the 'if' statement, not be at the same level as the 'for' loop.",
"files_checked": [self.indentation_file],
"relevant_files": [self.indentation_file],
"relevant_methods": ["process_data"],
"relevant_context": ["process_data"],
"hypothesis": "Line 8 has incorrect indentation level causing IndentationError",
"confidence": "medium",
"continuation_id": continuation_id,
@@ -423,7 +423,7 @@ This happens every time a user tries to log in. The error occurs in the password
"findings": "Confirmed: line 8 'results.append(processed)' needs to be indented 4 more spaces to align with line 6 'if item > 0:'. This is a simple indentation fix.",
"files_checked": [self.indentation_file],
"relevant_files": [self.indentation_file],
"relevant_methods": ["process_data"],
"relevant_context": ["process_data"],
"hypothesis": "IndentationError on line 8 due to incorrect indentation level - needs 4 more spaces",
"confidence": "certain", # Final step with certain
"continuation_id": continuation_id,
@@ -455,10 +455,10 @@ This happens every time a user tries to log in. The error occurs in the password
self.logger.error("Expected at least 1 step in complete investigation")
return False
# Check that investigation summary includes progression
investigation_summary = complete_investigation.get("investigation_summary", "")
if "Total steps:" not in investigation_summary and "Steps taken:" not in investigation_summary:
self.logger.error("Investigation summary should show steps information")
# Check that work summary includes progression
work_summary = complete_investigation.get("work_summary", "")
if "Total steps:" not in work_summary and "Steps taken:" not in work_summary:
self.logger.error("Work summary should show steps information")
return False
self.logger.info(" ✅ Multi-step investigation with certain ending successful")

View File

@@ -191,7 +191,7 @@ RuntimeError: dictionary changed size during iteration
"findings": "Found the issue: cleanup_expired_sessions modifies self.active_sessions dictionary while iterating over it with .items(). This causes RuntimeError when del is called during iteration.",
"files_checked": [self.error_file, self.buggy_file],
"relevant_files": [self.buggy_file],
"relevant_methods": ["SessionManager.cleanup_expired_sessions"],
"relevant_context": ["SessionManager.cleanup_expired_sessions"],
"hypothesis": "Dictionary is being modified during iteration causing RuntimeError",
"confidence": "high",
"continuation_id": continuation_id,
@@ -212,8 +212,8 @@ RuntimeError: dictionary changed size during iteration
self.logger.error("Files checked count not properly tracked")
return False
if investigation_status.get("relevant_methods", 0) != 1:
self.logger.error("Relevant methods not properly tracked")
if investigation_status.get("relevant_context", 0) != 1:
self.logger.error("Relevant context not properly tracked")
return False
if investigation_status.get("current_confidence") != "high":
@@ -288,7 +288,7 @@ RuntimeError: dictionary changed size during iteration
"findings": "Found inefficient nested loops in data processor causing O(n²) complexity",
"files_checked": ["/processor/algorithm.py"],
"relevant_files": ["/processor/algorithm.py"],
"relevant_methods": ["DataProcessor.process_batch"],
"relevant_context": ["DataProcessor.process_batch"],
"hypothesis": "Inefficient algorithm causing performance issues",
"confidence": "medium",
"backtrack_from_step": 2, # Backtrack from step 2
@@ -331,7 +331,7 @@ RuntimeError: dictionary changed size during iteration
"findings": "Found dictionary modification during iteration",
"files_checked": [self.buggy_file],
"relevant_files": [self.buggy_file],
"relevant_methods": ["SessionManager.cleanup_expired_sessions"],
"relevant_context": ["SessionManager.cleanup_expired_sessions"],
},
)
if not response0 or not continuation_id:
@@ -350,7 +350,7 @@ RuntimeError: dictionary changed size during iteration
"findings": "Root cause identified: del self.active_sessions[session_id] on line 46 modifies dictionary during iteration starting at line 44. Fix: collect expired IDs first, then delete.",
"files_checked": [self.buggy_file],
"relevant_files": [self.buggy_file],
"relevant_methods": ["SessionManager.cleanup_expired_sessions"],
"relevant_context": ["SessionManager.cleanup_expired_sessions"],
"hypothesis": "Dictionary modification during iteration causes RuntimeError in cleanup_expired_sessions",
"confidence": "high",
"continuation_id": continuation_id,
@@ -404,11 +404,11 @@ RuntimeError: dictionary changed size during iteration
return False
complete_investigation = response_final_data["complete_investigation"]
if not complete_investigation.get("relevant_methods"):
self.logger.error("Missing relevant methods in complete investigation")
if not complete_investigation.get("relevant_context"):
self.logger.error("Missing relevant context in complete investigation")
return False
if "SessionManager.cleanup_expired_sessions" not in complete_investigation["relevant_methods"]:
if "SessionManager.cleanup_expired_sessions" not in complete_investigation["relevant_context"]:
self.logger.error("Expected method not found in investigation summary")
return False
@@ -436,7 +436,7 @@ RuntimeError: dictionary changed size during iteration
"findings": "The bug is on line 44-47: for loop iterates over dict.items() while del modifies the dict inside the loop. Fix is simple: collect expired IDs first, then delete after iteration.",
"files_checked": [self.buggy_file],
"relevant_files": [self.buggy_file],
"relevant_methods": ["SessionManager.cleanup_expired_sessions"],
"relevant_context": ["SessionManager.cleanup_expired_sessions"],
"hypothesis": "Dictionary modification during iteration causes RuntimeError - fix is straightforward",
"confidence": "certain", # This should skip expert analysis
"model": "flash",
@@ -604,7 +604,7 @@ def validate_input(data):
"findings": "Initial analysis of data processing components",
"files_checked": [file1, file2],
"relevant_files": [file1], # This should be referenced, not embedded
"relevant_methods": ["process_data"],
"relevant_context": ["process_data"],
"hypothesis": "Investigating data flow",
"confidence": "low",
"model": "flash",
@@ -644,7 +644,7 @@ def validate_input(data):
"findings": "Found potential issues in validation logic",
"files_checked": [file1, file2],
"relevant_files": [file1, file2], # Both files referenced
"relevant_methods": ["process_data", "validate_input"],
"relevant_context": ["process_data", "validate_input"],
"hypothesis": "Validation might be too strict",
"confidence": "medium",
"model": "flash",
@@ -690,7 +690,7 @@ def validate_input(data):
"findings": "Root cause: validator is rejecting valid data due to strict type checking",
"files_checked": [file1, file2],
"relevant_files": [file1, file2], # Should be fully embedded
"relevant_methods": ["process_data", "validate_input"],
"relevant_context": ["process_data", "validate_input"],
"hypothesis": "Validation logic is too restrictive for valid edge cases",
"confidence": "high",
"model": "flash",
@@ -797,7 +797,7 @@ class DatabaseServer:
"findings": "Application fails to start with configuration errors",
"files_checked": [config_file],
"relevant_files": [config_file],
"relevant_methods": [],
"relevant_context": [],
"hypothesis": "Configuration issue causing startup failure",
"confidence": "low",
"model": "flash",
@@ -831,7 +831,7 @@ class DatabaseServer:
"findings": "MAX_CONNECTIONS environment variable contains invalid value, causing CACHE_SIZE calculation to fail",
"files_checked": [config_file, server_file],
"relevant_files": [config_file, server_file],
"relevant_methods": ["DatabaseServer.__init__"],
"relevant_context": ["DatabaseServer.__init__"],
"hypothesis": "Invalid environment variable causing integer conversion error",
"confidence": "medium",
"model": "flash",
@@ -871,7 +871,7 @@ class DatabaseServer:
"findings": "Error occurs in config.py line 8 when MAX_CONNECTIONS is not numeric, then propagates to DatabaseServer.__init__",
"files_checked": [config_file, server_file],
"relevant_files": [config_file, server_file],
"relevant_methods": ["DatabaseServer.__init__"],
"relevant_context": ["DatabaseServer.__init__"],
"hypothesis": "Need proper error handling and validation for environment variables",
"confidence": "high",
"model": "flash",
@@ -905,7 +905,7 @@ class DatabaseServer:
"findings": "Root cause: config.py assumes MAX_CONNECTIONS env var is always a valid integer. Fix: add try/except with default value and proper validation.",
"files_checked": [config_file, server_file],
"relevant_files": [config_file, server_file],
"relevant_methods": ["DatabaseServer.__init__"],
"relevant_context": ["DatabaseServer.__init__"],
"hypothesis": "Environment variable validation needed with proper error handling",
"confidence": "high",
"model": "flash",

View File

@@ -1,16 +0,0 @@
{
"database": {
"host": "localhost",
"port": 5432,
"name": "testdb",
"ssl": true
},
"cache": {
"redis_url": "redis://localhost:6379",
"ttl": 3600
},
"logging": {
"level": "INFO",
"format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
}
}

View File

@@ -1,32 +0,0 @@
"""
Sample Python module for testing MCP conversation continuity
"""
def fibonacci(n):
    """Calculate the n-th Fibonacci number.

    Computed iteratively in O(n) time instead of the naive double
    recursion, which takes O(2**n) calls and blows up for even modest n.
    Preserves the original edge behavior: for n <= 1 (including negative
    inputs) the argument itself is returned.
    """
    if n <= 1:
        return n
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
def factorial(n):
    """Return n! (1 for n <= 1), computed with a simple countdown loop."""
    product = 1
    remaining = n
    while remaining > 1:
        product *= remaining
        remaining -= 1
    return product
class Calculator:
    """A minimal calculator that records each operation it performs."""

    def __init__(self):
        # Chronological, human-readable log of every operation.
        self.history = []

    def add(self, a, b):
        """Return a + b, logging the operation in history."""
        total = a + b
        self.history.append(f"{a} + {b} = {total}")
        return total

    def multiply(self, a, b):
        """Return a * b, logging the operation in history."""
        product = a * b
        self.history.append(f"{a} * {b} = {product}")
        return product

View File

@@ -30,15 +30,14 @@ class TestDebugTool:
findings="Found potential null reference in user authentication flow",
files_checked=["/src/UserService.java"],
relevant_files=["/src/UserService.java"],
relevant_methods=["authenticate", "validateUser"],
relevant_context=["authenticate", "validateUser"],
confidence="medium",
hypothesis="Null pointer occurs when user object is not properly validated",
)
assert step_request.step_number == 1
assert step_request.confidence == "medium"
assert len(step_request.relevant_methods) == 2
assert len(step_request.relevant_context) == 2 # Should be mapped from relevant_methods
assert len(step_request.relevant_context) == 2
def test_input_schema_generation(self):
"""Test that input schema is generated correctly."""
@@ -51,33 +50,31 @@ class TestDebugTool:
assert "total_steps" in schema["properties"]
assert "next_step_required" in schema["properties"]
assert "findings" in schema["properties"]
assert "relevant_methods" in schema["properties"]
assert "relevant_context" in schema["properties"]
# Verify field types
assert schema["properties"]["step"]["type"] == "string"
assert schema["properties"]["step_number"]["type"] == "integer"
assert schema["properties"]["next_step_required"]["type"] == "boolean"
assert schema["properties"]["relevant_methods"]["type"] == "array"
assert schema["properties"]["relevant_context"]["type"] == "array"
def test_model_category_for_debugging(self):
"""Test that debug tool correctly identifies as extended reasoning category."""
tool = DebugIssueTool()
assert tool.get_model_category() == ToolModelCategory.EXTENDED_REASONING
def test_field_mapping_relevant_methods_to_context(self):
"""Test that relevant_methods maps to relevant_context internally."""
def test_relevant_context_handling(self):
"""Test that relevant_context is handled correctly."""
request = DebugInvestigationRequest(
step="Test investigation",
step_number=1,
total_steps=2,
next_step_required=True,
findings="Test findings",
relevant_methods=["method1", "method2"],
relevant_context=["method1", "method2"],
)
# External API should have relevant_methods
assert request.relevant_methods == ["method1", "method2"]
# Internal processing should map to relevant_context
# Should have relevant_context directly
assert request.relevant_context == ["method1", "method2"]
# Test step data preparation

View File

@@ -19,7 +19,7 @@ class TestThinkDeepTool:
def test_tool_metadata(self, tool):
"""Test tool metadata"""
assert tool.get_name() == "thinkdeep"
assert "EXTENDED THINKING" in tool.get_description()
assert "COMPREHENSIVE INVESTIGATION & REASONING" in tool.get_description()
assert tool.get_default_temperature() == 0.7
schema = tool.get_input_schema()

View File

@@ -37,7 +37,8 @@ CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS = {
"step": (
"Describe what you're currently investigating for code review by thinking deeply about the code structure, "
"patterns, and potential issues. In step 1, clearly state your review plan and begin forming a systematic "
"approach after thinking carefully about what needs to be analyzed. CRITICAL: Remember to thoroughly examine "
"approach after thinking carefully about what needs to be analyzed. You must begin by passing the file path "
"for the initial code you are about to review in relevant_files. CRITICAL: Remember to thoroughly examine "
"code quality, security implications, performance concerns, and architectural patterns. Consider not only "
"obvious bugs and issues but also subtle concerns like over-engineering, unnecessary complexity, design "
"patterns that could be simplified, areas where architecture might not scale well, missing abstractions, "

View File

@@ -18,7 +18,7 @@ Key features:
import logging
from typing import TYPE_CHECKING, Any, Optional
from pydantic import Field, model_validator
from pydantic import Field
if TYPE_CHECKING:
from tools.models import ToolModelCategory
@@ -127,9 +127,6 @@ class DebugInvestigationRequest(WorkflowRequest):
relevant_context: list[str] = Field(
default_factory=list, description=DEBUG_INVESTIGATION_FIELD_DESCRIPTIONS["relevant_context"]
)
relevant_methods: list[str] = Field(
default_factory=list, description=DEBUG_INVESTIGATION_FIELD_DESCRIPTIONS["relevant_context"], exclude=True
)
hypothesis: Optional[str] = Field(None, description=DEBUG_INVESTIGATION_FIELD_DESCRIPTIONS["hypothesis"])
confidence: Optional[str] = Field("low", description=DEBUG_INVESTIGATION_FIELD_DESCRIPTIONS["confidence"])
@@ -146,14 +143,6 @@ class DebugInvestigationRequest(WorkflowRequest):
thinking_mode: Optional[str] = Field(default=None, exclude=True)
use_websearch: Optional[bool] = Field(default=None, exclude=True)
@model_validator(mode="after")
def map_relevant_methods_to_context(self):
"""Map relevant_methods from external input to relevant_context for internal processing."""
# If relevant_context is empty but relevant_methods has values, use relevant_methods
if not self.relevant_context and self.relevant_methods:
self.relevant_context = self.relevant_methods[:]
return self
class DebugIssueTool(WorkflowTool):
"""
@@ -261,11 +250,6 @@ class DebugIssueTool(WorkflowTool):
"minimum": 1,
"description": DEBUG_INVESTIGATION_FIELD_DESCRIPTIONS["backtrack_from_step"],
},
"relevant_methods": {
"type": "array",
"items": {"type": "string"},
"description": DEBUG_INVESTIGATION_FIELD_DESCRIPTIONS["relevant_context"],
},
"images": {
"type": "array",
"items": {"type": "string"},
@@ -350,7 +334,7 @@ class DebugIssueTool(WorkflowTool):
if error_context:
context_parts.append(f"\n=== ERROR CONTEXT/STACK TRACE ===\n{error_context}\n=== END CONTEXT ===")
# Add relevant methods if available (map relevant_context back to relevant_methods)
# Add relevant methods/functions if available
if consolidated_findings.relevant_context:
methods_text = "\n".join(f"- {method}" for method in consolidated_findings.relevant_context)
context_parts.append(f"\n=== RELEVANT METHODS/FUNCTIONS ===\n{methods_text}\n=== END METHODS ===")
@@ -466,7 +450,7 @@ class DebugIssueTool(WorkflowTool):
def prepare_step_data(self, request) -> dict:
"""
Map debug-specific fields: relevant_methods -> relevant_context for internal processing.
Prepare debug-specific step data for processing.
"""
step_data = {
"step": request.step,
@@ -603,21 +587,12 @@ class DebugIssueTool(WorkflowTool):
# Rename status field to match debug tool
if f"{tool_name}_status" in response_data:
response_data["investigation_status"] = response_data.pop(f"{tool_name}_status")
# Map relevant_context back to relevant_methods in status
if "relevant_context" in response_data["investigation_status"]:
response_data["investigation_status"]["relevant_methods"] = response_data["investigation_status"].pop(
"relevant_context"
)
# Add debug-specific status fields
response_data["investigation_status"]["hypotheses_formed"] = len(self.consolidated_findings.hypotheses)
# Map relevant_context back to relevant_methods in complete investigation
# Rename complete investigation data
if f"complete_{tool_name}" in response_data:
response_data["complete_investigation"] = response_data.pop(f"complete_{tool_name}")
if "relevant_context" in response_data["complete_investigation"]:
response_data["complete_investigation"]["relevant_methods"] = response_data[
"complete_investigation"
].pop("relevant_context")
# Map the completion flag to match original debug tool
if f"{tool_name}_complete" in response_data:

View File

@@ -139,14 +139,14 @@ class ThinkDeepTool(WorkflowTool):
name = "thinkdeep"
description = (
"EXTENDED THINKING & REASONING - Your deep thinking partner for complex problems. "
"Use this when you need to think deeper about a problem, extend your analysis, explore alternatives, "
"or validate approaches. Perfect for: architecture decisions, complex bugs, performance challenges, "
"security analysis. I'll challenge assumptions, find edge cases, and provide alternative solutions. "
"IMPORTANT: Choose the appropriate thinking_mode based on task complexity - 'low' for quick analysis, "
"COMPREHENSIVE INVESTIGATION & REASONING - Multi-stage workflow for complex problem analysis. "
"Use this when you need structured evidence-based investigation, systematic hypothesis testing, or expert validation. "
"Perfect for: architecture decisions, complex bugs, performance challenges, security analysis. "
"Provides methodical investigation with assumption validation, alternative solution exploration, and rigorous analysis. "
"IMPORTANT: Choose the appropriate mode based on task complexity - 'low' for quick investigation, "
"'medium' for standard problems, 'high' for complex issues (default), 'max' for extremely complex "
"challenges requiring deepest analysis. When in doubt, err on the side of a higher mode for truly "
"deep thought and evaluation. Note: If you're not currently using a top-tier model such as Opus 4 or above, "
"challenges requiring exhaustive investigation. When in doubt, err on the side of a higher mode for thorough "
"systematic analysis and expert validation. Note: If you're not currently using a top-tier model such as Opus 4 or above, "
"these tools can provide enhanced capabilities."
)
@@ -218,11 +218,21 @@ class ThinkDeepTool(WorkflowTool):
Customize the workflow response for thinkdeep-specific needs
"""
# Store request parameters for later use in expert analysis
self.stored_request_params = {
"temperature": getattr(request, "temperature", None),
"thinking_mode": getattr(request, "thinking_mode", None),
"use_websearch": getattr(request, "use_websearch", None),
}
self.stored_request_params = {}
try:
self.stored_request_params["temperature"] = request.temperature
except AttributeError:
self.stored_request_params["temperature"] = None
try:
self.stored_request_params["thinking_mode"] = request.thinking_mode
except AttributeError:
self.stored_request_params["thinking_mode"] = None
try:
self.stored_request_params["use_websearch"] = request.use_websearch
except AttributeError:
self.stored_request_params["use_websearch"] = None
# Add thinking-specific context to response
response_data.update(
@@ -307,8 +317,8 @@ Your role is to validate the thinking process, identify any gaps, challenge assu
additional insights or alternative perspectives.
ANALYSIS SCOPE:
- Problem Context: {getattr(request, 'problem_context', 'General analysis')}
- Focus Areas: {', '.join(getattr(request, 'focus_areas', ['comprehensive analysis']))}
- Problem Context: {self._get_problem_context(request)}
- Focus Areas: {', '.join(self._get_focus_areas(request))}
- Investigation Confidence: {request.confidence}
- Steps Completed: {request.step_number} of {request.total_steps}
@@ -350,22 +360,48 @@ but also acknowledge strong insights and valid conclusions.
def get_request_temperature(self, request) -> float:
"""Use stored temperature from initial request."""
if hasattr(self, "stored_request_params") and self.stored_request_params.get("temperature") is not None:
return self.stored_request_params["temperature"]
try:
stored_params = self.stored_request_params
if stored_params and stored_params.get("temperature") is not None:
return stored_params["temperature"]
except AttributeError:
pass
return super().get_request_temperature(request)
def get_request_thinking_mode(self, request) -> str:
"""Use stored thinking mode from initial request."""
if hasattr(self, "stored_request_params") and self.stored_request_params.get("thinking_mode") is not None:
return self.stored_request_params["thinking_mode"]
try:
stored_params = self.stored_request_params
if stored_params and stored_params.get("thinking_mode") is not None:
return stored_params["thinking_mode"]
except AttributeError:
pass
return super().get_request_thinking_mode(request)
def get_request_use_websearch(self, request) -> bool:
"""Use stored use_websearch from initial request."""
if hasattr(self, "stored_request_params") and self.stored_request_params.get("use_websearch") is not None:
return self.stored_request_params["use_websearch"]
try:
stored_params = self.stored_request_params
if stored_params and stored_params.get("use_websearch") is not None:
return stored_params["use_websearch"]
except AttributeError:
pass
return super().get_request_use_websearch(request)
def _get_problem_context(self, request) -> str:
"""Get problem context from request. Override for custom context handling."""
try:
return request.problem_context or "General analysis"
except AttributeError:
return "General analysis"
def _get_focus_areas(self, request) -> list[str]:
"""Get focus areas from request. Override for custom focus area handling."""
try:
return request.focus_areas or ["comprehensive analysis"]
except AttributeError:
return ["comprehensive analysis"]
def get_required_actions(self, step_number: int, confidence: str, findings: str, total_steps: int) -> list[str]:
"""
Return required actions for the current thinking step.
@@ -413,14 +449,20 @@ but also acknowledge strong insights and valid conclusions.
"""
Determine if expert analysis should be called based on confidence and completion.
"""
if request and hasattr(request, "confidence"):
if request:
try:
# Don't call expert analysis if confidence is "certain"
if request.confidence == "certain":
return False
except AttributeError:
pass
# Call expert analysis if investigation is complete (when next_step_required is False)
if request and hasattr(request, "next_step_required"):
if request:
try:
return not request.next_step_required
except AttributeError:
pass
# Fallback: call expert analysis if we have meaningful findings
return (

View File

@@ -677,8 +677,6 @@ class BaseWorkflowMixin(ABC):
def prepare_step_data(self, request) -> dict:
"""
Prepare step data from request. Tools can override to customize field mapping.
For example, debug tool maps relevant_methods to relevant_context.
"""
step_data = {
"step": request.step,