Clear Python cache when running the script: https://github.com/BeehiveInnovations/zen-mcp-server/issues/96
Improved retry error logging
Cleanup
Fahad
2025-06-21 05:56:50 +04:00
parent 76edd30e9a
commit 6fa2d63eac
14 changed files with 141 additions and 154 deletions
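The run-script change that actually clears the cache is among the 14 changed files but is not shown in the hunks excerpted below. As a rough sketch of the idea only (illustrative function name and paths, not the repository's code), clearing stale bytecode caches before launch could look like this:

```python
# Illustrative sketch: delete stale __pycache__ directories before starting
# the server so edited modules are re-imported fresh (see issue #96).
import pathlib
import shutil

def clear_python_cache(root: str = ".") -> None:
    """Remove every __pycache__ directory under the given root."""
    for cache_dir in pathlib.Path(root).rglob("__pycache__"):
        shutil.rmtree(cache_dir, ignore_errors=True)

if __name__ == "__main__":
    clear_python_cache()
```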

View File

@@ -37,7 +37,8 @@ CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS = {
"step": (
"Describe what you're currently investigating for code review by thinking deeply about the code structure, "
"patterns, and potential issues. In step 1, clearly state your review plan and begin forming a systematic "
"approach after thinking carefully about what needs to be analyzed. CRITICAL: Remember to thoroughly examine "
"approach after thinking carefully about what needs to be analyzed. You must begin by passing the file path "
"for the initial code you are about to review in relevant_files. CRITICAL: Remember to thoroughly examine "
"code quality, security implications, performance concerns, and architectural patterns. Consider not only "
"obvious bugs and issues but also subtle concerns like over-engineering, unnecessary complexity, design "
"patterns that could be simplified, areas where architecture might not scale well, missing abstractions, "

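The sentence added to the step description tells the caller to seed relevant_files in step 1. A hypothetical first-step payload following that instruction (field names drawn from the workflow request fields seen elsewhere in this commit; the exact shape is illustrative, not the documented schema):

```python
# Hypothetical step-1 request for the codereview workflow tool.
step_one_request = {
    "step": "Begin a systematic review of the authentication module.",
    "step_number": 1,
    "total_steps": 3,
    "next_step_required": True,
    "findings": "Initial pass: mapping entry points and code structure.",
    # Per the updated description, the code under review is passed
    # via relevant_files in the very first step.
    "relevant_files": ["/abs/path/to/auth/service.py"],
}
```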
View File

@@ -18,7 +18,7 @@ Key features:
import logging
from typing import TYPE_CHECKING, Any, Optional
from pydantic import Field, model_validator
from pydantic import Field
if TYPE_CHECKING:
from tools.models import ToolModelCategory
@@ -127,9 +127,6 @@ class DebugInvestigationRequest(WorkflowRequest):
relevant_context: list[str] = Field(
default_factory=list, description=DEBUG_INVESTIGATION_FIELD_DESCRIPTIONS["relevant_context"]
)
relevant_methods: list[str] = Field(
default_factory=list, description=DEBUG_INVESTIGATION_FIELD_DESCRIPTIONS["relevant_context"], exclude=True
)
hypothesis: Optional[str] = Field(None, description=DEBUG_INVESTIGATION_FIELD_DESCRIPTIONS["hypothesis"])
confidence: Optional[str] = Field("low", description=DEBUG_INVESTIGATION_FIELD_DESCRIPTIONS["confidence"])
@@ -146,14 +143,6 @@ class DebugInvestigationRequest(WorkflowRequest):
thinking_mode: Optional[str] = Field(default=None, exclude=True)
use_websearch: Optional[bool] = Field(default=None, exclude=True)
@model_validator(mode="after")
def map_relevant_methods_to_context(self):
"""Map relevant_methods from external input to relevant_context for internal processing."""
# If relevant_context is empty but relevant_methods has values, use relevant_methods
if not self.relevant_context and self.relevant_methods:
self.relevant_context = self.relevant_methods[:]
return self
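For reference, the pattern deleted here, accepting the legacy relevant_methods field and copying it into relevant_context via a pydantic model_validator, can be reproduced as a standalone sketch (simplified model, hypothetical class name):

```python
from pydantic import BaseModel, Field, model_validator

class InvestigationRequest(BaseModel):
    relevant_context: list[str] = Field(default_factory=list)
    # Legacy external name, excluded from serialized output.
    relevant_methods: list[str] = Field(default_factory=list, exclude=True)

    @model_validator(mode="after")
    def map_relevant_methods_to_context(self):
        # If the caller only supplied the legacy field, copy it across.
        if not self.relevant_context and self.relevant_methods:
            self.relevant_context = self.relevant_methods[:]
        return self

# Either field name worked for callers; internally only relevant_context was read.
req = InvestigationRequest(relevant_methods=["AuthService.login"])
assert req.relevant_context == ["AuthService.login"]
```

This commit removes the alias entirely, leaving only the relevant_context field.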
class DebugIssueTool(WorkflowTool):
"""
@@ -261,11 +250,6 @@ class DebugIssueTool(WorkflowTool):
"minimum": 1,
"description": DEBUG_INVESTIGATION_FIELD_DESCRIPTIONS["backtrack_from_step"],
},
"relevant_methods": {
"type": "array",
"items": {"type": "string"},
"description": DEBUG_INVESTIGATION_FIELD_DESCRIPTIONS["relevant_context"],
},
"images": {
"type": "array",
"items": {"type": "string"},
@@ -350,7 +334,7 @@ class DebugIssueTool(WorkflowTool):
if error_context:
context_parts.append(f"\n=== ERROR CONTEXT/STACK TRACE ===\n{error_context}\n=== END CONTEXT ===")
# Add relevant methods if available (map relevant_context back to relevant_methods)
# Add relevant methods/functions if available
if consolidated_findings.relevant_context:
methods_text = "\n".join(f"- {method}" for method in consolidated_findings.relevant_context)
context_parts.append(f"\n=== RELEVANT METHODS/FUNCTIONS ===\n{methods_text}\n=== END METHODS ===")
@@ -466,7 +450,7 @@ class DebugIssueTool(WorkflowTool):
def prepare_step_data(self, request) -> dict:
"""
Map debug-specific fields: relevant_methods -> relevant_context for internal processing.
Prepare debug-specific step data for processing.
"""
step_data = {
"step": request.step,
@@ -603,21 +587,12 @@ class DebugIssueTool(WorkflowTool):
# Rename status field to match debug tool
if f"{tool_name}_status" in response_data:
response_data["investigation_status"] = response_data.pop(f"{tool_name}_status")
# Map relevant_context back to relevant_methods in status
if "relevant_context" in response_data["investigation_status"]:
response_data["investigation_status"]["relevant_methods"] = response_data["investigation_status"].pop(
"relevant_context"
)
# Add debug-specific status fields
response_data["investigation_status"]["hypotheses_formed"] = len(self.consolidated_findings.hypotheses)
# Add debug-specific status fields
response_data["investigation_status"]["hypotheses_formed"] = len(self.consolidated_findings.hypotheses)
# Map relevant_context back to relevant_methods in complete investigation
# Rename complete investigation data
if f"complete_{tool_name}" in response_data:
response_data["complete_investigation"] = response_data.pop(f"complete_{tool_name}")
if "relevant_context" in response_data["complete_investigation"]:
response_data["complete_investigation"]["relevant_methods"] = response_data[
"complete_investigation"
].pop("relevant_context")
# Map the completion flag to match original debug tool
if f"{tool_name}_complete" in response_data:

View File

@@ -139,14 +139,14 @@ class ThinkDeepTool(WorkflowTool):
name = "thinkdeep"
description = (
"EXTENDED THINKING & REASONING - Your deep thinking partner for complex problems. "
"Use this when you need to think deeper about a problem, extend your analysis, explore alternatives, "
"or validate approaches. Perfect for: architecture decisions, complex bugs, performance challenges, "
"security analysis. I'll challenge assumptions, find edge cases, and provide alternative solutions. "
"IMPORTANT: Choose the appropriate thinking_mode based on task complexity - 'low' for quick analysis, "
"COMPREHENSIVE INVESTIGATION & REASONING - Multi-stage workflow for complex problem analysis. "
"Use this when you need structured evidence-based investigation, systematic hypothesis testing, or expert validation. "
"Perfect for: architecture decisions, complex bugs, performance challenges, security analysis. "
"Provides methodical investigation with assumption validation, alternative solution exploration, and rigorous analysis. "
"IMPORTANT: Choose the appropriate mode based on task complexity - 'low' for quick investigation, "
"'medium' for standard problems, 'high' for complex issues (default), 'max' for extremely complex "
"challenges requiring deepest analysis. When in doubt, err on the side of a higher mode for truly "
"deep thought and evaluation. Note: If you're not currently using a top-tier model such as Opus 4 or above, "
"challenges requiring exhaustive investigation. When in doubt, err on the side of a higher mode for thorough "
"systematic analysis and expert validation. Note: If you're not currently using a top-tier model such as Opus 4 or above, "
"these tools can provide enhanced capabilities."
)
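The rewritten description keeps the mode-selection guidance. Purely as an illustration (hypothetical payload, not the tool's documented schema), a caller facing an unusually hard problem would pick the deepest mode:

```python
# Hypothetical thinkdeep invocation choosing a mode per the guidance above.
thinkdeep_request = {
    "step": "Evaluate migrating the session store from Redis to Postgres.",
    "step_number": 1,
    "total_steps": 2,
    "next_step_required": True,
    "findings": "Collecting constraints: latency budget, failover behaviour.",
    "thinking_mode": "max",  # extremely complex challenge, exhaustive investigation
}
```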
@@ -218,11 +218,21 @@ class ThinkDeepTool(WorkflowTool):
Customize the workflow response for thinkdeep-specific needs
"""
# Store request parameters for later use in expert analysis
self.stored_request_params = {
"temperature": getattr(request, "temperature", None),
"thinking_mode": getattr(request, "thinking_mode", None),
"use_websearch": getattr(request, "use_websearch", None),
}
self.stored_request_params = {}
try:
self.stored_request_params["temperature"] = request.temperature
except AttributeError:
self.stored_request_params["temperature"] = None
try:
self.stored_request_params["thinking_mode"] = request.thinking_mode
except AttributeError:
self.stored_request_params["thinking_mode"] = None
try:
self.stored_request_params["use_websearch"] = request.use_websearch
except AttributeError:
self.stored_request_params["use_websearch"] = None
# Add thinking-specific context to response
response_data.update(
@@ -307,8 +317,8 @@ Your role is to validate the thinking process, identify any gaps, challenge assu
additional insights or alternative perspectives.
ANALYSIS SCOPE:
- Problem Context: {getattr(request, 'problem_context', 'General analysis')}
- Focus Areas: {', '.join(getattr(request, 'focus_areas', ['comprehensive analysis']))}
- Problem Context: {self._get_problem_context(request)}
- Focus Areas: {', '.join(self._get_focus_areas(request))}
- Investigation Confidence: {request.confidence}
- Steps Completed: {request.step_number} of {request.total_steps}
@@ -350,22 +360,48 @@ but also acknowledge strong insights and valid conclusions.
def get_request_temperature(self, request) -> float:
"""Use stored temperature from initial request."""
if hasattr(self, "stored_request_params") and self.stored_request_params.get("temperature") is not None:
return self.stored_request_params["temperature"]
try:
stored_params = self.stored_request_params
if stored_params and stored_params.get("temperature") is not None:
return stored_params["temperature"]
except AttributeError:
pass
return super().get_request_temperature(request)
def get_request_thinking_mode(self, request) -> str:
"""Use stored thinking mode from initial request."""
if hasattr(self, "stored_request_params") and self.stored_request_params.get("thinking_mode") is not None:
return self.stored_request_params["thinking_mode"]
try:
stored_params = self.stored_request_params
if stored_params and stored_params.get("thinking_mode") is not None:
return stored_params["thinking_mode"]
except AttributeError:
pass
return super().get_request_thinking_mode(request)
def get_request_use_websearch(self, request) -> bool:
"""Use stored use_websearch from initial request."""
if hasattr(self, "stored_request_params") and self.stored_request_params.get("use_websearch") is not None:
return self.stored_request_params["use_websearch"]
try:
stored_params = self.stored_request_params
if stored_params and stored_params.get("use_websearch") is not None:
return stored_params["use_websearch"]
except AttributeError:
pass
return super().get_request_use_websearch(request)
def _get_problem_context(self, request) -> str:
"""Get problem context from request. Override for custom context handling."""
try:
return request.problem_context or "General analysis"
except AttributeError:
return "General analysis"
def _get_focus_areas(self, request) -> list[str]:
"""Get focus areas from request. Override for custom focus area handling."""
try:
return request.focus_areas or ["comprehensive analysis"]
except AttributeError:
return ["comprehensive analysis"]
def get_required_actions(self, step_number: int, confidence: str, findings: str, total_steps: int) -> list[str]:
"""
Return required actions for the current thinking step.
@@ -413,14 +449,20 @@ but also acknowledge strong insights and valid conclusions.
"""
Determine if expert analysis should be called based on confidence and completion.
"""
if request and hasattr(request, "confidence"):
# Don't call expert analysis if confidence is "certain"
if request.confidence == "certain":
return False
if request:
try:
# Don't call expert analysis if confidence is "certain"
if request.confidence == "certain":
return False
except AttributeError:
pass
# Call expert analysis if investigation is complete (when next_step_required is False)
if request and hasattr(request, "next_step_required"):
return not request.next_step_required
if request:
try:
return not request.next_step_required
except AttributeError:
pass
# Fallback: call expert analysis if we have meaningful findings
return (
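Throughout this file the commit replaces hasattr()/getattr(..., default) checks with explicit try/except AttributeError blocks. A condensed, standalone sketch of that access pattern (hypothetical helper, not the repository's code):

```python
# Illustrative compression of the pattern above: read optional request
# attributes explicitly and fall back only when AttributeError is raised,
# instead of relying on hasattr()/getattr() defaults.
def read_stored_params(request) -> dict:
    params = {}
    for name in ("temperature", "thinking_mode", "use_websearch"):
        try:
            params[name] = getattr(request, name)  # no default: missing -> AttributeError
        except AttributeError:
            params[name] = None
    return params
```

The diff spells this out field by field rather than looping, which keeps each fallback explicit at the cost of some repetition.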

View File

@@ -677,8 +677,6 @@ class BaseWorkflowMixin(ABC):
def prepare_step_data(self, request) -> dict:
"""
Prepare step data from request. Tools can override to customize field mapping.
For example, debug tool maps relevant_methods to relevant_context.
"""
step_data = {
"step": request.step,