"""
|
|
CodeReview Workflow tool - Systematic code review with step-by-step analysis
|
|
|
|
This tool provides a structured workflow for comprehensive code review and analysis.
|
|
It guides the CLI agent through systematic investigation steps with forced pauses between each step
|
|
to ensure thorough code examination, issue identification, and quality assessment before proceeding.
|
|
The tool supports complex review scenarios including security analysis, performance evaluation,
|
|
and architectural assessment.
|
|
|
|
Key features:
|
|
- Step-by-step code review workflow with progress tracking
|
|
- Context-aware file embedding (references during investigation, full content for analysis)
|
|
- Automatic issue tracking with severity classification
|
|
- Expert analysis integration with external models
|
|
- Support for focused reviews (security, performance, architecture)
|
|
- Confidence-based workflow optimization
|
|
"""

import logging
from typing import TYPE_CHECKING, Any, Literal, Optional

from pydantic import Field, model_validator

if TYPE_CHECKING:
    from tools.models import ToolModelCategory

from config import TEMPERATURE_ANALYTICAL
from systemprompts import CODEREVIEW_PROMPT
from tools.shared.base_models import WorkflowRequest

from .workflow.base import WorkflowTool

logger = logging.getLogger(__name__)

# Tool-specific field descriptions for code review workflow
CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS = {
    "step": (
        "Write your review plan as a technical brief to another engineer. Use direct statements: 'I will examine code structure...' NOT 'Let me examine...'. "
        "Step 1: State review strategy and begin forming a systematic approach after thinking carefully about what needs to be analyzed. "
        "Later steps: Report findings with precision. "
        "MANDATORY: Thoroughly examine code quality, security implications, performance concerns, and architectural patterns. "
        "MANDATORY: Consider not only obvious bugs and issues but also subtle concerns like over-engineering, unnecessary complexity, "
        "design patterns that could be simplified, areas where architecture might not scale well, missing abstractions, "
        "and ways to reduce complexity while maintaining functionality. "
        "MANDATORY: Use relevant_files parameter for code files. "
        "FORBIDDEN: Large code snippets in this field - use only function/method names when needed."
    ),
    "step_number": (
        "The index of the current step in the code review sequence, beginning at 1. Each step should build upon or "
        "revise the previous one."
    ),
    "total_steps": (
        "Your current estimate for how many steps will be needed to complete the code review. "
        "IMPORTANT: When continuation_id is provided with external validation, set this to 2 maximum "
        "(step 1: quick review, step 2: complete). For internal validation continuations, set to 1 as "
        "we're not starting a new multi-step investigation."
    ),
    "next_step_required": (
        "Set to true if you plan to continue the investigation with another step. False means you believe the "
        "code review analysis is complete and ready for expert validation. CRITICAL: For external continuations, "
        "set to True on step 1, then False on step 2 to trigger expert analysis. For internal continuations, "
        "set to False to complete immediately."
    ),
    "findings": (
        "Summarize everything discovered in this step about the code being reviewed. Include analysis of code quality, "
        "security concerns, performance issues, architectural patterns, design decisions, potential bugs, code smells, "
        "and maintainability considerations. Be specific and avoid vague language—document what you now know about "
        "the code and how it affects your assessment. IMPORTANT: Document both positive findings (good patterns, "
        "proper implementations, well-designed components) and concerns (potential issues, anti-patterns, security "
        "risks, performance bottlenecks). In later steps, confirm or update past findings with additional evidence."
    ),
    "files_checked": (
        "List all files (as absolute paths, do not clip or shrink file names) examined during the code review "
        "investigation so far. Include even files ruled out or found to be unrelated, as this tracks your "
        "exploration path."
    ),
    "relevant_files": (
"For when this is the first step, please pass absolute file paths of relevant code to review (do not clip "
        "file paths). When used for the final step, this contains a subset of files_checked (as full absolute paths) "
        "that contain code directly relevant to the review or contain significant issues, patterns, or examples worth "
        "highlighting. Only list those that are directly tied to important findings, security concerns, performance "
        "issues, or architectural decisions. This could include core implementation files, configuration files, or "
        "files with notable patterns."
    ),
    "relevant_context": (
        "List methods, functions, classes, or modules that are central to the code review findings, in the format "
        "'ClassName.methodName', 'functionName', or 'module.ClassName'. Prioritize those that contain issues, "
        "demonstrate patterns, show security concerns, or represent key architectural decisions."
    ),
    "issues_found": (
        "List of issues identified during the investigation. Each issue should be a dictionary with 'severity' "
        "(critical, high, medium, low) and 'description' fields. Include security vulnerabilities, performance "
        "bottlenecks, code quality issues, architectural concerns, maintainability problems, over-engineering, "
        "unnecessary complexity, etc."
    ),
    "review_validation_type": (
        "Type of code review validation to perform: 'external' (default - uses external model for validation) or "
        "'internal' (performs validation without external model review). IMPORTANT: Always default to 'external' unless "
        "the user explicitly requests internal-only validation or asks you not to use another model. External validation "
        "provides additional expert review and should be the standard approach for comprehensive code review."
    ),
    "backtrack_from_step": (
        "If an earlier finding or assessment needs to be revised or discarded, specify the step number from which to "
        "start over. Use this to acknowledge investigative dead ends and correct the course."
    ),
    "images": (
        "Optional list of absolute paths to architecture diagrams, UI mockups, design documents, or visual references "
        "that help with code review context. Only include if they materially assist understanding or assessment."
    ),
    "review_type": "Type of review to perform (full, security, performance, quick)",
    "focus_on": "Specific aspects to focus on or additional context that would help understand areas of concern",
    "standards": "Coding standards to enforce during the review",
    "severity_filter": "Minimum severity level to report on the issues found",
}
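
# A minimal sketch of the issues_found entry shape described above (values are
# illustrative, not taken from a real review):
#
#   {"severity": "high", "description": "User input is concatenated into a SQL string in the query builder"}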


class CodeReviewRequest(WorkflowRequest):
    """Request model for code review workflow investigation steps"""

    # Required fields for each investigation step
    step: str = Field(..., description=CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS["step"])
    step_number: int = Field(..., description=CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS["step_number"])
    total_steps: int = Field(..., description=CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS["total_steps"])
    next_step_required: bool = Field(..., description=CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS["next_step_required"])

    # Investigation tracking fields
    findings: str = Field(..., description=CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS["findings"])
    files_checked: list[str] = Field(
        default_factory=list, description=CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS["files_checked"]
    )
    relevant_files: list[str] = Field(
        default_factory=list, description=CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS["relevant_files"]
    )
    relevant_context: list[str] = Field(
        default_factory=list, description=CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS["relevant_context"]
    )
    issues_found: list[dict] = Field(
        default_factory=list, description=CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS["issues_found"]
    )
    # Deprecated confidence field kept for backward compatibility only
    confidence: Optional[str] = Field("low", exclude=True)
    review_validation_type: Optional[Literal["external", "internal"]] = Field(
        "external", description=CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS.get("review_validation_type", "")
    )

    # Optional backtracking field
    backtrack_from_step: Optional[int] = Field(
        None, description=CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS["backtrack_from_step"]
    )

    # Optional images for visual context
    images: Optional[list[str]] = Field(default=None, description=CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS["images"])

    # Code review-specific fields (only used in step 1 to initialize)
    review_type: Optional[Literal["full", "security", "performance", "quick"]] = Field(
        "full", description=CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS["review_type"]
    )
    focus_on: Optional[str] = Field(None, description=CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS["focus_on"])
    standards: Optional[str] = Field(None, description=CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS["standards"])
    severity_filter: Optional[Literal["critical", "high", "medium", "low", "all"]] = Field(
        "all", description=CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS["severity_filter"]
    )

    # Override inherited fields to exclude them from schema (except model which needs to be available)
    temperature: Optional[float] = Field(default=None, exclude=True)
    thinking_mode: Optional[str] = Field(default=None, exclude=True)
    use_websearch: Optional[bool] = Field(default=None, exclude=True)

    @model_validator(mode="after")
    def validate_step_one_requirements(self):
        """Ensure step 1 has required relevant_files field."""
        if self.step_number == 1 and not self.relevant_files:
            raise ValueError("Step 1 requires 'relevant_files' field to specify code files or directories to review")
        return self
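
    # A minimal step-1 request, as a sketch (the path is a placeholder; step 1
    # must supply relevant_files per the validator above, and list fields
    # default to empty):
    #
    #   CodeReviewRequest(
    #       step="I will examine the module structure, error handling, and security posture.",
    #       step_number=1,
    #       total_steps=3,
    #       next_step_required=True,
    #       findings="Initial survey pending.",
    #       relevant_files=["/abs/path/to/module.py"],
    #   )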


class CodeReviewTool(WorkflowTool):
    """
    Code Review workflow tool for step-by-step code review and expert analysis.

    This tool implements a structured code review workflow that guides users through
    methodical investigation steps, ensuring thorough code examination, issue identification,
    and quality assessment before reaching conclusions. It supports complex review scenarios
    including security audits, performance analysis, architectural review, and maintainability assessment.
    """

    def __init__(self):
        super().__init__()
        self.initial_request = None
        self.review_config = {}

    def get_name(self) -> str:
        return "codereview"

    def get_description(self) -> str:
        return (
            "COMPREHENSIVE CODE REVIEW WORKFLOW - Step-by-step code review with expert analysis. "
            "This tool guides you through a systematic investigation process where you:\n\n"
            "1. Start with step 1: describe your code review investigation plan\n"
            "2. STOP and investigate code structure, patterns, and potential issues\n"
            "3. Report findings in step 2 with concrete evidence from actual code analysis\n"
            "4. Continue investigating between each step\n"
            "5. Track findings, relevant files, and issues throughout\n"
            "6. Update assessments as understanding evolves\n"
            "7. Once investigation is complete, receive expert analysis\n\n"
            "IMPORTANT: This tool enforces investigation between steps:\n"
            "- After each call, you MUST investigate before calling again\n"
            "- Each step must include NEW evidence from code examination\n"
            "- No recursive calls without actual investigation work\n"
            "- The tool will specify which step number to use next\n"
            "- Follow the required_actions list for investigation guidance\n\n"
            "Perfect for: comprehensive code review, security audits, performance analysis, "
            "architectural assessment, code quality evaluation, anti-pattern detection."
        )

    def get_system_prompt(self) -> str:
        return CODEREVIEW_PROMPT

    def get_default_temperature(self) -> float:
        return TEMPERATURE_ANALYTICAL

    def get_model_category(self) -> "ToolModelCategory":
        """Code review requires thorough analysis and reasoning"""
        from tools.models import ToolModelCategory

        return ToolModelCategory.EXTENDED_REASONING

    def get_workflow_request_model(self):
        """Return the code review workflow-specific request model."""
        return CodeReviewRequest

    def get_input_schema(self) -> dict[str, Any]:
        """Generate input schema using WorkflowSchemaBuilder with code review-specific overrides."""
        from .workflow.schema_builders import WorkflowSchemaBuilder

        # Code review workflow-specific field overrides
        codereview_field_overrides = {
            "step": {
                "type": "string",
                "description": CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS["step"],
            },
            "step_number": {
                "type": "integer",
                "minimum": 1,
                "description": CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS["step_number"],
            },
            "total_steps": {
                "type": "integer",
                "minimum": 1,
                "description": CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS["total_steps"],
            },
            "next_step_required": {
                "type": "boolean",
                "description": CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS["next_step_required"],
            },
            "findings": {
                "type": "string",
                "description": CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS["findings"],
            },
            "files_checked": {
                "type": "array",
                "items": {"type": "string"},
                "description": CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS["files_checked"],
            },
            "relevant_files": {
                "type": "array",
                "items": {"type": "string"},
                "description": CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS["relevant_files"],
            },
            "review_validation_type": {
                "type": "string",
                "enum": ["external", "internal"],
                "default": "external",
                "description": CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS.get("review_validation_type", ""),
            },
            "backtrack_from_step": {
                "type": "integer",
                "minimum": 1,
                "description": CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS["backtrack_from_step"],
            },
            "issues_found": {
                "type": "array",
                "items": {"type": "object"},
                "description": CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS["issues_found"],
            },
            "images": {
                "type": "array",
                "items": {"type": "string"},
                "description": CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS["images"],
            },
            # Code review-specific fields (for step 1)
            "review_type": {
                "type": "string",
                "enum": ["full", "security", "performance", "quick"],
                "default": "full",
                "description": CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS["review_type"],
            },
            "focus_on": {
                "type": "string",
                "description": CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS["focus_on"],
            },
            "standards": {
                "type": "string",
                "description": CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS["standards"],
            },
            "severity_filter": {
                "type": "string",
                "enum": ["critical", "high", "medium", "low", "all"],
                "default": "all",
                "description": CODEREVIEW_WORKFLOW_FIELD_DESCRIPTIONS["severity_filter"],
            },
        }

        # Use WorkflowSchemaBuilder with code review-specific tool fields
        return WorkflowSchemaBuilder.build_schema(
            tool_specific_fields=codereview_field_overrides,
            model_field_schema=self.get_model_field_schema(),
            auto_mode=self.is_effective_auto_mode(),
            tool_name=self.get_name(),
        )
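
    # A sketch of the arguments this schema accepts on a mid-review call
    # (values are illustrative; the exact shape is the overrides above merged
    # with the base workflow fields by WorkflowSchemaBuilder):
    #
    #   {"step": "I will now analyze the authentication module...", "step_number": 2,
    #    "total_steps": 3, "next_step_required": true, "findings": "...",
    #    "review_type": "security", "severity_filter": "high"}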

    def get_required_actions(
        self, step_number: int, confidence: str, findings: str, total_steps: int, request=None
    ) -> list[str]:
        """Define required actions for each investigation phase.

        Now includes request parameter for continuation-aware decisions.
        """
        # Check for continuation - fast track mode
        if request:
            continuation_id = self.get_request_continuation_id(request)
            validation_type = self.get_review_validation_type(request)
            if continuation_id and validation_type == "external":
                if step_number == 1:
                    return [
                        "Quickly review the code files to understand context",
                        "Identify any critical issues that need immediate attention",
                        "Note main architectural patterns and design decisions",
                        "Prepare summary of key findings for expert validation",
                    ]
                else:
                    return ["Complete review and proceed to expert analysis"]

        if step_number == 1:
            # Initial code review investigation tasks
            return [
                "Read and understand the code files specified for review",
                "Examine the overall structure, architecture, and design patterns used",
                "Identify the main components, classes, and functions in the codebase",
                "Understand the business logic and intended functionality",
                "Look for obvious issues: bugs, security concerns, performance problems",
                "Note any code smells, anti-patterns, or areas of concern",
            ]
        elif step_number == 2:
            # Deeper investigation for step 2
            return [
                "Examine specific code sections you've identified as concerning",
                "Analyze security implications: input validation, authentication, authorization",
                "Check for performance issues: algorithmic complexity, resource usage, inefficiencies",
                "Look for architectural problems: tight coupling, missing abstractions, scalability issues",
                "Identify code quality issues: readability, maintainability, error handling",
                "Search for over-engineering, unnecessary complexity, or design patterns that could be simplified",
            ]
        elif step_number >= 3:
            # Final verification for later steps
            return [
                "Verify all identified issues have been properly documented with severity levels",
                "Check for any missed critical security vulnerabilities or performance bottlenecks",
                "Confirm that architectural concerns and code quality issues are comprehensively captured",
                "Ensure positive aspects and well-implemented patterns are also noted",
                "Validate that your assessment aligns with the review type and focus areas specified",
                "Double-check that findings are actionable and provide clear guidance for improvements",
            ]
        else:
            # General investigation needed
            return [
                "Continue examining the codebase for additional patterns and potential issues",
                "Gather more evidence using appropriate code analysis techniques",
                "Test your assumptions about code behavior and design decisions",
                "Look for patterns that confirm or refute your current assessment",
                "Focus on areas that haven't been thoroughly examined yet",
            ]

    def should_call_expert_analysis(self, consolidated_findings, request=None) -> bool:
        """
        Decide when to call external model based on investigation completeness.

        For continuations with external type, always proceed with expert analysis.
        """
        # Check if user requested to skip assistant model
        if request and not self.get_request_use_assistant_model(request):
            return False

        # For continuations with external type, always proceed with expert analysis
        continuation_id = self.get_request_continuation_id(request)
        validation_type = self.get_review_validation_type(request)
        if continuation_id and validation_type == "external":
            return True  # Always perform expert analysis for external continuations

        # Check if we have meaningful investigation data
        return (
            len(consolidated_findings.relevant_files) > 0
            or len(consolidated_findings.findings) >= 2
            or len(consolidated_findings.issues_found) > 0
        )

    def prepare_expert_analysis_context(self, consolidated_findings) -> str:
        """Prepare context for external model call for final code review validation."""
        context_parts = [
            f"=== CODE REVIEW REQUEST ===\n{self.initial_request or 'Code review workflow initiated'}\n=== END REQUEST ==="
        ]

        # Add investigation summary
        investigation_summary = self._build_code_review_summary(consolidated_findings)
        context_parts.append(
            f"\n=== AGENT'S CODE REVIEW INVESTIGATION ===\n{investigation_summary}\n=== END INVESTIGATION ==="
        )

        # Add review configuration context if available
        if self.review_config:
            config_text = "\n".join(f"- {key}: {value}" for key, value in self.review_config.items() if value)
            context_parts.append(f"\n=== REVIEW CONFIGURATION ===\n{config_text}\n=== END CONFIGURATION ===")

        # Add relevant code elements if available
        if consolidated_findings.relevant_context:
            methods_text = "\n".join(f"- {method}" for method in consolidated_findings.relevant_context)
            context_parts.append(f"\n=== RELEVANT CODE ELEMENTS ===\n{methods_text}\n=== END CODE ELEMENTS ===")

        # Add issues found if available
        if consolidated_findings.issues_found:
            issues_text = "\n".join(
                f"[{issue.get('severity', 'unknown').upper()}] {issue.get('description', 'No description')}"
                for issue in consolidated_findings.issues_found
            )
            context_parts.append(f"\n=== ISSUES IDENTIFIED ===\n{issues_text}\n=== END ISSUES ===")

        # Add assessment evolution if available
        if consolidated_findings.hypotheses:
            assessments_text = "\n".join(
                f"Step {h['step']} ({h['confidence']} confidence): {h['hypothesis']}"
                for h in consolidated_findings.hypotheses
            )
            context_parts.append(f"\n=== ASSESSMENT EVOLUTION ===\n{assessments_text}\n=== END ASSESSMENTS ===")

        # Add images if available
        if consolidated_findings.images:
            images_text = "\n".join(f"- {img}" for img in consolidated_findings.images)
            context_parts.append(
                f"\n=== VISUAL REVIEW INFORMATION ===\n{images_text}\n=== END VISUAL INFORMATION ==="
            )

        return "\n".join(context_parts)

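    # A sketch of the context assembled by prepare_expert_analysis_context()
    # above: delimited sections joined with newlines, with the optional
    # sections (configuration, code elements, issues, assessments, images)
    # appearing only when data is present:
    #
    #   === CODE REVIEW REQUEST === ... === END REQUEST ===
    #   === AGENT'S CODE REVIEW INVESTIGATION === ... === END INVESTIGATION ===
    #   === ISSUES IDENTIFIED ===
    #   [HIGH] <description>
    #   === END ISSUES ===
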
    def _build_code_review_summary(self, consolidated_findings) -> str:
        """Prepare a comprehensive summary of the code review investigation."""
        summary_parts = [
            "=== SYSTEMATIC CODE REVIEW INVESTIGATION SUMMARY ===",
            f"Total steps: {len(consolidated_findings.findings)}",
            f"Files examined: {len(consolidated_findings.files_checked)}",
            f"Relevant files identified: {len(consolidated_findings.relevant_files)}",
            f"Code elements analyzed: {len(consolidated_findings.relevant_context)}",
            f"Issues identified: {len(consolidated_findings.issues_found)}",
            "",
            "=== INVESTIGATION PROGRESSION ===",
        ]

        for finding in consolidated_findings.findings:
            summary_parts.append(finding)

        return "\n".join(summary_parts)

    def should_include_files_in_expert_prompt(self) -> bool:
        """Include files in expert analysis for comprehensive code review."""
        return True

    def should_embed_system_prompt(self) -> bool:
        """Embed system prompt in expert analysis for proper context."""
        return True

    def get_expert_thinking_mode(self) -> str:
        """Use high thinking mode for thorough code review analysis."""
        return "high"

    def get_expert_analysis_instruction(self) -> str:
        """Get specific instruction for code review expert analysis."""
        return (
            "Please provide comprehensive code review analysis based on the investigation findings. "
            "Focus on identifying any remaining issues, validating the completeness of the analysis, "
            "and providing final recommendations for code improvements, following the severity-based "
            "format specified in the system prompt."
        )

    # Hook method overrides for code review-specific behavior

    def prepare_step_data(self, request) -> dict:
        """
        Map code review-specific fields for internal processing.
        """
        step_data = {
            "step": request.step,
            "step_number": request.step_number,
            "findings": request.findings,
            "files_checked": request.files_checked,
            "relevant_files": request.relevant_files,
            "relevant_context": request.relevant_context,
            "issues_found": request.issues_found,
            "review_validation_type": self.get_review_validation_type(request),
            "hypothesis": request.findings,  # Map findings to hypothesis for compatibility
            "images": request.images or [],
            "confidence": "high",  # Dummy value for workflow_mixin compatibility
        }
        return step_data

    def should_skip_expert_analysis(self, request, consolidated_findings) -> bool:
        """
        Code review workflow skips expert analysis only when review_validation_type is "internal".
        Default is always to use expert analysis (external).
        For continuations with external type, always perform expert analysis immediately.
        """
        # If it's a continuation and review_validation_type is external, don't skip
        continuation_id = self.get_request_continuation_id(request)
        validation_type = self.get_review_validation_type(request)
        if continuation_id and validation_type != "internal":
            return False  # Always do expert analysis for external continuations

        # Only skip if explicitly set to internal AND review is complete
        return validation_type == "internal" and not request.next_step_required

    def store_initial_issue(self, step_description: str):
        """Store initial request for expert analysis."""
        self.initial_request = step_description

    # Override inheritance hooks for code review-specific behavior

    def get_review_validation_type(self, request) -> str:
        """Get review validation type from request. Hook method for clean inheritance."""
        try:
            return request.review_validation_type or "external"
        except AttributeError:
            return "external"  # Default to external validation

    def get_completion_status(self) -> str:
        """Code review tools use review-specific status."""
        return "code_review_complete_ready_for_implementation"

    def get_completion_data_key(self) -> str:
        """Code review uses 'complete_code_review' key."""
        return "complete_code_review"

    def get_final_analysis_from_request(self, request):
        """Code review tools use 'findings' field."""
        return request.findings

    def get_confidence_level(self, request) -> str:
        """Code review tools use 'certain' for high confidence."""
        return "certain"

    def get_completion_message(self) -> str:
        """Code review-specific completion message."""
        return (
            "Code review complete. You have identified all significant issues "
            "and provided comprehensive analysis. MANDATORY: Present the user with the complete review results "
            "categorized by severity, and IMMEDIATELY proceed with implementing the highest priority fixes "
            "or provide specific guidance for improvements. Focus on actionable recommendations."
        )

    def get_skip_reason(self) -> str:
        """Code review-specific skip reason."""
        return "Completed comprehensive code review with internal analysis only (no external model validation)"

    def get_skip_expert_analysis_status(self) -> str:
        """Code review-specific expert analysis skip status."""
        return "skipped_due_to_internal_analysis_type"

    def prepare_work_summary(self) -> str:
        """Code review-specific work summary."""
        return self._build_code_review_summary(self.consolidated_findings)

    def get_completion_next_steps_message(self, expert_analysis_used: bool = False) -> str:
        """
        Code review-specific completion message.
        """
        base_message = (
            "CODE REVIEW IS COMPLETE. You MUST now summarize and present ALL review findings organized by "
            "severity (Critical → High → Medium → Low), specific code locations with line numbers, and exact "
            "recommendations for improvement. Clearly prioritize the top 3 issues that need immediate attention. "
            "Provide concrete, actionable guidance for each issue—make it easy for a developer to understand "
            "exactly what needs to be fixed and how to implement the improvements."
        )

        # Add expert analysis guidance only when expert analysis was actually used
        if expert_analysis_used:
            expert_guidance = self.get_expert_analysis_guidance()
            if expert_guidance:
                return f"{base_message}\n\n{expert_guidance}"

        return base_message

    def get_expert_analysis_guidance(self) -> str:
        """
        Provide specific guidance for handling expert analysis in code reviews.
        """
        return (
            "IMPORTANT: Analysis from an assistant model has been provided above. You MUST critically evaluate and validate "
            "the expert findings rather than accepting them blindly. Cross-reference the expert analysis with "
            "your own investigation findings, verify that suggested improvements are appropriate for this "
            "codebase's context and patterns, and ensure recommendations align with the project's standards. "
            "Present a synthesis that combines your systematic review with validated expert insights, clearly "
            "distinguishing between findings you've independently confirmed and additional insights from expert analysis."
        )

    def get_step_guidance_message(self, request) -> str:
        """
        Code review-specific step guidance with detailed investigation instructions.
        """
        step_guidance = self.get_code_review_step_guidance(request.step_number, request)
        return step_guidance["next_steps"]

    def get_code_review_step_guidance(self, step_number: int, request) -> dict[str, Any]:
        """
        Provide step-specific guidance for code review workflow.
        Uses get_required_actions to determine what needs to be done,
        then formats those actions into appropriate guidance messages.
        """
        # Get the required actions from the single source of truth
        required_actions = self.get_required_actions(
            step_number,
            "medium",  # Dummy value for backward compatibility
            request.findings or "",
            request.total_steps,
            request,  # Pass request for continuation-aware decisions
        )

        # Check if this is a continuation to provide context-aware guidance
        continuation_id = self.get_request_continuation_id(request)
        validation_type = self.get_review_validation_type(request)
        is_external_continuation = continuation_id and validation_type == "external"
        is_internal_continuation = continuation_id and validation_type == "internal"

        # Step 1 handling
        if step_number == 1:
            if is_external_continuation:
                # Fast-track for external continuations
                return {
                    "next_steps": (
                        "You are on step 1 of MAXIMUM 2 steps for continuation. CRITICAL: Quickly review the code NOW. "
                        "MANDATORY ACTIONS:\n"
                        + "\n".join(f"{i+1}. {action}" for i, action in enumerate(required_actions))
                        + "\n\nSet step_number=2 and next_step_required=False on the next call to trigger expert analysis."
                    )
                }
            elif is_internal_continuation:
                # Internal validation mode
                next_steps = (
                    "Continuing previous conversation with internal validation only. The analysis will build "
                    "upon the prior findings without external model validation. REQUIRED ACTIONS:\n"
                    + "\n".join(f"{i+1}. {action}" for i, action in enumerate(required_actions))
                )
            else:
                # Normal flow for new reviews
                next_steps = (
                    f"MANDATORY: DO NOT call the {self.get_name()} tool again immediately. You MUST first examine "
                    f"the code files thoroughly using appropriate tools. CRITICAL AWARENESS: You need to:\n"
                    + "\n".join(f"{i+1}. {action}" for i, action in enumerate(required_actions))
                    + f"\n\nOnly call {self.get_name()} again AFTER completing your investigation. "
                    f"When you call {self.get_name()} next time, use step_number: {step_number + 1} "
                    f"and report specific files examined, issues found, and code quality assessments discovered."
                )

        elif step_number == 2:
            # CRITICAL: Check if violating minimum step requirement
            if (
                request.total_steps >= 3
                and request.step_number < request.total_steps
                and not request.next_step_required
            ):
                next_steps = (
                    f"ERROR: You set total_steps={request.total_steps} but next_step_required=False on step {request.step_number}. "
                    f"This violates the minimum step requirement. You MUST set next_step_required=True until you reach the final step. "
                    f"Call {self.get_name()} again with next_step_required=True and continue your investigation."
                )
            elif is_external_continuation or (not request.next_step_required and validation_type == "external"):
                # Fast-track completion or about to complete for external validation
                next_steps = (
                    "Proceeding immediately to expert analysis. "
                    f"MANDATORY: call the {self.get_name()} tool again now with next_step_required=False to "
                    f"trigger external validation."
                )
            else:
                # Normal flow - deeper analysis needed
                next_steps = (
                    f"STOP! Do NOT call {self.get_name()} again yet. You are on step 2 of {request.total_steps} minimum required steps. "
                    f"MANDATORY ACTIONS before calling {self.get_name()} step {step_number + 1}:\n"
                    + "\n".join(f"{i+1}. {action}" for i, action in enumerate(required_actions))
                    + f"\n\nRemember: You MUST set next_step_required=True until step {request.total_steps}. "
                    + f"Only call {self.get_name()} again with step_number: {step_number + 1} AFTER completing these code review tasks."
                )

        elif step_number >= 3:
            if not request.next_step_required and validation_type == "external":
                # About to complete - ready for expert analysis
                next_steps = (
                    "Completing review and proceeding to expert analysis. "
                    "Ensure all findings are documented with specific file references and line numbers."
                )
            else:
                # Later steps - final verification
                next_steps = (
                    f"WAIT! Your code review needs final verification. DO NOT call {self.get_name()} immediately. REQUIRED ACTIONS:\n"
                    + "\n".join(f"{i+1}. {action}" for i, action in enumerate(required_actions))
                    + f"\n\nREMEMBER: Ensure you have identified all significant issues across all severity levels and "
                    f"verified the completeness of your review. Document findings with specific file references and "
                    f"line numbers where applicable, then call {self.get_name()} with step_number: {step_number + 1}."
                )
        else:
            # Fallback for any other case - check minimum step violation first
            if (
                request.total_steps >= 3
                and request.step_number < request.total_steps
                and not request.next_step_required
            ):
                next_steps = (
                    f"ERROR: You set total_steps={request.total_steps} but next_step_required=False on step {request.step_number}. "
                    f"This violates the minimum step requirement. You MUST set next_step_required=True until step {request.total_steps}."
                )
            elif not request.next_step_required and validation_type == "external":
                next_steps = (
                    "Completing review. "
                    "Ensure all findings are documented with specific file references and severity levels."
                )
            else:
                next_steps = (
                    f"PAUSE REVIEW. Before calling {self.get_name()} step {step_number + 1}, you MUST examine more code thoroughly. "
                    + "Required: "
                    + ", ".join(required_actions[:2])
                    + ". "
                    + f"Your next {self.get_name()} call (step_number: {step_number + 1}) must include "
                    f"NEW evidence from actual code analysis, not just theories. NO recursive {self.get_name()} calls "
                    f"without investigation work!"
                )

        return {"next_steps": next_steps}

    def customize_workflow_response(self, response_data: dict, request) -> dict:
        """
        Customize response to match code review workflow format.
        """
        # Store initial request on first step
        if request.step_number == 1:
            self.initial_request = request.step
            # Store review configuration for expert analysis
            if request.relevant_files:
                self.review_config = {
                    "relevant_files": request.relevant_files,
                    "review_type": request.review_type,
                    "focus_on": request.focus_on,
                    "standards": request.standards,
                    "severity_filter": request.severity_filter,
                }

        # Convert generic status names to code review-specific ones
        tool_name = self.get_name()
        status_mapping = {
            f"{tool_name}_in_progress": "code_review_in_progress",
            f"pause_for_{tool_name}": "pause_for_code_review",
            f"{tool_name}_required": "code_review_required",
            f"{tool_name}_complete": "code_review_complete",
        }

        if response_data["status"] in status_mapping:
            response_data["status"] = status_mapping[response_data["status"]]

        # Rename status field to match code review workflow
        if f"{tool_name}_status" in response_data:
            response_data["code_review_status"] = response_data.pop(f"{tool_name}_status")
            # Add code review-specific status fields
            response_data["code_review_status"]["issues_by_severity"] = {}
            for issue in self.consolidated_findings.issues_found:
                severity = issue.get("severity", "unknown")
                if severity not in response_data["code_review_status"]["issues_by_severity"]:
                    response_data["code_review_status"]["issues_by_severity"][severity] = 0
                response_data["code_review_status"]["issues_by_severity"][severity] += 1
            response_data["code_review_status"]["review_validation_type"] = self.get_review_validation_type(request)

        # Map complete_codereview to complete_code_review
        if f"complete_{tool_name}" in response_data:
            response_data["complete_code_review"] = response_data.pop(f"complete_{tool_name}")

        # Map the completion flag to match code review workflow
        if f"{tool_name}_complete" in response_data:
            response_data["code_review_complete"] = response_data.pop(f"{tool_name}_complete")

        return response_data

    # Required abstract methods from BaseTool
    def get_request_model(self):
        """Return the code review workflow-specific request model."""
        return CodeReviewRequest

    async def prepare_prompt(self, request) -> str:
        """Not used - workflow tools use execute_workflow()."""
        return ""  # Workflow tools use execute_workflow() directly