my-pal-mcp-server/tools/codereview.py
Fahad 94f542c76a fix: critical conversation history bug and improve Docker integration
This commit addresses several critical issues and improvements:

🔧 Critical Fixes:
- Fixed conversation history not being included when using continuation_id in AI-to-AI conversations
- Fixed test mock targeting issues preventing proper conversation memory validation
- Fixed Docker debug logging functionality with Gemini tools

🐛 Bug Fixes:
- Docker compose configuration for proper container command execution
- Test mock import targeting from utils.conversation_memory.* to tools.base.*
- Version bump to 3.1.0 reflecting significant improvements

🚀 Improvements:
- Enhanced Docker environment configuration with comprehensive logging setup
- Added cross-tool continuation documentation and examples in README
- Improved error handling and validation across all tools
- Better logging configuration with LOG_LEVEL environment variable support
- Enhanced conversation memory system documentation

🧪 Testing:
- Added comprehensive conversation history bug fix tests
- Added cross-tool continuation functionality tests
- All 132 tests now pass with proper conversation history validation
- Improved test coverage for AI-to-AI conversation threading

Code Quality:
- Applied black, isort, and ruff formatting across entire codebase
- Enhanced inline documentation for conversation memory system
- Cleaned up temporary files and improved repository hygiene
- Better test descriptions and coverage for critical functionality

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-06-11 08:53:45 +04:00


"""
Code Review tool - Comprehensive code analysis and review
This tool provides professional-grade code review capabilities using
Gemini's understanding of code patterns, best practices, and common issues.
It can analyze individual files or entire codebases, providing actionable
feedback categorized by severity.
Key Features:
- Multi-file and directory support
- Configurable review types (full, security, performance, quick)
- Severity-based issue filtering
- Custom focus areas and coding standards
- Structured output with specific remediation steps
"""
from typing import Any, Optional

from mcp.types import TextContent
from pydantic import Field

from config import TEMPERATURE_ANALYTICAL
from prompts import CODEREVIEW_PROMPT
from utils import read_files

from .base import BaseTool, ToolRequest
from .models import ToolOutput

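# For orientation, an illustrative arguments payload for this tool (the path
# and context string are hypothetical; field names mirror CodeReviewRequest
# below):
#
#     {
#         "files": ["/home/user/project/api/handlers.py"],
#         "context": "REST handlers for user auth; verify input validation",
#         "review_type": "security",
#         "severity_filter": "high",
#     }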
class CodeReviewRequest(ToolRequest):
    """
    Request model for the code review tool.

    This model defines all parameters that can be used to customize
    the code review process, from selecting files to specifying
    review focus and standards.
    """

    files: list[str] = Field(
        ...,
        description="Code files or directories to review (must be absolute paths)",
    )
    context: str = Field(
        ...,
        description="User's summary of what the code does, expected behavior, constraints, and review objectives",
    )
    review_type: str = Field("full", description="Type of review: full|security|performance|quick")
    focus_on: Optional[str] = Field(None, description="Specific aspects to focus on during review")
    standards: Optional[str] = Field(None, description="Coding standards or guidelines to enforce")
    severity_filter: str = Field(
        "all",
        description="Minimum severity to report: critical|high|medium|all",
    )

class CodeReviewTool(BaseTool):
    """
    Professional code review tool implementation.

    This tool analyzes code for bugs, security vulnerabilities, performance
    issues, and code quality problems. It provides detailed feedback with
    severity ratings and specific remediation steps.
    """

    def get_name(self) -> str:
        return "codereview"

    def get_description(self) -> str:
        return (
            "PROFESSIONAL CODE REVIEW - Comprehensive analysis for bugs, security, and quality. "
            "Supports both individual files and entire directories/projects. "
            "Use this when you need to review code, check for issues, find bugs, or perform security audits. "
            "I'll identify issues by severity (Critical→High→Medium→Low) with specific fixes. "
            "Supports focused reviews: security, performance, or quick checks. "
            "Choose thinking_mode based on review scope: 'low' for small code snippets, "
            "'medium' for standard files/modules (default), 'high' for complex systems/architectures, "
            "'max' for critical security audits or large codebases requiring deepest analysis."
        )

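    # Note: the temperature, thinking_mode, use_websearch, and continuation_id
    # properties exposed in the schema below are not declared on
    # CodeReviewRequest, so they are presumably inherited from the shared
    # ToolRequest base model.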
    def get_input_schema(self) -> dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "files": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Code files or directories to review (must be absolute paths)",
                },
                "context": {
                    "type": "string",
                    "description": "User's summary of what the code does, expected behavior, constraints, and review objectives",
                },
                "review_type": {
                    "type": "string",
                    "enum": ["full", "security", "performance", "quick"],
                    "default": "full",
                    "description": "Type of review to perform",
                },
                "focus_on": {
                    "type": "string",
                    "description": "Specific aspects to focus on",
                },
                "standards": {
                    "type": "string",
                    "description": "Coding standards to enforce",
                },
                "severity_filter": {
                    "type": "string",
                    "enum": ["critical", "high", "medium", "all"],
                    "default": "all",
                    "description": "Minimum severity level to report",
                },
                "temperature": {
                    "type": "number",
                    "description": "Temperature (0-1, default 0.2 for consistency)",
                    "minimum": 0,
                    "maximum": 1,
                },
                "thinking_mode": {
                    "type": "string",
                    "enum": ["minimal", "low", "medium", "high", "max"],
                    "description": "Thinking depth: minimal (128), low (2048), medium (8192), high (16384), max (32768)",
                },
                "use_websearch": {
                    "type": "boolean",
                    "description": "Enable web search for documentation, best practices, and current information. Particularly useful for: brainstorming sessions, architectural design discussions, exploring industry best practices, working with specific frameworks/technologies, researching solutions to complex problems, or when current documentation and community insights would enhance the analysis.",
                    "default": True,
                },
                "continuation_id": {
                    "type": "string",
                    "description": "Thread continuation ID for multi-turn conversations. Can be used to continue conversations across different tools. Only provide this if continuing a previous conversation thread.",
                },
            },
            "required": ["files", "context"],
        }

    def get_system_prompt(self) -> str:
        return CODEREVIEW_PROMPT

    def get_default_temperature(self) -> float:
        return TEMPERATURE_ANALYTICAL

    def get_request_model(self):
        return CodeReviewRequest

    async def execute(self, arguments: dict[str, Any]) -> list[TextContent]:
        """Override execute to check focus_on size before processing"""
        # First validate request
        request_model = self.get_request_model()
        request = request_model(**arguments)

        # Check focus_on size if provided
        if request.focus_on:
            size_check = self.check_prompt_size(request.focus_on)
            if size_check:
                return [TextContent(type="text", text=ToolOutput(**size_check).model_dump_json())]

        # Continue with normal execution
        return await super().execute(arguments)

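    # Note: check_prompt_size (above) and handle_prompt_file (below) are
    # assumed to be provided by BaseTool. The size check presumably lets an
    # oversized focus_on value be rejected early so the client can resend it
    # as a prompt.txt file, which prepare_prompt then folds back into focus_on.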
    async def prepare_prompt(self, request: CodeReviewRequest) -> str:
        """
        Prepare the code review prompt with customized instructions.

        This method reads the requested files, validates token limits,
        and constructs a detailed prompt based on the review parameters.

        Args:
            request: The validated review request

        Returns:
            str: Complete prompt for the Gemini model

        Raises:
            ValueError: If the code exceeds token limits
        """
        # Check for prompt.txt in files
        prompt_content, updated_files = self.handle_prompt_file(request.files)

        # If prompt.txt was found, use it as focus_on
        if prompt_content:
            request.focus_on = prompt_content

        # Update request files list
        if updated_files is not None:
            request.files = updated_files

        # Read all requested files, expanding directories as needed
        file_content = read_files(request.files)

        # Validate that the code fits within model context limits
        self._validate_token_limit(file_content, "Code")

        # Build customized review instructions based on review type
        review_focus = []
        if request.review_type == "security":
            review_focus.append("Focus on security vulnerabilities and authentication issues")
        elif request.review_type == "performance":
            review_focus.append("Focus on performance bottlenecks and optimization opportunities")
        elif request.review_type == "quick":
            review_focus.append("Provide a quick review focusing on critical issues only")

        # Add any additional focus areas specified by the user
        if request.focus_on:
            review_focus.append(f"Pay special attention to: {request.focus_on}")

        # Include custom coding standards if provided
        if request.standards:
            review_focus.append(f"Enforce these standards: {request.standards}")

        # Apply severity filtering to reduce noise if requested
        if request.severity_filter != "all":
            review_focus.append(f"Only report issues of {request.severity_filter} severity or higher")

        focus_instruction = "\n".join(review_focus) if review_focus else ""

        # Add web search instruction if enabled
        websearch_instruction = self.get_websearch_instruction(
            request.use_websearch,
            """When reviewing code, consider if searches for these would help:
- Security vulnerabilities and CVEs for libraries/frameworks used
- Best practices for the languages and frameworks in the code
- Common anti-patterns and their solutions
- Performance optimization techniques
- Recent updates or deprecations in APIs used""",
        )

        # Construct the complete prompt with system instructions and code
        full_prompt = f"""{self.get_system_prompt()}{websearch_instruction}

=== USER CONTEXT ===
{request.context}
=== END CONTEXT ===

{focus_instruction}

=== CODE TO REVIEW ===
{file_content}
=== END CODE ===

Please provide a code review aligned with the user's context and expectations, following the format specified in the system prompt."""

        return full_prompt

    def format_response(self, response: str, request: CodeReviewRequest) -> str:
        """
        Format the review response with appropriate headers.

        Adds context about the review type and focus area to help
        users understand the scope of the review.

        Args:
            response: The raw review from the model
            request: The original request for context

        Returns:
            str: Formatted response with headers
        """
        header = f"Code Review ({request.review_type.upper()})"
        if request.focus_on:
            header += f" - Focus: {request.focus_on}"

        return f"""{header}
{"=" * 50}

{response}

---

**Claude's Next Steps:**

1. **Understand the Context**: First examine the specific functions, files, and code sections mentioned in the review to understand each issue thoroughly.

2. **Present Options to User**: After understanding the issues, ask the user which specific improvements they would like to implement, presenting them as a clear list of options.

3. **Implement Selected Fixes**: Only implement the fixes the user chooses, ensuring each change is made correctly and maintains code quality.

Remember: Always understand the code context before suggesting fixes, and let the user decide which improvements to implement."""
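
For illustration, a minimal sketch of how this tool might be exercised directly, assuming the package layout shown above (tools.codereview), an async entry point, and a fully configured BaseTool environment (model API keys, config module); the file path and context string are hypothetical:

import asyncio

from tools.codereview import CodeReviewTool


async def main() -> None:
    tool = CodeReviewTool()
    # Arguments mirror get_input_schema(); only "files" and "context" are required.
    results = await tool.execute(
        {
            "files": ["/home/user/project/payments/service.py"],  # hypothetical path
            "context": "Payment service; review error handling and input validation",
            "review_type": "security",
            "severity_filter": "high",
        }
    )
    for item in results:
        print(item.text)


asyncio.run(main())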