Fixed linebreaks

Cleanup
Pass excluded fields to the schema builder directly
This commit is contained in:
Fahad
2025-06-27 14:29:10 +04:00
parent 0237fb3419
commit 090931d7cf
13 changed files with 193 additions and 221 deletions

View File

@@ -23,7 +23,7 @@ class TestConsensusThreeModels(BaseSimulatorTest):
try: try:
self.logger.info("Testing consensus tool with three models: flash:against, flash:for, local-llama:neutral") self.logger.info("Testing consensus tool with three models: flash:against, flash:for, local-llama:neutral")
# Send request with three ModelConfig objects using new workflow parameters # Send request with three objects using new workflow parameters
response, continuation_id = self.call_mcp_tool( response, continuation_id = self.call_mcp_tool(
"consensus", "consensus",
{ {

View File

@@ -21,7 +21,7 @@ class TestConsensusTool:
assert "COMPREHENSIVE CONSENSUS WORKFLOW" in tool.get_description() assert "COMPREHENSIVE CONSENSUS WORKFLOW" in tool.get_description()
assert tool.get_default_temperature() == 0.2 # TEMPERATURE_ANALYTICAL assert tool.get_default_temperature() == 0.2 # TEMPERATURE_ANALYTICAL
assert tool.get_model_category() == ToolModelCategory.EXTENDED_REASONING assert tool.get_model_category() == ToolModelCategory.EXTENDED_REASONING
assert tool.requires_model() is True assert tool.requires_model() is False # Consensus manages its own models
def test_request_validation_step1(self): def test_request_validation_step1(self):
"""Test Pydantic request model validation for step 1.""" """Test Pydantic request model validation for step 1."""
@@ -119,8 +119,11 @@ class TestConsensusTool:
# confidence field should be excluded # confidence field should be excluded
assert "confidence" not in schema["properties"] assert "confidence" not in schema["properties"]
assert "models" in schema["properties"] assert "models" in schema["properties"]
# relevant_files should also be excluded # relevant_files should be present as it's used by consensus
assert "relevant_files" not in schema["properties"] assert "relevant_files" in schema["properties"]
# model field should be present for Gemini compatibility (consensus uses 'models' as well)
assert "model" in schema["properties"]
# Verify workflow fields that should NOT be present # Verify workflow fields that should NOT be present
assert "files_checked" not in schema["properties"] assert "files_checked" not in schema["properties"]

View File

@@ -102,7 +102,6 @@ class TestServerTools:
# Check for expected content in the markdown output # Check for expected content in the markdown output
assert "# Zen MCP Server Version" in content assert "# Zen MCP Server Version" in content
assert "## Available Tools" in content assert "## Server Information" in content
assert "thinkdeep" in content assert "## Configuration" in content
assert "docgen" in content assert "Current Version" in content
assert "version" in content

View File

@@ -175,20 +175,20 @@ class AnalyzeTool(WorkflowTool):
def get_description(self) -> str: def get_description(self) -> str:
return ( return (
"COMPREHENSIVE ANALYSIS WORKFLOW - Step-by-step code analysis with expert validation. " "COMPREHENSIVE ANALYSIS WORKFLOW - Step-by-step code analysis with expert validation. "
"This tool guides you through a systematic investigation process where you:\\n\\n" "This tool guides you through a systematic investigation process where you:\n\n"
"1. Start with step 1: describe your analysis investigation plan\\n" "1. Start with step 1: describe your analysis investigation plan\n"
"2. STOP and investigate code structure, patterns, and architectural decisions\\n" "2. STOP and investigate code structure, patterns, and architectural decisions\n"
"3. Report findings in step 2 with concrete evidence from actual code analysis\\n" "3. Report findings in step 2 with concrete evidence from actual code analysis\n"
"4. Continue investigating between each step\\n" "4. Continue investigating between each step\n"
"5. Track findings, relevant files, and insights throughout\\n" "5. Track findings, relevant files, and insights throughout\n"
"6. Update assessments as understanding evolves\\n" "6. Update assessments as understanding evolves\n"
"7. Once investigation is complete, always receive expert validation\\n\\n" "7. Once investigation is complete, always receive expert validation\n\n"
"IMPORTANT: This tool enforces investigation between steps:\\n" "IMPORTANT: This tool enforces investigation between steps:\n"
"- After each call, you MUST investigate before calling again\\n" "- After each call, you MUST investigate before calling again\n"
"- Each step must include NEW evidence from code examination\\n" "- Each step must include NEW evidence from code examination\n"
"- No recursive calls without actual investigation work\\n" "- No recursive calls without actual investigation work\n"
"- The tool will specify which step number to use next\\n" "- The tool will specify which step number to use next\n"
"- Follow the required_actions list for investigation guidance\\n\\n" "- Follow the required_actions list for investigation guidance\n\n"
"Perfect for: comprehensive code analysis, architectural assessment, performance evaluation, " "Perfect for: comprehensive code analysis, architectural assessment, performance evaluation, "
"security analysis, maintainability review, pattern detection, strategic planning." "security analysis, maintainability review, pattern detection, strategic planning."
) )

View File

@@ -189,20 +189,20 @@ class CodeReviewTool(WorkflowTool):
def get_description(self) -> str: def get_description(self) -> str:
return ( return (
"COMPREHENSIVE CODE REVIEW WORKFLOW - Step-by-step code review with expert analysis. " "COMPREHENSIVE CODE REVIEW WORKFLOW - Step-by-step code review with expert analysis. "
"This tool guides you through a systematic investigation process where you:\\n\\n" "This tool guides you through a systematic investigation process where you:\n\n"
"1. Start with step 1: describe your code review investigation plan\\n" "1. Start with step 1: describe your code review investigation plan\n"
"2. STOP and investigate code structure, patterns, and potential issues\\n" "2. STOP and investigate code structure, patterns, and potential issues\n"
"3. Report findings in step 2 with concrete evidence from actual code analysis\\n" "3. Report findings in step 2 with concrete evidence from actual code analysis\n"
"4. Continue investigating between each step\\n" "4. Continue investigating between each step\n"
"5. Track findings, relevant files, and issues throughout\\n" "5. Track findings, relevant files, and issues throughout\n"
"6. Update assessments as understanding evolves\\n" "6. Update assessments as understanding evolves\n"
"7. Once investigation is complete, receive expert analysis\\n\\n" "7. Once investigation is complete, receive expert analysis\n\n"
"IMPORTANT: This tool enforces investigation between steps:\\n" "IMPORTANT: This tool enforces investigation between steps:\n"
"- After each call, you MUST investigate before calling again\\n" "- After each call, you MUST investigate before calling again\n"
"- Each step must include NEW evidence from code examination\\n" "- Each step must include NEW evidence from code examination\n"
"- No recursive calls without actual investigation work\\n" "- No recursive calls without actual investigation work\n"
"- The tool will specify which step number to use next\\n" "- The tool will specify which step number to use next\n"
"- Follow the required_actions list for investigation guidance\\n\\n" "- Follow the required_actions list for investigation guidance\n\n"
"Perfect for: comprehensive code review, security audits, performance analysis, " "Perfect for: comprehensive code review, security audits, performance analysis, "
"architectural assessment, code quality evaluation, anti-pattern detection." "architectural assessment, code quality evaluation, anti-pattern detection."
) )

View File

@@ -80,10 +80,6 @@ CONSENSUS_WORKFLOW_FIELD_DESCRIPTIONS = {
} }
class ModelConfig(dict):
"""Model configuration for consensus workflow"""
class ConsensusRequest(WorkflowRequest): class ConsensusRequest(WorkflowRequest):
"""Request model for consensus workflow steps""" """Request model for consensus workflow steps"""
@@ -95,7 +91,7 @@ class ConsensusRequest(WorkflowRequest):
# Investigation tracking fields # Investigation tracking fields
findings: str = Field(..., description=CONSENSUS_WORKFLOW_FIELD_DESCRIPTIONS["findings"]) findings: str = Field(..., description=CONSENSUS_WORKFLOW_FIELD_DESCRIPTIONS["findings"])
confidence: str | None = Field("exploring", exclude=True) # Not used in consensus workflow confidence: str = Field(default="exploring", exclude=True, description="Not used")
# Consensus-specific fields (only needed in step 1) # Consensus-specific fields (only needed in step 1)
models: list[dict] | None = Field(None, description=CONSENSUS_WORKFLOW_FIELD_DESCRIPTIONS["models"]) models: list[dict] | None = Field(None, description=CONSENSUS_WORKFLOW_FIELD_DESCRIPTIONS["models"])
@@ -114,8 +110,10 @@ class ConsensusRequest(WorkflowRequest):
description=CONSENSUS_WORKFLOW_FIELD_DESCRIPTIONS["model_responses"], description=CONSENSUS_WORKFLOW_FIELD_DESCRIPTIONS["model_responses"],
) )
# Optional images for visual debugging
images: list[str] | None = Field(default=None, description=CONSENSUS_WORKFLOW_FIELD_DESCRIPTIONS["images"])
# Override inherited fields to exclude them from schema # Override inherited fields to exclude them from schema
model: str | None = Field(default=None, exclude=True) # Consensus uses 'models' field instead
temperature: float | None = Field(default=None, exclude=True) temperature: float | None = Field(default=None, exclude=True)
thinking_mode: str | None = Field(default=None, exclude=True) thinking_mode: str | None = Field(default=None, exclude=True)
use_websearch: bool | None = Field(default=None, exclude=True) use_websearch: bool | None = Field(default=None, exclude=True)
@@ -126,7 +124,6 @@ class ConsensusRequest(WorkflowRequest):
issues_found: list[dict] | None = Field(default_factory=list, exclude=True) issues_found: list[dict] | None = Field(default_factory=list, exclude=True)
hypothesis: str | None = Field(None, exclude=True) hypothesis: str | None = Field(None, exclude=True)
backtrack_from_step: int | None = Field(None, exclude=True) backtrack_from_step: int | None = Field(None, exclude=True)
images: list[str] | None = Field(default_factory=list) # Enable images for consensus workflow
@model_validator(mode="after") @model_validator(mode="after")
def validate_step_one_requirements(self): def validate_step_one_requirements(self):
@@ -174,19 +171,19 @@ class ConsensusTool(WorkflowTool):
def get_description(self) -> str: def get_description(self) -> str:
return ( return (
"COMPREHENSIVE CONSENSUS WORKFLOW - Step-by-step multi-model consensus with structured analysis. " "COMPREHENSIVE CONSENSUS WORKFLOW - Step-by-step multi-model consensus with structured analysis. "
"This tool guides you through a systematic process where you:\\n\\n" "This tool guides you through a systematic process where you:\n\n"
"1. Start with step 1: provide your own neutral analysis of the proposal\\n" "1. Start with step 1: provide your own neutral analysis of the proposal\n"
"2. The tool will then consult each specified model one by one\\n" "2. The tool will then consult each specified model one by one\n"
"3. You'll receive each model's response in subsequent steps\\n" "3. You'll receive each model's response in subsequent steps\n"
"4. Track and synthesize perspectives as they accumulate\\n" "4. Track and synthesize perspectives as they accumulate\n"
"5. Final step: present comprehensive consensus and recommendations\\n\\n" "5. Final step: present comprehensive consensus and recommendations\n\n"
"IMPORTANT: This workflow enforces sequential model consultation:\\n" "IMPORTANT: This workflow enforces sequential model consultation:\n"
"- Step 1 is always your independent analysis\\n" "- Step 1 is always your independent analysis\n"
"- Each subsequent step processes one model response\\n" "- Each subsequent step processes one model response\n"
"- Total steps = number of models (each step includes consultation + response)\\n" "- Total steps = number of models (each step includes consultation + response)\n"
"- Models can have stances (for/against/neutral) for structured debate\\n" "- Models can have stances (for/against/neutral) for structured debate\n"
"- Same model can be used multiple times with different stances\\n" "- Same model can be used multiple times with different stances\n"
"- Each model + stance combination must be unique\\n\\n" "- Each model + stance combination must be unique\n\n"
"Perfect for: complex decisions, architectural choices, feature proposals, " "Perfect for: complex decisions, architectural choices, feature proposals, "
"technology evaluations, strategic planning." "technology evaluations, strategic planning."
) )
@@ -230,8 +227,9 @@ of the evidence, even when it strongly points in one direction.""",
"""Generate input schema for consensus workflow.""" """Generate input schema for consensus workflow."""
from .workflow.schema_builders import WorkflowSchemaBuilder from .workflow.schema_builders import WorkflowSchemaBuilder
# Consensus workflow-specific field overrides # Consensus tool-specific field definitions
consensus_field_overrides = { consensus_field_overrides = {
# Override standard workflow fields that need consensus-specific descriptions
"step": { "step": {
"type": "string", "type": "string",
"description": CONSENSUS_WORKFLOW_FIELD_DESCRIPTIONS["step"], "description": CONSENSUS_WORKFLOW_FIELD_DESCRIPTIONS["step"],
@@ -259,6 +257,7 @@ of the evidence, even when it strongly points in one direction.""",
"items": {"type": "string"}, "items": {"type": "string"},
"description": CONSENSUS_WORKFLOW_FIELD_DESCRIPTIONS["relevant_files"], "description": CONSENSUS_WORKFLOW_FIELD_DESCRIPTIONS["relevant_files"],
}, },
# consensus-specific fields (not in base workflow)
"models": { "models": {
"type": "array", "type": "array",
"items": { "items": {
@@ -289,31 +288,33 @@ of the evidence, even when it strongly points in one direction.""",
}, },
} }
# Build schema without standard workflow fields we don't use # Define excluded fields for consensus workflow
excluded_workflow_fields = [
"files_checked", # Not used in consensus workflow
"relevant_context", # Not used in consensus workflow
"issues_found", # Not used in consensus workflow
"hypothesis", # Not used in consensus workflow
"backtrack_from_step", # Not used in consensus workflow
"confidence", # Not used in consensus workflow
]
excluded_common_fields = [
"model", # Consensus uses 'models' field instead
"temperature", # Not used in consensus workflow
"thinking_mode", # Not used in consensus workflow
"use_websearch", # Not used in consensus workflow
]
# Build schema with proper field exclusion
# Note: the 'model' field is excluded via excluded_common_fields because consensus uses 'models' instead of 'model'
schema = WorkflowSchemaBuilder.build_schema( schema = WorkflowSchemaBuilder.build_schema(
tool_specific_fields=consensus_field_overrides, tool_specific_fields=consensus_field_overrides,
model_field_schema=self.get_model_field_schema(), model_field_schema=self.get_model_field_schema(),
auto_mode=self.is_effective_auto_mode(), auto_mode=self.is_effective_auto_mode(),
tool_name=self.get_name(), tool_name=self.get_name(),
excluded_workflow_fields=excluded_workflow_fields,
excluded_common_fields=excluded_common_fields,
) )
# Remove unused workflow fields
if "properties" in schema:
for field in [
"files_checked",
"relevant_context",
"issues_found",
"hypothesis",
"backtrack_from_step",
"confidence", # Not used in consensus workflow
"model", # Consensus uses 'models' field instead
"temperature", # Not used in consensus workflow
"thinking_mode", # Not used in consensus workflow
"use_websearch", # Not used in consensus workflow
"relevant_files", # Not used in consensus workflow
]:
schema["properties"].pop(field, None)
return schema return schema
def get_required_actions( def get_required_actions(
@@ -357,6 +358,17 @@ of the evidence, even when it strongly points in one direction.""",
"""Consensus workflow handles its own model consultations.""" """Consensus workflow handles its own model consultations."""
return False return False
def requires_model(self) -> bool:
"""
Consensus tool doesn't require model resolution at the MCP boundary.
Uses its own set of models.
Returns:
bool: False
"""
return False
# Hook method overrides for consensus-specific behavior # Hook method overrides for consensus-specific behavior
def prepare_step_data(self, request) -> dict: def prepare_step_data(self, request) -> dict:
@@ -601,9 +613,7 @@ YOUR SUPPORTIVE ANALYSIS SHOULD:
- Suggest optimizations that enhance value - Suggest optimizations that enhance value
- Present realistic implementation pathways - Present realistic implementation pathways
Remember: Being "for" means finding the BEST possible version of the idea IF it has merit, not blindly supporting bad """ Remember: Being "for" means finding the BEST possible version of the idea IF it has merit, not blindly supporting bad ideas.""",
"ideas."
"",
"against": """CRITICAL PERSPECTIVE WITH RESPONSIBILITY "against": """CRITICAL PERSPECTIVE WITH RESPONSIBILITY
You are tasked with critiquing this proposal, but with ESSENTIAL BOUNDARIES: You are tasked with critiquing this proposal, but with ESSENTIAL BOUNDARIES:
@@ -627,9 +637,7 @@ YOUR CRITICAL ANALYSIS SHOULD:
- Highlight potential negative consequences - Highlight potential negative consequences
- Question assumptions that may be flawed - Question assumptions that may be flawed
Remember: Being "against" means rigorous scrutiny to ensure quality, not undermining good ideas that deserve """ Remember: Being "against" means rigorous scrutiny to ensure quality, not undermining good ideas that deserve support.""",
"support."
"",
"neutral": """BALANCED PERSPECTIVE "neutral": """BALANCED PERSPECTIVE
Provide objective analysis considering both positive and negative aspects. However, if there is overwhelming evidence Provide objective analysis considering both positive and negative aspects. However, if there is overwhelming evidence

View File

@@ -21,7 +21,7 @@ architectural decisions, and breaking down large problems into manageable steps.
""" """
import logging import logging
from typing import TYPE_CHECKING, Any, Optional from typing import TYPE_CHECKING, Any
from pydantic import Field, field_validator from pydantic import Field, field_validator
@@ -67,12 +67,12 @@ class PlannerRequest(WorkflowRequest):
next_step_required: bool = Field(..., description=PLANNER_FIELD_DESCRIPTIONS["next_step_required"]) next_step_required: bool = Field(..., description=PLANNER_FIELD_DESCRIPTIONS["next_step_required"])
# Optional revision/branching fields (planning-specific) # Optional revision/branching fields (planning-specific)
is_step_revision: Optional[bool] = Field(False, description=PLANNER_FIELD_DESCRIPTIONS["is_step_revision"]) is_step_revision: bool | None = Field(False, description=PLANNER_FIELD_DESCRIPTIONS["is_step_revision"])
revises_step_number: Optional[int] = Field(None, description=PLANNER_FIELD_DESCRIPTIONS["revises_step_number"]) revises_step_number: int | None = Field(None, description=PLANNER_FIELD_DESCRIPTIONS["revises_step_number"])
is_branch_point: Optional[bool] = Field(False, description=PLANNER_FIELD_DESCRIPTIONS["is_branch_point"]) is_branch_point: bool | None = Field(False, description=PLANNER_FIELD_DESCRIPTIONS["is_branch_point"])
branch_from_step: Optional[int] = Field(None, description=PLANNER_FIELD_DESCRIPTIONS["branch_from_step"]) branch_from_step: int | None = Field(None, description=PLANNER_FIELD_DESCRIPTIONS["branch_from_step"])
branch_id: Optional[str] = Field(None, description=PLANNER_FIELD_DESCRIPTIONS["branch_id"]) branch_id: str | None = Field(None, description=PLANNER_FIELD_DESCRIPTIONS["branch_id"])
more_steps_needed: Optional[bool] = Field(False, description=PLANNER_FIELD_DESCRIPTIONS["more_steps_needed"]) more_steps_needed: bool | None = Field(False, description=PLANNER_FIELD_DESCRIPTIONS["more_steps_needed"])
# Exclude all investigation/analysis fields that aren't relevant to planning # Exclude all investigation/analysis fields that aren't relevant to planning
findings: str = Field( findings: str = Field(
@@ -85,15 +85,15 @@ class PlannerRequest(WorkflowRequest):
) )
issues_found: list[dict] = Field(default_factory=list, exclude=True, description="Planning doesn't find issues") issues_found: list[dict] = Field(default_factory=list, exclude=True, description="Planning doesn't find issues")
confidence: str = Field(default="planning", exclude=True, description="Planning uses different confidence model") confidence: str = Field(default="planning", exclude=True, description="Planning uses different confidence model")
hypothesis: Optional[str] = Field(default=None, exclude=True, description="Planning doesn't use hypothesis") hypothesis: str | None = Field(default=None, exclude=True, description="Planning doesn't use hypothesis")
backtrack_from_step: Optional[int] = Field(default=None, exclude=True, description="Planning uses revision instead") backtrack_from_step: int | None = Field(default=None, exclude=True, description="Planning uses revision instead")
# Exclude other non-planning fields # Exclude other non-planning fields
temperature: Optional[float] = Field(default=None, exclude=True) temperature: float | None = Field(default=None, exclude=True)
thinking_mode: Optional[str] = Field(default=None, exclude=True) thinking_mode: str | None = Field(default=None, exclude=True)
use_websearch: Optional[bool] = Field(default=None, exclude=True) use_websearch: bool | None = Field(default=None, exclude=True)
use_assistant_model: Optional[bool] = Field(default=False, exclude=True, description="Planning is self-contained") use_assistant_model: bool | None = Field(default=False, exclude=True, description="Planning is self-contained")
images: Optional[list] = Field(default=None, exclude=True, description="Planning doesn't use images") images: list | None = Field(default=None, exclude=True, description="Planning doesn't use images")
@field_validator("step_number") @field_validator("step_number")
@classmethod @classmethod
@@ -184,10 +184,18 @@ class PlannerTool(WorkflowTool):
"""Return the planner-specific request model.""" """Return the planner-specific request model."""
return PlannerRequest return PlannerRequest
def get_tool_fields(self) -> dict[str, dict[str, Any]]: def get_input_schema(self) -> dict[str, Any]:
"""Return planning-specific field definitions beyond the standard workflow fields.""" """Generate input schema for planner workflow using override pattern."""
return { from .workflow.schema_builders import WorkflowSchemaBuilder
# Planning-specific optional fields
# Planner tool-specific field definitions
planner_field_overrides = {
# Override standard workflow fields that need planning-specific descriptions
"step": {
"type": "string",
"description": PLANNER_FIELD_DESCRIPTIONS["step"], # Very planning-specific instructions
},
# NEW planning-specific fields (not in base workflow)
"is_step_revision": { "is_step_revision": {
"type": "boolean", "type": "boolean",
"description": PLANNER_FIELD_DESCRIPTIONS["is_step_revision"], "description": PLANNER_FIELD_DESCRIPTIONS["is_step_revision"],
@@ -216,11 +224,7 @@ class PlannerTool(WorkflowTool):
}, },
} }
def get_input_schema(self) -> dict[str, Any]: # Define excluded fields for planner workflow
"""Generate input schema using WorkflowSchemaBuilder with field exclusion."""
from .workflow.schema_builders import WorkflowSchemaBuilder
# Exclude investigation-specific fields that planning doesn't need
excluded_workflow_fields = [ excluded_workflow_fields = [
"findings", # Planning uses step content instead "findings", # Planning uses step content instead
"files_checked", # Planning doesn't examine files "files_checked", # Planning doesn't examine files
@@ -232,7 +236,6 @@ class PlannerTool(WorkflowTool):
"backtrack_from_step", # Planning uses revision instead "backtrack_from_step", # Planning uses revision instead
] ]
# Exclude common fields that planning doesn't need
excluded_common_fields = [ excluded_common_fields = [
"temperature", # Planning doesn't need temperature control "temperature", # Planning doesn't need temperature control
"thinking_mode", # Planning doesn't need thinking mode "thinking_mode", # Planning doesn't need thinking mode
@@ -241,8 +244,9 @@ class PlannerTool(WorkflowTool):
"files", # Planning doesn't use files "files", # Planning doesn't use files
] ]
# Build schema with proper field exclusion (following consensus pattern)
return WorkflowSchemaBuilder.build_schema( return WorkflowSchemaBuilder.build_schema(
tool_specific_fields=self.get_tool_fields(), tool_specific_fields=planner_field_overrides,
required_fields=[], # No additional required fields beyond workflow defaults required_fields=[], # No additional required fields beyond workflow defaults
model_field_schema=self.get_model_field_schema(), model_field_schema=self.get_model_field_schema(),
auto_mode=self.is_effective_auto_mode(), auto_mode=self.is_effective_auto_mode(),

View File

@@ -192,20 +192,20 @@ class PrecommitTool(WorkflowTool):
def get_description(self) -> str: def get_description(self) -> str:
return ( return (
"COMPREHENSIVE PRECOMMIT WORKFLOW - Step-by-step pre-commit validation with expert analysis. " "COMPREHENSIVE PRECOMMIT WORKFLOW - Step-by-step pre-commit validation with expert analysis. "
"This tool guides you through a systematic investigation process where you:\\n\\n" "This tool guides you through a systematic investigation process where you:\n\n"
"1. Start with step 1: describe your pre-commit validation plan\\n" "1. Start with step 1: describe your pre-commit validation plan\n"
"2. STOP and investigate git changes, repository status, and file modifications\\n" "2. STOP and investigate git changes, repository status, and file modifications\n"
"3. Report findings in step 2 with concrete evidence from actual changes\\n" "3. Report findings in step 2 with concrete evidence from actual changes\n"
"4. Continue investigating between each step\\n" "4. Continue investigating between each step\n"
"5. Track findings, relevant files, and issues throughout\\n" "5. Track findings, relevant files, and issues throughout\n"
"6. Update assessments as understanding evolves\\n" "6. Update assessments as understanding evolves\n"
"7. Once investigation is complete, receive expert analysis\\n\\n" "7. Once investigation is complete, receive expert analysis\n\n"
"IMPORTANT: This tool enforces investigation between steps:\\n" "IMPORTANT: This tool enforces investigation between steps:\n"
"- After each call, you MUST investigate before calling again\\n" "- After each call, you MUST investigate before calling again\n"
"- Each step must include NEW evidence from git analysis\\n" "- Each step must include NEW evidence from git analysis\n"
"- No recursive calls without actual investigation work\\n" "- No recursive calls without actual investigation work\n"
"- The tool will specify which step number to use next\\n" "- The tool will specify which step number to use next\n"
"- Follow the required_actions list for investigation guidance\\n\\n" "- Follow the required_actions list for investigation guidance\n\n"
"Perfect for: comprehensive pre-commit validation, multi-repository analysis, " "Perfect for: comprehensive pre-commit validation, multi-repository analysis, "
"security review, change impact assessment, completeness verification." "security review, change impact assessment, completeness verification."
) )

View File

@@ -188,20 +188,20 @@ class RefactorTool(WorkflowTool):
def get_description(self) -> str: def get_description(self) -> str:
return ( return (
"COMPREHENSIVE REFACTORING WORKFLOW - Step-by-step refactoring analysis with expert validation. " "COMPREHENSIVE REFACTORING WORKFLOW - Step-by-step refactoring analysis with expert validation. "
"This tool guides you through a systematic investigation process where you:\\n\\n" "This tool guides you through a systematic investigation process where you:\n\n"
"1. Start with step 1: describe your refactoring investigation plan\\n" "1. Start with step 1: describe your refactoring investigation plan\n"
"2. STOP and investigate code structure, patterns, and potential improvements\\n" "2. STOP and investigate code structure, patterns, and potential improvements\n"
"3. Report findings in step 2 with concrete evidence from actual code analysis\\n" "3. Report findings in step 2 with concrete evidence from actual code analysis\n"
"4. Continue investigating between each step\\n" "4. Continue investigating between each step\n"
"5. Track findings, relevant files, and refactoring opportunities throughout\\n" "5. Track findings, relevant files, and refactoring opportunities throughout\n"
"6. Update assessments as understanding evolves\\n" "6. Update assessments as understanding evolves\n"
"7. Once investigation is complete, receive expert analysis\\n\\n" "7. Once investigation is complete, receive expert analysis\n\n"
"IMPORTANT: This tool enforces investigation between steps:\\n" "IMPORTANT: This tool enforces investigation between steps:\n"
"- After each call, you MUST investigate before calling again\\n" "- After each call, you MUST investigate before calling again\n"
"- Each step must include NEW evidence from code examination\\n" "- Each step must include NEW evidence from code examination\n"
"- No recursive calls without actual investigation work\\n" "- No recursive calls without actual investigation work\n"
"- The tool will specify which step number to use next\\n" "- The tool will specify which step number to use next\n"
"- Follow the required_actions list for investigation guidance\\n\\n" "- Follow the required_actions list for investigation guidance\n\n"
"Perfect for: comprehensive refactoring analysis, code smell detection, decomposition planning, " "Perfect for: comprehensive refactoring analysis, code smell detection, decomposition planning, "
"modernization opportunities, organization improvements, maintainability enhancements." "modernization opportunities, organization improvements, maintainability enhancements."
) )

View File

@@ -218,20 +218,20 @@ class SecauditTool(WorkflowTool):
"""Return a description of the tool.""" """Return a description of the tool."""
return ( return (
"COMPREHENSIVE SECURITY AUDIT WORKFLOW - Step-by-step security assessment with expert analysis. " "COMPREHENSIVE SECURITY AUDIT WORKFLOW - Step-by-step security assessment with expert analysis. "
"This tool guides you through a systematic investigation process where you:\\n\\n" "This tool guides you through a systematic investigation process where you:\n\n"
"1. Start with step 1: describe your security investigation plan\\n" "1. Start with step 1: describe your security investigation plan\n"
"2. STOP and investigate code structure, patterns, and security issues\\n" "2. STOP and investigate code structure, patterns, and security issues\n"
"3. Report findings in step 2 with concrete evidence from actual code analysis\\n" "3. Report findings in step 2 with concrete evidence from actual code analysis\n"
"4. Continue investigating between each step\\n" "4. Continue investigating between each step\n"
"5. Track findings, relevant files, and security issues throughout\\n" "5. Track findings, relevant files, and security issues throughout\n"
"6. Update assessments as understanding evolves\\n" "6. Update assessments as understanding evolves\n"
"7. Once investigation is complete, receive expert security analysis\\n\\n" "7. Once investigation is complete, receive expert security analysis\n\n"
"IMPORTANT: This tool enforces investigation between steps:\\n" "IMPORTANT: This tool enforces investigation between steps:\n"
"- After each call, you MUST investigate before calling again\\n" "- After each call, you MUST investigate before calling again\n"
"- Each step must include NEW evidence from code examination\\n" "- Each step must include NEW evidence from code examination\n"
"- No recursive calls without actual investigation work\\n" "- No recursive calls without actual investigation work\n"
"- The tool will specify which step number to use next\\n" "- The tool will specify which step number to use next\n"
"- Follow the required_actions list for investigation guidance\\n\\n" "- Follow the required_actions list for investigation guidance\n\n"
"Perfect for: comprehensive security assessment, OWASP Top 10 analysis, compliance evaluation, " "Perfect for: comprehensive security assessment, OWASP Top 10 analysis, compliance evaluation, "
"vulnerability identification, threat modeling, security architecture review." "vulnerability identification, threat modeling, security architecture review."
) )

View File

@@ -309,26 +309,26 @@ class TestGenTool(WorkflowTool):
def prepare_expert_analysis_context(self, consolidated_findings) -> str: def prepare_expert_analysis_context(self, consolidated_findings) -> str:
"""Prepare context for external model call for test generation validation.""" """Prepare context for external model call for test generation validation."""
context_parts = [ context_parts = [
f"=== TEST GENERATION REQUEST ===\\n{self.initial_request or 'Test generation workflow initiated'}\\n=== END REQUEST ===" f"=== TEST GENERATION REQUEST ===\n{self.initial_request or 'Test generation workflow initiated'}\n=== END REQUEST ==="
] ]
# Add investigation summary # Add investigation summary
investigation_summary = self._build_test_generation_summary(consolidated_findings) investigation_summary = self._build_test_generation_summary(consolidated_findings)
context_parts.append( context_parts.append(
f"\\n=== CLAUDE'S TEST PLANNING INVESTIGATION ===\\n{investigation_summary}\\n=== END INVESTIGATION ===" f"\n=== CLAUDE'S TEST PLANNING INVESTIGATION ===\n{investigation_summary}\n=== END INVESTIGATION ==="
) )
# Add relevant code elements if available # Add relevant code elements if available
if consolidated_findings.relevant_context: if consolidated_findings.relevant_context:
methods_text = "\\n".join(f"- {method}" for method in consolidated_findings.relevant_context) methods_text = "\n".join(f"- {method}" for method in consolidated_findings.relevant_context)
context_parts.append(f"\\n=== CODE ELEMENTS TO TEST ===\\n{methods_text}\\n=== END CODE ELEMENTS ===") context_parts.append(f"\n=== CODE ELEMENTS TO TEST ===\n{methods_text}\n=== END CODE ELEMENTS ===")
# Add images if available # Add images if available
if consolidated_findings.images: if consolidated_findings.images:
images_text = "\\n".join(f"- {img}" for img in consolidated_findings.images) images_text = "\n".join(f"- {img}" for img in consolidated_findings.images)
context_parts.append(f"\\n=== VISUAL DOCUMENTATION ===\\n{images_text}\\n=== END VISUAL DOCUMENTATION ===") context_parts.append(f"\n=== VISUAL DOCUMENTATION ===\n{images_text}\n=== END VISUAL DOCUMENTATION ===")
return "\\n".join(context_parts) return "\n".join(context_parts)
def _build_test_generation_summary(self, consolidated_findings) -> str: def _build_test_generation_summary(self, consolidated_findings) -> str:
"""Prepare a comprehensive summary of the test generation investigation.""" """Prepare a comprehensive summary of the test generation investigation."""

View File

@@ -181,24 +181,24 @@ class TracerTool(WorkflowTool):
def get_description(self) -> str: def get_description(self) -> str:
return ( return (
"STEP-BY-STEP CODE TRACING WORKFLOW - Systematic code analysis through guided investigation. " "STEP-BY-STEP CODE TRACING WORKFLOW - Systematic code analysis through guided investigation. "
"This tool guides you through a structured investigation process where you:\\n\\n" "This tool guides you through a structured investigation process where you:\n\n"
"1. Start with step 1: describe your tracing plan and target\\n" "1. Start with step 1: describe your tracing plan and target\n"
"2. STOP and investigate code structure, patterns, and relationships\\n" "2. STOP and investigate code structure, patterns, and relationships\n"
"3. Report findings in step 2 with concrete evidence from actual code analysis\\n" "3. Report findings in step 2 with concrete evidence from actual code analysis\n"
"4. Continue investigating between each step\\n" "4. Continue investigating between each step\n"
"5. Track findings, relevant files, and code relationships throughout\\n" "5. Track findings, relevant files, and code relationships throughout\n"
"6. Build comprehensive understanding as analysis evolves\\n" "6. Build comprehensive understanding as analysis evolves\n"
"7. Complete with detailed output formatted according to trace mode\\n\\n" "7. Complete with detailed output formatted according to trace mode\n\n"
"IMPORTANT: This tool enforces investigation between steps:\\n" "IMPORTANT: This tool enforces investigation between steps:\n"
"- After each call, you MUST investigate before calling again\\n" "- After each call, you MUST investigate before calling again\n"
"- Each step must include NEW evidence from code examination\\n" "- Each step must include NEW evidence from code examination\n"
"- No recursive calls without actual investigation work\\n" "- No recursive calls without actual investigation work\n"
"- The tool will specify which step number to use next\\n" "- The tool will specify which step number to use next\n"
"- Follow the required_actions list for investigation guidance\\n\\n" "- Follow the required_actions list for investigation guidance\n\n"
"TRACE MODES:\\n" "TRACE MODES:\n"
"- 'ask': Default mode - prompts you to choose between precision or dependencies modes with explanations\\n" "- 'ask': Default mode - prompts you to choose between precision or dependencies modes with explanations\n"
"- 'precision': For methods/functions - traces execution flow, call chains, and usage patterns\\n" "- 'precision': For methods/functions - traces execution flow, call chains, and usage patterns\n"
"- 'dependencies': For classes/modules - maps structural relationships and bidirectional dependencies\\n\\n" "- 'dependencies': For classes/modules - maps structural relationships and bidirectional dependencies\n\n"
"Perfect for: method execution flow analysis, dependency mapping, call chain tracing, " "Perfect for: method execution flow analysis, dependency mapping, call chain tracing, "
"structural relationship analysis, architectural understanding, code comprehension." "structural relationship analysis, architectural understanding, code comprehension."
) )

View File

@@ -250,42 +250,6 @@ class VersionTool(BaseTool):
output_lines.append("") output_lines.append("")
# Python and system information
output_lines.append("## System Information")
output_lines.append(
f"**Python Version**: {sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"
)
output_lines.append(f"**Platform**: {platform.system()} {platform.release()}")
output_lines.append(f"**Architecture**: {platform.machine()}")
output_lines.append("")
# Available tools
try:
# Import here to avoid circular imports
from server import TOOLS
tool_names = sorted(TOOLS.keys())
output_lines.append("## Available Tools")
output_lines.append(f"**Total Tools**: {len(tool_names)}")
output_lines.append("\n**Tool List**:")
for tool_name in tool_names:
tool = TOOLS[tool_name]
# Get the first line of the tool's description for a brief summary
description = tool.get_description().split("\n")[0]
# Truncate if too long
if len(description) > 80:
description = description[:77] + "..."
output_lines.append(f"- `{tool_name}` - {description}")
output_lines.append("")
except Exception as e:
logger.warning(f"Error loading tools list: {e}")
output_lines.append("## Available Tools")
output_lines.append("**Error**: Could not load tools list")
output_lines.append("")
# Configuration information # Configuration information
output_lines.append("## Configuration") output_lines.append("## Configuration")
@@ -301,10 +265,11 @@ class VersionTool(BaseTool):
ProviderType.GOOGLE, ProviderType.GOOGLE,
ProviderType.OPENAI, ProviderType.OPENAI,
ProviderType.XAI, ProviderType.XAI,
ProviderType.DIAL,
ProviderType.OPENROUTER, ProviderType.OPENROUTER,
ProviderType.CUSTOM, ProviderType.CUSTOM,
] ]
provider_names = ["Google Gemini", "OpenAI", "X.AI", "OpenRouter", "Custom/Local"] provider_names = ["Google Gemini", "OpenAI", "X.AI", "DIAL", "OpenRouter", "Custom/Local"]
for provider_type, provider_name in zip(provider_types, provider_names): for provider_type, provider_name in zip(provider_types, provider_names):
provider = ModelProviderRegistry.get_provider(provider_type) provider = ModelProviderRegistry.get_provider(provider_type)
@@ -317,23 +282,16 @@ class VersionTool(BaseTool):
# Get total available models # Get total available models
try: try:
available_models = ModelProviderRegistry.get_available_models(respect_restrictions=True) available_models = ModelProviderRegistry.get_available_models(respect_restrictions=True)
output_lines.append(f"\n**Available Models**: {len(available_models)}") output_lines.append(f"\n\n**Available Models**: {len(available_models)}")
except Exception: except Exception:
output_lines.append("\n**Available Models**: Unknown") output_lines.append("\n\n**Available Models**: Unknown")
except Exception as e: except Exception as e:
logger.warning(f"Error checking provider configuration: {e}") logger.warning(f"Error checking provider configuration: {e}")
output_lines.append("**Providers**: Error checking configuration") output_lines.append("\n\n**Providers**: Error checking configuration")
output_lines.append("") output_lines.append("")
# Usage information
output_lines.append("## Usage")
output_lines.append("- Use `listmodels` tool to see all available AI models")
output_lines.append("- Use `chat` for interactive conversations and brainstorming")
output_lines.append("- Use workflow tools (`debug`, `codereview`, `docgen`, etc.) for systematic analysis")
output_lines.append("- Set DEFAULT_MODEL=auto to let Claude choose the best model for each task")
# Format output # Format output
content = "\n".join(output_lines) content = "\n".join(output_lines)