Add DocGen tool with comprehensive documentation generation capabilities (#109)
* WIP: new workflow architecture
* WIP: further improvements and cleanup
* WIP: cleanup and docs, replace old tool with new
* WIP: new planner implementation using workflow
* WIP: precommit tool working as a workflow instead of a basic tool. Support for passing False to use_assistant_model to skip external models completely and use Claude only (see the sketch after this list)
* WIP: precommit workflow version swapped with old
* WIP: codereview
* WIP: replaced codereview
* WIP: replaced refactor
* WIP: workflow for thinkdeep
* WIP: ensure files get embedded correctly
* WIP: thinkdeep replaced with workflow version
* WIP: improved messaging when an external model's response is received
* WIP: analyze tool swapped
* WIP: updated tests. Extract only the content when building history. Use "relevant_files" for workflow tools only
* WIP: fixed get_completion_next_steps_message missing param
* Fixed tests. Request files consistently
* New testgen workflow tool. Updated docs
* Swap testgen workflow
* Fix CI test failures by excluding API-dependent tests
  - Update GitHub Actions workflow to exclude simulation tests that require API keys
  - Fix collaboration tests to properly mock workflow tool expert analysis calls
  - Update test assertions to handle new workflow tool response format
  - Ensure unit tests run without external API dependencies in CI
  🤖 Generated with [Claude Code](https://claude.ai/code)
  Co-Authored-By: Claude <noreply@anthropic.com>
* WIP: update tests to match new tools
* Should help with https://github.com/BeehiveInnovations/zen-mcp-server/issues/97. Clear Python cache when running the script: https://github.com/BeehiveInnovations/zen-mcp-server/issues/96. Improved retry error logging. Cleanup
* WIP: chat tool using new architecture and improved code sharing
* Removed todo
* Cleanup old name
* Tweak wordings. Migrate old tests
* Support for Flash 2.0 and Flash Lite 2.0. Fixed test
* Improved consensus to use the workflow base class
* Allow images
* Replaced old consensus tool
* Cleanup tests
* Tests for prompt size
* New tool: docgen. Fixes https://github.com/BeehiveInnovations/zen-mcp-server/issues/107. Use available token size limits: https://github.com/BeehiveInnovations/zen-mcp-server/issues/105
* Improved docgen prompt. Exclude TestGen from pytest inclusion
* Updated errors
* Lint
* DocGen instructed not to fix bugs but to surface them and stick to documentation
* WIP
* Stop Claude from being lazy and only documenting a small handful
* More style rules

---------

Co-authored-by: Claude <noreply@anthropic.com>
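The use_assistant_model flag mentioned in the precommit bullet above can be illustrated with a minimal sketch. This is an assumption-laden illustration, not code from the repository: only the use_assistant_model name and its pass-False-to-skip-external-models behavior come from this commit message; every other argument name is hypothetical.

```python
# Hypothetical arguments for a workflow tool call (e.g. precommit) that
# skips the external assistant model entirely and lets Claude handle the
# step alone. Only `use_assistant_model` is attested by the commit message;
# the remaining field names are illustrative assumptions.
arguments = {
    "step": "Review staged changes before committing",  # hypothetical
    "step_number": 1,                                   # hypothetical
    "next_step_required": False,                        # hypothetical
    "use_assistant_model": False,  # skip external models; use Claude only
}
```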
committed by GitHub
parent 0655590a51
commit c960bcb720

tools/chat.py (158 lines changed)
```diff
@@ -1,5 +1,9 @@
 """
 Chat tool - General development chat and collaborative thinking
+
+This tool provides a conversational interface for general development assistance,
+brainstorming, problem-solving, and collaborative thinking. It supports file context,
+images, and conversation continuation for seamless multi-turn interactions.
 """
 
 from typing import TYPE_CHECKING, Any, Optional
@@ -11,10 +15,11 @@ if TYPE_CHECKING:
 
 from config import TEMPERATURE_BALANCED
 from systemprompts import CHAT_PROMPT
+from tools.shared.base_models import ToolRequest
 
-from .base import BaseTool, ToolRequest
+from .simple.base import SimpleTool
 
-# Field descriptions to avoid duplication between Pydantic and JSON schema
+# Field descriptions matching the original Chat tool exactly
 CHAT_FIELD_DESCRIPTIONS = {
     "prompt": (
         "You MUST provide a thorough, expressive question or share an idea with as much context as possible. "
@@ -32,15 +37,23 @@ CHAT_FIELD_DESCRIPTIONS = {
 
 
 class ChatRequest(ToolRequest):
-    """Request model for chat tool"""
+    """Request model for Chat tool"""
 
     prompt: str = Field(..., description=CHAT_FIELD_DESCRIPTIONS["prompt"])
     files: Optional[list[str]] = Field(default_factory=list, description=CHAT_FIELD_DESCRIPTIONS["files"])
     images: Optional[list[str]] = Field(default_factory=list, description=CHAT_FIELD_DESCRIPTIONS["images"])
 
 
-class ChatTool(BaseTool):
-    """General development chat and collaborative thinking tool"""
+class ChatTool(SimpleTool):
+    """
+    General development chat and collaborative thinking tool using SimpleTool architecture.
+
+    This tool provides identical functionality to the original Chat tool but uses the new
+    SimpleTool architecture for cleaner code organization and better maintainability.
+
+    Migration note: This tool is designed to be a drop-in replacement for the original
+    Chat tool with 100% behavioral compatibility.
+    """
 
     def get_name(self) -> str:
         return "chat"
@@ -57,7 +70,33 @@ class ChatTool(BaseTool):
             "provide enhanced capabilities."
         )
 
+    def get_system_prompt(self) -> str:
+        return CHAT_PROMPT
+
+    def get_default_temperature(self) -> float:
+        return TEMPERATURE_BALANCED
+
+    def get_model_category(self) -> "ToolModelCategory":
+        """Chat prioritizes fast responses and cost efficiency"""
+        from tools.models import ToolModelCategory
+
+        return ToolModelCategory.FAST_RESPONSE
+
+    def get_request_model(self):
+        """Return the Chat-specific request model"""
+        return ChatRequest
+
+    # === Schema Generation ===
+    # For maximum compatibility, we override get_input_schema() to match the original Chat tool exactly
+
     def get_input_schema(self) -> dict[str, Any]:
+        """
+        Generate input schema matching the original Chat tool exactly.
+
+        This maintains 100% compatibility with the original Chat tool by using
+        the same schema generation approach while still benefiting from SimpleTool
+        convenience methods.
+        """
         schema = {
             "type": "object",
             "properties": {
```
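For orientation between the hunks: the get_input_schema() override above returns a plain JSON-schema dict. Below is a rough sketch of its shape, reconstructed from CHAT_FIELD_DESCRIPTIONS and the get_tool_fields() definitions in the next hunk; the real schema also carries the tool's common fields (model, temperature, continuation options) that are not shown here.

```python
# Approximate shape of the dict returned by get_input_schema(), inferred
# from the field definitions in this diff. Descriptions and the tool's
# common fields are elided; this is a sketch, not repository code.
schema_sketch = {
    "type": "object",
    "properties": {
        "prompt": {"type": "string"},
        "files": {"type": "array", "items": {"type": "string"}},
        "images": {"type": "array", "items": {"type": "string"}},
    },
    "required": ["prompt"],
}
```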
```diff
@@ -115,79 +154,62 @@ class ChatTool(BaseTool):
 
         return schema
 
-    def get_system_prompt(self) -> str:
-        return CHAT_PROMPT
+    # === Tool-specific field definitions (alternative approach for reference) ===
+    # These aren't used since we override get_input_schema(), but they show how
+    # the tool could be implemented using the automatic SimpleTool schema building
 
-    def get_default_temperature(self) -> float:
-        return TEMPERATURE_BALANCED
+    def get_tool_fields(self) -> dict[str, dict[str, Any]]:
+        """
+        Tool-specific field definitions for ChatSimple.
 
-    def get_model_category(self) -> "ToolModelCategory":
-        """Chat prioritizes fast responses and cost efficiency"""
-        from tools.models import ToolModelCategory
+        Note: This method isn't used since we override get_input_schema() for
+        exact compatibility, but it demonstrates how ChatSimple could be
+        implemented using automatic schema building.
+        """
+        return {
+            "prompt": {
+                "type": "string",
+                "description": CHAT_FIELD_DESCRIPTIONS["prompt"],
+            },
+            "files": {
+                "type": "array",
+                "items": {"type": "string"},
+                "description": CHAT_FIELD_DESCRIPTIONS["files"],
+            },
+            "images": {
+                "type": "array",
+                "items": {"type": "string"},
+                "description": CHAT_FIELD_DESCRIPTIONS["images"],
+            },
+        }
 
-        return ToolModelCategory.FAST_RESPONSE
+    def get_required_fields(self) -> list[str]:
+        """Required fields for ChatSimple tool"""
+        return ["prompt"]
 
-    def get_request_model(self):
-        return ChatRequest
+    # === Hook Method Implementations ===
 
     async def prepare_prompt(self, request: ChatRequest) -> str:
-        """Prepare the chat prompt with optional context files"""
-        # Check for prompt.txt in files
-        prompt_content, updated_files = self.handle_prompt_file(request.files)
+        """
+        Prepare the chat prompt with optional context files.
 
-        # Use prompt.txt content if available, otherwise use the prompt field
-        user_content = prompt_content if prompt_content else request.prompt
-
-        # Check user input size at MCP transport boundary (before adding internal content)
-        size_check = self.check_prompt_size(user_content)
-        if size_check:
-            # Need to return error, but prepare_prompt returns str
-            # Use exception to handle this cleanly
-            from tools.models import ToolOutput
-
-            raise ValueError(f"MCP_SIZE_CHECK:{ToolOutput(**size_check).model_dump_json()}")
-
-        # Update request files list
-        if updated_files is not None:
-            request.files = updated_files
-
-        # Add context files if provided (using centralized file handling with filtering)
-        if request.files:
-            file_content, processed_files = self._prepare_file_content_for_prompt(
-                request.files, request.continuation_id, "Context files"
-            )
-            self._actually_processed_files = processed_files
-            if file_content:
-                user_content = f"{user_content}\n\n=== CONTEXT FILES ===\n{file_content}\n=== END CONTEXT ===="
-
-        # Check token limits
-        self._validate_token_limit(user_content, "Content")
-
-        # Add web search instruction if enabled
-        websearch_instruction = self.get_websearch_instruction(
-            request.use_websearch,
-            """When discussing topics, consider if searches for these would help:
-- Documentation for any technologies or concepts mentioned
-- Current best practices and patterns
-- Recent developments or updates
-- Community discussions and solutions""",
-        )
-
-        # Combine system prompt with user content
-        full_prompt = f"""{self.get_system_prompt()}{websearch_instruction}
-
-=== USER REQUEST ===
-{user_content}
-=== END REQUEST ===
-
-Please provide a thoughtful, comprehensive response:"""
-
-        return full_prompt
+        This implementation matches the original Chat tool exactly while using
+        SimpleTool convenience methods for cleaner code.
+        """
+        # Use SimpleTool's Chat-style prompt preparation
+        return self.prepare_chat_style_prompt(request)
 
     def format_response(self, response: str, request: ChatRequest, model_info: Optional[dict] = None) -> str:
-        """Format the chat response"""
+        """
+        Format the chat response to match the original Chat tool exactly.
+        """
         return (
             f"{response}\n\n---\n\n**Claude's Turn:** Evaluate this perspective alongside your analysis to "
             "form a comprehensive solution and continue with the user's request and task at hand."
         )
+
+    def get_websearch_guidance(self) -> str:
+        """
+        Return Chat tool-style web search guidance.
+        """
+        return self.get_chat_style_websearch_guidance()
```
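A minimal usage sketch of the migrated tool, assuming it runs inside the zen-mcp-server repository so the tools.chat import resolves, and that SimpleTool supplies the inherited machinery needed to instantiate ChatTool; illustrative only, not a test from this PR:

```python
# Illustrative sketch, not repository code: assumes the package layout from
# this diff and that ChatTool is instantiable as-is.
from tools.chat import ChatRequest, ChatTool

tool = ChatTool()
assert tool.get_name() == "chat"  # name defined in the diff above

# ChatRequest validates tool arguments; "prompt" is the only required field.
request = ChatRequest(prompt="How should I structure retries around a flaky API?")

# get_input_schema() is the JSON schema advertised to MCP clients.
schema = tool.get_input_schema()
print(sorted(schema["properties"].keys()))  # expect prompt, files, images, ...
```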