fix: remove use_websearch; this parameter was confusing Codex, which started using it to prompt the external model to perform searches. Web search is enabled by default in Claude / Codex etc., and the external agent can ask Claude to search on its behalf.

This commit is contained in:
Fahad
2025-10-01 18:44:11 +04:00
parent 28cabe0833
commit cff6d8998f
27 changed files with 45 additions and 129 deletions

View File

@@ -125,8 +125,7 @@ class AnalyzeWorkflowRequest(WorkflowRequest):
"detailed", description=ANALYZE_WORKFLOW_FIELD_DESCRIPTIONS["output_format"]
)
# Keep thinking_mode and use_websearch from original analyze tool
# temperature is inherited from WorkflowRequest
# Keep thinking_mode from original analyze tool; temperature is inherited from WorkflowRequest
@model_validator(mode="after")
def validate_step_one_requirements(self):

View File

@@ -23,9 +23,9 @@ from .simple.base import SimpleTool
CHAT_FIELD_DESCRIPTIONS = {
"prompt": (
"Your question or idea for collaborative thinking. Provide detailed context, including your goal, what you've tried, and any specific challenges. "
"CRITICAL: To discuss code, provide file paths using the 'files' parameter instead of pasting large code blocks here."
"CRITICAL: To discuss code, use 'files' parameter instead of pasting code blocks here."
),
"files": "Absolute full-paths to existing files / folders for context. DO NOT SHORTEN.",
"files": "Always pass absolute full-paths (do NOT shorten) to existing files / folders containing code being discussed.",
"images": (
"Optional images for visual context (must be FULL absolute paths to real files / folders - DO NOT SHORTEN - OR these can be bas64 data)"
),
@@ -56,8 +56,8 @@ class ChatTool(SimpleTool):
def get_description(self) -> str:
return (
"General chat and collaborative thinking partner for brainstorming, development discussion, getting second opinions, and exploring ideas. "
"Use for bouncing ideas, validating approaches, asking questions, and getting explanations. "
"General chat and collaborative thinking partner for brainstorming, development discussion, "
"getting second opinions, and exploring ideas. Use for ideas, validations, questions, and thoughtful explanations."
)
def get_system_prompt(self) -> str:
@@ -116,11 +116,6 @@ class ChatTool(SimpleTool):
"enum": ["minimal", "low", "medium", "high", "max"],
"description": COMMON_FIELD_DESCRIPTIONS["thinking_mode"],
},
"use_websearch": {
"type": "boolean",
"description": COMMON_FIELD_DESCRIPTIONS["use_websearch"],
"default": True,
},
"continuation_id": {
"type": "string",
"description": COMMON_FIELD_DESCRIPTIONS["continuation_id"],

View File

@@ -118,7 +118,6 @@ class CodeReviewRequest(WorkflowRequest):
# Override inherited fields to exclude them from schema (except model which needs to be available)
temperature: Optional[float] = Field(default=None, exclude=True)
thinking_mode: Optional[str] = Field(default=None, exclude=True)
use_websearch: Optional[bool] = Field(default=None, exclude=True)
@model_validator(mode="after")
def validate_step_one_requirements(self):

View File

@@ -116,7 +116,6 @@ class ConsensusRequest(WorkflowRequest):
# Override inherited fields to exclude them from schema
temperature: float | None = Field(default=None, exclude=True)
thinking_mode: str | None = Field(default=None, exclude=True)
use_websearch: bool | None = Field(default=None, exclude=True)
# Not used in consensus workflow
files_checked: list[str] | None = Field(default_factory=list, exclude=True)
@@ -290,7 +289,6 @@ of the evidence, even when it strongly points in one direction.""",
"model", # Consensus uses 'models' field instead
"temperature", # Not used in consensus workflow
"thinking_mode", # Not used in consensus workflow
"use_websearch", # Not used in consensus workflow
]
# Build schema with proper field exclusion

View File

@@ -104,7 +104,6 @@ class DebugInvestigationRequest(WorkflowRequest):
# Override inherited fields to exclude them from schema (except model which needs to be available)
temperature: Optional[float] = Field(default=None, exclude=True)
thinking_mode: Optional[str] = Field(default=None, exclude=True)
use_websearch: Optional[bool] = Field(default=None, exclude=True)
class DebugIssueTool(WorkflowTool):

View File

@@ -243,7 +243,6 @@ class DocgenTool(WorkflowTool):
"model", # Documentation doesn't need external model selection
"temperature", # Documentation doesn't need temperature control
"thinking_mode", # Documentation doesn't need thinking mode
"use_websearch", # Documentation doesn't need web search
"images", # Documentation doesn't use images
]

View File

@@ -88,7 +88,6 @@ class PlannerRequest(WorkflowRequest):
# Exclude other non-planning fields
temperature: float | None = Field(default=None, exclude=True)
thinking_mode: str | None = Field(default=None, exclude=True)
use_websearch: bool | None = Field(default=None, exclude=True)
use_assistant_model: bool | None = Field(default=False, exclude=True, description="Planning is self-contained")
images: list | None = Field(default=None, exclude=True, description="Planning doesn't use images")
@@ -218,7 +217,6 @@ class PlannerTool(WorkflowTool):
excluded_common_fields = [
"temperature", # Planning doesn't need temperature control
"thinking_mode", # Planning doesn't need thinking mode
"use_websearch", # Planning doesn't need web search
"images", # Planning doesn't use images
"files", # Planning doesn't use files
]

View File

@@ -119,7 +119,6 @@ class PrecommitRequest(WorkflowRequest):
# Override inherited fields to exclude them from schema (except model which needs to be available)
temperature: Optional[float] = Field(default=None, exclude=True)
thinking_mode: Optional[str] = Field(default=None, exclude=True)
use_websearch: Optional[bool] = Field(default=None, exclude=True)
@model_validator(mode="after")
def validate_step_one_requirements(self):

View File

@@ -131,7 +131,6 @@ class RefactorRequest(WorkflowRequest):
# Override inherited fields to exclude them from schema (except model which needs to be available)
temperature: Optional[float] = Field(default=None, exclude=True)
thinking_mode: Optional[str] = Field(default=None, exclude=True)
use_websearch: Optional[bool] = Field(default=None, exclude=True)
@model_validator(mode="after")
def validate_step_one_requirements(self):

View File

@@ -21,25 +21,17 @@ logger = logging.getLogger(__name__)
# Shared field descriptions to avoid duplication
COMMON_FIELD_DESCRIPTIONS = {
"model": (
"Model to use. See tool's input schema for available models. "
"Use 'auto' to let Claude select the best model for the task."
),
"temperature": (
"Lower values: focused/deterministic; higher: creative. Tool-specific defaults apply if unspecified."
"Model to use. See tool's input schema for available models if required. Use 'auto' select the best model for the task."
),
"temperature": ("Lower values: deterministic; higher: creative."),
"thinking_mode": (
"Thinking depth: minimal (0.5%), low (8%), medium (33%), high (67%), "
"max (100% of model max). Higher modes: deeper reasoning but slower."
),
"use_websearch": (
"Enable web search for docs and current info. Model can request Claude to perform web-search for "
"best practices, framework docs, solution research, latest API information."
),
"continuation_id": (
"Unique thread continuation ID for multi-turn conversations. Reuse last continuation_id "
"when continuing discussion (unless user provides different ID) using exact unique identifer. "
"Embeds complete conversation history. Build upon history without repeating. "
"Focus on new insights. Works across different tools."
"Unique thread continuation ID for multi-turn conversations. Works across different tools. "
"ALWAYS reuse last continuation_id you were provided as-is when re-communicating with Zen MCP, "
"unless user provides different ID. When supplied, your complete conversation history is available, so focus on new insights."
),
"images": (
"Optional images for visual context. MUST be absolute paths or base64. "
@@ -88,9 +80,6 @@ class ToolRequest(BaseModel):
temperature: Optional[float] = Field(None, ge=0.0, le=1.0, description=COMMON_FIELD_DESCRIPTIONS["temperature"])
thinking_mode: Optional[str] = Field(None, description=COMMON_FIELD_DESCRIPTIONS["thinking_mode"])
# Features
use_websearch: Optional[bool] = Field(True, description=COMMON_FIELD_DESCRIPTIONS["use_websearch"])
# Conversation support
continuation_id: Optional[str] = Field(None, description=COMMON_FIELD_DESCRIPTIONS["continuation_id"])

View File

@@ -205,10 +205,10 @@ class BaseTool(ABC):
def _should_require_model_selection(self, model_name: str) -> bool:
"""
Check if we should require Claude to select a model at runtime.
Check if we should require the CLI to select a model at runtime.
This is called during request execution to determine if we need
to return an error asking Claude to provide a model parameter.
to return an error asking the CLI to provide a model parameter.
Args:
model_name: The model name from the request or DEFAULT_MODEL
@@ -237,7 +237,7 @@ class BaseTool(ABC):
Only returns models from providers that have valid API keys configured.
This fixes the namespace collision bug where models from disabled providers
were shown to Claude, causing routing conflicts.
were shown to the CLI, causing routing conflicts.
Returns:
List of model names from enabled providers only
@@ -405,7 +405,7 @@ class BaseTool(ABC):
if model_configs:
model_desc_parts.append("\nOpenRouter models (use these aliases):")
for alias, config in model_configs: # Show ALL models so Claude can choose
for alias, config in model_configs: # Show ALL models so the CLI can choose
# Format context window in human-readable form
context_tokens = config.context_window
if context_tokens >= 1_000_000:
@@ -445,7 +445,7 @@ class BaseTool(ABC):
else:
# Normal mode - model is optional with default
available_models = self._get_available_models()
models_str = ", ".join(f"'{m}'" for m in available_models) # Show ALL models so Claude can choose
models_str = ", ".join(f"'{m}'" for m in available_models) # Show ALL models so the CLI can choose
description = f"Model to use. Native models: {models_str}."
if has_openrouter:
@@ -456,7 +456,7 @@ class BaseTool(ABC):
# Show ALL aliases from the configuration
if aliases:
# Show all aliases so Claude knows every option available
# Show all aliases so the CLI knows every option available
all_aliases = sorted(aliases)
alias_list = ", ".join(f"'{a}'" for a in all_aliases)
description += f" OpenRouter aliases: {alias_list}."
@@ -763,7 +763,7 @@ class BaseTool(ABC):
This file is treated specially as the main prompt, not as an embedded file.
This mechanism allows us to work around MCP's ~25K token limit by having
Claude save large prompts to a file, effectively using the file transfer
the CLI save large prompts to a file, effectively using the file transfer
mechanism to bypass token constraints while preserving response capacity.
Args:
@@ -839,7 +839,7 @@ class BaseTool(ABC):
Check if USER INPUT text is too large for MCP transport boundary.
IMPORTANT: This method should ONLY be used to validate user input that crosses
the Claude CLI ↔ MCP Server transport boundary. It should NOT be used to limit
the CLI ↔ MCP Server transport boundary. It should NOT be used to limit
internal MCP Server operations.
Args:
@@ -1051,9 +1051,9 @@ class BaseTool(ABC):
base_instruction = """
WEB SEARCH CAPABILITY: You can request Claude to perform web searches to enhance your analysis with current information!
WEB SEARCH CAPABILITY: You can request the calling agent to perform web searches to enhance your analysis with current information!
IMPORTANT: When you identify areas where web searches would significantly improve your response (such as checking current documentation, finding recent solutions, verifying best practices, or gathering community insights), you MUST explicitly instruct Claude to perform specific web searches and then respond back using the continuation_id from this response to continue the analysis.
IMPORTANT: When you identify areas where web searches would significantly improve your response (such as checking current documentation, finding recent solutions, verifying best practices, or gathering community insights), you MUST explicitly instruct the agent to perform specific web searches and then respond back using the continuation_id from this response to continue the analysis.
Use clear, direct language based on the value of the search:
@@ -1083,7 +1083,7 @@ Consider requesting searches for:
- Security advisories and patches
- Performance benchmarks and optimizations
When recommending searches, be specific about what information you need and why it would improve your analysis. Always remember to instruct Claude to use the continuation_id from this response when providing search results."""
When recommending searches, be specific about what information you need and why it would improve your analysis. Always remember to instruct agent to use the continuation_id from this response when providing search results."""
def get_language_instruction(self) -> str:
"""
@@ -1158,10 +1158,10 @@ When recommending searches, be specific about what information you need and why
def _should_require_model_selection(self, model_name: str) -> bool:
"""
Check if we should require Claude to select a model at runtime.
Check if we should require the CLI to select a model at runtime.
This is called during request execution to determine if we need
to return an error asking Claude to provide a model parameter.
to return an error asking the CLI to provide a model parameter.
Args:
model_name: The model name from the request or DEFAULT_MODEL
@@ -1189,7 +1189,7 @@ When recommending searches, be specific about what information you need and why
Only returns models from providers that have valid API keys configured.
This fixes the namespace collision bug where models from disabled providers
were shown to Claude, causing routing conflicts.
were shown to the CLI, causing routing conflicts.
Returns:
List of model names from enabled providers only

View File

@@ -32,11 +32,6 @@ class SchemaBuilder:
"enum": ["minimal", "low", "medium", "high", "max"],
"description": COMMON_FIELD_DESCRIPTIONS["thinking_mode"],
},
"use_websearch": {
"type": "boolean",
"description": COMMON_FIELD_DESCRIPTIONS["use_websearch"],
"default": True,
},
"continuation_id": {
"type": "string",
"description": COMMON_FIELD_DESCRIPTIONS["continuation_id"],

View File

@@ -234,13 +234,6 @@ class SimpleTool(BaseTool):
except AttributeError:
return []
def get_request_use_websearch(self, request) -> bool:
"""Get use_websearch from request. Override for custom websearch handling."""
try:
return request.use_websearch if request.use_websearch is not None else True
except AttributeError:
return True
def get_request_as_dict(self, request) -> dict:
"""Convert request to dictionary. Override for custom serialization."""
try:
@@ -787,11 +780,8 @@ class SimpleTool(BaseTool):
content_to_validate = self.get_prompt_content_for_size_validation(user_content)
self._validate_token_limit(content_to_validate, "Content")
# Add web search instruction if enabled
websearch_instruction = ""
use_websearch = self.get_request_use_websearch(request)
if use_websearch:
websearch_instruction = self.get_websearch_instruction(use_websearch, self.get_websearch_guidance())
# Add standardized web search guidance
websearch_instruction = self.get_websearch_instruction(True, self.get_websearch_guidance())
# Combine system prompt with user content
full_prompt = f"""{system_prompt}{websearch_instruction}

View File

@@ -115,7 +115,6 @@ class TestGenRequest(WorkflowRequest):
# Override inherited fields to exclude them from schema (except model which needs to be available)
temperature: Optional[float] = Field(default=None, exclude=True)
thinking_mode: Optional[str] = Field(default=None, exclude=True)
use_websearch: Optional[bool] = Field(default=None, exclude=True)
@model_validator(mode="after")
def validate_step_one_requirements(self):

View File

@@ -90,11 +90,6 @@ class ThinkDeepWorkflowRequest(WorkflowRequest):
default=None,
description="Depth: minimal/low/medium/high/max. Default 'high'.",
)
use_websearch: Optional[bool] = Field(
default=None,
description="Enable web search for docs, brainstorming, architecture, solutions.",
)
# Context files and investigation scope
problem_context: Optional[str] = Field(
default=None,
@@ -200,11 +195,6 @@ class ThinkDeepTool(WorkflowTool):
except AttributeError:
self.stored_request_params["thinking_mode"] = None
try:
self.stored_request_params["use_websearch"] = request.use_websearch
except AttributeError:
self.stored_request_params["use_websearch"] = None
# Add thinking-specific context to response
response_data.update(
{
@@ -349,16 +339,6 @@ but also acknowledge strong insights and valid conclusions.
pass
return super().get_request_thinking_mode(request)
def get_request_use_websearch(self, request) -> bool:
"""Use stored use_websearch from initial request."""
try:
stored_params = self.stored_request_params
if stored_params and stored_params.get("use_websearch") is not None:
return stored_params["use_websearch"]
except AttributeError:
pass
return super().get_request_use_websearch(request)
def _get_problem_context(self, request) -> str:
"""Get problem context from request. Override for custom context handling."""
try:

View File

@@ -122,7 +122,6 @@ class TracerRequest(WorkflowRequest):
# Exclude other non-tracing fields
temperature: Optional[float] = Field(default=None, exclude=True)
thinking_mode: Optional[str] = Field(default=None, exclude=True)
use_websearch: Optional[bool] = Field(default=None, exclude=True)
use_assistant_model: Optional[bool] = Field(default=False, exclude=True, description="Tracing is self-contained")
@field_validator("step_number")
@@ -228,7 +227,6 @@ class TracerTool(WorkflowTool):
excluded_common_fields = [
"temperature", # Tracing doesn't need temperature control
"thinking_mode", # Tracing doesn't need thinking mode
"use_websearch", # Tracing doesn't need web search
"files", # Tracing uses relevant_files instead
]

View File

@@ -267,13 +267,6 @@ class BaseWorkflowMixin(ABC):
except AttributeError:
return self.get_expert_thinking_mode()
def get_request_use_websearch(self, request) -> bool:
"""Get use_websearch from request. Override for custom websearch handling."""
try:
return request.use_websearch if request.use_websearch is not None else True
except AttributeError:
return True
def get_expert_analysis_instruction(self) -> str:
"""
Get the instruction to append after the expert context.
@@ -590,10 +583,7 @@ class BaseWorkflowMixin(ABC):
# Create a simple reference note
file_names = [os.path.basename(f) for f in request_files]
reference_note = (
f"Files referenced in this step: {', '.join(file_names)}\n"
f"(File content available via conversation history or can be discovered by Claude)"
)
reference_note = f"Files referenced in this step: {', '.join(file_names)}\n"
self._file_reference_note = reference_note
logger.debug(f"[WORKFLOW_FILES] {self.get_name()}: Set _file_reference_note: {self._file_reference_note}")
@@ -1514,7 +1504,6 @@ class BaseWorkflowMixin(ABC):
system_prompt=system_prompt,
temperature=validated_temperature,
thinking_mode=self.get_request_thinking_mode(request),
use_websearch=self.get_request_use_websearch(request),
images=list(set(self.consolidated_findings.images)) if self.consolidated_findings.images else None,
)