Fixed planner tool warnings when model was auto (model not required)
@@ -214,6 +214,18 @@ class BaseTool(ABC):
         """
         pass
 
+    def requires_model(self) -> bool:
+        """
+        Return whether this tool requires AI model access.
+
+        Tools that override execute() to do pure data processing (like planner)
+        should return False to skip model resolution at the MCP boundary.
+
+        Returns:
+            bool: True if tool needs AI model access (default), False for data-only tools
+        """
+        return True
+
     @classmethod
     def _get_openrouter_registry(cls):
         """Get cached OpenRouter registry instance."""
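
For context, the point of the new hook is that the server can consult requires_model() before resolving any model name at the MCP boundary. A minimal sketch of that check, assuming a handle_call_tool() entry point and a resolve_model() helper; both names are illustrative, not the project's actual API:

from typing import Any


def resolve_model(name: str) -> str:
    # Stand-in for the real resolution step, the part that previously
    # warned when asked to resolve "auto" on behalf of the planner.
    return name  # the real helper maps "auto" to a concrete model


async def handle_call_tool(tool: Any, arguments: dict[str, Any]) -> list:
    # Hypothetical boundary check; handle_call_tool() and resolve_model()
    # are illustrative names, not the project's actual entry points.
    if tool.requires_model():
        # Model-backed tools still get their model name resolved.
        arguments["model"] = resolve_model(arguments.get("model", "auto"))
    # Data-only tools (requires_model() is False) skip resolution entirely,
    # which is what silences the "auto" warnings for the planner.
    return await tool.execute(arguments)
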
@@ -244,6 +244,16 @@ class PlannerTool(BaseTool):
     def get_default_thinking_mode(self) -> str:
         return "high"  # Default to high thinking for comprehensive planning
 
+    def requires_model(self) -> bool:
+        """
+        Planner tool doesn't require AI model access - it's pure data processing.
+
+        This prevents the server from trying to resolve model names like "auto"
+        when the planner tool is used, since it overrides execute() and doesn't
+        make any AI API calls.
+        """
+        return False
+
     async def execute(self, arguments: dict[str, Any]) -> list:
         """
         Override execute to work like original TypeScript tool - no AI calls, just data processing.
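
Taken together, a planner call with model="auto" now goes straight to execute() without ever touching model resolution. A self-contained toy version of the pattern, using simplified stand-ins for the real classes (the actual tools carry many more fields and checks):

import asyncio
from abc import ABC, abstractmethod
from typing import Any


class BaseTool(ABC):
    """Simplified stand-in for the real base class."""

    def requires_model(self) -> bool:
        return True  # default: tools need an AI model resolved first

    @abstractmethod
    async def execute(self, arguments: dict[str, Any]) -> list: ...


class PlannerTool(BaseTool):
    """Simplified stand-in: pure data processing, no AI calls."""

    def requires_model(self) -> bool:
        return False

    async def execute(self, arguments: dict[str, Any]) -> list:
        return [{"step_number": 1, "step": arguments.get("step", "")}]


async def main() -> None:
    tool = PlannerTool()
    if not tool.requires_model():
        # No model resolution, so "auto" never reaches the resolver.
        print(await tool.execute({"step": "Draft rollout plan", "model": "auto"}))


asyncio.run(main())
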