feat!: Full code can now be generated by an external model and shared with the AI tool (Claude Code, Codex, etc.)!

Model definitions now support a new `allow_code_generation` flag, intended only for higher-reasoning models such as GPT-5 Pro and Gemini 2.5 Pro. When `true`, the `chat` tool can ask the external model to generate a full implementation, update, or set of instructions, and then share that output with the calling agent. This effectively lets us tap more powerful models such as GPT-5 Pro (which are otherwise API-only or part of the $200 Pro plan within the ChatGPT app) to generate code or entire implementations for us.
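For context, the flag lives on the model's capabilities object. A minimal sketch of what the `chat` tool sees, using a stand-in for `providers.shared.ModelCapabilities` that shows only the three fields this diff actually reads (the real class has more, and the values below are illustrative, not a real model definition):

```python
from dataclasses import dataclass


@dataclass
class ModelCapabilities:
    """Stand-in for providers.shared.ModelCapabilities (fields trimmed)."""

    context_window: int = 0
    supports_extended_thinking: bool = False
    allow_code_generation: bool = False  # new flag introduced by this commit


# Illustrative values only; not the real GPT-5 Pro definition.
gpt5_pro = ModelCapabilities(
    context_window=400_000,
    supports_extended_thinking=True,
    allow_code_generation=True,  # opt in: high-reasoning model
)
```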
@@ -17,6 +17,7 @@ from typing import TYPE_CHECKING, Any, Optional
 from mcp.types import TextContent
 
 if TYPE_CHECKING:
+    from providers.shared import ModelCapabilities
     from tools.models import ToolModelCategory
 
 from config import MCP_PROMPT_SIZE_LIMIT
@@ -165,6 +166,42 @@ class BaseTool(ABC):
         """
         pass
 
+    def get_capability_system_prompts(self, capabilities: Optional["ModelCapabilities"]) -> list[str]:
+        """Return additional system prompt snippets gated on model capabilities.
+
+        Subclasses can override this hook to append capability-specific
+        instructions (for example, enabling code-generation exports when a
+        model advertises support). The default implementation returns an empty
+        list so no extra instructions are appended.
+
+        Args:
+            capabilities: The resolved capabilities for the active model.
+
+        Returns:
+            List of prompt fragments to append after the base system prompt.
+        """
+
+        return []
+
+    def _augment_system_prompt_with_capabilities(
+        self, base_prompt: str, capabilities: Optional["ModelCapabilities"]
+    ) -> str:
+        """Merge capability-driven prompt addenda with the base system prompt."""
+
+        additions: list[str] = []
+        if capabilities is not None:
+            additions = [fragment.strip() for fragment in self.get_capability_system_prompts(capabilities) if fragment]
+
+        if not additions:
+            return base_prompt
+
+        addition_text = "\n\n".join(additions)
+        if not base_prompt:
+            return addition_text
+
+        suffix = "" if base_prompt.endswith("\n\n") else "\n\n"
+        return f"{base_prompt}{suffix}{addition_text}"
+
     def get_annotations(self) -> Optional[dict[str, Any]]:
         """
         Return optional annotations for this tool.
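A hedged usage sketch of the new hook: a tool subclass appends a code-generation instruction only when the active model opts in. `ChatTool`, `CODE_GEN_PROMPT`, and the import path are illustrative assumptions, not identifiers from this diff:

```python
from tools.shared.base_tool import BaseTool  # module path assumed


class ChatTool(BaseTool):  # hypothetical subclass for illustration
    CODE_GEN_PROMPT = (
        "If a full implementation is requested, return complete, runnable "
        "code so the calling agent can apply it directly."
    )

    def get_capability_system_prompts(self, capabilities) -> list[str]:
        # Advertise code generation only when the model sets the new flag.
        if capabilities is not None and capabilities.allow_code_generation:
            return [self.CODE_GEN_PROMPT]
        return []
```

With this override, `_augment_system_prompt_with_capabilities(base_prompt, capabilities)` returns the base prompt followed by a blank line and the addendum; with the flag off, the base prompt comes back unchanged.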
@@ -413,13 +450,16 @@ class BaseTool(ABC):
         for rank, canonical_name, capabilities in filtered[:limit]:
             details: list[str] = []
 
-            context_str = self._format_context_window(getattr(capabilities, "context_window", 0))
+            context_str = self._format_context_window(capabilities.context_window)
             if context_str:
                 details.append(context_str)
 
-            if getattr(capabilities, "supports_extended_thinking", False):
+            if capabilities.supports_extended_thinking:
                 details.append("thinking")
 
+            if capabilities.allow_code_generation:
+                details.append("code-gen")
+
             base = f"{canonical_name} (score {rank}"
             if details:
                 base = f"{base}, {', '.join(details)}"
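The listing change is small but useful: models that set the flag now show a `code-gen` marker next to their score. A standalone sketch of the resulting summary line, with `_format_context_window` approximated by a simple K/M formatter (illustrative stand-in, not the real helper):

```python
def format_context_window(tokens: int) -> str:
    # Rough stand-in for BaseTool._format_context_window.
    if tokens >= 1_000_000:
        return f"{tokens // 1_000_000}M context"
    if tokens >= 1_000:
        return f"{tokens // 1_000}K context"
    return ""


rank, canonical_name = 100, "gpt-5-pro"  # illustrative values
details = [format_context_window(400_000), "thinking", "code-gen"]
base = f"{canonical_name} (score {rank}"
if details:
    base = f"{base}, {', '.join(details)}"
print(f"{base})")  # -> gpt-5-pro (score 100, 400K context, thinking, code-gen)
```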