Model definitions now support a new `allow_code_generation` flag, intended only for higher-reasoning models such as GPT-5-Pro and Gemini 2.5 Pro. When `true`, the `chat` tool can ask the external model to generate a full implementation, an update, or step-by-step instructions, and then share the result with the calling agent. This effectively lets us use more powerful models such as GPT-5-Pro (which are either API-only or part of the $200 Pro plan from within the ChatGPT app) to generate code or entire implementations for us.
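For example, a model entry opting in might look something like the following minimal sketch. Apart from `allow_code_generation` itself, the field names and structure here are illustrative assumptions, not the exact schema:

```python
# Illustrative model definition. Only `allow_code_generation` comes from this change;
# the other fields are placeholders for whatever the real definition contains.
GPT_5_PRO_MODEL = {
    "model_name": "gpt-5-pro",            # assumed field name
    "supports_extended_thinking": True,   # assumed: marks a higher-reasoning model
    "allow_code_generation": True,        # new flag: lets `chat` request full implementations
}
```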
"""
|
|
System prompts for Gemini tools
|
|
"""
|
|
|
|
from .analyze_prompt import ANALYZE_PROMPT
|
|
from .chat_prompt import CHAT_PROMPT
|
|
from .codereview_prompt import CODEREVIEW_PROMPT
|
|
from .consensus_prompt import CONSENSUS_PROMPT
|
|
from .debug_prompt import DEBUG_ISSUE_PROMPT
|
|
from .docgen_prompt import DOCGEN_PROMPT
|
|
from .generate_code_prompt import GENERATE_CODE_PROMPT
|
|
from .planner_prompt import PLANNER_PROMPT
|
|
from .precommit_prompt import PRECOMMIT_PROMPT
|
|
from .refactor_prompt import REFACTOR_PROMPT
|
|
from .secaudit_prompt import SECAUDIT_PROMPT
|
|
from .testgen_prompt import TESTGEN_PROMPT
|
|
from .thinkdeep_prompt import THINKDEEP_PROMPT
|
|
from .tracer_prompt import TRACER_PROMPT
|
|
|
|
__all__ = [
|
|
"THINKDEEP_PROMPT",
|
|
"CODEREVIEW_PROMPT",
|
|
"DEBUG_ISSUE_PROMPT",
|
|
"DOCGEN_PROMPT",
|
|
"GENERATE_CODE_PROMPT",
|
|
"ANALYZE_PROMPT",
|
|
"CHAT_PROMPT",
|
|
"CONSENSUS_PROMPT",
|
|
"PLANNER_PROMPT",
|
|
"PRECOMMIT_PROMPT",
|
|
"REFACTOR_PROMPT",
|
|
"SECAUDIT_PROMPT",
|
|
"TESTGEN_PROMPT",
|
|
"TRACER_PROMPT",
|
|
]
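As a rough sketch of how these pieces fit together (assuming the package is importable as `prompts`; the helper below is hypothetical, not the actual tool code), the `chat` tool could fall back to its normal prompt unless the selected model opts in to code generation:

```python
from prompts import CHAT_PROMPT, GENERATE_CODE_PROMPT

def select_system_prompt(model_definition: dict) -> str:
    """Hypothetical helper: use the code-generation prompt only when the
    model definition sets `allow_code_generation` to true."""
    if model_definition.get("allow_code_generation", False):
        return GENERATE_CODE_PROMPT
    return CHAT_PROMPT
```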