feat!: Full code can now be generated by an external model and shared with the calling AI tool (Claude Code, Codex, etc.)
Model definitions now support a new `allow_code_generation` flag, intended only for higher-reasoning models such as GPT-5 Pro and Gemini 2.5 Pro. When `true`, the `chat` tool can request a full implementation, update, or set of instructions from the external model and then share that output with the calling agent. This effectively lets us leverage more powerful models such as GPT-5 Pro (available only via the API or the $200 Pro plan within the ChatGPT app) to generate code or entire implementations on our behalf.
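To make the flow concrete, here is a minimal, hedged sketch of how a model definition might carry the flag and how a tool could gate on it. The `ModelCapabilities` class, its fields other than `allow_code_generation`, and `call_external_model` are illustrative stand-ins, not the project's real API:

```python
from dataclasses import dataclass


@dataclass
class ModelCapabilities:
    """Illustrative model definition; only allow_code_generation comes from this commit."""

    model_name: str
    allow_code_generation: bool = False  # opt-in; reserved for high-reasoning models


def call_external_model(model_name: str, prompt: str) -> str:
    """Stub standing in for the real provider call."""
    return f"[{model_name}] full implementation for: {prompt!r}"


def request_implementation(caps: ModelCapabilities, prompt: str) -> str:
    """Ask the external model for a complete implementation only when permitted."""
    if not caps.allow_code_generation:
        raise ValueError(f"{caps.model_name} is not configured for code generation")
    return call_external_model(caps.model_name, prompt)


gpt_5_pro = ModelCapabilities(model_name="gpt-5-pro", allow_code_generation=True)
print(request_implementation(gpt_5_pro, "Implement a token-bucket rate limiter"))
```

The test diff below shows a related change: the cross-model continuation test now threads a per-test working directory through both `chat` calls.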
```diff
@@ -55,7 +55,7 @@ def _extract_number(text: str) -> str:
 
 @pytest.mark.asyncio
 @pytest.mark.no_mock_provider
-async def test_chat_cross_model_continuation(monkeypatch):
+async def test_chat_cross_model_continuation(monkeypatch, tmp_path):
     """Verify continuation across Gemini then OpenAI using recorded interactions."""
 
     env_updates = {
@@ -115,10 +115,13 @@ async def test_chat_cross_model_continuation(monkeypatch):
         m.setattr(conversation_memory.uuid, "uuid4", lambda: FIXED_THREAD_ID)
 
         chat_tool = ChatTool()
+        working_directory = str(tmp_path)
+
         step1_args = {
             "prompt": "Pick a number between 1 and 10 and respond with JUST that number.",
             "model": "gemini-2.5-flash",
             "temperature": 0.2,
+            "working_directory": working_directory,
         }
 
         step1_result = await chat_tool.execute(step1_args)
@@ -183,6 +186,7 @@ async def test_chat_cross_model_continuation(monkeypatch):
             "model": "gpt-5",
             "continuation_id": continuation_id,
             "temperature": 0.2,
+            "working_directory": working_directory,
         }
 
         step2_result = await chat_tool.execute(step2_args)
```
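For context on the `tmp_path` change above: `tmp_path` is pytest's built-in fixture that yields a fresh `pathlib.Path` unique to each test, so `str(tmp_path)` hands the chat tool an isolated, writable `working_directory`. A minimal standalone illustration of the same pattern (not part of the diff):

```python
import pathlib


def test_uses_isolated_working_directory(tmp_path: pathlib.Path):
    # pytest creates tmp_path fresh for this test alone.
    working_directory = str(tmp_path)
    (tmp_path / "scratch.txt").write_text("ok")
    assert pathlib.Path(working_directory, "scratch.txt").read_text() == "ok"
```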