feat!: Full code can now be generated by an external model and shared with the AI tool (Claude Code / Codex etc)!

Model definitions now support a new `allow_code_generation` flag, only to be used with higher reasoning models such as GPT-5-Pro and Gemini 2.5 Pro.

When `true`, the `chat` tool can now request the external model to generate a full implementation, update, instructions, etc., and then share the implementation with the calling agent.

This effectively allows us to utilize more powerful models such as GPT-5-Pro — models that are otherwise API-only or available only as part of the $200 Pro plan within the ChatGPT app — to generate code or entire implementations for us.
This commit is contained in:
Fahad
2025-10-07 18:49:13 +04:00
parent 04f7ce5b03
commit ece8a5ebed
29 changed files with 1008 additions and 122 deletions

View File

@@ -137,7 +137,7 @@ class TestAutoMode:
importlib.reload(config)
@pytest.mark.asyncio
async def test_auto_mode_requires_model_parameter(self):
async def test_auto_mode_requires_model_parameter(self, tmp_path):
"""Test that auto mode enforces model parameter"""
# Save original
original = os.environ.get("DEFAULT_MODEL", "")
@@ -154,7 +154,7 @@ class TestAutoMode:
# Mock the provider to avoid real API calls
with patch.object(tool, "get_model_provider"):
# Execute without model parameter
result = await tool.execute({"prompt": "Test prompt"})
result = await tool.execute({"prompt": "Test prompt", "working_directory": str(tmp_path)})
# Should get error
assert len(result) == 1