fix: configure codex with a longer timeout

refactor: rename tool parameters (`files` → `absolute_file_paths`, `working_directory` → `working_directory_absolute_path`)
This commit is contained in:
Fahad
2025-10-21 10:35:44 +04:00
parent 04132f1459
commit d2773f488a
47 changed files with 232 additions and 194 deletions

View File

@@ -156,7 +156,7 @@ class TestAutoMode:
with patch.object(tool, "get_model_provider"):
# Execute without model parameter and expect protocol error
with pytest.raises(ToolExecutionError) as exc_info:
await tool.execute({"prompt": "Test prompt", "working_directory": str(tmp_path)})
await tool.execute({"prompt": "Test prompt", "working_directory_absolute_path": str(tmp_path)})
# Should get error payload mentioning model requirement
error_payload = getattr(exc_info.value, "payload", str(exc_info.value))
@@ -208,7 +208,7 @@ class TestAutoMode:
try:
result = await tool.execute(
{
"files": ["/tmp/test.py"],
"absolute_file_paths": ["/tmp/test.py"],
"prompt": "Analyze this",
"model": "nonexistent-model-xyz", # This model definitely doesn't exist
}

View File

@@ -380,7 +380,7 @@ class TestAutoModeComprehensive:
await chat_tool.execute(
{
"prompt": "test",
"working_directory": str(workdir),
"working_directory_absolute_path": str(workdir),
# Note: no model parameter provided in auto mode
}
)
@@ -538,7 +538,7 @@ class TestAutoModeComprehensive:
workdir = tmp_path / "chat_artifacts"
workdir.mkdir(parents=True, exist_ok=True)
result = await chat_tool.execute(
{"prompt": "test", "model": "flash", "working_directory": str(workdir)}
{"prompt": "test", "model": "flash", "working_directory_absolute_path": str(workdir)}
) # Use alias in auto mode
# Should succeed with proper model resolution

View File

@@ -79,7 +79,7 @@ async def test_chat_codegen_saves_file(monkeypatch, tmp_path):
{
"prompt": prompt,
"model": "gemini-2.5-pro",
"working_directory": str(working_dir),
"working_directory_absolute_path": str(working_dir),
}
)

View File

@@ -121,7 +121,7 @@ async def test_chat_cross_model_continuation(monkeypatch, tmp_path):
"prompt": "Pick a number between 1 and 10 and respond with JUST that number.",
"model": "gemini-2.5-flash",
"temperature": 0.2,
"working_directory": working_directory,
"working_directory_absolute_path": working_directory,
}
step1_result = await chat_tool.execute(step1_args)
@@ -186,7 +186,7 @@ async def test_chat_cross_model_continuation(monkeypatch, tmp_path):
"model": "gpt-5",
"continuation_id": continuation_id,
"temperature": 0.2,
"working_directory": working_directory,
"working_directory_absolute_path": working_directory,
}
step2_result = await chat_tool.execute(step2_args)

View File

@@ -68,7 +68,7 @@ async def test_chat_auto_mode_with_openai(monkeypatch, tmp_path):
"prompt": "Use chat with gpt5 and ask how far the moon is from earth.",
"model": "gpt-5",
"temperature": 1.0,
"working_directory": working_directory,
"working_directory_absolute_path": working_directory,
}
result = await chat_tool.execute(arguments)
@@ -135,7 +135,7 @@ async def test_chat_openai_continuation(monkeypatch, tmp_path):
"prompt": "In one word, which sells better: iOS app or macOS app?",
"model": "gpt-5",
"temperature": 1.0,
"working_directory": working_directory,
"working_directory_absolute_path": working_directory,
}
first_result = await chat_tool.execute(first_args)
@@ -156,7 +156,7 @@ async def test_chat_openai_continuation(monkeypatch, tmp_path):
"model": "gpt-5",
"continuation_id": continuation_id,
"temperature": 1.0,
"working_directory": working_directory,
"working_directory_absolute_path": working_directory,
}
second_result = await chat_tool.execute(second_args)

View File

@@ -41,34 +41,34 @@ class TestChatTool:
# Required fields
assert "prompt" in schema["required"]
assert "working_directory" in schema["required"]
assert "working_directory_absolute_path" in schema["required"]
# Properties
properties = schema["properties"]
assert "prompt" in properties
assert "files" in properties
assert "absolute_file_paths" in properties
assert "images" in properties
assert "working_directory" in properties
assert "working_directory_absolute_path" in properties
def test_request_model_validation(self):
"""Test that the request model validates correctly"""
# Test valid request
request_data = {
"prompt": "Test prompt",
"files": ["test.txt"],
"absolute_file_paths": ["test.txt"],
"images": ["test.png"],
"model": "anthropic/claude-opus-4.1",
"temperature": 0.7,
"working_directory": "/tmp", # Dummy absolute path
"working_directory_absolute_path": "/tmp", # Dummy absolute path
}
request = ChatRequest(**request_data)
assert request.prompt == "Test prompt"
assert request.files == ["test.txt"]
assert request.absolute_file_paths == ["test.txt"]
assert request.images == ["test.png"]
assert request.model == "anthropic/claude-opus-4.1"
assert request.temperature == 0.7
assert request.working_directory == "/tmp"
assert request.working_directory_absolute_path == "/tmp"
def test_required_fields(self):
"""Test that required fields are enforced"""
@@ -76,7 +76,7 @@ class TestChatTool:
from pydantic import ValidationError
with pytest.raises(ValidationError):
ChatRequest(model="anthropic/claude-opus-4.1", working_directory="/tmp")
ChatRequest(model="anthropic/claude-opus-4.1", working_directory_absolute_path="/tmp")
def test_model_availability(self):
"""Test that model availability works"""
@@ -103,7 +103,11 @@ class TestChatTool:
@pytest.mark.asyncio
async def test_prompt_preparation(self):
"""Test that prompt preparation works correctly"""
request = ChatRequest(prompt="Test prompt", files=[], working_directory="/tmp")
request = ChatRequest(
prompt="Test prompt",
absolute_file_paths=[],
working_directory_absolute_path="/tmp",
)
# Mock the system prompt and file handling
with patch.object(self.tool, "get_system_prompt", return_value="System prompt"):
@@ -120,7 +124,7 @@ class TestChatTool:
def test_response_formatting(self):
"""Test that response formatting works correctly"""
response = "Test response content"
request = ChatRequest(prompt="Test", working_directory="/tmp")
request = ChatRequest(prompt="Test", working_directory_absolute_path="/tmp")
formatted = self.tool.format_response(response, request)
@@ -140,7 +144,7 @@ class TestChatTool:
"<GENERATED-CODE>print('world')</GENERATED-CODE>"
)
request = ChatRequest(prompt="Test", working_directory=str(tmp_path))
request = ChatRequest(prompt="Test", working_directory_absolute_path=str(tmp_path))
formatted = tool.format_response(response, request)
@@ -164,7 +168,7 @@ class TestChatTool:
"Closing thoughts after code."
)
request = ChatRequest(prompt="Test", working_directory=str(tmp_path))
request = ChatRequest(prompt="Test", working_directory_absolute_path=str(tmp_path))
formatted = tool.format_response(response, request)
@@ -183,7 +187,7 @@ class TestChatTool:
response = "Intro text\n<GENERATED-CODE>print('oops')\nStill ongoing"
request = ChatRequest(prompt="Test", working_directory=str(tmp_path))
request = ChatRequest(prompt="Test", working_directory_absolute_path=str(tmp_path))
formatted = tool.format_response(response, request)
@@ -198,7 +202,7 @@ class TestChatTool:
response = "Intro text\n</GENERATED-CODE> just text"
request = ChatRequest(prompt="Test", working_directory=str(tmp_path))
request = ChatRequest(prompt="Test", working_directory_absolute_path=str(tmp_path))
formatted = tool.format_response(response, request)
@@ -218,7 +222,7 @@ class TestChatTool:
"Further analysis and guidance after the generated snippet.\n"
)
request = ChatRequest(prompt="Test", working_directory=str(tmp_path))
request = ChatRequest(prompt="Test", working_directory_absolute_path=str(tmp_path))
formatted = tool.format_response(response, request)
@@ -247,12 +251,12 @@ class TestChatTool:
# Test that the tool fields are defined correctly
tool_fields = self.tool.get_tool_fields()
assert "prompt" in tool_fields
assert "files" in tool_fields
assert "absolute_file_paths" in tool_fields
assert "images" in tool_fields
required_fields = self.tool.get_required_fields()
assert "prompt" in required_fields
assert "working_directory" in required_fields
assert "working_directory_absolute_path" in required_fields
class TestChatRequestModel:
@@ -265,20 +269,20 @@ class TestChatRequestModel:
# Field descriptions should exist and be descriptive
assert len(CHAT_FIELD_DESCRIPTIONS["prompt"]) > 50
assert "context" in CHAT_FIELD_DESCRIPTIONS["prompt"]
files_desc = CHAT_FIELD_DESCRIPTIONS["files"].lower()
files_desc = CHAT_FIELD_DESCRIPTIONS["absolute_file_paths"].lower()
assert "absolute" in files_desc
assert "visual context" in CHAT_FIELD_DESCRIPTIONS["images"]
assert "directory" in CHAT_FIELD_DESCRIPTIONS["working_directory"].lower()
assert "directory" in CHAT_FIELD_DESCRIPTIONS["working_directory_absolute_path"].lower()
def test_working_directory_description_matches_behavior(self):
"""Working directory description should reflect automatic creation."""
def test_working_directory_absolute_path_description_matches_behavior(self):
"""Absolute working directory description should reflect existing-directory requirement."""
from tools.chat import CHAT_FIELD_DESCRIPTIONS
description = CHAT_FIELD_DESCRIPTIONS["working_directory"].lower()
assert "must already exist" in description
description = CHAT_FIELD_DESCRIPTIONS["working_directory_absolute_path"].lower()
assert "existing directory" in description
@pytest.mark.asyncio
async def test_working_directory_must_exist(self, tmp_path):
async def test_working_directory_absolute_path_must_exist(self, tmp_path):
"""Chat tool should reject non-existent working directories."""
tool = ChatTool()
missing_dir = tmp_path / "nonexistent_subdir"
@@ -287,9 +291,9 @@ class TestChatRequestModel:
await tool.execute(
{
"prompt": "test",
"files": [],
"absolute_file_paths": [],
"images": [],
"working_directory": str(missing_dir),
"working_directory_absolute_path": str(missing_dir),
}
)
@@ -299,17 +303,17 @@ class TestChatRequestModel:
def test_default_values(self):
"""Test that default values work correctly"""
request = ChatRequest(prompt="Test", working_directory="/tmp")
request = ChatRequest(prompt="Test", working_directory_absolute_path="/tmp")
assert request.prompt == "Test"
assert request.files == [] # Should default to empty list
assert request.absolute_file_paths == [] # Should default to empty list
assert request.images == [] # Should default to empty list
def test_inheritance(self):
"""Test that ChatRequest properly inherits from ToolRequest"""
from tools.shared.base_models import ToolRequest
request = ChatRequest(prompt="Test", working_directory="/tmp")
request = ChatRequest(prompt="Test", working_directory_absolute_path="/tmp")
assert isinstance(request, ToolRequest)
# Should have inherited fields

View File

@@ -24,7 +24,7 @@ async def test_clink_gemini_single_digit_sum():
"prompt": prompt,
"cli_name": "gemini",
"role": "default",
"files": [],
"absolute_file_paths": [],
"images": [],
}
)
@@ -56,7 +56,7 @@ async def test_clink_claude_single_digit_sum():
"prompt": prompt,
"cli_name": "claude",
"role": "default",
"files": [],
"absolute_file_paths": [],
"images": [],
}
)

View File

@@ -37,7 +37,7 @@ async def test_clink_tool_execute(monkeypatch):
"prompt": "Summarize the project",
"cli_name": "gemini",
"role": "default",
"files": [],
"absolute_file_paths": [],
"images": [],
}
@@ -85,7 +85,7 @@ async def test_clink_tool_defaults_to_first_cli(monkeypatch):
arguments = {
"prompt": "Hello",
"files": [],
"absolute_file_paths": [],
"images": [],
}
@@ -124,7 +124,7 @@ async def test_clink_tool_truncates_large_output(monkeypatch):
arguments = {
"prompt": "Summarize",
"cli_name": tool._default_cli_name,
"files": [],
"absolute_file_paths": [],
"images": [],
}
@@ -165,7 +165,7 @@ async def test_clink_tool_truncates_without_summary(monkeypatch):
arguments = {
"prompt": "Summarize",
"cli_name": tool._default_cli_name,
"files": [],
"absolute_file_paths": [],
"images": [],
}

View File

@@ -13,7 +13,11 @@ def test_first_response_persisted_in_conversation_history(tmp_path):
storage._store.clear() # type: ignore[attr-defined]
tool = ChatTool()
request = ChatRequest(prompt="First question?", model="local-llama", working_directory=str(tmp_path))
request = ChatRequest(
prompt="First question?",
model="local-llama",
working_directory_absolute_path=str(tmp_path),
)
response_text = "Here is the initial answer."
# Mimic the first tool invocation (no continuation_id supplied)

View File

@@ -70,7 +70,7 @@ async def test_conversation_history_field_mapping():
arguments = {
"continuation_id": "test-thread-123",
"prompt": test_case["original_value"],
"files": ["/test/file2.py"],
"absolute_file_paths": ["/test/file2.py"],
"model": "flash", # Use test model to avoid provider errors
}

View File

@@ -32,7 +32,7 @@ class TestConversationMemory:
mock_client = Mock()
mock_storage.return_value = mock_client
thread_id = create_thread("chat", {"prompt": "Hello", "files": ["/test.py"]})
thread_id = create_thread("chat", {"prompt": "Hello", "absolute_file_paths": ["/test.py"]})
assert thread_id is not None
assert len(thread_id) == 36 # UUID4 length
@@ -509,7 +509,7 @@ class TestConversationFlow:
mock_storage.return_value = mock_client
# Start conversation with files using a simple tool
thread_id = create_thread("chat", {"prompt": "Analyze this codebase", "files": ["/project/src/"]})
thread_id = create_thread("chat", {"prompt": "Analyze this codebase", "absolute_file_paths": ["/project/src/"]})
# Turn 1: Claude provides context with multiple files
initial_context = ThreadContext(
@@ -518,7 +518,10 @@ class TestConversationFlow:
last_updated_at="2023-01-01T00:00:00Z",
tool_name="chat",
turns=[],
initial_context={"prompt": "Analyze this codebase", "files": ["/project/src/"]},
initial_context={
"prompt": "Analyze this codebase",
"absolute_file_paths": ["/project/src/"],
},
)
mock_client.get.return_value = initial_context.model_dump_json()

View File

@@ -63,7 +63,7 @@ def helper_function():
try:
yield {
"directory": str(temp_dir),
"files": files,
"absolute_file_paths": files,
"swift_files": files[:-1], # All but the Python file
"python_file": str(python_file),
}
@@ -84,14 +84,14 @@ def helper_function():
mock_get_provider.return_value = mock_provider
directory = temp_directory_with_files["directory"]
expected_files = temp_directory_with_files["files"]
expected_files = temp_directory_with_files["absolute_file_paths"]
# Create a request with the directory (not individual files)
request_args = {
"prompt": "Analyze this codebase structure",
"files": [directory], # Directory path, not individual files
"absolute_file_paths": [directory], # Directory path, not individual files
"model": "flash",
"working_directory": directory,
"working_directory_absolute_path": directory,
}
# Execute the tool
@@ -148,10 +148,10 @@ def helper_function():
mock_get_provider.return_value = mock_provider
directory = temp_directory_with_files["directory"]
expected_files = temp_directory_with_files["files"]
expected_files = temp_directory_with_files["absolute_file_paths"]
# Step 1: Create a conversation thread manually with the expanded files
thread_id = create_thread("chat", {"prompt": "Initial analysis", "files": [directory]})
thread_id = create_thread("chat", {"prompt": "Initial analysis", "absolute_file_paths": [directory]})
# Add a turn with the expanded files (simulating what the fix should do)
success = add_turn(
@@ -166,10 +166,10 @@ def helper_function():
# Step 2: Continue the conversation with the same directory
continuation_args = {
"prompt": "Now focus on the Swift files specifically",
"files": [directory], # Same directory again
"absolute_file_paths": [directory], # Same directory again
"model": "flash",
"continuation_id": thread_id,
"working_directory": directory,
"working_directory_absolute_path": directory,
}
# Mock to capture file filtering behavior
@@ -217,10 +217,10 @@ def helper_function():
mock_storage.return_value = mock_client
directory = temp_directory_with_files["directory"]
expected_files = temp_directory_with_files["files"]
expected_files = temp_directory_with_files["absolute_file_paths"]
# Create a thread with expanded files
thread_id = create_thread("chat", {"prompt": "Initial analysis", "files": [directory]})
thread_id = create_thread("chat", {"prompt": "Initial analysis", "absolute_file_paths": [directory]})
# Add a turn with expanded files
success = add_turn(
@@ -261,7 +261,7 @@ def helper_function():
python_file = temp_directory_with_files["python_file"]
# Create a thread with some expanded files
thread_id = create_thread("chat", {"prompt": "Initial analysis", "files": [directory]})
thread_id = create_thread("chat", {"prompt": "Initial analysis", "absolute_file_paths": [directory]})
# Add a turn with only some of the files (simulate partial embedding)
swift_files = temp_directory_with_files["swift_files"]
@@ -294,14 +294,14 @@ def helper_function():
mock_get_provider.return_value = mock_provider
directory = temp_directory_with_files["directory"]
expected_files = temp_directory_with_files["files"]
expected_files = temp_directory_with_files["absolute_file_paths"]
# Execute the tool
request_args = {
"prompt": "Analyze this code",
"files": [directory],
"absolute_file_paths": [directory],
"model": "flash",
"working_directory": directory,
"working_directory_absolute_path": directory,
}
result = await tool.execute(request_args)

View File

@@ -283,7 +283,7 @@ class TestImageSupportIntegration:
"prompt": "What do you see in this image?",
"images": [temp_image_path],
"model": "gpt-4o",
"working_directory": working_directory,
"working_directory_absolute_path": working_directory,
}
)

View File

@@ -60,7 +60,7 @@ class TestLargePromptHandling:
temp_dir = tempfile.mkdtemp()
try:
with pytest.raises(ToolExecutionError) as exc_info:
await tool.execute({"prompt": large_prompt, "working_directory": temp_dir})
await tool.execute({"prompt": large_prompt, "working_directory_absolute_path": temp_dir})
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
@@ -83,7 +83,7 @@ class TestLargePromptHandling:
try:
try:
result = await tool.execute(
{"prompt": normal_prompt, "model": "gemini-2.5-flash", "working_directory": temp_dir}
{"prompt": normal_prompt, "model": "gemini-2.5-flash", "working_directory_absolute_path": temp_dir}
)
except ToolExecutionError as exc:
output = json.loads(exc.payload if hasattr(exc, "payload") else str(exc))
@@ -114,9 +114,9 @@ class TestLargePromptHandling:
result = await tool.execute(
{
"prompt": "",
"files": [temp_prompt_file],
"absolute_file_paths": [temp_prompt_file],
"model": "gemini-2.5-flash",
"working_directory": temp_dir,
"working_directory_absolute_path": temp_dir,
}
)
except ToolExecutionError as exc:
@@ -297,8 +297,8 @@ class TestLargePromptHandling:
await tool.execute(
{
"prompt": "Test prompt",
"files": [temp_prompt_file, other_file],
"working_directory": os.path.dirname(temp_prompt_file),
"absolute_file_paths": [temp_prompt_file, other_file],
"working_directory_absolute_path": os.path.dirname(temp_prompt_file),
}
)
@@ -337,7 +337,7 @@ class TestLargePromptHandling:
temp_dir = tempfile.mkdtemp()
try:
try:
result = await tool.execute({"prompt": exact_prompt, "working_directory": temp_dir})
result = await tool.execute({"prompt": exact_prompt, "working_directory_absolute_path": temp_dir})
except ToolExecutionError as exc:
output = json.loads(exc.payload if hasattr(exc, "payload") else str(exc))
else:
@@ -355,7 +355,7 @@ class TestLargePromptHandling:
temp_dir = tempfile.mkdtemp()
try:
try:
result = await tool.execute({"prompt": over_prompt, "working_directory": temp_dir})
result = await tool.execute({"prompt": over_prompt, "working_directory_absolute_path": temp_dir})
except ToolExecutionError as exc:
output = json.loads(exc.payload if hasattr(exc, "payload") else str(exc))
else:
@@ -384,7 +384,7 @@ class TestLargePromptHandling:
temp_dir = tempfile.mkdtemp()
try:
try:
result = await tool.execute({"prompt": "", "working_directory": temp_dir})
result = await tool.execute({"prompt": "", "working_directory_absolute_path": temp_dir})
except ToolExecutionError as exc:
output = json.loads(exc.payload if hasattr(exc, "payload") else str(exc))
else:
@@ -428,7 +428,9 @@ class TestLargePromptHandling:
temp_dir = tempfile.mkdtemp()
try:
try:
result = await tool.execute({"prompt": "", "files": [bad_file], "working_directory": temp_dir})
result = await tool.execute(
{"prompt": "", "absolute_file_paths": [bad_file], "working_directory_absolute_path": temp_dir}
)
except ToolExecutionError as exc:
output = json.loads(exc.payload if hasattr(exc, "payload") else str(exc))
else:
@@ -477,9 +479,9 @@ class TestLargePromptHandling:
result = await tool.execute(
{
"prompt": "Summarize the design decisions",
"files": [str(large_file)],
"absolute_file_paths": [str(large_file)],
"model": "flash",
"working_directory": str(tmp_path),
"working_directory_absolute_path": str(tmp_path),
"_model_context": dummy_context,
}
)
@@ -540,7 +542,7 @@ class TestLargePromptHandling:
tool.prepare_prompt = mock_prepare_prompt
result = await tool.execute(
{"prompt": small_user_prompt, "model": "flash", "working_directory": temp_dir}
{"prompt": small_user_prompt, "model": "flash", "working_directory_absolute_path": temp_dir}
)
output = json.loads(result[0].text)
@@ -572,7 +574,7 @@ class TestLargePromptHandling:
try:
try:
result = await tool.execute(
{"prompt": large_user_input, "model": "flash", "working_directory": temp_dir}
{"prompt": large_user_input, "model": "flash", "working_directory_absolute_path": temp_dir}
)
except ToolExecutionError as exc:
output = json.loads(exc.payload if hasattr(exc, "payload") else str(exc))
@@ -590,7 +592,7 @@ class TestLargePromptHandling:
{
"prompt": small_user_input,
"model": "gemini-2.5-flash",
"working_directory": temp_dir,
"working_directory_absolute_path": temp_dir,
}
)
except ToolExecutionError as exc:
@@ -663,7 +665,7 @@ class TestLargePromptHandling:
"prompt": f"{huge_conversation_history}\n\n=== CURRENT REQUEST ===\n{small_continuation_prompt}",
"model": "flash",
"continuation_id": "test_thread_123",
"working_directory": temp_dir,
"working_directory_absolute_path": temp_dir,
}
# Mock the conversation history embedding to simulate server.py behavior

View File

@@ -45,9 +45,9 @@ async def test_tool_execution_error_sets_is_error_flag_for_mcp_response(monkeypa
handler = mcp_server.request_handlers[CallToolRequest]
arguments = {
"prompt": "Trigger working_directory validation failure",
"working_directory": "relative/path", # Not absolute -> ToolExecutionError from ChatTool
"files": [],
"prompt": "Trigger working_directory_absolute_path validation failure",
"working_directory_absolute_path": "relative/path", # Not absolute -> ToolExecutionError from ChatTool
"absolute_file_paths": [],
"model": "gemini-2.5-flash",
}

View File

@@ -98,7 +98,7 @@ class TestO3ProOutputTextFix:
"prompt": "What is 2 + 2?",
"model": "o3-pro",
"temperature": 1.0,
"working_directory": workdir,
"working_directory_absolute_path": workdir,
}
return await chat_tool.execute(arguments)

View File

@@ -296,7 +296,9 @@ class TestAutoModeErrorMessages:
temp_dir = tempfile.mkdtemp()
try:
with pytest.raises(ToolExecutionError) as exc_info:
await tool.execute({"prompt": "test", "model": "auto", "working_directory": temp_dir})
await tool.execute(
{"prompt": "test", "model": "auto", "working_directory_absolute_path": temp_dir}
)
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
@@ -427,7 +429,7 @@ class TestRuntimeModelSelection:
try:
with pytest.raises(ToolExecutionError) as exc_info:
await tool.execute(
{"prompt": "test", "model": "gpt-5-turbo", "working_directory": temp_dir}
{"prompt": "test", "model": "gpt-5-turbo", "working_directory_absolute_path": temp_dir}
)
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
@@ -527,7 +529,7 @@ class TestUnavailableModelFallback:
tool = ChatTool()
temp_dir = tempfile.mkdtemp()
try:
result = await tool.execute({"prompt": "test", "working_directory": temp_dir})
result = await tool.execute({"prompt": "test", "working_directory_absolute_path": temp_dir})
finally:
shutil.rmtree(temp_dir, ignore_errors=True)

View File

@@ -61,7 +61,7 @@ class TestPlannerTool:
# Check that workflow-based planner includes model field and excludes some fields
assert "model" in schema["properties"] # Workflow tools include model field
assert "images" not in schema["properties"] # Excluded for planning
assert "files" not in schema["properties"] # Excluded for planning
assert "absolute_file_paths" not in schema["properties"] # Excluded for planning
assert "temperature" not in schema["properties"]
assert "thinking_mode" not in schema["properties"]

View File

@@ -78,7 +78,7 @@ class TestPromptIntegration:
@pytest.mark.integration
@pytest.mark.asyncio
async def test_chat_with_files(self):
"""Test chat tool with files parameter using real API."""
"""Test chat tool with absolute_file_paths parameter using real API."""
skip_if_no_custom_api()
tool = ChatTool()
@@ -99,7 +99,11 @@ if __name__ == "__main__":
try:
result = await tool.execute(
{"prompt": "What does this Python code do?", "files": [temp_file], "model": "local-llama"}
{
"prompt": "What does this Python code do?",
"absolute_file_paths": [temp_file],
"model": "local-llama",
}
)
assert len(result) == 1
@@ -291,7 +295,7 @@ class UserController:
tool = ChatTool()
# Test with no files parameter
# Test with no absolute_file_paths parameter
result = await tool.execute({"prompt": "Hello", "model": "local-llama"})
assert len(result) == 1

View File

@@ -74,7 +74,7 @@ class TestThinkingModes:
try:
result = await tool.execute(
{
"files": ["/absolute/path/test.py"],
"absolute_file_paths": ["/absolute/path/test.py"],
"prompt": "What is this?",
"model": "o3-mini",
"thinking_mode": "minimal",
@@ -155,7 +155,7 @@ class TestThinkingModes:
try:
result = await tool.execute(
{
"files": ["/absolute/path/test.py"],
"absolute_file_paths": ["/absolute/path/test.py"],
"thinking_mode": "low",
"prompt": "Test code review for validation purposes",
"model": "o3-mini",
@@ -314,7 +314,7 @@ class TestThinkingModes:
try:
result = await tool.execute(
{
"files": ["/absolute/path/complex.py"],
"absolute_file_paths": ["/absolute/path/complex.py"],
"prompt": "Analyze architecture",
"thinking_mode": "high",
"model": "o3-mini",

View File

@@ -352,8 +352,8 @@ class TestAbsolutePathValidation:
await tool.execute(
{
"prompt": "Explain this code",
"files": ["code.py"], # relative path without ./
"working_directory": temp_dir,
"absolute_file_paths": ["code.py"], # relative path without ./
"working_directory_absolute_path": temp_dir,
}
)
finally: