Fixed imports and lint
@@ -15,7 +15,7 @@ from utils.file_utils import LogTailer
def _process_log_stream(tailer, filter_func=None, format_func=None):
    """
    Process new lines from a log tailer with optional filtering and formatting.
-
+
    Args:
        tailer: LogTailer instance to read from
        filter_func: Optional function to filter lines (return True to include)
@@ -26,15 +26,15 @@ def _process_log_stream(tailer, filter_func=None, format_func=None):
        # Apply filter if provided
        if filter_func and not filter_func(line):
            continue
-
+
        timestamp = datetime.now().strftime("%H:%M:%S")
-
+
        # Apply formatter if provided
        if format_func:
            formatted = format_func(line)
        else:
            formatted = line
-
+
        print(f"[{timestamp}] {formatted}")
@@ -160,7 +160,6 @@ class TestAutoMode:
            patch("providers.registry.ModelProviderRegistry.get_provider_for_model") as mock_provider,
            patch("providers.registry.ModelProviderRegistry.get_available_models") as mock_available,
            patch.object(tool, "_get_available_models") as mock_tool_available,
-            patch("providers.registry.ModelProviderRegistry.is_model_available") as mock_is_available,
        ):

            # Mock that o3 is not available but actual available models are
@@ -199,12 +198,6 @@ class TestAutoMode:
            # Mock the tool's available models method to return the actual available models
            mock_tool_available.return_value = available_models

-            # Mock is_model_available to return False for o3 specifically
-            def mock_model_available(model_name):
-                return model_name != "o3" and model_name in available_models
-
-            mock_is_available.side_effect = mock_model_available
-
            # Execute with unavailable model
            result = await tool.execute(
                {"files": ["/tmp/test.py"], "prompt": "Analyze this", "model": "o3"} # This model is not available
@@ -150,16 +150,17 @@ class TestLargePromptHandling:
    async def test_codereview_large_focus(self, large_prompt):
        """Test that codereview tool detects large focus_on field."""
        from unittest.mock import MagicMock

        from providers.base import ModelCapabilities, ProviderType

        tool = CodeReviewTool()


        # Mock provider to avoid MagicMock comparison errors that would prevent large prompt detection
        with patch.object(tool, "get_model_provider") as mock_get_provider:
            mock_provider = MagicMock()
            mock_provider.get_provider_type.return_value = MagicMock(value="google")
            mock_provider.supports_thinking_mode.return_value = False


            # Set up proper capabilities to avoid MagicMock comparison errors
            mock_capabilities = ModelCapabilities(
                provider=ProviderType.GOOGLE,
@@ -170,7 +171,7 @@ class TestLargePromptHandling:
            )
            mock_provider.get_capabilities.return_value = mock_capabilities
            mock_get_provider.return_value = mock_provider
-
+
            result = await tool.execute(
                {
                    "files": ["/some/file.py"],
@@ -127,20 +127,20 @@ class TestComprehensive(unittest.TestCase):
    def test_request_model_validation(self):
        """Test request model validation"""
        # Valid request
-        valid_request = TestGenRequest(files=["/tmp/test.py"], prompt="Generate tests for calculator functions")
+        valid_request = TestGenerationRequest(files=["/tmp/test.py"], prompt="Generate tests for calculator functions")
        assert valid_request.files == ["/tmp/test.py"]
        assert valid_request.prompt == "Generate tests for calculator functions"
        assert valid_request.test_examples is None

        # With test examples
-        request_with_examples = TestGenRequest(
+        request_with_examples = TestGenerationRequest(
            files=["/tmp/test.py"], prompt="Generate tests", test_examples=["/tmp/test_example.py"]
        )
        assert request_with_examples.test_examples == ["/tmp/test_example.py"]

        # Invalid request (missing required fields)
        with pytest.raises(ValueError):
-            TestGenRequest(files=["/tmp/test.py"]) # Missing prompt
+            TestGenerationRequest(files=["/tmp/test.py"]) # Missing prompt

    @pytest.mark.asyncio
    @patch("tools.base.BaseTool.get_model_provider")
@@ -244,7 +244,7 @@ class TestComprehensive(unittest.TestCase):
    @pytest.mark.asyncio
    async def test_prepare_prompt_structure(self, tool, temp_files):
        """Test prompt preparation structure"""
-        request = TestGenRequest(files=[temp_files["code_file"]], prompt="Test the calculator functions")
+        request = TestGenerationRequest(files=[temp_files["code_file"]], prompt="Test the calculator functions")

        with patch.object(tool, "_prepare_file_content_for_prompt") as mock_prepare:
            mock_prepare.return_value = ("mocked file content", [temp_files["code_file"]])
@@ -261,7 +261,7 @@ class TestComprehensive(unittest.TestCase):
    @pytest.mark.asyncio
    async def test_prepare_prompt_with_examples(self, tool, temp_files):
        """Test prompt preparation with test examples"""
-        request = TestGenRequest(
+        request = TestGenerationRequest(
            files=[temp_files["code_file"]], prompt="Generate tests", test_examples=[temp_files["small_test"]]
        )
@@ -280,7 +280,7 @@ class TestComprehensive(unittest.TestCase):

    def test_format_response(self, tool):
        """Test response formatting"""
-        request = TestGenRequest(files=["/tmp/test.py"], prompt="Generate tests")
+        request = TestGenerationRequest(files=["/tmp/test.py"], prompt="Generate tests")

        raw_response = "Generated test cases with edge cases"
        formatted = tool.format_response(raw_response, request)
@@ -333,7 +333,7 @@ class TestComprehensive(unittest.TestCase):
        with patch.object(tool, "_prepare_file_content_for_prompt") as mock_prepare:
            mock_prepare.return_value = ("code content", ["/tmp/test.py"])

-            request = TestGenRequest(
+            request = TestGenerationRequest(
                files=["/tmp/test.py"], prompt="Test prompt", test_examples=["/tmp/example.py"]
            )
@@ -353,7 +353,7 @@ class TestComprehensive(unittest.TestCase):
        with patch.object(tool, "_prepare_file_content_for_prompt") as mock_prepare:
            mock_prepare.return_value = ("code content", [temp_files["code_file"]])

-            request = TestGenRequest(
+            request = TestGenerationRequest(
                files=[temp_files["code_file"]], prompt="Continue testing", continuation_id="test-thread-123"
            )
@@ -372,7 +372,7 @@ class TestComprehensive(unittest.TestCase):

    def test_no_websearch_in_prompt(self, tool, temp_files):
        """Test that web search instructions are not included"""
-        request = TestGenRequest(files=[temp_files["code_file"]], prompt="Generate tests")
+        request = TestGenerationRequest(files=[temp_files["code_file"]], prompt="Generate tests")

        with patch.object(tool, "_prepare_file_content_for_prompt") as mock_prepare:
            mock_prepare.return_value = ("code content", [temp_files["code_file"]])
@@ -391,7 +391,7 @@ class TestComprehensive(unittest.TestCase):
        # Create a scenario where the same file appears in both files and test_examples
        duplicate_file = temp_files["code_file"]

-        request = TestGenRequest(
+        request = TestGenerationRequest(
            files=[duplicate_file, temp_files["large_test"]], # code_file appears in both
            prompt="Generate tests",
            test_examples=[temp_files["small_test"], duplicate_file], # code_file also here
@@ -423,7 +423,7 @@ class TestComprehensive(unittest.TestCase):
    @pytest.mark.asyncio
    async def test_no_deduplication_when_no_test_examples(self, tool, temp_files):
        """Test that no deduplication occurs when test_examples is None/empty"""
-        request = TestGenRequest(
+        request = TestGenerationRequest(
            files=[temp_files["code_file"], temp_files["large_test"]],
            prompt="Generate tests",
            # No test_examples
@@ -453,7 +453,7 @@ class TestComprehensive(unittest.TestCase):
        # Add some path variations that should normalize to the same file
        variant_path = os.path.join(os.path.dirname(base_file), ".", os.path.basename(base_file))

-        request = TestGenRequest(
+        request = TestGenerationRequest(
            files=[variant_path, temp_files["large_test"]], # variant path in files
            prompt="Generate tests",
            test_examples=[base_file], # base path in test_examples
@@ -41,7 +41,6 @@ class TestThinkingModes:
    @pytest.mark.asyncio
    async def test_thinking_mode_minimal(self):
        """Test minimal thinking mode"""
        from unittest.mock import MagicMock
        from providers.base import ModelCapabilities, ProviderType

        with patch("tools.base.BaseTool.get_model_provider") as mock_get_provider:
@@ -51,7 +50,7 @@ class TestThinkingModes:
            mock_provider.generate_content.return_value = Mock(
                content="Minimal thinking response", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
            )


            # Set up proper capabilities to avoid MagicMock comparison errors
            mock_capabilities = ModelCapabilities(
                provider=ProviderType.GOOGLE,
@@ -91,7 +90,6 @@ class TestThinkingModes:
    @pytest.mark.asyncio
    async def test_thinking_mode_low(self):
        """Test low thinking mode"""
        from unittest.mock import MagicMock
        from providers.base import ModelCapabilities, ProviderType

        with patch("tools.base.BaseTool.get_model_provider") as mock_get_provider:
@@ -101,7 +99,7 @@ class TestThinkingModes:
            mock_provider.generate_content.return_value = Mock(
                content="Low thinking response", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
            )


            # Set up proper capabilities to avoid MagicMock comparison errors
            mock_capabilities = ModelCapabilities(
                provider=ProviderType.GOOGLE,
@@ -136,7 +134,6 @@ class TestThinkingModes:
    @pytest.mark.asyncio
    async def test_thinking_mode_medium(self):
        """Test medium thinking mode (default for most tools)"""
        from unittest.mock import MagicMock
        from providers.base import ModelCapabilities, ProviderType

        with patch("tools.base.BaseTool.get_model_provider") as mock_get_provider:
@@ -146,7 +143,7 @@ class TestThinkingModes:
            mock_provider.generate_content.return_value = Mock(
                content="Medium thinking response", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
            )


            # Set up proper capabilities to avoid MagicMock comparison errors
            mock_capabilities = ModelCapabilities(
                provider=ProviderType.GOOGLE,
@@ -180,7 +177,6 @@ class TestThinkingModes:
    @pytest.mark.asyncio
    async def test_thinking_mode_high(self):
        """Test high thinking mode"""
        from unittest.mock import MagicMock
        from providers.base import ModelCapabilities, ProviderType

        with patch("tools.base.BaseTool.get_model_provider") as mock_get_provider:
@@ -190,7 +186,7 @@ class TestThinkingModes:
            mock_provider.generate_content.return_value = Mock(
                content="High thinking response", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
            )


            # Set up proper capabilities to avoid MagicMock comparison errors
            mock_capabilities = ModelCapabilities(
                provider=ProviderType.GOOGLE,
@@ -78,7 +78,6 @@ class TestCodeReviewTool:
    @pytest.mark.asyncio
    async def test_execute_with_review_type(self, tool, tmp_path):
        """Test execution with specific review type"""
        from unittest.mock import MagicMock
        from providers.base import ModelCapabilities, ProviderType

        # Create test file
@@ -93,7 +92,7 @@ class TestCodeReviewTool:
            mock_provider.generate_content.return_value = Mock(
                content="Security issues found", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
            )


            # Set up proper capabilities to avoid MagicMock comparison errors
            mock_capabilities = ModelCapabilities(
                provider=ProviderType.GOOGLE,
@@ -184,7 +183,6 @@ class TestAnalyzeTool:
    @pytest.mark.asyncio
    async def test_execute_with_analysis_type(self, tool, tmp_path):
        """Test execution with specific analysis type"""
        from unittest.mock import MagicMock
        from providers.base import ModelCapabilities, ProviderType

        # Create test file
@@ -199,7 +197,7 @@ class TestAnalyzeTool:
            mock_provider.generate_content.return_value = Mock(
                content="Architecture analysis", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
            )


            # Set up proper capabilities to avoid MagicMock comparison errors
            mock_capabilities = ModelCapabilities(
                provider=ProviderType.GOOGLE,
@@ -329,7 +327,6 @@ class TestAbsolutePathValidation:
    @pytest.mark.asyncio
    async def test_analyze_tool_accepts_absolute_paths(self):
        """Test that analyze tool accepts absolute paths"""
        from unittest.mock import MagicMock
        from providers.base import ModelCapabilities, ProviderType

        tool = AnalyzeTool()
@@ -342,7 +339,7 @@ class TestAbsolutePathValidation:
            mock_provider.generate_content.return_value = Mock(
                content="Analysis complete", usage={}, model_name="gemini-2.5-flash-preview-05-20", metadata={}
            )


            # Set up proper capabilities to avoid MagicMock comparison errors
            mock_capabilities = ModelCapabilities(
                provider=ProviderType.GOOGLE,