- Improved error handling and path resolution in run-server.ps1 for better reliability. - Implemented conversation tests for Docker mode compatibility in validation_crossplatform.py. - Updated run-server.ps1 to include detailed help documentation, configuration management, and backup retention for configuration files. - Added Docker path validation tests in validation_crossplatform.py to ensure correct path handling in Docker mode. - Enhanced integration test script run_integration_tests.ps1 with comprehensive documentation and parameter support for output customization.
587 lines
22 KiB
Python
587 lines
22 KiB
Python
#!/usr/bin/env python3
|
||
"""
|
||
Validation script for all cross-platform fixes.
|
||
|
||
This script runs a comprehensive series of tests to validate that all applied fixes
|
||
work correctly on Windows, including:
|
||
|
||
1. Home directory pattern detection (Windows, macOS, Linux)
|
||
2. Unix path validation on Windows
|
||
3. Safe files functionality with temporary files
|
||
4. Cross-platform file discovery with Path.parts
|
||
5. Communication simulator logger and Python path fixes
|
||
6. BaseSimulatorTest logger and Python path fixes
|
||
7. Shell scripts Windows virtual environment support
|
||
8. Docker path validation and mode compatibility
|
||
9. Conversation tests Docker mode compatibility
|
||
|
||
Tests cover all modified files:
|
||
- utils/file_utils.py
|
||
- tests/test_file_protection.py
|
||
- tests/test_utils.py
|
||
- communication_simulator_test.py
|
||
- simulator_tests/base_test.py
|
||
- run_integration_tests.sh
|
||
- code_quality_checks.sh
|
||
- tests/test_conversation_file_features.py (Docker mode)
|
||
"""
|
||
|
||
import os
|
||
import sys
|
||
import tempfile
|
||
from pathlib import Path
|
||
from unittest.mock import patch
|
||
|
||
# Add parent directory to path to import project modules
|
||
sys.path.insert(0, str(Path(__file__).parent.parent))
|
||
|
||
# Import functions to test
|
||
from utils.file_utils import (
|
||
expand_paths,
|
||
is_home_directory_root,
|
||
read_file_content,
|
||
resolve_and_validate_path,
|
||
)
|
||
|
||
|
||
def test_home_directory_patterns():
    """Test 1: Home directory patterns on Windows."""
    print("🧪 Test 1: Home directory patterns on Windows")
    print("-" * 60)

    # (candidate path, expected verdict, human-readable label)
    cases = [
        ("/home/ubuntu", True, "Linux home directory"),
        ("/home/testuser", True, "Linux home directory"),
        ("/Users/john", True, "macOS home directory"),
        ("/Users/developer", True, "macOS home directory"),
        ("C:\\Users\\John", True, "Windows home directory"),
        ("C:/Users/Jane", True, "Windows home directory"),
        ("/home/ubuntu/projects", False, "Linux home subdirectory"),
        ("/Users/john/Documents", False, "macOS home subdirectory"),
        ("C:\\Users\\John\\Documents", False, "Windows home subdirectory"),
    ]

    ok_count = 0
    for candidate, want, label in cases:
        try:
            got = is_home_directory_root(Path(candidate))
        except Exception as exc:
            # A crash counts as a failure for this case; move on.
            print(f" ❌ {candidate:<30} -> Exception: {exc}")
            continue
        mark = "✅" if got == want else "❌"
        print(f" {mark} {candidate:<30} -> {got} ({label})")
        if got == want:
            ok_count += 1

    all_passed = ok_count == len(cases)
    print(f"\nResult: {ok_count}/{len(cases)} tests passed")
    return all_passed
|
||
|
||
|
||
def test_unix_path_validation():
    """Test 2: Unix path validation on Windows."""
    print("\n🧪 Test 2: Unix path validation on Windows")
    print("-" * 60)

    # (candidate path, should the validator accept it, label)
    cases = [
        ("/etc/passwd", True, "Unix system file"),
        ("/home/user/file.txt", True, "Unix user file"),
        ("/usr/local/bin/python", True, "Unix binary path"),
        ("./relative/path", False, "Relative path"),
        ("relative/file.txt", False, "Relative file"),
        ("C:\\Windows\\System32", True, "Windows absolute path"),
    ]

    good = 0
    for raw, accept_expected, label in cases:
        try:
            resolve_and_validate_path(raw)
        except ValueError:
            # Rejected on path-format grounds.
            accepted = False
            mark = "✅" if not accept_expected else "❌"
            print(f" {mark} {raw:<30} -> Rejected ({label})")
        except PermissionError:
            # Blocked for security reasons, not path format — counts as accepted.
            accepted = True
            mark = "✅" if accept_expected else "❌"
            print(f" {mark} {raw:<30} -> Secured ({label})")
        except Exception as exc:
            accepted = False
            print(f" ❌ {raw:<30} -> Error: {exc}")
        else:
            accepted = True
            mark = "✅" if accept_expected else "❌"
            print(f" {mark} {raw:<30} -> Accepted ({label})")

        if accepted == accept_expected:
            good += 1

    all_ok = good == len(cases)
    print(f"\nResult: {good}/{len(cases)} tests passed")
    return all_ok
|
||
|
||
|
||
def test_safe_files_functionality():
    """Test 3: Safe files functionality."""
    print("\n🧪 Test 3: Safe files functionality")
    print("-" * 60)

    # Scratch file used to exercise the happy path.
    with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".txt") as handle:
        handle.write("test content for validation")
        scratch_path = handle.name

    try:
        # Existing file: content must come back wrapped in BEGIN/END
        # markers with a positive token estimate.
        content, tokens = read_file_content(scratch_path)

        existing_checks = {
            "begin": f"--- BEGIN FILE: {scratch_path} ---" in content,
            "body": "test content for validation" in content,
            "end": "--- END FILE:" in content,
            "tokens": tokens > 0,
        }

        print(f" ✅ BEGIN FILE found: {existing_checks['begin']}")
        print(f" ✅ Correct content: {existing_checks['body']}")
        print(f" ✅ END FILE found: {existing_checks['end']}")
        print(f" ✅ Tokens > 0: {existing_checks['tokens']}")

        # Missing Unix path: must yield a FILE NOT FOUND marker rather
        # than a relative-path validation error.
        content, tokens = read_file_content("/etc/nonexistent")
        missing_marker = "--- FILE NOT FOUND:" in content
        no_rel_error = "Relative paths are not supported" not in content
        tokens_ok = tokens > 0

        print(f" ✅ Nonexistent Unix file: {missing_marker}")
        print(f" ✅ No path error: {no_rel_error}")
        print(f" ✅ Tokens > 0: {tokens_ok}")

        outcome = all(existing_checks.values()) and all([missing_marker, no_rel_error, tokens_ok])
        print(f"\nResult: Safe files tests {'passed' if outcome else 'failed'}")

    finally:
        # Best-effort cleanup of the scratch file.
        try:
            Path(scratch_path).unlink()
        except Exception:
            pass

    return outcome
|
||
|
||
|
||
def test_cross_platform_file_discovery():
    """Test 4: Cross-platform file discovery."""
    print("\n🧪 Test 4: Cross-platform file discovery")
    print("-" * 60)

    with tempfile.TemporaryDirectory() as sandbox:
        # Build a small throwaway project: two files at the root plus
        # one nested under src/.
        root = Path(sandbox) / "test-project"
        root.mkdir()
        (root / "README.md").write_text("# Test Project")
        (root / "main.py").write_text("print('Hello')")

        nested = root / "src"
        nested.mkdir()
        (nested / "app.py").write_text("# App code")

        # Pretend nothing is an MCP directory for this scenario.
        with patch("utils.file_utils.is_mcp_directory", side_effect=lambda path: False):
            discovered = expand_paths([str(root)])

        as_strings = [str(entry) for entry in discovered]

        def _tail_present(tail):
            # Compare via Path.parts so OS-specific separators don't matter.
            return any(Path(entry).parts[-2:] == tail for entry in as_strings)

        readme_found = _tail_present(("test-project", "README.md"))
        main_found = _tail_present(("test-project", "main.py"))
        app_found = _tail_present(("src", "app.py"))

        print(f" ✅ README.md found: {readme_found}")
        print(f" ✅ main.py found: {main_found}")
        print(f" ✅ app.py found: {app_found}")
        print(f" ℹ️ Files found: {len(as_strings)}")

        found_all = readme_found and main_found and app_found
        print(f"\nResult: Cross-platform discovery {'passed' if found_all else 'failed'}")

    return found_all
|
||
|
||
|
||
def test_communication_simulator_fixes():
    """Test 5: Communication simulator fixes.

    Verifies that CommunicationSimulator can be instantiated without
    logger errors, that it exposes a resolved ``python_path``, and that
    the expected Windows venv locations include a "Scripts" directory.

    Returns:
        bool: True when all checks pass, False otherwise (including any
        import/instantiation failure, which is caught and reported).
    """
    print("\n🧪 Test 5: Communication simulator fixes")
    print("-" * 60)

    try:
        # Import lazily so a broken module fails only this test group.
        from communication_simulator_test import CommunicationSimulator

        # Instantiation used to fail when self.logger was never set up.
        simulator = CommunicationSimulator(verbose=False, keep_logs=True)

        has_logger = hasattr(simulator, "logger") and simulator.logger is not None
        print(f" ✅ Logger initialized: {has_logger}")

        has_python_path = hasattr(simulator, "python_path") and simulator.python_path is not None
        print(f" ✅ Python path set: {has_python_path}")

        # `os` is already imported at module level; re-importing it here
        # was redundant, so only `platform` is imported locally.
        import platform

        if platform.system() == "Windows":
            # Candidate interpreter locations for a Windows virtualenv.
            current_dir = os.getcwd()
            expected_paths = [
                os.path.join(current_dir, ".zen_venv", "Scripts", "python.exe"),
                os.path.join(current_dir, "venv", "Scripts", "python.exe"),
            ]

            # NOTE(review): this inspects the candidate list we just built,
            # not the simulator's actual detection logic — smoke check only.
            windows_detection = any("Scripts" in path for path in expected_paths)
            print(f" ✅ Windows path detection: {windows_detection}")
        else:
            windows_detection = True  # Not applicable outside Windows.
            print(" ✅ Windows path detection: N/A (not Windows)")

        success = all([has_logger, has_python_path, windows_detection])
        print(f"\nResult: Communication simulator {'passed' if success else 'failed'}")

        return success

    except Exception as e:
        print(f" ❌ Error testing CommunicationSimulator: {e}")
        print("\nResult: Communication simulator failed")
        return False
|
||
|
||
|
||
def test_base_simulator_test_fixes():
    """Test 6: BaseSimulatorTest fixes.

    Verifies that BaseSimulatorTest can be instantiated without logger
    errors, that it exposes a resolved ``python_path``, and that the
    Windows venv layout check behaves as expected.

    Returns:
        bool: True when all checks pass, False otherwise (including any
        import/instantiation failure, which is caught and reported).
    """
    print("\n🧪 Test 6: BaseSimulatorTest fixes")
    print("-" * 60)

    try:
        # Import lazily so a broken module fails only this test group.
        from simulator_tests.base_test import BaseSimulatorTest

        # Instantiation used to fail when self.logger was never set up.
        base_test = BaseSimulatorTest(verbose=False)

        has_logger = hasattr(base_test, "logger") and base_test.logger is not None
        print(f" ✅ Logger initialized: {has_logger}")

        has_python_path = hasattr(base_test, "python_path") and base_test.python_path is not None
        print(f" ✅ Python path set: {has_python_path}")

        # `os` is already imported at module level; re-importing it here
        # was redundant, so only `platform` is imported locally.
        import platform

        if platform.system() == "Windows":
            current_dir = os.getcwd()
            expected_path = os.path.join(current_dir, ".zen_venv", "Scripts", "python.exe")

            # NOTE(review): checks the string we just built, not the real
            # detection logic — smoke check only.
            windows_detection = "Scripts" in expected_path
            print(f" ✅ Windows path detection: {windows_detection}")
        else:
            windows_detection = True  # Not applicable outside Windows.
            print(" ✅ Windows path detection: N/A (not Windows)")

        # Exercise attribute access that previously raised AttributeError.
        try:
            # NOTE(review): logging.Logger instances are not callable, so
            # `callable(...)` is expected to report False on a plain logger
            # attribute; the value is informational and does not feed
            # into `success`.
            logger_accessible = hasattr(base_test, "logger") and callable(getattr(base_test, "logger", None))
            method_callable = True
            print(f" ✅ Methods callable: {method_callable}")
            print(f" ✅ Logger accessible: {logger_accessible}")
        except AttributeError as e:
            if "logger" in str(e):
                method_callable = False
                print(f" ❌ Logger error still present: {e}")
            else:
                method_callable = True  # Different error, not logger-related
                print(f" ✅ No logger errors (different error): {str(e)[:50]}...")

        success = all([has_logger, has_python_path, windows_detection, method_callable])
        print(f"\nResult: BaseSimulatorTest {'passed' if success else 'failed'}")

        return success

    except Exception as e:
        print(f" ❌ Error testing BaseSimulatorTest: {e}")
        print("\nResult: BaseSimulatorTest failed")
        return False
|
||
|
||
|
||
def test_shell_scripts_windows_support():
    """Test 7: Shell scripts Windows support."""
    print("\n🧪 Test 7: Shell scripts Windows support")
    print("-" * 60)

    try:
        # run_integration_tests.sh: must know how to activate a Windows venv.
        try:
            run_text = Path("run_integration_tests.sh").read_text(encoding="utf-8")
        except FileNotFoundError:
            print(" ⚠️ run_integration_tests.sh not found")
            run_script_ok = True  # A missing script is not treated as a failure.
        else:
            venv_branch = 'elif [[ -f ".zen_venv/Scripts/activate" ]]; then' in run_text
            venv_message = "Using virtual environment (Windows)" in run_text

            print(f" ✅ run_integration_tests.sh Windows venv: {venv_branch}")
            print(f" ✅ run_integration_tests.sh Windows message: {venv_message}")

            run_script_ok = venv_branch and venv_message

        # code_quality_checks.sh: must locate Windows python/tool binaries.
        try:
            quality_text = Path("code_quality_checks.sh").read_text(encoding="utf-8")
        except FileNotFoundError:
            print(" ⚠️ code_quality_checks.sh not found")
            quality_script_ok = True  # A missing script is not treated as a failure.
        else:
            python_branch = 'elif [[ -f ".zen_venv/Scripts/python.exe" ]]; then' in quality_text
            tools_branch = 'elif [[ -f ".zen_venv/Scripts/ruff.exe" ]]; then' in quality_text
            quality_message = "Using venv (Windows)" in quality_text

            print(f" ✅ code_quality_checks.sh Windows Python: {python_branch}")
            print(f" ✅ code_quality_checks.sh Windows tools: {tools_branch}")
            print(f" ✅ code_quality_checks.sh Windows message: {quality_message}")

            quality_script_ok = python_branch and tools_branch and quality_message

        success = run_script_ok and quality_script_ok
        print(f"\nResult: Shell scripts {'passed' if success else 'failed'}")

        return success

    except Exception as e:
        print(f" ❌ Error testing shell scripts: {e}")
        print("\nResult: Shell scripts failed")
        return False
|
||
|
||
|
||
def test_docker_path_validation():
    """Test 8: Docker path validation in file_utils.py.

    Three sub-checks: (1) the PathModeDetector helper imports cleanly,
    (2) a container-style path like /app/... is accepted when Docker
    mode is forced via MCP_FILE_PATH_MODE, and (3) utils/file_utils.py
    textually contains the Docker-aware validation code.

    Returns True only if all three sub-checks pass; any unexpected
    error is caught and reported as a failure.
    """
    print("\n🧪 Test 8: Docker path validation")
    print("-" * 60)

    try:
        # Test that path_detector import is available
        try:
            from utils.path_detector import PathModeDetector

            detector_import = True
            print(" ✅ Path detector import: True")
        except ImportError as e:
            detector_import = False
            print(f" ❌ Path detector import: False ({e})")

        # Test Docker path validation with mocked Docker mode
        try:
            # Mock Docker mode
            with patch.dict(os.environ, {"MCP_FILE_PATH_MODE": "docker"}):
                # Reset singleton to pick up new environment
                # (PathModeDetector caches its mode per instance).
                PathModeDetector._instance = None

                # Test Docker path validation
                docker_path = "/app/project/test.py"
                try:
                    from utils.file_utils import resolve_and_validate_path

                    resolve_and_validate_path(docker_path)
                    docker_validation = True
                    print(" ✅ Docker path validation: True")
                except ValueError as e:
                    # A relative-path complaint means the Docker fix is absent;
                    # any other ValueError is a security rejection, which is fine.
                    if "Relative paths are not supported" in str(e):
                        docker_validation = False
                        print(" ❌ Docker path validation: False (still rejected)")
                    else:
                        docker_validation = True  # Different error, not path format
                        print(" ✅ Docker path validation: True (security error)")
                except Exception as e:
                    docker_validation = False
                    print(f" ❌ Docker path validation: Error ({e})")

            # Reset singleton after test so later tests see a clean state.
            PathModeDetector._instance = None

        except Exception as e:
            docker_validation = False
            print(f" ❌ Docker path test error: {e}")

        # Test that file_utils.py contains Docker-related code
        # (textual check against the source file, relative to the CWD).
        try:
            with open("utils/file_utils.py", encoding="utf-8") as f:
                file_utils_content = f.read()

            has_detector_import = "from utils.path_detector import get_path_detector" in file_utils_content
            has_docker_check = "is_docker_path = converted_path_str.startswith" in file_utils_content
            has_docker_mode = "is_docker_mode" in file_utils_content

            print(f" ✅ Has detector import: {has_detector_import}")
            print(f" ✅ Has Docker path check: {has_docker_check}")
            print(f" ✅ Has Docker mode check: {has_docker_mode}")

            file_content_ok = has_detector_import and has_docker_check and has_docker_mode

        except FileNotFoundError:
            file_content_ok = False
            print(" ❌ utils/file_utils.py not found")

        success = detector_import and docker_validation and file_content_ok
        print(f"\nResult: Docker path validation {'passed' if success else 'failed'}")

        return success

    except Exception as e:
        print(f" ❌ Error testing Docker path validation: {e}")
        print("\nResult: Docker path validation failed")
        return False
|
||
|
||
|
||
def test_conversation_docker_compatibility():
    """Test 9: Conversation tests Docker mode compatibility.

    Three sub-checks: (1) tests/test_conversation_file_features.py
    textually contains the Docker-mode fixes (forced local mode plus
    detector/cache resets), (2) the PathModeDetector singleton and its
    cached mode can be reset and re-resolved, and (3) patching
    MCP_FILE_PATH_MODE=local in the environment actually yields "local".

    Returns True only if all three sub-checks pass; any unexpected
    error is caught and reported as a failure.
    """
    print("\n🧪 Test 9: Conversation tests Docker mode compatibility")
    print("-" * 60)

    try:
        # Test that test_conversation_file_features.py contains Docker fixes
        # (textual check against the test source, relative to the CWD).
        try:
            with open("tests/test_conversation_file_features.py", encoding="utf-8") as f:
                test_content = f.read()

            has_local_mode = '"MCP_FILE_PATH_MODE": "local"' in test_content
            has_detector_reset = "PathModeDetector._instance = None" in test_content
            has_cache_reset = "detector._cached_mode = None" in test_content
            has_subdir = "zen-mcp-server" in test_content

            print(f" ✅ Forces local mode: {has_local_mode}")
            print(f" ✅ Resets PathModeDetector: {has_detector_reset}")
            print(f" ✅ Resets detector cache: {has_cache_reset}")
            print(f" ✅ Uses project subdirectory: {has_subdir}")

            # has_subdir is informational only and deliberately excluded here.
            test_content_ok = has_local_mode and has_detector_reset and has_cache_reset

        except FileNotFoundError:
            test_content_ok = False
            print(" ❌ tests/test_conversation_file_features.py not found")

        # Test PathModeDetector cache reset functionality
        try:
            from utils.path_detector import PathModeDetector, get_path_detector

            # Test that we can reset the singleton
            detector1 = get_path_detector()
            detector1.get_path_mode()

            # Reset and test again
            PathModeDetector._instance = None
            detector2 = get_path_detector()

            # Test cache reset (per-instance cached mode)
            detector2._cached_mode = None
            detector2.get_path_mode()

            reset_works = True
            print(" ✅ PathModeDetector reset: True")

        except Exception as e:
            reset_works = False
            print(f" ❌ PathModeDetector reset: False ({e})")

        # Test environment patching works
        try:
            from utils.path_detector import get_path_detector

            with patch.dict(os.environ, {"MCP_FILE_PATH_MODE": "local"}):
                # Fresh singleton + cleared cache so the patched env is read.
                PathModeDetector._instance = None
                detector = get_path_detector()
                detector._cached_mode = None
                mode = detector.get_path_mode()

                env_patch_works = mode == "local"
                print(f" ✅ Environment patching: {env_patch_works}")

        except Exception as e:
            env_patch_works = False
            print(f" ❌ Environment patching: False ({e})")

        success = test_content_ok and reset_works and env_patch_works
        print(f"\nResult: Conversation Docker compatibility {'passed' if success else 'failed'}")

        return success

    except Exception as e:
        print(f" ❌ Error testing conversation Docker compatibility: {e}")
        print("\nResult: Conversation Docker compatibility failed")
        return False
|
||
|
||
|
||
def main():
    """Main validation function."""
    print("🔧 Final validation of cross-platform fixes")
    print("=" * 70)
    print("This script validates that all fixes work on Windows.")
    print("=" * 70)

    # Every test group, in execution order: (label, callable).
    suites = [
        ("Home directory patterns", test_home_directory_patterns),
        ("Unix path validation", test_unix_path_validation),
        ("Safe files", test_safe_files_functionality),
        ("Cross-platform discovery", test_cross_platform_file_discovery),
        ("Communication simulator", test_communication_simulator_fixes),
        ("BaseSimulatorTest", test_base_simulator_test_fixes),
        ("Shell scripts Windows support", test_shell_scripts_windows_support),
        ("Docker path validation", test_docker_path_validation),
        ("Conversation Docker compatibility", test_conversation_docker_compatibility),
    ]
    results = [(label, runner()) for label, runner in suites]

    # Final summary
    print("\n" + "=" * 70)
    print("📊 FINAL SUMMARY")
    print("=" * 70)

    passed_tests = 0
    for label, ok in results:
        verdict = "PASSED" if ok else "FAILED"
        print(f"{verdict:<10} {label}")
        if ok:
            passed_tests += 1

    total_tests = len(results)
    print(f"\nOverall result: {passed_tests}/{total_tests} test groups passed")

    if passed_tests == total_tests:
        print("\n🎉 COMPLETE SUCCESS!")
        print("All cross-platform fixes work correctly.")
        return 0

    print("\n❌ FAILURES DETECTED")
    print("Some fixes need adjustments.")
    return 1
|
||
|
||
|
||
# Script entry point: exit status 0 when every test group passed, 1 otherwise.
if __name__ == "__main__":
    sys.exit(main())
|