Merge pull request #151 from GiGiDKR/feat-cross-plateform_compatibility

Feat: Cross-Platform Compatibility
This commit is contained in:
Beehive Innovations
2025-06-29 01:53:12 -07:00
committed by GitHub
7 changed files with 3425 additions and 0 deletions

README.md (modified)

@@ -170,11 +170,20 @@ cd zen-mcp-server
# One-command setup installs Zen in Claude
./run-server.sh
# Or for Windows users using PowerShell:
./run-server.ps1
# To view MCP configuration for Claude
./run-server.sh -c
# PowerShell:
./run-server.ps1 -Config
# See help for more
./run-server.sh --help
# PowerShell:
./run-server.ps1 -Help
```
**What this does:**

code_quality_checks.ps1 (Normal file, 231 lines)

@@ -0,0 +1,231 @@
#!/usr/bin/env pwsh
#Requires -Version 5.1
[CmdletBinding()]
param(
[switch]$SkipTests,
[switch]$SkipLinting,
[switch]$VerboseOutput
)
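# Usage: .\code_quality_checks.ps1 [-SkipTests] [-SkipLinting] [-VerboseOutput]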
# Set error action preference
$ErrorActionPreference = "Stop"
# Colors for output
function Write-ColorText {
param(
[Parameter(Mandatory)]
[string]$Text,
[string]$Color = "White"
)
Write-Host $Text -ForegroundColor $Color
}
function Write-Emoji {
param(
[Parameter(Mandatory)]
[string]$Emoji,
[Parameter(Mandatory)]
[string]$Text,
[string]$Color = "White"
)
Write-Host "$Emoji " -NoNewline
Write-ColorText $Text -Color $Color
}
Write-Emoji "🔍" "Running Code Quality Checks for Zen MCP Server" -Color Cyan
Write-ColorText "=================================================" -Color Cyan
# Determine Python command
$pythonCmd = $null
$pipCmd = $null
if (Test-Path ".zen_venv") {
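    # $IsWindows exists only on PowerShell Core (6+); $env:OS -eq "Windows_NT" covers Windows PowerShell 5.1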
if ($IsWindows -or $env:OS -eq "Windows_NT") {
if (Test-Path ".zen_venv\Scripts\python.exe") {
$pythonCmd = ".zen_venv\Scripts\python.exe"
$pipCmd = ".zen_venv\Scripts\pip.exe"
}
} else {
if (Test-Path ".zen_venv/bin/python") {
$pythonCmd = ".zen_venv/bin/python"
$pipCmd = ".zen_venv/bin/pip"
}
}
if ($pythonCmd) {
Write-Emoji "" "Using venv" -Color Green
}
} elseif ($env:VIRTUAL_ENV) {
$pythonCmd = "python"
$pipCmd = "pip"
Write-Emoji "" "Using activated virtual environment: $env:VIRTUAL_ENV" -Color Green
} else {
Write-Emoji "" "No virtual environment found!" -Color Red
Write-ColorText "Please run: .\run-server.ps1 first to set up the environment" -Color Yellow
exit 1
}
Write-Host ""
# Check and install dev dependencies if needed
Write-Emoji "🔍" "Checking development dependencies..." -Color Cyan
$devDepsNeeded = $false
# List of dev tools to check
$devTools = @("ruff", "black", "isort", "pytest")
foreach ($tool in $devTools) {
$toolFound = $false
# Check in venv
if ($IsWindows -or $env:OS -eq "Windows_NT") {
if (Test-Path ".zen_venv\Scripts\$tool.exe") {
$toolFound = $true
}
} else {
if (Test-Path ".zen_venv/bin/$tool") {
$toolFound = $true
}
}
# Check in PATH
if (!$toolFound) {
try {
$null = Get-Command $tool -ErrorAction Stop
$toolFound = $true
} catch {
# Tool not found
}
}
if (!$toolFound) {
$devDepsNeeded = $true
break
}
}
if ($devDepsNeeded) {
Write-Emoji "📦" "Installing development dependencies..." -Color Yellow
try {
& $pipCmd install -q -r requirements-dev.txt
if ($LASTEXITCODE -ne 0) {
throw "Failed to install dev dependencies"
}
Write-Emoji "" "Development dependencies installed" -Color Green
} catch {
Write-Emoji "" "Failed to install development dependencies" -Color Red
Write-ColorText "Error: $_" -Color Red
exit 1
}
} else {
Write-Emoji "" "Development dependencies already installed" -Color Green
}
# Set tool paths
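# Prefer the venv copy of each tool when it exists; otherwise fall back to the tool on PATH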
if ($IsWindows -or $env:OS -eq "Windows_NT") {
$ruffCmd = if (Test-Path ".zen_venv\Scripts\ruff.exe") { ".zen_venv\Scripts\ruff.exe" } else { "ruff" }
$blackCmd = if (Test-Path ".zen_venv\Scripts\black.exe") { ".zen_venv\Scripts\black.exe" } else { "black" }
$isortCmd = if (Test-Path ".zen_venv\Scripts\isort.exe") { ".zen_venv\Scripts\isort.exe" } else { "isort" }
$pytestCmd = if (Test-Path ".zen_venv\Scripts\pytest.exe") { ".zen_venv\Scripts\pytest.exe" } else { "pytest" }
} else {
$ruffCmd = if (Test-Path ".zen_venv/bin/ruff") { ".zen_venv/bin/ruff" } else { "ruff" }
$blackCmd = if (Test-Path ".zen_venv/bin/black") { ".zen_venv/bin/black" } else { "black" }
$isortCmd = if (Test-Path ".zen_venv/bin/isort") { ".zen_venv/bin/isort" } else { "isort" }
$pytestCmd = if (Test-Path ".zen_venv/bin/pytest") { ".zen_venv/bin/pytest" } else { "pytest" }
}
Write-Host ""
# Step 1: Linting and Formatting
if (!$SkipLinting) {
Write-Emoji "📋" "Step 1: Running Linting and Formatting Checks" -Color Cyan
Write-ColorText "--------------------------------------------------" -Color Cyan
try {
Write-Emoji "🔧" "Running ruff linting with auto-fix..." -Color Yellow
& $ruffCmd check --fix --exclude test_simulation_files --exclude .zen_venv
if ($LASTEXITCODE -ne 0) {
throw "Ruff linting failed"
}
Write-Emoji "🎨" "Running black code formatting..." -Color Yellow
& $blackCmd . --exclude="test_simulation_files/" --exclude=".zen_venv/"
if ($LASTEXITCODE -ne 0) {
throw "Black formatting failed"
}
Write-Emoji "📦" "Running import sorting with isort..." -Color Yellow
& $isortCmd . --skip-glob=".zen_venv/*" --skip-glob="test_simulation_files/*"
if ($LASTEXITCODE -ne 0) {
throw "Import sorting failed"
}
Write-Emoji "" "Verifying all linting passes..." -Color Yellow
& $ruffCmd check --exclude test_simulation_files --exclude .zen_venv
if ($LASTEXITCODE -ne 0) {
throw "Final linting verification failed"
}
Write-Emoji "" "Step 1 Complete: All linting and formatting checks passed!" -Color Green
} catch {
Write-Emoji "" "Step 1 Failed: Linting and formatting checks failed" -Color Red
Write-ColorText "Error: $_" -Color Red
exit 1
}
} else {
Write-Emoji "⏭️" "Skipping linting and formatting checks" -Color Yellow
}
Write-Host ""
# Step 2: Unit Tests
if (!$SkipTests) {
Write-Emoji "🧪" "Step 2: Running Complete Unit Test Suite" -Color Cyan
Write-ColorText "---------------------------------------------" -Color Cyan
try {
Write-Emoji "🏃" "Running unit tests (excluding integration tests)..." -Color Yellow
$pytestArgs = @("tests/", "-v", "-x", "-m", "not integration")
if ($VerboseOutput) {
$pytestArgs += "--verbose"
}
& $pythonCmd -m pytest @pytestArgs
if ($LASTEXITCODE -ne 0) {
throw "Unit tests failed"
}
Write-Emoji "" "Step 2 Complete: All unit tests passed!" -Color Green
} catch {
Write-Emoji "" "Step 2 Failed: Unit tests failed" -Color Red
Write-ColorText "Error: $_" -Color Red
exit 1
}
} else {
Write-Emoji "⏭️" "Skipping unit tests" -Color Yellow
}
Write-Host ""
# Step 3: Final Summary
Write-Emoji "🎉" "All Code Quality Checks Passed!" -Color Green
Write-ColorText "==================================" -Color Green
if (!$SkipLinting) {
Write-Emoji "" "Linting (ruff): PASSED" -Color Green
Write-Emoji "" "Formatting (black): PASSED" -Color Green
Write-Emoji "" "Import sorting (isort): PASSED" -Color Green
} else {
Write-Emoji "⏭️" "Linting: SKIPPED" -Color Yellow
}
if (!$SkipTests) {
Write-Emoji "" "Unit tests: PASSED" -Color Green
} else {
Write-Emoji "⏭️" "Unit tests: SKIPPED" -Color Yellow
}
Write-Host ""
Write-Emoji "🚀" "Your code is ready for commit and GitHub Actions!" -Color Green
Write-Emoji "💡" "Remember to add simulator tests if you modified tools" -Color Yellow

patch/README.md (Normal file, 93 lines)

@@ -0,0 +1,93 @@
# Cross-Platform Compatibility Patches
This directory contains patch scripts that improve the cross-platform compatibility of zen-mcp-server.
## Files
### `patch_crossplatform.py`
Main script that automatically applies all necessary fixes to resolve cross-platform compatibility issues.
**Usage:**
```bash
# From the patch/ directory
python patch_crossplatform.py [--dry-run] [--backup] [--validate-only]
```
**Options:**
- `--dry-run`: Show changes without applying them
- `--backup`: Create a backup before modifying files
- `--validate-only`: Only check if the fixes are already applied
### `validation_crossplatform.py`
Validation script that tests whether all fixes work correctly.
**Usage:**
```bash
# From the patch/ directory
python validation_crossplatform.py
```
## Applied Fixes
1. **HOME DIRECTORY DETECTION ON WINDOWS:**
- Linux tests (/home/ubuntu) failed on Windows
- Unix patterns were not detected due to backslashes
   - Solution: Added Windows patterns and a check of both path separator styles (see the sketch after this list)
2. **UNIX PATH VALIDATION ON WINDOWS:**
- Unix paths (/etc/passwd) were rejected as relative paths
- Solution: Accept Unix paths as absolute on Windows
3. **CROSS-PLATFORM TESTS:**
- Assertions used OS-specific separators
- The safe_files test used a non-existent file on Windows
- Solution: Use Path.parts + temporary files on Windows
4. **SHELL SCRIPT COMPATIBILITY ON WINDOWS:**
- Shell scripts did not detect Windows virtual environment paths
- Solution: Added detection for .zen_venv/Scripts/ paths
5. **COMMUNICATION SIMULATOR LOGGER BUG:**
- AttributeError: logger used before initialization
- Solution: Initialize logger before calling _get_python_path()
6. **PYTHON PATH DETECTION ON WINDOWS:**
- The simulator could not find the Windows Python executable
- Solution: Added Windows-specific detection
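
To make fixes 1 and 2 concrete, here is a minimal sketch of the kind of checks involved (`is_home_directory_root` is the real function name in `utils/file_utils.py`; `looks_absolute` and both function bodies are illustrative assumptions, not the patched code):

```python
from pathlib import Path


def is_home_directory_root(path: Path) -> bool:
    """Illustrative: is `path` a home root such as /home/alice, /Users/alice, or C:\\Users\\Alice?"""
    # Normalize separators so Unix-style input is also recognized on Windows.
    parts = [p for p in str(path).replace("\\", "/").split("/") if p and not p.endswith(":")]
    return len(parts) == 2 and parts[0].lower() in ("home", "users")


def looks_absolute(path_str: str) -> bool:
    """Illustrative: treat Unix-style paths as absolute even on Windows, where
    Path("/etc/passwd").is_absolute() is False because there is no drive letter."""
    return path_str.startswith("/") or Path(path_str).is_absolute()
```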
## How to Use
1. **Apply all fixes:**
```bash
cd patch/
python patch_crossplatform.py
```
2. **Test in dry-run mode (preview):**
```bash
cd patch/
python patch_crossplatform.py --dry-run
```
3. **Validate the fixes:**
```bash
cd patch/
python validation_crossplatform.py
```
4. **Check if fixes are already applied:**
```bash
cd patch/
python patch_crossplatform.py --validate-only
```
## Modified Files
- `utils/file_utils.py`: Home patterns + Unix path validation
- `tests/test_file_protection.py`: Cross-platform assertions
- `tests/test_utils.py`: Safe_files test with temporary file
- `run_integration_tests.sh`: Windows venv detection
- `code_quality_checks.sh`: venv and Windows tools detection
- `communication_simulator_test.py`: Logger initialization order + Windows paths
Tests should now pass on Windows, macOS, and Linux!

patch/patch_crossplatform.py (Normal file, 1252 lines)

File diff suppressed because it is too large

patch/validation_crossplatform.py (Normal file, 423 lines)

@@ -0,0 +1,423 @@
#!/usr/bin/env python3
"""
Validation script for all cross-platform fixes.
This script runs a comprehensive series of tests to validate that all applied fixes
work correctly on Windows, including:
1. Home directory pattern detection (Windows, macOS, Linux)
2. Unix path validation on Windows
3. Safe files functionality with temporary files
4. Cross-platform file discovery with Path.parts
5. Communication simulator logger and Python path fixes
6. BaseSimulatorTest logger and Python path fixes
7. Shell scripts Windows virtual environment support
Tests cover all modified files:
- utils/file_utils.py
- tests/test_file_protection.py
- tests/test_utils.py
- communication_simulator_test.py
- simulator_tests/base_test.py
- run_integration_tests.sh
- code_quality_checks.sh
"""
import sys
import tempfile
from pathlib import Path
from unittest.mock import patch
# Add parent directory to path to import project modules
sys.path.insert(0, str(Path(__file__).parent.parent))
# Import functions to test
from utils.file_utils import (
expand_paths,
is_home_directory_root,
read_file_content,
resolve_and_validate_path,
)
def test_home_directory_patterns():
"""Test 1: Home directory patterns on Windows."""
print("🧪 Test 1: Home directory patterns on Windows")
print("-" * 60)
test_cases = [
("/home/ubuntu", True, "Linux home directory"),
("/home/testuser", True, "Linux home directory"),
("/Users/john", True, "macOS home directory"),
("/Users/developer", True, "macOS home directory"),
("C:\\Users\\John", True, "Windows home directory"),
("C:/Users/Jane", True, "Windows home directory"),
("/home/ubuntu/projects", False, "Linux home subdirectory"),
("/Users/john/Documents", False, "macOS home subdirectory"),
("C:\\Users\\John\\Documents", False, "Windows home subdirectory"),
]
passed = 0
for path_str, expected, description in test_cases:
try:
result = is_home_directory_root(Path(path_str))
status = "" if result == expected else ""
print(f" {status} {path_str:<30} -> {result} ({description})")
if result == expected:
passed += 1
except Exception as e:
print(f"{path_str:<30} -> Exception: {e}")
success = passed == len(test_cases)
print(f"\nResult: {passed}/{len(test_cases)} tests passed")
return success
def test_unix_path_validation():
"""Test 2: Unix path validation on Windows."""
print("\n🧪 Test 2: Unix path validation on Windows")
print("-" * 60)
test_cases = [
("/etc/passwd", True, "Unix system file"),
("/home/user/file.txt", True, "Unix user file"),
("/usr/local/bin/python", True, "Unix binary path"),
("./relative/path", False, "Relative path"),
("relative/file.txt", False, "Relative file"),
("C:\\Windows\\System32", True, "Windows absolute path"),
]
passed = 0
for path_str, should_pass, description in test_cases:
try:
resolve_and_validate_path(path_str)
result = True
status = "" if should_pass else ""
print(f" {status} {path_str:<30} -> Accepted ({description})")
except ValueError:
result = False
status = "" if not should_pass else ""
print(f" {status} {path_str:<30} -> Rejected ({description})")
except PermissionError:
result = True # Rejected for security, not path format
status = "" if should_pass else ""
print(f" {status} {path_str:<30} -> Secured ({description})")
except Exception as e:
result = False
status = ""
print(f" {status} {path_str:<30} -> Error: {e}")
if result == should_pass:
passed += 1
success = passed == len(test_cases)
print(f"\nResult: {passed}/{len(test_cases)} tests passed")
return success
def test_safe_files_functionality():
"""Test 3: Safe files functionality."""
print("\n🧪 Test 3: Safe files functionality")
print("-" * 60)
# Create a temporary file to test
with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".txt") as f:
f.write("test content for validation")
temp_file = f.name
try:
# Test reading existing file
content, tokens = read_file_content(temp_file)
has_begin = f"--- BEGIN FILE: {temp_file} ---" in content
has_content = "test content for validation" in content
has_end = "--- END FILE:" in content
has_tokens = tokens > 0
print(f" ✅ BEGIN FILE found: {has_begin}")
print(f" ✅ Correct content: {has_content}")
print(f" ✅ END FILE found: {has_end}")
print(f" ✅ Tokens > 0: {has_tokens}")
success1 = all([has_begin, has_content, has_end, has_tokens])
# Test nonexistent Unix path (should return FILE NOT FOUND, not path error)
content, tokens = read_file_content("/etc/nonexistent")
not_found = "--- FILE NOT FOUND:" in content
no_path_error = "Relative paths are not supported" not in content
has_tokens2 = tokens > 0
print(f" ✅ Nonexistent Unix file: {not_found}")
print(f" ✅ No path error: {no_path_error}")
print(f" ✅ Tokens > 0: {has_tokens2}")
success2 = all([not_found, no_path_error, has_tokens2])
success = success1 and success2
print(f"\nResult: Safe files tests {'passed' if success else 'failed'}")
finally:
# Clean up
try:
Path(temp_file).unlink()
except Exception:
pass
return success
def test_cross_platform_file_discovery():
"""Test 4: Cross-platform file discovery."""
print("\n🧪 Test 4: Cross-platform file discovery")
print("-" * 60)
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = Path(tmp_dir)
# Create test structure
project = tmp_path / "test-project"
project.mkdir()
(project / "README.md").write_text("# Test Project")
(project / "main.py").write_text("print('Hello')")
src = project / "src"
src.mkdir()
(src / "app.py").write_text("# App code")
# Test with mock MCP
def mock_is_mcp(path):
return False # No MCP in this test
with patch("utils.file_utils.is_mcp_directory", side_effect=mock_is_mcp):
files = expand_paths([str(project)])
file_paths = [str(f) for f in files]
# Use Path.parts for cross-platform checks
readme_found = any(Path(p).parts[-2:] == ("test-project", "README.md") for p in file_paths)
main_found = any(Path(p).parts[-2:] == ("test-project", "main.py") for p in file_paths)
app_found = any(Path(p).parts[-2:] == ("src", "app.py") for p in file_paths)
print(f" ✅ README.md found: {readme_found}")
print(f" ✅ main.py found: {main_found}")
print(f" ✅ app.py found: {app_found}")
print(f" Files found: {len(file_paths)}")
success = all([readme_found, main_found, app_found])
print(f"\nResult: Cross-platform discovery {'passed' if success else 'failed'}")
return success
def test_communication_simulator_fixes():
"""Test 5: Communication simulator fixes"""
print("\n🧪 Test 5: Communication simulator fixes")
print("-" * 60)
try:
# Import and test CommunicationSimulator
from communication_simulator_test import CommunicationSimulator
# Test that we can create an instance without logger errors
simulator = CommunicationSimulator(verbose=False, keep_logs=True)
# Check that logger is properly initialized
has_logger = hasattr(simulator, "logger") and simulator.logger is not None
print(f" ✅ Logger initialized: {has_logger}")
# Check that python_path is set
has_python_path = hasattr(simulator, "python_path") and simulator.python_path is not None
print(f" ✅ Python path set: {has_python_path}")
# Check that the path detection logic includes Windows
import os
import platform
if platform.system() == "Windows":
# Test Windows path detection
current_dir = os.getcwd()
expected_paths = [
os.path.join(current_dir, ".zen_venv", "Scripts", "python.exe"),
os.path.join(current_dir, "venv", "Scripts", "python.exe"),
]
# Check if the method would detect Windows paths
windows_detection = any("Scripts" in path for path in expected_paths)
print(f" ✅ Windows path detection: {windows_detection}")
else:
windows_detection = True # Pass on non-Windows systems
print(" ✅ Windows path detection: N/A (not Windows)")
success = all([has_logger, has_python_path, windows_detection])
print(f"\nResult: Communication simulator {'passed' if success else 'failed'}")
return success
except Exception as e:
print(f" ❌ Error testing CommunicationSimulator: {e}")
print("\nResult: Communication simulator failed")
return False
def test_base_simulator_test_fixes():
"""Test 6: BaseSimulatorTest fixes."""
print("\n🧪 Test 6: BaseSimulatorTest fixes")
print("-" * 60)
try:
# Import and test BaseSimulatorTest
from simulator_tests.base_test import BaseSimulatorTest
# Test that we can create an instance without logger errors
base_test = BaseSimulatorTest(verbose=False)
# Check that logger is properly initialized
has_logger = hasattr(base_test, "logger") and base_test.logger is not None
print(f" ✅ Logger initialized: {has_logger}")
# Check that python_path is set
has_python_path = hasattr(base_test, "python_path") and base_test.python_path is not None
print(f" ✅ Python path set: {has_python_path}")
# Check that the path detection logic includes Windows
import os
import platform
if platform.system() == "Windows":
# Test Windows path detection
current_dir = os.getcwd()
expected_path = os.path.join(current_dir, ".zen_venv", "Scripts", "python.exe")
# Check if the method would detect Windows paths
windows_detection = "Scripts" in expected_path
print(f" ✅ Windows path detection: {windows_detection}")
else:
windows_detection = True # Pass on non-Windows systems
print(" ✅ Windows path detection: N/A (not Windows)")
# Test that we can call methods that previously failed
try:
# Test accessing properties without calling abstract methods
# Just check that logger-related functionality works
logger_accessible = hasattr(base_test, "logger") and callable(getattr(base_test, "logger", None))
method_callable = True
print(f" ✅ Methods callable: {method_callable}")
print(f" ✅ Logger accessible: {logger_accessible}")
except AttributeError as e:
if "logger" in str(e):
method_callable = False
print(f" ❌ Logger error still present: {e}")
else:
method_callable = True # Different error, not logger-related
print(f" ✅ No logger errors (different error): {str(e)[:50]}...")
success = all([has_logger, has_python_path, windows_detection, method_callable])
print(f"\nResult: BaseSimulatorTest {'passed' if success else 'failed'}")
return success
except Exception as e:
print(f" ❌ Error testing BaseSimulatorTest: {e}")
print("\nResult: BaseSimulatorTest failed")
return False
def test_shell_scripts_windows_support():
"""Test 7: Shell scripts Windows support."""
print("\n🧪 Test 7: Shell scripts Windows support")
print("-" * 60)
try:
# Check run_integration_tests.sh
try:
with open("run_integration_tests.sh", encoding="utf-8") as f:
run_script_content = f.read()
has_windows_venv = 'elif [[ -f ".zen_venv/Scripts/activate" ]]; then' in run_script_content
has_windows_msg = "Using virtual environment (Windows)" in run_script_content
print(f" ✅ run_integration_tests.sh Windows venv: {has_windows_venv}")
print(f" ✅ run_integration_tests.sh Windows message: {has_windows_msg}")
run_script_ok = has_windows_venv and has_windows_msg
except FileNotFoundError:
print(" ⚠️ run_integration_tests.sh not found")
run_script_ok = True # Skip if file doesn't exist
# Check code_quality_checks.sh
try:
with open("code_quality_checks.sh", encoding="utf-8") as f:
quality_script_content = f.read()
has_windows_python = 'elif [[ -f ".zen_venv/Scripts/python.exe" ]]; then' in quality_script_content
has_windows_tools = 'elif [[ -f ".zen_venv/Scripts/ruff.exe" ]]; then' in quality_script_content
has_windows_msg = "Using venv (Windows)" in quality_script_content
print(f" ✅ code_quality_checks.sh Windows Python: {has_windows_python}")
print(f" ✅ code_quality_checks.sh Windows tools: {has_windows_tools}")
print(f" ✅ code_quality_checks.sh Windows message: {has_windows_msg}")
quality_script_ok = has_windows_python and has_windows_tools and has_windows_msg
except FileNotFoundError:
print(" ⚠️ code_quality_checks.sh not found")
quality_script_ok = True # Skip if file doesn't exist
success = run_script_ok and quality_script_ok
print(f"\nResult: Shell scripts {'passed' if success else 'failed'}")
return success
except Exception as e:
print(f" ❌ Error testing shell scripts: {e}")
print("\nResult: Shell scripts failed")
return False
def main():
"""Main validation function."""
print("🔧 Final validation of cross-platform fixes")
print("=" * 70)
print("This script validates that all fixes work on Windows.")
print("=" * 70)
# Run all tests
results = []
results.append(("Home directory patterns", test_home_directory_patterns()))
results.append(("Unix path validation", test_unix_path_validation()))
results.append(("Safe files", test_safe_files_functionality()))
results.append(("Cross-platform discovery", test_cross_platform_file_discovery()))
results.append(("Communication simulator", test_communication_simulator_fixes()))
results.append(("BaseSimulatorTest", test_base_simulator_test_fixes()))
results.append(("Shell scripts Windows support", test_shell_scripts_windows_support()))
# Final summary
print("\n" + "=" * 70)
print("📊 FINAL SUMMARY")
print("=" * 70)
passed_tests = 0
for test_name, success in results:
status = "PASSED" if success else "FAILED"
print(f"{status:<10} {test_name}")
if success:
passed_tests += 1
total_tests = len(results)
print(f"\nOverall result: {passed_tests}/{total_tests} test groups passed")
if passed_tests == total_tests:
print("\n🎉 COMPLETE SUCCESS!")
print("All cross-platform fixes work correctly.")
return 0
else:
print("\n❌ FAILURES DETECTED")
print("Some fixes need adjustments.")
return 1
if __name__ == "__main__":
sys.exit(main())

run-server.ps1 (Normal file, 1216 lines)

File diff suppressed because it is too large

run_integration_tests.ps1 (Normal file, 201 lines)

@@ -0,0 +1,201 @@
#!/usr/bin/env pwsh
#Requires -Version 5.1
[CmdletBinding()]
param(
[switch]$WithSimulator,
[switch]$VerboseOutput
)
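# Usage: .\run_integration_tests.ps1 [-WithSimulator] [-VerboseOutput]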
# Set error action preference
$ErrorActionPreference = "Stop"
# Colors for output
function Write-ColorText {
param(
[Parameter(Mandatory)]
[string]$Text,
[string]$Color = "White",
[switch]$NoNewline
)
if ($NoNewline) {
Write-Host $Text -ForegroundColor $Color -NoNewline
} else {
Write-Host $Text -ForegroundColor $Color
}
}
function Write-Emoji {
param(
[Parameter(Mandatory)]
[string]$Emoji,
[Parameter(Mandatory)]
[string]$Text,
[string]$Color = "White"
)
Write-Host "$Emoji " -NoNewline
Write-ColorText $Text -Color $Color
}
Write-Emoji "🧪" "Running Integration Tests for Zen MCP Server" -Color Cyan
Write-ColorText "==============================================" -Color Cyan
Write-ColorText "These tests use real API calls with your configured keys"
Write-Host ""
# Check for virtual environment
$venvPath = ".zen_venv"
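# $IsWindows exists only on PowerShell Core (6+); $env:OS -eq "Windows_NT" covers Windows PowerShell 5.1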
$activateScript = if ($IsWindows -or $env:OS -eq "Windows_NT") {
"$venvPath\Scripts\Activate.ps1"
} else {
"$venvPath/bin/activate"
}
if (Test-Path $venvPath) {
Write-Emoji "" "Virtual environment found" -Color Green
# Activate virtual environment (for PowerShell on Windows)
if ($IsWindows -or $env:OS -eq "Windows_NT") {
if (Test-Path "$venvPath\Scripts\Activate.ps1") {
& "$venvPath\Scripts\Activate.ps1"
} elseif (Test-Path "$venvPath\Scripts\activate.bat") {
# Use Python directly from venv
$env:PATH = "$PWD\$venvPath\Scripts;$env:PATH"
}
}
} else {
Write-Emoji "" "No virtual environment found!" -Color Red
Write-ColorText "Please run: .\run-server.ps1 first" -Color Yellow
exit 1
}
# Check for .env file
if (!(Test-Path ".env")) {
Write-Emoji "⚠️" "Warning: No .env file found. Integration tests may fail without API keys." -Color Yellow
Write-Host ""
}
Write-Emoji "🔑" "Checking API key availability:" -Color Cyan
Write-ColorText "---------------------------------" -Color Cyan
# Function to check if API key is configured
function Test-ApiKey {
param(
[string]$KeyName
)
# Check environment variable
$envValue = [Environment]::GetEnvironmentVariable($KeyName)
if (![string]::IsNullOrWhiteSpace($envValue)) {
return $true
}
# Check .env file
if (Test-Path ".env") {
$envContent = Get-Content ".env" -ErrorAction SilentlyContinue
$found = $envContent | Where-Object { $_ -match "^$KeyName\s*=" -and $_ -notmatch "^$KeyName\s*=\s*$" }
return $found.Count -gt 0
}
return $false
}
# Check API keys
$apiKeys = @(
"GEMINI_API_KEY",
"OPENAI_API_KEY",
"XAI_API_KEY",
"OPENROUTER_API_KEY",
"CUSTOM_API_URL"
)
foreach ($key in $apiKeys) {
if (Test-ApiKey $key) {
if ($key -eq "CUSTOM_API_URL") {
Write-Emoji "" "$key configured (local models)" -Color Green
} else {
Write-Emoji "" "$key configured" -Color Green
}
} else {
Write-Emoji "" "$key not found" -Color Red
}
}
Write-Host ""
# Load environment variables from .env if it exists
if (Test-Path ".env") {
Get-Content ".env" | ForEach-Object {
if ($_ -match '^([^#][^=]*?)=(.*)$') {
$name = $matches[1].Trim()
$value = $matches[2].Trim()
# Remove quotes if present
$value = $value -replace '^["'']|["'']$', ''
[Environment]::SetEnvironmentVariable($name, $value, "Process")
}
}
}
# Run integration tests
Write-Emoji "🏃" "Running integration tests..." -Color Cyan
Write-ColorText "------------------------------" -Color Cyan
try {
# Build pytest command
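    # -m integration selects only tests marked @pytest.mark.integration; --tb=short keeps tracebacks brief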
$pytestArgs = @("tests/", "-v", "-m", "integration", "--tb=short")
if ($VerboseOutput) {
$pytestArgs += "--verbose"
}
# Run pytest
python -m pytest @pytestArgs
if ($LASTEXITCODE -ne 0) {
throw "Integration tests failed"
}
Write-Host ""
Write-Emoji "" "Integration tests completed!" -Color Green
} catch {
Write-Host ""
Write-Emoji "" "Integration tests failed!" -Color Red
Write-ColorText "Error: $_" -Color Red
exit 1
}
# Run simulator tests if requested
if ($WithSimulator) {
Write-Host ""
Write-Emoji "🤖" "Running simulator tests..." -Color Cyan
Write-ColorText "----------------------------" -Color Cyan
try {
if ($VerboseOutput) {
python communication_simulator_test.py --verbose
} else {
python communication_simulator_test.py
}
if ($LASTEXITCODE -ne 0) {
Write-Host ""
Write-Emoji "" "Simulator tests failed!" -Color Red
Write-ColorText "This may be due to a known issue in communication_simulator_test.py" -Color Yellow
Write-ColorText "Integration tests completed successfully - you can proceed." -Color Green
} else {
Write-Host ""
Write-Emoji "" "Simulator tests completed!" -Color Green
}
} catch {
Write-Host ""
Write-Emoji "" "Simulator tests failed!" -Color Red
Write-ColorText "Error: $_" -Color Red
Write-ColorText "This may be due to a known issue in communication_simulator_test.py" -Color Yellow
Write-ColorText "Integration tests completed successfully - you can proceed." -Color Green
}
}
Write-Host ""
Write-Emoji "💡" "Tips:" -Color Yellow
Write-ColorText "- Run '.\run_integration_tests.ps1' for integration tests only" -Color White
Write-ColorText "- Run '.\run_integration_tests.ps1 -WithSimulator' to also run simulator tests" -Color White
Write-ColorText "- Run '.\code_quality_checks.ps1' for unit tests and linting" -Color White
Write-ColorText "- Check logs in logs\mcp_server.log if tests fail" -Color White