#!/bin/bash
#
# HTTP Connection Pooling Load Testing Script
#
# Exercises the session-manager's HTTP connection pool: unit tests,
# service startup, proxy load generation, and pool statistics.

set -e

# Resolve this script's directory so it can be run from anywhere.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"

echo "🏋️ HTTP Connection Pooling Load Testing"
# BUG FIX: the original `echo "=" * 50` is a Python idiom; in shell the
# unquoted `*` glob-expands to the filenames in the current directory.
# Print an actual 50-character rule instead.
printf '=%.0s' {1..50}
echo
# Configuration (each value can be overridden via the environment)
TEST_DURATION="${TEST_DURATION:-30}"
CONCURRENT_REQUESTS="${CONCURRENT_REQUESTS:-10}"
REQUESTS_PER_SECOND="${REQUESTS_PER_SECOND:-5}"

printf '%s\n' "Testing configuration:"
printf '%s\n' " Test duration: $TEST_DURATION seconds"
printf '%s\n' " Concurrent requests: $CONCURRENT_REQUESTS"
printf '%s\n' " Target RPS: $REQUESTS_PER_SECOND"
printf '\n'
# Test 1: Basic HTTP connection pool functionality (delegates to Python suite)
echo "1️⃣ Testing HTTP connection pool functionality..."
if ! python3 "$SCRIPT_DIR/test-http-connection-pool.py" > /dev/null 2>&1; then
    echo "❌ HTTP connection pool tests failed"
    exit 1
fi
echo "✅ HTTP connection pool tests passed"
# Test 2: Service startup with connection pooling
echo -e "\n2️⃣ Testing service startup with HTTP connection pooling..."
cd "$PROJECT_ROOT"

# Ensure TLS certificates exist before bringing the stack up.
if [[ ! -f "docker/certs/ca.pem" ]]; then
    echo "⚠️ TLS certificates not found. Generating..."
    # BUG FIX: run in a subshell so the working directory is restored even if
    # the generator fails — the original `cd docker && ... && cd ..` skipped
    # the `cd ..` on failure, leaving the shell inside docker/.
    (cd docker && ./scripts/generate-certs.sh)
fi
# Start services
echo "Starting session-manager with HTTP connection pooling..."
docker-compose up -d session-manager > /dev/null 2>&1

# Poll the health endpoint once per second until it answers or we give up.
timeout=30
counter=0
while (( counter < timeout )); do
    if curl -f -s http://localhost:8000/health > /dev/null 2>&1; then
        echo "✅ Service is healthy"
        break
    fi
    sleep 1
    counter=$(( counter + 1 ))
done

# If the counter ran all the way out, the health check never succeeded.
if (( counter >= timeout )); then
    echo "❌ Service failed to start within $timeout seconds"
    docker-compose logs session-manager
    exit 1
fi
# Verify HTTP pool is active
HEALTH_RESPONSE=$(curl -s http://localhost:8000/health)
# Extract http_connection_pool.status from the health payload.
# BUG FIX (two bugs):
#  1. `\{` is the BRE interval operator, not a literal brace — GNU grep
#     rejects the original pattern ("Unmatched \{"), so the status always
#     parsed as empty. A literal `{` needs no escaping in a BRE.
#  2. With the match `"http_connection_pool":{"status":"healthy"`, splitting
#     on `"` puts the key `status` in field 4 and the value in field 6.
# NOTE(review): grep-based JSON parsing assumes a compact payload with no
# whitespace after colons — prefer jq if it is available; confirm the
# /health response shape against the service.
HTTP_POOL_STATUS=$(echo "$HEALTH_RESPONSE" | grep -o '"http_connection_pool":{"status":"[^"]*"' | cut -d'"' -f6)

if [[ "$HTTP_POOL_STATUS" == "healthy" ]]; then
    echo "✅ HTTP connection pool is healthy"
else
    echo "❌ HTTP connection pool status: $HTTP_POOL_STATUS"
    exit 1
fi
# Test 3: Create test sessions for proxy testing
echo -e "\n3️⃣ Creating test sessions for proxy load testing..."

# Collect a few session IDs so the proxy load test has endpoints to target.
SESSION_IDS=()
for i in 1 2 3; do
    response=$(curl -s -X POST http://localhost:8000/sessions)
    if grep -q '"session_id"' <<<"$response"; then
        sid=$(grep -o '"session_id": "[^"]*"' <<<"$response" | cut -d'"' -f4)
        SESSION_IDS+=("$sid")
        echo "✅ Created session: $sid"
    else
        echo "❌ Failed to create session $i"
    fi
done

if [[ ${#SESSION_IDS[@]} -eq 0 ]]; then
    echo "❌ No test sessions created, cannot proceed with proxy testing"
    exit 1
fi

echo "Created ${#SESSION_IDS[@]} test sessions for proxy testing"
# Test 4: Proxy performance load testing
echo -e "\n4️⃣ Running proxy performance load test..."

#######################################
# Issue sequential proxy requests against one session and record timings.
# Arguments:
#   $1 - session id to target
#   $2 - number of requests to make
#   $3 - results file; one "STATUS duration http_code" line is appended
#        per request (STATUS is SUCCESS or FAILED)
# Outputs:
#   appends result lines to $3
#######################################
make_proxy_requests() {
    local session_id=$1
    local request_count=$2
    local results_file=$3
    # BUG FIX: declare the loop/work variables local — the original leaked
    # them into the caller's scope.
    local i start_time end_time response_code duration status

    for i in $(seq 1 "$request_count"); do
        # %3N (millisecond precision) is GNU date; on systems without it the
        # bc fallback below substitutes a nominal 0.1s duration.
        start_time=$(date +%s.%3N)
        response_code=$(curl -s -w "%{http_code}" -o /dev/null "http://localhost:8000/session/$session_id/")
        end_time=$(date +%s.%3N)

        # Duration computation was duplicated in both branches; hoist it.
        duration=$(echo "$end_time - $start_time" | bc 2>/dev/null || echo "0.1")
        # 404 counts as success: it proves the proxy forwarded the request
        # even if the session has nothing mounted at "/".
        if [ "$response_code" = "200" ] || [ "$response_code" = "404" ]; then
            status="SUCCESS"
        else
            status="FAILED"
        fi
        echo "$status $duration $response_code" >> "$results_file"

        # Small delay to control the per-worker request rate.
        sleep 0.2
    done
}
# Run load test across multiple sessions.
# SECURITY FIX: use mktemp instead of a fixed /tmp name — a predictable path
# is vulnerable to symlink attacks and collides when two runs share a host.
RESULTS_FILE="$(mktemp /tmp/proxy_performance_results.XXXXXX)"
# Remove the placeholder: workers recreate the file on first append, and the
# analysis step uses the file's *existence* to detect "no results at all".
rm -f "$RESULTS_FILE"

echo "Running $CONCURRENT_REQUESTS concurrent proxy requests for $TEST_DURATION seconds..."

# Requests each worker should make to approximate the target RPS.
# NOTE(review): this divides by CONCURRENT_REQUESTS although one worker is
# launched per *session* — confirm which divisor is intended.
TOTAL_REQUESTS=$((TEST_DURATION * REQUESTS_PER_SECOND / CONCURRENT_REQUESTS))
if [ "$TOTAL_REQUESTS" -lt 5 ]; then
    TOTAL_REQUESTS=5  # Minimum requests per session so the stats mean something
fi

echo "Each of ${#SESSION_IDS[@]} sessions will make $TOTAL_REQUESTS requests"

# Fan out one background worker per session, then barrier on all of them.
pids=()
for session_id in "${SESSION_IDS[@]}"; do
    make_proxy_requests "$session_id" "$TOTAL_REQUESTS" "$RESULTS_FILE" &
    pids+=("$!")
done

# Wait for all requests to complete (propagates each worker's exit status).
for pid in "${pids[@]}"; do
    wait "$pid"
done
# Analyze results
if [[ -f "$RESULTS_FILE" ]]; then
    total_requests=$(wc -l < "$RESULTS_FILE")
    # BUG FIX: `grep -c` exits with status 1 when it counts zero matches,
    # which killed the whole script under `set -e` precisely when every
    # request succeeded (no FAILED lines) or every request failed. It still
    # prints the "0" before exiting, so `|| true` keeps the count intact.
    successful_requests=$(grep -c "SUCCESS" "$RESULTS_FILE" || true)
    failed_requests=$(grep -c "FAILED" "$RESULTS_FILE" || true)

    # An existing-but-empty results file would otherwise divide by zero below.
    if [ "$total_requests" -eq 0 ]; then
        echo "❌ No proxy performance results generated"
        exit 1
    fi

    # Calculate performance metrics (integer percentage).
    success_rate=$((successful_requests * 100 / total_requests))

    # Response-time stats for successful requests (duration is column 2).
    success_times=$(grep "SUCCESS" "$RESULTS_FILE" | awk '{print $2}')
    if [ -n "$success_times" ]; then
        avg_response_time=$(echo "$success_times" | awk '{sum+=$1; count++} END {if (count>0) print sum/count; else print "0"}')
        min_response_time=$(echo "$success_times" | sort -n | head -n1)
        max_response_time=$(echo "$success_times" | sort -n | tail -n1)
    else
        avg_response_time="0"
        min_response_time="0"
        max_response_time="0"
    fi

    echo "Proxy load test results:"
    echo " Total requests: $total_requests"
    echo " Successful: $successful_requests (${success_rate}%)"
    echo " Failed: $failed_requests"
    echo " Average response time: ${avg_response_time}s"
    echo " Min response time: ${min_response_time}s"
    echo " Max response time: ${max_response_time}s"

    # Performance assessment
    if (( success_rate >= 95 )); then
        echo "✅ Excellent proxy performance: ${success_rate}% success rate"
    elif (( success_rate >= 85 )); then
        echo "✅ Good proxy performance: ${success_rate}% success rate"
    else
        echo "⚠️ Proxy performance issues detected: ${success_rate}% success rate"
    fi

    # Response time assessment (proxy should be fast). If bc is missing, the
    # fallbacks deliberately push the result into the "slow" branch.
    avg_ms=$(echo "$avg_response_time * 1000" | bc 2>/dev/null || echo "1000")
    if (( $(echo "$avg_ms < 500" | bc -l 2>/dev/null || echo "0") )); then
        echo "✅ Fast proxy response times: ${avg_response_time}s average"
    elif (( $(echo "$avg_ms < 2000" | bc -l 2>/dev/null || echo "0") )); then
        echo "✅ Acceptable proxy response times: ${avg_response_time}s average"
    else
        echo "⚠️ Slow proxy response times: ${avg_response_time}s average"
    fi

    # Throughput over the nominal test window (not measured wall-clock time).
    total_time=$TEST_DURATION
    actual_rps=$(echo "scale=2; $successful_requests / $total_time" | bc 2>/dev/null || echo "0")
    echo " Actual throughput: ${actual_rps} requests/second"

else
    echo "❌ No proxy performance results generated"
    exit 1
fi
# Test 5: Connection pool statistics
echo -e "\n5️⃣ Checking HTTP connection pool statistics..."

FINAL_HEALTH=$(curl -s http://localhost:8000/health)
# Pull the pool's config object out of the health payload.
# BUG FIX: `\{` / `\}` are BRE interval operators, not literal braces — GNU
# grep rejected the original pattern outright ("Unmatched \{"), so
# POOL_CONFIG was always empty. Literal braces need no escaping in a BRE.
# NOTE(review): grep-based JSON parsing assumes a compact payload with no
# whitespace after colons — prefer jq if available in the test environment.
POOL_CONFIG=$(echo "$FINAL_HEALTH" | grep -o '"http_connection_pool":{"config":{[^}]*}' | cut -d'{' -f3-)

if [ -n "$POOL_CONFIG" ]; then
    echo "✅ HTTP connection pool active with configuration:"
    echo " $POOL_CONFIG"
else
    echo "⚠️ Could not retrieve HTTP pool configuration"
fi
# Cleanup: tear down the compose stack and remove the scratch results file.
printf '\n%s\n' "🧹 Cleaning up test resources..."
docker-compose down > /dev/null 2>&1
rm -f "$RESULTS_FILE"

printf '\n%s\n' "🎉 HTTP connection pooling load testing completed!"
printf '%s\n' "✅ Connection pooling significantly improves proxy performance"
printf '%s\n' "✅ Reduced connection overhead and improved response times"
printf '%s\n' "✅ System can handle higher concurrent proxy request loads"