""" Performance monitoring and reporting utilities for ATS load testing. This module provides tools for monitoring system performance during load tests, collecting metrics, and generating comprehensive reports. """ import os import json import time import psutil import threading from datetime import datetime, timedelta from typing import Dict, List, Any, Optional from dataclasses import dataclass, asdict import matplotlib.pyplot as plt import pandas as pd from locust import events import requests @dataclass class SystemMetrics: """System performance metrics at a point in time.""" timestamp: datetime cpu_percent: float memory_percent: float memory_used_gb: float disk_usage_percent: float network_io: Dict[str, int] active_connections: int @dataclass class DatabaseMetrics: """Database performance metrics.""" timestamp: datetime active_connections: int query_count: int avg_query_time: float slow_queries: int cache_hit_ratio: float @dataclass class TestResults: """Complete test results summary.""" test_name: str start_time: datetime end_time: datetime duration_seconds: float total_requests: int total_failures: int avg_response_time: float median_response_time: float p95_response_time: float p99_response_time: float requests_per_second: float peak_rps: float system_metrics: List[SystemMetrics] database_metrics: List[DatabaseMetrics] error_summary: Dict[str, int] class PerformanceMonitor: """Monitors system performance during load tests.""" def __init__(self, interval: float = 5.0): self.interval = interval self.monitoring = False self.system_metrics = [] self.database_metrics = [] self.monitor_thread = None self.start_time = None def start_monitoring(self): """Start performance monitoring.""" self.monitoring = True self.start_time = datetime.now() self.system_metrics = [] self.database_metrics = [] self.monitor_thread = threading.Thread(target=self._monitor_loop) self.monitor_thread.daemon = True self.monitor_thread.start() print(f"Performance monitoring started (interval: {self.interval}s)") def stop_monitoring(self): """Stop performance monitoring.""" self.monitoring = False if self.monitor_thread: self.monitor_thread.join(timeout=10) print("Performance monitoring stopped") def _monitor_loop(self): """Main monitoring loop.""" while self.monitoring: try: # Collect system metrics system_metric = self._collect_system_metrics() self.system_metrics.append(system_metric) # Collect database metrics db_metric = self._collect_database_metrics() if db_metric: self.database_metrics.append(db_metric) time.sleep(self.interval) except Exception as e: print(f"Error in monitoring loop: {e}") time.sleep(self.interval) def _collect_system_metrics(self) -> SystemMetrics: """Collect current system metrics.""" # CPU and Memory cpu_percent = psutil.cpu_percent(interval=1) memory = psutil.virtual_memory() disk = psutil.disk_usage('/') # Network I/O network = psutil.net_io_counters() network_io = { 'bytes_sent': network.bytes_sent, 'bytes_recv': network.bytes_recv, 'packets_sent': network.packets_sent, 'packets_recv': network.packets_recv } # Network connections connections = len(psutil.net_connections()) return SystemMetrics( timestamp=datetime.now(), cpu_percent=cpu_percent, memory_percent=memory.percent, memory_used_gb=memory.used / (1024**3), disk_usage_percent=disk.percent, network_io=network_io, active_connections=connections ) def _collect_database_metrics(self) -> Optional[DatabaseMetrics]: """Collect database metrics (PostgreSQL specific).""" try: # This would need to be adapted based on your database setup 
            # For now, return mock data
            return DatabaseMetrics(
                timestamp=datetime.now(),
                active_connections=10,
                query_count=1000,
                avg_query_time=0.05,
                slow_queries=2,
                cache_hit_ratio=0.85
            )
        except Exception as e:
            print(f"Error collecting database metrics: {e}")
            return None


class ReportGenerator:
    """Generates comprehensive performance reports."""

    def __init__(self, output_dir: str = "load_tests/reports"):
        self.output_dir = output_dir
        os.makedirs(output_dir, exist_ok=True)

    def generate_html_report(self, results: TestResults) -> str:
        """Generate an HTML performance report."""
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"performance_report_{timestamp}.html"
        filepath = os.path.join(self.output_dir, filename)

        html_content = self._create_html_template(results)

        with open(filepath, 'w') as f:
            f.write(html_content)

        print(f"HTML report generated: {filepath}")
        return filepath

    def generate_json_report(self, results: TestResults) -> str:
        """Generate a JSON performance report."""
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"performance_report_{timestamp}.json"
        filepath = os.path.join(self.output_dir, filename)

        # Convert dataclasses to dicts
        results_dict = asdict(results)

        # Convert datetime objects to strings
        for key, value in results_dict.items():
            if isinstance(value, datetime):
                results_dict[key] = value.isoformat()

        # Convert system and database metrics
        if 'system_metrics' in results_dict:
            results_dict['system_metrics'] = [
                asdict(metric) for metric in results.system_metrics
            ]
            for metric in results_dict['system_metrics']:
                metric['timestamp'] = metric['timestamp'].isoformat()

        if 'database_metrics' in results_dict:
            results_dict['database_metrics'] = [
                asdict(metric) for metric in results.database_metrics
            ]
            for metric in results_dict['database_metrics']:
                metric['timestamp'] = metric['timestamp'].isoformat()

        with open(filepath, 'w') as f:
            json.dump(results_dict, f, indent=2)

        print(f"JSON report generated: {filepath}")
        return filepath

    def generate_charts(self, results: TestResults) -> List[str]:
        """Generate performance charts."""
        chart_files = []
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

        if results.system_metrics:
            # System metrics chart
            chart_file = self._create_system_metrics_chart(results.system_metrics, timestamp)
            chart_files.append(chart_file)

        return chart_files

    def _create_html_template(self, results: TestResults) -> str:
        """Create HTML template for the report."""
        return f"""
        <html>
        <head><title>Performance Report - {results.test_name}</title></head>
        <body>
            <h1>Performance Report: {results.test_name}</h1>
            <p>Test Duration: {results.duration_seconds:.2f} seconds</p>
            <p>Test Period: {results.start_time} to {results.end_time}</p>
            <p>Total requests: {results.total_requests} ({results.total_failures} failures),
               {results.requests_per_second:.2f} req/s (peak {results.peak_rps:.2f}),
               p95 {results.p95_response_time:.2f} ms, p99 {results.p99_response_time:.2f} ms</p>

            <h2>System Metrics</h2>
            {self._create_system_metrics_summary(results.system_metrics)}

            <h2>Errors</h2>
            {self._create_error_summary(results.error_summary)}
        </body>
        </html>
        """

    def _create_system_metrics_summary(self, metrics: List[SystemMetrics]) -> str:
        """Create an HTML summary of the collected system metrics."""
        if not metrics:
            return "<p>No system metrics available</p>"

        avg_cpu = sum(m.cpu_percent for m in metrics) / len(metrics)
        avg_memory = sum(m.memory_percent for m in metrics) / len(metrics)
        max_cpu = max(m.cpu_percent for m in metrics)
        max_memory = max(m.memory_percent for m in metrics)

        return f"""
        <ul>
            <li>CPU: {avg_cpu:.1f}% average, {max_cpu:.1f}% peak</li>
            <li>Memory: {avg_memory:.1f}% average, {max_memory:.1f}% peak</li>
        </ul>
        """

    def _create_error_summary(self, errors: Dict[str, int]) -> str:
        """Create an HTML table summarizing errors by type."""
        if not errors:
            return "<p>No errors recorded</p>"

        rows = ""
        for error_type, count in errors.items():
            rows += f"<tr><td>{error_type}</td><td>{count}</td></tr>"

        return f"""
        <table>
            <tr><th>Error Type</th><th>Count</th></tr>
            {rows}
        </table>
        """
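    # Illustrative wiring of the pieces above (a sketch, not part of the module's
    # public API; the test name and the zeroed request statistics are placeholder
    # assumptions for the example):
    #
    #   monitor = PerformanceMonitor(interval=5.0)
    #   monitor.start_monitoring()
    #   # ... run the load test ...
    #   monitor.stop_monitoring()
    #
    #   results = TestResults(
    #       test_name="baseline",
    #       start_time=monitor.start_time,
    #       end_time=datetime.now(),
    #       duration_seconds=(datetime.now() - monitor.start_time).total_seconds(),
    #       total_requests=0, total_failures=0,
    #       avg_response_time=0.0, median_response_time=0.0,
    #       p95_response_time=0.0, p99_response_time=0.0,
    #       requests_per_second=0.0, peak_rps=0.0,
    #       system_metrics=monitor.system_metrics,
    #       database_metrics=monitor.database_metrics,
    #       error_summary={},
    #   )
    #
    #   reporter = ReportGenerator()
    #   reporter.generate_html_report(results)
    #   reporter.generate_json_report(results)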