"""Performance metrics collector for CBCFacil."""

import time
import threading
import logging
from typing import Dict, Any, Optional
from datetime import datetime, timedelta
from contextlib import contextmanager

# psutil is an optional third-party dependency: degrade gracefully when it is
# missing so request/latency metrics still work without system metrics.
try:
    import psutil
except ImportError:  # pragma: no cover - environment without psutil
    psutil = None


class MetricsCollector:
    """Collect and aggregate request/latency metrics and process resource usage.

    Thread-safe: all mutable counters are guarded by a single (non-reentrant)
    lock, so no method may call another lock-taking method while holding it.
    """

    # Cap on retained latency samples so memory stays bounded.
    _MAX_LATENCY_SAMPLES = 1000

    def __init__(self) -> None:
        self.logger = logging.getLogger(__name__)
        self._start_time = time.time()
        self._request_count = 0
        self._error_count = 0
        self._total_latency = 0.0  # sum of all latencies, in seconds
        self._latencies: list = []  # recent latency samples (seconds)
        self._lock = threading.Lock()
        # Process handle for resource metrics; None when psutil is unavailable.
        self._process = psutil.Process() if psutil is not None else None

    def record_request(self, latency: float, success: bool = True) -> None:
        """Record one request's latency (seconds) and success/failure flag."""
        with self._lock:
            self._request_count += 1
            self._total_latency += latency
            self._latencies.append(latency)
            # Keep only the most recent samples for memory efficiency.
            if len(self._latencies) > self._MAX_LATENCY_SAMPLES:
                self._latencies = self._latencies[-self._MAX_LATENCY_SAMPLES:]
            if not success:
                self._error_count += 1

    def get_latency_percentiles(self) -> Dict[str, float]:
        """Return p50/p95/p99 latency (seconds) over the retained samples.

        Uses nearest-rank indexing into the sorted samples; int(n * p) < n
        for every n >= 1 and p < 1, so the index is always in range.
        """
        with self._lock:
            if not self._latencies:
                return {"p50": 0, "p95": 0, "p99": 0}
            sorted_latencies = sorted(self._latencies)
            n = len(sorted_latencies)
            return {
                "p50": sorted_latencies[int(n * 0.50)],
                "p95": sorted_latencies[int(n * 0.95)],
                "p99": sorted_latencies[int(n * 0.99)],
            }

    def get_system_metrics(self) -> Dict[str, Any]:
        """Return process resource metrics; empty dict when unavailable."""
        if self._process is None:
            # psutil not installed: system metrics cannot be collected.
            return {}
        try:
            memory = self._process.memory_info()
            cpu_percent = self._process.cpu_percent(interval=0.1)
            return {
                "cpu_percent": cpu_percent,
                "memory_rss_mb": memory.rss / 1024 / 1024,
                "memory_vms_mb": memory.vms / 1024 / 1024,
                "thread_count": self._process.num_threads(),
                # BUG FIX: report the count, not the raw list of
                # non-serializable popenfile objects psutil returns.
                "open_files": len(self._process.open_files()),
            }
        except Exception as e:
            self.logger.warning(f"Error getting system metrics: {e}")
            return {}

    def get_summary(self) -> Dict[str, Any]:
        """Return an aggregate metrics summary.

        BUG FIX: the original called get_latency_percentiles() while already
        holding self._lock; threading.Lock is not reentrant, so that call
        deadlocked. We snapshot the counters under the lock, release it, and
        only then compute the percentiles (which take the lock themselves).
        """
        with self._lock:
            uptime = time.time() - self._start_time
            request_count = self._request_count
            error_count = self._error_count
            total_latency = self._total_latency
        latency_pcts = self.get_latency_percentiles()
        return {
            "uptime_seconds": round(uptime, 2),
            "total_requests": request_count,
            "error_count": error_count,
            "error_rate": round(error_count / max(1, request_count) * 100, 2),
            "requests_per_second": round(request_count / max(1, uptime), 2),
            "average_latency_ms": round(
                total_latency / max(1, request_count) * 1000, 2
            ),
            "latency_p50_ms": round(latency_pcts["p50"] * 1000, 2),
            "latency_p95_ms": round(latency_pcts["p95"] * 1000, 2),
            "latency_p99_ms": round(latency_pcts["p99"] * 1000, 2),
        }

    def reset(self) -> None:
        """Reset all counters, samples, and the uptime clock."""
        with self._lock:
            self._request_count = 0
            self._error_count = 0
            self._total_latency = 0.0
            self._latencies = []
            self._start_time = time.time()


class LatencyTracker:
    """Context manager that records an operation's wall-clock latency.

    Records a failed request when the body raises; the exception itself is
    never suppressed.
    """

    def __init__(self, collector: MetricsCollector, operation: str):
        self.collector = collector
        self.operation = operation  # label only; not used by the collector yet
        self.start_time: Optional[float] = None
        self.success = True

    def __enter__(self):
        self.start_time = time.time()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        latency = time.time() - self.start_time
        success = exc_type is None
        self.collector.record_request(latency, success)
        return False  # Don't suppress exceptions


# Global metrics collector
metrics_collector = MetricsCollector()


@contextmanager
def track_latency(operation: str = "unknown"):
    """Convenience wrapper recording latency into the global collector."""
    with LatencyTracker(metrics_collector, operation):
        yield


def get_performance_report() -> Dict[str, Any]:
    """Generate a comprehensive performance report."""
    return {
        "metrics": metrics_collector.get_summary(),
        "system": metrics_collector.get_system_metrics(),
        # NOTE(review): datetime.utcnow() is deprecated since Python 3.12;
        # datetime.now(timezone.utc) would add a "+00:00" offset to the
        # string, so confirm consumers before changing the format.
        "timestamp": datetime.utcnow().isoformat(),
    }