diff --git a/.gitignore b/.gitignore
index 02eac69..5076f7b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,22 +1,13 @@
-### AL ###
-#Template for AL projects for Dynamics 365 Business Central
-#launch.json folder
-.vscode/
-#Cache folder
-.alcache/
-#Symbols folder
-.alpackages/
-#Snapshots folder
-.snapshots/
-#Testing Output folder
-.output/
-#Extension App-file
-*.app
-#Rapid Application Development File
-rad.json
-#Translation Base-file
-*.g.xlf
-#License-file
-*.flf
-#Test results file
-TestResults.xml
\ No newline at end of file
+__pycache__/
+*.py[cod]
+*$py.class
+.pytest_cache/
+.coverage
+htmlcov/
+dist/
+build/
+*.egg-info/
+.env
+.venv/
+venv/
+.DS_Store
\ No newline at end of file
diff --git a/src/alp/metrics/__init__.py b/src/alp/metrics/__init__.py
new file mode 100644
index 0000000..901bf96
--- /dev/null
+++ b/src/alp/metrics/__init__.py
@@ -0,0 +1 @@
+# Performance metrics package initialization
\ No newline at end of file
diff --git a/src/alp/metrics/performance.py b/src/alp/metrics/performance.py
new file mode 100644
index 0000000..1320a5a
--- /dev/null
+++ b/src/alp/metrics/performance.py
@@ -0,0 +1,86 @@
+from __future__ import annotations
+from dataclasses import dataclass, field
+from typing import Dict, Any, Optional
+import time
+import statistics
+
+
+@dataclass
+class PerformanceMetrics:
+    """
+    A comprehensive performance metrics collection class for ALP learning cycles.
+
+    Tracks various performance characteristics including:
+    - Execution times
+    - Iteration statistics
+    - Resource utilization
+    """
+
+    # Timing metrics
+    total_iterations: int = 0
+    total_execution_time: float = 0.0
+
+    # Per-iteration tracking
+    iteration_times: list[float] = field(default_factory=list)
+
+    # Performance statistics
+    _start_time: Optional[float] = None
+
+    def start_iteration(self) -> None:
+        """
+        Mark the start of a learning iteration.
+        """
+        self._start_time = time.perf_counter()
+        self.total_iterations += 1
+
+    def end_iteration(self) -> float:
+        """
+        Mark the end of a learning iteration and calculate iteration time.
+
+        Returns:
+            float: Duration of the iteration in seconds
+        """
+        if self._start_time is None:
+            raise RuntimeError("Iteration not started. Call start_iteration() first.")
+
+        iteration_time = time.perf_counter() - self._start_time
+        self.iteration_times.append(iteration_time)
+        self.total_execution_time += iteration_time
+
+        self._start_time = None
+        return iteration_time
+
+    def get_performance_summary(self) -> Dict[str, Any]:
+        """
+        Generate a comprehensive performance summary.
+
+        Returns:
+            Dict containing performance metrics
+        """
+        if not self.iteration_times:
+            return {
+                "total_iterations": self.total_iterations,
+                "total_execution_time": self.total_execution_time,
+                "avg_iteration_time": 0.0,
+                "min_iteration_time": 0.0,
+                "max_iteration_time": 0.0,
+                "iteration_time_std_dev": 0.0
+            }
+
+        return {
+            "total_iterations": self.total_iterations,
+            "total_execution_time": self.total_execution_time,
+            "avg_iteration_time": statistics.mean(self.iteration_times),
+            "min_iteration_time": min(self.iteration_times),
+            "max_iteration_time": max(self.iteration_times),
+            "iteration_time_std_dev": statistics.stdev(self.iteration_times) if len(self.iteration_times) > 1 else 0.0
+        }
+
+    def reset(self) -> None:
+        """
+        Reset all performance metrics to their initial state.
+ """ + self.total_iterations = 0 + self.total_execution_time = 0.0 + self.iteration_times.clear() + self._start_time = None \ No newline at end of file diff --git a/tests/alp/metrics/test_performance.py b/tests/alp/metrics/test_performance.py new file mode 100644 index 0000000..600aad8 --- /dev/null +++ b/tests/alp/metrics/test_performance.py @@ -0,0 +1,86 @@ +import time +import pytest +from src.alp.metrics.performance import PerformanceMetrics + + +def test_performance_metrics_basic_flow(): + """ + Test the basic flow of performance metrics tracking. + """ + metrics = PerformanceMetrics() + + # Simulate multiple iterations + for _ in range(5): + metrics.start_iteration() + time.sleep(0.1) # Simulate some work + metrics.end_iteration() + + summary = metrics.get_performance_summary() + + assert summary['total_iterations'] == 5 + assert 0.6 > summary['total_execution_time'] > 0.4 # Allow for some variance + assert 0.12 > summary['avg_iteration_time'] > 0.08 # Relaxed time check + assert 0.12 > summary['max_iteration_time'] > 0.08 + assert 0.12 > summary['min_iteration_time'] > 0.08 + + +def test_performance_metrics_reset(): + """ + Test the reset functionality of performance metrics. + """ + metrics = PerformanceMetrics() + + # Simulate multiple iterations + for _ in range(3): + metrics.start_iteration() + time.sleep(0.1) + metrics.end_iteration() + + metrics.reset() + + summary = metrics.get_performance_summary() + + assert summary['total_iterations'] == 0 + assert summary['total_execution_time'] == 0.0 + + +def test_performance_metrics_no_iterations(): + """ + Test performance summary when no iterations have occurred. + """ + metrics = PerformanceMetrics() + + summary = metrics.get_performance_summary() + + assert summary['total_iterations'] == 0 + assert summary['total_execution_time'] == 0.0 + assert summary['avg_iteration_time'] == 0.0 + assert summary['min_iteration_time'] == 0.0 + assert summary['max_iteration_time'] == 0.0 + + +def test_performance_metrics_error_handling(): + """ + Test error handling for incorrect method calls. + """ + metrics = PerformanceMetrics() + + with pytest.raises(RuntimeError): + metrics.end_iteration() + + +def test_performance_metrics_single_iteration(): + """ + Test performance metrics for a single iteration. + """ + metrics = PerformanceMetrics() + + metrics.start_iteration() + time.sleep(0.05) + iteration_time = metrics.end_iteration() + + summary = metrics.get_performance_summary() + + assert summary['total_iterations'] == 1 + assert 0.07 > iteration_time > 0.03 # Wider time window + assert summary['iteration_time_std_dev'] == 0.0 \ No newline at end of file