35 changes: 13 additions & 22 deletions .gitignore
@@ -1,22 +1,13 @@
### AL ###
#Template for AL projects for Dynamics 365 Business Central
#launch.json folder
.vscode/
#Cache folder
.alcache/
#Symbols folder
.alpackages/
#Snapshots folder
.snapshots/
#Testing Output folder
.output/
#Extension App-file
*.app
#Rapid Application Development File
rad.json
#Translation Base-file
*.g.xlf
#License-file
*.flf
#Test results file
TestResults.xml
__pycache__/
*.py[cod]
*$py.class
.pytest_cache/
.coverage
htmlcov/
dist/
build/
*.egg-info/
.env
.venv/
venv/
.DS_Store
1 change: 1 addition & 0 deletions src/alp/metrics/__init__.py
@@ -0,0 +1 @@
# Performance metrics package initialization
86 changes: 86 additions & 0 deletions src/alp/metrics/performance.py
@@ -0,0 +1,86 @@
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Dict, Any, Optional
import time
import statistics


@dataclass
class PerformanceMetrics:
"""
A comprehensive performance metrics collection class for ALP learning cycles.

Tracks various performance characteristics including:
- Execution times
- Iteration statistics
- Resource utilization
"""

# Timing metrics
total_iterations: int = 0
total_execution_time: float = 0.0

# Per-iteration tracking
iteration_times: list[float] = field(default_factory=list)

# Performance statistics
_start_time: Optional[float] = None

def start_iteration(self) -> None:
"""
Mark the start of a learning iteration.
"""
self._start_time = time.perf_counter()
self.total_iterations += 1

def end_iteration(self) -> float:
"""
Mark the end of a learning iteration and calculate iteration time.

Returns:
float: Duration of the iteration in seconds
"""
if self._start_time is None:
raise RuntimeError("Iteration not started. Call start_iteration() first.")

iteration_time = time.perf_counter() - self._start_time
self.iteration_times.append(iteration_time)
self.total_execution_time += iteration_time

self._start_time = None
return iteration_time

def get_performance_summary(self) -> Dict[str, Any]:
"""
Generate a comprehensive performance summary.

Returns:
Dict containing performance metrics
"""
if not self.iteration_times:
return {
"total_iterations": self.total_iterations,
"total_execution_time": self.total_execution_time,
"avg_iteration_time": 0.0,
"min_iteration_time": 0.0,
"max_iteration_time": 0.0,
"iteration_time_std_dev": 0.0
}

return {
"total_iterations": self.total_iterations,
"total_execution_time": self.total_execution_time,
"avg_iteration_time": statistics.mean(self.iteration_times),
"min_iteration_time": min(self.iteration_times),
"max_iteration_time": max(self.iteration_times),
"iteration_time_std_dev": statistics.stdev(self.iteration_times) if len(self.iteration_times) > 1 else 0.0
}

def reset(self) -> None:
"""
Reset all performance metrics to their initial state.
"""
self.total_iterations = 0
self.total_execution_time = 0.0
self.iteration_times.clear()
self._start_time = None
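
For reviewers, a minimal usage sketch of the new API (not part of the diff; it imports via the same src.alp.metrics.performance path used by the tests below and assumes the repository root is on sys.path):

# Sketch only: exercise PerformanceMetrics as the tests do, assuming the
# repository root is on sys.path so the src. package path resolves.
import time

from src.alp.metrics.performance import PerformanceMetrics

metrics = PerformanceMetrics()
for step in range(3):
    metrics.start_iteration()
    time.sleep(0.01)                    # stand-in for one learning iteration
    duration = metrics.end_iteration()  # seconds for this iteration
    print(f"iteration {step}: {duration:.4f}s")

summary = metrics.get_performance_summary()
print(summary["avg_iteration_time"], summary["iteration_time_std_dev"])

Because the class times iterations with time.perf_counter(), a monotonic clock, the recorded durations are unaffected by system clock adjustments.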
86 changes: 86 additions & 0 deletions tests/alp/metrics/test_performance.py
@@ -0,0 +1,86 @@
import time
import pytest
from src.alp.metrics.performance import PerformanceMetrics


def test_performance_metrics_basic_flow():
    """
    Test the basic flow of performance metrics tracking.
    """
    metrics = PerformanceMetrics()

    # Simulate multiple iterations
    for _ in range(5):
        metrics.start_iteration()
        time.sleep(0.1)  # Simulate some work
        metrics.end_iteration()

    summary = metrics.get_performance_summary()

    assert summary['total_iterations'] == 5
    assert 0.4 < summary['total_execution_time'] < 0.6  # Allow for some variance
    assert 0.08 < summary['avg_iteration_time'] < 0.12  # Relaxed time check
    assert 0.08 < summary['max_iteration_time'] < 0.12
    assert 0.08 < summary['min_iteration_time'] < 0.12


def test_performance_metrics_reset():
    """
    Test the reset functionality of performance metrics.
    """
    metrics = PerformanceMetrics()

    # Simulate multiple iterations
    for _ in range(3):
        metrics.start_iteration()
        time.sleep(0.1)
        metrics.end_iteration()

    metrics.reset()

    summary = metrics.get_performance_summary()

    assert summary['total_iterations'] == 0
    assert summary['total_execution_time'] == 0.0


def test_performance_metrics_no_iterations():
    """
    Test the performance summary when no iterations have occurred.
    """
    metrics = PerformanceMetrics()

    summary = metrics.get_performance_summary()

    assert summary['total_iterations'] == 0
    assert summary['total_execution_time'] == 0.0
    assert summary['avg_iteration_time'] == 0.0
    assert summary['min_iteration_time'] == 0.0
    assert summary['max_iteration_time'] == 0.0


def test_performance_metrics_error_handling():
    """
    Test that end_iteration() raises when called before start_iteration().
    """
    metrics = PerformanceMetrics()

    with pytest.raises(RuntimeError):
        metrics.end_iteration()


def test_performance_metrics_single_iteration():
    """
    Test performance metrics for a single iteration.
    """
    metrics = PerformanceMetrics()

    metrics.start_iteration()
    time.sleep(0.05)
    iteration_time = metrics.end_iteration()

    summary = metrics.get_performance_summary()

    assert summary['total_iterations'] == 1
    assert 0.03 < iteration_time < 0.07  # Wider time window
    assert summary['iteration_time_std_dev'] == 0.0
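
A note on running the new suite: the tests import via src.alp.metrics.performance, so they assume the repository root is on sys.path (for example when pytest is launched from the root with python -m pytest). A minimal programmatic invocation, under that same assumption:

# Sketch: run the new test module programmatically; assumes pytest is
# installed and the working directory is the repository root.
import sys

import pytest

sys.exit(pytest.main(["tests/alp/metrics/test_performance.py", "-q"]))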