33 changes: 11 additions & 22 deletions .gitignore
@@ -1,22 +1,11 @@
### AL ###
#Template for AL projects for Dynamics 365 Business Central
#launch.json folder
.vscode/
#Cache folder
.alcache/
#Symbols folder
.alpackages/
#Snapshots folder
.snapshots/
#Testing Output folder
.output/
#Extension App-file
*.app
#Rapid Application Development File
rad.json
#Translation Base-file
*.g.xlf
#License-file
*.flf
#Test results file
TestResults.xml
__pycache__/
*.py[cod]
*$py.class
.pytest_cache/
.coverage
htmlcov/
dist/
build/
*.egg-info/
.env
.venv/
1 change: 1 addition & 0 deletions src/__init__.py
@@ -0,0 +1 @@
# ALP Metrics Collection System
110 changes: 110 additions & 0 deletions src/metrics_collector.py
@@ -0,0 +1,110 @@
from typing import Any, Optional
from dataclasses import dataclass, field
from enum import Enum, auto
import time
import logging

class MetricType(Enum):
    """Enum representing different types of metrics."""
    PERFORMANCE = auto()
    RESOURCE_USAGE = auto()
    ERROR_RATE = auto()
    LEARNING_PROGRESS = auto()

@dataclass
class MetricRecord:
    """Represents a single metric record with timestamp and value."""
    value: Any
    timestamp: float = field(default_factory=time.time)
    type: MetricType = MetricType.PERFORMANCE

class PerformanceMetricsCollector:
    """
    A lightweight, in-memory metrics collection system for tracking
    performance characteristics of the Adaptive Learning Process (ALP).

    Supports multiple metric types, basic statistical tracking, and
    configurable logging. Not synchronized for multi-threaded use.
    """

    def __init__(self, max_history: int = 100, logger: Optional[logging.Logger] = None):
        """
        Initialize the metrics collector.

        Args:
            max_history (int): Maximum number of historical records to keep per metric.
            logger (Optional[logging.Logger]): Optional logger for recording metrics.
        """
        self._metrics: dict[str, list[MetricRecord]] = {}
        self._max_history = max_history
        self._logger = logger or logging.getLogger(__name__)

    def record_metric(self, name: str, value: Any, metric_type: MetricType = MetricType.PERFORMANCE):
        """
        Record a new metric value.

        Args:
            name (str): Name of the metric.
            value (Any): Value of the metric.
            metric_type (MetricType): Type of metric being recorded.
        """
        metric_record = MetricRecord(value=value, type=metric_type)

        if name not in self._metrics:
            self._metrics[name] = []

        # Maintain max history
        if len(self._metrics[name]) >= self._max_history:
            self._metrics[name].pop(0)

        self._metrics[name].append(metric_record)

        # Optional logging
        self._logger.debug(f"Metric recorded: {name} = {value}")

    def get_metric_history(self, name: str) -> list[MetricRecord]:
        """
        Retrieve the historical records for a specific metric.

        Args:
            name (str): Name of the metric.

        Returns:
            list[MetricRecord]: Historical records for the metric.
        """
        return self._metrics.get(name, [])

    def get_latest_metric(self, name: str) -> Optional[MetricRecord]:
        """
        Get the most recent record for a specific metric.

        Args:
            name (str): Name of the metric.

        Returns:
            Optional[MetricRecord]: Most recent metric record, or None if not found.
        """
        history = self.get_metric_history(name)
        return history[-1] if history else None

    def calculate_metric_average(self, name: str) -> Optional[float]:
        """
        Calculate the average value for a given metric.

        Args:
            name (str): Name of the metric.

        Returns:
            Optional[float]: Average value of the metric, or None if no records.
        """
        history = self.get_metric_history(name)

        if not history:
            return None

        # Try to calculate average, handling potential type mismatches
        try:
            return sum(record.value for record in history) / len(history)
        except (TypeError, ValueError):
            self._logger.warning(f"Cannot calculate average for metric {name}")
            return None
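
A minimal usage sketch (not part of this PR; the metric names below are illustrative) showing how the collector is intended to be driven:

from src.metrics_collector import PerformanceMetricsCollector, MetricType

# Keep at most 50 records per metric name
collector = PerformanceMetricsCollector(max_history=50)

# Record values; the metric type defaults to MetricType.PERFORMANCE
collector.record_metric("episode_reward", 12.5)
collector.record_metric("episode_reward", 14.0)
collector.record_metric("memory_mb", 512.0, MetricType.RESOURCE_USAGE)

latest = collector.get_latest_metric("episode_reward")          # MetricRecord with value 14.0
average = collector.calculate_metric_average("episode_reward")  # (12.5 + 14.0) / 2 == 13.25
print(latest.value, average)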
1 change: 1 addition & 0 deletions tests/__init__.py
@@ -0,0 +1 @@
# Tests for ALP Metrics Collection System
73 changes: 73 additions & 0 deletions tests/test_metrics_collector.py
@@ -0,0 +1,73 @@
import pytest
import time
from src.metrics_collector import PerformanceMetricsCollector, MetricType

def test_metrics_collector_basic_recording():
    """Test basic metric recording functionality."""
    collector = PerformanceMetricsCollector()

    # Record a performance metric
    collector.record_metric("learning_rate", 0.01)

    # Verify metric was recorded
    history = collector.get_metric_history("learning_rate")
    assert len(history) == 1
    assert history[0].value == 0.01

def test_metrics_collector_max_history():
    """Test max history limitation."""
    collector = PerformanceMetricsCollector(max_history=3)

    # Record more than max history
    for i in range(5):
        collector.record_metric("test_metric", i)

    # Verify only last 3 records are kept
    history = collector.get_metric_history("test_metric")
    assert len(history) == 3
    assert [record.value for record in history] == [2, 3, 4]

def test_latest_metric_retrieval():
    """Test retrieving the latest metric."""
    collector = PerformanceMetricsCollector()

    collector.record_metric("accuracy", 0.75)
    collector.record_metric("accuracy", 0.80)

    latest = collector.get_latest_metric("accuracy")
    assert latest is not None
    assert latest.value == 0.80

def test_metric_average_calculation():
    """Test average calculation for metrics."""
    collector = PerformanceMetricsCollector()

    collector.record_metric("error_rate", 0.1)
    collector.record_metric("error_rate", 0.2)
    collector.record_metric("error_rate", 0.3)

    avg = collector.calculate_metric_average("error_rate")
    assert avg == pytest.approx(0.2)  # Use approx for floating-point comparison

def test_metric_types():
    """Test recording different metric types."""
    collector = PerformanceMetricsCollector()

    collector.record_metric("cpu_usage", 50.0, MetricType.RESOURCE_USAGE)

    latest = collector.get_latest_metric("cpu_usage")
    assert latest is not None
    assert latest.type == MetricType.RESOURCE_USAGE

def test_non_existent_metric():
    """Test behavior with non-existent metrics."""
    collector = PerformanceMetricsCollector()

    history = collector.get_metric_history("non_existent")
    assert len(history) == 0

    latest = collector.get_latest_metric("non_existent")
    assert latest is None

    avg = collector.calculate_metric_average("non_existent")
    assert avg is None
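
One possible follow-up test, not included in this PR, would exercise the non-numeric fallback in calculate_metric_average, which logs a warning and returns None when the recorded values cannot be summed:

def test_non_numeric_average_returns_none():
    """Averaging non-numeric values should fall back to None (hypothetical follow-up test)."""
    collector = PerformanceMetricsCollector()

    collector.record_metric("status", "healthy")
    collector.record_metric("status", "degraded")

    # sum() raises TypeError for string values, so the collector returns None
    assert collector.calculate_metric_average("status") is None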