36 changes: 14 additions & 22 deletions .gitignore
@@ -1,22 +1,14 @@
-### AL ###
-#Template for AL projects for Dynamics 365 Business Central
-#launch.json folder
-.vscode/
-#Cache folder
-.alcache/
-#Symbols folder
-.alpackages/
-#Snapshots folder
-.snapshots/
-#Testing Output folder
-.output/
-#Extension App-file
-*.app
-#Rapid Application Development File
-rad.json
-#Translation Base-file
-*.g.xlf
-#License-file
-*.flf
-#Test results file
-TestResults.xml
+__pycache__/
+*.py[cod]
+*$py.class
+.pytest_cache/
+.coverage
+htmlcov/
+.env
+.venv/
+venv/
+env/
+dist/
+build/
+*.egg-info/
+.DS_Store
138 changes: 138 additions & 0 deletions src/logging_schema.py
@@ -0,0 +1,138 @@
from dataclasses import dataclass, field
from datetime import datetime, timezone
from typing import Dict, Any, Optional, List
from enum import Enum, auto
import json
import uuid

class IterationStatus(Enum):
    """Enumeration of possible iteration statuses."""
    STARTED = auto()
    IN_PROGRESS = auto()
    SUCCESS = auto()
    FAILED = auto()
    TERMINATED = auto()

@dataclass
class IterationMetrics:
    """
    Comprehensive metrics tracking for each ALP loop iteration.

    Captures detailed performance and state information during learning iterations.
    """
    iteration_id: str = field(default_factory=lambda: str(uuid.uuid4()))
    timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))  # aware UTC; datetime.utcnow is deprecated
    status: IterationStatus = IterationStatus.STARTED

    # Performance metrics
    duration_ms: float = 0.0
    computational_resources: Dict[str, float] = field(default_factory=dict)

    # Learning process details
    learning_rate: Optional[float] = None
    loss: Optional[float] = None
    accuracy: Optional[float] = None

    # Errors and exceptions
    error_details: Optional[Dict[str, Any]] = None

    # Additional configuration and context
    configuration: Dict[str, Any] = field(default_factory=dict)

    def update_status(self, status: IterationStatus) -> None:
        """Update the current iteration status."""
        self.status = status

    def record_performance(self, duration: float, resources: Dict[str, float]) -> None:
        """Record performance metrics for the iteration (duration in milliseconds)."""
        self.duration_ms = duration
        self.computational_resources = resources

    def record_learning_metrics(
        self,
        learning_rate: Optional[float] = None,
        loss: Optional[float] = None,
        accuracy: Optional[float] = None
    ) -> None:
        """Record key learning metrics."""
        self.learning_rate = learning_rate
        self.loss = loss
        self.accuracy = accuracy

    def record_error(self, error_details: Dict[str, Any]) -> None:
        """Record error details and mark the iteration as failed."""
        self.status = IterationStatus.FAILED
        self.error_details = error_details

    def to_dict(self) -> Dict[str, Any]:
        """Convert iteration metrics to a dictionary representation."""
        return {
            'iteration_id': self.iteration_id,
            'timestamp': self.timestamp.isoformat(),
            'status': self.status.name,
            'duration_ms': self.duration_ms,
            'computational_resources': self.computational_resources,
            'learning_rate': self.learning_rate,
            'loss': self.loss,
            'accuracy': self.accuracy,
            'error_details': self.error_details,
            'configuration': self.configuration
        }

    def to_json(self) -> str:
        """Serialize iteration metrics to a JSON string."""
        return json.dumps(self.to_dict(), default=str)

class ALPLoggingManager:
    """
    Manages logging and tracking of ALP loop iterations.

    Provides comprehensive tracking and persistence of iteration metrics.
    """
    def __init__(self, max_iterations: int = 100):
        """
        Initialize the logging manager.

        Args:
            max_iterations: Maximum number of iterations to track
        """
        self.max_iterations: int = max_iterations
        self.iterations: List[IterationMetrics] = []

    def start_iteration(self, configuration: Optional[Dict[str, Any]] = None) -> IterationMetrics:
        """
        Start a new iteration and return the metrics tracker.

        Args:
            configuration: Optional configuration for the iteration

        Returns:
            IterationMetrics instance
        """
        if len(self.iterations) >= self.max_iterations:
            self.iterations.pop(0)  # evict the oldest iteration

        iteration = IterationMetrics()
        if configuration:
            iteration.configuration = configuration

        self.iterations.append(iteration)
        return iteration

    def get_latest_iteration(self) -> Optional[IterationMetrics]:
        """
        Retrieve the most recent iteration metrics.

        Returns:
            The latest IterationMetrics or None if no iterations exist
        """
        return self.iterations[-1] if self.iterations else None

    def get_all_iterations(self) -> List[IterationMetrics]:
        """
        Retrieve all tracked iterations.

        Returns:
            List of all IterationMetrics
        """
        return self.iterations.copy()
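
Taken together, ALPLoggingManager and IterationMetrics give a bounded, in-memory log of ALP loop runs. A minimal usage sketch of the intended flow follows; the configuration keys, metric values, and the training step itself are illustrative assumptions, not part of this change:

from src.logging_schema import ALPLoggingManager, IterationStatus
import time

manager = ALPLoggingManager(max_iterations=50)

# Hypothetical configuration for one learning iteration
iteration = manager.start_iteration(configuration={'batch_size': 32})
start = time.perf_counter()
try:
    iteration.update_status(IterationStatus.IN_PROGRESS)
    # ... run one ALP learning step here ...
    iteration.record_learning_metrics(learning_rate=0.01, loss=0.42, accuracy=0.9)
    iteration.update_status(IterationStatus.SUCCESS)
except Exception as exc:
    iteration.record_error({'type': type(exc).__name__, 'message': str(exc)})
finally:
    iteration.record_performance(
        duration=(time.perf_counter() - start) * 1000.0,  # milliseconds
        resources={'cpu_usage': 0.0},  # placeholder; real resource sampling is out of scope
    )

print(manager.get_latest_iteration().to_json())

Because start_iteration evicts the oldest entry once max_iterations is reached, long-running loops keep memory bounded at the cost of losing early history.
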
96 changes: 96 additions & 0 deletions tests/test_logging_schema.py
@@ -0,0 +1,96 @@
import pytest
import json
from datetime import datetime
from src.logging_schema import IterationMetrics, IterationStatus, ALPLoggingManager

def test_iteration_metrics_initialization():
    """Test basic initialization of IterationMetrics."""
    metrics = IterationMetrics()

    assert metrics.iteration_id is not None
    assert isinstance(metrics.timestamp, datetime)
    assert metrics.status == IterationStatus.STARTED
    assert metrics.duration_ms == 0.0

def test_iteration_metrics_update_status():
    """Test updating iteration status."""
    metrics = IterationMetrics()
    metrics.update_status(IterationStatus.IN_PROGRESS)

    assert metrics.status == IterationStatus.IN_PROGRESS

def test_iteration_metrics_record_performance():
    """Test recording performance metrics."""
    metrics = IterationMetrics()
    performance_data = {
        'cpu_usage': 0.75,
        'memory_usage': 0.6
    }
    metrics.record_performance(duration=123.45, resources=performance_data)

    assert metrics.duration_ms == 123.45
    assert metrics.computational_resources == performance_data

def test_iteration_metrics_record_learning_metrics():
    """Test recording learning metrics."""
    metrics = IterationMetrics()
    metrics.record_learning_metrics(
        learning_rate=0.01,
        loss=0.5,
        accuracy=0.95
    )

    assert metrics.learning_rate == 0.01
    assert metrics.loss == 0.5
    assert metrics.accuracy == 0.95

def test_iteration_metrics_to_dict():
    """Test conversion of metrics to dictionary."""
    metrics = IterationMetrics()
    metrics_dict = metrics.to_dict()

    assert 'iteration_id' in metrics_dict
    assert 'timestamp' in metrics_dict
    assert 'status' in metrics_dict
    assert metrics_dict['status'] == 'STARTED'

def test_iteration_metrics_to_json():
    """Test JSON serialization of metrics."""
    metrics = IterationMetrics()
    metrics_json = metrics.to_json()

    parsed_json = json.loads(metrics_json)
    assert 'iteration_id' in parsed_json
    assert 'timestamp' in parsed_json
    assert 'status' in parsed_json

def test_alp_logging_manager_initialization():
    """Test ALPLoggingManager initialization."""
    manager = ALPLoggingManager(max_iterations=5)

    assert manager.max_iterations == 5
    assert len(manager.iterations) == 0

def test_alp_logging_manager_start_iteration():
    """Test starting iterations in the logging manager."""
    manager = ALPLoggingManager(max_iterations=3)

    # Start 3 iterations
    iteration1 = manager.start_iteration()
    iteration2 = manager.start_iteration()
    iteration3 = manager.start_iteration()

    assert manager.iterations == [iteration1, iteration2, iteration3]
    assert manager.get_latest_iteration() is iteration3

def test_alp_logging_manager_max_iterations_limit():
    """Test that the max_iterations limit is respected."""
    manager = ALPLoggingManager(max_iterations=3)

    # Starting a fourth iteration should evict the oldest one
    first = manager.start_iteration()
    manager.start_iteration()
    manager.start_iteration()
    manager.start_iteration()
    assert len(manager.iterations) == 3
    assert first not in manager.iterations
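
One path the suite does not exercise is record_error. A sketch of such a test, reusing the module's existing imports; the error payload is an illustrative assumption:

def test_iteration_metrics_record_error():
    """Sketch: record_error should mark the iteration as failed."""
    metrics = IterationMetrics()
    error_details = {'type': 'ValueError', 'message': 'bad input'}  # illustrative payload
    metrics.record_error(error_details)

    assert metrics.status == IterationStatus.FAILED
    assert metrics.error_details == error_details
    # Failed iterations should still serialize cleanly
    assert json.loads(metrics.to_json())['status'] == 'FAILED'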