35 changes: 14 additions & 21 deletions .gitignore
@@ -1,22 +1,15 @@
-### AL ###
-#Template for AL projects for Dynamics 365 Business Central
-#launch.json folder
+__pycache__/
+*.py[cod]
+*$py.class
+.pytest_cache/
+.coverage
+htmlcov/
+dist/
+build/
+*.egg-info/
+.env
+.venv
+venv/
+.idea/
+.vscode/
-#Cache folder
-.alcache/
-#Symbols folder
-.alpackages/
-#Snapshots folder
-.snapshots/
-#Testing Output folder
-.output/
-#Extension App-file
-*.app
-#Rapid Application Development File
-rad.json
-#Translation Base-file
-*.g.xlf
-#License-file
-*.flf
-#Test results file
-TestResults.xml
 *.log
147 changes: 147 additions & 0 deletions src/alp_loop.py
@@ -0,0 +1,147 @@
from abc import ABC, abstractmethod
from typing import Any, Dict, Optional, List
import logging
from dataclasses import dataclass, field

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

@dataclass
class LoopConfiguration:
    """Configuration class for ALP Loop parameters."""
    max_iterations: int = 100
    learning_rate: float = 0.01
    convergence_threshold: float = 1e-5
    logging_level: int = logging.INFO
    early_stopping: bool = True
    additional_params: Dict[str, Any] = field(default_factory=dict)

class ALPLoopBase(ABC):
    """
    Abstract Base Class for the Adaptive Learning Process (ALP) loop mechanism.

    Provides a standardized template for implementing iterative learning cycles
    with robust configuration, tracking, and error handling.

    Key Features:
    - Configurable learning parameters
    - Iteration tracking and management
    - Comprehensive logging
    - Flexible error handling
    - Performance metrics collection
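
    Example (a minimal usage sketch; ``LinearLoop`` is hypothetical and
    not part of this module):

        class LinearLoop(ALPLoopBase):
            def initialize(self):
                pass  # no state needed for this toy metric

            def learning_iteration(self):
                # Grows by learning_rate per step, so the convergence
                # check never fires and the loop runs to max_iterations.
                return self.current_iteration * self.config.learning_rate

        results = LinearLoop().run()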
"""

    def __init__(self, config: Optional[LoopConfiguration] = None):
        """
        Initialize the ALP Loop with configuration.

        Args:
            config (Optional[LoopConfiguration]): Configuration for the learning loop.
        """
        self.config = config or LoopConfiguration()
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logger.setLevel(self.config.logging_level)

        # Iteration tracking
        self.current_iteration = 0
        self.best_performance = float('-inf')
        self.iteration_history: List[Dict[str, Any]] = []

    @abstractmethod
    def initialize(self) -> None:
        """
        Initialize learning process, models, or data structures.
        Must be implemented by subclasses.
        """

    @abstractmethod
    def learning_iteration(self) -> float:
        """
        Perform a single learning iteration.
        Must be implemented by subclasses (enforced at instantiation
        by the abstractmethod decorator).

        Returns:
            float: Performance metric for the current iteration.
        """

    def run(self) -> Dict[str, Any]:
        """
        Execute the main learning loop with comprehensive error handling.

        Returns:
            Dict[str, Any]: Final learning results and metadata.
        """
        try:
            self.initialize()

            while not self._should_terminate():
                performance = self.learning_iteration()

                # Track iteration history
                iteration_data = {
                    'iteration': self.current_iteration,
                    'performance': performance
                }
                self.iteration_history.append(iteration_data)

                # Update best performance
                if performance > self.best_performance:
                    self.best_performance = performance

                # Log before incrementing so the reported iteration
                # number matches the entry just recorded.
                self.logger.info(
                    f"Iteration {self.current_iteration}: "
                    f"Performance = {performance}"
                )
                self.current_iteration += 1

            return self._finalize()

        except Exception as e:
            self.logger.error(f"Learning process failed: {e}")
            raise

    def _should_terminate(self) -> bool:
        """
        Determine if the learning loop should terminate.

        Returns:
            bool: Whether to stop the learning process.
        """
        conditions = [
            # Hard cap on the number of iterations
            self.current_iteration >= self.config.max_iterations,
            # Convergence: successive performances differ by less than
            # the threshold (requires at least two recorded iterations)
            (self.config.early_stopping and
             len(self.iteration_history) > 1 and
             abs(self.iteration_history[-1]['performance'] -
                 self.iteration_history[-2]['performance']) <
             self.config.convergence_threshold)
        ]

        return any(conditions)

    def _finalize(self) -> Dict[str, Any]:
        """
        Finalize the learning process and collect results.

        Returns:
            Dict[str, Any]: Comprehensive learning results.
        """
        results = {
            'total_iterations': self.current_iteration,
            'best_performance': self.best_performance,
            'iteration_history': self.iteration_history
        }

        self.logger.info(f"Learning process completed. Results: {results}")
        return results

    def reset(self) -> None:
        """
        Reset the learning loop to its initial state.
        """
        self.current_iteration = 0
        self.best_performance = float('-inf')
        self.iteration_history.clear()
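
As a quick sanity check of the loop mechanics, a runnable sketch like the one below could be appended to the module (illustrative only; `DecayLoop` and its halving-gain metric are hypothetical, not part of this PR):

class DecayLoop(ALPLoopBase):
    """Toy subclass whose performance gains halve each iteration."""

    def initialize(self) -> None:
        self.performance = 0.0
        self.gain = 1.0

    def learning_iteration(self) -> float:
        # Successive performances differ by a shrinking gain, so the
        # convergence check in _should_terminate eventually fires.
        self.gain *= 0.5
        self.performance += self.gain
        return self.performance

if __name__ == "__main__":
    results = DecayLoop(LoopConfiguration(max_iterations=30)).run()
    print(results['total_iterations'], results['best_performance'])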
56 changes: 56 additions & 0 deletions tests/test_alp_loop.py
@@ -0,0 +1,56 @@
import pytest
from src.alp_loop import ALPLoopBase, LoopConfiguration

# Named "Dummy" rather than "Test" so pytest does not try to collect
# this helper class (it has an inherited __init__ constructor).
class DummyALPLoop(ALPLoopBase):
    def initialize(self):
        self.test_data = [1, 2, 3, 4, 5]

    def learning_iteration(self):
        # Simulate a learning process that increases performance
        return self.current_iteration * 0.1

def test_alp_loop_initialization():
    default_config = LoopConfiguration()
    loop = DummyALPLoop(default_config)

    assert loop.config.max_iterations == 100
    assert loop.config.learning_rate == 0.01
    assert loop.current_iteration == 0

def test_custom_configuration():
    custom_config = LoopConfiguration(
        max_iterations=50,
        learning_rate=0.05,
        convergence_threshold=1e-3
    )
    loop = DummyALPLoop(custom_config)

    assert loop.config.max_iterations == 50
    assert loop.config.learning_rate == 0.05
    assert loop.config.convergence_threshold == 1e-3

def test_loop_run():
    loop = DummyALPLoop()
    results = loop.run()

    assert 'total_iterations' in results
    assert 'best_performance' in results
    assert 'iteration_history' in results
    assert results['total_iterations'] <= 100
    assert len(results['iteration_history']) > 0

def test_loop_reset():
    loop = DummyALPLoop()
    loop.run()
    loop.reset()

    assert loop.current_iteration == 0
    assert loop.best_performance == float('-inf')
    assert len(loop.iteration_history) == 0

def test_early_stopping_config():
    config = LoopConfiguration(early_stopping=False)
    loop = DummyALPLoop(config)
    results = loop.run()

    assert results['total_iterations'] == 100
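
One path the suite above does not exercise is convergence-based early stopping; a sketch of such a test (with a hypothetical constant-metric `PlateauLoop`) might look like:

class PlateauLoop(ALPLoopBase):
    def initialize(self):
        pass

    def learning_iteration(self):
        # Constant metric: successive performances differ by 0.0,
        # which is below any positive convergence_threshold.
        return 1.0

def test_convergence_early_stopping():
    results = PlateauLoop().run()
    # The history needs two entries before the delta can be compared,
    # so the loop stops after exactly two iterations.
    assert results['total_iterations'] == 2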