75 changes: 54 additions & 21 deletions .gitignore
@@ -1,22 +1,55 @@
### AL ###
#Template for AL projects for Dynamics 365 Business Central
#launch.json folder
# Python files
__pycache__/
*.py[cod]
*$py.class

# Distribution / packaging
dist/
build/
*.egg-info/

# Virtual environments
venv/
env/
.venv/

# IDEs and editors
.vscode/
.idea/
*.swp
*.swo

#Cache folder
.alcache/
#Symbols folder
.alpackages/
#Snapshots folder
.snapshots/
#Testing Output folder
.output/
#Extension App-file
*.app
#Rapid Application Development File
rad.json
#Translation Base-file
*.g.xlf
#License-file
*.flf
#Test results file
TestResults.xml

# Jupyter Notebook
.ipynb_checkpoints

# Configuration files
config.json
.env

# Logs
*.log

# Testing
.pytest_cache/
htmlcov/
.coverage
.mypy_cache/

# OS generated files
.DS_Store
Thumbs.db
116 changes: 116 additions & 0 deletions src/alp_config.py
@@ -0,0 +1,116 @@
from enum import Enum
from typing import Optional, List, Dict, Any, Union
from pydantic import BaseModel, Field, field_validator, ConfigDict


class LearningAlgorithm(str, Enum):
"""Enumeration of supported learning algorithms."""
GRADIENT_DESCENT = "gradient_descent"
ADAM = "adam"
SGD = "stochastic_gradient_descent"
REINFORCEMENT = "reinforcement"


class LoggingLevel(str, Enum):
"""Enumeration of logging levels."""
DEBUG = "DEBUG"
INFO = "INFO"
WARNING = "WARNING"
ERROR = "ERROR"
CRITICAL = "CRITICAL"


class IterationConfig(BaseModel):
"""Configuration for iteration parameters."""
max_iterations: int = Field(default=1000, gt=0, description="Maximum number of iterations")
early_stopping_tolerance: float = Field(default=1e-4, ge=0, description="Early stopping threshold")
gradient_clip_value: Optional[float] = Field(default=None, ge=0, description="Gradient clipping value")


class HyperparameterConfig(BaseModel):
"""Configuration for hyperparameters."""
learning_rate: float = Field(default=0.01, gt=0, description="Learning rate for optimization")
batch_size: int = Field(default=32, gt=0, description="Batch size for training")
regularization_lambda: float = Field(default=0.01, ge=0, description="Regularization strength")


class ModelConfig(BaseModel):
"""Configuration for model architecture and settings."""
hidden_layers: List[int] = Field(default=[64, 32], description="Sizes of hidden layers")
activation_function: str = Field(default="relu", description="Activation function for hidden layers")
dropout_rate: float = Field(default=0.2, ge=0, lt=1, description="Dropout rate for regularization")


class AdaptiveLearningProcessConfig(BaseModel):
"""Comprehensive configuration model for Adaptive Learning Process."""
    model_config = ConfigDict(
        title="Adaptive Learning Process Configuration",
        validate_default=True,
        extra="forbid",  # Prevents additional unexpected configuration keys
        protected_namespaces=()  # Allows the model_architecture field name without tripping Pydantic's "model_" namespace warning
    )

# Core learning configuration
learning_algorithm: LearningAlgorithm = Field(
default=LearningAlgorithm.ADAM,
description="Primary learning algorithm for the process"
)

# Configuration sub-models
iteration_config: IterationConfig = Field(
default_factory=IterationConfig,
description="Configuration for iteration control"
)
hyperparameters: HyperparameterConfig = Field(
default_factory=HyperparameterConfig,
description="Hyperparameter settings"
)
model_architecture: ModelConfig = Field(
default_factory=ModelConfig,
description="Model architecture configuration"
)

# Logging and monitoring
logging_level: LoggingLevel = Field(
default=LoggingLevel.INFO,
description="Logging verbosity level"
)
performance_metrics: List[str] = Field(
default=["accuracy", "loss"],
description="List of performance metrics to track"
)

# Advanced configuration
random_seed: Optional[int] = Field(
default=None,
description="Random seed for reproducibility"
)
custom_parameters: Optional[Dict[str, Any]] = Field(
default=None,
description="Additional custom parameters"
)

@field_validator('custom_parameters', mode='before')
@classmethod
def validate_custom_parameters(cls, v):
"""Validate custom parameters."""
if v is not None and not isinstance(v, dict):
raise ValueError("Custom parameters must be a dictionary")
return v


def validate_alp_config(config: Union[dict, AdaptiveLearningProcessConfig]) -> AdaptiveLearningProcessConfig:
"""
Validate and potentially convert a configuration to the AdaptiveLearningProcessConfig model.

Args:
config (Union[dict, AdaptiveLearningProcessConfig]): Configuration to validate

Returns:
AdaptiveLearningProcessConfig: Validated configuration
"""
if isinstance(config, dict):
return AdaptiveLearningProcessConfig(**config)
elif isinstance(config, AdaptiveLearningProcessConfig):
return config
else:
raise TypeError("Configuration must be a dictionary or AdaptiveLearningProcessConfig instance")
6 changes: 6 additions & 0 deletions src/config/__init__.py
@@ -0,0 +1,6 @@
"""
Configuration Management Service for Adaptive Learning Process (ALP) Loop

This module provides a comprehensive configuration management service
that handles loading, saving, and applying ALP loop configuration parameters.
"""
187 changes: 187 additions & 0 deletions src/config/config_manager.py
@@ -0,0 +1,187 @@
import os
import json
from typing import Any, Dict, Optional
from copy import deepcopy
from src.alp_config import AdaptiveLearningProcessConfig, validate_alp_config, LearningAlgorithm, LoggingLevel

class ConfigurationError(Exception):
"""Custom exception for configuration-related errors."""
pass

class ConfigurationManager:
"""
Enhanced Configuration Manager that supports multiple configuration sources
and integrates with the AdaptiveLearningProcessConfig model.

Supports:
- Default configuration
- JSON file configuration
- Environment variable configuration
- Runtime configuration overrides
"""

DEFAULT_CONFIG_PATH = 'config.json'

def __init__(self, config_path: Optional[str] = None):
"""
Initialize the ConfigurationManager.

Args:
config_path (Optional[str]): Path to the configuration file.
Uses default path if not provided.
"""
self._config_path = config_path or self.DEFAULT_CONFIG_PATH

# Validate environment variables first
self._validate_env_vars()

# Then load configuration
self._config = self._load_configuration()

def _validate_env_vars(self):
"""
Validate environment variables before configuration loading.

Raises:
ConfigurationError: If any environment variable is invalid
"""
# Check learning rate
learning_rate_var = os.environ.get('ALP_LEARNING_RATE')
if learning_rate_var is not None:
try:
rate = float(learning_rate_var)
if rate <= 0:
raise ValueError("Learning rate must be positive")
except ValueError as e:
raise ConfigurationError(f"Invalid ALP_LEARNING_RATE: {e}")

# Check learning algorithm
learning_algo_var = os.environ.get('ALP_LEARNING_ALGORITHM')
if learning_algo_var is not None:
try:
LearningAlgorithm(learning_algo_var)
except ValueError as e:
raise ConfigurationError(f"Invalid ALP_LEARNING_ALGORITHM: {e}")

def _parse_env_value(self, field_name: str, value: str, default_value: Any) -> Any:
"""
Parse environment variable value with validation.

Args:
field_name (str): Name of the configuration field
value (str): Environment variable value
default_value (Any): Default value for type inference

Returns:
Parsed and validated value

Raises:
ConfigurationError: If value cannot be parsed or validated
"""
try:
if field_name == 'custom_parameters':
return json.loads(value)
elif field_name == 'learning_algorithm':
return LearningAlgorithm(value)
elif field_name == 'logging_level':
return LoggingLevel(value)
elif isinstance(default_value, float):
parsed_value = float(value)
if parsed_value <= 0:
raise ValueError("Value must be positive")
return parsed_value
elif isinstance(default_value, int):
parsed_value = int(value)
if parsed_value <= 0:
raise ValueError("Value must be positive")
return parsed_value
else:
return value
except (ValueError, json.JSONDecodeError) as e:
raise ConfigurationError(f"Invalid environment variable {field_name}: {e}")

    def _load_configuration(self) -> AdaptiveLearningProcessConfig:
        """
        Load configuration from multiple sources with precedence.

        Precedence order (highest first):
        1. Environment variables
        2. Configuration file
        3. Default configuration

        Returns:
            AdaptiveLearningProcessConfig: Loaded and merged configuration
        """
# Start with default configuration
config = AdaptiveLearningProcessConfig()
config_dict = config.model_dump()

# Try loading from JSON file if path exists
if os.path.exists(self._config_path):
try:
with open(self._config_path, 'r') as f:
file_config = json.load(f)
# Update config_dict with file_config, prioritizing file values
for key, value in file_config.items():
if value is not None:
config_dict[key] = value
except (json.JSONDecodeError, IOError) as e:
raise ConfigurationError(f"Error reading configuration file: {e}")

        # Override with environment variables (highest precedence)
        for field_name, value in config_dict.items():
            env_var = f'ALP_{field_name.upper()}'
            env_value = os.environ.get(env_var)

            if env_value is not None:
                config_dict[field_name] = self._parse_env_value(field_name, env_value, value)

        # learning_rate is nested under hyperparameters, so the top-level loop
        # above never sees ALP_LEARNING_RATE; apply it explicitly here (it was
        # already validated in _validate_env_vars).
        learning_rate_var = os.environ.get('ALP_LEARNING_RATE')
        if learning_rate_var is not None:
            config_dict['hyperparameters']['learning_rate'] = float(learning_rate_var)

        return validate_alp_config(config_dict)

def get_config(self) -> AdaptiveLearningProcessConfig:
"""
Get the current configuration.

Returns:
AdaptiveLearningProcessConfig: Current configuration
"""
return deepcopy(self._config)

def update_config(self, **kwargs) -> None:
"""
Update configuration with provided parameters.

Args:
**kwargs: Configuration parameters to update

Raises:
ConfigurationError: If invalid configuration parameters are provided
"""
try:
# Create a copy of current config to update
config_dict = self._config.model_dump()

# Update with provided kwargs
for key, value in kwargs.items():
if value is not None:
config_dict[key] = value

# Recreate configuration object
self._config = validate_alp_config(config_dict)
except Exception as e:
raise ConfigurationError(f"Invalid configuration update: {e}")

def save_config(self, path: Optional[str] = None) -> None:
"""
Save current configuration to a JSON file.

Args:
path (Optional[str]): Path to save configuration.
Uses default path if not provided.
"""
        save_path = path or self._config_path
        with open(save_path, 'w') as f:
            # mode="json" converts enum members and other rich types to JSON-safe values
            json.dump(self._config.model_dump(mode='json'), f, indent=4)
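
Finally, a short end-to-end sketch of how the manager is meant to be used; the environment-variable values and the save path are illustrative:

```python
# End-to-end usage sketch for ConfigurationManager; the env var values and
# the save path are illustrative examples, not taken from this PR.
import os

from src.config.config_manager import ConfigurationManager

# Environment variables take precedence over config.json and defaults.
os.environ["ALP_LOGGING_LEVEL"] = "DEBUG"
os.environ["ALP_LEARNING_RATE"] = "0.005"

manager = ConfigurationManager()  # reads config.json if it exists
config = manager.get_config()
print(config.logging_level.value)            # DEBUG
print(config.hyperparameters.learning_rate)  # 0.005

# Runtime overrides are re-validated through the Pydantic model.
manager.update_config(random_seed=42)

# Persist the merged configuration to JSON.
manager.save_config("alp_config_snapshot.json")
```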