diff --git a/.gitignore b/.gitignore
index 02eac69..75f58d6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,22 +1,40 @@
-### AL ###
-#Template for AL projects for Dynamics 365 Business Central
-#launch.json folder
+# Python files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# Distribution / packaging
+dist/
+build/
+*.egg-info/
+
+# Virtual environments
+venv/
+env/
+.venv/
+
+# IDEs and editors
 .vscode/
-#Cache folder
-.alcache/
-#Symbols folder
-.alpackages/
-#Snapshots folder
-.snapshots/
-#Testing Output folder
-.output/
-#Extension App-file
-*.app
-#Rapid Application Development File
-rad.json
-#Translation Base-file
-*.g.xlf
-#License-file
-*.flf
-#Test results file
-TestResults.xml
\ No newline at end of file
+.idea/
+*.swp
+*.swo
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# Configuration files
+config.json
+.env
+
+# Logs
+*.log
+
+# Testing
+.pytest_cache/
+.mypy_cache/
+htmlcov/
+.coverage
+
+# OS generated files
+.DS_Store
+Thumbs.db
diff --git a/src/alp_config.py b/src/alp_config.py
new file mode 100644
index 0000000..2ae5855
--- /dev/null
+++ b/src/alp_config.py
@@ -0,0 +1,117 @@
+from enum import Enum
+from typing import Optional, List, Dict, Any, Union
+from pydantic import BaseModel, Field, field_validator, ConfigDict
+
+
+class LearningAlgorithm(str, Enum):
+    """Enumeration of supported learning algorithms."""
+    GRADIENT_DESCENT = "gradient_descent"
+    ADAM = "adam"
+    SGD = "stochastic_gradient_descent"
+    REINFORCEMENT = "reinforcement"
+
+
+class LoggingLevel(str, Enum):
+    """Enumeration of logging levels."""
+    DEBUG = "DEBUG"
+    INFO = "INFO"
+    WARNING = "WARNING"
+    ERROR = "ERROR"
+    CRITICAL = "CRITICAL"
+
+
+class IterationConfig(BaseModel):
+    """Configuration for iteration parameters."""
+    max_iterations: int = Field(default=1000, gt=0, description="Maximum number of iterations")
+    early_stopping_tolerance: float = Field(default=1e-4, ge=0, description="Early stopping threshold")
+    gradient_clip_value: Optional[float] = Field(default=None, ge=0, description="Gradient clipping value")
+
+
+class HyperparameterConfig(BaseModel):
+    """Configuration for hyperparameters."""
+    learning_rate: float = Field(default=0.01, gt=0, description="Learning rate for optimization")
+    batch_size: int = Field(default=32, gt=0, description="Batch size for training")
+    regularization_lambda: float = Field(default=0.01, ge=0, description="Regularization strength")
+
+
+class ModelConfig(BaseModel):
+    """Configuration for model architecture and settings."""
+    hidden_layers: List[int] = Field(default=[64, 32], description="Sizes of hidden layers")
+    activation_function: str = Field(default="relu", description="Activation function for hidden layers")
+    dropout_rate: float = Field(default=0.2, ge=0, lt=1, description="Dropout rate for regularization")
+
+
+class AdaptiveLearningProcessConfig(BaseModel):
+    """Comprehensive configuration model for Adaptive Learning Process."""
+    model_config = ConfigDict(
+        title="Adaptive Learning Process Configuration",
+        validate_default=True,
+        extra="forbid",  # Prevents additional unexpected configuration keys
+        protected_namespaces=()  # Allows the model_architecture field name without a Pydantic "model_" namespace warning
+    )
+
+    # Core learning configuration
+    learning_algorithm: LearningAlgorithm = Field(
+        default=LearningAlgorithm.ADAM,
+        description="Primary learning algorithm for the process"
+    )
+
+    # Configuration sub-models
+    iteration_config: IterationConfig = Field(
+        default_factory=IterationConfig,
+        description="Configuration for iteration control"
+    )
+    hyperparameters: HyperparameterConfig = Field(
+        default_factory=HyperparameterConfig,
+        description="Hyperparameter settings"
+    )
+    model_architecture: ModelConfig = Field(
+        default_factory=ModelConfig,
+        description="Model architecture configuration"
+    )
+
+    # Logging and monitoring
+    logging_level: LoggingLevel = Field(
+        default=LoggingLevel.INFO,
+        description="Logging verbosity level"
+    )
+    performance_metrics: List[str] = Field(
+        default=["accuracy", "loss"],
+        description="List of performance metrics to track"
+    )
+
+    # Advanced configuration
+    random_seed: Optional[int] = Field(
+        default=None,
+        description="Random seed for reproducibility"
+    )
+    custom_parameters: Optional[Dict[str, Any]] = Field(
+        default=None,
+        description="Additional custom parameters"
+    )
+
+    @field_validator('custom_parameters', mode='before')
+    @classmethod
+    def validate_custom_parameters(cls, v):
+        """Validate custom parameters."""
+        if v is not None and not isinstance(v, dict):
+            raise ValueError("Custom parameters must be a dictionary")
+        return v
+
+
+def validate_alp_config(config: Union[dict, AdaptiveLearningProcessConfig]) -> AdaptiveLearningProcessConfig:
+    """
+    Validate and potentially convert a configuration to the AdaptiveLearningProcessConfig model.
+
+    Args:
+        config (Union[dict, AdaptiveLearningProcessConfig]): Configuration to validate
+
+    Returns:
+        AdaptiveLearningProcessConfig: Validated configuration
+    """
+    if isinstance(config, dict):
+        return AdaptiveLearningProcessConfig(**config)
+    elif isinstance(config, AdaptiveLearningProcessConfig):
+        return config
+    else:
+        raise TypeError("Configuration must be a dictionary or AdaptiveLearningProcessConfig instance")
\ No newline at end of file
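[Reviewer note — not part of the diff] A minimal usage sketch of the model above, assuming the package layout in this PR and Pydantic v2 (ValidationError, error_count). The misspelled keyword is deliberate, to show extra="forbid" rejecting unknown keys.

    from pydantic import ValidationError

    from src.alp_config import AdaptiveLearningProcessConfig, validate_alp_config

    # Nested dicts are coerced into the declared sub-models (HyperparameterConfig, etc.).
    config = validate_alp_config({
        "learning_algorithm": "adam",
        "hyperparameters": {"learning_rate": 0.005, "batch_size": 16},
    })
    assert config.hyperparameters.batch_size == 16

    # extra="forbid" makes typos fail fast instead of being silently dropped.
    try:
        AdaptiveLearningProcessConfig(learning_rat=0.1)  # deliberate misspelling
    except ValidationError as exc:
        print(f"rejected: {exc.error_count()} validation error(s)")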
diff --git a/src/config/__init__.py b/src/config/__init__.py
new file mode 100644
index 0000000..905f0d7
--- /dev/null
+++ b/src/config/__init__.py
@@ -0,0 +1,6 @@
+"""
+Configuration Management Service for Adaptive Learning Process (ALP) Loop
+
+This module provides a comprehensive configuration management service
+that handles loading, saving, and applying ALP loop configuration parameters.
+"""
\ No newline at end of file
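[Reviewer note — not part of the diff] A sketch of the intended entry point, assuming callers go through ConfigurationManager (defined in the next file) rather than importing the Pydantic models directly.

    from src.config.config_manager import ConfigurationManager

    manager = ConfigurationManager()           # falls back to DEFAULT_CONFIG_PATH ('config.json')
    print(manager.get_config().logging_level)  # LoggingLevel.INFO unless overridden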
+ """ + self._config_path = config_path or self.DEFAULT_CONFIG_PATH + + # Validate environment variables first + self._validate_env_vars() + + # Then load configuration + self._config = self._load_configuration() + + def _validate_env_vars(self): + """ + Validate environment variables before configuration loading. + + Raises: + ConfigurationError: If any environment variable is invalid + """ + # Check learning rate + learning_rate_var = os.environ.get('ALP_LEARNING_RATE') + if learning_rate_var is not None: + try: + rate = float(learning_rate_var) + if rate <= 0: + raise ValueError("Learning rate must be positive") + except ValueError as e: + raise ConfigurationError(f"Invalid ALP_LEARNING_RATE: {e}") + + # Check learning algorithm + learning_algo_var = os.environ.get('ALP_LEARNING_ALGORITHM') + if learning_algo_var is not None: + try: + LearningAlgorithm(learning_algo_var) + except ValueError as e: + raise ConfigurationError(f"Invalid ALP_LEARNING_ALGORITHM: {e}") + + def _parse_env_value(self, field_name: str, value: str, default_value: Any) -> Any: + """ + Parse environment variable value with validation. + + Args: + field_name (str): Name of the configuration field + value (str): Environment variable value + default_value (Any): Default value for type inference + + Returns: + Parsed and validated value + + Raises: + ConfigurationError: If value cannot be parsed or validated + """ + try: + if field_name == 'custom_parameters': + return json.loads(value) + elif field_name == 'learning_algorithm': + return LearningAlgorithm(value) + elif field_name == 'logging_level': + return LoggingLevel(value) + elif isinstance(default_value, float): + parsed_value = float(value) + if parsed_value <= 0: + raise ValueError("Value must be positive") + return parsed_value + elif isinstance(default_value, int): + parsed_value = int(value) + if parsed_value <= 0: + raise ValueError("Value must be positive") + return parsed_value + else: + return value + except (ValueError, json.JSONDecodeError) as e: + raise ConfigurationError(f"Invalid environment variable {field_name}: {e}") + + def _load_configuration(self) -> AdaptiveLearningProcessConfig: + """ + Load configuration from multiple sources with precedence. + + Precedence order: + 1. Configuration file + 2. Environment variables + 3. Default configuration + + Returns: + AdaptiveLearningProcessConfig: Loaded and merged configuration + """ + # Start with default configuration + config = AdaptiveLearningProcessConfig() + config_dict = config.model_dump() + + # Try loading from JSON file if path exists + if os.path.exists(self._config_path): + try: + with open(self._config_path, 'r') as f: + file_config = json.load(f) + # Update config_dict with file_config, prioritizing file values + for key, value in file_config.items(): + if value is not None: + config_dict[key] = value + except (json.JSONDecodeError, IOError) as e: + raise ConfigurationError(f"Error reading configuration file: {e}") + + # Override with environment variables + for field_name, value in config_dict.items(): + env_var = f'ALP_{field_name.upper()}' + env_value = os.environ.get(env_var) + + if env_value is not None: + try: + config_dict[field_name] = self._parse_env_value(field_name, env_value, value) + except ConfigurationError: + raise + + return validate_alp_config(config_dict) + + def get_config(self) -> AdaptiveLearningProcessConfig: + """ + Get the current configuration. 
diff --git a/tests/test_alp_config.py b/tests/test_alp_config.py
new file mode 100644
index 0000000..eee4712
--- /dev/null
+++ b/tests/test_alp_config.py
@@ -0,0 +1,85 @@
+import pytest
+from src.alp_config import (
+    AdaptiveLearningProcessConfig,
+    LearningAlgorithm,
+    LoggingLevel,
+    validate_alp_config
+)
+
+
+def test_default_configuration():
+    """Test that the default configuration is created correctly."""
+    config = AdaptiveLearningProcessConfig()
+
+    assert config.learning_algorithm == LearningAlgorithm.ADAM
+    assert config.logging_level == LoggingLevel.INFO
+    assert config.performance_metrics == ["accuracy", "loss"]
+    assert config.random_seed is None
+
+
+def test_custom_configuration():
+    """Test creating a configuration with custom parameters."""
+    config_data = {
+        "learning_algorithm": LearningAlgorithm.SGD,
+        "logging_level": LoggingLevel.DEBUG,
+        "performance_metrics": ["f1_score"],
+        "random_seed": 42,
+        "iteration_config": {
+            "max_iterations": 500,
+            "early_stopping_tolerance": 1e-3
+        }
+    }
+
+    config = AdaptiveLearningProcessConfig(**config_data)
+
+    assert config.learning_algorithm == LearningAlgorithm.SGD
+    assert config.logging_level == LoggingLevel.DEBUG
+    assert config.performance_metrics == ["f1_score"]
+    assert config.random_seed == 42
+    assert config.iteration_config.max_iterations == 500
+    assert config.iteration_config.early_stopping_tolerance == 1e-3
+
+
+def test_validation_helper():
+    """Test the validate_alp_config helper function."""
+    config_data = {
+        "learning_algorithm": LearningAlgorithm.GRADIENT_DESCENT,
+        "hyperparameters": {
+            "learning_rate": 0.001,
+            "batch_size": 64
+        }
+    }
+
+    validated_config = validate_alp_config(config_data)
+
+    assert isinstance(validated_config, AdaptiveLearningProcessConfig)
+    assert validated_config.learning_algorithm == LearningAlgorithm.GRADIENT_DESCENT
+    assert validated_config.hyperparameters.learning_rate == 0.001
+    assert validated_config.hyperparameters.batch_size == 64
+
+
+def test_invalid_configuration():
+    """Test that invalid configurations raise appropriate errors."""
+    with pytest.raises(Exception, match="Input should be greater than 0"):
+        AdaptiveLearningProcessConfig(
+            iteration_config={"max_iterations": 0}
+        )
+
+    with pytest.raises(Exception, match="Input should be greater than 0"):
+        AdaptiveLearningProcessConfig(
+            hyperparameters={"learning_rate": -0.1}
+        )
+
+
+def test_custom_parameters():
+    """Test custom parameters configuration."""
+    config = AdaptiveLearningProcessConfig(
+        custom_parameters={"experimental_feature": True}
+    )
+
+    assert config.custom_parameters == {"experimental_feature": True}
+
+    with pytest.raises(ValueError):
+        AdaptiveLearningProcessConfig(
+            custom_parameters="not a dictionary"
+        )
\ No newline at end of file
"""Test custom parameters configuration.""" + config = AdaptiveLearningProcessConfig( + custom_parameters={"experimental_feature": True} + ) + + assert config.custom_parameters == {"experimental_feature": True} + + with pytest.raises(ValueError): + AdaptiveLearningProcessConfig( + custom_parameters="not a dictionary" + ) \ No newline at end of file diff --git a/tests/test_config_manager.py b/tests/test_config_manager.py new file mode 100644 index 0000000..0d786d8 --- /dev/null +++ b/tests/test_config_manager.py @@ -0,0 +1,117 @@ +import os +import json +import pytest +from src.config.config_manager import ConfigurationManager, ConfigurationError +from src.alp_config import LearningAlgorithm, LoggingLevel + +def test_default_configuration(): + """Test default configuration initialization.""" + config_manager = ConfigurationManager() + config = config_manager.get_config() + + assert config.learning_algorithm == LearningAlgorithm.ADAM + assert config.logging_level == LoggingLevel.INFO + assert config.iteration_config.max_iterations == 1000 + assert config.hyperparameters.learning_rate == 0.01 + +def test_file_configuration(tmp_path): + """Test loading configuration from a JSON file.""" + config_file = tmp_path / "config.json" + test_config = { + "learning_algorithm": "stochastic_gradient_descent", + "logging_level": "DEBUG", + "hyperparameters": { + "learning_rate": 0.05, + "batch_size": 64 + } + } + + with open(config_file, 'w') as f: + json.dump(test_config, f) + + config_manager = ConfigurationManager(str(config_file)) + config = config_manager.get_config() + + assert config.learning_algorithm == LearningAlgorithm.SGD + assert config.logging_level == LoggingLevel.DEBUG + assert config.hyperparameters.learning_rate == 0.05 + assert config.hyperparameters.batch_size == 64 + +def test_env_configuration(monkeypatch): + """Test configuration from environment variables.""" + monkeypatch.setenv('ALP_LEARNING_ALGORITHM', 'gradient_descent') + monkeypatch.setenv('ALP_LOGGING_LEVEL', 'WARNING') + monkeypatch.setenv('ALP_CUSTOM_PARAMETERS', '{"key": "value"}') + + config_manager = ConfigurationManager() + config = config_manager.get_config() + + assert config.learning_algorithm == LearningAlgorithm.GRADIENT_DESCENT + assert config.logging_level == LoggingLevel.WARNING + assert config.custom_parameters == {"key": "value"} + +def test_configuration_update(): + """Test updating configuration at runtime.""" + config_manager = ConfigurationManager() + + config_manager.update_config( + learning_algorithm=LearningAlgorithm.REINFORCEMENT, + logging_level=LoggingLevel.ERROR + ) + + config = config_manager.get_config() + + assert config.learning_algorithm == LearningAlgorithm.REINFORCEMENT + assert config.logging_level == LoggingLevel.ERROR + +def test_save_and_load_configuration(tmp_path): + """Test saving and loading configuration.""" + config_file = tmp_path / "saved_config.json" + + config_manager = ConfigurationManager() + config_manager.update_config( + learning_algorithm=LearningAlgorithm.SGD, + hyperparameters={ + "learning_rate": 0.03, + "batch_size": 128 + } + ) + + config_manager.save_config(str(config_file)) + + # Load saved configuration + loaded_config_manager = ConfigurationManager(str(config_file)) + loaded_config = loaded_config_manager.get_config() + + assert loaded_config.learning_algorithm == LearningAlgorithm.SGD + assert loaded_config.hyperparameters.learning_rate == 0.03 + assert loaded_config.hyperparameters.batch_size == 128 + +def test_configuration_error_handling(tmp_path, 
+    """Test error handling for invalid configurations."""
+    # Invalid JSON file
+    invalid_config_file = tmp_path / "invalid_config.json"
+    with open(invalid_config_file, 'w') as f:
+        f.write("{invalid json}")
+
+    with pytest.raises(ConfigurationError, match="Error reading configuration file"):
+        ConfigurationManager(str(invalid_config_file))
+
+    # Specific environment variable validation tests
+    # Test learning rate
+    with pytest.raises((ConfigurationError, ValueError), match="Invalid|must be positive"):
+        with monkeypatch.context() as m:
+            m.setenv('ALP_LEARNING_RATE', 'not a number')
+            ConfigurationManager()
+
+    # Test learning algorithm
+    with pytest.raises((ConfigurationError, ValueError), match="Invalid|is not a valid"):
+        with monkeypatch.context() as m:
+            m.setenv('ALP_LEARNING_ALGORITHM', 'invalid_algorithm')
+            ConfigurationManager()
+
+    # Test invalid logging level
+    with pytest.raises((ConfigurationError, ValueError), match="Invalid|is not a valid"):
+        with monkeypatch.context() as m:
+            m.setenv('ALP_LOGGING_LEVEL', 'SUPER_DEBUG')
+            ConfigurationManager()
\ No newline at end of file
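[Reviewer note — not part of the diff] A hedged round-trip sketch of the save/load cycle the last tests exercise. alp_run.json is a hypothetical path, and the assertions assume no ALP_* environment variables are set in the session.

    from src.alp_config import LearningAlgorithm
    from src.config.config_manager import ConfigurationManager

    manager = ConfigurationManager()
    manager.update_config(learning_algorithm=LearningAlgorithm.SGD, random_seed=7)
    manager.save_config("alp_run.json")        # hypothetical path

    # A fresh manager pointed at the saved file reproduces the tweaked config.
    reloaded = ConfigurationManager("alp_run.json").get_config()
    assert reloaded.learning_algorithm is LearningAlgorithm.SGD
    assert reloaded.random_seed == 7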