72 changes: 51 additions & 21 deletions .gitignore
@@ -1,22 +1,52 @@
### AL ###
#Template for AL projects for Dynamics 365 Business Central
#launch.json folder
.vscode/
#Cache folder
.alcache/
#Symbols folder
.alpackages/
#Snapshots folder
.snapshots/
#Testing Output folder
.output/
#Extension App-file
*.app
#Rapid Application Development File
rad.json
#Translation Base-file
*.g.xlf
#License-file
*.flf
#Test results file
TestResults.xml

### Python ###
__pycache__/
*.py[cod]
*$py.class
*.so

# Virtual Environments
.venv/
venv/
env/
.env
.env/

# Distribution / packaging
dist/
build/
*.egg-info/

# Logs
*.log
logs/

# Testing
.pytest_cache/
.coverage
htmlcov/

# IDEs and editors
.idea/
*.swp
*.swo

# OS generated files
.DS_Store
Thumbs.db
173 changes: 173 additions & 0 deletions src/iteration_logger.py
@@ -0,0 +1,173 @@
import logging
from typing import Dict, Any, Optional, List
from dataclasses import dataclass, asdict, field
from datetime import datetime
import json
import os


@dataclass
class IterationLogEntry:
"""
Structured log entry for a single ALP iteration.

Attributes:
iteration_number (int): Unique identifier for the iteration
timestamp (datetime): Timestamp of the iteration
metadata (Dict[str, Any]): Additional metadata about the iteration
performance_metrics (Dict[str, float]): Performance metrics for the iteration
error_info (Optional[Dict[str, Any]]): Error information if an error occurred
        status (str): Status of the iteration ('pending', 'success', 'failure', or 'warning')
"""
iteration_number: int
timestamp: datetime = field(default_factory=datetime.now)
metadata: Dict[str, Any] = field(default_factory=dict)
performance_metrics: Dict[str, float] = field(default_factory=dict)
error_info: Optional[Dict[str, Any]] = None
status: str = 'pending'

def to_dict(self) -> Dict[str, Any]:
"""
Convert log entry to a dictionary for JSON serialization.

Returns:
Dict[str, Any]: Serializable dictionary representation of the log entry
"""
entry_dict = asdict(self)
entry_dict['timestamp'] = self.timestamp.isoformat()
return entry_dict


class IterationLogger:
"""
Utility class for managing and writing log entries for ALP iterations.

Supports file-based and console logging with configurable log levels.
"""
def __init__(
self,
log_dir: str = 'logs',
log_file: str = 'iteration_log.json',
console_log_level: int = logging.INFO
):
"""
Initialize the IterationLogger.

Args:
log_dir (str): Directory to store log files
log_file (str): Name of the log file
console_log_level (int): Logging level for console output
"""
self.log_dir = log_dir
self.log_file = log_file

# Create logs directory if it doesn't exist
os.makedirs(log_dir, exist_ok=True)

# Full path for log file
self.log_path = os.path.join(log_dir, log_file)

# Configure console logging
logging.basicConfig(
level=console_log_level,
format='%(asctime)s - %(levelname)s: %(message)s'
)
self.logger = logging.getLogger(__name__)

def log_iteration(self, entry: IterationLogEntry) -> None:
"""
Log an iteration entry to file and console.

Args:
entry (IterationLogEntry): Iteration log entry to record
"""
try:
# Log to console
self._log_to_console(entry)

# Append to JSON log file
self._append_to_log_file(entry)
except Exception as e:
self.logger.error(f"Error logging iteration: {e}")

def _log_to_console(self, entry: IterationLogEntry) -> None:
"""
Log iteration details to console based on status.

Args:
entry (IterationLogEntry): Iteration log entry
"""
log_method = {
'success': self.logger.info,
'failure': self.logger.error,
'warning': self.logger.warning,
'pending': self.logger.debug
}.get(entry.status, self.logger.info)

log_method(
f"Iteration {entry.iteration_number} "
f"Status: {entry.status} "
f"Metrics: {entry.performance_metrics}"
)

def _append_to_log_file(self, entry: IterationLogEntry) -> None:
"""
Append iteration log entry to JSON log file.

Args:
entry (IterationLogEntry): Iteration log entry
"""
try:
# Read existing log entries or initialize empty list
log_entries = []
if os.path.exists(self.log_path):
with open(self.log_path, 'r') as f:
log_entries = json.load(f)

# Append new entry
log_entries.append(entry.to_dict())

# Write updated log entries
with open(self.log_path, 'w') as f:
json.dump(log_entries, f, indent=2)
except Exception as e:
self.logger.error(f"Failed to write log entry: {e}")

    def get_log_entries(self) -> List[Dict[str, Any]]:
        """
        Retrieve all log entries from the log file.

        Returns:
            List[Dict[str, Any]]: List of log entries
"""
try:
if os.path.exists(self.log_path):
with open(self.log_path, 'r') as f:
return json.load(f)
return []
except Exception as e:
self.logger.error(f"Error reading log entries: {e}")
return []

def filter_log_entries(self, status: Optional[str] = None, min_iteration: Optional[int] = None) -> List[Dict[str, Any]]:
"""
Filter log entries based on status and minimum iteration number.

Args:
status (Optional[str]): Filter entries by status (e.g., 'success', 'failure')
min_iteration (Optional[int]): Minimum iteration number to include

Returns:
List[Dict[str, Any]]: Filtered log entries
"""
log_entries = self.get_log_entries()

filtered_entries = log_entries.copy()

if status is not None:
filtered_entries = [entry for entry in filtered_entries if entry.get('status') == status]

if min_iteration is not None:
filtered_entries = [entry for entry in filtered_entries if entry.get('iteration_number', 0) >= min_iteration]

return filtered_entries
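
A minimal usage sketch for the logger above, assuming the module is importable as src.iteration_logger; the metric keys and metadata values shown are illustrative, not required by the class:

from src.iteration_logger import IterationLogger, IterationLogEntry

# Set up a logger that writes to logs/iteration_log.json and echoes to the console
iteration_logger = IterationLogger(log_dir='logs', log_file='iteration_log.json')

# Record one iteration; metadata and performance_metrics contents are up to the caller
entry = IterationLogEntry(
    iteration_number=1,
    metadata={'phase': 'training'},
    performance_metrics={'performance_score': 0.92},
    status='success'
)
iteration_logger.log_iteration(entry)

# Later, pull back only successful iterations from iteration 1 onward
successes = iteration_logger.filter_log_entries(status='success', min_iteration=1)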
1 change: 1 addition & 0 deletions src/log_analysis/__init__.py
@@ -0,0 +1 @@
from .log_parser import LogParser
130 changes: 130 additions & 0 deletions src/log_analysis/log_parser.py
@@ -0,0 +1,130 @@
from typing import List, Dict, Any
import json
import os

class LogParser:
"""
A class responsible for parsing and analyzing iteration logs.

This class provides methods to:
- Read log files
- Parse log entries
- Analyze performance metrics
- Generate summary reports
"""

def __init__(self, log_directory: str = 'logs'):
"""
Initialize the LogParser with a specific log directory.

Args:
log_directory (str): Directory containing log files. Defaults to 'logs'.
"""
self.log_directory = log_directory

# Ensure log directory exists
os.makedirs(log_directory, exist_ok=True)

def get_log_files(self) -> List[str]:
"""
Retrieve all log files in the specified directory.

Returns:
List[str]: List of log file paths
"""
return [
os.path.join(self.log_directory, f)
for f in os.listdir(self.log_directory)
if f.endswith('.json')
]

def parse_log_file(self, file_path: str) -> List[Dict[str, Any]]:
"""
Parse a single log file and return its contents.

Args:
file_path (str): Path to the log file

Returns:
List[Dict[str, Any]]: Parsed log entries

Raises:
FileNotFoundError: If the log file doesn't exist
json.JSONDecodeError: If the log file is not valid JSON
"""
try:
with open(file_path, 'r') as log_file:
return json.load(log_file)
except FileNotFoundError:
raise FileNotFoundError(f"Log file not found: {file_path}")
except json.JSONDecodeError:
raise ValueError(f"Invalid JSON in log file: {file_path}")

def analyze_performance(self, log_entries: List[Dict[str, Any]]) -> Dict[str, Any]:
"""
Analyze performance metrics from log entries.

Args:
log_entries (List[Dict[str, Any]]): List of log entries to analyze

Returns:
Dict[str, Any]: Performance summary metrics
"""
if not log_entries:
return {}

# Extract performance-related metrics
performance_scores = []
for entry in log_entries:
            # Look for performance_score at the top level, then nested under performance_metrics
            score = entry.get('performance_score')
            if score is None:
                score = entry.get('performance_metrics', {}).get('performance_score')
            if isinstance(score, (int, float)):
                performance_scores.append(score)

performance_metrics = {
'total_iterations': len(log_entries),
'start_time': log_entries[0].get('timestamp'),
'end_time': log_entries[-1].get('timestamp'),
            'error_rate': sum(1 for entry in log_entries if entry.get('status') in ('error', 'failure')) / len(log_entries)
}

# Calculate performance metrics only if scores exist
if performance_scores:
performance_metrics.update({
'performance_scores': performance_scores,
'avg_performance': sum(performance_scores) / len(performance_scores),
'max_performance': max(performance_scores),
'min_performance': min(performance_scores)
})

return performance_metrics

def generate_report(self) -> Dict[str, Any]:
"""
Generate a comprehensive report by analyzing all log files.

Returns:
Dict[str, Any]: Comprehensive log analysis report
"""
log_files = self.get_log_files()
report = {
'total_log_files': len(log_files),
'file_analyses': []
}

for log_file in log_files:
try:
log_entries = self.parse_log_file(log_file)
file_report = {
'file_name': os.path.basename(log_file),
'performance': self.analyze_performance(log_entries)
}
report['file_analyses'].append(file_report)
except Exception as e:
report['file_analyses'].append({
'file_name': os.path.basename(log_file),
'error': str(e)
})

return report
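
A short sketch of how the parser might be driven against the same logs directory; the specific log file name below is an assumption for illustration:

import json
from src.log_analysis import LogParser

parser = LogParser(log_directory='logs')

# Summarize every *.json log file found in the directory
report = parser.generate_report()
print(json.dumps(report, indent=2))

# Or analyze a single log file directly
entries = parser.parse_log_file('logs/iteration_log.json')
metrics = parser.analyze_performance(entries)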