2 changes: 1 addition & 1 deletion nbs/common.modules.ipynb
@@ -154,7 +154,7 @@
"\n",
"\n",
"class CausalConv1d(nn.Module):\n",
" \"\"\" Causal Convolution 1d\n",
" r\"\"\" Causal Convolution 1d\n",
"\n",
" Receives `x` input of dim [N,C_in,T], and computes a causal convolution\n",
" in the time dimension. Skipping the H steps of the forecast horizon, through\n",
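Every hunk in this PR makes the same one-character change: adding the `r` prefix so the docstring is stored as a raw string and its LaTeX backslashes survive. A minimal sketch of the failure mode being avoided (the `\theta` string below is illustrative, not taken from the codebase):

```python
# In a normal string, recognized escapes inside LaTeX are translated:
# the "\t" in "\theta" silently becomes a tab, and unrecognized escapes
# such as "\m" in "\mathbf" raise a SyntaxWarning on Python 3.12+.
plain = "$$\theta$$"  # '\t' is interpreted as a tab character
raw = r"$$\theta$$"   # the raw string keeps the backslash literal
assert plain != raw
print(len(plain), len(raw))  # 9 10 -> one character lost to the escape
```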
12 changes: 6 additions & 6 deletions nbs/common.scalers.ipynb
@@ -193,7 +193,7 @@
"source": [
"#| export\n",
"def minmax_statistics(x, mask, eps=1e-6, dim=-1):\n",
" \"\"\" MinMax Scaler\n",
" r\"\"\" MinMax Scaler\n",
"\n",
" Standardizes temporal features by ensuring its range dweels between\n",
" [0,1] range. This transformation is often used as an alternative \n",
@@ -264,7 +264,7 @@
"source": [
"#| export\n",
"def minmax1_statistics(x, mask, eps=1e-6, dim=-1):\n",
" \"\"\" MinMax1 Scaler\n",
" r\"\"\" MinMax1 Scaler\n",
"\n",
" Standardizes temporal features by ensuring its range dweels between\n",
" [-1,1] range. This transformation is often used as an alternative \n",
@@ -337,7 +337,7 @@
"source": [
"#| export\n",
"def std_statistics(x, mask, dim=-1, eps=1e-6):\n",
" \"\"\" Standard Scaler\n",
" r\"\"\" Standard Scaler\n",
"\n",
" Standardizes features by removing the mean and scaling\n",
" to unit variance along the `dim` dimension. \n",
@@ -400,7 +400,7 @@
"source": [
"#| export\n",
"def robust_statistics(x, mask, dim=-1, eps=1e-6):\n",
" \"\"\" Robust Median Scaler\n",
" r\"\"\" Robust Median Scaler\n",
"\n",
" Standardizes features by removing the median and scaling\n",
" with the mean absolute deviation (mad) a robust estimator of variance.\n",
@@ -475,7 +475,7 @@
"source": [
"#| export\n",
"def invariant_statistics(x, mask, dim=-1, eps=1e-6):\n",
" \"\"\" Invariant Median Scaler\n",
" r\"\"\" Invariant Median Scaler\n",
"\n",
" Standardizes features by removing the median and scaling\n",
" with the mean absolute deviation (mad) a robust estimator of variance.\n",
@@ -615,7 +615,7 @@
"source": [
"#| export\n",
"class TemporalNorm(nn.Module):\n",
" \"\"\" Temporal Normalization\n",
" r\"\"\" Temporal Normalization\n",
"\n",
" Standardization of the features is a common requirement for many \n",
" machine learning estimators, and it is commonly achieved by removing \n",
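The scaler docstrings touched above all follow the same masked-statistics pattern. As context, a hedged sketch of min-max statistics along the time dimension; the tensor layout and the `eps` guard are assumptions for illustration, not the library's exact code:

```python
import torch

def minmax_statistics_sketch(x, mask, eps=1e-6, dim=-1):
    # Ignore masked-out entries when taking the min/max along `dim`.
    x_min = x.masked_fill(mask == 0, float("inf")).min(dim=dim, keepdim=True).values
    x_max = x.masked_fill(mask == 0, float("-inf")).max(dim=dim, keepdim=True).values
    x_range = (x_max - x_min).clamp_min(eps)  # guard against a zero range
    return x_min, x_range

x = torch.rand(2, 3, 8)    # assumed [batch, channel, time] layout
mask = torch.ones_like(x)
x_min, x_range = minmax_statistics_sketch(x, mask)
z = (x - x_min) / x_range  # scaled into the [0, 1] range the docstring describes
```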
48 changes: 24 additions & 24 deletions nbs/losses.pytorch.ipynb
@@ -211,7 +211,7 @@
"source": [
"#| export\n",
"class MAE(BasePointLoss):\n",
" \"\"\"Mean Absolute Error\n",
" r\"\"\"Mean Absolute Error\n",
"\n",
" Calculates Mean Absolute Error between\n",
" `y` and `y_hat`. MAE measures the relative prediction\n",
@@ -296,7 +296,7 @@
"source": [
"#| export\n",
"class MSE(BasePointLoss):\n",
" \"\"\" Mean Squared Error\n",
" r\"\"\" Mean Squared Error\n",
"\n",
" Calculates Mean Squared Error between\n",
" `y` and `y_hat`. MSE measures the relative prediction\n",
@@ -382,7 +382,7 @@
"source": [
"#| export\n",
"class RMSE(BasePointLoss):\n",
" \"\"\" Root Mean Squared Error\n",
" r\"\"\" Root Mean Squared Error\n",
"\n",
" Calculates Root Mean Squared Error between\n",
" `y` and `y_hat`. RMSE measures the relative prediction\n",
@@ -482,7 +482,7 @@
"source": [
"#| export\n",
"class MAPE(BasePointLoss):\n",
" \"\"\" Mean Absolute Percentage Error\n",
" r\"\"\" Mean Absolute Percentage Error\n",
"\n",
" Calculates Mean Absolute Percentage Error between\n",
" `y` and `y_hat`. MAPE measures the relative prediction\n",
@@ -574,7 +574,7 @@
"source": [
"#| export\n",
"class SMAPE(BasePointLoss):\n",
" \"\"\" Symmetric Mean Absolute Percentage Error\n",
" r\"\"\" Symmetric Mean Absolute Percentage Error\n",
"\n",
" Calculates Symmetric Mean Absolute Percentage Error between\n",
" `y` and `y_hat`. SMAPE measures the relative prediction\n",
@@ -669,7 +669,7 @@
"source": [
"#| export\n",
"class MASE(BasePointLoss):\n",
" \"\"\" Mean Absolute Scaled Error \n",
" r\"\"\" Mean Absolute Scaled Error \n",
" Calculates the Mean Absolute Scaled Error between\n",
" `y` and `y_hat`. MASE measures the relative prediction\n",
" accuracy of a forecasting method by comparinng the mean absolute errors\n",
@@ -765,7 +765,7 @@
"source": [
"#| export\n",
"class relMSE(BasePointLoss):\n",
" \"\"\"Relative Mean Squared Error\n",
" r\"\"\"Relative Mean Squared Error\n",
" Computes Relative Mean Squared Error (relMSE), as proposed by Hyndman & Koehler (2006)\n",
" as an alternative to percentage errors, to avoid measure unstability.\n",
" $$ \\mathrm{relMSE}(\\\\mathbf{y}, \\\\mathbf{\\hat{y}}, \\\\mathbf{\\hat{y}}^{benchmark}) =\n",
@@ -867,7 +867,7 @@
"source": [
"#| export\n",
"class QuantileLoss(BasePointLoss):\n",
" \"\"\" Quantile Loss\n",
" r\"\"\" Quantile Loss\n",
"\n",
" Computes the quantile loss between `y` and `y_hat`.\n",
" QL measures the deviation of a quantile forecast.\n",
@@ -993,7 +993,7 @@
"source": [
"#| export\n",
"class MQLoss(BasePointLoss):\n",
" \"\"\" Multi-Quantile loss\n",
" r\"\"\" Multi-Quantile loss\n",
"\n",
" Calculates the Multi-Quantile loss (MQL) between `y` and `y_hat`.\n",
" MQL calculates the average multi-quantile Loss for\n",
@@ -1203,7 +1203,7 @@
"\n",
"\n",
"class IQLoss(QuantileLoss):\n",
" \"\"\"Implicit Quantile Loss\n",
" r\"\"\"Implicit Quantile Loss\n",
"\n",
" Computes the quantile loss between `y` and `y_hat`, with the quantile `q` provided as an input to the network. \n",
" IQL measures the deviation of a quantile forecast.\n",
@@ -1485,7 +1485,7 @@
"\n",
"\n",
"class Tweedie(Distribution):\n",
" \"\"\" Tweedie Distribution\n",
" r\"\"\" Tweedie Distribution\n",
"\n",
" The Tweedie distribution is a compound probability, special case of exponential\n",
" dispersion models EDMs defined by its mean-variance relationship.\n",
@@ -2629,7 +2629,7 @@
" y: torch.Tensor,\n",
" distr_args: torch.Tensor,\n",
" mask: Union[torch.Tensor, None] = None):\n",
" \"\"\"\n",
" r\"\"\"\n",
" Computes the negative log-likelihood objective function. \n",
" To estimate the following predictive distribution:\n",
"\n",
@@ -2753,7 +2753,7 @@
"source": [
"#| export\n",
"class PMM(torch.nn.Module):\n",
" \"\"\" Poisson Mixture Mesh\n",
" r\"\"\" Poisson Mixture Mesh\n",
"\n",
" This Poisson Mixture statistical model assumes independence across groups of \n",
" data $\\mathcal{G}=\\{[g_{i}]\\}$, and estimates relationships within the group.\n",
@@ -2928,7 +2928,7 @@
" y: torch.Tensor,\n",
" distr_args: torch.Tensor,\n",
" mask: Union[torch.Tensor, None] = None):\n",
" \"\"\"\n",
" r\"\"\"\n",
" Computes the negative log-likelihood objective function. \n",
" To estimate the following predictive distribution:\n",
"\n",
@@ -3104,7 +3104,7 @@
"source": [
"#| export\n",
"class GMM(torch.nn.Module):\n",
" \"\"\" Gaussian Mixture Mesh\n",
" r\"\"\" Gaussian Mixture Mesh\n",
"\n",
" This Gaussian Mixture statistical model assumes independence across groups of \n",
" data $\\mathcal{G}=\\{[g_{i}]\\}$, and estimates relationships within the group.\n",
@@ -3283,7 +3283,7 @@
" y: torch.Tensor,\n",
" distr_args: torch.Tensor,\n",
" mask: Union[torch.Tensor, None] = None):\n",
" \"\"\"\n",
" r\"\"\"\n",
" Computes the negative log-likelihood objective function. \n",
" To estimate the following predictive distribution:\n",
"\n",
@@ -3460,7 +3460,7 @@
"source": [
"#| export\n",
"class NBMM(torch.nn.Module):\n",
" \"\"\" Negative Binomial Mixture Mesh\n",
" r\"\"\" Negative Binomial Mixture Mesh\n",
"\n",
" This N. Binomial Mixture statistical model assumes independence across groups of \n",
" data $\\mathcal{G}=\\{[g_{i}]\\}$, and estimates relationships within the group.\n",
@@ -3641,7 +3641,7 @@
" y: torch.Tensor,\n",
" distr_args: torch.Tensor,\n",
" mask: Union[torch.Tensor, None] = None):\n",
" \"\"\"\n",
" r\"\"\"\n",
" Computes the negative log-likelihood objective function. \n",
" To estimate the following predictive distribution:\n",
"\n",
@@ -3790,7 +3790,7 @@
"source": [
"#| export\n",
"class HuberLoss(BasePointLoss):\n",
" \"\"\" Huber Loss\n",
" r\"\"\" Huber Loss\n",
"\n",
" The Huber loss, employed in robust regression, is a loss function that \n",
" exhibits reduced sensitivity to outliers in data when compared to the \n",
@@ -3887,7 +3887,7 @@
"source": [
"#| export\n",
"class TukeyLoss(BasePointLoss):\n",
" \"\"\" Tukey Loss\n",
" r\"\"\" Tukey Loss\n",
"\n",
" The Tukey loss function, also known as Tukey's biweight function, is a \n",
" robust statistical loss function used in robust statistics. Tukey's loss exhibits\n",
@@ -4019,7 +4019,7 @@
"source": [
"#| export\n",
"class HuberQLoss(BasePointLoss):\n",
" \"\"\" Huberized Quantile Loss\n",
" r\"\"\" Huberized Quantile Loss\n",
"\n",
" The Huberized quantile loss is a modified version of the quantile loss function that\n",
" combines the advantages of the quantile loss and the Huber loss. It is commonly used\n",
@@ -4125,7 +4125,7 @@
"source": [
"#| export\n",
"class HuberMQLoss(BasePointLoss):\n",
" \"\"\" Huberized Multi-Quantile loss\n",
" r\"\"\" Huberized Multi-Quantile loss\n",
"\n",
" The Huberized Multi-Quantile loss (HuberMQL) is a modified version of the multi-quantile loss function \n",
" that combines the advantages of the quantile loss and the Huber loss. HuberMQL is commonly used in regression \n",
@@ -4448,7 +4448,7 @@
"source": [
"#| export\n",
"class Accuracy(BasePointLoss):\n",
" \"\"\" Accuracy\n",
" r\"\"\" Accuracy\n",
"\n",
" Computes the accuracy between categorical `y` and `y_hat`.\n",
" This evaluation metric is only meant for evalution, as it\n",
@@ -4535,7 +4535,7 @@
"source": [
"#| export\n",
"class sCRPS(BasePointLoss):\n",
" \"\"\"Scaled Continues Ranked Probability Score\n",
" r\"\"\"Scaled Continues Ranked Probability Score\n",
"\n",
" Calculates a scaled variation of the CRPS, as proposed by Rangapuram (2021),\n",
" to measure the accuracy of predicted quantiles `y_hat` compared to the observation `y`.\n",
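The losses touched in this file are standard point and (multi-)quantile objectives. For reference, a minimal pinball-loss sketch matching the behavior the `QuantileLoss` docstring describes; the class's actual signature and reduction may differ:

```python
import torch

def quantile_loss_sketch(y, y_hat, q=0.5):
    # Pinball loss: under-forecasts are penalized by q, over-forecasts by (1 - q).
    delta = y - y_hat
    return torch.maximum(q * delta, (q - 1) * delta).mean()

y = torch.tensor([10.0, 12.0, 9.0])
y_hat = torch.tensor([11.0, 11.0, 9.0])
print(quantile_loss_sketch(y, y_hat, q=0.9))
```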
6 changes: 3 additions & 3 deletions nbs/models.hint.ipynb
@@ -83,7 +83,7 @@
"source": [
"#| export\n",
"def get_bottomup_P(S: np.ndarray):\n",
" \"\"\"BottomUp Reconciliation Matrix.\n",
" r\"\"\"BottomUp Reconciliation Matrix.\n",
"\n",
" Creates BottomUp hierarchical \\\"projection\\\" matrix is defined as:\n",
" $$\\mathbf{P}_{\\\\text{BU}} = [\\mathbf{0}_{\\mathrm{[b],[a]}}\\;|\\;\\mathbf{I}_{\\mathrm{[b][b]}}]$$ \n",
@@ -106,7 +106,7 @@
" return P\n",
"\n",
"def get_mintrace_ols_P(S: np.ndarray):\n",
" \"\"\"MinTraceOLS Reconciliation Matrix.\n",
" r\"\"\"MinTraceOLS Reconciliation Matrix.\n",
"\n",
" Creates MinTraceOLS reconciliation matrix as proposed by Wickramasuriya et al.\n",
"\n",
@@ -137,7 +137,7 @@
" return P\n",
"\n",
"def get_mintrace_wls_P(S: np.ndarray):\n",
" \"\"\"MinTraceOLS Reconciliation Matrix.\n",
" r\"\"\"MinTraceOLS Reconciliation Matrix.\n",
"\n",
" Creates MinTraceOLS reconciliation matrix as proposed by Wickramasuriya et al.\n",
" Depending on a weighted GLS estimator and an estimator of the covariance matrix of the coherency errors $\\mathbf{W}_{h}$.\n",
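The `get_bottomup_P` docstring defines $\mathbf{P}_{\text{BU}} = [\mathbf{0}_{[b],[a]}\;|\;\mathbf{I}_{[b][b]}]$. A hedged numpy sketch, assuming `S` stacks the aggregate rows above the bottom-level rows:

```python
import numpy as np

def get_bottomup_P_sketch(S: np.ndarray) -> np.ndarray:
    # S is [n_total, n_bottom]: zero columns for the aggregates,
    # identity over the bottom-level series.
    n_total, n_bottom = S.shape
    n_agg = n_total - n_bottom
    return np.hstack([np.zeros((n_bottom, n_agg)), np.eye(n_bottom)])

# Two bottom series and their total:
S = np.array([[1, 1], [1, 0], [0, 1]])
print(get_bottomup_P_sketch(S))  # [[0. 1. 0.], [0. 0. 1.]]
```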
2 changes: 1 addition & 1 deletion neuralforecast/common/_modules.py
@@ -82,7 +82,7 @@ def forward(self, x):


class CausalConv1d(nn.Module):
"""Causal Convolution 1d
r"""Causal Convolution 1d

Receives `x` input of dim [N,C_in,T], and computes a causal convolution
in the time dimension. Skipping the H steps of the forecast horizon, through
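For context, the `CausalConv1d` docstring describes a convolution over `[N, C_in, T]` inputs that never looks into the future. A hedged sketch of the left-padding idea only; the library's class also handles the forecast-horizon skipping mentioned in the docstring, which is omitted here:

```python
import torch
import torch.nn as nn

class CausalConv1dSketch(nn.Module):
    def __init__(self, c_in, c_out, kernel_size, dilation=1):
        super().__init__()
        self.pad = (kernel_size - 1) * dilation  # pad the past only
        self.conv = nn.Conv1d(c_in, c_out, kernel_size, dilation=dilation)

    def forward(self, x):                        # x: [N, C_in, T]
        x = nn.functional.pad(x, (self.pad, 0))  # left padding, no future leak
        return self.conv(x)                      # [N, C_out, T]

y = CausalConv1dSketch(3, 8, kernel_size=4)(torch.randn(2, 3, 16))
print(y.shape)  # torch.Size([2, 8, 16])
```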
14 changes: 8 additions & 6 deletions neuralforecast/common/_scalers.py
@@ -1,3 +1,5 @@
"""Temporal normalization has proven to be essential in neural forecasting tasks, as it enables network's non-linearities to express themselves. Forecasting scaling methods take particular interest in the temporal dimension where most of the variance dwells, contrary to other deep learning techniques like `BatchNorm` that normalizes across batch and temporal dimensions, and `LayerNorm` that normalizes across the feature dimension. Currently we support the following techniques: `std`, `median`, `norm`, `norm1`, `invariant`, `revin`."""

# AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/common.scalers.ipynb.

# %% auto 0
@@ -56,7 +58,7 @@ def masked_mean(x, mask, dim=-1, keepdim=True):

# %% ../../nbs/common.scalers.ipynb 14
def minmax_statistics(x, mask, eps=1e-6, dim=-1):
"""MinMax Scaler
r"""MinMax Scaler

Standardizes temporal features by ensuring its range dweels between
[0,1] range. This transformation is often used as an alternative
@@ -106,7 +108,7 @@ def inv_minmax_scaler(z, x_min, x_range):

# %% ../../nbs/common.scalers.ipynb 17
def minmax1_statistics(x, mask, eps=1e-6, dim=-1):
"""MinMax1 Scaler
r"""MinMax1 Scaler

Standardizes temporal features by ensuring its range dweels between
[-1,1] range. This transformation is often used as an alternative
@@ -158,7 +160,7 @@ def inv_minmax1_scaler(z, x_min, x_range):

# %% ../../nbs/common.scalers.ipynb 20
def std_statistics(x, mask, dim=-1, eps=1e-6):
"""Standard Scaler
r"""Standard Scaler

Standardizes features by removing the mean and scaling
to unit variance along the `dim` dimension.
@@ -196,7 +198,7 @@ def inv_std_scaler(z, x_mean, x_std):

# %% ../../nbs/common.scalers.ipynb 23
def robust_statistics(x, mask, dim=-1, eps=1e-6):
"""Robust Median Scaler
r"""Robust Median Scaler

Standardizes features by removing the median and scaling
with the mean absolute deviation (mad) a robust estimator of variance.
@@ -246,7 +248,7 @@ def inv_robust_scaler(z, x_median, x_mad):

# %% ../../nbs/common.scalers.ipynb 26
def invariant_statistics(x, mask, dim=-1, eps=1e-6):
"""Invariant Median Scaler
r"""Invariant Median Scaler

Standardizes features by removing the median and scaling
with the mean absolute deviation (mad) a robust estimator of variance.
@@ -328,7 +330,7 @@ def inv_identity_scaler(z, x_shift, x_scale):

# %% ../../nbs/common.scalers.ipynb 33
class TemporalNorm(nn.Module):
"""Temporal Normalization
r"""Temporal Normalization

Standardization of the features is a common requirement for many
machine learning estimators, and it is commonly achieved by removing
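The module docstring added at the top of `_scalers.py` lists the supported scaler types. As an illustration of the simplest one, `std`, a self-contained masked standardization sketch; the names and tensor layout here are assumptions, not the module's verified API:

```python
import torch

def std_statistics_sketch(x, mask, dim=-1, eps=1e-6):
    # Masked mean and standard deviation along the time dimension.
    count = mask.sum(dim=dim, keepdim=True).clamp_min(1.0)
    x_mean = (x * mask).sum(dim=dim, keepdim=True) / count
    x_var = ((x - x_mean) ** 2 * mask).sum(dim=dim, keepdim=True) / count
    return x_mean, x_var.sqrt() + eps

x = torch.randn(4, 2, 24)  # assumed [batch, feature, time]
mask = torch.ones_like(x)
mean, std = std_statistics_sketch(x, mask)
z = (x - mean) / std       # standardized per series along time
```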