From 30360b23844c42c0d4114fc2207647601cf20b3d Mon Sep 17 00:00:00 2001
From: Zekrom_7780
Date: Sat, 11 Nov 2023 17:06:00 +0530
Subject: [PATCH 1/8] ECE_added

---
 .../regression/ExpectedCalibrationError.py    | 53 +++++++++++++++++++
 1 file changed, 53 insertions(+)
 create mode 100644 ignite/contrib/metrics/regression/ExpectedCalibrationError.py

diff --git a/ignite/contrib/metrics/regression/ExpectedCalibrationError.py b/ignite/contrib/metrics/regression/ExpectedCalibrationError.py
new file mode 100644
index 000000000000..c8b672ad0844
--- /dev/null
+++ b/ignite/contrib/metrics/regression/ExpectedCalibrationError.py
@@ -0,0 +1,53 @@
+import torch
+from ignite.metrics import Metric
+from ignite.exceptions import NotComputableError
+
+class ExpectedCalibrationError(Metric):
+    def __init__(self, num_bins=10, device=None):
+        super(ExpectedCalibrationError, self).__init__()
+        self.num_bins = num_bins
+        self.device = device
+        self.reset()
+
+    def reset(self):
+        self.confidences = torch.tensor([], device=self.device)
+        self.corrects = torch.tensor([], device=self.device)
+
+    def update(self, output):
+        y_pred, y = output
+
+        assert y_pred.dim() == 2 and y_pred.shape[1] == 2, "This metric is for binary classification."
+
+        softmax_probs = torch.softmax(y_pred, dim=1)
+        max_probs, predicted_class = torch.max(softmax_probs, dim=1)
+
+        self.confidences = torch.cat((self.confidences, max_probs))
+        self.corrects = torch.cat((self.corrects, predicted_class == y))
+
+    def compute(self):
+        if self.confidences.numel() == 0:
+            raise NotComputableError("ExpectedCalibrationError must have at least one example before it can be computed.")
+
+        bin_edges = torch.linspace(0, 1, self.num_bins + 1, device=self.device)
+
+        bin_indices = torch.searchsorted(bin_edges, self.confidences)
+
+        ece = 0.0
+        bin_sizes = torch.zeros(self.num_bins, device=self.device)
+        bin_accuracies = torch.zeros(self.num_bins, device=self.device)
+
+        for i in range(self.num_bins):
+            mask = bin_indices == i
+            bin_confidences = self.confidences[mask]
+            bin_corrects = self.corrects[mask]
+
+            accuracy = torch.mean(bin_corrects)
+
+            avg_confidence = torch.mean(bin_confidences)
+
+            bin_size = bin_confidences.numel()
+            ece += (bin_size / len(self.confidences)) * abs(accuracy - avg_confidence)
+            bin_sizes[i] = bin_size
+            bin_accuracies[i] = accuracy
+
+        return ece
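A note on what compute() approximates: expected calibration error partitions the N stored predictions into equal-width confidence bins B_i and sums the per-bin gap between accuracy and mean confidence, weighted by bin size: ECE = sum_i (|B_i| / N) * |acc(B_i) - conf(B_i)|. Below is a minimal standalone sketch of that formula for reference; it is not this patch's code. It bins by integer division rather than torch.searchsorted (which maps confidences in (0, 1] to indices 1..num_bins, so the loop above over range(self.num_bins) never visits the top bin), and it skips empty bins, whose torch.mean would otherwise propagate NaN into the sum.

    import torch

    def ece_reference(confidences: torch.Tensor, corrects: torch.Tensor, num_bins: int = 10) -> float:
        # ECE = sum_i (|B_i| / N) * |acc(B_i) - conf(B_i)| over equal-width bins.
        corrects = corrects.float()
        # floor(c * num_bins) puts c in [i/num_bins, (i+1)/num_bins); the clamp sends c == 1.0 to the top bin.
        bin_ids = torch.clamp((confidences * num_bins).long(), max=num_bins - 1)
        n = confidences.numel()
        ece = 0.0
        for i in range(num_bins):
            mask = bin_ids == i
            n_i = int(mask.sum())
            if n_i == 0:
                continue  # empty bins contribute nothing; torch.mean over them would be NaN
            acc = corrects[mask].mean().item()
            conf = confidences[mask].mean().item()
            ece += (n_i / n) * abs(acc - conf)
        return ece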
From 82a647ef4dfd6d7d6cfa52b9f1b8a3130544f3d9 Mon Sep 17 00:00:00 2001
From: Zekrom_7780
Date: Sat, 11 Nov 2023 17:07:46 +0530
Subject: [PATCH 2/8] Changed_location

---
 .../contrib/metrics/{regression => }/ExpectedCalibrationError.py | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename ignite/contrib/metrics/{regression => }/ExpectedCalibrationError.py (100%)

diff --git a/ignite/contrib/metrics/regression/ExpectedCalibrationError.py b/ignite/contrib/metrics/ExpectedCalibrationError.py
similarity index 100%
rename from ignite/contrib/metrics/regression/ExpectedCalibrationError.py
rename to ignite/contrib/metrics/ExpectedCalibrationError.py

From 8d626dd7764ef6dfcd2ce9bb6929e67787176f18 Mon Sep 17 00:00:00 2001
From: Zekrom_7780
Date: Sat, 11 Nov 2023 17:09:42 +0530
Subject: [PATCH 3/8] Ran run_code_style.sh fmt

---
 ignite/contrib/metrics/ExpectedCalibrationError.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/ignite/contrib/metrics/ExpectedCalibrationError.py b/ignite/contrib/metrics/ExpectedCalibrationError.py
index c8b672ad0844..f3d72d8a9c5c 100644
--- a/ignite/contrib/metrics/ExpectedCalibrationError.py
+++ b/ignite/contrib/metrics/ExpectedCalibrationError.py
@@ -1,6 +1,8 @@
 import torch
-from ignite.metrics import Metric
+
 from ignite.exceptions import NotComputableError
+from ignite.metrics import Metric
+
 
 class ExpectedCalibrationError(Metric):
     def __init__(self, num_bins=10, device=None):
@@ -26,7 +28,9 @@ def update(self, output):
 
     def compute(self):
         if self.confidences.numel() == 0:
-            raise NotComputableError("ExpectedCalibrationError must have at least one example before it can be computed.")
+            raise NotComputableError(
+                "ExpectedCalibrationError must have at least one example before it can be computed."
+            )
 
         bin_edges = torch.linspace(0, 1, self.num_bins + 1, device=self.device)

From 2233e13752cf68f92712e0389b97b4ee54806986 Mon Sep 17 00:00:00 2001
From: Zekrom_7780
Date: Tue, 14 Nov 2023 00:23:47 +0530
Subject: [PATCH 4/8] Made_changes

---
 .../metrics/ExpectedCalibrationError.py       | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)
 rename ignite/{contrib => }/metrics/ExpectedCalibrationError.py (84%)

diff --git a/ignite/contrib/metrics/ExpectedCalibrationError.py b/ignite/metrics/ExpectedCalibrationError.py
similarity index 84%
rename from ignite/contrib/metrics/ExpectedCalibrationError.py
rename to ignite/metrics/ExpectedCalibrationError.py
index f3d72d8a9c5c..800ad9586691 100644
--- a/ignite/contrib/metrics/ExpectedCalibrationError.py
+++ b/ignite/metrics/ExpectedCalibrationError.py
@@ -1,8 +1,6 @@
 import torch
-
-from ignite.exceptions import NotComputableError
 from ignite.metrics import Metric
-
+from ignite.exceptions import NotComputableError
 
 class ExpectedCalibrationError(Metric):
     def __init__(self, num_bins=10, device=None):
@@ -18,7 +16,10 @@ def reset(self):
     def update(self, output):
         y_pred, y = output
 
-        assert y_pred.dim() == 2 and y_pred.shape[1] == 2, "This metric is for binary classification."
+        if not (y_pred.dim() == 2 and y_pred.shape[1] == 2):
+            raise ValueError("This metric is for binary classification.")
+
+        y_pred, y = y_pred.detach(), y.detach()
 
         softmax_probs = torch.softmax(y_pred, dim=1)
         max_probs, predicted_class = torch.max(softmax_probs, dim=1)
@@ -28,9 +29,7 @@ def update(self, output):
 
     def compute(self):
         if self.confidences.numel() == 0:
-            raise NotComputableError(
-                "ExpectedCalibrationError must have at least one example before it can be computed."
-            )
+            raise NotComputableError("ExpectedCalibrationError must have at least one example before it can be computed.")
 
         bin_edges = torch.linspace(0, 1, self.num_bins + 1, device=self.device)
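An aside on the .detach() added in PATCH 4: the metric accumulates model outputs into ever-growing buffers, and storing attached tensors would keep every batch's autograd graph alive for the lifetime of those buffers. A tiny illustration with toy tensors (not the patch's code):

    import torch

    y_pred = torch.randn(4, 2, requires_grad=True)   # stand-in for a model output
    probs = torch.softmax(y_pred, dim=1)
    print(probs.requires_grad)            # True: still attached to the autograd graph
    print(probs.detach().requires_grad)   # False: safe to accumulate across iterations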
From 1849bcfbd2822405099cf39dd50d00d0d33db790 Mon Sep 17 00:00:00 2001
From: Zekrom_7780
Date: Tue, 14 Nov 2023 00:26:39 +0530
Subject: [PATCH 5/8] Ran run_code_style.sh fmt

---
 ignite/metrics/ExpectedCalibrationError.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/ignite/metrics/ExpectedCalibrationError.py b/ignite/metrics/ExpectedCalibrationError.py
index 800ad9586691..3e1dcc8affd4 100644
--- a/ignite/metrics/ExpectedCalibrationError.py
+++ b/ignite/metrics/ExpectedCalibrationError.py
@@ -1,6 +1,8 @@
 import torch
-from ignite.metrics import Metric
+
 from ignite.exceptions import NotComputableError
+from ignite.metrics import Metric
+
 
 class ExpectedCalibrationError(Metric):
     def __init__(self, num_bins=10, device=None):
@@ -29,7 +31,9 @@ def update(self, output):
 
     def compute(self):
         if self.confidences.numel() == 0:
-            raise NotComputableError("ExpectedCalibrationError must have at least one example before it can be computed.")
+            raise NotComputableError(
+                "ExpectedCalibrationError must have at least one example before it can be computed."
+            )
 
         bin_edges = torch.linspace(0, 1, self.num_bins + 1, device=self.device)

From b9f948a76829f0294a1ebe15f1121625ac9fd8dd Mon Sep 17 00:00:00 2001
From: Zekrom_7780
Date: Wed, 22 Nov 2023 23:58:01 +0530
Subject: [PATCH 6/8] Added Multi_Class_ECE

---
 ignite/metrics/ExpectedCalibrationError.py | 32 ++++++++++++++++++----
 1 file changed, 26 insertions(+), 6 deletions(-)

diff --git a/ignite/metrics/ExpectedCalibrationError.py b/ignite/metrics/ExpectedCalibrationError.py
index 3e1dcc8affd4..fa34e6908b12 100644
--- a/ignite/metrics/ExpectedCalibrationError.py
+++ b/ignite/metrics/ExpectedCalibrationError.py
@@ -1,5 +1,4 @@
 import torch
-
 from ignite.exceptions import NotComputableError
 from ignite.metrics import Metric
 
@@ -15,12 +14,17 @@ def reset(self):
         self.confidences = torch.tensor([], device=self.device)
         self.corrects = torch.tensor([], device=self.device)
 
-    def update(self, output):
-        y_pred, y = output
+    def update_binary(self, y_pred, y):
+        y_pred, y = y_pred.detach().unsqueeze(1), y.detach()
 
-        if not (y_pred.dim() == 2 and y_pred.shape[1] == 2):
-            raise ValueError("This metric is for binary classification.")
+        softmax_probs = torch.sigmoid(y_pred)
+        max_probs = softmax_probs.squeeze()
+        predicted_class = torch.round(max_probs)
 
+        self.confidences = torch.cat((self.confidences, max_probs))
+        self.corrects = torch.cat((self.corrects, predicted_class == y))
+
+    def update_multi_class(self, y_pred, y):
         y_pred, y = y_pred.detach(), y.detach()
 
         softmax_probs = torch.softmax(y_pred, dim=1)
@@ -29,6 +33,23 @@ def update(self, output):
         self.confidences = torch.cat((self.confidences, max_probs))
         self.corrects = torch.cat((self.corrects, predicted_class == y))
 
+    def update(self, output):
+        y_pred, y = output
+
+        if y_pred.dim() == 2:
+            # Multi-class classification
+            if y_pred.shape[1] <= 1:
+                raise ValueError("Invalid number of classes for multi-class ECE computation.")
+
+            self.update_multi_class(y_pred, y)
+
+        elif y_pred.dim() == 1:
+            # Binary classification
+            self.update_binary(y_pred, y)
+
+        else:
+            raise ValueError("Invalid input dimensions for ECE computation.")
+
     def compute(self):
         if self.confidences.numel() == 0:
             raise NotComputableError(
@@ -49,7 +70,6 @@ def compute(self):
             bin_corrects = self.corrects[mask]
 
             accuracy = torch.mean(bin_corrects)
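A caveat on the update_binary path added in PATCH 6: it stores the sigmoid probability p as the confidence even when torch.round predicts class 0, in which case the model's confidence in its own prediction is 1 - p, not p. A sketch of the usual max(p, 1 - p) convention, illustrative only:

    import torch

    scores = torch.randn(8)                    # raw binary logits
    p = torch.sigmoid(scores)                  # probability of class 1
    predicted = (p >= 0.5).long()              # same decisions as torch.round(p)
    confidence = torch.where(predicted == 1, p, 1 - p)  # confidence in the predicted class
    print(bool((confidence >= 0.5).all()))     # True: max(p, 1 - p) is never below 0.5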
-
             avg_confidence = torch.mean(bin_confidences)
 
             bin_size = bin_confidences.numel()

From 467aed1965277a98c49316ac62b3aca4f75e0119 Mon Sep 17 00:00:00 2001
From: Zekrom-7780
Date: Wed, 22 Nov 2023 18:28:57 +0000
Subject: [PATCH 7/8] autopep8 fix

---
 ignite/metrics/ExpectedCalibrationError.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/ignite/metrics/ExpectedCalibrationError.py b/ignite/metrics/ExpectedCalibrationError.py
index fa34e6908b12..5f338528c453 100644
--- a/ignite/metrics/ExpectedCalibrationError.py
+++ b/ignite/metrics/ExpectedCalibrationError.py
@@ -1,4 +1,5 @@
 import torch
+
 from ignite.exceptions import NotComputableError
 from ignite.metrics import Metric
 
@@ -40,7 +41,7 @@ def update(self, output):
             # Multi-class classification
             if y_pred.shape[1] <= 1:
                 raise ValueError("Invalid number of classes for multi-class ECE computation.")
-
+
             self.update_multi_class(y_pred, y)
 
         elif y_pred.dim() == 1:

From 21a21b5d12c1ebe5a95dadb318d36de1081c6281 Mon Sep 17 00:00:00 2001
From: Zekrom_7780
Date: Thu, 23 Nov 2023 00:00:37 +0530
Subject: [PATCH 8/8] Ran bash ./tests/run_code_style.sh fmt

---
 ignite/metrics/ExpectedCalibrationError.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/ignite/metrics/ExpectedCalibrationError.py b/ignite/metrics/ExpectedCalibrationError.py
index fa34e6908b12..5f338528c453 100644
--- a/ignite/metrics/ExpectedCalibrationError.py
+++ b/ignite/metrics/ExpectedCalibrationError.py
@@ -1,4 +1,5 @@
 import torch
+
 from ignite.exceptions import NotComputableError
 from ignite.metrics import Metric
 
@@ -40,7 +41,7 @@ def update(self, output):
             # Multi-class classification
             if y_pred.shape[1] <= 1:
                 raise ValueError("Invalid number of classes for multi-class ECE computation.")
-
+
             self.update_multi_class(y_pred, y)
 
         elif y_pred.dim() == 1:
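To recap the interface after PATCH 8: update() takes a (y_pred, y) pair and routes 2-D inputs through softmax and 1-D inputs through sigmoid. Below is a self-contained sketch of the two call shapes with toy tensors; it mirrors the dispatch rather than invoking the class. One caveat for anyone smoke-testing the class live: num_bins and device are assigned after super().__init__(), and ignite's Metric.__init__ itself invokes reset(), so construction may fail until those assignments move above the super() call.

    import torch

    logits = torch.randn(32, 3)             # multi-class input: y_pred.dim() == 2
    targets = torch.randint(0, 3, (32,))
    scores = torch.randn(16)                # binary input: y_pred.dim() == 1
    labels = torch.randint(0, 2, (16,))

    for y_pred, y in ((logits, targets), (scores, labels)):
        if y_pred.dim() == 2:               # multi-class: softmax confidence of the argmax
            probs = torch.softmax(y_pred, dim=1)
            confidence, predicted = torch.max(probs, dim=1)
        else:                               # binary: sigmoid probability, rounded at 0.5
            confidence = torch.sigmoid(y_pred)
            predicted = torch.round(confidence)
        print(tuple(y_pred.shape), (predicted == y).float().mean().item())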