Skip to content

Commit

Permalink
Merge pull request #9 from torch-points3d/metrics
Browse files Browse the repository at this point in the history
Metrics
  • Loading branch information
humanpose1 authored Jul 29, 2021
2 parents 49052ce + 11e8a5e commit b29fc7d
Show file tree
Hide file tree
Showing 15 changed files with 443 additions and 42 deletions.
3 changes: 2 additions & 1 deletion conf/model/segmentation/default.yaml
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
# @package model
defaults:
- /model/default
- /tracker: segmentation/default

model:
_recursive_: false
Expand All @@ -11,4 +12,4 @@ model:

backbone:
input_nc: ${dataset.cfg.feature_dimension}
architecture: unet
architecture: unet
2 changes: 2 additions & 0 deletions conf/tracker/segmentation/default.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
_target_: torch_points3d.metrics.segmentation.segmentation_tracker.SegmentationTracker
num_classes: ${dataset.cfg.num_classes}
Empty file added test/__init__.py
Empty file.
59 changes: 59 additions & 0 deletions test/test_confusion_matrix.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
import torch
import os
import sys
import unittest
import pytest
import numpy as np


DIR = os.path.dirname(os.path.realpath(__file__))
ROOT = os.path.join(DIR, "..")
sys.path.insert(0, ROOT)
sys.path.append('.')

from torch_points3d.metrics.segmentation.metrics import compute_intersection_union_per_class
from torch_points3d.metrics.segmentation.metrics import compute_average_intersection_union
from torch_points3d.metrics.segmentation.metrics import compute_overall_accuracy
from torch_points3d.metrics.segmentation.metrics import compute_mean_class_accuracy



def test_compute_intersection_union_per_class():
matrix = torch.tensor([[4, 1], [2, 10]])
iou, _ = compute_intersection_union_per_class(matrix)
miou = compute_average_intersection_union(matrix)
np.testing.assert_allclose(iou[0].item(), 4 / (4.0 + 1.0 + 2.0))
np.testing.assert_allclose(iou[1].item(), 10 / (10.0 + 1.0 + 2.0))
np.testing.assert_allclose(iou.mean().item(), miou.item())

def test_compute_overall_accuracy():
list_matrix = [
torch.tensor([[4, 1], [2, 10]]).float(),
torch.tensor([[4, 1], [2, 10]]).int(),
torch.tensor([[0, 0], [0, 0]]).float()
]
list_answer = [
(4.0+10.0)/(4.0 + 10.0 + 1.0 +2.0),
(4.0+10.0)/(4.0 + 10.0 + 1.0 +2.0),
0.0
]
for i in range(len(list_matrix)):
acc = compute_overall_accuracy(list_matrix[i])
if(isinstance(acc, torch.Tensor)):
np.testing.assert_allclose(acc.item(), list_answer[i])
else:
np.testing.assert_allclose(acc, list_answer[i])


def test_compute_mean_class_accuracy():
matrix = torch.tensor([[4, 1], [2, 10]]).float()
macc = compute_mean_class_accuracy(matrix)
np.testing.assert_allclose(macc.item(), (4/5 + 10/12)*0.5)



@pytest.mark.parametrize("missing_as_one, answer", [pytest.param(False, (0.5 + 0.5) / 2), pytest.param(True, (0.5 + 1 + 0.5) / 3)])
def test_test_getMeanIoUMissing(missing_as_one, answer):
matrix = torch.tensor([[1, 1, 0], [0, 1, 0], [0, 0, 0]])
np.testing.assert_allclose(compute_average_intersection_union(matrix, missing_as_one=missing_as_one).item(), answer)

38 changes: 18 additions & 20 deletions test/test_model.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
import unittest
import pytest
import sys
import os
import torch
Expand All @@ -9,27 +9,25 @@
DIR = os.path.dirname(os.path.realpath(__file__))
ROOT = os.path.join(DIR, "..")
sys.path.insert(0, ROOT)
sys.path.append(".")

from torch_points3d.models.segmentation.sparseconv3d import APIModel
from torch_points3d.models.segmentation.base_model import SegmentationBaseModel
from torch_points3d.core.instantiator import HydraInstantiator


class TestAPIModel(unittest.TestCase):
def test_forward(self):
option_dataset = OmegaConf.create({"feature_dimension": 1, "num_classes": 10})
@pytest.mark.skip("For now we skip the tests...")
def test_forward(self):
option_dataset = OmegaConf.create({"feature_dimension": 1, "num_classes": 10})
option_criterion = OmegaConf.create({"_target_": "torch.nn.NLLLoss"})
instantiator = HydraInstantiator()

option = OmegaConf.load(os.path.join(ROOT, "conf", "models", "segmentation", "sparseconv3d.yaml"))
name_model = list(option.keys())[0]
model = APIModel(option[name_model], option_dataset)
model = SegmentationBaseModel(instantiator, 10, option_backbone, option_criterion)

pos = torch.randn(1000, 3)
coords = torch.round(pos * 10000)
x = torch.ones(1000, 1)
batch = torch.zeros(1000).long()
y = torch.randint(0, 10, (1000,))
data = Batch(pos=pos, x=x, batch=batch, y=y, coords=coords)
model.set_input(data)
model.forward()


if __name__ == "__main__":
unittest.main()
pos = torch.randn(1000, 3)
coords = torch.round(pos * 10000)
x = torch.ones(1000, 6)
batch = torch.zeros(1000).long()
y = torch.randint(0, 10, (1000,))
data = Batch(pos=pos, x=x, batch=batch, y=y, coords=coords)
model.set_input(data)
model.forward()
120 changes: 120 additions & 0 deletions test/test_segmentation_tracker.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,120 @@
import numpy as np
import torch
import sys
import os

import pytest


from torch_geometric.data import Data

DIR = os.path.dirname(os.path.realpath(__file__))
ROOT = os.path.join(DIR, "..")
sys.path.insert(0, ROOT)
sys.path.append(".")

from torch_points3d.metrics.segmentation.segmentation_tracker import SegmentationTracker


class MockDataset:
INV_OBJECT_LABEL = {0: "first", 1: "wall", 2: "not", 3: "here", 4: "hoy"}
pos = torch.tensor([[1, 0, 0], [2, 0, 0], [3, 0, 0], [-1, 0, 0]]).float()
test_label = torch.tensor([1, 1, 0, 0])

def __init__(self):
self.num_classes = 2

@property
def test_data(self):
return Data(pos=self.pos, y=self.test_label)

def has_labels(self, stage):
return True


class MockModel:
def __init__(self):
self.iter = 0
self.losses = [
{"loss_1": 1, "loss_2": 2},
{"loss_1": 2, "loss_2": 2},
{"loss_1": 1, "loss_2": 2},
{"loss_1": 1, "loss_2": 2},
]
self.outputs = [
torch.tensor([[0, 1], [0, 1]]),
torch.tensor([[1, 0], [1, 0]]),
torch.tensor([[1, 0], [1, 0]]),
torch.tensor([[1, 0], [1, 0], [1, 0]]),
]
self.labels = [torch.tensor([1, 1]), torch.tensor([1, 1]), torch.tensor([1, 1]), torch.tensor([0, 0, -100])]
self.batch_idx = [torch.tensor([0, 1]), torch.tensor([0, 1]), torch.tensor([0, 1]), torch.tensor([0, 0, 1])]

def get_input(self):
return Data(pos=MockDataset.pos[:2, :], origin_id=torch.tensor([0, 1]))

def get_output(self):
return self.outputs[self.iter].float()

def get_labels(self):
return self.labels[self.iter]

def get_current_losses(self):
return self.losses[self.iter]

def get_batch(self):
return self.batch_idx[self.iter]

@property
def device(self):
return "cpu"


def test_forward():
tracker = SegmentationTracker(num_classes=2, stage="train")
model = MockModel()
output = {"preds": model.get_output(), "labels": model.get_labels()}
losses = model.get_current_losses()
metrics = tracker(output, losses)
# metrics = tracker.get_metrics()

for k in ["train_acc", "train_miou", "train_macc"]:
np.testing.assert_allclose(metrics[k], 100, rtol=1e-5)
model.iter += 1
output = {"preds": model.get_output(), "labels": model.get_labels()}
losses = model.get_current_losses()
metrics = tracker(output, losses)
# metrics = tracker.get_metrics()
metrics = tracker.finalise()
for k in ["train_acc", "train_macc"]:
assert metrics[k] == 50
np.testing.assert_allclose(metrics["train_miou"], 25, atol=1e-5)
assert metrics["train_loss_1"] == 1.5

tracker.reset("test")
model.iter += 1
output = {"preds": model.get_output(), "labels": model.get_labels()}
losses = model.get_current_losses()
metrics = tracker(output, losses)
# metrics = tracker.get_metrics()
for name in ["test_acc", "test_miou", "test_macc"]:
np.testing.assert_allclose(metrics[name].item(), 0, atol=1e-5)


@pytest.mark.parametrize("finalise", [pytest.param(True), pytest.param(False)])
def test_ignore_label(finalise):
tracker = SegmentationTracker(num_classes=2, ignore_label=-100)
tracker.reset("test")
model = MockModel()
model.iter = 3
output = {"preds": model.get_output(), "labels": model.get_labels()}
losses = model.get_current_losses()
metrics = tracker(output, losses)
if not finalise:
# metrics = tracker.get_metrics()
for k in ["test_acc", "test_miou", "test_macc"]:
np.testing.assert_allclose(metrics[k], 100)
else:
tracker.finalise()
with pytest.raises(RuntimeError):
tracker(output)
3 changes: 3 additions & 0 deletions torch_points3d/core/instantiator.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,9 @@ def litmodel(self, cfg: DictConfig) -> "PointCloudBaseModule":
def model(self, cfg: DictConfig) -> "PointCloudBaseModel":
return self.instantiate(cfg, self)

def tracker(self, cfg: DictConfig, stage: str = ""):
return self.instantiate(cfg, stage=stage)

def backbone(self, cfg: DictConfig):
return self.instantiate(cfg)

Expand Down
Empty file.
59 changes: 59 additions & 0 deletions torch_points3d/metrics/base_tracker.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
from typing import Any, Dict, Optional
import torch
from torch import nn
from torchmetrics import AverageMeter


class BaseTracker(nn.Module):
"""
pytorch Module to manage the losses and the metrics
"""

def __init__(self, stage: str = "train"):
super().__init__()
self.stage: str = stage
self._finalised: bool = False
self.loss_metrics: nn.ModuleDict = nn.ModuleDict()

def track(self, output_model, *args, **kwargs) -> Dict[str, Any]:
raise NotImplementedError

def track_loss(self, losses: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
out_loss = dict()
for key, loss in losses.items():
loss_key = f"{self.stage}_{key}"
if loss_key not in self.loss_metrics.keys():
self.loss_metrics[loss_key] = AverageMeter().to(loss)
val = self.loss_metrics[loss_key](loss)
out_loss[loss_key] = val
return out_loss

def forward(
self, output_model: Dict[str, Any], losses: Optional[Dict[str, torch.Tensor]] = None, *args, **kwargs
) -> Dict[str, Any]:
if self._finalised:
raise RuntimeError("Cannot track new values with a finalised tracker, you need to reset it first")
tracked_metric = self.track(output_model, *args, **kwargs)
if losses is not None:
tracked_loss = self.track_loss(losses)
tracked_results = dict(**tracked_loss, **tracked_metric)
else:
tracked_results = tracked_metric
return tracked_results

def _finalise(self) -> Dict[str, Any]:
raise NotImplementedError("method that aggregae metrics")

def finalise(self) -> Dict[str, Any]:
metrics = self._finalise()
self._finalised = True
loss_metrics = self.get_final_loss_metrics()
final_metrics = {**loss_metrics, **metrics}
return final_metrics

def get_final_loss_metrics(self):
metrics = dict()
for key, m in self.loss_metrics.items():
metrics[key] = m.compute()
self.loss_metrics = nn.ModuleDict()
return metrics
Empty file.
Loading

0 comments on commit b29fc7d

Please sign in to comment.