diff --git a/data/mtl_ds.py b/data/mtl_ds.py
index 80df3c6..670dc58 100644
--- a/data/mtl_ds.py
+++ b/data/mtl_ds.py
@@ -22,13 +22,12 @@ from utils import mkdir_if_missing
 from torch.utils.data import DataLoader
-from torch._six import string_classes
 from collections.abc import Mapping, Sequence
 from torchvision import transforms
 from easydict import EasyDict as edict
 from skimage.morphology import thin
-
+string_classes = (str,)
 int_classes = int
 _use_shared_memory = False
 r"""Whether to use shared memory in default_collate"""
diff --git a/evaluation/jaccard.py b/evaluation/jaccard.py
index a9b0b60..a36a3e3 100644
--- a/evaluation/jaccard.py
+++ b/evaluation/jaccard.py
@@ -17,9 +17,9 @@ def jaccard(gt, pred, void_pixels=None):
         void_pixels = np.zeros_like(gt)
     assert(void_pixels.shape == gt.shape)
 
-    gt = gt.astype(np.bool)
-    pred = pred.astype(np.bool)
-    void_pixels = void_pixels.astype(np.bool)
+    gt = gt.astype(bool)
+    pred = pred.astype(bool)
+    void_pixels = void_pixels.astype(bool)
 
     if np.isclose(np.sum(gt & np.logical_not(void_pixels)), 0) and np.isclose(np.sum(pred & np.logical_not(void_pixels)), 0):
         return 1
@@ -33,9 +33,9 @@ def precision_recall(gt, pred, void_pixels=None):
     if void_pixels is None:
         void_pixels = np.zeros_like(gt)
 
-    gt = gt.astype(np.bool)
-    pred = pred.astype(np.bool)
-    void_pixels = void_pixels.astype(np.bool)
+    gt = gt.astype(bool)
+    pred = pred.astype(bool)
+    void_pixels = void_pixels.astype(bool)
 
     tp = ((pred & gt) & ~void_pixels).sum()
     fn = ((~pred & gt) & ~void_pixels).sum()
diff --git a/main.py b/main.py
index 55e18a3..bc632bc 100644
--- a/main.py
+++ b/main.py
@@ -445,7 +445,7 @@ def train_one_epoch(config, model, criterion, data_loader, optimizer, epoch, mix
         if mixup_fn is not None:
             samples, targets = mixup_fn(samples, targets)
         teacher_outputs = None
-        with torch.cuda.amp.autocast(enabled=config.AMP_ENABLE):
+        with torch.amp.autocast('cuda', enabled=config.AMP_ENABLE):
             if config.TRAIN.CONTROLLERS_PRETRAIN or config.TRAIN.MTL_MULTI_OBJECTIVE_TRAIN:
                 outputs, NBs, NTs, nTsPerTask, nBsPerTask = model(
                     samples, return_activation_stats=True, task=task)
@@ -620,7 +620,7 @@ def flatten(l):
                 task_batch, dim=0) for task, task_batch in labels_batch.items()}
 
             # Measure performance
-            with torch.cuda.amp.autocast(enabled=config.AMP_ENABLE):
+            with torch.amp.autocast('cuda', enabled=config.AMP_ENABLE):
                 loss, loss_dict = criterion(
                     output_batch_tesnor, label_batch_tesnor)
                 loss_meter.update(loss.item())
@@ -638,7 +638,7 @@ def flatten(l):
                 task_batch, dim=0) for task, task_batch in labels_batch.items()}
 
             # Measure performance
-            with torch.cuda.amp.autocast(enabled=config.AMP_ENABLE):
+            with torch.amp.autocast('cuda', enabled=config.AMP_ENABLE):
                 loss, loss_dict = criterion(
                     output_batch_tesnor, label_batch_tesnor)
                 loss_meter.update(loss.item())
diff --git a/utils.py b/utils.py
index 458b405..9cd07de 100644
--- a/utils.py
+++ b/utils.py
@@ -8,7 +8,7 @@
 import os
 import torch
 import torch.distributed as dist
-from torch._six import inf
+from math import inf
 import errno
 
 from PIL import Image
@@ -235,7 +235,7 @@ class NativeScalerWithGradNormCount:
     state_dict_key = "amp_scaler"
 
     def __init__(self):
-        self._scaler = torch.cuda.amp.GradScaler()
+        self._scaler = torch.amp.GradScaler('cuda')
 
     def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):
         self._scaler.scale(loss).backward(create_graph=create_graph)
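
Note (not part of the patch): the AMP edits above move from the deprecated torch.cuda.amp namespace to the device-type-first torch.amp API. Below is a minimal sketch of the migrated pattern, assuming a CUDA-capable PyTorch build; the model, optimizer, and tensors are throwaway stand-ins, not objects from this repository.

import torch
import torch.nn.functional as F

# Hypothetical stand-ins for illustration only.
model = torch.nn.Linear(8, 2).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# torch.amp.GradScaler('cuda') replaces the deprecated torch.cuda.amp.GradScaler().
scaler = torch.amp.GradScaler('cuda')

x = torch.randn(4, 8, device='cuda')
target = torch.randint(0, 2, (4,), device='cuda')

# torch.amp.autocast('cuda', enabled=...) replaces torch.cuda.amp.autocast(enabled=...).
with torch.amp.autocast('cuda', enabled=True):
    loss = F.cross_entropy(model(x), target)

# Scaled backward pass and optimizer step, as in the repository's NativeScalerWithGradNormCount.
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()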