data/mtl_ds.py: 3 changes (1 addition, 2 deletions)
@@ -22,13 +22,12 @@

 from utils import mkdir_if_missing
 from torch.utils.data import DataLoader
-from torch._six import string_classes
 from collections.abc import Mapping, Sequence
 from torchvision import transforms
 from easydict import EasyDict as edict
 from skimage.morphology import thin
 
-
+string_classes = (str,)
 int_classes = int
 _use_shared_memory = False
 r"""Whether to use shared memory in default_collate"""
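The shim above replaces symbols that used to be re-exported from `torch._six`, which is gone in PyTorch 2.x. As a rough illustration of how such a tuple is consumed in a `default_collate`-style function, here is a minimal sketch; `collate_sketch` and its dispatch order are illustrative placeholders, not the repo's actual collate code.

```python
import torch
from collections.abc import Mapping

# Shim for symbols that used to live in torch._six (removed in PyTorch 2.x).
string_classes = (str,)
int_classes = int

def collate_sketch(batch):
    """Illustrative default_collate-style dispatch; not the repo's exact code."""
    elem = batch[0]
    if isinstance(elem, torch.Tensor):
        return torch.stack(batch, 0)
    if isinstance(elem, string_classes):   # strings stay as a plain list
        return batch
    if isinstance(elem, int_classes):      # plain ints become a tensor
        return torch.tensor(batch)
    if isinstance(elem, Mapping):          # dicts are collated per key
        return {key: collate_sketch([d[key] for d in batch]) for key in elem}
    raise TypeError(f"unsupported batch element type: {type(elem)}")

print(collate_sketch([{"a": 1}, {"a": 2}]))  # {'a': tensor([1, 2])}
```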
evaluation/jaccard.py: 12 changes (6 additions, 6 deletions)
@@ -17,9 +17,9 @@ def jaccard(gt, pred, void_pixels=None):
         void_pixels = np.zeros_like(gt)
     assert(void_pixels.shape == gt.shape)
 
-    gt = gt.astype(np.bool)
-    pred = pred.astype(np.bool)
-    void_pixels = void_pixels.astype(np.bool)
+    gt = gt.astype(bool)
+    pred = pred.astype(bool)
+    void_pixels = void_pixels.astype(bool)
     if np.isclose(np.sum(gt & np.logical_not(void_pixels)), 0) and np.isclose(np.sum(pred & np.logical_not(void_pixels)), 0):
         return 1
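`np.bool` was deprecated in NumPy 1.20 and removed in 1.24, so the plain builtin `bool` (or `np.bool_`) is the drop-in replacement. Below is a minimal standalone sketch of the IoU computation the patched function performs, under that replacement; `jaccard_sketch` is a hypothetical helper, not the repo's exact code.

```python
import numpy as np

def jaccard_sketch(gt, pred, void_pixels=None):
    """Minimal IoU sketch mirroring the patched function; illustrative only."""
    if void_pixels is None:
        void_pixels = np.zeros_like(gt)
    gt = gt.astype(bool)        # np.bool is gone in NumPy 1.24+; use the builtin
    pred = pred.astype(bool)
    valid = ~void_pixels.astype(bool)
    inter = np.sum((gt & pred) & valid)
    union = np.sum((gt | pred) & valid)
    return 1.0 if union == 0 else inter / union

mask_a = np.array([[1, 1, 0], [0, 1, 0]])
mask_b = np.array([[1, 0, 0], [0, 1, 1]])
print(jaccard_sketch(mask_a, mask_b))  # 2 / 4 = 0.5
```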

@@ -33,9 +33,9 @@ def precision_recall(gt, pred, void_pixels=None):
     if void_pixels is None:
         void_pixels = np.zeros_like(gt)
 
-    gt = gt.astype(np.bool)
-    pred = pred.astype(np.bool)
-    void_pixels = void_pixels.astype(np.bool)
+    gt = gt.astype(bool)
+    pred = pred.astype(bool)
+    void_pixels = void_pixels.astype(bool)
 
     tp = ((pred & gt) & ~void_pixels).sum()
     fn = ((~pred & gt) & ~void_pixels).sum()
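The hunk is cut off before the false-positive count, so the following sketch completes the usual precision/recall arithmetic from the three counts; the `fp` line and the `eps` guard are assumptions for illustration, not the repo's literal code.

```python
import numpy as np

def precision_recall_sketch(gt, pred, void_pixels=None, eps=1e-8):
    """Illustrative completion of the tp/fn/fp counting shown above."""
    if void_pixels is None:
        void_pixels = np.zeros_like(gt)
    gt, pred, void = gt.astype(bool), pred.astype(bool), void_pixels.astype(bool)
    tp = ((pred & gt) & ~void).sum()
    fn = ((~pred & gt) & ~void).sum()
    fp = ((pred & ~gt) & ~void).sum()   # assumed: the line hidden by the truncated hunk
    precision = tp / (tp + fp + eps)
    recall = tp / (tp + fn + eps)
    return precision, recall
```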
main.py: 6 changes (3 additions, 3 deletions)
@@ -445,7 +445,7 @@ def train_one_epoch(config, model, criterion, data_loader, optimizer, epoch, mix
         if mixup_fn is not None:
             samples, targets = mixup_fn(samples, targets)
         teacher_outputs = None
-        with torch.cuda.amp.autocast(enabled=config.AMP_ENABLE):
+        with torch.amp.autocast('cuda', enabled=config.AMP_ENABLE):
             if config.TRAIN.CONTROLLERS_PRETRAIN or config.TRAIN.MTL_MULTI_OBJECTIVE_TRAIN:
                 outputs, NBs, NTs, nTsPerTask, nBsPerTask = model(
                     samples, return_activation_stats=True, task=task)
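`torch.cuda.amp.autocast(enabled=...)` is deprecated in favor of the device-agnostic `torch.amp.autocast(device_type, ...)` used here. A minimal sketch of the new call on a throwaway model; the model, criterion, and data are placeholders, and a CUDA device is assumed.

```python
import torch

# Placeholder model and data; the real ones come from the repo's config.
model = torch.nn.Linear(16, 4).cuda()
criterion = torch.nn.CrossEntropyLoss()
samples = torch.randn(8, 16, device='cuda')
targets = torch.randint(0, 4, (8,), device='cuda')

# New-style autocast: the device type is an explicit argument instead of
# being implied by the deprecated torch.cuda.amp namespace.
with torch.amp.autocast('cuda', enabled=True):
    outputs = model(samples)
    loss = criterion(outputs, targets)

print(outputs.dtype)  # torch.float16 inside the autocast region
```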
@@ -620,7 +620,7 @@ def flatten(l):
             task_batch, dim=0) for task, task_batch in labels_batch.items()}
 
         # Measure performance
-        with torch.cuda.amp.autocast(enabled=config.AMP_ENABLE):
+        with torch.amp.autocast('cuda', enabled=config.AMP_ENABLE):
             loss, loss_dict = criterion(
                 output_batch_tesnor, label_batch_tesnor)
             loss_meter.update(loss.item())
@@ -638,7 +638,7 @@ def flatten(l):
             task_batch, dim=0) for task, task_batch in labels_batch.items()}
 
         # Measure performance
-        with torch.cuda.amp.autocast(enabled=config.AMP_ENABLE):
+        with torch.amp.autocast('cuda', enabled=config.AMP_ENABLE):
             loss, loss_dict = criterion(
                 output_batch_tesnor, label_batch_tesnor)
             loss_meter.update(loss.item())
utils.py: 4 changes (2 additions, 2 deletions)
@@ -8,7 +8,7 @@
 import os
 import torch
 import torch.distributed as dist
-from torch._six import inf
+from math import inf
 import errno
 
 from PIL import Image
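`torch._six.inf` was only an alias for the float infinity, so `math.inf` is a drop-in replacement. In utilities like this it typically serves as the sentinel for the max-norm when measuring or clipping gradients; the helper below is a hypothetical illustration of that use, not the repo's actual function.

```python
from math import inf
import torch

grads = [torch.tensor([1.0, -3.0]), torch.tensor([0.5, 2.0])]

# norm_type=inf selects the max-norm, the usual reason such utilities import inf.
def total_norm(tensors, norm_type=2.0):
    if norm_type == inf:
        return max(t.abs().max().item() for t in tensors)
    return sum(t.norm(norm_type).item() ** norm_type for t in tensors) ** (1.0 / norm_type)

print(total_norm(grads, norm_type=inf))  # 3.0
print(round(total_norm(grads), 3))       # sqrt(1 + 9 + 0.25 + 4) is about 3.775
```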
@@ -235,7 +235,7 @@ class NativeScalerWithGradNormCount:
     state_dict_key = "amp_scaler"
 
     def __init__(self):
-        self._scaler = torch.cuda.amp.GradScaler()
+        self._scaler = torch.amp.GradScaler('cuda')
 
     def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):
         self._scaler.scale(loss).backward(create_graph=create_graph)
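`torch.cuda.amp.GradScaler()` is deprecated in favor of `torch.amp.GradScaler('cuda')`. The rest of the wrapper's `__call__` is not shown in this hunk, so the sketch below is the standard scaler recipe (unscale, clip, step, update) rather than the class's literal code; the model and optimizer are placeholders and a CUDA device is assumed.

```python
import torch

model = torch.nn.Linear(16, 4).cuda()                 # placeholder model
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scaler = torch.amp.GradScaler('cuda')                 # new device-agnostic constructor

x = torch.randn(8, 16, device='cuda')
with torch.amp.autocast('cuda'):
    loss = model(x).square().mean()

# Standard GradScaler recipe, i.e. what a scaler wrapper typically does
# after .scale(loss).backward():
scaler.scale(loss).backward()
scaler.unscale_(optimizer)                            # bring grads back to real scale
grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=5.0)
scaler.step(optimizer)                                # skips the step if grads overflowed
scaler.update()
optimizer.zero_grad()
print(float(grad_norm))
```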