This repository was archived by the owner on Sep 16, 2025. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 32
Expand file tree
/
Copy pathtorchutils.py
More file actions
188 lines (154 loc) · 5.84 KB
/
torchutils.py
File metadata and controls
188 lines (154 loc) · 5.84 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
# Code for data enhancement and test metrics
import numpy as np
import pandas as pd
import os
from PIL import Image
import cv2
import math
import torch
import torchvision
import timm
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split, cross_validate, StratifiedKFold, cross_val_score
# Metric Test accuracy required for packages
from sklearn.metrics import f1_score, accuracy_score, recall_score
# Augmentation Packages to be used for data enhancement
import albumentations
from albumentations.pytorch.transforms import ToTensorV2
from torchvision import datasets, models, transforms
def get_torch_transforms(img_size=224):
    """Return torchvision preprocessing pipelines keyed by split name.

    Args:
        img_size: target square side length for resizing.
    Returns:
        dict with 'train' (light augmentation + normalize) and 'val'
        (resize + normalize only) transforms.Compose pipelines.
    """
    # ImageNet channel statistics, matching pretrained-backbone expectations.
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    train_pipeline = transforms.Compose([
        transforms.Resize((img_size, img_size)),
        transforms.RandomHorizontalFlip(p=0.2),
        transforms.RandomRotation((-5, 5)),
        transforms.RandomAutocontrast(p=0.2),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
    val_pipeline = transforms.Compose([
        transforms.Resize((img_size, img_size)),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
    return {'train': train_pipeline, 'val': val_pipeline}
# Training set preprocessing and data enhancement
def get_train_transforms(img_size=320):
    """Build the albumentations augmentation pipeline for training images.

    Args:
        img_size: target square side length for resizing.
    Returns:
        albumentations.Compose ending in ToTensorV2 (CHW float tensor).
    """
    imagenet_mean = [0.485, 0.456, 0.406]
    imagenet_std = [0.229, 0.224, 0.225]
    steps = [
        albumentations.Resize(img_size, img_size),
        # Flips and free rotation: orientation is assumed label-invariant.
        albumentations.HorizontalFlip(p=0.5),
        albumentations.VerticalFlip(p=0.5),
        albumentations.Rotate(limit=180, p=0.7),
        # Translate/scale jitter only; rotation already handled above.
        albumentations.ShiftScaleRotate(
            shift_limit=0.25, scale_limit=0.1, rotate_limit=0
        ),
        albumentations.Normalize(
            imagenet_mean, imagenet_std,
            max_pixel_value=255.0, always_apply=True
        ),
        ToTensorV2(p=1.0),
    ]
    return albumentations.Compose(steps)
# Validation set and test set preprocessing
def get_valid_transforms(img_size=224):
    """Build the deterministic albumentations pipeline for validation/test.

    Args:
        img_size: target square side length for resizing.
    Returns:
        albumentations.Compose: resize + ImageNet normalize + ToTensorV2.
    """
    steps = [
        albumentations.Resize(img_size, img_size),
        albumentations.Normalize(
            [0.485, 0.456, 0.406], [0.229, 0.224, 0.225],
            max_pixel_value=255.0, always_apply=True
        ),
        ToTensorV2(p=1.0),
    ]
    return albumentations.Compose(steps)
# Load data in csv format
class LeafDataset(Dataset):
    """Dataset yielding (image, label) pairs loaded from image file paths.

    Args:
        images_filepaths: sequence of paths readable by cv2.imread.
        labels: sequence of labels aligned index-for-index with the paths.
        transform: optional albumentations-style callable invoked as
            transform(image=...)["image"]; when None the raw RGB array
            is returned.
    """
    def __init__(self, images_filepaths, labels, transform=None):
        self.images_filepaths = images_filepaths
        self.labels = labels
        self.transform = transform

    def __len__(self):
        return len(self.images_filepaths)

    def __getitem__(self, idx):
        image_filepath = self.images_filepaths[idx]
        image = cv2.imread(image_filepath)
        # cv2.imread silently returns None for a missing or unreadable
        # file; fail loudly here instead of with a cryptic cvtColor error.
        if image is None:
            raise FileNotFoundError(
                "Could not read image: %s" % image_filepath
            )
        # OpenCV decodes BGR; pretrained-model normalization expects RGB.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        label = self.labels[idx]
        if self.transform is not None:
            image = self.transform(image=image)["image"]
        return image, label
# Test accuracy
# Test accuracy
def accuracy(output, target):
    """Top-1 accuracy of a batch of logits against integer class targets.

    Args:
        output: (batch, n_classes) tensor of raw logits or probabilities.
        target: (batch,) tensor of true class indices.
    Returns:
        Fraction of correct predictions as a float in [0, 1].
    """
    # softmax is monotonic within each row, so argmax over the raw logits
    # yields the same prediction — the explicit softmax pass was redundant.
    y_pred = torch.argmax(output, dim=1).cpu()
    target = target.cpu()
    # Equivalent to sklearn's accuracy_score but with no round-trip
    # through numpy, keeping this helper self-contained.
    return (y_pred == target).double().mean().item()
# Calculating f1
# Calculating f1
def calculate_f1_macro(output, target):
    """Macro-averaged F1 score for a batch of logits.

    Args:
        output: (batch, n_classes) tensor of raw logits.
        target: (batch,) tensor of true class indices.
    Returns:
        Unweighted mean of per-class F1 scores.
    """
    probabilities = torch.softmax(output, dim=1)
    predictions = torch.argmax(probabilities, dim=1).cpu()
    return f1_score(target.cpu(), predictions, average='macro')
# Calculating recall
# Calculating recall
def calculate_recall_macro(output, target):
    """Macro-averaged recall for a batch of logits.

    Args:
        output: (batch, n_classes) tensor of raw logits.
        target: (batch,) tensor of true class indices.
    Returns:
        Unweighted mean of per-class recall; classes with no true
        samples contribute 0 (zero_division=0) rather than a warning.
    """
    probabilities = torch.softmax(output, dim=1)
    predictions = torch.argmax(probabilities, dim=1).cpu()
    return recall_score(target.cpu(), predictions, average="macro", zero_division=0)
# Output information to use when training
# Output information to use when training
class MetricMonitor:
    """Accumulates running averages of named metrics for progress display."""

    def __init__(self, float_precision=3):
        # Decimal places used when formatting averages in __str__.
        self.float_precision = float_precision
        self.reset()

    def reset(self):
        """Discard all accumulated values and start fresh."""
        self.metrics = defaultdict(lambda: {"val": 0, "count": 0, "avg": 0})

    def update(self, metric_name, val):
        """Fold one observation of `metric_name` into its running average."""
        entry = self.metrics[metric_name]
        entry["val"] += val
        entry["count"] += 1
        entry["avg"] = entry["val"] / entry["count"]

    def __str__(self):
        # Render "name: avg" pairs in insertion order, pipe-separated.
        formatted = []
        for metric_name, entry in self.metrics.items():
            formatted.append(
                "{name}: {avg:.{prec}f}".format(
                    name=metric_name,
                    avg=entry["avg"],
                    prec=self.float_precision,
                )
            )
        return " | ".join(formatted)
# Adjusted learning rate
def adjust_learning_rate(optimizer, epoch, params, batch=0, nBatch=None):
    """Write the scheduled learning rate into every optimizer param group.

    Args:
        optimizer: torch optimizer whose param_groups are mutated in place.
        epoch: current epoch index.
        params: dict providing 'lr' (initial LR) and 'epochs' (total epochs).
        batch: index of the current batch within the epoch.
        nBatch: number of batches per epoch, forwarded to the schedule.
    Returns:
        The learning rate that was applied.
    """
    updated_lr = calc_learning_rate(
        epoch, params['lr'], params['epochs'], batch, nBatch
    )
    for group in optimizer.param_groups:
        group['lr'] = updated_lr
    return updated_lr
# Computational learning rate
def calc_learning_rate(epoch, init_lr, n_epochs, batch=0, nBatch=None, lr_schedule_type='cosine'):
    """Learning-rate schedule: cosine annealing from init_lr toward 0.

    Args:
        epoch: current epoch index (0-based).
        init_lr: initial learning rate.
        n_epochs: total number of training epochs.
        batch: current batch index within the epoch.
        nBatch: batches per epoch; None falls back to a per-epoch schedule.
        lr_schedule_type: 'cosine' or None (constant learning rate).
    Returns:
        The scheduled learning rate.
    Raises:
        ValueError: if lr_schedule_type is neither 'cosine' nor None.
    """
    if lr_schedule_type == 'cosine':
        # Previously the default nBatch=None crashed the default schedule
        # with "n_epochs * None"; anneal at epoch granularity instead.
        if nBatch is None:
            nBatch, batch = 1, 0
        t_total = n_epochs * nBatch
        t_cur = epoch * nBatch + batch
        lr = 0.5 * init_lr * (1 + math.cos(math.pi * t_cur / t_total))
    elif lr_schedule_type is None:
        lr = init_lr
    else:
        raise ValueError('do not support: %s' % lr_schedule_type)
    return lr