# finetune_evaluator.py
import numpy as np
import torch
from sklearn.metrics import balanced_accuracy_score, f1_score, confusion_matrix, cohen_kappa_score, roc_auc_score, \
    precision_recall_curve, auc, r2_score, mean_squared_error
from tqdm import tqdm


class Evaluator:
    """Computes evaluation metrics for a fine-tuned model over a data loader."""

    def __init__(self, params, data_loader):
        self.params = params
        self.data_loader = data_loader

    @torch.no_grad()  # disable gradient tracking during evaluation
    def get_metrics_for_multiclass(self, model):
        # Multi-class classification: balanced accuracy, Cohen's kappa,
        # weighted F1, and the confusion matrix.
        model.eval()
        truths = []
        preds = []
        for x, y in tqdm(self.data_loader, mininterval=1):
            x = x.cuda()
            y = y.cuda()
            pred = model(x)
            pred_y = torch.max(pred, dim=-1)[1]  # argmax over class logits
            truths += y.cpu().squeeze().numpy().tolist()
            preds += pred_y.cpu().squeeze().numpy().tolist()
        truths = np.array(truths)
        preds = np.array(preds)
        acc = balanced_accuracy_score(truths, preds)
        f1 = f1_score(truths, preds, average='weighted')
        kappa = cohen_kappa_score(truths, preds)
        cm = confusion_matrix(truths, preds)
        return acc, kappa, f1, cm

    @torch.no_grad()
    def get_metrics_for_binaryclass(self, model):
        # Binary classification: balanced accuracy, PR-AUC, ROC-AUC, and the
        # confusion matrix, computed from sigmoid scores thresholded at 0.5.
        model.eval()
        truths = []
        preds = []
        scores = []
        for x, y in tqdm(self.data_loader, mininterval=1):
            x = x.cuda()
            y = y.cuda()
            pred = model(x)
            score_y = torch.sigmoid(pred)
            pred_y = torch.gt(score_y, 0.5).long()
            truths += y.long().cpu().squeeze().numpy().tolist()
            preds += pred_y.cpu().squeeze().numpy().tolist()
            scores += score_y.cpu().numpy().tolist()
        truths = np.array(truths)
        preds = np.array(preds)
        scores = np.array(scores)
        acc = balanced_accuracy_score(truths, preds)
        roc_auc = roc_auc_score(truths, scores)
        precision, recall, thresholds = precision_recall_curve(truths, scores, pos_label=1)
        pr_auc = auc(recall, precision)
        cm = confusion_matrix(truths, preds)
        return acc, pr_auc, roc_auc, cm

    @torch.no_grad()
    def get_metrics_for_regression(self, model):
        # Regression: Pearson correlation coefficient, R^2, and RMSE.
        model.eval()
        truths = []
        preds = []
        for x, y in tqdm(self.data_loader, mininterval=1):
            x = x.cuda()
            y = y.cuda()
            pred = model(x)
            truths += y.cpu().squeeze().numpy().tolist()
            preds += pred.cpu().squeeze().numpy().tolist()
        truths = np.array(truths)
        preds = np.array(preds)
        corrcoef = np.corrcoef(truths, preds)[0, 1]
        r2 = r2_score(truths, preds)
        rmse = mean_squared_error(truths, preds) ** 0.5
        return corrcoef, r2, rmse