-
Notifications
You must be signed in to change notification settings - Fork 26
/
Copy pathscoring.py
111 lines (99 loc) · 3.5 KB
/
scoring.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
# from torch.autograd import Variable
import argparse
import model
import math
import numpy as np
import sys
# Input:
# model: the torch model
# input: the input at current stage
# Torch tensor with size (Batchsize,length)
# Output: score with size (batchsize, length)
def random(model, inputs, pred, classes):
    """Baseline scorer: output a random list of scores.

    ``model``, ``pred`` and ``classes`` are ignored; they are kept so
    every scoring function shares the same signature.

    Returns:
        FloatTensor (batchsize, length) of values drawn from U[0, 1).
    """
    batch, length = inputs.size(0), inputs.size(1)
    return torch.rand(batch, length)
def replaceone(model, inputs, pred, classes):
    """Leave-one-out score: per-sample loss after replacing each token.

    For every position i, clones the batch, overwrites column i with
    token id 2 (presumably the UNK/pad id — confirm against the
    vocabulary) and records the per-sample NLL loss of the model on the
    perturbed input. A high loss means the original token was important.

    Args:
        model: torch model returning log-probabilities, shape (batch, classes).
        inputs: LongTensor (batch, length) of token ids.
        pred: LongTensor (batch,) of target classes.
        classes: unused; kept for the shared scorer signature.

    Returns:
        FloatTensor (batch, length) of per-position losses.
    """
    losses = torch.zeros(inputs.size(0), inputs.size(1))
    for i in range(inputs.size(1)):
        tempinputs = inputs.clone()
        tempinputs[:, i] = 2
        with torch.no_grad():
            tempoutput = model(tempinputs)
        # reduction='none' replaces the long-deprecated reduce=False and
        # yields one loss value per batch element.
        losses[:, i] = F.nll_loss(tempoutput, pred, reduction='none')
    return losses
def temporal(model, inputs, pred, classes):
    """Head score: gain in predicted-class probability per added token.

    For each position i the model is evaluated on the prefix
    inputs[:, :i+1]; exp() of its output (assumed to be log-probs) gives
    the probability of the predicted class. The score at i is that
    probability minus the probability at the previous prefix; position 0
    is compared against the uniform prior 1/classes.

    Returns:
        FloatTensor (batch, length) of probability deltas.
    """
    batch, length = inputs.size(0), inputs.size(1)
    probs = torch.zeros(batch, length)
    with torch.no_grad():
        for i in range(length):
            out = torch.exp(model(inputs[:, :i + 1]))
            probs[:, i] = out.gather(1, pred.view(-1, 1)).view(-1)
    deltas = torch.zeros(batch, length)
    deltas[:, 0] = probs[:, 0] - 1.0 / classes
    deltas[:, 1:] = probs[:, 1:] - probs[:, :-1]
    return deltas
def temporaltail(model, inputs, pred, classes):
    """Tail score: gain in predicted-class probability per kept token.

    Mirror of ``temporal``: for each position i the model is evaluated
    on the suffix inputs[:, i:], and the score at i is that suffix's
    predicted-class probability minus the probability of the suffix
    starting one token later. The last position is compared against the
    uniform prior 1/classes.

    Returns:
        FloatTensor (batch, length) of probability deltas.
    """
    batch, length = inputs.size(0), inputs.size(1)
    probs = torch.zeros(batch, length)
    with torch.no_grad():
        for i in range(length):
            out = torch.exp(model(inputs[:, i:]))
            probs[:, i] = out.gather(1, pred.view(-1, 1)).view(-1)
    deltas = torch.zeros(batch, length)
    deltas[:, -1] = probs[:, -1] - 1.0 / classes
    deltas[:, :-1] = probs[:, :-1] - probs[:, 1:]
    return deltas
def combined(model, inputs, pred, classes):
    """Average of the head (``temporal``) and tail (``temporaltail``) scores."""
    head = temporal(model, inputs, pred, classes)
    tail = temporaltail(model, inputs, pred, classes)
    return (head + tail) / 2
def grad(model, inputs, pred, classes):
losses1 = torch.zeros(inputs.size()[0],inputs.size()[1])
dloss = torch.zeros(inputs.size()[0],inputs.size()[1])
if isinstance(model,torch.nn.DataParallel):
model = model.module
model.train()
embd,output = model(inputs, returnembd = True)
# embd.retain_grad()
loss = F.nll_loss(output,pred)
loss.backward()
score = (inputs<=2).float()
score = -score
score = embd.grad.norm(2,dim=2) + score * 1e9
return score
def grad_unconstrained(model, inputs, pred, classes):
losses1 = torch.zeros(inputs.size()[0],inputs.size()[1])
dloss = torch.zeros(inputs.size()[0],inputs.size()[1])
if isinstance(model,torch.nn.DataParallel):
model = model.module
model.train()
embd,output = model(inputs, returnembd = True)
loss = F.nll_loss(output,pred)
loss.backward()
score = embd.grad.norm(2,dim=2)
return score
def scorefunc(name):
    """Resolve a scoring function from a method name by substring match.

    The match order is significant and mirrors the original if/elif
    chain: "temporal" is tested before "tail" (so "temporaltail" as a
    name resolves to ``temporal``) and "ucgrad" before "grad". Exits
    the process when no key matches.
    """
    dispatch = (
        ("temporal", temporal),
        ("tail", temporaltail),
        ("combined", combined),
        ("replaceone", replaceone),
        ("random", random),
        ("ucgrad", grad_unconstrained),
        ("grad", grad),
    )
    for key, fn in dispatch:
        if key in name:
            return fn
    print('No scoring function found')
    sys.exit(1)