
Commit ca3fb21

bug fix: remove the redundant softmax applied to model outputs before the loss criterion, and make GraNd's per-repeat random seed deterministic
1 parent 8f67e4d commit ca3fb21

File tree

5 files changed: +7 -7 lines changed

deepcore/methods/craig.py

+1 -1
@@ -53,7 +53,7 @@ def calc_gradient(self, index=None):
         for i, (input, targets) in enumerate(batch_loader):
             self.model_optimizer.zero_grad()
             outputs = self.model(input.to(self.args.device))
-            loss = self.criterion(torch.nn.functional.softmax(outputs.requires_grad_(True), dim=1),
+            loss = self.criterion(outputs.requires_grad_(True),
                                   targets.to(self.args.device)).sum()
             batch_num = targets.shape[0]
             with torch.no_grad():
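
The same one-line change recurs in every file in this commit: model outputs were passed through torch.nn.functional.softmax before reaching self.criterion. Assuming the criterion is cross-entropy (DeepCore's usual choice; the commit itself doesn't show how the criterion is constructed), this applied softmax twice, since nn.CrossEntropyLoss already log-softmaxes its input, so the losses and the gradients derived from them were distorted. A minimal sketch of the difference, with hypothetical logits:

import torch

# Assumption: the criterion is cross-entropy, as in DeepCore's default setup.
criterion = torch.nn.CrossEntropyLoss()

logits = torch.tensor([[4.0, -2.0, 1.0]])   # hypothetical raw model outputs
target = torch.tensor([0])

# Buggy: softmax first, so CrossEntropyLoss log-softmaxes probabilities.
buggy = criterion(torch.nn.functional.softmax(logits, dim=1), target)

# Fixed: pass raw logits; the criterion applies log-softmax itself.
fixed = criterion(logits, target)

print(buggy.item(), fixed.item())

Because the inner softmax squashes scores into [0, 1], the outer log-softmax sees a nearly flat input, pushing every loss toward log(num_classes) and shrinking the gradient signal these selection methods rely on.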

deepcore/methods/glister.py

+2 -2
@@ -46,7 +46,7 @@ def calc_gradient(self, index=None, val=False, record_val_detail=False):
         for i, (input, targets) in enumerate(batch_loader):
             self.model_optimizer.zero_grad()
             outputs = self.model(input.to(self.args.device))
-            loss = self.criterion(torch.nn.functional.softmax(outputs.requires_grad_(True), dim=1), targets.to(self.args.device)).sum()
+            loss = self.criterion(outputs.requires_grad_(True), targets.to(self.args.device)).sum()
             batch_num = targets.shape[0]
             with torch.no_grad():
                 bias_parameters_grads = torch.autograd.grad(loss, outputs)[0]
@@ -93,7 +93,7 @@ def update_val_gradients(self, new_selection, selected_for_train):
             batch_indx = np.arange(sample_num)[i * self.args.selection_batch:min((i + 1) * self.args.selection_batch,
                                                                                  sample_num)]
             new_out_puts_batch = new_outputs[batch_indx].clone().detach().requires_grad_(True)
-            loss = self.criterion(torch.nn.functional.softmax(new_out_puts_batch, dim=1), self.init_y[batch_indx])
+            loss = self.criterion(new_out_puts_batch, self.init_y[batch_indx])
             batch_num = len(batch_indx)
             bias_parameters_grads = torch.autograd.grad(loss.sum(), new_out_puts_batch, retain_graph=True)[0]
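
These methods take the gradient of the loss with respect to the outputs and store it as bias_parameters_grads. For summed cross-entropy that gradient has a closed form, softmax(logits) - one_hot(y), which is also the gradient of the last-layer bias. A quick check of the identity (a sketch, again assuming a cross-entropy criterion; the tensors are hypothetical):

import torch
import torch.nn.functional as F

logits = torch.randn(4, 3, requires_grad=True)   # hypothetical batch of outputs
y = torch.tensor([0, 2, 1, 2])

loss = torch.nn.CrossEntropyLoss(reduction='sum')(logits, y)
g = torch.autograd.grad(loss, logits)[0]          # what the diff stores as bias_parameters_grads

expected = (F.softmax(logits, dim=1) - F.one_hot(y, num_classes=3).float()).detach()
print(torch.allclose(g, expected, atol=1e-6))     # True

With the redundant softmax in place, g would instead be the gradient through two stacked softmaxes, not this clean expression.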

deepcore/methods/gradmatch.py

+1 -1
@@ -142,7 +142,7 @@ def calc_gradient(self, index=None, val=False):
         for i, (input, targets) in enumerate(batch_loader):
             self.model_optimizer.zero_grad()
             outputs = self.model(input.to(self.args.device)).requires_grad_(True)
-            loss = self.criterion(torch.nn.functional.softmax(outputs, dim=1), targets.to(self.args.device)).sum()
+            loss = self.criterion(outputs, targets.to(self.args.device)).sum()
             batch_num = targets.shape[0]
             with torch.no_grad():
                 bias_parameters_grads = torch.autograd.grad(loss, outputs, retain_graph=True)[0].cpu()
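
Here (as in GLISTER's update_val_gradients above) torch.autograd.grad is called with retain_graph=True, which keeps the autograd graph alive so it can be differentiated through again; without the flag, a second call on the same graph raises a runtime error. A minimal sketch with a toy tensor, not DeepCore code:

import torch

x = torch.randn(2, 3, requires_grad=True)
loss = (x ** 2).sum()

g1 = torch.autograd.grad(loss, x, retain_graph=True)[0]  # graph kept alive
g2 = torch.autograd.grad(loss, x)[0]                     # second call still works
print(torch.allclose(g1, g2))                            # True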

deepcore/methods/grand.py

+2 -2
@@ -38,7 +38,7 @@ def finish_run(self):
         for i, (input, targets) in enumerate(batch_loader):
             self.model_optimizer.zero_grad()
             outputs = self.model(input.to(self.args.device))
-            loss = self.criterion(torch.nn.functional.softmax(outputs.requires_grad_(True), dim=1),
+            loss = self.criterion(outputs.requires_grad_(True),
                                   targets.to(self.args.device)).sum()
             batch_num = targets.shape[0]
             with torch.no_grad():
@@ -60,7 +60,7 @@ def select(self, **kwargs):
 
         for self.cur_repeat in range(self.repeat):
             self.run()
-            self.random_seed = int(time.time() * 1000) % 100000
+            self.random_seed = self.random_seed + 5
 
         self.norm_mean = torch.mean(self.norm_matrix, dim=1).cpu().detach().numpy()
         if not self.balance:
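
The second GraNd change replaces a wall-clock seed with a fixed stride of 5 from the current seed, so each of the repeat runs gets a distinct but reproducible seed instead of one that differs on every invocation. A sketch of the two schemes (base_seed is a hypothetical starting value):

import time

base_seed = 42                       # hypothetical starting seed
repeats = 3

# Old: milliseconds since the epoch, different on every program run.
old_seeds = [int(time.time() * 1000) % 100000 for _ in range(repeats)]

# New: deterministic stride of 5 from the base seed, identical across reruns.
new_seeds = [base_seed + 5 * r for r in range(repeats)]   # [42, 47, 52]

print(old_seeds, new_seeds)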

deepcore/methods/submodular.py

+1 -1
@@ -62,7 +62,7 @@ def calc_gradient(self, index=None):
         for i, (input, targets) in enumerate(batch_loader):
             self.model_optimizer.zero_grad()
             outputs = self.model(input.to(self.args.device))
-            loss = self.criterion(torch.nn.functional.softmax(outputs.requires_grad_(True), dim=1),
+            loss = self.criterion(outputs.requires_grad_(True),
                                   targets.to(self.args.device)).sum()
             batch_num = targets.shape[0]
             with torch.no_grad():
