"""Distillation loss cell definitions."""
from mindspore import nn
from mindspore.ops import functional as F


class HardDistillLossCell(nn.WithLossCell):
    """
    Wraps the network with a hard distillation loss function.

    Computes the loss of the student network plus an extra hard knowledge
    distillation loss, taking the teacher model's prediction as additional
    supervision.

    Args:
        backbone (Cell): The student network to train; used to compute the base loss.
        loss_fn (Cell): The loss function used to compute the loss of the student network.
        teacher_model (Cell): The teacher network used to compute the distillation loss.
        alpha (float): Distillation factor, the coefficient that balances the
            distillation loss and the base loss. Default: 0.5.
    """

    def __init__(self, backbone, loss_fn, teacher_model, alpha=0.5):
        super().__init__(backbone, loss_fn)
        self.teacher_model = teacher_model
        self.alpha = alpha

    def construct(self, data, label):
        out = self._backbone(data)

        # The student is expected to return a pair of logits:
        # `out` for the base task loss, `out_kd` for distillation.
        out, out_kd = out
        base_loss = self._loss_fn(out, label)

        teacher_out = self.teacher_model(data)

        # Hard distillation: the teacher's argmax prediction serves as an
        # extra hard label for the student's distillation head.
        distillation_loss = F.cross_entropy(out_kd, teacher_out.argmax(axis=1))
        loss = base_loss * (1 - self.alpha) + distillation_loss * self.alpha

        return loss
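
# A minimal usage sketch for the hard variant (assumes hypothetical
# `StudentNet` and `TeacherNet` cells; the student must return a
# `(logits, distill_logits)` pair, as `construct` above unpacks):
#
#     student, teacher = StudentNet(), TeacherNet()
#     teacher.set_train(False)
#     net_with_loss = HardDistillLossCell(student, nn.CrossEntropyLoss(),
#                                         teacher, alpha=0.5)
#     loss = net_with_loss(images, labels)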


class SoftDistillLossCell(nn.WithLossCell):
    """
    Wraps the network with a soft distillation loss function.

    Computes the loss of the student network plus an extra soft knowledge
    distillation loss, taking the teacher model's prediction as additional
    supervision.

    Args:
        backbone (Cell): The student network to train; used to compute the base loss.
        loss_fn (Cell): The loss function used to compute the loss of the student network.
        teacher_model (Cell): The teacher network used to compute the distillation loss.
        alpha (float): Distillation factor, the coefficient that balances the
            distillation loss and the base loss. Default: 0.5.
        tau (float): Distillation temperature. Higher temperatures soften the
            probability distributions compared by the Kullback-Leibler
            divergence, spreading weight over more classes. Default: 1.0.
    """

    def __init__(self, backbone, loss_fn, teacher_model, alpha=0.5, tau=1.0):
        super().__init__(backbone, loss_fn)
        self.teacher_model = teacher_model
        self.alpha = alpha
        self.tau = tau

    def construct(self, data, label):
        out = self._backbone(data)

        # The student is expected to return a pair of logits:
        # `out` for the base task loss, `out_kd` for distillation.
        out, out_kd = out
        base_loss = self._loss_fn(out, label)

        teacher_out = self.teacher_model(data)

        # Soft distillation: KL divergence between the temperature-scaled
        # student and teacher distributions. `kl_div` expects the student
        # side as log-probabilities and the target side as probabilities,
        # so the teacher logits go through `softmax` rather than
        # `log_softmax`. The result is scaled by T^2 (the usual correction
        # for temperature-scaled gradients) and averaged over all elements.
        T = self.tau
        distillation_loss = (
            F.kl_div(
                F.log_softmax(out_kd / T, axis=1),
                F.softmax(teacher_out / T, axis=1),
                reduction="sum",
            )
            * (T * T)
            / F.size(out_kd)
        )
        loss = base_loss * (1 - self.alpha) + distillation_loss * self.alpha

        return loss
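

# A minimal training sketch for the soft variant (assumes the same
# hypothetical `StudentNet`/`TeacherNet` cells; `nn.Momentum` and
# `nn.TrainOneStepCell` are standard MindSpore APIs):
#
#     student, teacher = StudentNet(), TeacherNet()
#     teacher.set_train(False)
#     net_with_loss = SoftDistillLossCell(student, nn.CrossEntropyLoss(),
#                                         teacher, alpha=0.5, tau=4.0)
#     optimizer = nn.Momentum(student.trainable_params(),
#                             learning_rate=0.01, momentum=0.9)
#     train_net = nn.TrainOneStepCell(net_with_loss, optimizer)
#     loss = train_net(images, labels)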