Skip to content

Commit 46670e1

Browse files
committed
KL Div formula fix
Signed-off-by: realAsma <[email protected]>
1 parent 73fc080 commit 46670e1

File tree

1 file changed

+2
-2
lines changed

1 file changed

+2
-2
lines changed

modelopt/torch/quantization/algorithms.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -1013,8 +1013,8 @@ def _get_kl_div_loss(
10131013
prob_unquant: torch.Tensor, logits_quant: torch.Tensor, lm_head: nn.Module = None
10141014
) -> torch.Tensor:
10151015
log_prob_quant = _get_prob_from_logits(logits_quant, return_log_prob=True, lm_head=lm_head)
1016-
# We dont need to calculate the full kl div loss here, just get p*log_q
1017-
return _get_p_log_q(prob_unquant, log_prob_quant)
1016+
# We dont need to calculate the full kl div loss here, just get - p*log_q
1017+
return -_get_p_log_q(prob_unquant, log_prob_quant)
10181018

10191019

10201020
def _get_lm_head(model: nn.Module) -> nn.Module:

0 commit comments

Comments (0)