@@ -15,9 +15,9 @@

 import unittest

-from opacus.grad_sample import embedding_norm_sample
 import torch
 import torch.nn as nn
+from opacus.grad_sample import embedding_norm_sample


 class TestComputeEmbeddingNormSample(unittest.TestCase):
@@ -36,15 +36,11 @@ def test_compute_embedding_norm_sample(self):
         # Example input ids (activations). Shape: [3, 2]
         input_ids = torch.tensor([[1, 1], [2, 0], [2, 0]], dtype=torch.long)

-        # Example gradients with respect to the embedding output (backprops).
-        # Shape: [6, 1]
-        grad_values = torch.tensor(
-            [[0.2], [0.2], [0.3], [0.1], [0.3], [0.1]], dtype=torch.float32
+        # Example backprops. Shape: [3, 2, 1]
+        backprops = torch.tensor(
+            [[[0.2], [0.2]], [[0.3], [0.1]], [[0.3], [0.1]]], dtype=torch.float32
         )

-        # Simulate backprop through embedding layer
-        backprops = grad_values
-
         # Wrap input_ids in a list as expected by the norm sample function
         activations = [input_ids]

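For context on the new `backprops` shape: with activations of shape [3, 2] and per-token output gradients of shape [3, 2, 1], the per-example gradient of the embedding weight is a scatter-add of each token's gradient into the weight row that token indexed. A minimal sketch, independent of opacus, reproducing the per-example norms by hand from the toy data above (the norm values in the comments are computed here, not quoted from the diff):

```python
import torch

# Toy data from the hunk above: 3 examples, 2 token ids each, embedding_dim=1.
input_ids = torch.tensor([[1, 1], [2, 0], [2, 0]], dtype=torch.long)
backprops = torch.tensor(
    [[[0.2], [0.2]], [[0.3], [0.1]], [[0.3], [0.1]]], dtype=torch.float32
)

num_embeddings, embedding_dim = 3, 1
for i in range(input_ids.shape[0]):
    # Per-example weight gradient: scatter-add each token's output
    # gradient into the embedding row that the token id indexed.
    grad = torch.zeros(num_embeddings, embedding_dim)
    grad.index_add_(0, input_ids[i], backprops[i])
    print(i, grad.norm().item())
# Example 0: id 1 occurs twice with grad 0.2 -> row grad 0.4, norm 0.4000
# Examples 1 and 2: rows 2 and 0 get 0.3 and 0.1 -> norm sqrt(0.10) ~ 0.3162
```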
@@ -70,17 +66,17 @@ def test_compute_embedding_norm_sample_with_non_one_embedding_dim(self):

         # Manually set weights for the embedding layer for testing
         embedding_layer.weight = nn.Parameter(
-            torch.tensor([[0.1], [0.2], [0.3]], dtype=torch.float32)
+            torch.tensor([[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], dtype=torch.float32)
         )

         # Example input ids (activations). Shape: [6, 1, 1].
         input_ids = torch.tensor(
             [[[1]], [[1]], [[2]], [[0]], [[2]], [[0]]], dtype=torch.long
         )

-        # Example gradients per input id, with embedding_dim=2.
+        # Example backprops per input id, with embedding_dim=2.
         # Shape: [6, 1, 1, 2]
-        grad_values = torch.tensor(
+        backprops = torch.tensor(
             [
                 [[[0.2, 0.2]]],
                 [[[0.2, 0.2]]],
@@ -92,9 +88,6 @@ def test_compute_embedding_norm_sample_with_non_one_embedding_dim(self):
             dtype=torch.float32,
         )

-        # Simulate backprop through embedding layer
-        backprops = grad_values
-
         # Wrap input_ids in a list as expected by the grad norm function
         activations = [input_ids]

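A quick sanity check on the embedding_dim=2 case. The hunk boundary above elides the remaining rows of the tensor, so only the two visible rows are used here: each example contributes a single token id, so its per-example weight gradient occupies exactly one row, and its norm equals the norm of that example's output gradient.

```python
import torch

# The two rows visible in the hunk above; the diff elides the remaining ones.
g = torch.tensor([[0.2, 0.2], [0.2, 0.2]])
# One token per example -> per-example weight-grad norm == ||g_i||.
print(g.norm(dim=1))  # tensor([0.2828, 0.2828]), i.e. sqrt(0.2^2 + 0.2^2)
```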
@@ -211,7 +204,6 @@ def test_compute_embedding_norm_sample_with_extra_activations_per_example(self):
         expected_norms = torch.tensor(
             [0.0150, 0.0071, 0.0005, 0.0081, 0.0039], dtype=torch.float32
         )
-        print("expected_norms: ", expected_norms)
         computed_norms = result[embedding_layer.weight]

         # Verify the computed norms match the expected norms
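Putting the pieces together, a hedged end-to-end sketch of how these tests drive the module. The call below assumes the function under test is `embedding_norm_sample.compute_embedding_norm_sample(layer, activations, backprops)` returning a dict keyed by the layer's weight; that matches the test class name and the `result[embedding_layer.weight]` lookup, but the signature is not shown verbatim in these hunks.

```python
import torch
import torch.nn as nn
from opacus.grad_sample import embedding_norm_sample

# Setup mirrors the first test; the exact function signature is assumed
# from TestComputeEmbeddingNormSample, not shown in the hunks above.
embedding_layer = nn.Embedding(num_embeddings=3, embedding_dim=1)
input_ids = torch.tensor([[1, 1], [2, 0], [2, 0]], dtype=torch.long)
backprops = torch.tensor(
    [[[0.2], [0.2]], [[0.3], [0.1]], [[0.3], [0.1]]], dtype=torch.float32
)

result = embedding_norm_sample.compute_embedding_norm_sample(
    embedding_layer, [input_ids], backprops
)
print(result[embedding_layer.weight])  # per-example grad norms, shape [3]
```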