@@ -228,9 +228,9 @@ def standardize_images(x):
   """Image standardization on batches."""
   with tf.name_scope("standardize_images", [x]):
     x = tf.to_float(x)
-    x_mean = tf.reduce_mean(x, axis=[1, 2, 3], keep_dims=True)
+    x_mean = tf.reduce_mean(x, axis=[1, 2, 3], keepdims=True)
     x_variance = tf.reduce_mean(
-        tf.square(x - x_mean), axis=[1, 2, 3], keep_dims=True)
+        tf.square(x - x_mean), axis=[1, 2, 3], keepdims=True)
     x_shape = shape_list(x)
     num_pixels = tf.to_float(x_shape[1] * x_shape[2] * x_shape[3])
     x = (x - x_mean) / tf.maximum(tf.sqrt(x_variance), tf.rsqrt(num_pixels))
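
For context, the code being touched standardizes each image in the batch to zero mean and unit variance over height, width, and channels, flooring the stddev at 1/sqrt(num_pixels) so constant images don't divide by zero. A minimal NumPy sketch of the same math (function name is illustrative, not from the patch):

    import numpy as np

    def standardize_images_np(x):
        # Per-image standardization over H, W, C: zero mean, unit variance.
        x = x.astype(np.float32)
        mean = x.mean(axis=(1, 2, 3), keepdims=True)
        var = ((x - mean) ** 2).mean(axis=(1, 2, 3), keepdims=True)
        num_pixels = float(x.shape[1] * x.shape[2] * x.shape[3])
        # Floor the stddev at 1/sqrt(num_pixels), matching tf.rsqrt(num_pixels).
        return (x - mean) / np.maximum(np.sqrt(var), 1.0 / np.sqrt(num_pixels))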
@@ -604,8 +604,8 @@ def layer_norm_vars(filters):
 def layer_norm_compute_python(x, epsilon, scale, bias):
   """Layer norm raw computation."""
   epsilon, scale, bias = [tf.cast(t, x.dtype) for t in [epsilon, scale, bias]]
-  mean = tf.reduce_mean(x, axis=[-1], keep_dims=True)
-  variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keep_dims=True)
+  mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
+  variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keepdims=True)
   norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
   return norm_x * scale + bias
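
The hunk above is the raw layer-norm computation: normalize over the last axis, then apply a learned scale and bias. A NumPy sketch with the same semantics (names and defaults here are illustrative placeholders):

    import numpy as np

    def layer_norm_np(x, epsilon=1e-6, scale=1.0, bias=0.0):
        # Normalize over the last axis, then apply scale and bias.
        mean = x.mean(axis=-1, keepdims=True)
        variance = ((x - mean) ** 2).mean(axis=-1, keepdims=True)
        norm_x = (x - mean) / np.sqrt(variance + epsilon)
        return norm_x * scale + bias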
@@ -1289,7 +1289,7 @@ def mask_from_embedding(emb):
   Returns:
     a 0.0/1.0 Tensor with shape [batch, width, height, 1].
   """
-  return weights_nonzero(tf.reduce_sum(tf.abs(emb), axis=3, keep_dims=True))
+  return weights_nonzero(tf.reduce_sum(tf.abs(emb), axis=3, keepdims=True))
 
 
 def mask_leq(target_length, source_length):
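
mask_from_embedding relies on padding positions embedding to all-zero vectors: summing absolute values over the depth axis yields zero exactly at padding, and weights_nonzero turns that into a 0.0/1.0 mask. A NumPy sketch with the nonzero test inlined (illustrative name):

    import numpy as np

    def mask_from_embedding_np(emb):
        # emb: [batch, width, height, depth]; padding embeds to all zeros.
        total = np.abs(emb).sum(axis=3, keepdims=True)
        return (total != 0).astype(np.float32)  # [batch, width, height, 1]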
@@ -1913,7 +1913,7 @@ def global_pool_1d(inputs, pooling_type="MAX", mask=None):
       if mask is not None:
         # Some elems are dummy elems so we can't just reduce the average.
         output = tf.reduce_sum(inputs, axis=1)
-        num_elems = tf.reduce_sum(mask, axis=1, keep_dims=True)
+        num_elems = tf.reduce_sum(mask, axis=1, keepdims=True)
         output = tf.div(output, tf.maximum(num_elems, 1))
       else:
         output = tf.reduce_mean(inputs, axis=1)
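
The masked branch averages over real elements only: sum the inputs, count the mask entries per row, and floor the count at 1 so all-padding rows don't divide by zero. A NumPy sketch, assuming inputs of shape [batch, length, depth] and a 0.0/1.0 mask of shape [batch, length, 1]:

    import numpy as np

    def masked_average_pool_np(inputs, mask):
        # Dummy (masked-out) elements contribute nothing to the sum.
        total = (inputs * mask).sum(axis=1)   # [batch, depth]
        num_elems = mask.sum(axis=1)          # [batch, 1]
        # Floor at 1 to avoid dividing by zero on all-padding rows.
        return total / np.maximum(num_elems, 1.0)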
@@ -2977,7 +2977,7 @@ def argmax_with_score(logits, axis=None):
 
 
 def log_prob_from_logits(logits, reduce_axis=-1):
-  return logits - tf.reduce_logsumexp(logits, axis=reduce_axis, keep_dims=True)
+  return logits - tf.reduce_logsumexp(logits, axis=reduce_axis, keepdims=True)
 
 
 def top_1_tpu(inputs):
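
log_prob_from_logits is log-softmax: subtracting the logsumexp (the log partition function) along reduce_axis leaves properly normalized log-probabilities. A NumPy sketch using the standard max-shift for numerical stability, which tf.reduce_logsumexp also applies internally:

    import numpy as np

    def log_prob_from_logits_np(logits, reduce_axis=-1):
        # Stable logsumexp: shift by the max before exponentiating.
        m = logits.max(axis=reduce_axis, keepdims=True)
        lse = m + np.log(np.exp(logits - m).sum(axis=reduce_axis, keepdims=True))
        return logits - lse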
@@ -2992,7 +2992,7 @@ def top_1_tpu(inputs):
     values: a Tensor with shape [...]
     indices: a Tensor with shape [...]
   """
-  inputs_max = tf.reduce_max(inputs, axis=-1, keep_dims=True)
+  inputs_max = tf.reduce_max(inputs, axis=-1, keepdims=True)
   mask = tf.to_int32(tf.equal(inputs_max, inputs))
   index = tf.range(tf.shape(inputs)[-1]) * mask
   return tf.squeeze(inputs_max, -1), tf.reduce_max(index, axis=-1)
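
top_1_tpu computes max and argmax over the last axis without tf.argmax, using a reduce_max plus an equality mask times an index range; on ties it returns the largest matching index. A NumPy sketch of the same trick (illustrative name):

    import numpy as np

    def top_1_np(inputs):
        # Max over the last axis, kept for broadcasting against inputs.
        inputs_max = inputs.max(axis=-1, keepdims=True)
        mask = (inputs == inputs_max).astype(np.int32)
        # Index range zeroed where not maximal; ties resolve to the largest index.
        index = np.arange(inputs.shape[-1]) * mask
        return np.squeeze(inputs_max, -1), index.max(axis=-1)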