From a6bc23f9ea7813a2cb21fcfcb3196f3d4e22580b Mon Sep 17 00:00:00 2001
From: liyulingyue <852433440@qq.com>
Date: Fri, 31 Jan 2025 07:15:19 +0800
Subject: [PATCH] fix typos, part 2

---
 _typos.toml                                                 | 7 -------
 .../framework/new_executor/interpreter/interpreter_util.cc  | 2 +-
 paddle/phi/kernels/gpu/batch_norm_grad_kernel.cu            | 4 ++--
 paddle/phi/kernels/gpu/batch_norm_kernel.cu                 | 4 ++--
 python/paddle/distributed/auto_parallel/static/helper.py    | 6 +++---
 python/paddle/nn/functional/loss.py                         | 2 +-
 test/ir/pir/cinn/test_anchor_fusion.py                      | 2 +-
 test/legacy_test/test_hsigmoid_op.py                        | 4 ++--
 8 files changed, 12 insertions(+), 19 deletions(-)

diff --git a/_typos.toml b/_typos.toml
index 2f1b7ee4fca511..03191816f45049 100644
--- a/_typos.toml
+++ b/_typos.toml
@@ -99,13 +99,6 @@ Stati = 'Stati'
 STOPED = 'STOPED'
 Stoped = 'Stoped'
 stoped = 'stoped'
-storeage = 'storeage'
-sotring = 'sotring'
-stragety = 'stragety'
-strem = 'strem'
-structed = 'structed'
-sturcture = 'sturcture'
-subsituted = 'subsituted'
 subsitute = 'subsitute'
 substitude = 'substitude'
 substitue = 'substitue'
diff --git a/paddle/fluid/framework/new_executor/interpreter/interpreter_util.cc b/paddle/fluid/framework/new_executor/interpreter/interpreter_util.cc
index 5b446605af2cc7..fc48aea76404d4 100644
--- a/paddle/fluid/framework/new_executor/interpreter/interpreter_util.cc
+++ b/paddle/fluid/framework/new_executor/interpreter/interpreter_util.cc
@@ -61,7 +61,7 @@ namespace paddle::framework::interpreter {

 using VariableIdMap = std::map<std::string, std::vector<int>>;

-// NOTE(Ruibiao): SingleStreamGuard make some multi-strem op (i.e.,
+// NOTE(Ruibiao): SingleStreamGuard make some multi-stream op (i.e.,
 // c_allreduce_sum) run in single stream. It is dedicated to BuildOpFuncList
 // which run kernel without stream synchronization.
 class SingleStreamGuard {
diff --git a/paddle/phi/kernels/gpu/batch_norm_grad_kernel.cu b/paddle/phi/kernels/gpu/batch_norm_grad_kernel.cu
index 1aebf4877a509c..35d37d5a0704e6 100644
--- a/paddle/phi/kernels/gpu/batch_norm_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/batch_norm_grad_kernel.cu
@@ -175,7 +175,7 @@ static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackward(
   __shared__ typename BlockReduce::TempStorage ds_storage;
   __shared__ typename BlockReduce::TempStorage db_storage;
   __shared__ typename BlockReduce::TempStorage mean_storage;
-  __shared__ typename BlockReduce::TempStorage variance_storeage;
+  __shared__ typename BlockReduce::TempStorage variance_storage;
   __shared__ BatchNormParamType<T> inv_var_val;
   __shared__ BatchNormParamType<T> mean_val;
   __shared__ BatchNormParamType<T> dscale_val;
@@ -207,7 +207,7 @@ static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackward(

   x_sum = BlockReduce(mean_storage).Reduce(x_sum, cub::Sum());
   x_square_sum =
-      BlockReduce(variance_storeage).Reduce(x_square_sum, cub::Sum());
+      BlockReduce(variance_storage).Reduce(x_square_sum, cub::Sum());
   if (threadIdx.x == 0) {
     mean_val = x_sum / inner_size;
     inv_var_val =
diff --git a/paddle/phi/kernels/gpu/batch_norm_kernel.cu b/paddle/phi/kernels/gpu/batch_norm_kernel.cu
index 94086e397e83af..d93db90108fe81 100644
--- a/paddle/phi/kernels/gpu/batch_norm_kernel.cu
+++ b/paddle/phi/kernels/gpu/batch_norm_kernel.cu
@@ -130,7 +130,7 @@ static __global__ LAUNCH_BOUNDS(BlockDim) void BNForwardTraining(
   int inner_size = N * HxW;
   typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
   __shared__ typename BlockReduce::TempStorage mean_storage;
-  __shared__ typename BlockReduce::TempStorage variance_storeage;
+  __shared__ typename BlockReduce::TempStorage variance_storage;
   __shared__ BatchNormParamType<T> mean_val;
   __shared__ BatchNormParamType<T> variance_val;
   __shared__ BatchNormParamType<T> inv_var_val;
@@ -149,7 +149,7 @@ static __global__ LAUNCH_BOUNDS(BlockDim) void BNForwardTraining(
     }
     x_sum = BlockReduce(mean_storage).Reduce(x_sum, cub::Sum());
     x_square_sum =
-        BlockReduce(variance_storeage).Reduce(x_square_sum, cub::Sum());
+        BlockReduce(variance_storage).Reduce(x_square_sum, cub::Sum());
     if (threadIdx.x == 0) {
       mean_val = x_sum / inner_size;
       variance_val = x_square_sum / inner_size - mean_val * mean_val;
diff --git a/python/paddle/distributed/auto_parallel/static/helper.py b/python/paddle/distributed/auto_parallel/static/helper.py
index 1a10072488a837..46ebf1fc0c1ee4 100644
--- a/python/paddle/distributed/auto_parallel/static/helper.py
+++ b/python/paddle/distributed/auto_parallel/static/helper.py
@@ -463,9 +463,9 @@ def init(self, main_program, place, dist_context):
         if self.lazy_init:
             return

-        amp_stragety = dist_context.strategy.amp
-        amp_config = copy.deepcopy(amp_stragety.to_dict())
-        need_cast_parameter = amp_stragety.enable and amp_config["level"] in [
+        amp_strategy = dist_context.strategy.amp
+        amp_config = copy.deepcopy(amp_strategy.to_dict())
+        need_cast_parameter = amp_strategy.enable and amp_config["level"] in [
             "o2",
             "o3",
         ]
diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py
index 8dd0e2f0a41ede..fd861218b17ec6 100644
--- a/python/paddle/nn/functional/loss.py
+++ b/python/paddle/nn/functional/loss.py
@@ -4478,7 +4478,7 @@ def adaptive_log_softmax_with_loss(
         name (str|None, optional): Name for the operation (optional, default is ``None``). For more information, please refer to :ref:`api_guide_Name`.

     Returns:
-        - output (Tensor). The tensor sotring adaptive logsoftmax result, the shape of output is ``[N]``
+        - output (Tensor). The tensor storing adaptive logsoftmax result, the shape of output is ``[N]``
         - loss (Tensor). The tensor variable storing the adaptive_log_softmax_loss of input and label.

     Examples:
diff --git a/test/ir/pir/cinn/test_anchor_fusion.py b/test/ir/pir/cinn/test_anchor_fusion.py
index bbc1280f70da85..26be5f214faed8 100644
--- a/test/ir/pir/cinn/test_anchor_fusion.py
+++ b/test/ir/pir/cinn/test_anchor_fusion.py
@@ -216,7 +216,7 @@ def init():

         self.check_accuracy_and_kernel_num(init, func, kernel_num=1)

-    def test_recompute_multidownstrema_trivial(self):
+    def test_recompute_multidownstream_trivial(self):
         # T
         # / \
         # S S
diff --git a/test/legacy_test/test_hsigmoid_op.py b/test/legacy_test/test_hsigmoid_op.py
index 93977272fc58ca..dccad0a4f586ea 100644
--- a/test/legacy_test/test_hsigmoid_op.py
+++ b/test/legacy_test/test_hsigmoid_op.py
@@ -282,7 +282,7 @@ def test_check_output(self):


 @skip_check_grad_ci(
-    reason="[skip shape check] The huffman tree is structed separately. It will be complicated if use large shape."
+    reason="[skip shape check] The huffman tree is structured separately. It will be complicated if use large shape."
 )
 class TestHSigmoidOpWithCustomTree(OpTest):
     def setUp(self):
@@ -343,7 +343,7 @@ def test_check_grad(self):


 @skip_check_grad_ci(
-    reason="[skip shape check] The huffman tree is structed separately. It will be complicated if use large shape."
+    reason="[skip shape check] The huffman tree is structured separately. It will be complicated if use large shape."
 )
 class TestHSigmoidOpWithCustomTreeWithoutBias(OpTest):
     def setUp(self):