[CodeStyle][Typos][S-[43-49]] Fix typo (storage, sotring...) #71001

Merged (3 commits, Feb 8, 2025)
Changes from 1 commit
7 changes: 0 additions & 7 deletions _typos.toml
@@ -99,13 +99,6 @@ Stati = 'Stati'
STOPED = 'STOPED'
Stoped = 'Stoped'
stoped = 'stoped'
-storeage = 'storeage'
-sotring = 'sotring'
-stragety = 'stragety'
-strem = 'strem'
-structed = 'structed'
-sturcture = 'sturcture'
-subsituted = 'subsituted'
subsitute = 'subsitute'
substitude = 'substitude'
substitue = 'substitue'
@@ -61,7 +61,7 @@ namespace paddle::framework::interpreter {

using VariableIdMap = std::map<std::string, std::vector<int>>;

-// NOTE(Ruibiao): SingleStreamGuard make some multi-strem op (i.e.,
+// NOTE(Ruibiao): SingleStreamGuard make some multi-stream op (i.e.,
// c_allreduce_sum) run in single stream. It is dedicated to BuildOpFuncList
// which run kernel without stream synchronization.
class SingleStreamGuard {
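For context on the NOTE above: SingleStreamGuard follows the standard RAII guard idiom, switching the interpreter into single-stream mode in its constructor and restoring the previous mode in its destructor, so everything inside BuildOpFuncList runs without cross-stream synchronization. A minimal, hypothetical sketch of that idiom (class name and flag are invented for illustration; this is not Paddle's actual implementation):

// Hypothetical sketch of the RAII guard idiom described in the NOTE above.
// The flag is invented for illustration; the real SingleStreamGuard
// toggles Paddle-internal interpreter state instead.
class SingleStreamGuardSketch {
 public:
  SingleStreamGuardSketch() : previous_(force_single_stream_) {
    force_single_stream_ = true;  // run subsequent multi-stream ops on one stream
  }
  ~SingleStreamGuardSketch() { force_single_stream_ = previous_; }  // restore the old mode on scope exit

 private:
  static inline bool force_single_stream_ = false;  // hypothetical global mode flag
  bool previous_;
};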
4 changes: 2 additions & 2 deletions paddle/phi/kernels/gpu/batch_norm_grad_kernel.cu
@@ -175,7 +175,7 @@ static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackward(
__shared__ typename BlockReduce::TempStorage ds_storage;
__shared__ typename BlockReduce::TempStorage db_storage;
__shared__ typename BlockReduce::TempStorage mean_storage;
-__shared__ typename BlockReduce::TempStorage variance_storeage;
+__shared__ typename BlockReduce::TempStorage variance_storage;
__shared__ BatchNormParamType<T> inv_var_val;
__shared__ BatchNormParamType<T> mean_val;
__shared__ BatchNormParamType<T> dscale_val;
@@ -207,7 +207,7 @@ static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackward(

x_sum = BlockReduce(mean_storage).Reduce(x_sum, cub::Sum());
x_square_sum =
-    BlockReduce(variance_storeage).Reduce(x_square_sum, cub::Sum());
+    BlockReduce(variance_storage).Reduce(x_square_sum, cub::Sum());
if (threadIdx.x == 0) {
mean_val = x_sum / inner_size;
inv_var_val =
4 changes: 2 additions & 2 deletions paddle/phi/kernels/gpu/batch_norm_kernel.cu
@@ -130,7 +130,7 @@ static __global__ LAUNCH_BOUNDS(BlockDim) void BNForwardTraining(
int inner_size = N * HxW;
typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage mean_storage;
-__shared__ typename BlockReduce::TempStorage variance_storeage;
+__shared__ typename BlockReduce::TempStorage variance_storage;
__shared__ BatchNormParamType<T> mean_val;
__shared__ BatchNormParamType<T> variance_val;
__shared__ BatchNormParamType<T> inv_var_val;
@@ -149,7 +149,7 @@ static __global__ LAUNCH_BOUNDS(BlockDim) void BNForwardTraining(
}
x_sum = BlockReduce(mean_storage).Reduce(x_sum, cub::Sum());
x_square_sum =
-    BlockReduce(variance_storeage).Reduce(x_square_sum, cub::Sum());
+    BlockReduce(variance_storage).Reduce(x_square_sum, cub::Sum());
if (threadIdx.x == 0) {
mean_val = x_sum / inner_size;
variance_val = x_square_sum / inner_size - mean_val * mean_val;
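The two batch-norm kernels above share the same CUB reduction pattern: each block-wide reduction gets its own __shared__ TempStorage (mean_storage and the renamed variance_storage), which lets the two Reduce calls run back to back without an extra __syncthreads() in between, and only thread 0 holds the reduced value afterwards. A self-contained sketch of the pattern, with invented kernel and variable names and assuming CUB's documented BlockReduce API (not Paddle code):

#include <cub/cub.cuh>

// Hypothetical kernel illustrating the pattern used by BNBackward / BNForwardTraining:
// one TempStorage per concurrent block reduction (mean vs. variance).
template <typename T, int BlockDim>
__global__ void BlockMeanVarSketch(const T* x, T* mean, T* var, int n) {
  typedef cub::BlockReduce<T, BlockDim> BlockReduce;
  __shared__ typename BlockReduce::TempStorage mean_storage;
  __shared__ typename BlockReduce::TempStorage variance_storage;

  T x_sum = 0;
  T x_square_sum = 0;
  for (int i = threadIdx.x; i < n; i += BlockDim) {  // block-stride loop over the data
    x_sum += x[i];
    x_square_sum += x[i] * x[i];
  }
  x_sum = BlockReduce(mean_storage).Reduce(x_sum, cub::Sum());
  x_square_sum = BlockReduce(variance_storage).Reduce(x_square_sum, cub::Sum());

  if (threadIdx.x == 0) {  // only thread 0 holds the block-wide sums
    T m = x_sum / n;
    *mean = m;
    *var = x_square_sum / n - m * m;
  }
}

// Example launch (single block): BlockMeanVarSketch<float, 256><<<1, 256>>>(d_x, d_mean, d_var, n);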
6 changes: 3 additions & 3 deletions python/paddle/distributed/auto_parallel/static/helper.py
@@ -463,9 +463,9 @@ def init(self, main_program, place, dist_context):
if self.lazy_init:
return

-amp_stragety = dist_context.strategy.amp
-amp_config = copy.deepcopy(amp_stragety.to_dict())
-need_cast_parameter = amp_stragety.enable and amp_config["level"] in [
+amp_strategy = dist_context.strategy.amp
+amp_config = copy.deepcopy(amp_strategy.to_dict())
+need_cast_parameter = amp_strategy.enable and amp_config["level"] in [
"o2",
"o3",
]
2 changes: 1 addition & 1 deletion python/paddle/nn/functional/loss.py
@@ -4478,7 +4478,7 @@ def adaptive_log_softmax_with_loss(
name (str|None, optional): Name for the operation (optional, default is ``None``). For more information, please refer to :ref:`api_guide_Name`.

Returns:
-- output (Tensor). The tensor sotring adaptive logsoftmax result, the shape of output is ``[N]``
+- output (Tensor). The tensor storing adaptive logsoftmax result, the shape of output is ``[N]``
- loss (Tensor). The tensor variable storing the adaptive_log_softmax_loss of input and label.

Examples:
2 changes: 1 addition & 1 deletion test/ir/pir/cinn/test_anchor_fusion.py
@@ -216,7 +216,7 @@ def init():

self.check_accuracy_and_kernel_num(init, func, kernel_num=1)

-def test_recompute_multidownstrema_trivial(self):
+def test_recompute_multidownstream_trivial(self):
# T
# / \
# S S
4 changes: 2 additions & 2 deletions test/legacy_test/test_hsigmoid_op.py
@@ -282,7 +282,7 @@ def test_check_output(self):


@skip_check_grad_ci(
reason="[skip shape check] The huffman tree is structed separately. It will be complicated if use large shape."
reason="[skip shape check] The huffman tree is structured separately. It will be complicated if use large shape."
)
class TestHSigmoidOpWithCustomTree(OpTest):
def setUp(self):
@@ -343,7 +343,7 @@ def test_check_grad(self):


@skip_check_grad_ci(
reason="[skip shape check] The huffman tree is structed separately. It will be complicated if use large shape."
reason="[skip shape check] The huffman tree is structured separately. It will be complicated if use large shape."
)
class TestHSigmoidOpWithCustomTreeWithoutBias(OpTest):
def setUp(self):