[CodeStyle][Typos][S-[51-59]] Fix typo (substract, successed...) #71000

Merged 1 commit on Feb 6, 2025
2 changes: 1 addition & 1 deletion CMakeLists.txt
@@ -334,7 +334,7 @@ option(WITH_POCKETFFT "Compile with pocketfft support" ON)
option(WITH_RECORD_BUILDTIME
"Compile PaddlePaddle with record all targets build time" OFF)
option(WITH_CUSTOM_DEVICE "Compile with custom device support" OFF)
option(WITH_ARM_BRPC "Supprot Brpc in Arm" OFF)
option(WITH_ARM_BRPC "Support Brpc in Arm" OFF)
option(WITH_FLPS "FL PS mode" OFF)
option(WITH_RPC "Compile with rpc support" ${WITH_DISTRIBUTE})
option(WITH_CUDNN_FRONTEND
14 changes: 0 additions & 14 deletions _typos.toml
@@ -111,18 +111,4 @@ substitude = 'substitude'
substitue = 'substitue'
Subsitute = 'Subsitute'
Substitude = 'Substitude'
substract = 'substract'
Substract = 'Substract'
successed = 'successed'
sucessfully = 'sucessfully'
Succeess = 'Succeess'
Suger = 'Suger'
supportted = 'supportted'
supoort = 'supoort'
Supprot = 'Supprot'
suport = 'suport'
suppport = 'suppport'
SWTICH = 'SWTICH'
Swith = 'Swith'
sysyem = 'sysyem'
UNSUPPORT = 'UNSUPPORT'
2 changes: 1 addition & 1 deletion paddle/fluid/distributed/ps/table/ssd_sparse_table.cc
@@ -3040,7 +3040,7 @@ int32_t SSDSparseTable::LoadWithBinary(const std::string& path, int param) {
_db->get_estimate_key_num(ssd_key_num);
_cache_tk_size =
(LocalSize() + ssd_key_num) * _config.sparse_table_cache_rate();
VLOG(0) << " Load Binary Succeess. all feasign: " << feasign_size_all;
VLOG(0) << " Load Binary Success. all feasign: " << feasign_size_all;
return 0;
}

2 changes: 1 addition & 1 deletion paddle/fluid/framework/data_feed.cu
@@ -3168,7 +3168,7 @@ int FillWalkBuf(const std::vector<uint64_t> &h_device_keys_len,
if (FLAGS_enable_graph_multi_node_sampling) {
if (sample_flag == EVENT_CONTINUE_SAMPLE) {
// Switching only occurs when multi machine sampling continues
switch_flag = EVENT_SWTICH_METAPATH;
switch_flag = EVENT_SWITCH_METAPATH;
}
} else {
cursor += 1;
2 changes: 1 addition & 1 deletion paddle/fluid/framework/data_feed.h
@@ -943,7 +943,7 @@ const int EVENT_FINISH_EPOCH = 0; // End of sampling single epoch
const int EVENT_CONTINUE_SAMPLE = 1; // Continue sampling
const int EVENT_WALKBUF_FULL = 2; // d_walk is full, end current pass sampling
const int EVENT_NOT_SWITCH = 0; // Continue sampling on the current metapath.
const int EVENT_SWTICH_METAPATH =
const int EVENT_SWITCH_METAPATH =
1; // Switch to the next metapath to perform sampling

struct GraphDataGeneratorConfig {
6 changes: 3 additions & 3 deletions paddle/fluid/platform/init.cc
@@ -81,7 +81,7 @@ std::once_flag glog_init_flag;
std::once_flag memory_method_init_flag;

bool InitGflags(std::vector<std::string> args) {
bool successed = false;
bool succeeded = false;
std::call_once(gflags_init_flag, [&]() {
FLAGS_logtostderr = true;
// NOTE(zhiqiu): dummy is needed, since the function
@@ -104,11 +104,11 @@ bool InitGflags(std::vector<std::string> args) {
char **arr = argv.data();
paddle::flags::AllowUndefinedFlags();
paddle::flags::ParseCommandLineFlags(&argc, &arr);
successed = true;
succeeded = true;

VLOG(1) << "After Parse: argc is " << argc;
});
return successed;
return succeeded;
}

#ifdef PADDLE_WITH_CUDA
2 changes: 1 addition & 1 deletion paddle/phi/api/generator/api_gen.py
@@ -523,7 +523,7 @@ def generate_api(
if is_fused_ops_yaml is True
else "paddle/phi/api/include/api.h"
)
# not all fused ops supoort dygraph
# not all fused ops support dygraph
if is_fused_ops_yaml is True:
new_apis = [
api
2 changes: 1 addition & 1 deletion paddle/phi/kernels/funcs/blas/blas_impl.hip.h
@@ -62,7 +62,7 @@ struct CUBlas<float> {
phi::dynload::rocblas_sgemm_strided_batched(args...));
}

// HIP not supportted, refer to the doc here:
// HIP not supported, refer to the doc here:
// https://github.com/ROCm-Developer-Tools/HIP/blob/roc-3.5.x/docs/markdown/CUBLAS_API_supported_by_HIP.md
template <typename... ARGS>
static void GEMM_EX(ARGS... args) {
2 changes: 1 addition & 1 deletion paddle/phi/kernels/funcs/detail/avx_mathfun.h
@@ -279,7 +279,7 @@ v8sf exp256_ps(v8sf x) {

tmp = _mm256_floor_ps(fx);

/* if greater, substract 1 */
/* if greater, subtract 1 */
// v8sf mask = _mm256_cmpgt_ps(tmp, fx);
v8sf mask = _mm256_cmp_ps(tmp, fx, _CMP_GT_OS);
mask = _mm256_and_ps(mask, one);
2 changes: 1 addition & 1 deletion paddle/phi/kernels/funcs/emb_eltwise_layer_norm_functor.cu
@@ -200,7 +200,7 @@ void EmbEltwiseLayerNormFunctor<T>::operator()(int batch,

template class EmbEltwiseLayerNormFunctor<float>;

// device function 'operator()' is not supportted until cuda 10.0
// device function 'operator()' is not supported until cuda 10.0
// HIP defined __HIP_NO_HALF_CONVERSIONS__ in hip.cmake
#if defined(PADDLE_WITH_CUDA) && CUDA_VERSION >= 10000
template class EmbEltwiseLayerNormFunctor<half>;
2 changes: 1 addition & 1 deletion paddle/phi/kernels/funcs/jit/gen/act.h
@@ -111,7 +111,7 @@ class VActFunc : public JitCode {
vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_EXP_0P5]);
vaddps(jmm_fx, jmm_fx, jmm_tmp);
vroundps(jmm_fy, jmm_fx, 0x01);
// if greater, substract 1
// if greater, subtract 1
vcmpgtps(jmm_mask, jmm_fy, jmm_fx);
vmovaps(jmm_tmp, ptr[reg_ptr_global]);
vandps(jmm_mask, jmm_mask, jmm_tmp);
2 changes: 1 addition & 1 deletion paddle/phi/kernels/funcs/math/bert_encoder_functor.cu
@@ -414,7 +414,7 @@ void SkipLayerNormFunctor<T>::operator()(const int num,

template class SkipLayerNormFunctor<float>;

// device function 'operator()' is not supportted until cuda 10.0
// device function 'operator()' is not supported until cuda 10.0
// HIP defined __HIP_NO_HALF_CONVERSIONS__ in hip.cmake
#if defined(PADDLE_WITH_CUDA) && CUDA_VERSION >= 10000
template class SkipLayerNormFunctor<half>;
2 changes: 1 addition & 1 deletion paddle/phi/kernels/funcs/multihead_matmul_functor.cu
@@ -735,7 +735,7 @@ void MultiheadGPUComputeFunctor<T>::operator()(const phi::GPUContext &dev_ctx,

template class MultiheadGPUComputeFunctor<float>;

// device function 'operator()' is not supportted until cuda 10.0
// device function 'operator()' is not supported until cuda 10.0
// HIP defined __HIP_NO_HALF_CONVERSIONS__ in hip.cmake
#if defined(PADDLE_WITH_CUDA) && CUDA_VERSION >= 10000
template class MultiheadGPUComputeFunctor<half>;
2 changes: 1 addition & 1 deletion paddle/phi/kernels/funcs/skip_layernorm_functor.cu
@@ -403,7 +403,7 @@ void SkipLayerNormFunctor<T>::operator()(const int num,

template class SkipLayerNormFunctor<float>;

// device function 'operator()' is not supportted until cuda 10.0
// device function 'operator()' is not supported until cuda 10.0
// HIP defined __HIP_NO_HALF_CONVERSIONS__ in hip.cmake
#if defined(PADDLE_WITH_CUDA) && CUDA_VERSION >= 10000
template class SkipLayerNormFunctor<half>;
2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/check_numerics_kernel.cu
@@ -313,7 +313,7 @@ static char* GetGpuHintStringPtr(const phi::GPUContext& ctx,
PADDLE_ENFORCE_EQ(iter != op_var2gpu_str.end(),
true,
common::errors::PreconditionNotMet(
"op_var=%s should successed insert into "
"op_var=%s should be successfully insert into "
"op_var2gpu_str, but now failed",
op_var));

2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpudnn/conv_grad_kernel.cu
@@ -1198,7 +1198,7 @@ void ConvCudnnGradGradKernel(
ScalingParamType<T> alpha = 1.0f;
ScalingParamType<T> beta = 0.0f;

// NOTE(zhiqiu): inplace addto is not supportted in double grad yet.
// NOTE(zhiqiu): inplace addto is not supported in double grad yet.
// ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 1.0f :
// 0.0f;
// VLOG(4) << "Conv_grad_grad: use_addto = " << ctx.Attr<bool>("use_addto");
2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpudnn/conv_kernel.cu
@@ -176,7 +176,7 @@ void ConvCudnnKernelImplV7(const DenseTensor* transformed_input,
ScalingParamType<T> alpha = 1.0f;
ScalingParamType<T> beta = 0.0f;

// NOTE(zhiqiu): inplace addto is not supportted in double grad yet.
// NOTE(zhiqiu): inplace addto is not supported in double grad yet.
// ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 1.0f : 0.0f;
// VLOG(4) << "Conv: use_addto = " << ctx.Attr<bool>("use_addto");

2 changes: 1 addition & 1 deletion python/paddle/incubate/jit/inference_decorator.py
@@ -339,7 +339,7 @@ def forward(self, args):
)
f.write(line)
print(
f"the {func.__name__} function is sucessfully saved to {self.save_path}.pdmodel"
f"the {func.__name__} function is successfully saved to {self.save_path}.pdmodel"
)
sys.stdout.flush()

2 changes: 1 addition & 1 deletion python/paddle/tensorrt/impls/math.py
@@ -157,7 +157,7 @@ def divide_converter(network, paddle_op, inputs):


@converter_registry.register("pd_op.subtract", trt_version="trt_version_ge=8.0")
def substract_converter(network, paddle_op, inputs):
def subtract_converter(network, paddle_op, inputs):
return add_elementwise_layer(
network, paddle_op, inputs, trt.ElementWiseOperation.SUB
)
2 changes: 1 addition & 1 deletion python/paddle/utils/download.py
@@ -268,7 +268,7 @@ def _decompress(fname):

# For protecting decompressing interrupted,
# decompress to fpath_tmp directory firstly, if decompress
# successed, move decompress files to fpath and delete
# succeeded, move decompress files to fpath and delete
# fpath_tmp and remove download compress file.

if tarfile.is_tarfile(fname):
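For context on the guard-against-interruption pattern that comment describes: extracting into a temporary directory and publishing the result only after extraction succeeds means an interruption can never leave a half-written target directory behind. Below is a minimal Python sketch of that idea, using hypothetical names rather than the actual paddle.utils.download code.

import os
import shutil
import tarfile


def safe_decompress(fname, fpath):
    # Extract into a temporary directory first; `fpath` stays untouched
    # until extraction has fully succeeded.
    fpath_tmp = fpath + "_tmp"
    if os.path.isdir(fpath_tmp):
        shutil.rmtree(fpath_tmp)  # clear leftovers from an earlier failed run
    os.makedirs(fpath_tmp)
    with tarfile.open(fname) as tar:
        tar.extractall(path=fpath_tmp)  # may raise; nothing published yet
    # Publish only on success (assumes `fpath` does not exist yet),
    # then remove the downloaded archive.
    shutil.move(fpath_tmp, fpath)
    os.remove(fname)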
2 changes: 1 addition & 1 deletion test/dygraph_to_static/dygraph_to_static_utils.py
@@ -359,7 +359,7 @@ def decorator(fn):
return decorator


# Suger decorators
# Sugar decorators
# These decorators can be simply composed by base decorators
def test_ast_only(fn):
fn = set_to_static_mode(ToStaticMode.AST)(fn)
2 changes: 1 addition & 1 deletion test/legacy_test/hygon_dcu/test_hygon_llama_op.sh
@@ -81,7 +81,7 @@ c_tests=(
"test_squeeze2_op"
# stack
"test_stack_op"
# substract
# subtract
"test_subtract_op"
# transpose, transpose_grad
"test_transpose_op"
2 changes: 1 addition & 1 deletion test/legacy_test/test_dygraph_mnist_fp16.py
@@ -129,7 +129,7 @@ def func_mnist_fp16(self):
x = paddle.to_tensor(x)
y = paddle.to_tensor(y)

# using amp.auto_cast because paddle.nn.Conv2D doesn't suppport setting dtype
# using amp.auto_cast because paddle.nn.Conv2D doesn't support setting dtype
with paddle.amp.auto_cast(dtype='float16'):
loss = model(x, y)
print(loss.numpy())
2 changes: 1 addition & 1 deletion test/tensorrt/test_converter_math.py
@@ -68,7 +68,7 @@ def test_trt_result(self):
self.check_trt_result()


class TestSubstractTRTPattern(TensorRTBaseTest):
class TestSubtractTRTPattern(TensorRTBaseTest):
def setUp(self):
self.python_api = paddle.subtract
self.api_args = {