Fix cppcoreguidelines-pro-type-member-init (pytorch#141787)
Fixes #ISSUE_NUMBER

Pull Request resolved: pytorch#141787
Approved by: https://github.com/albanD
cyyever authored and pytorchmergebot committed Dec 21, 2024
1 parent 7b2af25 commit d7e59c2
Showing 14 changed files with 47 additions and 101 deletions.
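Every change below targets the same clang-tidy diagnostic: cppcoreguidelines-pro-type-member-init fires when a constructor or a declaration leaves members (or locals, via the related init-variables check) uninitialized. Instead of suppressing the warning with NOLINTNEXTLINE, the commit value-initializes the object at its declaration or moves variables to their point of first use. A minimal standalone sketch of the idea — the struct and values here are illustrative, not taken from the diff:

#include <array>
#include <cstdint>

struct Params {        // illustrative aggregate, not a PyTorch type
  double scale;
  int zero_point;
};

int main() {
  Params p{};                       // value-initialized: scale == 0.0, zero_point == 0
  std::array<int64_t, 2> stride{};  // both elements zero-initialized
  return static_cast<int>(p.zero_point + stride[0]);
}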
3 changes: 1 addition & 2 deletions aten/src/ATen/CPUGeneratorImpl.cpp
@@ -198,8 +198,7 @@ void CPUGeneratorImpl::set_state(const c10::TensorImpl& new_state) {
// Note that CPUGeneratorImplStateLegacy stored a state array of 64 bit uints, whereas in our
// redefined mt19937, we have changed to a state array of 32 bit uints. Hence, we are
// doing a std::copy.
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
at::mt19937_data_pod rng_data;
at::mt19937_data_pod rng_data{};
std::copy(std::begin(legacy_pod->state), std::end(legacy_pod->state), rng_data.state_.begin());
rng_data.seed_ = legacy_pod->the_initial_seed;
rng_data.left_ = legacy_pod->left;
1 change: 0 additions & 1 deletion aten/src/ATen/DLConvertor.cpp
@@ -260,7 +260,6 @@ ScalarType toScalarType(const DLDataType& dtype) {
return stype;
}

// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
namespace {
struct ATenDLMTensor {
Tensor handle;
2 changes: 0 additions & 2 deletions aten/src/ATen/core/TensorAccessor.h
@@ -121,7 +121,6 @@ template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPt
class GenericPackedTensorAccessorBase {
public:
typedef typename PtrTraits<T>::PtrType PtrType;
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
C10_HOST GenericPackedTensorAccessorBase(
PtrType data_,
const index_t* sizes_,
@@ -133,7 +132,6 @@ class GenericPackedTensorAccessorBase {

// if index_t is not int64_t, we want to have an int64_t constructor
template <typename source_index_t, class = std::enable_if_t<std::is_same_v<source_index_t, int64_t>>>
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
C10_HOST GenericPackedTensorAccessorBase(
PtrType data_,
const source_index_t* sizes_,
@@ -189,7 +189,6 @@ struct UnsignedIndicesTypeTrait<int8_t> {
using t = uint8_t;
};

// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
PackedLinearWeightQnnp::PackedLinearWeightQnnp(
const BCSRSerializationType& serialized)
: LinearPackedParamsBase(
@@ -146,7 +146,6 @@ c10::intrusive_ptr<LinearPackedParamsBase> PackedLinearWeightQnnp::
weight, bias, out_features_block_size, in_features_block_size);
}

// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
PackedLinearWeightQnnp::PackedLinearWeightQnnp(
const at::Tensor& weight,
const std::optional<at::Tensor>& bias,
9 changes: 4 additions & 5 deletions aten/src/ATen/native/ao_sparse/quantized/cpu/qnnpack_utils.h
@@ -7,14 +7,13 @@
// TODO: Refacto QnnpackUtils.h so as to separate code
// needed for quantized op from the generic qnnpack specific
// quantization utilities.
#include <ATen/native/ao_sparse/quantized/cpu/packed_params.h>
#include <ATen/native/quantized/cpu/QnnpackUtils.h>
#include <pack_block_sparse.h>
#include <ATen/native/ao_sparse/quantized/cpu/packed_params.h>

namespace ao::sparse {

struct TORCH_API PackedLinearWeightQnnp
: public LinearPackedParamsBase {
struct TORCH_API PackedLinearWeightQnnp : public LinearPackedParamsBase {
PackedLinearWeightQnnp(const at::Tensor& weight, const std::optional<at::Tensor>& bias, const int64_t out_features_block_size /* block sparsity size across output_features */, const int64_t in_features_block_size /* block sparsity size across input_features */);
explicit PackedLinearWeightQnnp(const BCSRSerializationType& serialized);
std::optional<at::Tensor> orig_bias_;
@@ -24,7 +23,7 @@ struct TORCH_API PackedLinearWeightQnnp
// In case bias is present bias_ is just a reference to orig_bias_
at::Tensor bias_;
c10::QScheme q_scheme_;
double input_scale_;
double input_scale_{};
std::unique_ptr<qnnpack::BCSRMatrix> bcsr_matrix_;
at::Tensor w_scales_;
std::vector<uint8_t> w_zero_points_;
@@ -86,6 +85,6 @@ struct TORCH_API PackedLinearWeightQnnp
at::Tensor apply_dynamic_impl(const at::Tensor& input);
};

} // namespace ao::sparse

#endif // USE_PYTORCH_QNNPACK
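The header edit above swaps "double input_scale_;" for "double input_scale_{};". A default member initializer guarantees the field has a value on every construction path, so constructors that set it later no longer trip the check. A minimal sketch with an illustrative stand-in type, not the real class:

struct LinearParamsSketch {            // illustrative stand-in, not PackedLinearWeightQnnp
  double input_scale_{};               // default member initializer: 0.0 unless a ctor overrides it
  LinearParamsSketch() = default;      // input_scale_ is still initialized here
  explicit LinearParamsSketch(double s) : input_scale_(s) {}
};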
40 changes: 12 additions & 28 deletions aten/src/ATen/native/cpu/MultinomialKernel.cpp
@@ -51,10 +51,8 @@ multinomial_with_replacement_apply(
for (const auto i : c10::irange(n_dist)) {
/* Get normalized cumulative distribution from prob distribution */
scalar_t sum = 0;
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
scalar_t val;
for (const auto j : c10::irange(n_categories)) {
val = self_ptr[i * self_stride_0 + j * self_stride_1];
scalar_t val = self_ptr[i * self_stride_0 + j * self_stride_1];
TORCH_CHECK(
val >= 0,
"invalid multinomial distribution (encountering probability entry < 0)");
@@ -92,27 +90,21 @@ multinomial_with_replacement_apply(
double uniform_sample = uniform(gen);
/* Do a binary search for the slot in which the prob falls
ie cum_dist[row][slot-1] < uniform_prob < cum_distr[row][slot] */
int left_pointer = 0;
int right_pointer = n_categories;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int mid_pointer;
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
scalar_t cum_prob;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int sample_idx;
int64_t left_pointer = 0;
int64_t right_pointer = n_categories;
/* Make sure the last cumulative distribution bucket sums to 1 */
cum_dist_ptr[(n_categories - 1) * cum_dist_stride_0] = 1;

while (right_pointer - left_pointer > 0) {
mid_pointer = left_pointer + (right_pointer - left_pointer) / 2;
cum_prob = cum_dist_ptr[mid_pointer * cum_dist_stride_0];
int64_t mid_pointer = left_pointer + (right_pointer - left_pointer) / 2;
scalar_t cum_prob = cum_dist_ptr[mid_pointer * cum_dist_stride_0];
if (cum_prob < uniform_sample) {
left_pointer = mid_pointer + 1;
} else {
right_pointer = mid_pointer;
}
}
sample_idx = left_pointer;
auto sample_idx = left_pointer;

/* store in result tensor (will be incremented for lua compat by wrapper)
*/
@@ -155,10 +147,8 @@ multinomial_with_replacement_apply(
for (const auto i : c10::irange(n_dist)) {
/* Get normalized cumulative distribution from prob distribution */
float sum = 0;
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
float val;
for (const auto j : c10::irange(n_categories)) {
val = self_ptr[i * self_stride_0 + j * self_stride_1];
float val = self_ptr[i * self_stride_0 + j * self_stride_1];
TORCH_CHECK(
val >= 0,
"invalid multinomial distribution (encountering probability entry < 0)");
@@ -196,27 +186,21 @@ multinomial_with_replacement_apply(
double uniform_sample = uniform(gen);
/* Do a binary search for the slot in which the prob falls
ie cum_dist[row][slot-1] < uniform_prob < cum_distr[row][slot] */
int left_pointer = 0;
int right_pointer = n_categories;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int mid_pointer;
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
float cum_prob;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int sample_idx;
int64_t left_pointer = 0;
int64_t right_pointer = n_categories;
/* Make sure the last cumulative distribution bucket sums to 1 */
cum_dist_ptr[(n_categories - 1) * cum_dist_stride_0] = 1;

while (right_pointer - left_pointer > 0) {
mid_pointer = left_pointer + (right_pointer - left_pointer) / 2;
cum_prob = cum_dist_ptr[mid_pointer * cum_dist_stride_0];
int64_t mid_pointer = left_pointer + (right_pointer - left_pointer) / 2;
float cum_prob = cum_dist_ptr[mid_pointer * cum_dist_stride_0];
if (cum_prob < uniform_sample) {
left_pointer = mid_pointer + 1;
} else {
right_pointer = mid_pointer;
}
}
sample_idx = left_pointer;
auto sample_idx = left_pointer;

/* store in result tensor (will be incremented for lua compat by wrapper)
*/
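Both kernels above apply the same two changes: the binary-search locals (mid_pointer, cum_prob, sample_idx) are declared where they are first assigned, and the pointers are widened to int64_t. A standalone sketch of the resulting search, with illustrative names and a plain std::vector in place of the strided tensor accessors:

#include <cstdint>
#include <vector>

// Returns the first slot whose cumulative probability is >= uniform_sample.
int64_t find_slot(const std::vector<float>& cum_dist, double uniform_sample) {
  int64_t left_pointer = 0;
  int64_t right_pointer = static_cast<int64_t>(cum_dist.size());
  while (right_pointer - left_pointer > 0) {
    int64_t mid_pointer = left_pointer + (right_pointer - left_pointer) / 2;  // declared at first use
    float cum_prob = cum_dist[mid_pointer];
    if (cum_prob < uniform_sample) {
      left_pointer = mid_pointer + 1;
    } else {
      right_pointer = mid_pointer;
    }
  }
  return left_pointer;  // sample_idx in the kernel
}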
19 changes: 7 additions & 12 deletions aten/src/ATen/native/cpu/SoftMaxKernel.cpp
@@ -227,27 +227,22 @@ inline void _vec_host_softmax_backward_lastdim(
scalar_t* grad_input_data = grad_input_data_base + i * dim_size;
const scalar_t* grad_data = grad_data_base + i * dim_size;
const scalar_t* output_data = output_data_base + i * dim_size;
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
scalar_t sum;
if (log_softmax) {
sum = vec::reduce_all<scalar_t>(
auto sum = vec::reduce_all<scalar_t>(
[](Vec& x, Vec& y) { return x + y; }, grad_data, dim_size);
} else {
sum = vec::map2_reduce_all<scalar_t>(
[](Vec x, Vec y) { return x * y; },
[](Vec x, Vec y) { return x + y; },
grad_data,
output_data,
dim_size);
}
if (log_softmax) {
vec::map2(
[sum](Vec x, Vec y) { return x - ((y.exp()) * Vec(sum)); },
grad_input_data,
grad_data,
output_data,
dim_size);
} else {
auto sum = vec::map2_reduce_all<scalar_t>(
[](Vec x, Vec y) { return x * y; },
[](Vec x, Vec y) { return x + y; },
grad_data,
output_data,
dim_size);
vec::map2(
[sum](Vec x, Vec y) { return (x - Vec(sum)) * y; },
grad_input_data,
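The restructuring above folds each use of sum into the branch that computes it, so the variable can be declared with auto at first use instead of being left uninitialized between two if/else blocks. A scalar sketch of the same shape, without at::vec (the names and plain loops are illustrative):

#include <cmath>
#include <cstddef>
#include <numeric>
#include <vector>

void softmax_backward_sketch(std::vector<float>& grad_input,
                             const std::vector<float>& grad,
                             const std::vector<float>& output,
                             bool log_softmax) {
  if (log_softmax) {
    auto sum = std::accumulate(grad.begin(), grad.end(), 0.0f);   // sum of grad
    for (std::size_t i = 0; i < grad.size(); ++i) {
      grad_input[i] = grad[i] - std::exp(output[i]) * sum;
    }
  } else {
    auto sum = std::inner_product(grad.begin(), grad.end(),
                                  output.begin(), 0.0f);          // sum of grad * output
    for (std::size_t i = 0; i < grad.size(); ++i) {
      grad_input[i] = (grad[i] - sum) * output[i];
    }
  }
}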
17 changes: 6 additions & 11 deletions aten/src/ATen/native/cpu/SumKernel.cpp
@@ -8,6 +8,7 @@
#include <c10/util/irange.h>
#include <ATen/cpu/vec/functional.h>
#include <algorithm>
#include <array>

namespace at::native {
namespace {
@@ -354,9 +355,10 @@ std::array<scalar_t, nrows> multi_row_sum(
const int64_t level_step = (1 << level_power);
const int64_t level_mask = level_step - 1;

// NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays)
scalar_t acc[num_levels][nrows];
std::fill_n(&acc[0][0], num_levels * nrows, scalar_t(0));
std::array<std::array<scalar_t, nrows>, num_levels> acc{};
for (auto &row:acc) {
row.fill(scalar_t(0));
}

int64_t i = 0;
for (; i + level_step <= size;) {
@@ -404,13 +406,7 @@ std::array<scalar_t, nrows> multi_row_sum(
acc[0][k] += acc[j][k];
}
}

// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
std::array<scalar_t, nrows> ret;
for (const auto k : c10::irange(nrows)) {
ret[k] = acc[0][k];
}
return ret;
return acc[0];
}

template <typename scalar_t, typename LoadPolicy>
@@ -504,7 +500,6 @@ void vectorized_outer_sum(
const vacc_t sums = row_sum<vacc_t, VecLoadPolicy>(
row_in, inner_stride, size0);

// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
store<StorePolicy>(data[0], out_stride, j, sums);
}

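In multi_row_sum the raw 2-D C array (plus std::fill_n) becomes a value-initialized nested std::array, and the final copy into a separate, uninitialized result array is replaced by returning the first row directly. A reduced sketch of that shape, with the accumulation itself elided:

#include <array>
#include <cstddef>

template <typename scalar_t, std::size_t num_levels, std::size_t nrows>
std::array<scalar_t, nrows> multi_row_sum_sketch() {
  std::array<std::array<scalar_t, nrows>, num_levels> acc{};  // every element zero-initialized
  // ... accumulate partial sums into acc[level][row] here ...
  for (std::size_t j = 1; j < num_levels; ++j) {
    for (std::size_t k = 0; k < nrows; ++k) {
      acc[0][k] += acc[j][k];
    }
  }
  return acc[0];  // no uninitialized std::array<scalar_t, nrows> ret needed
}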
19 changes: 5 additions & 14 deletions aten/src/ATen/native/quantized/cpu/AdaptiveAveragePooling.cpp
@@ -19,7 +19,6 @@

#include <algorithm>
#include <cmath>
#include <limits>
#include <vector>

#include <ATen/native/quantized/cpu/QnnpackUtils.h>
@@ -72,22 +71,20 @@ static void adaptive_avg_pool_single_out_frame(
at::parallel_for(0, sizeC, 0, [&](int64_t start, int64_t end) {
for (const auto c : c10::irange(start, end)) {
/* loop over output */
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t od, oh, ow;
for (od = 0; od < osizeD; od++) {
for (int64_t od = 0; od < osizeD; od++) {
int istartD = start_index(od, osizeD, isizeD);
int iendD = end_index(od, osizeD, isizeD);
int kD = iendD - istartD;
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
float kDr = 1.0 / kD;
for (oh = 0; oh < osizeH; oh++) {
for (int64_t oh = 0; oh < osizeH; oh++) {
int istartH = start_index(oh, osizeH, isizeH);
int iendH = end_index(oh, osizeH, isizeH);
int kH = iendH - istartH;
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
float kDHr = kDr / kH;

for (ow = 0; ow < osizeW; ow++) {
for (int64_t ow = 0; ow < osizeW; ow++) {
int istartW = start_index(ow, osizeW, isizeW);
int iendW = end_index(ow, osizeW, isizeW);
int kW = iendW - istartW;
@@ -264,10 +261,6 @@ Tensor q_adaptive_avg_pool3d(Tensor& output, const Tensor& input,
Tensor qnnpack_adaptive_avg_pool2d(
const at::Tensor& input,
IntArrayRef output_size) {
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
std::array<int64_t, 2> kernel_size;
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
std::array<int64_t, 2> stride;
std::array<int64_t, 2> padding{0, 0};
bool ceil_mode{false};
bool count_include_pad{false};
@@ -277,12 +270,10 @@ Tensor qnnpack_adaptive_avg_pool2d(
auto output_width = output_shape[output_shape.size() - 1];
auto input_height = input.sizes()[input.dim() - 2];
auto input_width = input.sizes()[input.dim() - 1];
stride[0] = input_height / output_height;
stride[1] = input_width / output_width;
std::array<int64_t, 2> stride{input_height / output_height, input_width / output_width};
// Given the constraint that input_height/width % output_height/width == 0
// stride and kernel size are same.
kernel_size[0] = stride[0];
kernel_size[1] = stride[1];
std::array<int64_t, 2> kernel_size = stride;

return at::native::qnnp_avgpool_helper::qnnpack_avg_pool2d(
input,
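qnnpack_adaptive_avg_pool2d now builds stride and kernel_size as fully initialized arrays rather than assigning into default-constructed ones. A small sketch of that construction; the helper name and parameters are illustrative:

#include <array>
#include <cstdint>

std::array<int64_t, 2> make_kernel_size(int64_t input_height, int64_t input_width,
                                        int64_t output_height, int64_t output_width) {
  std::array<int64_t, 2> stride{input_height / output_height,
                                input_width / output_width};
  // When the input sizes are exact multiples of the output sizes, kernel size equals stride.
  std::array<int64_t, 2> kernel_size = stride;
  return kernel_size;
}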
18 changes: 6 additions & 12 deletions aten/src/ATen/native/quantized/cpu/fbgemm_utils.cpp
@@ -109,18 +109,12 @@ fbgemm::conv_param_t<kSpatialDim> MakeFbgemmConvParam(
const std::vector<int>& dilations,
const std::vector<int>& output_padding,
bool transposed) {
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
std::array<int, kSpatialDim> image_shape_;
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
std::array<int, kSpatialDim> kernels_;
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
std::array<int, kSpatialDim> strides_;
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
std::array<int, kSpatialDim * 2> pads_;
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
std::array<int, kSpatialDim> dilations_;
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
std::array<int, kSpatialDim> output_padding_;
std::array<int, kSpatialDim> image_shape_{};
std::array<int, kSpatialDim> kernels_{};
std::array<int, kSpatialDim> strides_{};
std::array<int, kSpatialDim * 2> pads_{};
std::array<int, kSpatialDim> dilations_{};
std::array<int, kSpatialDim> output_padding_{};
std::move(image_shape.begin(), image_shape.begin() + image_shape.size(), image_shape_.begin());
std::move(
kernels.begin(), kernels.begin() + kernels.size(), kernels_.begin());
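MakeFbgemmConvParam keeps the same move-into-array filling, but the destination arrays are now value-initialized, so no element is ever left indeterminate. A sketch of one such conversion under illustrative names (kSpatialDim as a template parameter, plain int elements):

#include <algorithm>
#include <array>
#include <cstddef>
#include <vector>

template <int kSpatialDim>
std::array<int, kSpatialDim> to_fixed_array(const std::vector<int>& v) {
  std::array<int, kSpatialDim> out{};  // zero-initialized, even if v is short
  const std::size_t n = std::min<std::size_t>(v.size(), kSpatialDim);
  std::copy(v.begin(), v.begin() + n, out.begin());
  return out;
}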
@@ -3348,8 +3348,7 @@ void quantize_tensor_per_tensor_affine_cpu(
check_tensor_memory_format(rtensor, qtensor);
const float* rd = rtensor.const_data_ptr<float>();
auto qd = reinterpret_cast<underlying_t*>(qtensor.data_ptr<scalar_t>());
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
fbgemm::TensorQuantizationParams qparams;
fbgemm::TensorQuantizationParams qparams{};
qparams.scale = scale;
qparams.zero_point = zero_point;
qparams.precision = CHAR_BIT * sizeof(underlying_t);
12 changes: 4 additions & 8 deletions aten/src/ATen/native/quantized/cpu/qembeddingbag.cpp
@@ -67,8 +67,7 @@ at::Tensor& embedding_lookup_fallback_impl(
}

int64_t current = 0;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
float* per_sample_weights_data;
float* per_sample_weights_data = nullptr;
if (per_sample_weights_.has_value()) {
per_sample_weights_data = per_sample_weights_.value().data_ptr<float>();
}
@@ -79,8 +78,7 @@
"Expect the lengths data to be less than indices size");

for (int i = 0; i < lengths_data[m]; ++i, ++current) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t idx;
int64_t idx = -1;
if (!pruned) {
idx = indices_data[current];
TORCH_CHECK((idx >= 0 && idx < N), "Invalid indices data");
@@ -277,8 +275,7 @@ at::Tensor& embedding_bag_nbit_impl(
offsets_data = offsets_include_last_val.data();
}
{
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
std::array<int64_t, 3> shape_arr;
std::array<int64_t, 3> shape_arr{};
c10::IntArrayRef shape;
if(indices.dim() == 2 && is_embedding_op) {
const auto indices_sizes = indices.sizes();
@@ -447,8 +444,7 @@ at::Tensor& embedding_bag_byte_impl(
offsets_data = offsets_include_last_val.data();
}
{
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
std::array<int64_t, 3> shape_arr;
std::array<int64_t, 3> shape_arr{};
c10::IntArrayRef shape;
if (indices.dim() == 2 && is_embedding_op) {
const auto indices_sizes = indices.sizes();
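In the embedding-bag fallback, the per-sample-weights pointer now starts as nullptr and the row index as a sentinel, instead of relying on NOLINT to silence the uninitialized-variable warnings; both are assigned before any real use. A reduced sketch of that control flow with illustrative types and names:

#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

// Returns the weight applied to the row at position `current`; 1.0 when no
// per-sample weights were provided.
float row_weight(const std::optional<std::vector<float>>& per_sample_weights,
                 const std::vector<int64_t>& indices, std::size_t current,
                 bool pruned) {
  const float* per_sample_weights_data = nullptr;   // instead of an uninitialized pointer
  if (per_sample_weights.has_value()) {
    per_sample_weights_data = per_sample_weights->data();
  }
  int64_t idx = -1;                                 // sentinel instead of an uninitialized int
  if (!pruned) {
    idx = indices[current];
  }
  // A pruned table would map `current` through a compressed-index lookup here.
  return (idx >= 0 && per_sample_weights_data != nullptr)
             ? per_sample_weights_data[current]
             : 1.0f;
}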
3 changes: 1 addition & 2 deletions aten/src/ATen/native/xnnpack/Convolution.cpp
@@ -203,8 +203,7 @@ ContextConv2D create(
xnn_operator_t convolution_op{};
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
xnn_status create_status;
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
std::array<int64_t, 4> weight_sizes;
std::array<int64_t, 4> weight_sizes{};

if (transposed) {
const Tensor weight_reordered = reorder_weights_for_transpose_conv(weight_nhwc, groups);