From e107b8d63d82535ef6ca6182ccea7c01ad056be4 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Fri, 29 Nov 2024 16:22:45 +0800 Subject: [PATCH 1/3] support any slice interval --- paddle/fluid/pybind/slice_utils.h | 11 +- paddle/phi/kernels/funcs/slice_utils.h | 233 ++++++++++++++---- paddle/phi/kernels/funcs/strided_slice.h | 73 +++--- paddle/phi/kernels/stride/slice_kernel.cc | 5 +- .../kernels/stride/strided_slice_kernel.cc | 49 ++-- test/indexing/test_getitem.py | 146 +++++++++++ 6 files changed, 380 insertions(+), 137 deletions(-) diff --git a/paddle/fluid/pybind/slice_utils.h b/paddle/fluid/pybind/slice_utils.h index 8925580950a09e..a9bb3a3a4b7b3f 100644 --- a/paddle/fluid/pybind/slice_utils.h +++ b/paddle/fluid/pybind/slice_utils.h @@ -31,6 +31,7 @@ #include "paddle/phi/core/compat/convert_utils.h" #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/kernels/funcs/common_infer_shape_functions.h" +#include "paddle/phi/kernels/funcs/strided_slice.h" #include "pybind11/numpy.h" #include "pybind11/pybind11.h" #include "pybind11/stl.h" @@ -143,8 +144,6 @@ static int _PySlice_GetIndices(PySliceObject* r, "tensor(int) and numpy(int) in slice item, but received %s.", std::string(Py_TYPE(r->start)->tp_name))); } - if (*start < 0) *start += length; - *start = std::max(*start, static_cast(0)); } if (r->stop == Py_None) { *stop = *step < 0 ? -1 : length; @@ -159,9 +158,13 @@ static int _PySlice_GetIndices(PySliceObject* r, "tensor(int) and numpy(int) in slice item, but received %s.", std::string(Py_TYPE(r->stop)->tp_name))); } - if (0 < *step && *stop < 0) *stop += length; - *stop = std::min(*stop, length); } + + // normalize start and stop + bool dummy_zero_dim_out = false; + phi::funcs::normalize_interval( + *start, *stop, *step, length, start, stop, &dummy_zero_dim_out); + // return value below seems to be useless... if (*stop > length) return -1; if (*start >= length) return -1; if (*step == 0) return -1; diff --git a/paddle/phi/kernels/funcs/slice_utils.h b/paddle/phi/kernels/funcs/slice_utils.h index 14cda4f9016fe0..c24ce7d51681c4 100644 --- a/paddle/phi/kernels/funcs/slice_utils.h +++ b/paddle/phi/kernels/funcs/slice_utils.h @@ -23,6 +23,164 @@ namespace phi { namespace funcs { +/** + * @brief Normalizes the slice interval [st, ed) with a given step and dimension + * size. + * + * This function adjusts the interval [st, ed) to fit within the bounds defined + * by the dimension size, taking into account the specified step. It handles + * both positive and negative steps and accounts for negative indices by + * converting them to equivalent positive indices within the dimension size. + * + * @tparam T The data type of the input parameters, which can be an integer or + * floating-point type. + * @param st The starting index of the interval. + * @param ed The ending index of the interval (exclusive). + * @param step The step size for iterating through the interval, which can be + * positive or negative. + * @param dim_size The size of the dimension, serving as the upper bound for + * valid indices. + * @param st_out Pointer to store the normalized starting index. + * @param ed_out Pointer to store the normalized ending index. + * @param zero_dim_out Pointer to a boolean flag that is set to true if the + * resulting interval is empty. + * + * @details + * - If `step > 0`, the function ensures that `st` and `ed` are adjusted to be + * within the range [0, dim_size). 
+ * - If `step < 0`, the function adjusts `st` and `ed` to accommodate the + * reverse traversal of the interval. + * - Handles special cases where `st` and `ed` may be out of bounds or where + * `dim_size` is zero. + * - Uses pointer parameters for output to modify the values directly. + * - The function also handles scenarios involving negative indices, converting + * them appropriately. + * + * @example + * T st_out, ed_out; + * bool zero_dim; + * normalize_interval(-3, -2, 1, 4, &st_out, &ed_out, &zero_dim); + * // Results in: st_out = 1, ed_out = 2, zero_dim = false + * + * @note The function assumes that the pointers provided for output parameters + * are valid and non-null. + */ +template +void normalize_interval( + T st, T ed, T step, T dim_size, T* st_out, T* ed_out, bool* zero_dim_out) { + /* Normalize slice interval [st, ed) with given step and dim_size. + e.g. if given st = -3, ed = -2, step = 1, dim_size = 4, + then normalized st_out = 1(-3+4), st_ed = 2(-2+4). + + This function is general enough and applicable + for both step > 0 and step < 0 scenarios. + + Indicices dipicted as below: + + =============================================================== + | 0 1 2 3 ... D-1 | D D+1 ... + ... -D-2 -D-1 | -D -D+1 -D+2 -D+3 ... -1 | + =============================================================== + */ + // 0 dim size, just return + if (dim_size <= 0) { + *st_out = *ed_out = 0; + *zero_dim_out = true; + return; + } + + if (step > 0) { + /* positive step */ + // 0 dim size case 1 + if (st >= dim_size) { + *st_out = *ed_out = 0; + *zero_dim_out = true; + return; + } + + // 0 dim size case 2 + if (ed <= -dim_size) { + *st_out = *ed_out = 0; + *zero_dim_out = true; + return; + } + + // make st belongs: (-inf, -D-1)∪[0, D) + if (-dim_size <= st && st < 0) { + st += dim_size; + } + // make st belongs: [0, D) + st = std::max(st, static_cast(0)); + + // make ed belongs: [0, +inf) + if (-dim_size <= ed && ed < 0) { + ed += dim_size; + } + // make ed belongs: [0, D] + ed = std::min(ed, dim_size); + + // 0 dim size case 3 + if (st >= ed) { + *st_out = *ed_out = 0; + *zero_dim_out = true; + return; + } + *st_out = st; + *ed_out = ed; + return; + + } else { + /* negative step */ + // 0 dim size case 1 + if (st <= -dim_size - 1) { + *st_out = *ed_out = 0; + *zero_dim_out = true; + return; + } + + // 0 dim size case 2 + if (ed >= dim_size - 1) { + *st_out = *ed_out = 0; + *zero_dim_out = true; + return; + } + + // make st belongs: [0, D)∪[0, +inf) + if (-dim_size <= st && st < 0) { + st += dim_size; + } + // make st belongs: [0, D) + st = std::min(st, dim_size - 1); + + // make ed belongs: [-inf, -D)∪[0, D) + if (-dim_size <= ed && ed < 0) { + ed += dim_size; + } + // make ed belongs: [-D-1, -D)∪[0, D) ==> {-D-1}∪[0, D) + ed = std::max(ed, -dim_size - 1); + + if (ed == -dim_size - 1) { + // When ed=-D-1, it is symmetrical to when step is greater than 0 and + // ed=D. + *st_out = st; + *ed_out = ed; + return; + } + + // now only remain the case that ed belongs to: [0, D) + // 0 dim size case 3 + if (ed >= st) { + *st_out = *ed_out = 0; + *zero_dim_out = true; + return; + } + + *st_out = st; + *ed_out = ed; + return; + } +} + template inline void CheckAndUpdateSliceAttrs(const DDim in_dims, const std::vector& axes, @@ -56,41 +214,17 @@ inline void CheckAndUpdateSliceAttrs(const DDim in_dims, common::errors::InvalidArgument( "Step should not be 0, but received step = %d.", step)); - T start = (*starts)[i] < 0 ? 
((*starts)[i] + dim_value) : (*starts)[i]; - start = std::max(start, static_cast(0)); - - T end = - 0 < step && (*ends)[i] < 0 ? ((*ends)[i] + dim_value) : (*ends)[i]; - end = std::min(end, dim_value); - - if (step > 0) { - start = std::min(start, dim_value); - end = std::max(end, static_cast(0)); - PADDLE_ENFORCE_GE( - end, - start, - common::errors::InvalidArgument( - "When step > 0, end should be greater than start, but " - "received end = %d, start = %d.", - end, - start)); - } else { - // NOTE(liym27): When step < 0, start should less and equal to - // dim_value-1 - // "end is -1" means contain the 0-th element of this axis. - start = std::min(start, dim_value - 1); - if (end < -1) { - end += dim_value; - } - end = std::max(end, static_cast(-1)); - PADDLE_ENFORCE_GE( - start, - end, - common::errors::InvalidArgument( - "When step < 0, start should be greater than end, but " - "received start = %d, end = %d.", - start, - end)); + T start, end; + bool dummy_zero_out_dim = false; + normalize_interval((*starts)[i], + (*ends)[i], + step, + dim_value, + &start, + &end, + &dummy_zero_out_dim); + if (end == -dim_value - 1) { + end = -1; } (*starts)[i] = start; @@ -117,24 +251,17 @@ inline void UpdateSliceAttrs(const DDim in_dims, T dim_value = in_dims[axis]; if (dim_value > 0) { T step = steps == nullptr ? 1 : (*steps)[i]; - T start = (*starts)[i] < 0 ? ((*starts)[i] + dim_value) : (*starts)[i]; - start = std::max(start, static_cast(0)); - T end = - 0 < step && (*ends)[i] < 0 ? ((*ends)[i] + dim_value) : (*ends)[i]; - end = std::min(end, dim_value); - - if (step > 0) { - start = std::min(start, dim_value); - end = std::max(end, static_cast(0)); - } else { - // NOTE: When step < 0, start should less and equal to - // dim_value-1 - // "end is -1" means contain the 0-th element of this axis. - start = std::min(start, dim_value - 1); - if (end < -1) { - end += dim_value; - } - end = std::max(end, static_cast(-1)); + T start = (*starts)[i]; + T end = (*ends)[i]; + + bool dummy_zero_out_dim = false; + normalize_interval( + start, end, step, dim_value, &start, &end, &dummy_zero_out_dim); + + // manually set the end to -1 when step < 0, + // which indicates that it can extend to the left endpoint. 
+ if (end == -dim_value - 1 && step < 0) { + end = -1; } (*starts)[i] = start; (*ends)[i] = end; diff --git a/paddle/phi/kernels/funcs/strided_slice.h b/paddle/phi/kernels/funcs/strided_slice.h index 46342175a8213b..f14f2f5990b9fb 100644 --- a/paddle/phi/kernels/funcs/strided_slice.h +++ b/paddle/phi/kernels/funcs/strided_slice.h @@ -25,6 +25,7 @@ #include "paddle/phi/kernels/funcs/eigen/common.h" #include "paddle/phi/kernels/funcs/eigen/eigen_function.h" #include "paddle/phi/kernels/funcs/math_function.h" +#include "paddle/phi/kernels/funcs/slice_utils.h" namespace phi { namespace funcs { @@ -73,39 +74,26 @@ static void StridedSliceOutDims(const std::vector& starts, continue; } - if (start_index < 0) { - start_index = start_index + axis_size; - start_index = std::max(start_index, 0); - } - if (end_index < 0) { - if (!(end_index == -1 && stride_index < 0)) { // skip None stop condition - end_index = end_index + axis_size; - if (end_index < 0) { - end_index = 0; - } - } + bool neg_dim_condition = false; + normalize_interval(start_index, + end_index, + stride_index, + axis_size, + &start_index, + &end_index, + &neg_dim_condition); + if (end_index == -axis_size - 1) { + end_index = -1; } - if (stride_index < 0) { - start_index = start_index + 1; - end_index = end_index + 1; + int64_t out_dims_index; + if (neg_dim_condition) { + out_dims_index = 0; + } else { + int64_t step_size = std::abs(stride_index); + out_dims_index = + (std::abs(end_index - start_index) + step_size - 1) / step_size; } - - bool neg_dim_condition = ((stride_index < 0 && (start_index < end_index)) || - (stride_index > 0 && (start_index > end_index))); - PADDLE_ENFORCE_EQ(neg_dim_condition, - false, - errors::InvalidArgument( - "The start index and end index are invalid for their " - "corresponding stride.")); - - int64_t left = - std::max(static_cast(0), std::min(start_index, end_index)); - int64_t right = std::min(axis_size, std::max(start_index, end_index)); - int64_t step = std::abs(stride_index); - - auto out_dims_index = (std::abs(right - left) + step - 1) / step; - out_dims_vector[axes_index] = out_dims_index; } } @@ -136,19 +124,18 @@ static void StridedSliceFunctor(int64_t* starts, decrease_axis_affect = true; } } - // stride must not be zero - if (starts[axis_index] < 0) { - starts[axis_index] = starts[axis_index] + axis_size; - starts[axis_index] = std::max(starts[axis_index], 0); - } - if (ends[axis_index] < 0) { - if (!(ends[axis_index] == -1 && - strides[axis_index] < 0)) { // skip None stop condition - ends[axis_index] = ends[axis_index] + axis_size; - if (ends[axis_index] < 0) { - ends[axis_index] = 0; - } - } + bool dummy_zero_dim_out = false; + normalize_interval(starts[axis_index], + ends[axis_index], + strides[axis_index], + axis_size, + &starts[axis_index], + &ends[axis_index], + &dummy_zero_dim_out); + if (ends[axis_index] == -axis_size - 1) { + // manually set the end to -1 when step < 0, + // which indicates that it can extend to the left endpoint. 
+ ends[axis_index] = -1; } if (decrease_axis_affect) { if (strides[axis_index] < 0) { diff --git a/paddle/phi/kernels/stride/slice_kernel.cc b/paddle/phi/kernels/stride/slice_kernel.cc index 71eaec4fd98d9e..fe65a002b67df6 100644 --- a/paddle/phi/kernels/stride/slice_kernel.cc +++ b/paddle/phi/kernels/stride/slice_kernel.cc @@ -49,7 +49,8 @@ void SliceStridedKernel(const Context& ctx, item = std::max(int64_t(0), item + int64_t(in_dims.size())); } } - + // axis = 0, dim_value = 3, st[0]=0, ed[0]=4 + // The step seems to be regarded as 1 here phi::funcs::CheckAndUpdateSliceAttrs( in_dims, new_axes, &starts, &ends, nullptr, nullptr); @@ -62,7 +63,7 @@ void SliceStridedKernel(const Context& ctx, output_offset = static_cast( output_offset + starts[i] * output_stride[new_axes[i]] * SizeOf(out->dtype())); - output_dims[new_axes[i]] = ends[i] - starts[i]; + output_dims[new_axes[i]] = std::abs(ends[i] - starts[i]); } std::vector decrease_flag(output_dims.size(), 0); diff --git a/paddle/phi/kernels/stride/strided_slice_kernel.cc b/paddle/phi/kernels/stride/strided_slice_kernel.cc index 69183b8b9a69c8..4779930fbd6ff7 100644 --- a/paddle/phi/kernels/stride/strided_slice_kernel.cc +++ b/paddle/phi/kernels/stride/strided_slice_kernel.cc @@ -15,6 +15,7 @@ #include "paddle/phi/kernels/strided_slice_kernel.h" #include "glog/logging.h" +#include "paddle/phi/kernels/funcs/slice_utils.h" #include "paddle/common/flags.h" #include "paddle/phi/backends/all_context.h" @@ -53,47 +54,25 @@ void StridedSliceRawStridedKernel(const Context& dev_ctx, if (axis_size < 0) { continue; } - - if (starts[i] < 0) { - starts[i] = starts[i] + axis_size; - starts[i] = std::max(starts[i], 0); - } - if (ends[i] < 0) { - if (!(ends[i] == -1 && strides[i] < 0)) { // skip None stop condition - ends[i] = ends[i] + axis_size; - if (ends[i] < 0) { - ends[i] = 0; - } - } + bool dummy_zero_dim_out = false; + funcs::normalize_interval(starts[i], + ends[i], + strides[i], + axis_size, + &starts[i], + &ends[i], + &dummy_zero_dim_out); + if (ends[i] == -axis_size - 1) { + ends[i] = -1; } - int64_t left = 0; - int64_t right = 0; - - if (strides[i] < 0) { - left = std::max(static_cast(-1), ends[i]); - right = std::min(axis_size - 1, starts[i]); - } else { - left = std::max(static_cast(0), starts[i]); - right = std::min(axis_size, ends[i]); - } - int64_t step = std::abs(strides[i]); + int64_t step_size = std::abs(strides[i]); - auto dim = (std::abs(right - left) + step - 1) / step; - - if (dim <= 0) { - dim = 0; - strides[i] = 1; - starts[i] = 0; - } - - if (starts[i] >= axis_size) { - starts[i] = (strides[i] < 0) ? 
axis_size - 1 : axis_size; - } + auto out_dim = (std::abs(ends[i] - starts[i]) + step_size - 1) / step_size; output_offset += static_cast(starts[i] * output_stride[axes[i]] * SizeOf(out->dtype())); - output_dims[axes[i]] = dim; + output_dims[axes[i]] = out_dim; output_stride[axes[i]] *= strides[i]; } diff --git a/test/indexing/test_getitem.py b/test/indexing/test_getitem.py index 86fe0919ffd6b4..40a3388b352b74 100644 --- a/test/indexing/test_getitem.py +++ b/test/indexing/test_getitem.py @@ -253,6 +253,68 @@ def test_combined_index_12(self): np.testing.assert_allclose(y.numpy(), np_res) + def test_indexing_with_all_possible_start_end_step_dygraph(self): + np_data = np.arange(3 * 4 * 5 * 6).reshape((3, 4, 5, 6)) + dim_size = np_data.shape[3] + for st in range(-dim_size - 2, dim_size + 2): + for ed in range(-dim_size - 2, dim_size + 2): + for step in range(-dim_size - 2, dim_size + 2): + try: + np_res = np_data[:, :, st:ed:step, :] + except Exception as e: + # skip the invalid case use try-except strategy + continue + pd_data = paddle.to_tensor(np_data) + pd_res_out = pd_data[:, :, st:ed:step, :] + self.assertEqual( + pd_res_out.shape, + list(np_res.shape), + f"Failed test in indexing with pd_data[{st}:{ed}:{step}]", + ) + np.testing.assert_allclose(pd_res_out.numpy(), np_res) + + def test_indexing_with_all_possible_start_end_step_dygraph_0_size(self): + np_data = np.arange(0 * 4 * 5 * 6).reshape((0, 4, 5, 6)) + dim_size = np_data.shape[3] + for st in range(-dim_size - 2, dim_size + 2): + for ed in range(-dim_size - 2, dim_size + 2): + for step in range(-dim_size - 2, dim_size + 2): + try: + np_res = np_data[:, :, st:ed:step, :] + except Exception as e: + # skip the invalid case use try-except strategy + continue + pd_data = paddle.to_tensor(np_data) + pd_res_out = pd_data[:, :, st:ed:step, :] + self.assertEqual( + pd_res_out.shape, + list(np_res.shape), + f"Failed test in indexing with pd_data[{st}:{ed}:{step}]", + ) + np.testing.assert_allclose(pd_res_out.numpy(), np_res) + + def test_indexing_with_all_possible_start_end_step_dygraph_0_size_self( + self, + ): + np_data = np.arange(3 * 4 * 0 * 6).reshape((3, 4, 0, 6)) + dim_size = np_data.shape[3] + for st in range(-dim_size - 2, dim_size + 2): + for ed in range(-dim_size - 2, dim_size + 2): + for step in range(-dim_size - 2, dim_size + 2): + try: + np_res = np_data[:, :, st:ed:step, :] + except Exception as e: + # skip the invalid case use try-except strategy + continue + pd_data = paddle.to_tensor(np_data) + pd_res_out = pd_data[:, :, st:ed:step, :] + self.assertEqual( + pd_res_out.shape, + list(np_res.shape), + f"Failed test in indexing with pd_data[{st}:{ed}:{step}]", + ) + np.testing.assert_allclose(pd_res_out.numpy(), np_res) + def test_index_has_range(self): np_data = ( np.arange(3 * 4 * 5 * 6).reshape((3, 4, 5, 6)).astype(self.ndtype) @@ -1028,6 +1090,90 @@ def test_combined_index_12(self): np.testing.assert_allclose(res[0], np_res) + def test_indexing_with_all_possible_start_end_step(self): + np_data = np.arange(3 * 4 * 5 * 6).reshape((3, 4, 5, 6)) + dim_size = np_data.shape[3] + for st in range(-dim_size - 2, dim_size + 2): + for ed in range(-dim_size - 2, dim_size + 2): + for step in [-1, 1]: + try: + np_res = np_data[:, :, st:ed:step, :] + except Exception as e: + # skip the invalid case use try-except strategy + continue + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): + pd_data = paddle.to_tensor(np_data) + pd_res = _getitem_static( + pd_data, + ( + slice(None), + slice(None), + 
slice(st, ed, step), + slice(None), + ), + ) + (pd_res_out,) = self.exe.run(fetch_list=[pd_res]) + + np.testing.assert_allclose(pd_res_out, np_res) + + def test_indexing_with_all_possible_start_end_step_0_size(self): + np_data = np.arange(0 * 4 * 5 * 6).reshape((0, 4, 5, 6)) + dim_size = np_data.shape[3] + for st in range(-dim_size - 2, dim_size + 2): + for ed in range(-dim_size - 2, dim_size + 2): + for step in [-1, 1]: + try: + np_res = np_data[:, :, st:ed:step, :] + except Exception as e: + # skip the invalid case use try-except strategy + continue + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): + pd_data = paddle.to_tensor(np_data) + pd_res = _getitem_static( + pd_data, + ( + slice(None), + slice(None), + slice(st, ed, step), + slice(None), + ), + ) + (pd_res_out,) = self.exe.run(fetch_list=[pd_res]) + + np.testing.assert_allclose(pd_res_out, np_res) + + def test_indexing_with_all_possible_start_end_step_0_size_self(self): + np_data = np.arange(3 * 4 * 0 * 6).reshape((3, 4, 0, 6)) + dim_size = np_data.shape[3] + for st in range(-dim_size - 2, dim_size + 2): + for ed in range(-dim_size - 2, dim_size + 2): + for step in [-1, 1]: + try: + np_res = np_data[:, :, st:ed:step, :] + except Exception as e: + # skip the invalid case use try-except strategy + continue + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): + pd_data = paddle.to_tensor(np_data) + pd_res = _getitem_static( + pd_data, + ( + slice(None), + slice(None), + slice(st, ed, step), + slice(None), + ), + ) + (pd_res_out,) = self.exe.run(fetch_list=[pd_res]) + + np.testing.assert_allclose(pd_res_out, np_res) + def test_index_has_range(self): # only one bool tensor with all False np_data = np.arange(3 * 4 * 5 * 6).reshape((3, 4, 5, 6)) From cb6550c29d11c9301909187668b997e08ee0783d Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Fri, 29 Nov 2024 18:24:09 +0800 Subject: [PATCH 2/3] fix bug --- paddle/fluid/pybind/slice_utils.h | 2 +- python/paddle/base/variable_index.py | 5 +- test/indexing/test_getitem.py | 228 +++++++++++++----------- test/legacy_test/test_multinomial_op.py | 5 - 4 files changed, 131 insertions(+), 109 deletions(-) diff --git a/paddle/fluid/pybind/slice_utils.h b/paddle/fluid/pybind/slice_utils.h index a9bb3a3a4b7b3f..3a42f954538311 100644 --- a/paddle/fluid/pybind/slice_utils.h +++ b/paddle/fluid/pybind/slice_utils.h @@ -146,7 +146,7 @@ static int _PySlice_GetIndices(PySliceObject* r, } } if (r->stop == Py_None) { - *stop = *step < 0 ? -1 : length; + *stop = *step < 0 ? -length - 1 : length; } else { if (PyCheckInteger(r->stop) || IsNumpyType(r->stop)) { *stop = PyLong_AsLong(r->stop); diff --git a/python/paddle/base/variable_index.py b/python/paddle/base/variable_index.py index 09191281a46835..0345d7ea8a4762 100644 --- a/python/paddle/base/variable_index.py +++ b/python/paddle/base/variable_index.py @@ -20,6 +20,7 @@ from . 
import core, unique_name MAX_INTEGER = 2**31 - 1 +MIN_INTEGER = -(2**31) def replace_ellipsis(var, item): @@ -335,7 +336,7 @@ def parse_index(x, indices): if start is None: start = 0 if step > 0 else MAX_INTEGER if end is None: - end = MAX_INTEGER if step > 0 else -1 + end = MAX_INTEGER if step > 0 else MIN_INTEGER if not ( is_tensor_array @@ -343,7 +344,7 @@ def parse_index(x, indices): or isinstance(step, (paddle.base.Variable, paddle.pir.Value)) ): if x.shape[dim] != -1 and end >= x.shape[dim]: - end = MAX_INTEGER if step > 0 else -1 + end = MAX_INTEGER if step > 0 else MIN_INTEGER estimated_dim += 1 dim += 1 diff --git a/test/indexing/test_getitem.py b/test/indexing/test_getitem.py index 40a3388b352b74..685927af685274 100644 --- a/test/indexing/test_getitem.py +++ b/test/indexing/test_getitem.py @@ -253,68 +253,6 @@ def test_combined_index_12(self): np.testing.assert_allclose(y.numpy(), np_res) - def test_indexing_with_all_possible_start_end_step_dygraph(self): - np_data = np.arange(3 * 4 * 5 * 6).reshape((3, 4, 5, 6)) - dim_size = np_data.shape[3] - for st in range(-dim_size - 2, dim_size + 2): - for ed in range(-dim_size - 2, dim_size + 2): - for step in range(-dim_size - 2, dim_size + 2): - try: - np_res = np_data[:, :, st:ed:step, :] - except Exception as e: - # skip the invalid case use try-except strategy - continue - pd_data = paddle.to_tensor(np_data) - pd_res_out = pd_data[:, :, st:ed:step, :] - self.assertEqual( - pd_res_out.shape, - list(np_res.shape), - f"Failed test in indexing with pd_data[{st}:{ed}:{step}]", - ) - np.testing.assert_allclose(pd_res_out.numpy(), np_res) - - def test_indexing_with_all_possible_start_end_step_dygraph_0_size(self): - np_data = np.arange(0 * 4 * 5 * 6).reshape((0, 4, 5, 6)) - dim_size = np_data.shape[3] - for st in range(-dim_size - 2, dim_size + 2): - for ed in range(-dim_size - 2, dim_size + 2): - for step in range(-dim_size - 2, dim_size + 2): - try: - np_res = np_data[:, :, st:ed:step, :] - except Exception as e: - # skip the invalid case use try-except strategy - continue - pd_data = paddle.to_tensor(np_data) - pd_res_out = pd_data[:, :, st:ed:step, :] - self.assertEqual( - pd_res_out.shape, - list(np_res.shape), - f"Failed test in indexing with pd_data[{st}:{ed}:{step}]", - ) - np.testing.assert_allclose(pd_res_out.numpy(), np_res) - - def test_indexing_with_all_possible_start_end_step_dygraph_0_size_self( - self, - ): - np_data = np.arange(3 * 4 * 0 * 6).reshape((3, 4, 0, 6)) - dim_size = np_data.shape[3] - for st in range(-dim_size - 2, dim_size + 2): - for ed in range(-dim_size - 2, dim_size + 2): - for step in range(-dim_size - 2, dim_size + 2): - try: - np_res = np_data[:, :, st:ed:step, :] - except Exception as e: - # skip the invalid case use try-except strategy - continue - pd_data = paddle.to_tensor(np_data) - pd_res_out = pd_data[:, :, st:ed:step, :] - self.assertEqual( - pd_res_out.shape, - list(np_res.shape), - f"Failed test in indexing with pd_data[{st}:{ed}:{step}]", - ) - np.testing.assert_allclose(pd_res_out.numpy(), np_res) - def test_index_has_range(self): np_data = ( np.arange(3 * 4 * 5 * 6).reshape((3, 4, 5, 6)).astype(self.ndtype) @@ -472,6 +410,76 @@ def test_indexing_is_boolean_false(self): np.testing.assert_allclose(y.numpy(), np_res) +class TestMultipleIndexing(TestGetitemInDygraph): + def test_indexing_with_all_possible_start_end_step_dygraph(self): + np_data = np.arange(5 * 4 * 3 * 2).reshape((5, 4, 3, 2)) + dim_size = np_data.shape[3] + for st in [*list(range(-dim_size - 1, dim_size + 2)), None]: + for ed in 
[*list(range(-dim_size - 1, dim_size + 2)), None]: + for step in list(range(-dim_size - 1, dim_size + 2)): + if step == 0: + continue + try: + np_res = np_data[:, :, st:ed:step, :] + except Exception as e: + # skip the invalid case use try-except strategy + continue + pd_data = paddle.to_tensor(np_data) + pd_res_out = pd_data[:, :, st:ed:step, :] + self.assertEqual( + pd_res_out.shape, + list(np_res.shape), + f"Failed indexing test in case: x.shape={np_data.shape}, slice=({st},{ed},{step})", + ) + np.testing.assert_allclose(pd_res_out.numpy(), np_res) + + def test_indexing_with_all_possible_start_end_step_dygraph_0_size(self): + np_data = np.arange(0 * 4 * 3 * 2).reshape((0, 4, 3, 2)) + dim_size = np_data.shape[3] + for st in [*list(range(-dim_size - 1, dim_size + 2)), None]: + for ed in [*list(range(-dim_size - 1, dim_size + 2)), None]: + for step in list(range(-dim_size - 1, dim_size + 2)): + if step == 0: + continue + try: + np_res = np_data[:, :, st:ed:step, :] + except Exception as e: + # skip the invalid case use try-except strategy + continue + pd_data = paddle.to_tensor(np_data) + pd_res_out = pd_data[:, :, st:ed:step, :] + self.assertEqual( + pd_res_out.shape, + list(np_res.shape), + f"Failed indexing test in case: x.shape={np_data.shape}, slice=({st},{ed},{step})", + ) + np.testing.assert_allclose(pd_res_out.numpy(), np_res) + + def test_indexing_with_all_possible_start_end_step_dygraph_0_size_self( + self, + ): + np_data = np.arange(5 * 4 * 0 * 2).reshape((5, 4, 0, 2)) + dim_size = np_data.shape[3] + for st in [*list(range(-dim_size - 1, dim_size + 2)), None]: + for ed in [*list(range(-dim_size - 1, dim_size + 2)), None]: + for step in list(range(-dim_size - 1, dim_size + 2)): + if step == 0: + continue + try: + np_res = np_data[:, :, st:ed:step, :] + except Exception as e: + # skip the invalid case use try-except strategy + continue + pd_data = paddle.to_tensor(np_data) + pd_res_out = pd_data[:, :, st:ed:step, :] + self.assertEqual( + pd_res_out.shape, + list(np_res.shape), + f"Failed indexing test in case: x.shape={np_data.shape}, slice=({st},{ed},{step})", + ) + np.testing.assert_allclose(pd_res_out.numpy(), np_res) + + @unittest.skipIf( not core.is_compiled_with_cuda() or not core.is_float16_supported(core.CUDAPlace(0)), @@ -1091,19 +1099,21 @@ def test_combined_index_12(self): np.testing.assert_allclose(res[0], np_res) def test_indexing_with_all_possible_start_end_step(self): - np_data = np.arange(3 * 4 * 5 * 6).reshape((3, 4, 5, 6)) + np_data = np.arange(5 * 4 * 3 * 2).reshape((5, 4, 3, 2)) dim_size = np_data.shape[3] - for st in range(-dim_size - 2, dim_size + 2): - for ed in range(-dim_size - 2, dim_size + 2): - for step in [-1, 1]: - try: - np_res = np_data[:, :, st:ed:step, :] - except Exception as e: - # skip the invalid case use try-except strategy - continue - with paddle.static.program_guard( - paddle.static.Program(), paddle.static.Program() - ): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): + for st in [-dim_size - 1, dim_size + 1, 0, None]: + for ed in [-dim_size - 1, dim_size + 1, 0, None]: + for step in [-dim_size - 1, dim_size + 1, 0]: + if step == 0: + continue + try: + np_res = np_data[:, :, st:ed:step, :] + except Exception as e: + # skip the invalid case use try-except strategy + continue pd_data = paddle.to_tensor(np_data) pd_res = _getitem_static( pd_data, @@ -1116,22 +1126,28 @@ def test_indexing_with_all_possible_start_end_step(self): ) (pd_res_out,) = self.exe.run(fetch_list=[pd_res]) - 
np.testing.assert_allclose(pd_res_out, np_res) + np.testing.assert_allclose( + pd_res_out, + np_res, + err_msg=f"Failed indexing test in case: x.shape={np_data.shape}, slice=({st},{ed},{step})", + ) def test_indexing_with_all_possible_start_end_step_0_size(self): - np_data = np.arange(0 * 4 * 5 * 6).reshape((0, 4, 5, 6)) + np_data = np.arange(0 * 4 * 3 * 2).reshape((0, 4, 3, 2)) dim_size = np_data.shape[3] - for st in range(-dim_size - 2, dim_size + 2): - for ed in range(-dim_size - 2, dim_size + 2): - for step in [-1, 1]: - try: - np_res = np_data[:, :, st:ed:step, :] - except Exception as e: - # skip the invalid case use try-except strategy - continue - with paddle.static.program_guard( - paddle.static.Program(), paddle.static.Program() - ): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): + for st in [-dim_size - 1, dim_size + 1, 0, None]: + for ed in [-dim_size - 1, dim_size + 1, 0, None]: + for step in [-dim_size - 1, dim_size + 1, 0]: + if step == 0: + continue + try: + np_res = np_data[:, :, st:ed:step, :] + except Exception as e: + # skip the invalid case use try-except strategy + continue pd_data = paddle.to_tensor(np_data) pd_res = _getitem_static( pd_data, @@ -1144,22 +1160,28 @@ def test_indexing_with_all_possible_start_end_step_0_size(self): ) (pd_res_out,) = self.exe.run(fetch_list=[pd_res]) - np.testing.assert_allclose(pd_res_out, np_res) + np.testing.assert_allclose( + pd_res_out, + np_res, + err_msg=f"Failed indexing test in case: x.shape={np_data.shape}, slice=({st},{ed},{step})", + ) def test_indexing_with_all_possible_start_end_step_0_size_self(self): - np_data = np.arange(3 * 4 * 0 * 6).reshape((3, 4, 0, 6)) + np_data = np.arange(5 * 4 * 0 * 2).reshape((5, 4, 0, 2)) dim_size = np_data.shape[3] - for st in range(-dim_size - 2, dim_size + 2): - for ed in range(-dim_size - 2, dim_size + 2): - for step in [-1, 1]: - try: - np_res = np_data[:, :, st:ed:step, :] - except Exception as e: - # skip the invalid case use try-except strategy - continue - with paddle.static.program_guard( - paddle.static.Program(), paddle.static.Program() - ): + with paddle.static.program_guard( + paddle.static.Program(), paddle.static.Program() + ): + for st in [-dim_size - 1, dim_size + 1, 0, None]: + for ed in [-dim_size - 1, dim_size + 1, 0, None]: + for step in [-dim_size - 1, dim_size + 1, 0]: + if step == 0: + continue + try: + np_res = np_data[:, :, st:ed:step, :] + except Exception as e: + # skip the invalid case use try-except strategy + continue pd_data = paddle.to_tensor(np_data) pd_res = _getitem_static( pd_data, @@ -1172,7 +1194,11 @@ def test_indexing_with_all_possible_start_end_step_0_size_self(self): ) (pd_res_out,) = self.exe.run(fetch_list=[pd_res]) - np.testing.assert_allclose(pd_res_out, np_res) + np.testing.assert_allclose( + pd_res_out, + np_res, + err_msg=f"Failed indexing test in case: x.shape={np_data.shape}, slice=({st},{ed},{step})", + ) def test_index_has_range(self): # only one bool tensor with all False diff --git a/test/legacy_test/test_multinomial_op.py b/test/legacy_test/test_multinomial_op.py index 50333f2a602c40..c863bffad3b763 100644 --- a/test/legacy_test/test_multinomial_op.py +++ b/test/legacy_test/test_multinomial_op.py @@ -384,11 +384,6 @@ def test_dim_less_than_1(): with self.assertRaises(ValueError): y = paddle.multinomial(paddle.to_tensor([1.0, 2.0, -3.0])) - with self.assertRaises(ValueError): - prob = paddle.rand([20, 1000]) - prob[1:0] = 0 - y = paddle.multinomial(prob) - class 
TestRandomValue(unittest.TestCase): def test_fixed_random_number(self): From 9fdce129631a908654ee8e03be33d22ee63fdf7d Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Fri, 29 Nov 2024 19:51:16 +0800 Subject: [PATCH 3/3] fix more bug --- python/paddle/base/variable_index.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/base/variable_index.py b/python/paddle/base/variable_index.py index 0345d7ea8a4762..cd06712477aa4f 100644 --- a/python/paddle/base/variable_index.py +++ b/python/paddle/base/variable_index.py @@ -344,7 +344,7 @@ def parse_index(x, indices): or isinstance(step, (paddle.base.Variable, paddle.pir.Value)) ): if x.shape[dim] != -1 and end >= x.shape[dim]: - end = MAX_INTEGER if step > 0 else MIN_INTEGER + end = MAX_INTEGER if step > 0 else x.shape[dim] estimated_dim += 1 dim += 1
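
A quick end-to-end sketch of the behavior this series targets (illustrative only, not part of the patches; it condenses the loops added in test_getitem.py): once normalize_interval is wired into the slice and strided-slice paths, dygraph slicing should agree with NumPy for arbitrary start/end/step combinations, including negative steps and out-of-range bounds.

    import numpy as np
    import paddle

    np_data = np.arange(3 * 4 * 5 * 6).reshape((3, 4, 5, 6))
    pd_data = paddle.to_tensor(np_data)

    # Slice axis 2 (size 5) with bounds that fall outside [-5, 5) and with
    # negative steps, i.e. the cases that previously needed manual clamping.
    for st, ed, step in [(-3, -2, 1), (5, -7, -1), (None, None, -2), (-8, 8, 3)]:
        np_res = np_data[:, :, st:ed:step, :]
        pd_res = pd_data[:, :, st:ed:step, :]
        assert pd_res.shape == list(np_res.shape), (st, ed, step)
        np.testing.assert_allclose(pd_res.numpy(), np_res)

The sentinel end value -dim_size - 1 produced by normalize_interval plays the role of Python's "stop is None" for a negative step (run through index 0), which is why the kernels map it back to -1 before computing output dims and offsets, and why parse_index now defaults a missing end to MIN_INTEGER instead of -1.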