diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index 2f72ba91eb651c..a6b342f9560b5a 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -360,7 +360,6 @@ OP_CONVERTER(translate_embedding_ext); OP_CONVERTER(translate_linear_awq); OP_CONVERTER(translate_linear_bitnet); OP_CONVERTER(translate_linear_ext); - } // namespace op // Supported ops for TorchScript @@ -795,6 +794,7 @@ const std::unordered_map<std::string, CreatorFunction> get_supported_ops_ts() { {"prim::TupleIndex", op::translate_tuple_index}, // prim::TupleUnpack - Supported in limited set of patterns {"prim::type", op::skip_node}, // Used with prim::device, pass PtFrameworkNode. + {"prim::data", op::skip_node}, {"quantized::add", op::translate_quantized_add}, {"quantized::add_relu", op::translate_quantized_add_relu}, {"quantized::cat", op::translate_quantized_cat}, diff --git a/tests/layer_tests/pytorch_tests/test_data.py b/tests/layer_tests/pytorch_tests/test_data.py new file mode 100644 index 00000000000000..afd65cef57688d --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_data.py @@ -0,0 +1,71 @@ +# Copyright (C) 2018-2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +import torch + +from pytorch_layer_test_class import PytorchLayerTest + + +class Model(torch.nn.Module): + def forward(self, x): + return x.data + + +class ModelGrad(torch.nn.Module): + def forward(self, x): + y = x * 2.5 + return y.data + + +class TestPrimData(PytorchLayerTest): + def _prepare_input(self): + np.random.seed(self.seed) + if self.dtype in (torch.complex64, torch.complex128): + real = (np.random.randn(*self.shape) * 10).astype(np.float32) + imag = (np.random.randn(*self.shape) * 10).astype(np.float32) + data = real + 1j * imag + data = data.astype(np.complex128 if self.dtype == torch.complex128 else np.complex64) + else: + data = (np.random.randn(*self.shape) * 10).astype(np.float32) + tensor = 
torch.from_numpy(data).to(self.dtype) + return (tensor.numpy(),) + + @pytest.mark.parametrize("dtype", [torch.float32, torch.float64, torch.int32, torch.int64]) + @pytest.mark.parametrize("shape", [[2, 3, 4], [1, 5], [10]]) + def test_data_basic(self, shape, dtype, ie_device, precision, ir_version): + self.shape = shape + self.dtype = dtype + self.seed = 0 + self._test(Model(), None, "prim::data", ie_device, precision, ir_version) + + @pytest.mark.parametrize("dtype", [torch.float32, torch.int32]) + def test_data_requires_grad(self, dtype, ie_device, precision, ir_version): + self.shape = (3, 2) + self.dtype = dtype + self.seed = 1 + self._test(ModelGrad(), None, "prim::data", ie_device, precision, ir_version) + + @pytest.mark.parametrize("dtype", [torch.complex64, torch.complex128]) + @pytest.mark.parametrize("shape", [[2, 3], [1, 5], [4]]) + @pytest.mark.xfail( + reason="OpenVINO frontend does not yet support complex tensor inputs", + raises=AssertionError, + ) + def test_data_complex(self, shape, dtype, ie_device, precision, ir_version): + self.shape = shape + self.dtype = dtype + self.seed = 2 + self._test(Model(), None, "prim::data", ie_device, precision, ir_version) + + @pytest.mark.parametrize("dtype", [torch.complex64, torch.complex128]) + @pytest.mark.xfail( + reason="OpenVINO frontend does not yet support complex tensor inputs", + raises=AssertionError, + ) + def test_data_complex_requires_grad(self, dtype, ie_device, precision, ir_version): + self.shape = (2, 3) + self.dtype = dtype + self.seed = 3 + self._test(ModelGrad(), None, "prim::data", ie_device, precision, ir_version)