Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion backends/nxp/backend/edge_helper.py
Original file line number Diff line number Diff line change
Expand Up @@ -441,8 +441,10 @@ def output_quantization_type(
│ <returned type>
"""
users = list(node.users)
if len(users) == 1:
if output_index is None:
# Basic QDQ case (without getitem nodes).
if not _is_quantize(quantize_node := users[0]):
# Broken QDQ schema.
Comment on lines 446 to +447
return None

else: # Multiple users
Expand Down
Original file line number Diff line number Diff line change
@@ -1,11 +1,14 @@
# Copyright 2025 NXP
# Copyright 2025-2026 NXP
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.


import torch

from executorch.backends.nxp.backend.ir.converter.node_converter import (
CustomDelegationOptions,
NeutronTargetSpec,
NodeConverter,
)
from executorch.backends.nxp.backend.ir.tflite_generator.builtin_options import (
Expand All @@ -25,6 +28,25 @@ def _is_supported_in_IR(
) -> bool:
return True

@staticmethod
def _is_supported_on_target(
node: Node,
neutron_target_spec: NeutronTargetSpec,
parameters_mapping: dict[str, Parameter],
custom_delegation_options: CustomDelegationOptions,
) -> bool:

if custom_delegation_options.use_new_flow_neutron_c:
# Requirements specified by the new Neutron flow documentation.

supported_types = [torch.int8, torch.uint8]
if not NodeConverter.uses_quantization_type_for_io(
node, supported_types, [0], None
):
return False

return True
Comment thread
novak-vaclav marked this conversation as resolved.

def convert(self, node: Node):
"""Convert 'aten::abs' operator to TFLite 'Abs'."""
self.assert_convertible(node)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
import operator

import numpy as np
import torch

from executorch.backends.nxp.backend.edge_helper import try_get_arg
from executorch.backends.nxp.backend.ir.converter.conversion import (
Expand Down Expand Up @@ -73,32 +74,54 @@ def _is_supported_on_target(
MaxPool2DWithIndicesConverter._get_node_args(node)
)

output_shape = node.meta["val"][0].shape # Shape of the main output (index 0)
if output_shape[0] != 1:
# /neutron-converter/src/OperatorC/MaxPoolPlugin.cpp?at=NEUTRON_SOFTWARE_2.2.2#106
return False

# Neutron only has a restriction on `stride_h`. `stride_w` is not restricted.
stride_h = stride[0]
if stride_h not in (1, 2):
# /neutron-library/src/utils/NeutronLibraryInterrogation.cpp?at=refs%2Ftags%2FNEUTRON_SOFTWARE_2.2.2#901
# /neutron-library/src/utils/NeutronLibraryInterrogation.cpp?at=refs%2Ftags%2FNEUTRON_SOFTWARE_2.2.2#923
return False

channels = output_shape[1]
if channels % neutron_target_spec.get_num_macs() != 0:
# /neutron-library/src/utils/NeutronLibraryInterrogation.cpp?at=refs%2Ftags%2FNEUTRON_SOFTWARE_2.2.2#903
# /neutron-library/src/utils/NeutronLibraryInterrogation.cpp?at=refs%2Ftags%2FNEUTRON_SOFTWARE_2.2.2#925
return False

if any(pad > kernel_dim for pad, kernel_dim in zip(padding, kernel_size)):
# /neutron-library/src/utils/NeutronLibraryInterrogation.cpp?at=refs%2Ftags%2FNEUTRON_SOFTWARE_2.2.2#904-907
# /neutron-library/src/utils/NeutronLibraryInterrogation.cpp?at=refs%2Ftags%2FNEUTRON_SOFTWARE_2.2.2#926-929

# Cannot be tested as PyTorch crashes in this case. It requires the padding to be at most half of the
# effective kernel size, which is an even stricter requirement than what Neutron imposes.
# https://github.com/pytorch/pytorch/blob/449b1768410104d3ed79d3bcfe4ba1d65c7f22c0/torch/_meta_registrations.py#L4483-L4489
return False
if custom_delegation_options.use_new_flow_neutron_c:
# Requirements specified by the new Neutron flow documentation.

supported_types = [torch.int8, torch.uint8]
if not NodeConverter.uses_quantization_type_for_io(
node, supported_types, [0], [0]
):
return False

maximum_supported_kernel_size = 4096
# If there is no padding, Neutron allows maximum stride of 4096. Otherwise, it's 32. But the converter
# always inserts a `Pad` operator to add the padding, so the `MaxPool` never pads it's input itself, so
# 4096 is always the limit. And similarly, the `MaxPool` input padding limitation does not apply either.
maximum_supported_stride = 4096

if any(k > maximum_supported_kernel_size for k in kernel_size):
return False
if any(s > maximum_supported_stride for s in stride):
return False

else:
# Shape of the main output (index 0)
output_shape = node.meta["val"][0].shape
if output_shape[0] != 1:
# /neutron-converter/src/OperatorC/MaxPoolPlugin.cpp?at=NEUTRON_SOFTWARE_2.2.2#106
return False

# Neutron only has a restriction on `stride_h`. `stride_w` is not restricted.
stride_h = stride[0]
if stride_h not in (1, 2):
# /neutron-library/src/utils/NeutronLibraryInterrogation.cpp?at=refs%2Ftags%2FNEUTRON_SOFTWARE_2.2.2#901
# /neutron-library/src/utils/NeutronLibraryInterrogation.cpp?at=refs%2Ftags%2FNEUTRON_SOFTWARE_2.2.2#923
return False

channels = output_shape[1]
if channels % neutron_target_spec.get_num_macs() != 0:
# /neutron-library/src/utils/NeutronLibraryInterrogation.cpp?at=refs%2Ftags%2FNEUTRON_SOFTWARE_2.2.2#903
# /neutron-library/src/utils/NeutronLibraryInterrogation.cpp?at=refs%2Ftags%2FNEUTRON_SOFTWARE_2.2.2#925
return False

if any(pad > kernel_dim for pad, kernel_dim in zip(padding, kernel_size)):
# /neutron-library/src/utils/NeutronLibraryInterrogation.cpp?at=refs%2Ftags%2FNEUTRON_SOFTWARE_2.2.2#904-907
# /neutron-library/src/utils/NeutronLibraryInterrogation.cpp?at=refs%2Ftags%2FNEUTRON_SOFTWARE_2.2.2#926-929

# Cannot be tested as PyTorch crashes in this case. It requires the padding to be at most half of the
# effective kernel size, which is an even stricter requirement than what Neutron imposes.
# https://github.com/pytorch/pytorch/blob/449b1768410104d3ed79d3bcfe4ba1d65c7f22c0/torch/_meta_registrations.py#L4483-L4489
return False

return True

Expand Down
12 changes: 8 additions & 4 deletions backends/nxp/tests/dataset_creator.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,8 +45,10 @@ def generate_samples(
class RandomDatasetCreator(DatasetCreator):
"""Dataset creator that generates random input samples."""

def __init__(self, num_samples=2):
    def __init__(self, num_samples=2, low=0.0, high=1.0):
        """Initialize the random-dataset creator.

        :param num_samples: Number of random samples to generate.
        :param low: Lower bound of the random value range (forwarded to the
            RNG's `uniform`, so presumably inclusive — per NumPy semantics).
        :param high: Upper bound of the random value range (presumably
            exclusive, per NumPy's half-open `[low, high)` convention).
        """
        self._num_samples = num_samples
        # `low`/`high` are intentionally public: they are read later when the
        # uniform samples are drawn in `_gen_samples`.
        self.low = low
        self.high = high

def generate_samples(
self, dataset_dir: str, input_spec: list[ModelInputSpec]
Expand Down Expand Up @@ -103,9 +105,11 @@ def _gen_samples(
case _:
raise ValueError(f"Unsupported dim_order: {spec.dim_order}")

sample_vector = rng.random(
np.prod(shape), torch_type_to_numpy_type(spec.dtype)
).reshape(shape)
sample_vector = (
rng.uniform(self.low, self.high, size=np.prod(shape))
.astype(torch_type_to_numpy_type(spec.dtype))
.reshape(shape)
)
file_name = (
f"{str(spec_idx).zfill(2)}.bin"
if len(input_spec) > 1
Expand Down
Loading
Loading