Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
30 changes: 0 additions & 30 deletions backends/nxp/backend/ir/converter/conversion/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,8 +23,6 @@
transpose_conv_options,
)

from torch.fx import Node


def try_get_input(t_op: tflite_model.Operator, idx: int) -> tflite_model.Tensor | None:
"""Return the input tensors of 't_op' at index 'idx', or None if the operator doesn't have that input.
Expand Down Expand Up @@ -135,34 +133,6 @@ def uses_shape_broadcasting(t_op: tflite_model.Operator) -> bool:
)


def node_uses_shape_broadcasting(node: Node) -> bool:
    """Check whether the given PyTorch fx Node broadcasts the shapes of its inputs.

    :param node: PyTorch fx Node with 'all_input_nodes' initialized.
    :return: True if the node uses shape broadcasting for its input nodes,
             False otherwise.
    """

    inputs = node.all_input_nodes
    if inputs is None:
        logger.e(
            logger.Code.INTERNAL_ERROR,
            "common.node_uses_shape_broadcasting(): 'all_input_nodes' are None!",
        )

    if len(inputs) == 0:
        logger.e(
            logger.Code.INTERNAL_ERROR,
            "common.node_uses_shape_broadcasting(): Operator has no inputs!",
        )

    # Broadcasting is in play exactly when some input's shape differs from the
    # shape of the first input.
    reference_shape = inputs[0].meta["val"].shape
    for other_input in inputs[1:]:
        if other_input.meta["val"].shape != reference_shape:
            return True
    return False


class OpsList:
"""
Holder of TFLite operator (middle_op) that can be prefixed (pre_ops) of suffixed (post_ops)
Expand Down
57 changes: 57 additions & 0 deletions backends/nxp/backend/ir/converter/node_converter.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
input_quantization_type,
output_quantization_type,
)
from executorch.backends.nxp.backend.ir import logger as logger
from executorch.backends.nxp.backend.ir.conversion_context import ConversionContext
from executorch.backends.nxp.backend.ir.converter.builder.aten_model_builder_director import (
AtenModelBuilderDirector,
Expand Down Expand Up @@ -377,3 +378,59 @@ def uses_quantization_type_for_io(
) and NodeConverter.uses_quantization_type_for_outputs(
node, supported_types, output_indices
)

@staticmethod
def uses_shape_broadcasting(node: Node) -> bool:
"""Determine if given PyTorch fx Node uses shape broadcasting for it's input nodes or not.

:param node: PyTorch fx Node with 'all_input_nodes' initialized.
:return: True, if the node uses shape broadcasting for it's input nodes.
False otherwise.
"""

if node.all_input_nodes is None:
logger.e(
logger.Code.INTERNAL_ERROR,
"common.node_uses_shape_broadcasting(): 'all_input_nodes' are None!",
Comment thread
roman-janik-nxp marked this conversation as resolved.
)

if len(node.all_input_nodes) == 0:
logger.e(
logger.Code.INTERNAL_ERROR,
"common.node_uses_shape_broadcasting(): Operator has no inputs!",
)

first_input_shape = node.all_input_nodes[0].meta["val"].shape

return any(
input_tensor.meta["val"].shape != first_input_shape
for input_tensor in node.all_input_nodes[1:]
)

@staticmethod
def uses_at_least_one_input_shape_broadcasting(node: Node) -> bool:
Comment thread
roman-janik-nxp marked this conversation as resolved.
"""Determine if given PyTorch fx Node uses at least one input shape broadcasting for it's input nodes or not.

:param node: PyTorch fx Node with 'all_input_nodes' initialized.
:return: True, if at least one input has the same shape as the output node.
False otherwise.
"""

if node.all_input_nodes is None:
logger.e(
logger.Code.INTERNAL_ERROR,
"common.node_uses_shape_broadcasting(): 'all_input_nodes' are None!",
)

if len(node.all_input_nodes) == 0:
logger.e(
logger.Code.INTERNAL_ERROR,
"common.node_uses_shape_broadcasting(): Operator has no inputs!",
)

output_shape = node.meta["val"].shape

return any(
input_tensor.meta["val"].shape == output_shape
for input_tensor in node.all_input_nodes
)
Original file line number Diff line number Diff line change
@@ -1,11 +1,8 @@
# Copyright 2025 NXP
# Copyright 2025-2026 NXP
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from executorch.backends.nxp.backend.ir.converter.conversion.common import (
node_uses_shape_broadcasting,
)
from executorch.backends.nxp.backend.ir.converter.node_converter import (
CustomDelegationOptions,
NodeConverter,
Expand All @@ -26,7 +23,7 @@ def _is_supported_on_target(
parameters_mapping: dict[str, Parameter],
custom_delegation_options: CustomDelegationOptions,
) -> bool:
if node_uses_shape_broadcasting(node):
if NodeConverter.uses_shape_broadcasting(node):
# Shape broadcasting may require the addition of `Transpose` ops during conversion.
return False

Expand Down
Original file line number Diff line number Diff line change
@@ -1,11 +1,10 @@
# Copyright 2025 NXP
# Copyright 2025-2026 NXP
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from executorch.backends.nxp.backend.ir.converter.conversion.common import (
node_uses_shape_broadcasting,
)
import torch

from executorch.backends.nxp.backend.ir.converter.node_converter import (
CustomDelegationOptions,
NodeConverter,
Expand All @@ -26,19 +25,31 @@ def _is_supported_on_target(
parameters_mapping: dict[str, Parameter],
custom_delegation_options: CustomDelegationOptions,
) -> bool:
if node_uses_shape_broadcasting(node):
# Shape broadcasting may require the addition of `Transpose` ops during conversion.
return False
if custom_delegation_options.use_new_flow_neutron_c:
if not NodeConverter.uses_at_least_one_input_shape_broadcasting(node):
return False

supported_types = [torch.int8, torch.uint8]
if not NodeConverter.uses_quantization_type_for_io(
node, supported_types, [0, 1], [0]
):
return False

return True
else:
if NodeConverter.uses_shape_broadcasting(node):
# Shape broadcasting may require the addition of `Transpose` ops during conversion.
return False

node_shape = node.meta["val"].shape
node_shape = node.meta["val"].shape

# Check that at least one dimension is divisible by number of MACS
# or all dimensions are equal to one
# Otherwise Neutron cannot convert it
dim_divisible = any(s % 8 == 0 for s in node_shape) or all(
s == 1 for s in node_shape
)
return dim_divisible
# Check that at least one dimension is divisible by number of MACS
# or all dimensions are equal to one
# Otherwise Neutron cannot convert it
dim_divisible = any(s % 8 == 0 for s in node_shape) or all(
s == 1 for s in node_shape
)
return dim_divisible

@staticmethod
def _is_supported_in_IR(
Expand All @@ -53,7 +64,10 @@ def _is_supported_in_IR(

# mul.Tensor Node format: (Tensor self, Tensor other, *)
def convert(self, node: Node):
"""Convert 'mul_tensor' operator to NeutronIR 'Mul'."""
"""Convert 'mul_tensor' operator to NeutronIR 'Mul'.
The ExecuTorch schema is:
mul.Tensor(Tensor self, Tensor other)
"""
self.assert_convertible(node)
t_op = self._create_tflite_op_with_io_tensors(node)
t_op.builtin_options = mul_options.Mul()
Expand Down
Original file line number Diff line number Diff line change
@@ -1,11 +1,8 @@
# Copyright 2025 NXP
# Copyright 2025-2026 NXP
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from executorch.backends.nxp.backend.ir.converter.conversion.common import (
node_uses_shape_broadcasting,
)
from executorch.backends.nxp.backend.ir.converter.node_converter import (
CustomDelegationOptions,
NodeConverter,
Expand All @@ -26,7 +23,7 @@ def _is_supported_on_target(
parameters_mapping: dict[str, Parameter],
custom_delegation_options: CustomDelegationOptions,
) -> bool:
if node_uses_shape_broadcasting(node):
if NodeConverter.uses_shape_broadcasting(node):
# Shape broadcasting may require the addition of `Transpose` ops during conversion.
return False

Expand Down
2 changes: 1 addition & 1 deletion backends/nxp/quantizer/patterns.py
Original file line number Diff line number Diff line change
Expand Up @@ -830,7 +830,7 @@ class MulTensorPattern(QuantizationPattern):
Basic quantization for all inputs and output.
"""

def partition_types(self) -> list[OpOverload]:
    """Return the aten operator overloads this quantization pattern matches.

    :return: List containing only 'torch.ops.aten.mul.Tensor'.
    """
    return [torch.ops.aten.mul.Tensor]

def get_anchors(
Expand Down
Loading
Loading