diff --git a/LICENSE b/LICENSE
index 6c1f7a760a7..fdd029058a6 100644
--- a/LICENSE
+++ b/LICENSE
@@ -7,6 +7,7 @@ Copyright 2023 Arm Limited and/or its affiliates.
 Copyright (c) Qualcomm Innovation Center, Inc.
 Copyright (c) 2023 Apple Inc.
 Copyright (c) 2024 MediaTek Inc.
+Copyright 2023 NXP
 
 Redistribution and use in source and binary forms, with or without modification,
 are permitted provided that the following conditions are met:
diff --git a/backends/arm/_passes/arm_pass_manager.py b/backends/arm/_passes/arm_pass_manager.py
index dd9f8b7bed0..6e87a0ae006 100644
--- a/backends/arm/_passes/arm_pass_manager.py
+++ b/backends/arm/_passes/arm_pass_manager.py
@@ -57,7 +57,7 @@ from executorch.backends.arm.tosa_specification import Tosa_0_80, TosaSpecification
 from executorch.backends.transforms.fuse_view_copy import FuseViewCopyTransform
-from executorch.backends.xnnpack._passes.remove_getitem_op import RemoveGetItemPass
+from executorch.backends.transforms.remove_getitem_op import RemoveGetItemPass
 from executorch.exir import ExportedProgram
 from executorch.exir.pass_manager import PassManager
 from torch.fx import GraphModule
diff --git a/backends/nxp/README.md b/backends/nxp/README.md
index 103bcf52f75..8ed09d12eea 100644
--- a/backends/nxp/README.md
+++ b/backends/nxp/README.md
@@ -27,14 +27,45 @@ In the future the NXP eIQ Neutron Backend will be extended to support [i.MX 9 Ap
 with eIQ Neutron NPU, like the [i.MX 95](https://www.nxp.com/products/iMX95).
 
-## Layout
-TBD
-
 ## Backend Status and Maturity
 **Current Status:** Prototype Quality
 
-The eIQ Neutron NPU Backend should be considered as prototype quality at this moment. Subject to significant changes and
-improvements. NXP and the ExecuTorch community is actively developing this codebase.
+The eIQ Neutron NPU Backend should be considered prototype quality at this moment, subject to significant changes and
+improvements. NXP and the ExecuTorch community are actively developing this codebase.
+
+## Neutron Backend implementation and SW architecture
+The Neutron Backend uses the eIQ Neutron Converter as its ML compiler to compile the delegated subgraph to Neutron microcode.
+The Neutron Converter accepts the ML model in LiteRT format. For the **eIQ Neutron N3** class, the Neutron Backend therefore
+uses the LiteRT flatbuffers format as the IR between ExecuTorch and the Neutron Converter ML compiler.
+
+In its early prototype phase, the Neutron Backend is based on existing NXP products, such as
+onnx2tflite, known from NXP's eIQ Toolkit.
+**onnx2tflite** is a converter from the ONNX format to LiteRT (formerly known as TFLite).
+It consists of 5 stages:
+* ONNX Model Parsing
+* Tensor Format Inference, to identify tensors that use a channels-first layout
+* ONNX to LiteRT Conversion
+* Optimization Passes, which operate on top of the LiteRT format
+* LiteRT Serialization
+
+Because the Edge-to-LiteRT conversion is analogous to the ONNX-to-LiteRT conversion, the Neutron Backend
+currently leverages the Tensor Format Inference and the LiteRT Optimizer.
+This shall be considered a temporary solution, intended to be replaced with:
+* Dim Order (https://github.com/pytorch/executorch/issues/4873)
+* Corresponding ExecuTorch/ATen passes
+
+before reaching higher maturity status by the end of 2025.
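+
+For illustration, the conversion entry point added in this patch is `EdgeProgramToIRConverter`. The
+following is a minimal sketch, not a supported end-to-end flow: the model and shapes are made up, and
+only operators with a registered converter (e.g. `aten.relu`) can be converted.
+
+```python
+import torch
+from executorch.backends.nxp.backend.edge_program_converter import (
+    EdgeProgramToIRConverter,
+)
+from executorch.exir import to_edge
+
+
+class SmallModel(torch.nn.Module):
+    def forward(self, x):
+        return torch.relu(x)
+
+
+edge = to_edge(torch.export.export(SmallModel(), (torch.randn(1, 4),)))
+# Returns the LiteRT flatbuffers bytes and a mapping of IO tensor formats.
+tflite_bytes, io_formats = EdgeProgramToIRConverter().convert_program(edge.exported_program())
+```
+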
+## Layout
+The current code base is as follows:
+* `backend/ir/` - TFLite/LiteRT based IR to represent the Edge Subgraph, taken from the onnx2tflite code base and extended to
+  support Edge Dialect to LiteRT conversion.
+  * `backend/ir/converter` - Neutron Backend's conversion from Edge (ATen) Dialect to LiteRT/TFLite. The subfolder
+    `node_converters` is structured as a single module for each Edge operator.
+  * `backend/ir/lib` - handlers automatically generated from the LiteRT flatbuffers schema
+  * `backend/ir/tflite_generator` and `backend/ir/tflite_optimizer` handle the serialization
+    of the in-memory built subgraph for delegation into the LiteRT/TFLite flatbuffers
+    representation. Code taken from the onnx2tflite tool.
+* `quantizer` - Neutron Backend's quantizer implementation.
 
 ## Help & Improvements
 If you have problems or questions or have suggestions for ways to make
diff --git a/backends/nxp/backend/edge_helper.py b/backends/nxp/backend/edge_helper.py
new file mode 100644
index 00000000000..9b584d5166b
--- /dev/null
+++ b/backends/nxp/backend/edge_helper.py
@@ -0,0 +1,40 @@
+# Copyright 2024 NXP
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import torch
+from torch.fx import Node
+
+
+def input_tensor(node: Node, input_index: int) -> torch.Tensor:
+    if len(node.all_input_nodes) <= input_index:
+        raise IndexError(f"Node '{node.name}' has no input at index '{input_index}'.")
+
+    return node.all_input_nodes[input_index].meta["val"]
+
+
+def output_tensor(node: Node) -> torch.Tensor:
+    return node.meta["val"]
+
+
+def tensor_rank(tensor: torch.Tensor) -> int:
+    return len(tensor.size())
+
+
+def input_rank(node: Node, input_index: int) -> int:
+    return tensor_rank(input_tensor(node, input_index))
+
+
+def input_tensor_safe(node: Node, input_index: int) -> torch.Tensor | None:
+    """Return the input tensor of 'node' at index 'input_index', or None if the node doesn't have that input.
+
+    :param node: Edge node to get the input tensor from.
+    :param input_index: Index of the input tensor to get.
+    :return: The input tensor at index 'input_index', or None.
+    """
+
+    if len(node.all_input_nodes) <= input_index:
+        return None
+
+    return input_tensor(node, input_index)
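+
+
+# Illustrative usage only ('conv_node' is an assumed Edge graph node, not part of this module):
+#
+#     weight = input_tensor_safe(conv_node, 1)  # kernel tensor, or None if absent
+#     if weight is not None and input_rank(conv_node, 0) == 4:
+#         ...  # e.g. handle the 2D convolution case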
diff --git a/backends/nxp/backend/edge_program_converter.py b/backends/nxp/backend/edge_program_converter.py
new file mode 100644
index 00000000000..488703db120
--- /dev/null
+++ b/backends/nxp/backend/edge_program_converter.py
@@ -0,0 +1,194 @@
+# Copyright 2024 NXP
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import executorch.backends.nxp.backend.ir.logger as logger
+import flatbuffers
+from executorch.backends.nxp.backend.ir.conversion_config import ConversionConfig
+from executorch.backends.nxp.backend.ir.conversion_context import ConversionContext
+from executorch.backends.nxp.backend.ir.converter.builder.aten_model_builder_director import (
+    AtenModelBuilderDirector,
+)
+from torch.export import ExportedProgram
+from torch.export.graph_signature import InputKind
+from torch.fx import Node
+from torch.nn.parameter import Parameter
+from executorch.backends.nxp.backend.ir.converter.node_converters.ops_converters import *  # noqa F403
+from executorch.backends.nxp.backend.node_format_inference import (
+    NodeFormat,
+    NodeFormatInference,
+)
+from executorch.exir.dialects._ops import ops as exir_ops
+
+# noinspection PyProtectedMember
+functions_converters = {
+    exir_ops.edge.aten.addmm.default: AddMMConverter,  # noqa F405
+    exir_ops.edge.aten.avg_pool2d.default: AvgPool2dConverter,  # noqa F405
+    exir_ops.edge.aten.constant_pad_nd.default: ConstantPadNDConverter,  # noqa F405
+    exir_ops.edge.aten.convolution.default: ConvolutionConverter,  # noqa F405
+    exir_ops.edge.aten.max_pool2d.default: MaxPool2dConverter,  # noqa F405
+    exir_ops.edge.aten.mm.default: MMConverter,  # noqa F405
+    exir_ops.edge.aten.permute_copy.default: PermuteCopyConverter,  # noqa F405
+    exir_ops.edge.aten.relu.default: ReLUConverter,  # noqa F405
+    exir_ops.edge.aten._softmax.default: SoftmaxConverter,  # noqa F405
+    exir_ops.edge.aten.view_copy.default: ViewCopyConverter,  # noqa F405
+}
+
+
+class EdgeProgramToIRConverter:
+    """
+    Converter of an ExportedProgram in Edge dialect to IR (TFLite flatbuffers).
+    """
+
+    _default_conversion_config = ConversionConfig()
+
+    def convert_program(
+        self,
+        edge_program: ExportedProgram,
+        conversion_config=_default_conversion_config,
+    ) -> tuple[bytes, dict]:
+        """
+        Convert ExportedProgram in Edge dialect to IR (TFLite flatbuffers) as bytes.
+
+        :param edge_program: ExportedProgram to convert.
+        :param conversion_config: ConversionConfig instance.
+        :return: TFLite flatbuffers as bytes, and a mapping of IO tensor formats.
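+
+        Example (illustrative)::
+
+            converter = EdgeProgramToIRConverter()
+            tflite_bytes, io_formats = converter.convert_program(edge_program)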
+ """ + node_formats = NodeFormatInference(edge_program).identify_node_formats() + parameters_mapping = self.map_inputs_to_parameters(edge_program) + + cc = self.build_conversion_context( + parameters_mapping, node_formats, conversion_config + ) + + # Program conversion + self.append_placeholders_and_tensors(edge_program.graph.nodes, cc) + self._convert_qdq_cluster_q_dq_nodes(edge_program.graph.nodes, cc) + self._process_nodes(edge_program.graph.nodes, cc) + + # Assign output + io_formats = cc.tflite_builder.assign_model_io_to_subgraph_and_get_io_formats( + edge_program.graph_signature + ) + + # TFLite model generation + internal_tflite_model = cc.tflite_builder.finish() + flatbuffers_builder = flatbuffers.Builder() + internal_tflite_model.gen_tflite(flatbuffers_builder) + + return bytes(flatbuffers_builder.Output()), io_formats + + @staticmethod + def append_placeholders_and_tensors(nodes: list[Node], context: ConversionContext): + for node in nodes: + if node.op == "placeholder": + node_format = context.node_formats[node] + + if node.name in context.parameters_mapping: + # Node is placeholder and has data -> append as static tensor with data + tensor = context.parameters_mapping[node.name] + context.tflite_builder.append_as_static_tensor( + node, node_format, tensor + ) + else: + # Node is placeholder and doesn't have data (user input) -> append as fake tensor + context.tflite_builder.append_as_fake_tensor(node, node_format) + elif node.op == "call_function": + # Node is call function -> append only output as a tensor + node_format = context.node_formats[node] + context.tflite_builder.append_as_fake_tensor(node, node_format) + elif node.op == "output": + # Nothing to do + pass + else: + logger.e( + logger.Code.INTERNAL_ERROR, f"Unexpected node op type: '{node.op}'!" + ) + + def _process_nodes(self, nodes: list[Node], conversion_context: ConversionContext): + """ + Go through program nodes and append their TFLite siblings into ModelBuilder. + + :param nodes: Program's nodes. + :param conversion_context: ConversionContext instance. + """ + + qdq_related_functions = [ + exir_ops.edge.quantized_decomposed.dequantize_per_tensor.default, + exir_ops.edge.quantized_decomposed.quantize_per_tensor.default, + ] + + for node in nodes: + if node.op == "call_function": + if node.target in qdq_related_functions and "cluster" in node.meta: + # Skip (De)Quantize nodes that were already processed + pass + elif node.target in functions_converters: + functions_converters[node.target](conversion_context).convert(node) + else: + logger.e( + logger.Code.NOT_IMPLEMENTED, + f"Converter for '{node.target.__name__}' not implemented!", + ) + + @staticmethod + def map_inputs_to_parameters(edge_program: ExportedProgram) -> dict[str, Parameter]: + """ + Create mapping between program parameters (input nodes & static data nodes) and their names. + + :param edge_program: EdgeProgram instance. + :return: Mapping from parameter name to parameter instance. 
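+
+        Example (illustrative): a weight placeholder named "p_fc_weight" whose
+        input spec targets "fc.weight" produces the entry
+        '{"p_fc_weight": edge_program.state_dict["fc.weight"]}'.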
+ """ + result_map = {} + + for input_spec in edge_program.graph_signature.input_specs: + if input_spec.kind in [InputKind.PARAMETER, InputKind.BUFFER]: + result_map[input_spec.arg.name] = edge_program.state_dict[ + input_spec.target + ] + + return result_map + + @staticmethod + def build_conversion_context( + parameters_mapping: dict, + node_formats: dict[Node, NodeFormat], + conversion_config: ConversionConfig = _default_conversion_config, + ) -> ConversionContext: + tflite_builder = AtenModelBuilderDirector( + 3, "TFLite from EdgeProgram", conversion_config + ) + + # Add "sentinel" buffer (defined in schema.fbs) + tflite_builder.build_empty_buffer() + + context = ConversionContext( + tflite_builder, conversion_config, parameters_mapping, node_formats + ) + + return context + + def _convert_qdq_cluster_q_dq_nodes( + self, nodes: list[Node], conversion_context: ConversionContext + ): + """ + Go through program and convert De(Quantize) nodes that are part of the QDQ cluster into + tensors. + + :param nodes: Program's nodes. + :param conversion_context: ConversionContext instance. + """ + qdq_q_ops_converters = { + exir_ops.edge.quantized_decomposed.dequantize_per_tensor.default: QDQDequantizeConverter, # noqa F405 + exir_ops.edge.quantized_decomposed.quantize_per_tensor.default: QDQQuantizeConverter, # noqa F405 + } + + for node in nodes: + part_of_qdq_cluster = "cluster" in node.meta + if ( + node.op == "call_function" + and node.target in qdq_q_ops_converters + and part_of_qdq_cluster + ): + qdq_q_ops_converters[node.target](conversion_context).convert(node) diff --git a/backends/nxp/backend/ir/conversion_config.py b/backends/nxp/backend/ir/conversion_config.py new file mode 100644 index 00000000000..4ac88eb467c --- /dev/null +++ b/backends/nxp/backend/ir/conversion_config.py @@ -0,0 +1,64 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + + +class ConversionConfig: + + def __init__(self, args: dict | None = None): + """ + Conversion configuration passed through command line arguments or gathered during + the conversion process. + + :param args: Optional dictionary with conversion arguments. Unknown arguments are ignored. + """ + self.keep_io_format: bool = False + self.skip_shape_inference: bool = False + self.allow_inputs_stripping: bool = True + self.qdq_aware_conversion: bool = True + self.symbolic_dimensions_mapping: dict[str, int] | None = None + self.input_shapes_mapping: dict[str, tuple] | None = None + self.dont_skip_nodes_with_known_outputs: bool = False + self.allow_select_ops: bool = True + self.generate_artifacts_after_failed_shape_inference: bool = True + + self.optimization_whitelist: list | None = None + self.optimization_blacklist: list | None = None + + self.non_negative_indices: bool = False + self.cast_int64_to_int32: bool = False + self.accept_resize_rounding_error: bool = False + self.ignore_opset_version: bool = False + + self.tflite_quantization_integrity_check: bool = True + + if args is not None: + for key, value in args.items(): + if key in self.__dict__: + setattr(self, key, value) + + def __repr__(self): + attrs = [] + for attr in self.__dict__: + attrs.append(f"{attr}={getattr(self, attr)}") + + return "ConversionConfig[" + ", ".join(attrs) + "]" + + +class SkipShapeInferenceConfig(ConversionConfig): + + def __init__(self): + """ + Conversion config shortcut with disabled shape inference. 
+        """
+        super().__init__({"skip_shape_inference": True})
+
+
+class QDQAwareConfig(ConversionConfig):
+
+    def __init__(self):
+        """
+        Conversion config shortcut with QDQ aware conversion enabled.
+        """
+        super().__init__({"qdq_aware_conversion": True})
diff --git a/backends/nxp/backend/ir/conversion_context.py b/backends/nxp/backend/ir/conversion_context.py
new file mode 100644
index 00000000000..6ec80f02a66
--- /dev/null
+++ b/backends/nxp/backend/ir/conversion_context.py
@@ -0,0 +1,37 @@
+# Copyright 2024 NXP
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+from executorch.backends.nxp.backend.ir.conversion_config import ConversionConfig
+from executorch.backends.nxp.backend.ir.converter.builder.aten_model_builder_director import (
+    AtenModelBuilderDirector,
+)
+from executorch.backends.nxp.backend.node_format_inference import NodeFormat
+from torch.fx import Node
+from torch.nn import Parameter
+
+
+class ConversionContext:
+    tflite_builder: AtenModelBuilderDirector
+    conversion_config: ConversionConfig
+    parameters_mapping: dict[str, Parameter]
+    node_formats: dict[Node, NodeFormat]
+
+    def __init__(
+        self,
+        tflite_builder: AtenModelBuilderDirector,
+        conversion_config: ConversionConfig,
+        parameters_mapping: dict,
+        node_formats: dict[Node, NodeFormat],
+    ):
+        """
+        Context with data related to current conversion.
+
+        :param tflite_builder: TFLite model builder.
+        :param conversion_config: Conversion configuration flags and metadata.
+        :param parameters_mapping: Mapping from parameter names to their data.
+        :param node_formats: Mapping from graph nodes to their inferred formats.
+        """
+        self.tflite_builder = tflite_builder
+        self.conversion_config = conversion_config
+        self.parameters_mapping = parameters_mapping
+        self.node_formats = node_formats
diff --git a/backends/nxp/backend/ir/converter/__init__.py b/backends/nxp/backend/ir/converter/__init__.py
new file mode 100755
index 00000000000..e69de29bb2d
diff --git a/backends/nxp/backend/ir/converter/builder/__init__.py b/backends/nxp/backend/ir/converter/builder/__init__.py
new file mode 100755
index 00000000000..e69de29bb2d
diff --git a/backends/nxp/backend/ir/converter/builder/aten_model_builder_director.py b/backends/nxp/backend/ir/converter/builder/aten_model_builder_director.py
new file mode 100644
index 00000000000..a420cea9aa7
--- /dev/null
+++ b/backends/nxp/backend/ir/converter/builder/aten_model_builder_director.py
@@ -0,0 +1,126 @@
+# Copyright 2024 NXP
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+from executorch.backends.nxp.backend.ir.converter.builder.model_builder import (
+    ModelBuilder,
+)
+from executorch.backends.nxp.backend.ir.converter.conversion import translator
+from executorch.backends.nxp.backend.ir.tensor_formatting import TensorFormat
+from executorch.backends.nxp.backend.ir.tflite_generator import tflite_model
+from executorch.backends.nxp.backend.node_format_inference import NodeFormat
+from torch.fx import Node
+from torch.nn import Parameter
+
+
+class AtenModelBuilderDirector(ModelBuilder):
+    """
+    ModelBuilder's extension that simplifies some actions during the build process. It also
+    contains methods related to Edge program node conversion.
+    """
+
+    def append_as_fake_tensor(self, node: Node, node_format: NodeFormat):
+        """
+        Append node into ModelBuilder as tensor without data (FakeTensor). Can be used
+        for activations and output tensors.
+
+        :param node: Node instance.
+        :param node_format: NodeFormat definition.
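+
+        Example (illustrative): a node producing a float32 tensor of shape
+        [1, 8, 4, 4] with a channels-first format is appended as an empty
+        TFLite tensor of shape [1, 4, 4, 8] (channels last).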
+ """ + if self.tensor_exists(node.name): + return + + tensor = node.meta["val"] + if isinstance(tensor, tuple): + tensor = tensor[0] # Fake tensor + _type = translator.convert_data_type(tensor.dtype) + shape = list(tensor.shape) + + if node_format.is_channels_first(): + shape = translator.dims_to_channels_last(shape) + + tensor = self.create_empty_tensor(node.name, _type, shape) + tensor.tensor_format = TensorFormat.from_node_format(node_format) + + def append_as_static_tensor( + self, node: Node, node_format: NodeFormat, tensor: Parameter + ): + """ + Append node into ModelBuilder as tensor with data (static). Can be used for weights, + permutations etc. + + :param node: Node instance. + :param node_format: NodeFormat definition. + :param tensor: Torch Tensor (Parameter) that holds tensor data. + """ + assert not self.tensor_exists(node.name), f"Tensor '{node.name}' already added!" + + if self.tensor_exists(node.name): + return + + data = tensor.data.numpy() + + if node_format.is_channels_first(): + data = translator.convert_data_to_channels_last(data) + + tensor = self.create_tensor_for_data(data, node.name) + tensor.tensor_format = TensorFormat.from_node_format(node_format) + + def append_operators(self, ops_to_add: list[tflite_model.Operator]): + """ + Append list of TFLite operators to created model via ModelBuilder. + + :param ops_to_add: List of operators to be added. + """ + for op in ops_to_add: + if op.builtin_options is not None: + op.opcode_index = self.op_code_index_for_op_type( + op.builtin_options.operator_type, op.tmp_version + ) + + elif op.custom_options is not None: + op.opcode_index = self.op_code_index_for_op_type( + op.custom_options.operator_type, + op.tmp_version, + op.custom_options.custom_code, + ) + + self.check_and_append_operator(op) + + def assign_model_io_to_subgraph_and_get_io_formats( + self, graph_signature + ) -> dict[str, dict]: + """ + Assign model's inputs/outputs to SubGraph. + + :param graph_signature: Instance of GraphSignature. + :returns: Mapping between IO tensors' names and their formats. + """ + io_formats = { + "inputs": {}, + "outputs": {}, + } + + self.get_sub_graph().inputs = tflite_model.SubGraphInputs() + for input_name in graph_signature.user_inputs: + tensor = self.tensor_for_name(input_name) + assert input_name == tensor.name, ( + "Program's input name doesn't match with tensor name in TFLite. " + "Input was probably redirected." + ) + self.get_sub_graph().inputs.tmp_inputs.append(tensor) + io_formats["inputs"][tensor.name] = tensor.tensor_format + + self.get_sub_graph().outputs = tflite_model.SubGraphOutputs() + for output_name in graph_signature.user_outputs: + tensor = self.tensor_for_name(output_name) + assert output_name == tensor.name, ( + "Program's output name doesn't match with tensor name in TFLite. " + "Output was probably redirected." + ) + self.get_sub_graph().outputs.tmp_outputs.append(tensor) + + io_formats["outputs"][tensor.name] = tensor.tensor_format + + return io_formats diff --git a/backends/nxp/backend/ir/converter/builder/model_builder.py b/backends/nxp/backend/ir/converter/builder/model_builder.py new file mode 100755 index 00000000000..1ca46237814 --- /dev/null +++ b/backends/nxp/backend/ir/converter/builder/model_builder.py @@ -0,0 +1,1648 @@ +# +# Copyright 2023 Martin Pavella +# Copyright 2023-2024 NXP +# +# License: MIT +# See the LICENSE_MIT for more details. 
+# +from copy import deepcopy +from typing import Dict, List, Optional, Union + +import executorch.backends.nxp.backend.ir.converter.conversion.translator as translator +import executorch.backends.nxp.backend.ir.logger as logger +import executorch.backends.nxp.backend.ir.tflite_generator.tflite_model as tflite_model + +import numpy as np +from executorch.backends.nxp.backend.ir.conversion_config import ConversionConfig +from executorch.backends.nxp.backend.ir.converter.builder import ( + quantization_verification, +) +from executorch.backends.nxp.backend.ir.converter.conversion.common import ( + uses_shape_broadcasting, +) +from executorch.backends.nxp.backend.ir.converter.quantization_utils import ( + propagate_quantization, +) +from executorch.backends.nxp.backend.ir.converter.tensor_utils import ( + _buffer_has_data, + all_tensors_are_static, + tensor_has_data, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.TensorType import TensorType +from executorch.backends.nxp.backend.ir.tensor_formatting import TensorFormat +from executorch.backends.nxp.backend.ir.tflite_generator.builtin_options import ( + cast_options, + dequantize_options, + gather_options, + pad_options, + pad_v2_options, + quantize_options, + reshape_options, + slice_options, + transpose_options, +) +from executorch.backends.nxp.backend.ir.tflite_generator.custom_options.flex_transpose_options import ( + FlexTranspose, +) +from executorch.backends.nxp.backend.ir.tflite_optimizer import optimizer + + +class ModelBuilder: + """ + Class encapsulates a TFLite object model defined in '/src/tflite_generator/'. + Provides methods to create and modify the TFLite model. + At the end call 'finish()' to finalize and optimise the model. + """ + + _tfl_model: tflite_model.Model + + _tensor_name_map: Dict # Mapping 'str' to 'tflT.Tensor' + + # Maps BuiltinOperator to a Dict, mapping version to index. Operators of type 'BuiltinOperator.CUSTOM' + # have their 'version' prepended with its name, for example "FlexErf_1". + op_code_type_index_map: Dict[BuiltinOperator, Dict[Union[str, int], int]] + + _nchw_tensor_version: Dict # Mapping 'tflT.Tensor' to 'tflT.Tensor' which is + # equal, but in NCHW format + + _skipped_output_map: Dict # Mapping 'tflT.Tensor' objects that were outputs + # of skipped operators, to 'tflT.Tensor' outputs of + # previous operators + + _zeros_tensor_map: Dict # Mapping 'string' shapes to 'tflT.Tensor' objects + + _default_conversion_config = ConversionConfig() + + conversion_config: ConversionConfig + + def __init__( + self, + model_version: int, + model_description: str, + conversion_config: ConversionConfig = _default_conversion_config, + ) -> None: + self._tfl_model = tflite_model.Model(model_version, model_description) + self.conversion_config = conversion_config + + self.op_code_type_index_map = {} + self._tensor_name_map = {} + self._nchw_tensor_version = {} + self._skipped_output_map = {} + self._zeros_tensor_map = {} + + def create_zeros_tensor( + self, dims: List[int], name: str, dtype: np.dtype, can_reuse: bool = False + ) -> tflite_model.Tensor: + """Create and return a Tensor with given shape, name and dtype that only contains zeros. + If 'can_reuse' is True, created tensor can be shared with other operators. 
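+
+        Example (illustrative)::
+
+            zeros = builder.create_zeros_tensor([1, 4], "zeros", np.dtype(np.float32), can_reuse=True)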
+ """ + + def _dims_to_string(dims: List[int]): + """Convert a list of integers to a string.""" + tmp = [str(dim) for dim in dims] + return "_".join(tmp) + + if can_reuse: + # The zeros tensor can be shared with other operators + str_dims = _dims_to_string(dims) + tensor_as_string = str_dims + dtype.name + + # Check if such tensor already exists + if tensor_as_string in self._zeros_tensor_map.keys(): + logger.d( + f"REUSING zero tensor of size {str_dims} with type {dtype.name}." + ) + return self._zeros_tensor_map[tensor_as_string] + + else: + # Create a new one and register it for potential future use. + logger.d( + f"ADDING zero tensor of size {str_dims} with type {dtype.name}." + ) + data = np.zeros(dims, dtype) + new_tensor = self.create_tensor_for_data(data, name) + + self._zeros_tensor_map[tensor_as_string] = new_tensor + + return new_tensor + + # Tensor cannot be shared. Just create one and return it + data = np.zeros(dims, dtype) + + return self.create_tensor_for_data(data, name) + + def create_pad_operator_before( + self, + before_op: tflite_model.Operator, + on_input_index: int, + explicit_padding: List[List[int]], + constant_value: np.ndarray = None, + ) -> tflite_model.Operator: + """Create a TFLite 'Pad' operator before the 'before_op' operator. The input of 'before_op' on index + 'on_input_index' is where the 'Pad' operator will connect. + + :param before_op: TFLite operator that will consume the output of the new 'Pad' operator. + :param on_input_index: Index of an input tensor of the 'before_op' operator, which will serve as the new input + for the 'Pad' operator. + :param explicit_padding: TFLite style explicit padding compatible with the TFLite 'Pad' operator. + :param constant_value: The scalar array used as pad value. Must be same type as input tensor at + index 'on_input_index'. + :return: The TFLite 'Pad' operator. + """ + if on_input_index >= len(before_op.tmp_inputs): + logger.e( + logger.Code.INTERNAL_ERROR, + f"ModelBuilder.create_pad_operator_before(): input index '{on_input_index}' is out of range!", + ) + + input_tensor = before_op.tmp_inputs[on_input_index] + + # New shape of the tensor after padding + padded_shape = translator.get_tflite_tensor_shape_with_explicit_padding( + input_tensor.shape.vector, explicit_padding + ) + + # Create the output tensor of the 'Pad' operator. + padded_tensor = self.duplicate_tensor( + input_tensor, input_tensor.name + "_padded", empty_buffer=True + ) + padded_tensor.shape = tflite_model.Shape(padded_shape) + + # Create the second input of the 'Pad' operator. + explicit_padding_tensor = self.create_tensor_for_data( + np.asarray(explicit_padding, dtype=np.int32), "padding" + ) + + # Create the 'Pad' operator + pad_operator = tflite_model.Operator(builtin_options=pad_options.Pad()) + pad_operator.tmp_inputs = [input_tensor, explicit_padding_tensor] + pad_operator.tmp_outputs = [padded_tensor] + + # Add tensor with constant values (scalar array) + if constant_value is not None: + # Only PadV2 supports the constant value tensor. (Seems that regular Pad does too, but it's not documented.) 
+ pad_operator.builtin_options = pad_v2_options.PadV2() + + constant_value_tensor = self.create_tensor_for_data( + constant_value, "constant_values" + ) + + # Input tensor 'constant_values' must have same quantization params as 'input' + propagate_quantization(input_tensor, constant_value_tensor) + pad_operator.tmp_inputs.append(constant_value_tensor) + + # Connect the operators + before_op.tmp_inputs[on_input_index] = padded_tensor + + return pad_operator + + def channels_first_version_of(self, t_tensor: tflite_model.Tensor): + """Get the channels first version of non-static 't_tensor'. If one is not + available in the graph yet, add transpose operator to create it.""" + if t_tensor in self._nchw_tensor_version.keys(): + return self._nchw_tensor_version[t_tensor] + + # Need to add Transpose operator to transform 't_tensor' to NCHW. + + new_tensor = self.duplicate_tensor( + t_tensor, t_tensor.name + "_channels_first", empty_buffer=True + ) + new_tensor.shape = translator.channels_last_shape_to_channels_first( + t_tensor.shape + ) + new_tensor.tensor_format = new_tensor.tensor_format.to_node_format() + + perm = translator.create_channels_last_to_channels_first_permutation( + t_tensor.rank + ) + transpose = self._create_transpose_operator(t_tensor, new_tensor, perm) + + self.check_and_append_operator(transpose) + + self._nchw_tensor_version[t_tensor] = new_tensor + + return new_tensor + + def redirect_tensor( + self, from_tensor: tflite_model.Tensor, to_tensor: tflite_model.Tensor + ): + """Create a mapping of 'from_tensor' to 'to_tensor', which will ensure that when 'check_and_append_operator()' + is called with an operator that references 'from_tensor', it will be replaced by 'to_tensor'. This ensures + that future operators will not use output tensors of operators, which are not actually in the model. + + This method should be explicitly used when an operator is skipped during conversion, so that other operators + which used the output tensors of the skipped operator will be redirected to valid tensors, such as an + appropriate input tensor of the skipped operator. + + :param from_tensor: Tensor which will be replaced by 'to_tensor'. + :param to_tensor: Valid tensor, that the future operators will use instead of the 'from_operator'. + """ + + old_replacement = self._skipped_output_map.get(from_tensor, None) + if old_replacement is not None: + if old_replacement != to_tensor: + logger.e( + logger.Code.INTERNAL_ERROR, + "redirect_tensor(): Tensor has already been redirected before!", + ) + else: + # Tensor has already been mapped to 'to_tensor'. + return + + # 'to_tensor' might have been redirected too (and so on) -> find the root of the redirection. + while to_tensor in self._skipped_output_map.keys(): + to_tensor = self._skipped_output_map[to_tensor] + + # Map 'from_tensor' to 'to_tensor'. + self._skipped_output_map[from_tensor] = to_tensor + + # Swap the names of the tensors to preserve the model IO interface. + self.swap_tensor_names(from_tensor, to_tensor) + + def check_and_append_operator(self, t_op: tflite_model.Operator): + """Append the new TFLite operator to the model.""" + + self.get_operators().append(t_op) + + def create_transposed_tensor( + self, tflite_tensor: tflite_model.Tensor, axes: list[int] | None = None + ) -> tflite_model.Tensor: + """Create a transposed version of given static TFLite tensor using numpy.transpose(). + + :param tflite_tensor: Static TFLite tensor to create the transposed version for. + :param axes: Permutation applied during transposition. 
If None, current axes in reversed order are used. + :return: The new transposed TFLite tensor. + """ + + if not tensor_has_data(tflite_tensor): + logger.e( + logger.Code.INTERNAL_ERROR, + "ModelBuilder.create_transposed_tensor() requires a static tensor!", + ) + + new_tensor = self.duplicate_tensor( + tflite_tensor, tflite_tensor.name + "_transposed" + ) + + new_tensor.tmp_buffer.data = np.transpose(new_tensor.tmp_buffer.data, axes) + new_tensor.shape = tflite_model.Shape(list(new_tensor.tmp_buffer.data.shape)) + + return new_tensor + + def duplicate_tensor( + self, + tensor: tflite_model.Tensor, + new_name: Optional[str] = None, + name_suffix: str = "", + empty_buffer: bool = False, + ) -> tflite_model.Tensor: + """Create a new TFLite tensor, which is an identical copy of 'tensor', with a new name. + If 'new_name' is given, it will be used as the name for the new tensor. + If instead the 'name_suffix' is given, it will be appended to the name of 'tensor'. + If neither is given, the new tensor will have a similar name as 'tensor'. + + The final name may be altered automatically, to ensure uniqueness. + + :param tensor: TFLite tensor to duplicate. + :param new_name: Optional name for the new tensor. + :param name_suffix: Optional suffix for the name of the new tensor. + :param empty_buffer: If `True`, the new copied tensor will have its own new empy buffer with no data. + If `False`, the new copied tensor will also have a copy of the buffer (data) of the + original tensor. + :return: A copy of 'tensor'. + """ + + new_tensor = deepcopy(tensor) + + new_name = new_name or tensor.name + name_suffix + new_tensor.name = self._validate_new_tensor_name(new_name) + + self.append_new_buffer(new_tensor.tmp_buffer) + if empty_buffer: + new_tensor.tmp_buffer.data = None + + self.append_new_tensor(new_tensor) + + return new_tensor + + def swap_tensor_names(self, t1: tflite_model.Tensor, t2: tflite_model.Tensor): + """Correctly swap the names of the 2 provided tensors.""" + + logger.internal_assert( + self._tensor_name_map.get(t1.name, t1) == t1 + and self._tensor_name_map.get(t2.name, t2) == t2, + "ModelBuilder.swap_tensor_names(): The name to tensor mapping is not valid.", + ) + + self._tensor_name_map[t1.name] = t2 + self._tensor_name_map[t2.name] = t1 + + t1.name, t2.name = t2.name, t1.name + + def _make_inputs_channels_first(self): + new_inputs = [] + + for input_tensor in self.get_sub_graph().inputs.tmp_inputs: + + if input_tensor.tensor_format.is_channels_last(): + # Create a Transpose operator and replace the graph input + + if input_tensor.rank > 6: + msg = ( + f"Couldn't preserve the shape of input tensor '{input_tensor.name}', because it has " + f"'{input_tensor.rank}' dimensions. TFLite Transpose only supports up to 6 dimensions." + ) + logger.e(logger.Code.IO_PRESERVATION_ERROR, msg) + + new_input = self.duplicate_tensor( + input_tensor, input_tensor.name + "_channels_first" + ) + new_input.shape = translator.channels_last_shape_to_channels_first( + input_tensor.shape + ) + new_input.tensor_format = input_tensor.tensor_format.to_node_format() + + perm = translator.create_channels_first_to_channels_last_permutation( + input_tensor.rank + ) + transpose = self._create_transpose_operator( + new_input, input_tensor, perm + ) + + self.get_operators().vector.insert(0, transpose) + + # Swap the names of `new_input` and `input_tensor`. 
+ self.swap_tensor_names(new_input, input_tensor) + + new_inputs.append(new_input) + + else: + # Keep the input + new_inputs.append(input_tensor) + + self.get_sub_graph().inputs.tmp_inputs = new_inputs + + def _make_outputs_channels_first(self): + new_outputs = [] + + for output_tensor in self.get_sub_graph().outputs.tmp_outputs: + if output_tensor.tensor_format.is_channels_last(): + # Add a Transpose operator, to make the output channels first + + if output_tensor.rank > 6: + logger.e( + logger.Code.IO_PRESERVATION_ERROR, + f"Couldn't preserve the shape of output tensor '{output_tensor.name}', because it has " + f"'{output_tensor.rank}' dimensions. TFLite Transpose only supports up to 6 " + "dimensions.", + ) + + new_output = self.channels_first_version_of(output_tensor) + + # Swap the names of `new_output` and `output_tensor`. + self.swap_tensor_names(new_output, output_tensor) + + new_outputs.append(new_output) + + else: + new_outputs.append(output_tensor) + + self.get_sub_graph().outputs.tmp_outputs = new_outputs + + def finish(self) -> tflite_model.Model: + """Finalize and optimize the converted TFLite model. Then return it. + + At least one of 'optimization_whitelist' and 'optimization_blacklist' must be 'None'. + :return: The final TFLite model. + """ + + if self.conversion_config.keep_io_format: + # If the input or output is channels last, add a Transpose operator, to make is channels first. + self._make_inputs_channels_first() + self._make_outputs_channels_first() + + # Apply optimizations to the internal TFLite model. + optimizer.Optimizer(self, self.conversion_config).optimize( + self.conversion_config.optimization_whitelist, + self.conversion_config.optimization_blacklist, + ) + + # Remove outputs, which are not produced by any node. Otherwise, there would be errors after inference. + operator_outputs = [] + for op in self.get_operators().vector: + operator_outputs.extend(op.tmp_outputs) + graph_outputs = self.get_sub_graph().outputs.tmp_outputs.copy() + for output in graph_outputs: + if output not in operator_outputs: + self.get_sub_graph().outputs.tmp_outputs.remove(output) + + # Switch from using 'tmp' references to 'index' references in tensors and buffers. + self._assign_tensor_and_buffer_indices( + self.conversion_config.allow_inputs_stripping + ) + + if self.conversion_config.tflite_quantization_integrity_check: + quantization_verification.verify_quantization_integrity(self._tfl_model) + + return self._tfl_model + + def _assign_tensor_and_buffer_indices( # noqa C901 + self, allow_inputs_stripping: bool + ): + """Correctly initialize all references via indices in all tensors and buffers.""" + + # Assign each buffer its index + for i, buffer in enumerate(self.get_buffers().vector): + buffer.tmp_index = i + + # Assign each tensor its index and its buffer index + for i, tensor in enumerate(self.get_tensors().vector): + if tensor.tmp_null_tensor: + # Using -1 as the index to the 'tensors' vector is way of telling the TFLite inference engine, that + # this tensor should not be used. 
+ # https://github.com/tensorflow/tensorflow/blob/05404d959119d41a8ffb8a75c6f232cfd8540d45/tensorflow/lite/kernels/kernel_util.cc#L79-L98 + tensor.tmp_index = -1 + else: + tensor.tmp_index = i + + tensor.buffer = tensor.tmp_buffer.tmp_index + + # TODO Remove inputs and outputs that are not in the tensors collection + + # Assign 'Outputs' and 'Inputs' their tensor indices + outputs = self.get_sub_graph().outputs + for tensor in outputs.tmp_outputs: + try: + outputs.append(tensor.tmp_index) + except Exception: + logger.e( + logger.Code.GENERATED_MODEL_INVALID, + f"The tensor '{tensor.name}' is among the model outputs, but does NOT appear in the graph!", + ) + + inputs = self.get_sub_graph().inputs + for tensor in inputs.tmp_inputs: + try: + inputs.append(tensor.tmp_index) + except Exception: + if allow_inputs_stripping: + logger.i( + f"The input tensor '{tensor.name}' will not be present in generated TFLite graph." + ) + else: + logger.e( + logger.Code.GENERATED_MODEL_INVALID, + f"The tensor '{tensor.name}' is among the model inputs, but does NOT appear in the graph!", + ) + + # Assign each operator its inputs and outputs indices + for operator in self.get_sub_graph().operators.vector: + for inputTensor in operator.tmp_inputs: + operator.inputs.append(inputTensor.tmp_index) + + for outputTensor in operator.tmp_outputs: + operator.outputs.append(outputTensor.tmp_index) + + def _build_operator_code( + self, op_type: BuiltinOperator, version, custom_code: str = None + ): + """Add a new OperatorCode for given 'op_type' and 'version' to the 'operator_codes' vector.""" + op_code = tflite_model.OperatorCode(op_type, version, custom_code) + + self.get_operator_codes().append(op_code) + + def build_empty_buffer(self) -> tflite_model.Buffer: + """Create, register and return a new empty 'Buffer' object.""" + buffer = tflite_model.Buffer() + + self.get_buffers().append(buffer) + + return buffer + + def create_tensor_for_data(self, data: np.ndarray, name: str): + data_type = translator.numpy_type_to_tf_lite(data.dtype) + + buffer = tflite_model.Buffer(data, data_type) + self.append_new_buffer(buffer) + + shape = translator.shape_from_numpy(data) + name = self._validate_new_tensor_name(name) + + tensor = tflite_model.Tensor(shape, name, data_type=data_type) + + tensor.tmp_buffer = buffer + + self.append_new_tensor(tensor) + + return tensor + + def create_empty_tensor( + self, name: str, tensor_type: TensorType, shape: Optional[List[int]] = None + ): + name = self._validate_new_tensor_name(name) + + if shape is not None: + shape = tflite_model.Shape(list(shape)) + + tensor = tflite_model.Tensor(shape, name, data_type=tensor_type) + tensor.tmp_buffer = self.build_empty_buffer() + + self.append_new_tensor(tensor) + + return tensor + + def create_null_tensor(self, name: str = "null_"): + """Create and return a TFLite tensor, which will be recognized by the TFLite inference engine as an empty + tensor. Internal TFLite kernel functions will return 'nullptr' when accessing this tensor. + + :param name: Optional name for the null tensor. + :return: The new TFLite null tensor. + """ + + tensor = self.create_empty_tensor(name, TensorType.FLOAT32) + tensor.tmp_null_tensor = True + return tensor + + """ -------------------- 'quality of life' functions. -------------------- """ + + def operator_can_be_skipped(self, t_op: tflite_model.Operator) -> bool: + """Determine whether operator 't_op' uses both a graph input and a graph output tensor. If it does, it cannot + be skipped. 
+
+        :param t_op: TFLite operator to check.
+        :return: True, if 't_op' doesn't use both a graph input and a graph output.
+        """
+        sub_graph = self.get_sub_graph()
+        graph_inputs = sub_graph.inputs.tmp_inputs
+        graph_outputs = sub_graph.outputs.tmp_outputs
+
+        produces_graph_output = any(
+            op_output in graph_outputs for op_output in t_op.tmp_outputs
+        )
+
+        consumes_graph_input = False
+        for op_input in t_op.tmp_inputs:
+            root = self._skipped_output_map.get(op_input, op_input)
+            if root in graph_inputs:
+                consumes_graph_input = True
+
+        if produces_graph_output and consumes_graph_input:
+            # The input and output would be disconnected.
+            return False
+
+        input_data_is_known = all_tensors_are_static(*t_op.tmp_inputs)
+
+        if produces_graph_output and input_data_is_known:
+            # If the operator is skipped, the output tensor would be assigned static data, which is not allowed for
+            # model outputs.
+            return False
+
+        return True
+
+    def turn_operator_to_identity(self, t_op: tflite_model.Operator):
+        """Turn the operator 't_op' into a Transpose operator which does nothing.
+        't_op' MUST have exactly 1 input tensor.
+
+        :param t_op: TFLite operator to turn into Transpose.
+        """
+        if len(t_op.tmp_inputs) != 1:
+            logger.e(
+                logger.Code.INTERNAL_ERROR,
+                "turn_operator_to_identity(): Operator doesn't have 1 input!",
+            )
+
+        if len(t_op.tmp_outputs) != 1:
+            logger.e(
+                logger.Code.INTERNAL_ERROR,
+                "turn_operator_to_identity(): Operator doesn't have 1 output!",
+            )
+
+        if t_op.tmp_inputs[0].rank <= 6:
+            # Create regular `Transpose`.
+            t_op.builtin_options = transpose_options.Transpose()
+        else:
+            # 7D and bigger require the Flex delegate `Transpose`.
+            if t_op.tmp_inputs[0].quantization is not None:
+                logger.e(
+                    logger.Code.CONVERSION_IMPOSSIBLE,
+                    "Conversion requires the addition of a `Transpose` operator with more than 6 dimensions, "
+                    "which doesn't support quantization.",
+                )
+
+            if not self.conversion_config.allow_select_ops:
+                logger.e(
+                    logger.Code.CONVERSION_IMPOSSIBLE,
+                    "Conversion requires the addition of a `Transpose` operator with more than 6 dimensions, "
+                    "which requires the use of Flex delegate. "
+                    + logger.Message.ALLOW_SELECT_OPS,
+                )
+
+            t_op.custom_options = FlexTranspose()
+
+        rank = t_op.tmp_inputs[0].rank
+        identity = np.asarray(range(rank), np.int32)
+        identity_tensor = self.create_tensor_for_data(identity, "identity")
+        t_op.tmp_inputs.append(identity_tensor)
+
+    def _validate_new_tensor_name(self, name: str) -> str:
+        """Take tensor name 'name' and make it unique in the model. Returns a unique tensor name."""
+
+        # Try adding numbers to the 'name' until it is unique
+        suffix = 0
+        new_name = name
+        while self.tensor_exists(new_name):
+            new_name = name + str(suffix)
+            suffix += 1
+
+        return new_name
+
+    def op_code_index_for_op_type(
+        self, op_type: BuiltinOperator, version: int = 1, custom_code: str = None
+    ):
+        """
+        Return the index to the 'operator_codes' vector in the TFLite model for the operator
+        with given 'op_type' and 'version'. If corresponding OperatorCode doesn't exist, add
+        it and create a new mapping.
+
+        :param op_type: Operator type. One of BuiltinOperator enum.
+        :param version: Operator version. Defaults to 1.
+        :param custom_code: Custom code name. Must be used with 'op_type' equal to 'BuiltinOperator.CUSTOM'.
+        :return: Index of the operator in 'operator_codes' vector.
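+
+        Example (illustrative)::
+
+            transpose_idx = self.op_code_index_for_op_type(BuiltinOperator.TRANSPOSE)
+            flex_idx = self.op_code_index_for_op_type(
+                BuiltinOperator.CUSTOM, 1, "FlexTranspose"
+            )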
+ """ + + version_name = version + if custom_code is not None: + version_name = f"{custom_code}_{version}" + + if op_type not in self.op_code_type_index_map.keys(): + self.op_code_type_index_map[op_type] = {} + + if version_name not in self.op_code_type_index_map[op_type].keys(): + self.op_code_type_index_map[op_type][ + version_name + ] = self.operator_codes_size() + self._build_operator_code(op_type, version, custom_code) + + return self.op_code_type_index_map[op_type][version_name] + + def tensor_exists(self, name: str): + """Determine if a tensor with 'name' already exists or not.""" + return name in self._tensor_name_map.keys() + + def _remove_tensor_with_name_from_collection(self, name, collection): + """Find and remove a tensor with given 'name' from given 'collection'.""" + to_remove = None + + for t in collection: + if t.name == name: + to_remove = t + break + + if to_remove is not None: + collection.remove(to_remove) + + def _tensors_similar( + self, t_tensor1: tflite_model.Tensor, t_tensor2: tflite_model.Tensor + ) -> bool: + """Determine if the given TFLite tensors have the same shape and + datatype.""" + + if t_tensor1.type != t_tensor2.type: + return False + + return translator.collections_equal( + t_tensor1.shape.vector, t_tensor2.shape.vector + ) + + def tensor_for_name(self, name: str) -> tflite_model.Tensor: + """ + Get an existing TFLite tensor with given 'name'. If such tensor does NOT exist, function will + create and register a new tensor with shape '[]', which will be returned. If the tensor was + redirected, destination tensor is returned instead. + + :param name: Name of the tensor. + :return: Tensor instance. + """ + if name not in self._tensor_name_map.keys(): + logger.d(f"Tensor '{name}' is not yet in the tensors. Adding it!") + + new_tensor = tflite_model.Tensor(tflite_model.Shape([]), name) + new_tensor.tmp_buffer = self.build_empty_buffer() + + self.append_new_tensor(new_tensor) + else: + tensor = self._tensor_name_map[name] + if new_tensor := self._skipped_output_map.get(tensor, None): + # Tensor was redirected - return destination tensor + if not self._tensors_similar(tensor, new_tensor): + logger.e( + logger.Code.INTERNAL_ERROR, + "Attempt to return non-matching tensor after redirect!", + ) + + return new_tensor + + return self._tensor_name_map[name] + + def buffers_size(self): + """Return the number of buffers that are currently in the model.""" + return self.get_buffers().len() + + def operator_codes_size(self): + """Return the number of operator codes that are currently in the model.""" + return self.get_operator_codes().len() + + def _remove_input_with_name(self, name): + """Find and remove a tensor in the sub_graph 'inputs' with given 'name'.""" + self._remove_tensor_with_name_from_collection( + name, self.get_sub_graph().inputs.tmp_inputs + ) + + def _remove_output_with_name(self, name): + """Find and remove a tensor in the sub_graph 'outputs' with given 'name'.""" + self._remove_tensor_with_name_from_collection( + name, self.get_sub_graph().outputs.tmp_outputs + ) + + def _remove_tensor_with_name(self, name): + """Find and remove a tensor in the graph with given 'name'.""" + self._remove_tensor_with_name_from_collection(name, self.get_tensors().vector) + + def append_new_tensor(self, t_tensor: tflite_model.Tensor, overwrite: bool = False): + """Append the TFLite tensor 't_tensor' to the 'SubGraph.tensors' and register it.""" + + if t_tensor.name in self._tensor_name_map.keys(): + """Tensor has already been added. 
Sometimes however, ONNX models + will have tensors in their 'inputs' or 'outputs', which don't + belong there and are in fact static. I this case we need to + overwrite the existing tensors.""" + + if overwrite: + self._remove_tensor_with_name(t_tensor.name) + + # If the tenor previously appeared in ONNX 'inputs' or 'outputs', + # the old version MUST be removed from there. + self._remove_input_with_name(t_tensor.name) + self._remove_output_with_name(t_tensor.name) + + self.get_tensors().append(t_tensor) + self._tensor_name_map[t_tensor.name] = t_tensor + else: + logger.w(f"Tensor '{t_tensor.name}' is already in the tensors!") + + else: + self._tensor_name_map[t_tensor.name] = t_tensor + self.get_tensors().append(t_tensor) + + def append_new_buffer(self, buffer: tflite_model.Buffer): + """Append the 'buffer' to the 'model.buffers'.""" + self.get_buffers().append(buffer) + + def get_first_empty_buffer(self) -> tflite_model.Buffer: + """Return the first empty buffer in the model. It should be the one on index 0.""" + for b in self.get_buffers().vector: + if not _buffer_has_data(b): + return b + + # No empty buffers in the model -> create one. This is uncommon, but can happen in weird models. + return self.build_empty_buffer() + + def get_operator_with_output( + self, t_tensor: tflite_model.Tensor + ) -> Optional[tflite_model.Operator]: + """Get the first operator from the graph, that has 't_tensor' in its 'tmp_outputs' list. + If such operator doesn't exist, return None. + """ + + for op in self.get_operators().vector: + if t_tensor in op.tmp_outputs: + return op + + return None + + def _create_transpose_operator( + self, + input_tensor: tflite_model.Tensor, + output_tensor: tflite_model.Tensor, + permutation: list[int] | np.ndarray, + ): + """Create a `Transpose` operator with given input, output and permutation.""" + if isinstance(permutation, list): + permutation = np.asarray(permutation, np.int32) + elif isinstance(permutation, np.ndarray): + logger.internal_assert( + permutation.dtype == np.int32, + "model_builder._create_transpose_operator(): " + "permutation doesn't have type int32.", + ) + else: + logger.e( + logger.Code.INTERNAL_ERROR, + "model_builder._create_transpose_operator(): permutation is not " + "a list or a numpy array.", + ) + + permutation_tensor = self.create_tensor_for_data(permutation, "perm") + + if input_tensor.rank <= 6: + # Create regular `Transpose`. + transpose = tflite_model.Operator( + builtin_options=transpose_options.Transpose(), + opcode_index=self.op_code_index_for_op_type(BuiltinOperator.TRANSPOSE), + ) + else: + # 7D and bigger require the Flex delegate `Transpose`. + + if input_tensor.quantization is not None: + logger.e( + logger.Code.CONVERSION_IMPOSSIBLE, + "Conversion requires the addition of a `Transpose` operator with more than 6 dimensions, " + "which doesn't support quantization.", + ) + + if not self.conversion_config.allow_select_ops: + logger.e( + logger.Code.CONVERSION_IMPOSSIBLE, + "Conversion requires the addition of a `Transpose` operator with more than 6 dimensions, " + "which requires the use of Flex delegate. 
" + + logger.Message.ALLOW_SELECT_OPS, + ) + + transpose = tflite_model.Operator( + custom_options=FlexTranspose(), + opcode_index=self.op_code_index_for_op_type( + FlexTranspose.operator_type, 1, FlexTranspose.custom_code + ), + ) + + transpose.tmp_inputs = [input_tensor, permutation_tensor] + transpose.tmp_outputs = [output_tensor] + transpose.tmp_added_extra = True + + return transpose + + def create_transpose_operator_before( + self, + before_operator: tflite_model.Operator, + on_input_index: int, + permutation: list[int] | np.ndarray, + ): + """ + Create a TFLite 'Transpose' operator before the 'before_operator'. + The input of 'before_operator' at index 'on_input_index', is where the Transpose operator will connect to + the graph. + + :param before_operator: Create the Transpose operator in front of this operator. + :param on_input_index: Attach the output of the Transpose op to the input of 'before_operator' on this index. + :param permutation: The permutation that will be applied by the Transpose operator. + """ + + input_tensor = before_operator.tmp_inputs[on_input_index] + output_tensor = self.duplicate_tensor( + input_tensor, name_suffix="_transposed_", empty_buffer=True + ) + permuted_shape = translator.apply_permutation_to( + output_tensor.shape.vector, permutation + ) + output_tensor.shape = tflite_model.Shape(permuted_shape) + + transpose = self._create_transpose_operator( + input_tensor, output_tensor, permutation + ) + + before_operator.tmp_inputs[on_input_index] = output_tensor + + return transpose + + def create_transpose_operator_after( + self, + after_operator: tflite_model.Operator, + on_output_index: int, + permutation: list[int] | np.ndarray, + keep_output_shape: bool = True, + ): + """ + Create a TFLite 'Transpose' operator after the 'after_operator'. + The output of 'after_operator' at index 'on_output_index' is where the Transpose operator will be connected. + + The original output tensor of 'after_operator' will be used as the output of the Transpose operator. + Meaning that operators which use that output of 'after_operator' will now use the output of the Transpose + operator instead! + + If 'keep_output_shape' is True, the output of the Transpose operator will have the same shape as the + original output of the 'after_operator', and the 'after_operator' will have its output shape changed to + match the permutation. + If 'keep_output_shape' is False, the output of the Transpose operator will have the new permuted shape, and + the shape of the output of 'after_operator' will stay the same. + + :param after_operator: Create the Transpose operator right after this operator. + :param on_output_index: Attach the input of the Transpose op to the output of 'after_operator' on this index. + :param permutation: The permutation that will be applied by the Transpose operator. + :param keep_output_shape: If True, the output of the Transpose will have the same shape as the original output + of 'after_operator', and 'after_operator' will have its output modified to match. + If False, the output of the Transpose operator will have the new permuted shape, and + the output of 'after_operator' will remain unchanged. + """ + + # Input and output tensors of the Transpose operator + output_tensor = after_operator.tmp_outputs[on_output_index] + input_tensor = self.duplicate_tensor( + output_tensor, output_tensor.name, empty_buffer=True + ) + + if keep_output_shape: + # The output of Transpose keeps its shape. 
Input of Transpose must be changed + inverse_permutation = translator.create_inverse_permutation(permutation) + pre_permuted_shape = translator.apply_permutation_to( + input_tensor.shape.vector, inverse_permutation + ) + input_tensor.shape = tflite_model.Shape(pre_permuted_shape) + + else: + # Set the shape of the Transpose output + permuted_shape = translator.apply_permutation_to( + output_tensor.shape.vector, permutation + ) + output_tensor.shape = tflite_model.Shape(permuted_shape) + + transpose = self._create_transpose_operator( + input_tensor, output_tensor, permutation + ) + + after_operator.tmp_outputs[on_output_index] = input_tensor + + return transpose + + def create_quantize_operator_before( + self, + before_operator: tflite_model.Operator, + on_input_index: int, + new_input_data_type: TensorType, + new_input_scale: Optional[List[float]] = None, + new_input_zero_point: Optional[List[int]] = None, + ): + """ + Create a TFLite 'Quantize' operator before the 'before_operator'. + The input of 'before_operator' at index 'on_input_index', is where the Quantize operator will connect to the + graph. + The input of 'before_operator' will now have a new data type and quantization parameters. + + :param before_operator: Create the Quantize operator in front of this operator. + :param on_input_index: Attach the output of the Quantize op to the input of 'before_operator' on this index. + :param new_input_data_type: New input TFLite data type of the 'before_operator' operator. + :param new_input_scale: New input scale of the 'before_operator' operator. + :param new_input_zero_point: New input zero point of the 'before_operator' operator. + """ + + input_tensor = before_operator.tmp_inputs[on_input_index] + output_tensor = self.duplicate_tensor( + input_tensor, input_tensor.name, empty_buffer=True + ) + + quantized_dimension = input_tensor.quantization.quantized_dimension + + if new_input_scale is None: + new_input_scale = input_tensor.quantization.scale.vector.copy() + if new_input_zero_point is None: + new_input_zero_point = input_tensor.quantization.zero_point.vector.copy() + + output_tensor.type = new_input_data_type + output_tensor.quantization = tflite_model.Quantization( + scale=tflite_model.Scale(new_input_scale), + zero_point=tflite_model.ZeroPoint(new_input_zero_point), + quantized_dimension=quantized_dimension, + ) + quantize = tflite_model.Operator( + builtin_options=quantize_options.Quantize(), + opcode_index=self.op_code_index_for_op_type(BuiltinOperator.QUANTIZE), + ) + quantize.tmp_inputs = [input_tensor] + quantize.tmp_outputs = [output_tensor] + quantize.tmp_added_extra = True + + before_operator.tmp_inputs[on_input_index] = output_tensor + + return quantize + + def create_quantize_operator_after( + self, + after_operator: tflite_model.Operator, + on_output_index: int, + new_output_data_type: TensorType, + new_output_scale: Optional[List[float]] = None, + new_output_zero_point: Optional[List[int]] = None, + ) -> tflite_model.Operator: + """ + Create a TFLite 'Quantize' operator after the 'after_operator'. + The output of 'after_operator' at index 'on_output_index', is where the Quantize operator will connect to + the graph. + The output of 'after_operator' will now have a new data type and quantization parameters. + + :param after_operator: Create the Quantize operator behind this operator. + :param on_output_index: Attach the input of the Quantize op to the output of 'before_operator' on this index. 
+        :param new_output_data_type: New output TFLite data type of the 'after_operator' operator.
+        :param new_output_scale: New output scale of the 'after_operator' operator.
+        :param new_output_zero_point: New output zero point of the 'after_operator' operator.
+        """
+
+        output_tensor = after_operator.tmp_outputs[on_output_index]
+        input_tensor = self.duplicate_tensor(
+            output_tensor, output_tensor.name, empty_buffer=True
+        )
+
+        quantized_dimension = output_tensor.quantization.quantized_dimension
+
+        if new_output_scale is None:
+            new_output_scale = input_tensor.quantization.scale.vector.copy()
+        if new_output_zero_point is None:
+            new_output_zero_point = input_tensor.quantization.zero_point.vector.copy()
+
+        input_tensor.type = new_output_data_type
+        input_tensor.quantization = tflite_model.Quantization(
+            scale=tflite_model.Scale(new_output_scale),
+            zero_point=tflite_model.ZeroPoint(new_output_zero_point),
+            quantized_dimension=quantized_dimension,
+        )
+
+        quantize = tflite_model.Operator(
+            builtin_options=quantize_options.Quantize(),
+            opcode_index=self.op_code_index_for_op_type(BuiltinOperator.QUANTIZE),
+        )
+        quantize.tmp_inputs = [input_tensor]
+        quantize.tmp_outputs = [output_tensor]
+        quantize.tmp_added_extra = True
+
+        after_operator.tmp_outputs[on_output_index] = input_tensor
+
+        return quantize
+
+    def create_dequantize_operator_after(
+        self,
+        after_operator: tflite_model.Operator,
+        on_output_index: int,
+        new_output_data_type: TensorType,
+        new_output_scale: list[float],
+        new_output_zero_point: list[int],
+        quantized_dimension: int,
+    ) -> tflite_model.Operator:
+        """
+        Create a TFLite 'Dequantize' operator after the 'after_operator'.
+        The output of 'after_operator' at index 'on_output_index' is where the Dequantize operator will connect to
+        the graph.
+        The output of 'after_operator' will now have a new quantized data type.
+        This method was designed for the use case where 'after_operator' has a FLOAT32 output tensor, but the
+        operator will produce a quantized output, while the following operators expect float data.
+        This is in line with other similar methods.
+
+        :param after_operator: Create the Dequantize operator behind this operator.
+        :param on_output_index: Attach the input of the Dequantize op to the output of 'after_operator' on this index.
+        :param new_output_data_type: New output TFLite data type of the 'after_operator' operator.
+        :param new_output_scale: New output scale of the 'after_operator' operator.
+        :param new_output_zero_point: New output zero point of the 'after_operator' operator.
+        :param quantized_dimension: The quantized dimension parameter of the new output tensor of 'after_operator'.
+        :return: The Dequantize operator.
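+
+        Illustrative usage (the names 'builder' and 'conv_op' are assumed for this example, not
+        taken from the surrounding code): to make 'conv_op' produce a quantized INT8 output while
+        its consumers keep receiving FLOAT32 data:
+            dequantize = builder.create_dequantize_operator_after(
+                conv_op, 0, TensorType.INT8, [0.05], [0], quantized_dimension=0
+            )
+        The returned operator must then be added to the model after 'conv_op'.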
+        """
+
+        output_tensor = after_operator.tmp_outputs[on_output_index]
+        input_tensor = self.duplicate_tensor(
+            output_tensor, output_tensor.name, empty_buffer=True
+        )
+
+        input_tensor.type = new_output_data_type
+        input_tensor.quantization = tflite_model.Quantization(
+            scale=tflite_model.Scale(new_output_scale),
+            zero_point=tflite_model.ZeroPoint(new_output_zero_point),
+            quantized_dimension=quantized_dimension,
+        )
+
+        dequantize = tflite_model.Operator(
+            builtin_options=dequantize_options.Dequantize(),
+            opcode_index=self.op_code_index_for_op_type(BuiltinOperator.DEQUANTIZE),
+        )
+        dequantize.tmp_inputs = [input_tensor]
+        dequantize.tmp_outputs = [output_tensor]
+        dequantize.tmp_added_extra = True
+
+        after_operator.tmp_outputs[on_output_index] = input_tensor
+
+        return dequantize
+
+    def create_reshape_before(
+        self,
+        before_op: tflite_model.Operator,
+        on_input_index: int,
+        new_shape: List[int],
+    ) -> tflite_model.Operator:
+        """
+        Create a TFLite 'Reshape' operator before the 'before_op' operator. The input of 'before_op' on index
+        'on_input_index' is where the 'Reshape' operator will connect. This function is meant to be used to
+        change the input shape of the 'before_op' operator on index 'on_input_index'.
+
+        :param before_op: TFLite operator that will consume the output of the new 'Reshape' operator.
+        :param on_input_index: Index of an input tensor of the 'before_op' operator, which will serve as the new input
+                               for the 'Reshape' operator.
+        :param new_shape: Shape of the new tensor that will serve as an output of 'Reshape' operator.
+        :return: The TFLite 'Reshape' operator.
+        """
+
+        input_tensor = before_op.tmp_inputs[on_input_index]
+
+        reshape_output = self.duplicate_tensor(
+            input_tensor, input_tensor.name + "_reshaped", empty_buffer=True
+        )
+        reshape_output.shape = tflite_model.Shape(new_shape)
+
+        reshape_op = tflite_model.Operator(
+            builtin_options=reshape_options.Reshape(new_shape)
+        )
+
+        reshape_op.tmp_inputs = [input_tensor]
+        reshape_op.tmp_outputs = [reshape_output]
+
+        before_op.tmp_inputs[on_input_index] = reshape_output
+
+        return reshape_op
+
+    def create_reshape_after(
+        self,
+        after_op: tflite_model.Operator,
+        on_output_index: int,
+        new_shape: List[int],
+    ) -> tflite_model.Operator:
+        """
+        Create a TFLite 'Reshape' operator after the 'after_op' operator. The output of 'after_op' on index
+        'on_output_index' is where the 'Reshape' operator will connect. This function will preserve the output
+        shape of the 'after_op' operator on index 'on_output_index'.
+
+        :param after_op: TFLite operator that will produce the input of the new 'Reshape' operator.
+        :param on_output_index: Index of an output tensor of the 'after_op' operator, which will serve as the new input
+                                for the 'Reshape' operator.
+        :param new_shape: Shape of the new tensor that will serve as an output of 'Reshape' operator.
+        :return: The TFLite 'Reshape' operator.
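+
+        Illustrative example (shapes assumed for this docstring): if 'after_op' originally produces
+        a tensor of shape [2, 8] whose consumers should instead receive shape [2, 8, 1, 1], then
+        'create_reshape_after(after_op, 0, [2, 8, 1, 1])' makes 'after_op' output a new '_reshaped'
+        tensor of shape [2, 8], and the returned 'Reshape' produces the original tensor, now with
+        shape [2, 8, 1, 1].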
+ """ + + output_tensor = after_op.tmp_outputs[on_output_index] + + reshape_input = self.duplicate_tensor( + output_tensor, output_tensor.name + "_reshaped", empty_buffer=True + ) + output_tensor.shape = tflite_model.Shape(new_shape) + + reshape_op = tflite_model.Operator( + builtin_options=reshape_options.Reshape(new_shape) + ) + + reshape_op.tmp_inputs = [reshape_input] + reshape_op.tmp_outputs = [output_tensor] + reshape_op.tmp_added_extra = True + + after_op.tmp_outputs[on_output_index] = reshape_input + + return reshape_op + + def create_cast_before( + self, + before_op: tflite_model.Operator, + on_input_index: int, + new_type: TensorType, + ) -> tflite_model.Operator: + """ + Create a TFLite 'Cast' operator before the 'before_op' operator. The input of 'before_op' on index + 'on_input_index' is where the 'Cast' operator will connect. + + :param before_op: TFLite operator that will consume the output of the new 'Cast' operator. + :param on_input_index: Index of an input tensor of the 'before_op' operator, which will serve as the new input + for the 'Cast' operator. + :param new_type: Type of output tensor of 'Cast' operator. + :return: The TFLite 'Cast' operator. + """ + + input_tensor = before_op.tmp_inputs[on_input_index] + + cast_output = self.duplicate_tensor( + input_tensor, input_tensor.name + "_casted", empty_buffer=True + ) + cast_output.type = new_type + + cast_op = tflite_model.Operator( + builtin_options=cast_options.Cast(input_tensor.type, new_type) + ) + cast_op.tmp_inputs = [input_tensor] + cast_op.tmp_outputs = [cast_output] + cast_op.tmp_added_extra = True + + before_op.tmp_inputs[on_input_index] = cast_output + + return cast_op + + def create_cast_after( + self, + after_op: tflite_model.Operator, + on_output_index: int, + new_type: TensorType, + ) -> tflite_model.Operator: + """ + Create a TFLite 'Cast' operator after the 'after_op' operator. The output of 'after_op' on index + 'on_output_index' is where the 'Cast' operator will connect. This function will change output + type of 'after_op' operator on index 'on_output_index' to 'new_type'. + + :param after_op: TFLite operator that will produce the input of the new 'Cast' operator. + :param on_output_index: Index of an output tensor of the 'after_op' operator, which will serve as the new input + for the 'Cast' operator. + :param new_type: Type of the new tensor that will serve as an input of 'Cast' operator. + :return: The TFLite 'Cast' operator. + """ + + output_tensor = after_op.tmp_outputs[on_output_index] + + cast_input = self.duplicate_tensor( + output_tensor, output_tensor.name + "_casted", empty_buffer=True + ) + cast_input.type = new_type + + cast_builtin_options = cast_options.Cast( + in_data_type=new_type, out_data_type=output_tensor.type + ) + cast_op = tflite_model.Operator(builtin_options=cast_builtin_options) + + cast_op.tmp_inputs = [cast_input] + cast_op.tmp_outputs = [output_tensor] + cast_op.tmp_added_extra = True + + after_op.tmp_outputs[on_output_index] = cast_input + + return cast_op + + def create_slice_after( + self, + after_op: tflite_model.Operator, + on_output_index: int, + begin: list[int], + size: list[int], + ): + """ + Create a TFLite 'Slice' operator after the 'after_op' operator. The output of 'after_op' on index + 'on_output_index' is where the 'Slice' operator will connect. This function will preserve output + shape of 'after_op' operator on index 'on_output_index'. + + :param after_op: TFLite operator that will produce the input of the new 'Slice' operator. 
+        :param on_output_index: Index of an output tensor of the 'after_op' operator, which will serve as the new input
+                                for the 'Slice' operator.
+        :param begin: List of indices where slicing begins. Must have the same length as the rank of the sliced tensor.
+        :param size: List of sliced sizes. Defines how many items are sliced per dimension. Must
+                     have the same length as the rank of the sliced tensor.
+        :return: The TFLite 'Slice' operator.
+        """
+
+        output_tensor = after_op.tmp_outputs[on_output_index]
+
+        logger.internal_assert(
+            len(begin) == len(size),
+            "create_slice_after(): Rank of 'begin' tensor and 'size' tensor don't match.",
+        )
+        logger.internal_assert(
+            len(begin) == len(output_tensor.shape.vector),
+            "create_slice_after(): Rank of 'begin' tensor and sliced tensor don't match.",
+        )
+
+        slice_input = self.duplicate_tensor(
+            output_tensor, output_tensor.name + "_sliced", empty_buffer=True
+        )
+        output_tensor.shape = tflite_model.Shape(size)
+
+        begin_tensor = self.create_tensor_for_data(np.asarray(begin, np.int32), "begin")
+        size_tensor = self.create_tensor_for_data(np.asarray(size, np.int32), "size")
+
+        slice_op = tflite_model.Operator(builtin_options=slice_options.Slice())
+        slice_op.tmp_inputs = [slice_input, begin_tensor, size_tensor]
+        slice_op.tmp_outputs = [output_tensor]
+        slice_op.tmp_added_extra = True
+
+        after_op.tmp_outputs[on_output_index] = slice_input
+
+        return slice_op
+
+    def create_gather_before(
+        self,
+        before_op: tflite_model.Operator,
+        on_input_index: int,
+        indices: list[int],
+        output_shape: list[int],
+        axis: int = 0,
+    ) -> tflite_model.Operator:
+        """
+        Create a TFLite 'Gather' operator before the 'before_op' operator. The input of 'before_op' on index
+        'on_input_index' is where the 'Gather' operator will connect.
+
+        :param before_op: TFLite operator that will consume the output of the new 'Gather' operator.
+        :param on_input_index: Index of an input tensor of the 'before_op' operator, which will serve as the new output
+                               for the 'Gather' operator.
+        :param indices: The `indices` operand of the TFLite 'Gather' operator.
+        :param output_shape: The shape of the output of the 'Gather' operator.
+        :param axis: The `axis` attribute of the TFLite 'Gather' operator.
+        :return: The TFLite 'Gather' operator.
+        """
+
+        input_tensor = before_op.tmp_inputs[on_input_index]
+
+        gather_output = self.duplicate_tensor(input_tensor, empty_buffer=True)
+        gather_output.shape = tflite_model.Shape(output_shape)
+
+        indices_tensor = self.create_tensor_for_data(
+            np.array(indices, np.int32), "indices"
+        )
+
+        gather_op = tflite_model.Operator(builtin_options=gather_options.Gather(axis))
+
+        gather_op.tmp_inputs = [input_tensor, indices_tensor]
+        gather_op.tmp_outputs = [gather_output]
+        gather_op.tmp_added_extra = True
+
+        before_op.tmp_inputs[on_input_index] = gather_output
+
+        return gather_op
+
+    def ensure_correct_broadcasting(
+        self, t_op: tflite_model.Operator, main_output: tflite_model.Tensor
+    ) -> List[tflite_model.Operator]:
+        """Make sure that all input tensors of 't_op' can have their shapes broadcast correctly.
+        Static input tensors will be altered statically, and for dynamic tensors, Reshape and Transpose operators
+        will be added to ensure a valid shape.
+        Note: The TFLite 't_op' operator still has to support shape broadcasting! This function just makes sure the
+        shapes can be broadcast correctly; it doesn't eliminate the need for broadcasting.
+
+        :param t_op: TFLite operator with input tensors that need to be made broadcastable.
+ :param main_output: The TFLite tensor, that is the main output of the operation carried out by 't_op'. + :return: A list of TFLite operators Reshape and Transpose, that need to be added to the model before 't_op'. + """ + + if main_output not in t_op.tmp_outputs: + logger.e( + logger.Code.INTERNAL_ERROR, + "ModelBuilder.ensure_correct_broadcasting(): 'main_output' is not among the outputs of 't_op'!", + ) + + if not uses_shape_broadcasting(t_op): + # Operator doesn't use shape broadcasting + return [] + + if not main_output.tensor_format.is_channels_last() and not any( + input_tensor.tensor_format.is_channels_last() + for input_tensor in t_op.tmp_inputs + ): + # Operator uses only formatless tensors + return [] + + # -- Operator uses channels last tensors and shape broadcasting -- + + ops_to_add = [] + new_tmp_inputs = [] + output_shape = main_output.shape + output_rank = output_shape.len() + + for input_tensor in t_op.tmp_inputs: + + if input_tensor.shape != main_output.shape: + if tensor_has_data(input_tensor): + # Replace the static input with one with a corrected shape. + x = self.prepare_static_tensor_for_correct_broadcasting_with_channels_first_tensors( + input_tensor, output_rank + ) + new_tmp_inputs.append(x) + else: + # Prepend Reshape and Transpose + ops = self.prepare_dynamic_tensor_for_correct_broadcasting_with_channels_first_tensors( + input_tensor, output_rank + ) + + if len(ops) != 0: + # The output of the 'Transpose' (last returned op) will be the new input of the operator + new_tmp_inputs.append(ops[-1].tmp_outputs[0]) + else: + new_tmp_inputs.append(input_tensor) + + ops_to_add.extend(ops) + + else: + # Keep the original input as is + new_tmp_inputs.append(input_tensor) + + t_op.tmp_inputs = new_tmp_inputs + + return ops_to_add + + def prepare_dynamic_tensor_for_correct_broadcasting_with_channels_first_tensors( + self, tensor: tflite_model.Tensor, output_rank: int + ) -> List[tflite_model.Operator]: + """Create Reshape and Transpose operators, to make sure the shape of the dynamic 'tensor' can be correctly + broadcasted with other TFLite channels last tensors. + The assumption is that the 'tensor' needs to be broadcasted with channels last tensors with a greater or + equal rank. And due to its smaller rank, the shapes will not line up. + The output tensor of the last returned operator is new, and must be set as a new input of the original + operator. + + :param tensor: Dynamic TFLite tensor, that needs to be broadcastable with channels last tensors, but the shape + doesn't line up, due to prior (possibly incorrect) conversion. + :param output_rank: The rank of the output tensor of the operator. + :return: A list of Reshape and Transpose operators, which need to be added to the model before 't_op'. + """ + input_rank = tensor.shape.len() + rank_diff = output_rank - input_rank + + if rank_diff < 0: + logger.e( + logger.Code.INTERNAL_ERROR, "'tensor' rank must be <= output_rank!" 
+ ) + + if rank_diff == 0: + # The tensor is already broadcastable + return [] + + ops_to_add = [] + + # -- Add a Reshape operator to extend the rank -- + + extended_shape = [1] * rank_diff + tensor.shape.vector + transpose_input = self.duplicate_tensor(tensor) + transpose_input.shape = tflite_model.Shape(extended_shape) + + reshape = tflite_model.Operator( + builtin_options=reshape_options.Reshape(extended_shape) + ) + reshape.tmp_inputs = [tensor] + reshape.tmp_outputs = [transpose_input] + + ops_to_add.append(reshape) + + # Add Transpose operator + if tensor.tensor_format.is_channels_last(): + # The 'tensor' was incorrectly converted from channels first before. Revert it and then convert properly. + + revert_perm = translator.create_channels_last_to_channels_first_permutation( + input_rank + ) + + # The indices refer to dimensions according to the rank of the input. But the Reshape may have increased the + # rank by prepending 1s. Therefore, we need to increment these indices according to the rank difference, to + # still refer to the same dimensions from the right. + revert_perm += rank_diff + + # Prepend a partial identity, to keep leading dimensions unchanged. + revert_perm = list(range(rank_diff)) + list(revert_perm) + + # Now add a permutation to convert the extended ONNX shape to a TFLite shape + to_tflite_perm = ( + translator.create_channels_first_to_channels_last_permutation( + output_rank + ) + ) + + perm = translator.combine_permutations(revert_perm, to_tflite_perm) + + else: + # The 'tensor' was NOT incorrectly converted earlier. Just convert the extended shape to TFLite. + perm = translator.create_channels_first_to_channels_last_permutation( + output_rank + ) + + transpose_output = self.duplicate_tensor(transpose_input) + transpose_output.shape = tflite_model.Shape( + translator.apply_permutation_to(transpose_output.shape.vector, perm) + ) + transpose_output.tensor_format = TensorFormat.CHANNELS_LAST + + transpose = self._create_transpose_operator( + transpose_input, transpose_output, perm + ) + ops_to_add.append(transpose) + + return ops_to_add + + def prepare_static_tensor_for_correct_broadcasting_with_channels_first_tensors( + self, tensor: tflite_model.Tensor, output_rank: int + ) -> tflite_model.Tensor: + """Create a TFLite tensor based on the static 'tensor', so that it can be correctly broadcasted with channels + last tensors, and return it. + The assumption is that the 'tensor' needs to be broadcasted with channels last tensors with a greater or + equal rank. And due to its smaller rank, the shapes will not line up. + + :param tensor: Static TFLite tensor, that needs to be broadcastable with channels last tensors, but the shape + doesn't line up, due to prior incorrect conversion. + :param output_rank: The rank of the output tensor of the operator. + :return: A new static tensor, with a corrected shape for TFLite broadcasting. + """ + if not tensor_has_data(tensor): + logger.e( + logger.Code.INTERNAL_ERROR, + "ModelBuilder._reshape_static_tensor_to_be_broadcastable(): 'tensor' is not static!", + ) + + tensor = self.duplicate_tensor( + tensor + ) # Work with a clean copy, in case the tensor is also used elsewhere. + data = tensor.tmp_buffer.data + shape = tensor.shape.vector + + rank_diff = output_rank - len(shape) + if rank_diff < 0: + logger.e( + logger.Code.INTERNAL_ERROR, "'tensor' rank must be <= output_rank!" + ) + + if tensor.tensor_format.is_channels_last(): + # The tensor was incorrectly converted to channels last. Extend it with 1s and convert properly. 
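+            # Illustrative walk-through (shapes assumed for this comment): a static CHANNELS_LAST
+            # tensor of shape [2, 4, 3] that must broadcast with a rank 4 output. Reverting yields
+            # the original ONNX shape [2, 3, 4], prepending a leading 1 gives [1, 2, 3, 4], and
+            # converting back to channels last produces the final TFLite shape [1, 3, 4, 2].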
+ + original_shape = translator.dims_to_channels_first( + shape + ) # Same shape as in the ONNX model + + # Prepend 1s to the shape + extended_onnx_shape = [1] * rank_diff + original_shape + + # Convert the full shape to TFLite format + tflite_shape = translator.dims_to_channels_last(extended_onnx_shape) + tensor.shape = tflite_model.Shape(tflite_shape) + + # Statically transpose the data + data = translator.convert_data_to_channels_first( + data + ) # To the same shape as in the ONNX model + data = data.reshape(extended_onnx_shape) # Extend with leading 1s + tensor.tmp_buffer.data = translator.convert_data_to_channels_last( + data + ) # Convert to TFLite format + + assert tflite_shape == list(tensor.tmp_buffer.data.shape) + + else: + # The tensor is the same as in the ONNX model. + + extended_onnx_shape = [1] * rank_diff + shape + + # Convert the full shape to TFLite format + tflite_shape = translator.dims_to_channels_last(extended_onnx_shape) + tensor.shape = tflite_model.Shape(tflite_shape) + + # Statically transpose the data + data = data.reshape(extended_onnx_shape) # Extend with leading 1s + tensor.tmp_buffer.data = translator.convert_data_to_channels_last( + data + ) # Convert to TFLite format + + assert tflite_shape == list(tensor.tmp_buffer.data.shape) + + return tensor + + def operator_produces_graph_output(self, t_op: tflite_model.Operator) -> bool: + """Determine whether any output tensor of the operator 't_op' is also an output of the entire graph. + + :param t_op: TFLite operator to check, + :return: True, if at least 1 output of 't_op' is also an output of the graph. + """ + graph_outputs = self.get_sub_graph().outputs.tmp_outputs + return any(output_tensor in graph_outputs for output_tensor in t_op.tmp_outputs) + + """ ---------------- Functions to get an element of the TFLite model. ---------------- + If the element doesn't exist, it is created. So functions always return a valid object. 
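+
+        For example, on a freshly constructed builder, 'get_operators()' first creates the
+        'SubGraphs' container, the first 'SubGraph' and its 'Operators' container, and only then
+        returns it, so callers never need to check for None.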
""" + + def get_sub_graphs(self) -> tflite_model.SubGraphs: + if self._tfl_model.sub_graphs is None: + self._tfl_model.sub_graphs = tflite_model.SubGraphs() + + return self._tfl_model.sub_graphs + + def get_sub_graph(self) -> tflite_model.SubGraph: + sub_graphs = self.get_sub_graphs() + if sub_graphs.len() == 0: + sub_graphs.append(tflite_model.SubGraph()) + + return sub_graphs.get(0) + + def get_tensors(self) -> tflite_model.Tensors: + sub_graph = self.get_sub_graph() + if sub_graph.tensors is None: + sub_graph.tensors = tflite_model.Tensors() + + return sub_graph.tensors + + def get_buffers(self) -> tflite_model.Buffers: + if self._tfl_model.buffers is None: + self._tfl_model.buffers = tflite_model.Buffers() + + return self._tfl_model.buffers + + def get_operators(self) -> tflite_model.Operators: + sub_graph = self.get_sub_graph() + if sub_graph.operators is None: + sub_graph.operators = tflite_model.Operators() + + return sub_graph.operators + + def get_operator_codes(self) -> tflite_model.OperatorCodes: + if self._tfl_model.operator_codes is None: + self._tfl_model.operator_codes = tflite_model.OperatorCodes() + + return self._tfl_model.operator_codes diff --git a/backends/nxp/backend/ir/converter/builder/quantization_verification.py b/backends/nxp/backend/ir/converter/builder/quantization_verification.py new file mode 100755 index 00000000000..25989123385 --- /dev/null +++ b/backends/nxp/backend/ir/converter/builder/quantization_verification.py @@ -0,0 +1,377 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import abc + +import numpy as np +from executorch.backends.nxp.backend.ir import logger + +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.TensorType import TensorType +from executorch.backends.nxp.backend.ir.tflite_generator import tflite_model + + +class IOTensor(abc.ABC): + idx: int + + +class Input(IOTensor): + def __init__(self, idx): + self.idx = idx + + def __str__(self): + return f"Input(idx={self.idx})" + + +class OptionalInput(IOTensor): + def __init__(self, idx): + self.idx = idx + + def __str__(self): + return f"OptionalInput(idx={self.idx})" + + +class Output(IOTensor): + def __init__(self, idx): + self.idx = idx + + def __str__(self): + return f"Output(idx={self.idx})" + + +class QuantizationRule(abc.ABC): + + @abc.abstractmethod + def valid(self, op: tflite_model.Operator) -> bool: + pass + + @abc.abstractmethod + def __str__(self): + pass + + +class SharedParamsForType(QuantizationRule): + + def __init__(self, tensor_type: TensorType, *tensors: IOTensor): + self.tensor_type = tensor_type + self.tensors = tensors + + def valid(self, op: tflite_model.Operator) -> bool: + shared_tensors = [] + for tensor in self.tensors: + if isinstance(tensor, Input): + shared_tensors.append(op.tmp_inputs[tensor.idx]) + elif isinstance(tensor, OptionalInput): + if tensor.idx < len(op.tmp_inputs): + shared_tensors.append(op.tmp_inputs[tensor.idx]) + else: + return True + elif isinstance(tensor, Output): + shared_tensors.append(op.tmp_outputs[tensor.idx]) + else: + logger.e( + logger.Code.INTERNAL_ERROR, f"Unknown IOTensor type: {type(tensor)}" + ) + + if shared_tensors[0].type != self.tensor_type: + return True + + if all(tensor.quantization is None for tensor in shared_tensors): + return True + + first_quantization = shared_tensors[0].quantization + + # Check quantization 
values (scales & zero-points)
+        scales_same = all(
+            first_quantization.scale == t.quantization.scale for t in shared_tensors[1:]
+        )
+        zp_same = all(
+            first_quantization.zero_point == t.quantization.zero_point
+            for t in shared_tensors[1:]
+        )
+        return scales_same and zp_same
+
+    def __str__(self):
+        return (
+            f"Q-params match required for tensors: {', '.join(map(str, self.tensors))}"
+        )
+
+
+class ExactValueForType(QuantizationRule):
+
+    def __init__(
+        self,
+        tensor_type: TensorType,
+        tensor: IOTensor,
+        scale: list[float],
+        zero_point: list,
+    ):
+        self.tensor = tensor
+        self.tensor_type = tensor_type
+        self.scale = scale
+        self.zero_point = zero_point
+
+    def valid(self, op: tflite_model.Operator) -> bool:
+        if isinstance(self.tensor, Input):
+            tflite_tensor = op.tmp_inputs[self.tensor.idx]
+        elif isinstance(self.tensor, OptionalInput):
+            if self.tensor.idx < len(op.tmp_inputs):
+                tflite_tensor = op.tmp_inputs[self.tensor.idx]
+            else:
+                return True
+        elif isinstance(self.tensor, Output):
+            tflite_tensor = op.tmp_outputs[self.tensor.idx]
+        else:
+            logger.e(
+                logger.Code.INTERNAL_ERROR,
+                f"Unknown IOTensor type: {type(self.tensor)}",
+            )
+
+        if tflite_tensor.quantization is None or self.tensor_type != tflite_tensor.type:
+            return True
+
+        scale = tflite_tensor.quantization.scale.vector
+        zp = tflite_tensor.quantization.zero_point.vector
+
+        # noinspection PyTypeChecker
+        return np.allclose(scale, self.scale) and np.allclose(zp, self.zero_point)
+
+    def __str__(self):
+        return f"ExactValue(scale={self.scale}, zero_point={self.zero_point}, type={self.tensor_type}, tensor={self.tensor})"
+
+
+class FullyConnectedWeightZeroPoint(QuantizationRule):
+    """The LiteRT documentation says that `FullyConnected` must have a weight zero point of 0
+    (https://ai.google.dev/edge/litert/models/quantization_spec).
+    If this condition is not satisfied, LiteRT will not raise any errors, but the output will not be correct.
+
+    However, if the `weights` are dynamic, the kernels DO in fact support any zero point, not just 0.
+    """
+
+    def valid(self, op: tflite_model.Operator) -> bool:
+        weights = op.tmp_inputs[1]
+        if weights.quantization is None:
+            return True
+
+        if weights.tmp_buffer is None or weights.tmp_buffer.data is None:
+            # The `weights` are dynamic. LiteRT supports any zero point in this case.
+            return True
+
+        else:
+            # Static `weights`.
+            if weights.type == TensorType.INT8:
+                zero_point = 0
+            elif weights.type == TensorType.UINT8:
+                zero_point = 128
+            else:
+                return True
+
+            return all(zp == zero_point for zp in weights.quantization.zero_point)
+
+    def __str__(self):
+        return "FullyConnectedWeightZeroPoint()"
+
+
+class ValidBiasValues(QuantizationRule):
+
+    def valid(self, op: tflite_model.Operator) -> bool:
+        if len(op.tmp_inputs) < 3:
+            # Bias tensor not present -> ignore
+            return True
+        if (bias_quant := op.tmp_inputs[2].quantization) is None:
+            # Not quantized -> ignore
+            return True
+
+        if (input_1_quant := op.tmp_inputs[0].quantization) is None:
+            logger.w(
+                "Bias tensor is quantized but the first input tensor is not. This is not supported in TFLite."
+            )
+            return False
+        if (input_2_quant := op.tmp_inputs[1].quantization) is None:
+            logger.w(
+                "Bias tensor is quantized but the weight tensor is not. This is not supported in TFLite."
+            )
+            return False
+
+        if op.tmp_inputs[2].type != TensorType.INT32:
+            logger.w(
+                "Quantized bias tensor's type isn't INT32. This is not supported in TFLite."
+ ) + return False + + expected_bias_scale = np.array(input_1_quant.scale.vector) * np.array( + input_2_quant.scale.vector + ) + + if not np.allclose( + expected_bias_scale.astype(np.float32), + np.array(bias_quant.scale.vector, dtype=np.float32), + ): + logger.w( + f"Scale of quantized bias tensor '{op.tmp_inputs[2].name}' is not equal to 'input0_scale * " + "input1_scale[...]'. This is not supported in TFLite." + ) + return False + + if bias_quant.zero_point.vector[0] != 0: + logger.w( + "Zero point of quantized bias tensor is not equal to '0'. This is not supported in TFLite." + ) + return False + + return True + + def __str__(self): + return "ExactBiasValues()" + + +def verify_quantization_integrity(model: tflite_model.Model): + rules = { + BuiltinOperator.AVERAGE_POOL_2D: [ + SharedParamsForType(TensorType.INT8, Input(0), Output(0)), + SharedParamsForType(TensorType.UINT8, Input(0), Output(0)), + ], + BuiltinOperator.BROADCAST_TO: [ + SharedParamsForType(TensorType.INT8, Input(0), Output(0)), + SharedParamsForType(TensorType.UINT8, Input(0), Output(0)), + ], + BuiltinOperator.CONCATENATION: [ + SharedParamsForType(TensorType.INT8, Input(0), Output(0)), + SharedParamsForType(TensorType.INT8, Input(0), OptionalInput(1)), + SharedParamsForType(TensorType.INT8, Input(0), OptionalInput(2)), + SharedParamsForType(TensorType.INT8, Input(0), OptionalInput(3)), + SharedParamsForType(TensorType.INT8, Input(0), OptionalInput(4)), + ], + BuiltinOperator.CONV_2D: [ValidBiasValues()], + BuiltinOperator.DEPTHWISE_CONV_2D: [ValidBiasValues()], + BuiltinOperator.FULLY_CONNECTED: [ + ValidBiasValues(), + FullyConnectedWeightZeroPoint(), + ], + BuiltinOperator.GATHER: [ + SharedParamsForType(TensorType.INT8, Input(0), Output(0)), + SharedParamsForType(TensorType.UINT8, Input(0), Output(0)), + ], + BuiltinOperator.GATHER_ND: [ + SharedParamsForType(TensorType.INT8, Input(0), Output(0)), + SharedParamsForType(TensorType.UINT8, Input(0), Output(0)), + ], + BuiltinOperator.L2_NORMALIZATION: [ + ExactValueForType(TensorType.INT8, Output(0), [1.0 / 128.0], [0]), + ], + BuiltinOperator.LOG_SOFTMAX: [ + ExactValueForType(TensorType.INT8, Output(0), [16.0 / 256.0], [127]), + ExactValueForType(TensorType.UINT8, Output(0), [16.0 / 256.0], [255]), + ], + BuiltinOperator.LOGISTIC: [ + ExactValueForType(TensorType.INT8, Output(0), [1.0 / 256.0], [-128]), + ExactValueForType(TensorType.UINT8, Output(0), [1.0 / 256.0], [0]), + ], + BuiltinOperator.MAX_POOL_2D: [ + SharedParamsForType(TensorType.INT8, Input(0), Output(0)), + SharedParamsForType(TensorType.UINT8, Input(0), Output(0)), + ], + BuiltinOperator.MAXIMUM: [ + SharedParamsForType(TensorType.INT8, Input(0), Output(0)), + SharedParamsForType(TensorType.INT8, Input(0), Input(1)), + SharedParamsForType(TensorType.UINT8, Input(0), Output(0)), + SharedParamsForType(TensorType.UINT8, Input(0), Input(1)), + ], + BuiltinOperator.MINIMUM: [ + SharedParamsForType(TensorType.INT8, Input(0), Output(0)), + SharedParamsForType(TensorType.INT8, Input(0), Input(1)), + SharedParamsForType(TensorType.UINT8, Input(0), Output(0)), + SharedParamsForType(TensorType.UINT8, Input(0), Input(1)), + ], + BuiltinOperator.PAD: [ + SharedParamsForType(TensorType.INT8, Input(0), Output(0)), + SharedParamsForType(TensorType.UINT8, Input(0), Output(0)), + ], + BuiltinOperator.PADV2: [ + SharedParamsForType(TensorType.INT8, Input(0), Output(0)), + SharedParamsForType(TensorType.INT8, Input(0), OptionalInput(2)), + SharedParamsForType(TensorType.UINT8, Input(0), Output(0)), + 
SharedParamsForType(TensorType.UINT8, Input(0), OptionalInput(2)), + ], + BuiltinOperator.RESHAPE: [ + SharedParamsForType(TensorType.INT8, Input(0), Output(0)), + SharedParamsForType(TensorType.UINT8, Input(0), Output(0)), + ], + BuiltinOperator.RESIZE_BILINEAR: [ + SharedParamsForType(TensorType.INT8, Input(0), Output(0)), + SharedParamsForType(TensorType.UINT8, Input(0), Output(0)), + ], + BuiltinOperator.RESIZE_NEAREST_NEIGHBOR: [ + SharedParamsForType(TensorType.INT8, Input(0), Output(0)), + SharedParamsForType(TensorType.UINT8, Input(0), Output(0)), + ], + BuiltinOperator.SCATTER_ND: [ + SharedParamsForType(TensorType.INT8, Input(1), Output(0)), + SharedParamsForType(TensorType.UINT8, Input(1), Output(0)), + ], + BuiltinOperator.SELECT_V2: [ + SharedParamsForType(TensorType.INT8, Input(1), Output(0)), + SharedParamsForType(TensorType.INT8, Input(1), Input(2)), + SharedParamsForType(TensorType.UINT8, Input(1), Output(0)), + SharedParamsForType(TensorType.UINT8, Input(1), Input(2)), + ], + BuiltinOperator.SLICE: [ + SharedParamsForType(TensorType.INT8, Input(0), Output(0)), + SharedParamsForType(TensorType.UINT8, Input(0), Output(0)), + ], + BuiltinOperator.SOFTMAX: [ + ExactValueForType(TensorType.INT8, Output(0), [1.0 / 256.0], [-128]), + ExactValueForType(TensorType.UINT8, Output(0), [1.0 / 256.0], [0]), + ], + BuiltinOperator.SQUEEZE: [ + SharedParamsForType(TensorType.INT8, Input(0), Output(0)), + SharedParamsForType(TensorType.UINT8, Input(0), Output(0)), + ], + BuiltinOperator.TANH: [ + ExactValueForType(TensorType.INT8, Output(0), [1.0 / 128.0], [0]), + ], + BuiltinOperator.TILE: [ + SharedParamsForType(TensorType.INT8, Input(0), Output(0)), + SharedParamsForType(TensorType.UINT8, Input(0), Output(0)), + ], + BuiltinOperator.TRANSPOSE: [ + SharedParamsForType(TensorType.INT8, Input(0), Output(0)), + SharedParamsForType(TensorType.UINT8, Input(0), Output(0)), + ], + } + + ops: list[tflite_model.Operator] = model.sub_graphs.vector[0].operators.vector + operator_codes = { + idx: code.builtin_code for idx, code in enumerate(model.operator_codes.vector) + } + is_error = False + + for op in ops: + if op.builtin_options: + if op.builtin_options.operator_type in rules: + for rule in rules[op.builtin_options.operator_type]: + if not rule.valid(op): + logger.w( + f"TFLite operator with op_type='{op.builtin_options.operator_type}' wasn't quantized " + f"properly. Following TFLite quantization rule was not satisfied: '{rule}'." + ) + is_error = True + else: + if operator_codes[op.opcode_index] in rules: + for rule in rules[operator_codes[op.opcode_index]]: + if not rule.valid(op): + logger.w( + f"TFLite operator with op_type='{operator_codes[op.opcode_index]}' wasn't quantized " + f"properly. Following TFLite quantization rule was not satisfied: '{rule}'." + ) + is_error = True + + if is_error: + logger.e( + logger.Code.INTERNAL_ERROR, + "Some ops were not correctly quantized. 
Refer to previous log messages and please report this issue.",
+        )
diff --git a/backends/nxp/backend/ir/converter/conversion/__init__.py b/backends/nxp/backend/ir/converter/conversion/__init__.py
new file mode 100755
index 00000000000..e69de29bb2d
diff --git a/backends/nxp/backend/ir/converter/conversion/aten_translator.py b/backends/nxp/backend/ir/converter/conversion/aten_translator.py
new file mode 100755
index 00000000000..075ad9abd57
--- /dev/null
+++ b/backends/nxp/backend/ir/converter/conversion/aten_translator.py
@@ -0,0 +1,71 @@
+# Copyright 2023 Martin Pavella
+# Copyright 2024-2025 NXP
+#
+# License: MIT
+# See the LICENSE_MIT for more details.
+#
+"""
+    aten_translator
+
+Module contains functions for context-free conversion of various
+things from Torch ATen to TFLite.
+"""
+
+from typing import Optional, Tuple
+
+import executorch.backends.nxp.backend.ir.lib.tflite.Padding as tflPadding
+import executorch.backends.nxp.backend.ir.logger as logger
+
+
+def torch_explicit_padding_to_tflite(torch_padding: list[int]) -> list[list[int]]:
+    """Convert the attribute or input 'pad' of the Torch 'Pad' operator to the 'paddings' input of the TFLite 'Pad'
+    class of operators.
+
+    This function does NOT take tensor formats into consideration.
+    """
+    return [[dim_padding, dim_padding] for dim_padding in torch_padding]
+
+
+def torch_padding_to_tflite_explicit_padding(
+    torch_padding: list[int],
+) -> list[list[int]]:
+    """Convert a Torch 'padding' attribute of operators such as Conv, MaxPool or AveragePool, to a list of ints which
+    is compatible with the TFLite 'Pad' operator.
+    """
+    tflite_padding = torch_explicit_padding_to_tflite(torch_padding)
+
+    # TFLite also allows padding of the 'batch' and 'channels' dimensions. Torch does not.
+    tflite_padding.insert(0, [0, 0])
+    tflite_padding.append([0, 0])
+
+    return tflite_padding
+
+
+def convert_padding(
+    t_padding: list[int],
+) -> Tuple[tflPadding.Padding, Optional[list[list[int]]]]:
+    """Convert the Torch operator attribute 'padding' to TFLite.
+
+    :param t_padding: Torch operator attribute 'padding'.
+    :return: A tuple.
+             The first element is the converted TFLite padding.
+             The second is None, if conversion is finished. Or it is a list of ints representing the explicit
+             padding in TFLite format (compatible with the 'Pad' operator), which needs to be provided by a
+             'Pad' operator. The caller must add this operator using model_builder!
+    """
+
+    if t_padding == [0, 0]:
+        return tflPadding.Padding.VALID, None
+    else:
+        # 'padding' cannot be converted directly. Return 'VALID' and the required explicit padding; the caller must
+        # implement the conversion by adding a 'Pad' operator.
+
+        logger.d(
+            "Explicit Torch 'padding' cannot be represented directly by the 'VALID' padding type. "
+            "Inserting an extra 'Pad' operator."
+        )
+
+        # The Torch 'padding' uses a different format than the TFLite 'Pad' operator. Convert the explicit padding.
+        tflite_explicit_padding = torch_padding_to_tflite_explicit_padding(t_padding)
+
+        return tflPadding.Padding.VALID, tflite_explicit_padding
diff --git a/backends/nxp/backend/ir/converter/conversion/common.py b/backends/nxp/backend/ir/converter/conversion/common.py
new file mode 100755
index 00000000000..d56893731f0
--- /dev/null
+++ b/backends/nxp/backend/ir/converter/conversion/common.py
@@ -0,0 +1,236 @@
+#
+# Copyright 2023 Martin Pavella
+# Copyright 2023-2024 NXP
+#
+# License: MIT
+# See the LICENSE_MIT for more details.
+#
+"""
+    common
+
+This file contains functions shared by the various files in the
+'conversion/' directory.
+""" + +from typing import Any, List, MutableSequence, Optional + +import executorch.backends.nxp.backend.ir.logger as logger +from executorch.backends.nxp.backend.ir.tflite_generator import tflite_model +from executorch.backends.nxp.backend.ir.tflite_generator.builtin_options import ( + average_pool_2d_options, + conv_2d_options, + max_pool_2d_options, + transpose_conv_options, +) + + +def exactly_one_is_none(obj1: Optional, obj2: Optional) -> bool: + """Determine if exactly 1 of the arguments is None, or not.""" + return (obj1 is None and obj2 is not None) or (obj1 is not None and obj2 is None) + + +def contains_duplicates(list_to_check: List[Any]) -> bool: + """Determine if given list has duplicate elements or not.""" + return len(list_to_check) != len(set(list_to_check)) + + +def clamp(val: int, start: int, end: int) -> int: + """Clamp an int value between start and end (inclusive) and return it.""" + if val < start: + return start + + elif val > end: + return end + + return val + + +def try_get_input(t_op: tflite_model.Operator, idx: int) -> tflite_model.Tensor | None: + """Return the input tensors of 't_op' at index 'idx', or None if the operator doesn't have that input. + + This function should ALWAYS be used to get optional input tensors. + + :param t_op: TFLite operator to get the input tensor from. + :param idx: Index of the input tensor to get. + :return: The input tensor at index 'idx', or None. + """ + + if len(t_op.tmp_inputs) < idx + 1: + # The operator doesn't have that many inputs. + return None + + tensor = t_op.tmp_inputs[idx] + + if tensor.name == "": + # ONNX allows the name "" for optional tensors. It indicates that the tensor should be ignored, and a default + # value should be used. Just like if the tensor was omitted altogether. + return None + + return tensor + + +def extend_1d_pads_to_2d(onnx_1d_pads: MutableSequence): + """Extend the onnx 'pads' operator attribute that represents padding for a 1D kernel to 2D, by adding '0's.""" + if onnx_1d_pads is not None: + onnx_1d_pads.insert(1, 0) + onnx_1d_pads.append(0) + + +def extend_1d_strides_to_2d(onnx_1d_strides: MutableSequence): + """Extend the onnx 'strides' operator attribute that represents strides for a 1D kernel to 2D, by adding '1'.""" + if onnx_1d_strides is not None: + onnx_1d_strides.append(1) + + +def extend_1d_dilations_to_2d(onnx_1d_dilations: MutableSequence): + """Extend the onnx 'dilations' operator attribute that represents dilations for a 1D kernel to 2D, by adding '1'.""" + if onnx_1d_dilations is not None: + onnx_1d_dilations.append(1) + + +def extend_1d_kernel_shape_to_2d(onnx_1d_kernel_shape: MutableSequence): + """Extend the onnx 1D 'kernel_shape' operator attribute to 2D, by adding '1'.""" + if onnx_1d_kernel_shape is not None: + onnx_1d_kernel_shape.append(1) + + +StridedOptions = ( + average_pool_2d_options.AveragePool2D + | conv_2d_options.Conv2D + | max_pool_2d_options.MaxPool2D + | transpose_conv_options.TransposeConv +) + + +def assign_2d_strides(options: StridedOptions, strides: Optional[List[int]]): + """Assign to 'obj' the attributes 'stride_h' and 'stride_w' from 'strides'. + If 'strides' is None, assign 1s. + + :param options: TFLite AveragePool2D, Conv2D, MaxPool2D or TransposeConv options object. + :param strides: An optional list of ONNX strides attribute. 
+    """
+
+    if strides is None:
+        # Default values are [1, 1]
+        options.stride_h = 1
+        options.stride_w = 1
+
+    elif len(strides) == 2:
+        options.stride_h = strides[0]
+        options.stride_w = strides[1]
+
+    else:
+        logger.e(
+            logger.Code.INVALID_ONNX_OPERATOR_ATTRIBUTE,
+            f"ONNX operator has invalid 'strides' attribute! ('{strides}')",
+        )
+
+
+def assign_2d_dilations(conv_2d: conv_2d_options.Conv2D, dilations: Optional[List[int]]):
+    """Assign the 'conv_2d' attributes 'dilation_h_factor' and 'dilation_w_factor' from 'dilations'."""
+
+    if dilations is None:
+        return
+
+    if len(dilations) == 2:
+        conv_2d.dilation_h_factor = dilations[0]
+        conv_2d.dilation_w_factor = dilations[1]
+    else:
+        logger.d(f"Expected 2D dilations, got '{dilations}'. Leaving default values.")
+
+
+def uses_shape_broadcasting(t_op: tflite_model.Operator) -> bool:
+    """Determine whether the given TFLite operator uses shape broadcasting for its input tensors.
+
+    :param t_op: TFLite operator with 'tmp_inputs' initialized.
+    :return: True, if the operator uses shape broadcasting for its input tensors.
+             False otherwise.
+    """
+
+    if t_op.tmp_inputs is None:
+        logger.e(
+            logger.Code.INTERNAL_ERROR,
+            "common.uses_shape_broadcasting(): 'tmp_inputs' are None!",
+        )
+
+    if len(t_op.tmp_inputs) == 0:
+        logger.e(
+            logger.Code.INTERNAL_ERROR,
+            "common.uses_shape_broadcasting(): Operator has no inputs!",
+        )
+
+    first_input_shape = t_op.tmp_inputs[0].shape
+
+    return any(
+        input_tensor.shape != first_input_shape for input_tensor in t_op.tmp_inputs[1:]
+    )
+
+
+def uses_multiple_input_types(t_op: tflite_model.Operator) -> bool:
+    """Determine whether the input tensors of the given TFLite operator use different data types.
+
+    :param t_op: TFLite operator with 'tmp_inputs' initialized.
+    :return: True, if any two input tensors have a different data type.
+             False, if all input tensors use the same data type.
+    """
+
+    if t_op.tmp_inputs is None:
+        logger.e(
+            logger.Code.INTERNAL_ERROR,
+            "common.uses_multiple_input_types(): 'tmp_inputs' are None!",
+        )
+
+    if len(t_op.tmp_inputs) == 0:
+        logger.e(
+            logger.Code.INTERNAL_ERROR,
+            "common.uses_multiple_input_types(): Operator has no inputs!",
+        )
+
+    first_input_type = t_op.tmp_inputs[0].type
+    return any(
+        input_tensor.type != first_input_type for input_tensor in t_op.tmp_inputs[1:]
+    )
+
+
+class OpsList:
+    """
+    Holder of a TFLite operator (middle_op) that can be prefixed (pre_ops) or suffixed (post_ops)
+    by other operators. When flattened, the order of the operators is preserved.
+    """
+
+    pre_ops: List[tflite_model.Operator]
+    middle_op: tflite_model.Operator
+    post_ops: List[tflite_model.Operator]
+
+    def __init__(
+        self,
+        pre_ops: List[tflite_model.Operator] | None = None,
+        middle_op=None,
+        post_ops: List[tflite_model.Operator] | None = None,
+    ):
+        self.pre_ops = pre_ops or []
+        self.middle_op = middle_op
+        self.post_ops = post_ops or []
+
+    def flatten(self):
+        return self.pre_ops + [self.middle_op] + self.post_ops
+
+    def add_pre(self, ops: tflite_model.Operator | list[tflite_model.Operator]):
+        if isinstance(ops, tflite_model.Operator):
+            ops = [ops]
+
+        logger.internal_assert(
+            isinstance(ops, list), "OpsList: add_pre() called with invalid value."
+        )
+
+        self.pre_ops.extend(ops)
+
+    def add_post(self, ops: tflite_model.Operator | list[tflite_model.Operator]):
+        if isinstance(ops, tflite_model.Operator):
+            ops = [ops]
+
+        logger.internal_assert(
+            isinstance(ops, list), "OpsList: add_post() called with invalid value."
+ ) + + self.post_ops.extend(ops) diff --git a/backends/nxp/backend/ir/converter/conversion/translator.py b/backends/nxp/backend/ir/converter/conversion/translator.py new file mode 100755 index 00000000000..4f327c6ac80 --- /dev/null +++ b/backends/nxp/backend/ir/converter/conversion/translator.py @@ -0,0 +1,961 @@ +# +# Copyright 2023 Martin Pavella +# Copyright 2023-2024 NXP +# +# License: MIT +# See the LICENSE_MIT for more details. +# +""" + translator + +Module contains functions for context-free conversion of various +things from ONNX to TFLite. +""" + +from typing import Any, Collection, List, Optional, Sequence, Tuple + +import executorch.backends.nxp.backend.ir.lib.tflite.Padding as tflPadding +import executorch.backends.nxp.backend.ir.logger as logger +import executorch.backends.nxp.backend.ir.tflite_generator.tflite_model as tflite_model + +import numpy as np +import torch +from executorch.backends.nxp.backend.ir.lib.tflite.TensorType import TensorType +from executorch.backends.nxp.backend.ir.tensor_formatting import TensorFormat +from executorch.backends.nxp.backend.ir.tflite_generator.meta.types import ( + TensorFlowDataType, +) + + +def permute_static_tensor(tensor: tflite_model.Tensor, perm: list[int]): + """Take a static TFLite tensor and permute its shape and data according to the permutation in 'perm'. + + :param tensor: Static TFLite tensor to permute. + :param perm: Permutation to apply to the tensor. + """ + + logger.internal_assert( + tensor.tmp_buffer is not None, "permute_static_tensor: tensor is not static." + ) + + data = tensor.tmp_buffer.data + data = np.transpose(data, perm) + + shape = apply_permutation_to(tensor.shape.vector.copy(), perm) + logger.internal_assert( + shape == list(data.shape), "permute_static_tensor: shapes do not match." + ) + + tensor.tmp_buffer.data = data + tensor.shape = tflite_model.Shape(shape) + + +def get_tflite_tensor_shape_with_explicit_padding( + tflite_shape: List[int], explicit_padding: List[List[int]] +) -> List[int]: + """Get the resulting shape of a tensor with shape 'tflite_shape' (in TFLite format), after 'explicit_padding' is + applied to it. + """ + + if (len(tflite_shape) != len(explicit_padding)) or any( + len(sub_list) != 2 for sub_list in explicit_padding + ): + logger.e( + logger.Code.INTERNAL_ERROR, + f"Cannot apply padding '{explicit_padding}' to TFLite shape '{tflite_shape}'!", + ) + + total_padding = [ + start + end for start, end in explicit_padding + ] # Total padding for each dimension + + padded_shape = [] + for dimension, padding in zip(tflite_shape, total_padding): + if isinstance(dimension, int) and dimension > 0: + padded_shape.append(dimension + padding) + + else: + # Variable shape + + if padding == 0: + padded_shape.append(dimension) + + else: + # Cannot add padding to a variable dimension. + logger.e( + logger.Code.CONVERSION_IMPOSSIBLE, + "Adding explicit padding to a variable sized tensor is not supported!", + ) + + return padded_shape + + +def convert_tensor_format_to_tflite(tensor_format: TensorFormat) -> TensorFormat: + """Convert the format of a tensor from ONNX to TFLite. + :return: The tensor_format converted to TFLite. + """ + if tensor_format is TensorFormat.CHANNELS_FIRST: + return TensorFormat.CHANNELS_LAST + + elif tensor_format not in (TensorFormat.FORMATLESS, TensorFormat.NONE): + logger.d( + f"translator.convert_tensor_format(): Got unexpected format '{tensor_format}'." 
+ ) + + return tensor_format + + +def dims_to_channels_first(channels_last_dimensions: List[int]) -> List[int]: + """Convert a list of ints which represent dimensions in the channels last (TFLite) format to the channels first + (ONNX) format. + """ + assert len(channels_last_dimensions) > 0, "Dimensions list is empty!" + + if len(channels_last_dimensions) == 1: + return [0] + + res = list(channels_last_dimensions) + + res.insert(1, res.pop()) # Insert 'C' (last item) to index 1 + + return res + + +def dims_to_channels_last(channels_first_dimensions: List[int]) -> List[int]: + """Convert a list of ints which represent dimensions in the channels first (ONNX) format to the channels last + (TFLite) format. + """ + assert len(channels_first_dimensions) > 0, "Dimensions list is empty!" + + if len(channels_first_dimensions) == 1: + return [0] + + res = list(channels_first_dimensions) + + res.append(res.pop(1)) # Move 'C' (idx 1) to the end + + return res + + +def collections_equal(col_a, col_b): + """Compare each individual element of both collections. + They can be any combination of lists, tuples or numpy arrays. + Return True if they are equal. + """ + if len(col_a) != len(col_b): + return False + + for a, b in zip(col_a, col_b): + if a != b: + return False + return True + + +def _calculate_effective_kernel_shape( + kernel_shape: List[int], dilations: Optional[List[int]] +) -> List[int]: + """Calculate the reach of a kernel with respect to its shape and dilations. + For example a [3, 3] kernel with dilations [2, 2] has effective shape of [5, 5]. + """ + + if dilations is None: + dilations = [1] * len(kernel_shape) + + return [(k - 1) * d + 1 for k, d in zip(kernel_shape, dilations)] + + +def _same_upper_equals_same_lower( + tflite_input_shape: List[int], + tflite_output_shape: List[int], + o_kernel_shape: List[int], + o_strides: Optional[List[int]] = None, + o_dilations: Optional[List[int]] = None, +) -> bool: + """Determine if in a given particular setting, the values of the ONNX `auto_pads` attribute SAME_UPPER and + SAME_LOWER represent the exact same padding. + """ + + padding, offset = tflite_compute_padding_with_offset( + tflite_input_shape, o_kernel_shape, tflite_output_shape, o_strides, o_dilations + ) + + # Only if offset for every dimension is 0, SAME_UPPER and SAME_LOWER will behave equally. + return all(elt == 0 for elt in offset) + + +def _tflite_padding_compute_output_size( + padding: tflPadding.Padding, + tflite_spatial_input_shape: List[int], + tflite_kernel_shape: List[int], + strides: Optional[List[int]] = None, + dilations: Optional[List[int]] = None, +) -> List[int]: + """ + Calculates the output shape of the tensor with particular setting as tflite would. 
Implementation corresponds to + tensorflow/lite/kernels/padding.h:ComputeOutSize() + :param padding: TFLite Padding value - 'Same' or 'Valid' + :param tflite_spatial_input_shape: input tensor shape + :param tflite_kernel_shape: convolution kernel shape + :param strides: strides (default is 1) + :param dilations: dilation (default is 1) + :return: Output shape of the tensor with particular padding settings + """ + if strides is None: + strides = [1] * len(tflite_kernel_shape) + + effective_kernel_shape = _calculate_effective_kernel_shape( + tflite_kernel_shape, dilations + ) + + if padding == tflPadding.Padding.SAME: + return [ + (in_shape + stride - 1) // stride + for in_shape, stride in zip(tflite_spatial_input_shape, strides) + ] + elif padding == tflPadding.Padding.VALID: + return [ + (in_shape + stride - ef_kernel_shape) // stride + for in_shape, stride, ef_kernel_shape in zip( + tflite_spatial_input_shape, strides, effective_kernel_shape + ) + ] + + +def tflite_compute_padding_with_offset( + tflite_input_shape: List[int], + tflite_kernel_shape: List[int], + tflite_output_shape: List[int], + strides: Optional[List[int]] = None, + dilations: Optional[List[int]] = None, +) -> (List[int], List[int]): + """ + Calculate padding and offset for each dimension for particular convolution setting as TFLite. + Implementation corresponds to tensorflow/lite/kernels/padding.h:ComputePaddingWithOffset() + :param tflite_input_shape: tensorflow lite input shape + :param tflite_kernel_shape: tensorflow lite kernel shape + :param tflite_output_shape: tensorflow lite output shape + :param strides: stride setting, default is 1 + :param dilations: dilation setting, default is 1 + :return: (padding, offset) - padding and offset for each axis. Padding is added on beginning and end of the axis. + Offset to be optionally added to end of the axis if odd. + """ + if strides is None: + strides = [1] * len(tflite_kernel_shape) + + spatial_input_shape = tflite_input_shape[1:-1] # The spatial portion of the input + spatial_output_shape = tflite_output_shape[ + 1:-1 + ] # The spatial portion of the output + + effective_kernel_shape = _calculate_effective_kernel_shape( + tflite_kernel_shape, dilations + ) + + total_padding = [ + (spatial_output - 1) * stride + effective_kernel - spatial_input + for spatial_output, stride, effective_kernel, spatial_input in zip( + spatial_output_shape, strides, effective_kernel_shape, spatial_input_shape + ) + ] + + padding = [tp // 2 for tp in total_padding] + offset = [tp % 2 for tp in total_padding] + + return padding, offset + + +def _is_same_padding( + o_pads: List[int], + tflite_input_shape: List[int], + tflite_output_shape: List[int], + o_kernel_shape: List[int], + o_strides: Optional[List[int]] = None, + o_dilations: Optional[List[int]] = None, +) -> bool: + """Determine if given ONNX 'pads' padding can be represented exactly with the TFLite 'SAME' padding type. + + :param o_pads: ONNX 'pads' attribute. + :param tflite_input_shape: The shape of the main input of the operator in TFLite format. + :param tflite_output_shape: The shape of the main output of the operator in TFLite format. + :param o_kernel_shape: ONNX 'kernel_shape' attribute. + :param o_strides: ONNX 'strides' attribute. Can be omitted. + :param o_dilations: ONNX 'dilations' attribute. Can be omitted. 
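+
+    Illustrative example (values chosen for this docstring): with o_pads=[1, 1, 1, 1],
+    tflite_input_shape=[1, 5, 5, 3], tflite_output_shape=[1, 5, 5, 8], o_kernel_shape=[3, 3] and
+    o_strides=[1, 1], the total padding per spatial dimension is (5 - 1) * 1 + 3 - 5 = 2, which
+    splits evenly into [1, 1] with zero offset, so the padding is representable as 'SAME' and the
+    function returns True.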
+ """ + + if len(tflite_input_shape) == 0 or len(tflite_output_shape) == 0: + logger.e( + logger.Code.INVALID_TENSOR_SHAPE, + f"Cannot verify that padding '{o_pads}' can be represented as 'SAME' for input shape " + f"'{tflite_input_shape}' and output shape '{tflite_output_shape}'.", + ) + + # Calculate if the output shape corresponds to Same padding setting in TFLite + tflite_spatial_input_shape = tflite_input_shape[1:-1] + tmp_spatial_output_shape = _tflite_padding_compute_output_size( + tflPadding.Padding.SAME, + tflite_spatial_input_shape, + o_kernel_shape, + o_strides, + o_dilations, + ) + if tmp_spatial_output_shape != tflite_output_shape[1:-1]: + return False + + # For every dimension, the padding is added to the start and end of the dimension. + # TFLite padding 'SAME' tries to split it evenly, but in case of odd padding, 'SAME' adds the excess 1 at the end. + # TFLite represents this in the offset. The offset is added to the end of particular dimension, + # i.e. bottom for H dim, right for W dim and so on. + # ONNX represents this in 'pads' as [x1_begin, x2_begin,... , x1_end, x2_end,...]. + padding, offset = tflite_compute_padding_with_offset( + tflite_input_shape, o_kernel_shape, tflite_output_shape, o_strides, o_dilations + ) + start_padding = padding + end_padding = [p + o for p, o in zip(padding, offset)] + effective_padding = start_padding + end_padding + + if effective_padding != o_pads: + return False + + return True + + +def permutations_are_inverse( + permutation1: Sequence[int], permutation2: Sequence[int] +) -> bool: + """Determine if given Transpose permutations are inverse of each other. + i.e. when applied back to back, there will be no effect. + + Example: + 0 3 1 2 + 0 2 3 1 + """ + + if len(permutation1) != len(permutation2): + logger.e( + logger.Code.INTERNAL_ERROR, + "translator.permutations_are_inverse(): permutations have different size!", + ) + + for i, perm2 in enumerate(permutation2): + if i != permutation1[perm2]: + return False + + return True + + +def combine_permutations( + permutation1: Sequence[int], permutation2: Sequence[int] +) -> List[int]: + """Combine 2 permutations into 1. + + :param permutation1: The first permutation to apply. + :param permutation2: The second permutation to apply. + :return: The combined permutation. + """ + if len(permutation1) != len(permutation2): + logger.e( + logger.Code.INTERNAL_ERROR, + "translator.combine_permutations(): permutations have different size!", + ) + + return [permutation1[perm2] for perm2 in permutation2] + + +def nhc_dimensions_to_nhwc(nhc_dimensions: List[int]) -> List[int]: + """Convert a list of ints representing the shape of an NHC tensor to NHWC, where W = 1.""" + nhwc_dimensions = nhc_dimensions.copy() + nhwc_dimensions.insert(2, 1) + + return nhwc_dimensions + + +def shape_from_numpy(numpy_array): + """Return a 'Shape' object representing the shape of given 'numpy_array'.""" + dims = list(numpy_array.shape) + return tflite_model.Shape(dims) + + +def onnx_explicit_padding_to_tflite(onnx_pads: list[int]) -> list[list[int]]: + """Convert the attribute or input 'pads' of the ONNX 'Pad' operator to the 'paddings' input of the TFLite 'Pad' + class of operators. + + This function does NOT take tensor formats into consideration. 
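+
+    Illustrative example: onnx_pads=[1, 2, 3, 4] (starts [1, 2], ends [3, 4]) is converted to the
+    per-dimension (start, end) pairs (1, 3) and (2, 4).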
+ """ + + start_padding = onnx_pads[ + : len(onnx_pads) // 2 + ] # Padding at the start of each dimension + end_padding = onnx_pads[ + len(onnx_pads) // 2 : + ] # Padding at the end of each dimension + + return list(zip(start_padding, end_padding)) + + +def onnx_pads_to_tflite_explicit_padding(onnx_pads: List[int]) -> List[List[int]]: + """Convert an ONNX attribute 'pads' of operators such as Conv, MaxPool or AveragePool, to a list of ints which is + compatible with the TFLite 'Pad' operator. + """ + + tflite_padding = onnx_explicit_padding_to_tflite(onnx_pads) + + # TFLite also allows padding to the 'batch' and 'channels'. ONNX does not + tflite_padding.insert(0, [0, 0]) + tflite_padding.append([0, 0]) + + return tflite_padding + + +def _get_explicit_tflite_padding_for_same_lower( + tflite_input_shape: List[int], + tflite_output_shape: List[int], + o_kernel_shape: List[int], + o_strides: Optional[List[int]] = None, + o_dilations: Optional[List[int]] = None, +) -> List[List[int]]: + """Get the TFLite explicit padding required to represent ONNX 'SAME_LOWER' auto_pad for a particular setting. + + :param tflite_input_shape: TFLite (NHWC) shape of the input tensor of the operator. + :param tflite_output_shape: TFLite (NHWC) shape of the output tensor of the operator. + :param o_kernel_shape: ONNX 'kernel_shape' attribute. + :param o_strides: Optional ONNX 'o_strides' attribute. + :param o_dilations: Optional ONNX 'o_dilations' attribute. + + :return: A TFLite style explicit padding, compatible with the TFLite 'Pad' operator. + """ + + padding, offset = tflite_compute_padding_with_offset( + tflite_input_shape, o_kernel_shape, tflite_output_shape, o_strides, o_dilations + ) + + start_padding = [ + p + o for p, o in zip(padding, offset) + ] # In case of odd padding, the excess is added at the start + end_padding = padding + + onnx_explicit_padding = start_padding + end_padding + + # Return explicit ONNX padding converted to TFLite padding + return onnx_pads_to_tflite_explicit_padding(onnx_explicit_padding) + + +def convert_padding( + o_auto_pad: str, + o_pads: List[int], + tflite_input_shape: List[int], + tflite_output_shape: List[int], + o_kernel_shape: List[int], + o_strides: Optional[List[int]], + o_dilations: Optional[List[int]] = None, +) -> Tuple[tflPadding.Padding, Optional[List[List[int]]]]: + """Convert ONNX operator attributes 'pads' and 'auto_pad' to TFLite. + + :param o_auto_pad: ONNX operator attribute 'auto_pad' + :param o_pads: ONNX operator attribute 'pads' + :param tflite_input_shape: The shape of the main input tensor in the TFLite format. + :param tflite_output_shape: The shape of the main output tensor in the TFLite format. + :param o_kernel_shape: ONNX operator attribute 'kernel_shape' + :param o_strides: ONNX operator attribute 'strides' + :param o_dilations: ONNX operator attribute 'dilations' + + :return: A tuple. + The first element is the converted TFLite padding. + The second is None, if conversion is finished. Or it is a list of ints representing the explicit + padding in TFLite format (compatible with the 'Pad' operator), which needs to be provided by a + 'Pad' operator. Caller must add this operator using model_builder! 
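+
+    Illustrative example (hypothetical values): o_auto_pad = 'SAME_UPPER' simply yields (Padding.SAME, None),
+    while an explicit 'pads' that 'SAME' cannot reproduce yields Padding.VALID together with an explicit
+    padding such as [[0, 0], [top, bottom], [left, right], [0, 0]], which the caller must realize with a
+    'Pad' operator.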
+ """ + + if o_auto_pad == "SAME_UPPER": + return tflPadding.Padding.SAME, None + + elif o_auto_pad == "SAME_LOWER": + if _same_upper_equals_same_lower( + tflite_input_shape, + tflite_output_shape, + o_kernel_shape, + o_strides, + o_dilations, + ): + return tflPadding.Padding.SAME, None + + else: + logger.d( + "'SAME_LOWER' auto_pad cannot be exactly represented in TFLite as padding 'SAME' or 'VALID'. " + "Inserting an extra 'Pad' operator." + ) + tflite_explicit_padding = _get_explicit_tflite_padding_for_same_lower( + tflite_input_shape, + tflite_output_shape, + o_kernel_shape, + o_strides, + o_dilations, + ) + return tflPadding.Padding.VALID, tflite_explicit_padding + + elif o_auto_pad == "VALID": + return tflPadding.Padding.VALID, None + + # auto_pad is NOTSET -> use explicit padding + elif o_pads is None or all(val == 0 for val in o_pads): + # No padding in any direction + return tflPadding.Padding.VALID, None + + elif _is_same_padding( + o_pads, + tflite_input_shape, + tflite_output_shape, + o_kernel_shape, + o_strides, + o_dilations, + ): + # Explicit padding can be represented with TFLite 'SAME' padding. + return tflPadding.Padding.SAME, None + + else: + # 'pads' cannot be converted directly. Return 'VALID' and the required explicit padding and caller must + # implement conversion by adding a 'Pad' operator. + + logger.d( + "Explicit ONNX 'pads' cannot be represented directly as 'SAME' or 'VALID'. " + "Inserting an extra 'Pad' operator." + ) + + # ONNX 'pads' uses different format than TFLite 'Pad' operator. Convert the explicit padding. + tflite_explicit_padding = onnx_pads_to_tflite_explicit_padding(o_pads) + + return tflPadding.Padding.VALID, tflite_explicit_padding + + +def convert_data_to_channels_first(array: np.ndarray) -> np.ndarray: + """Convert a numpy array representing the data of a tensor from the channels last format (TFLite), to channels + first format (ONNX). + + :param array: Numpy array holding the tensor's data. + :return: The transformed data. + """ + if len(array.shape) < 3: + logger.e( + logger.Code.INTERNAL_ERROR, + f"translator.convert_data_to_channels_first(): 'array' only has '{len(array.shape)}' dimensions!", + ) + + return np.moveaxis(array, -1, 1) # Move last axis (C), to index 1 + + +def convert_data_to_channels_last(array: np.ndarray) -> np.ndarray: + """Convert a numpy array representing the data of a tensor from the channels first format (ONNX), to channels last + format (TFLite). + + :param array: Numpy array holding the tensor's data. + :return: The transformed data. 
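+
+    Illustrative example: an ONNX-style array of shape (1, 3, 8, 8) (NCHW) is returned with shape
+    (1, 8, 8, 3) (NHWC).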
+ """ + if len(array.shape) < 3: + logger.e( + logger.Code.INTERNAL_ERROR, + f"translator.convert_data_to_channels_last(): 'array' only has '{len(array.shape)}' dimensions!", + ) + + return np.moveaxis(array, 1, -1) # Move the second axis (C), to the end + + +def channels_first_shape_to_channels_last( + channels_first_shape: tflite_model.Shape, +) -> tflite_model.Shape: + """Create a channels last version of a channels first 'tflite_model.Shape' object.""" + + dims = channels_first_shape.vector.copy() + dims = dims_to_channels_last(dims) + + return tflite_model.Shape(dims) + + +def channels_last_shape_to_channels_first( + nhwc_shape: tflite_model.Shape, +) -> tflite_model.Shape: + """Create a channels first version of a channels last 'tflite_model.Shape' object.""" + + dims = nhwc_shape.vector.copy() + dims = dims_to_channels_first(dims) + + return tflite_model.Shape(dims) + + +def convert_onnx_dimensions_to_tflite_shape(o_dims: List[int]) -> tflite_model.Shape: + """Convert list of ints representing the shape of an ONNX channels first Tensor to a TFLite 'Shape' object.""" + + dims = list(o_dims) # Copy just in case + + dims = dims_to_channels_last(dims) + + return tflite_model.Shape(dims) + + +def create_channels_last_to_channels_first_permutation( + rank: int, return_list: bool = False +) -> np.ndarray | list[int]: + """Return a numpy array with data that describes the permutation, which would change a tensor from the channels + last (TFLite) format to the channels first (ONNX) format. + + This permutation is compatible with the TFLite `Transpose` operator. + + :param rank: The rank of the required permutation. + :param return_list: If True, the function returns a list of ints. If False, a numpy array is returned. + :return: A numpy array, or a list of ints, representing the desired permutation. + """ + + perm = dims_to_channels_first(list(range(rank))) + + if return_list: + return perm + else: + return np.asarray(perm, np.int32) + + +def create_channels_first_to_channels_last_permutation( + rank: int, return_list: bool = False +) -> np.ndarray | list[int]: + """Return a numpy array with data that describes the permutation, which would change a tensor from the channels + first (ONNX) format to the channels last (TFLite) format. + + This permutation is compatible with the TFLite `Transpose` operator. + + :param rank: The rank of the required permutation. + :param return_list: If True, the function returns a list of ints. If False, a numpy array is returned. + :return: A numpy array, or a list of ints, representing the desired permutation. + """ + + perm = dims_to_channels_last(list(range(rank))) + + if return_list: + return perm + else: + return np.asarray(perm, np.int32) + + +def create_axis_to_last_perm(axis, num_dims): + """Create a numpy array representing the transpose permutations needed, to + make the 'axis' dimension, the last dimension. + """ + + dims = list(range(num_dims)) + + if axis == num_dims - 1: + return dims + elif axis >= num_dims or axis < 0: + logger.e( + logger.Code.INTERNAL_ERROR, + f"translator.create_axis_to_last_perm({axis},{num_dims}). Inputs don't make sense!", + ) + + # Remember axis dimension + axis_dim = dims[axis] + + # Move dimensions after 'axis' to the left + dims[axis:-1] = dims[axis + 1 : -1] + + # Add axis dimension to the end + dims.append(axis_dim) + + return np.asarray(dims, np.int32) + + +def apply_permutation_to(target: List[Any], permutation: Collection[int]) -> List: + """Permute a list according to a permutation. 
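+    For example, apply_permutation_to(['a', 'b', 'c'], [2, 0, 1]) returns ['c', 'a', 'b'].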
Uses the same permutation format as the TFLite Transpose operator. + + :param target: A list of any types, to permute. Must be same size as the permutation. + :param permutation: The permutation to apply to the target. + :return: Permuted list. + """ + + if len(target) != len(permutation): + logger.e( + logger.Code.INTERNAL_ERROR, + "translator.apply_permutation_to(): 'target' and 'permutation' have different length!", + ) + + return [target[perm] for perm in permutation] + + +def create_inverse_permutation(permutation: List[int]) -> List[int]: + """Create and return a permutation, that is the inverse of the given 'permutation' parameter. + Uses the same permutation format as the TFLite Transpose operator. + + :param permutation: The permutation to create the inverse of. + :return: Inverse permutation. + """ + + if set(permutation) != set(range(len(permutation))): + # Irreversible permutation. For example [0, 1, 2, 2] (information is lost by applying permutation). + logger.e( + logger.Code.INTERNAL_ERROR, + "translator.create_inverse_permutation(): permutation is not reversible!", + ) + + return [permutation.index(perm) for perm in range(len(permutation))] + + +def get_max_value_for_type(dtype: np.dtype) -> any: + """Return the maximum possible value for given numpy type.""" + if dtype.kind in ("i", "u"): + return np.iinfo(dtype).max + + elif dtype.kind == "f": + return np.finfo(dtype).max + + else: + logger.e( + logger.Code.INTERNAL_ERROR, + f"translator.get_max_value_for_type(): unexpected type {dtype.name}.", + ) + + +def get_min_value_for_type(dtype: np.dtype) -> any: + """Return the minimum possible value for given numpy type.""" + if dtype.kind in ("i", "u"): + return np.iinfo(dtype).min + + elif dtype.kind == "f": + return np.finfo(dtype).min + + else: + logger.e( + logger.Code.INTERNAL_ERROR, + f"translator.get_min_value_for_type(): unexpected type {dtype.name}.", + ) + + +def convert_data_type(torch_type: torch.TensorType) -> TensorType: + """Convert Torch DataType to TFLite TensorType""" + + if torch_type == torch.float32: + return TensorType.FLOAT32 + + elif torch_type == torch.uint8: + return TensorType.UINT8 + + elif torch_type == torch.int8: + return TensorType.INT8 + + elif torch_type == torch.int32: + return TensorType.INT32 + + elif torch_type == torch.int64: + return TensorType.INT64 + + elif torch_type == torch.bool: + return TensorType.BOOL + + else: + logger.e( + logger.Code.NOT_IMPLEMENTED, + f"Conversion of Torch type '{torch_type}' not supported.", + ) + + +def torch_type_to_numpy_type(torch_type: torch.TensorType) -> np.ScalarType: + """Convert Torch DataType to TFLite TensorType""" + + if torch_type == torch.float32: + return np.dtype(np.float32) + + elif torch_type == torch.uint8: + return np.dtype(np.uint8) + + elif torch_type == torch.int8: + return np.dtype(np.int8) + + elif torch_type == torch.int32: + return np.dtype(np.int32) + + elif torch_type == torch.int64: + return np.dtype(np.int64) + + else: + logger.e( + logger.Code.NOT_IMPLEMENTED, + f"Conversion of Torch type '{torch_type}' not supported.", + ) + + +def numpy_type_to_tf_lite(numpy_type: np.dtype) -> TensorType: # noqa C901 + """Convert the numpy data type to a corresponding TFLite 'TensorType'. + + :param numpy_type: Numpy dtype to convert. + :return: Corresponding TFLite TensorType. 
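+
+    Illustrative example: numpy_type_to_tf_lite(np.dtype(np.int8)) returns TensorType.INT8.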
+ """ + numpy_type = numpy_type.type + + if numpy_type == np.float32: + return TensorType.FLOAT32 + + elif numpy_type == np.uint8: + return TensorType.UINT8 + + elif numpy_type == np.int8: + return TensorType.INT8 + + elif numpy_type == np.uint16: + return TensorType.UINT16 + + elif numpy_type == np.int16: + return TensorType.INT16 + + elif numpy_type == np.int32: + return TensorType.INT32 + + elif numpy_type == np.int64: + return TensorType.INT64 + + elif numpy_type == np.string_: + return TensorType.STRING + + elif numpy_type == np.bool_: + return TensorType.BOOL + + elif numpy_type == np.float16: + return TensorType.FLOAT16 + + elif numpy_type == np.float64: + return TensorType.FLOAT64 + elif numpy_type == np.double: + return TensorType.FLOAT64 + + elif numpy_type == np.uint32: + return TensorType.UINT32 + + elif numpy_type == np.uint64: + return TensorType.UINT64 + + elif numpy_type == np.complex64: + return TensorType.COMPLEX64 + + elif numpy_type == np.complex128: + return TensorType.COMPLEX128 + + else: + logger.e( + logger.Code.CONVERSION_IMPOSSIBLE, + f"Cannot convert numpy data type '{numpy_type}' to TFLite.", + ) + + +def tf_lite_type_to_numpy(tfl_type: TensorType) -> np.ScalarType: # noqa C901 + """Convert TFLite TensorType to numpy dtype""" + + if tfl_type == TensorType.FLOAT32: + return np.dtype(np.float32) + + elif tfl_type == TensorType.UINT8: + return np.dtype(np.uint8) + + elif tfl_type == TensorType.INT8: + return np.dtype(np.int8) + + elif tfl_type == TensorType.UINT16: + return np.dtype(np.uint16) + + elif tfl_type == TensorType.INT16: + return np.dtype(np.int16) + + elif tfl_type == TensorType.INT32: + return np.dtype(np.int32) + + elif tfl_type == TensorType.INT64: + return np.dtype(np.int64) + + elif tfl_type == TensorType.STRING: + return np.dtype(np.string_) + + elif tfl_type == TensorType.BOOL: + return np.dtype(np.bool_) + + elif tfl_type == TensorType.FLOAT16: + return np.dtype(np.float16) + + elif tfl_type == TensorType.FLOAT64: + return np.dtype(np.float64) + + elif tfl_type == TensorType.UINT32: + return np.dtype(np.uint32) + + elif tfl_type == TensorType.UINT64: + return np.dtype(np.uint64) + + elif tfl_type == TensorType.COMPLEX64: + return np.dtype(np.complex64) + + elif tfl_type == TensorType.COMPLEX128: + return np.dtype(np.complex128) + + else: + logger.e( + logger.Code.CONVERSION_IMPOSSIBLE, + f"Cannot convert TFLite type '{tfl_type}' to numpy dtype.", + ) + + +def tflite_type_to_tensor_flow_data_type(tfl_type: TensorType) -> TensorFlowDataType: + """Convert TFLite TensorType to the internal type of TensorFlow.""" + match tfl_type: + case TensorType.FLOAT16: + # There seems to be no counterpart in the TF DataType. 
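+            # (TensorFlow itself defines DT_HALF for float16; if the generated TensorFlowDataType enum
+            # ever exposes it, this branch could return it instead of failing.)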
+ logger.e( + logger.Code.INTERNAL_ERROR, + "tflite_type_to_tensor_flow_data_type(): float16.", + ) + case TensorType.FLOAT32: + return TensorFlowDataType.DT_FLOAT.value + case TensorType.FLOAT64: + return TensorFlowDataType.DT_DOUBLE.value + + case TensorType.INT4: + return TensorFlowDataType.DT_INT4.value + case TensorType.INT8: + return TensorFlowDataType.DT_INT8.value + case TensorType.INT16: + return TensorFlowDataType.DT_INT16.value + case TensorType.INT32: + return TensorFlowDataType.DT_INT32.value + case TensorType.INT64: + return TensorFlowDataType.DT_INT64.value + + case TensorType.UINT8: + return TensorFlowDataType.DT_UINT8.value + case TensorType.UINT16: + return TensorFlowDataType.DT_UINT16.value + case TensorType.UINT32: + return TensorFlowDataType.DT_UINT32.value + case TensorType.UINT64: + return TensorFlowDataType.DT_UINT64.value + + case TensorType.COMPLEX64: + return TensorFlowDataType.DT_COMPLEX64.value + case TensorType.COMPLEX128: + return TensorFlowDataType.DT_COMPLEX128.value + + case TensorType.STRING: + return TensorFlowDataType.DT_STRING.value + + case TensorType.BOOL: + return TensorFlowDataType.DT_BOOL.value + + case TensorType.RESOURCE: + return TensorFlowDataType.DT_RESOURCE.value + case TensorType.VARIANT: + return TensorFlowDataType.DT_VARIANT.value + + case _: + # All TFLite types are covered. Must be an invalid type. + logger.e( + logger.Code.INTERNAL_ERROR, + f"tflite_type_to_tensor_flow_data_type(): invalid TFLite type `{tfl_type}`.", + ) + + +def infer_kernel_shape(weight_tensor: tflite_model.Tensor) -> list[int]: + """Returns the kernel shape inferred from the weight tensor. + + Weight tensors shape expected in TFlite Format, where the 0th index is output channels count, last is input channels + count. + """ + return weight_tensor.shape.vector[1:-1] diff --git a/backends/nxp/backend/ir/converter/node_converter.py b/backends/nxp/backend/ir/converter/node_converter.py new file mode 100755 index 00000000000..a953e8e976a --- /dev/null +++ b/backends/nxp/backend/ir/converter/node_converter.py @@ -0,0 +1,189 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from abc import ABC, abstractmethod +from enum import Enum +from typing import Collection + +import torch + +from executorch.backends.nxp.backend.ir.conversion_context import ConversionContext +from executorch.backends.nxp.backend.ir.converter.builder.aten_model_builder_director import ( + AtenModelBuilderDirector, +) +from executorch.backends.nxp.backend.ir.tflite_generator import tflite_model +from executorch.exir.dialects._ops import ops as exir_ops +from torch.fx import Node +from torch.nn import Parameter + + +def _is_quant_node(node: torch.fx.Node) -> bool: + return node.target in [ + exir_ops.edge.quantized_decomposed.quantize_per_channel.default, + exir_ops.edge.quantized_decomposed.quantize_per_tensor.default, + exir_ops.edge.quantized_decomposed.quantize_per_tensor.tensor, + ] + + +def _is_dequant_node(node: torch.fx.Node) -> bool: + return node.target in [ + exir_ops.edge.quantized_decomposed.dequantize_per_channel.default, + exir_ops.edge.quantized_decomposed.dequantize_per_tensor.default, + exir_ops.edge.quantized_decomposed.dequantize_per_tensor.tensor, + ] + + +class Target(Enum): + IGNORE = "ignore" # No target platform. Any target specific restrictions will be ignored. 
+
+    RT700 = "imxrt700"
+    IMX95 = "imx95"
+
+    @classmethod
+    def values(cls) -> list[str]:
+        return [elt.value for elt in cls]
+
+
+class NodeConverter(ABC):
+    """
+    Classes which implement conversion of torch.Node to TFLite should inherit from this class and overwrite the
+    'convert()' method.
+    """
+
+    context: ConversionContext
+    supported_targets: Collection
+
+    def __init__(self, context: ConversionContext):
+        self.context = context
+
+    @abstractmethod
+    def convert(self, node: Node):
+        """Convert the torch.Node in 'node' to TFLite and append the changes to the ModelBuilder.
+
+        Classes which implement conversion for individual operators must overwrite this method.
+
+        :param node: torch.Node to convert.
+        """
+        pass
+
+    # noinspection PyPep8Naming
+    @staticmethod
+    @abstractmethod
+    def _is_supported_in_IR(
+        node: Node, parameters_mapping: dict[str, Parameter]
+    ) -> bool:
+        """Check if the `node` can be converted to the intermediate representation.
+        Classes which implement conversion for individual operators must overwrite this method.
+
+        :param node: torch.Node to check.
+        """
+        pass
+
+    @classmethod
+    def _is_supported_on_target(cls, target: Target) -> bool:
+        """Check if the node is supported on the target platform. It uses the `supported_targets` attribute, which
+        is a list of supported target platforms, and it must be defined by the specific `NodeConverter`.
+
+        :param target: Value of the `Target` enum representing the target platform to check for.
+        """
+        if not (
+            hasattr(cls, "supported_targets")
+            and isinstance(cls.supported_targets, Collection)
+        ):
+            raise NotImplementedError(
+                f"The NodeConverter `{cls}` does not define its `supported_targets` collection."
+            )
+
+        return target == Target.IGNORE or target in cls.supported_targets
+
+    @classmethod
+    def is_supported(
+        cls, node: Node, target: Target, parameters_mapping: dict[str, Parameter]
+    ) -> bool:
+        """Check if the given `node` is supported in the IR and on the given `target` platform.
+
+        :param node: torch.Node to check.
+        :param target: Value of the `Target` enum representing the target platform to check for.
+        :param parameters_mapping: Dict mapping tensor names to their data.
+        """
+        return cls._is_supported_in_IR(
+            node, parameters_mapping
+        ) and cls._is_supported_on_target(target)
+
+    @staticmethod
+    def _has_shared_q_params_if_quantized(node: Node) -> bool:
+        """Check if the node shares its quantization parameters with its neighbors, in case it is quantized."""
+        if len(node.users) < 1 or len(node.all_input_nodes) < 1:
+            # Some exotic operator, which only consumes or only produces tensors.
+            return True
+
+        pre_node = node.prev
+        post_node = node.next
+
+        if pre_node is not node.all_input_nodes[0] or post_node not in node.users:
+            raise RuntimeError(
+                "Prev & next nodes are not the same as the inputs and outputs."
+            )
+
+        if _is_dequant_node(pre_node) and _is_quant_node(post_node):
+            # The node is quantized. The Q/DQ ops have the signature (input, scale, zero_point, qmin, qmax, dtype).
+            pre_scale = pre_node.args[1]
+            pre_zp = pre_node.args[2]
+            pre_type = pre_node.args[5]
+
+            post_scale = post_node.args[1]
+            post_zp = post_node.args[2]
+            post_type = post_node.args[5]
+
+            # Q-params match?
+            return (
+                pre_zp == post_zp and pre_scale == post_scale and pre_type == post_type
+            )
+
+        # The node is not quantized.
+        return True
+
+    def assert_convertible(self, node):
+        """Assert that the call `_is_supported_in_IR()` returns `True`. Otherwise, raise an exception and print an
+        error message.
+        """
+        assert self._is_supported_in_IR(node, self.context.parameters_mapping), (
+            f"Node `{node}` is not convertible to the intermediate representation.
" + "There is an error in the partitioner." + ) + + @property + def builder(self) -> AtenModelBuilderDirector: + """ + Get instance of TFLite ModelBuilder from conversion context. + :return: AtenModelBuilderDirector instance. + """ + return self.context.tflite_builder + + def _create_tflite_op_with_io_tensors(self, node: Node) -> tflite_model.Operator: + """ + Create TFLite op wrapper with input/output tensors added into 'tmp_inputs' and 'tmp_outputs'. + + :param node: Node instance. + :return: TFLite operator with assigned input/output tensors. + """ + t_operator = tflite_model.Operator() + + # Initialize node's inputs + t_operator.inputs = tflite_model.OperatorInputs() + for ancestor_node in node.all_input_nodes: + assert self.context.tflite_builder.tensor_exists(ancestor_node.name) + t_operator.tmp_inputs.append( + self.context.tflite_builder.tensor_for_name(ancestor_node.name) + ) + + # Add node's output as a new tensor + assert self.context.tflite_builder.tensor_exists(node.name) + t_operator.outputs = tflite_model.OperatorOutputs() + t_operator.tmp_outputs.append( + self.context.tflite_builder.tensor_for_name(node.name) + ) + + return t_operator diff --git a/backends/nxp/backend/ir/converter/node_converters/__init__.py b/backends/nxp/backend/ir/converter/node_converters/__init__.py new file mode 100755 index 00000000000..9ccf2983b2d --- /dev/null +++ b/backends/nxp/backend/ir/converter/node_converters/__init__.py @@ -0,0 +1,4 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. diff --git a/backends/nxp/backend/ir/converter/node_converters/ops_converters/__init__.py b/backends/nxp/backend/ir/converter/node_converters/ops_converters/__init__.py new file mode 100755 index 00000000000..7ed81272091 --- /dev/null +++ b/backends/nxp/backend/ir/converter/node_converters/ops_converters/__init__.py @@ -0,0 +1,51 @@ +from executorch.backends.nxp.backend.ir.converter.node_converters.ops_converters.addmm_converter import ( + AddMMConverter, +) +from executorch.backends.nxp.backend.ir.converter.node_converters.ops_converters.avg_pool_2d_converter import ( + AvgPool2dConverter, +) +from executorch.backends.nxp.backend.ir.converter.node_converters.ops_converters.constant_pad_nd_converter import ( + ConstantPadNDConverter, +) +from executorch.backends.nxp.backend.ir.converter.node_converters.ops_converters.convolution_converter import ( + ConvolutionConverter, +) +from executorch.backends.nxp.backend.ir.converter.node_converters.ops_converters.max_pool_2d_converter import ( + MaxPool2dConverter, +) +from executorch.backends.nxp.backend.ir.converter.node_converters.ops_converters.mm_converter import ( + MMConverter, +) +from executorch.backends.nxp.backend.ir.converter.node_converters.ops_converters.permute_copy_converter import ( + PermuteCopyConverter, +) +from executorch.backends.nxp.backend.ir.converter.node_converters.ops_converters.qdq_dequantize_converter import ( + QDQDequantizeConverter, +) +from executorch.backends.nxp.backend.ir.converter.node_converters.ops_converters.qdq_quantize_converter import ( + QDQQuantizeConverter, +) +from executorch.backends.nxp.backend.ir.converter.node_converters.ops_converters.relu_converter import ( + ReLUConverter, +) +from executorch.backends.nxp.backend.ir.converter.node_converters.ops_converters.softmax_converter import ( + SoftmaxConverter, +) +from executorch.backends.nxp.backend.ir.converter.node_converters.ops_converters.view_copy_converter 
import ( + ViewCopyConverter, +) + +__all__ = [ + "AddMMConverter", + "ConvolutionConverter", + "MMConverter", + "PermuteCopyConverter", + "SoftmaxConverter", + "ViewCopyConverter", + "QDQDequantizeConverter", + "QDQQuantizeConverter", + "ConstantPadNDConverter", + "ReLUConverter", + "MaxPool2dConverter", + "AvgPool2dConverter", +] diff --git a/backends/nxp/backend/ir/converter/node_converters/ops_converters/addmm_converter.py b/backends/nxp/backend/ir/converter/node_converters/ops_converters/addmm_converter.py new file mode 100644 index 00000000000..820d1414f3b --- /dev/null +++ b/backends/nxp/backend/ir/converter/node_converters/ops_converters/addmm_converter.py @@ -0,0 +1,62 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from executorch.backends.nxp.backend.edge_helper import input_rank +from executorch.backends.nxp.backend.ir.converter.conversion.common import OpsList +from executorch.backends.nxp.backend.ir.converter.node_converter import ( + NodeConverter, + Target, +) +from executorch.backends.nxp.backend.ir.tflite_generator.builtin_options import ( + fully_connected_options, +) +from torch.fx import Node +from torch.nn import Parameter + + +class AddMMConverter(NodeConverter): + """Convert the `aten.addmm` operator to TFLite `FullyConnected` with a bias input.""" + + @staticmethod + def _is_supported_in_IR( + node: Node, parameters_mapping: dict[str, Parameter] + ) -> bool: + if len(node.all_input_nodes) != 3: + return False + + # The weights must be 2D. + if input_rank(node, 2) != 2: + return False + + return True + + supported_targets = [Target.RT700] + + def convert(self, node: Node): + self.assert_convertible(node) + + t_op = self._create_tflite_op_with_io_tensors(node) + t_op.builtin_options = fully_connected_options.FullyConnected( + keep_num_dims=True + ) + + bias = t_op.tmp_inputs[0] + x = t_op.tmp_inputs[1] + w = t_op.tmp_inputs[2] + y = t_op.tmp_outputs[0] + + # Assign the operator its TFLite inputs and outputs + t_op.tmp_inputs = [x, w, bias] + t_op.tmp_outputs = [y] + + ops = OpsList(middle_op=t_op) + + # The `aten.addmm` uses main input with shape [M, N] and the weights have the shape [N, O]. + # TFLite `FullyConnected` requires the weights to have shape [O, N] (if the main input has shape [M, N]). + # Insert a `Transpose` operator to permute the weights to achieve correct conversion. (The `Transpose` will not + # be present in the output model if the weights are static.) + ops.add_pre(self.builder.create_transpose_operator_before(t_op, 1, [1, 0])) + + self.builder.append_operators(ops.flatten()) diff --git a/backends/nxp/backend/ir/converter/node_converters/ops_converters/avg_pool_2d_converter.py b/backends/nxp/backend/ir/converter/node_converters/ops_converters/avg_pool_2d_converter.py new file mode 100644 index 00000000000..41150f52d98 --- /dev/null +++ b/backends/nxp/backend/ir/converter/node_converters/ops_converters/avg_pool_2d_converter.py @@ -0,0 +1,85 @@ +# Copyright (c) 2025 NXP +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +from executorch.backends.nxp.backend.ir.converter.conversion import ( + aten_translator, + common, +) +from executorch.backends.nxp.backend.ir.converter.conversion.common import OpsList +from executorch.backends.nxp.backend.ir.converter.node_converter import ( + NodeConverter, + Target, +) +from executorch.backends.nxp.backend.ir.tflite_generator import tflite_model +from executorch.backends.nxp.backend.ir.tflite_generator.builtin_options import ( + average_pool_2d_options, +) +from torch.fx import Node +from torch.nn import Parameter + + +class AvgPool2dConverter(NodeConverter): + supported_targets = [Target.RT700] + + @staticmethod + def _is_supported_in_IR( + node: Node, parameters_mapping: dict[str, Parameter] + ) -> bool: + n_args = len(node.args) + + padding = node.args[3] if n_args >= 4 else [0, 0] + ceil_mode = node.args[4] if n_args >= 5 else False + count_include_pad = node.args[5] if n_args >= 6 else True + divisor_override = node.args[6] if n_args == 7 else None + _, explicit_padding = aten_translator.convert_padding(padding) + + if ( + (not count_include_pad and explicit_padding is not None) + or divisor_override is not None + or ceil_mode + ): + return False + + if not NodeConverter._has_shared_q_params_if_quantized(node): + return False + + return True + + # noinspection PyMethodMayBeStatic + def _convert_2d_avg_pool( + self, kernel_size, stride, padding, t_op: tflite_model.Operator + ) -> list[tflite_model.Operator]: + ops = OpsList(middle_op=t_op) + t_op.builtin_options = average_pool_2d_options.AveragePool2D() + t_op.builtin_options.filter_h = kernel_size[0] + t_op.builtin_options.filter_w = kernel_size[1] + common.assign_2d_strides(t_op.builtin_options, stride) + t_op.builtin_options.padding, explicit_padding = ( + aten_translator.convert_padding(padding) + ) + + if explicit_padding is not None: + # Need to prepend a 'Pad' operator, which adds 0s. But these will be included in the computation! + ops.add_pre( + self.builder.create_pad_operator_before(t_op, 0, explicit_padding) + ) + + return ops.flatten() + + # AvgPool2d Node format: (Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False + # bool count_include_pad=True, int? divisor_override=None) + def convert(self, node: Node): + """Convert 'avg_pool2d' operator to TFLite 'AveragePool2D'.""" + self.assert_convertible(node) + + kernel_size = node.args[1] + stride = node.args[2] + padding = node.args[3] if len(node.args) >= 4 else [0, 0] + + t_op = self._create_tflite_op_with_io_tensors(node) + + ops_to_add = self._convert_2d_avg_pool(kernel_size, stride, padding, t_op) + self.builder.append_operators(ops_to_add) diff --git a/backends/nxp/backend/ir/converter/node_converters/ops_converters/constant_pad_nd_converter.py b/backends/nxp/backend/ir/converter/node_converters/ops_converters/constant_pad_nd_converter.py new file mode 100644 index 00000000000..761840c379f --- /dev/null +++ b/backends/nxp/backend/ir/converter/node_converters/ops_converters/constant_pad_nd_converter.py @@ -0,0 +1,129 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +import copy +from typing import Collection + +import numpy as np + +from executorch.backends.nxp.backend.edge_helper import input_rank +from executorch.backends.nxp.backend.ir.converter.conversion.common import OpsList +from executorch.backends.nxp.backend.ir.converter.conversion.translator import ( + apply_permutation_to, + create_channels_first_to_channels_last_permutation, + tf_lite_type_to_numpy, +) +from executorch.backends.nxp.backend.ir.converter.node_converter import ( + NodeConverter, + Target, +) +from executorch.backends.nxp.backend.ir.converter.quantization_utils import ( + quantize_int8, +) +from executorch.backends.nxp.backend.ir.tflite_generator import tflite_model +from executorch.backends.nxp.backend.ir.tflite_generator.builtin_options import ( + pad_v2_options, +) +from torch.fx import Node +from torch.nn import Parameter + + +class ConstantPadNDConverter(NodeConverter): + supported_targets = [Target.RT700] + + @staticmethod + def _is_supported_in_IR( + node: Node, parameters_mapping: dict[str, Parameter] + ) -> bool: + paddings = node.args[1] + + # https://github.com/pytorch/pytorch/blob/v2.4.0/aten/src/ATen/native/PadNd.cpp#L38-L40 + if len(paddings) > (input_rank(node, 0) * 2): + return False + + # https://github.com/pytorch/pytorch/blob/v2.4.0/aten/src/ATen/native/PadNd.cpp#L30-L31 + if len(paddings) % 2 != 0: + return False + + if not NodeConverter._has_shared_q_params_if_quantized(node): + return False + + return True + + # noinspection PyMethodMayBeStatic + def _convert_paddings_to_tflite( + self, paddings: Collection[int], input_tensor: tflite_model.Tensor + ) -> list[int]: + """Convert the PyTorch paddings to TFLite paddings. + The PyTorch padding is added to the individual dimensions from the back (slightly confusing), see: + https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html#torch.nn.functional.pad + TFLite padding has shape [input_rank, 2], where start padding and end padding is specified for every + corresponding dimension. + + :param paddings: The PyTorch paddings. + :param input_tensor: Main input tensor of the `aten.constant_pad_nd` operator. + :return: The equivalent TFLite paddings. + """ + + # 1st, group the individual paddings into groups of 2 (padding at the start and at the end for every dimension). + paddings = np.array(paddings).reshape(-1, 2) + + # 2nd, reverse the padding groups. (The order is inverse between PyTorch and TFLite). + paddings = list(reversed(paddings)) + + # 3rd, add [0, 0]s from the start to get `rank` padding groups. + paddings = [[0, 0]] * (input_tensor.rank - len(paddings)) + paddings + + if input_tensor.tensor_format.is_channels_last(): + # Permute the `tfl_paddings` to match. 
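+            # the channels last dimension order of the input. E.g. for a rank-4 input, padding groups
+            # ordered [N, C, H, W] end up ordered [N, H, W, C], mirroring the permutation applied below.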
+ to_tflite_perm = create_channels_first_to_channels_last_permutation( + input_tensor.rank + ) + paddings = apply_permutation_to(paddings, to_tflite_perm) + + return paddings + + def convert(self, node: Node): + """Convert the `aten.constant_pad_nd` operator to TFLite `PadV2`.""" + self.assert_convertible(node) + + t_op = self._create_tflite_op_with_io_tensors(node) + + x = t_op.tmp_inputs[0] + y = t_op.tmp_outputs[0] + paddings = node.args[1] + constant = node.args[2] + + paddings = self._convert_paddings_to_tflite(paddings, x) + paddings_tensor = self.builder.create_tensor_for_data( + np.asarray(paddings, "int32"), "paddings" + ) + + if x.quantization is None: + constant_tensor = self.builder.create_tensor_for_data( + np.array([constant], tf_lite_type_to_numpy(x.type)), "constant" + ) + else: + quantization = copy.copy(x.quantization) + scale, zero_point = ( + quantization.scale.vector, + quantization.zero_point.vector, + ) + constant_data = quantize_int8( + np.array([constant], np.float32), scale, zero_point + ) + constant_tensor = self.builder.create_tensor_for_data( + constant_data, "constant" + ) + constant_tensor.quantization = quantization + + # Assign the operator its TFLite inputs and outputs. + t_op.tmp_inputs = [x, paddings_tensor, constant_tensor] + t_op.tmp_outputs = [y] + t_op.builtin_options = pad_v2_options.PadV2() + + ops_to_add = OpsList(middle_op=t_op) + + self.builder.append_operators(ops_to_add.flatten()) diff --git a/backends/nxp/backend/ir/converter/node_converters/ops_converters/convolution_converter.py b/backends/nxp/backend/ir/converter/node_converters/ops_converters/convolution_converter.py new file mode 100644 index 00000000000..efecebfc783 --- /dev/null +++ b/backends/nxp/backend/ir/converter/node_converters/ops_converters/convolution_converter.py @@ -0,0 +1,127 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import numpy as np +import torch + +from executorch.backends.nxp.backend.edge_helper import input_tensor, input_tensor_safe +from executorch.backends.nxp.backend.ir.converter.conversion import ( + aten_translator, + common, +) +from executorch.backends.nxp.backend.ir.converter.conversion.common import ( + OpsList, + try_get_input, +) +from executorch.backends.nxp.backend.ir.converter.node_converter import ( + NodeConverter, + Target, +) +from executorch.backends.nxp.backend.ir.converter.quantization_utils import ( + set_quantization_parameters_to_tensor, +) +from executorch.backends.nxp.backend.ir.lib.tflite.TensorType import TensorType +from executorch.backends.nxp.backend.ir.tflite_generator import tflite_model +from executorch.backends.nxp.backend.ir.tflite_generator.builtin_options import ( + conv_2d_options, +) +from torch.fx import Node +from torch.nn import Parameter + + +class ConvolutionConverter(NodeConverter): + supported_targets = [Target.RT700] + + @staticmethod + def _is_supported_in_IR( + node: Node, parameters_mapping: dict[str, Parameter] + ) -> bool: + is_transposed = node.args[6] + output_padding = node.args[7] + groups = node.args[8] + + if is_transposed: + return False + + if output_padding != [0, 0]: + return False + + if groups != 1: + return False + + if input_tensor_safe(node, 2) is None: + # No bias tensor. 
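+            # A zero bias will have to be synthesized during conversion (see `_convert_2d_conv`), which is
+            # only possible for the weight dtypes checked below.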
+            weight_tensor = input_tensor(node, 1)
+            if weight_tensor.dtype not in [torch.float32, torch.int8, torch.uint8]:
+                return False
+
+        return True
+
+    def _convert_2d_conv(
+        self, stride, padding, dilation, t_op: tflite_model.Operator
+    ) -> list[tflite_model.Operator]:
+        ops = OpsList(middle_op=t_op)
+        t_op.builtin_options = conv_2d_options.Conv2D()
+        common.assign_2d_strides(t_op.builtin_options, stride)
+        common.assign_2d_dilations(t_op.builtin_options, dilation)
+        t_op.builtin_options.padding, explicit_padding = (
+            aten_translator.convert_padding(padding)
+        )
+
+        if explicit_padding is not None:
+            # Need to prepend a 'Pad' operator, which adds 0s. But these will be included in the computation!
+            ops.add_pre(
+                self.builder.create_pad_operator_before(t_op, 0, explicit_padding)
+            )
+
+        input_tensor: tflite_model.Tensor = t_op.tmp_inputs[0]
+        weight_tensor: tflite_model.Tensor = t_op.tmp_inputs[1]
+        output_tensor: tflite_model.Tensor = t_op.tmp_outputs[0]
+
+        if (bias_tensor := try_get_input(t_op, 2)) is None:
+            # Operator has no bias. The convolution aten op can omit it, TFLite can't.
+            output_channels = weight_tensor.shape.vector[0]
+
+            if weight_tensor.type == TensorType.FLOAT32:
+                bias_type = np.dtype(np.float32)
+            elif weight_tensor.type in [TensorType.INT8, TensorType.UINT8]:
+                bias_type = np.dtype(np.int32)
+            else:
+                # Should never happen.
+                raise NotImplementedError(
+                    f"Convolution node with unsupported weight type: {weight_tensor.type}"
+                )
+
+            bias_tensor = self.builder.create_zeros_tensor(
+                [output_channels], "zero_bias", bias_type, True
+            )
+
+            # Compute the scale and zero point for the bias tensor. (Only applicable when the inputs are
+            # quantized; a float32 convolution carries no quantization parameters.)
+            if (
+                input_tensor.quantization is not None
+                and weight_tensor.quantization is not None
+            ):
+                input_scale = np.array(input_tensor.quantization.scale.vector)
+                weight_scale = np.array(weight_tensor.quantization.scale.vector)
+                bias_scale = input_scale * weight_scale
+                bias_zero_point = np.zeros(weight_scale.shape, dtype=np.int64)
+
+                set_quantization_parameters_to_tensor(
+                    bias_tensor, bias_scale, bias_zero_point, quantized_dimension=0
+                )
+
+        # Assign the operator its TFLite inputs and outputs
+        t_op.tmp_inputs = [input_tensor, weight_tensor, bias_tensor]
+        t_op.tmp_outputs = [output_tensor]
+
+        return ops.flatten()
+
+    def convert(self, node: Node):
+        self.assert_convertible(node)
+
+        stride = node.args[3]
+        padding = node.args[4]
+        dilation = node.args[5]
+
+        t_op = self._create_tflite_op_with_io_tensors(node)
+        ops_to_add = self._convert_2d_conv(stride, padding, dilation, t_op)
+
+        self.builder.append_operators(ops_to_add)
diff --git a/backends/nxp/backend/ir/converter/node_converters/ops_converters/max_pool_2d_converter.py b/backends/nxp/backend/ir/converter/node_converters/ops_converters/max_pool_2d_converter.py
new file mode 100644
index 00000000000..cd917e9d217
--- /dev/null
+++ b/backends/nxp/backend/ir/converter/node_converters/ops_converters/max_pool_2d_converter.py
@@ -0,0 +1,104 @@
+# Copyright 2024 NXP
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+import numpy as np + +from executorch.backends.nxp.backend.ir.converter.conversion import ( + aten_translator, + common, +) +from executorch.backends.nxp.backend.ir.converter.conversion.common import OpsList +from executorch.backends.nxp.backend.ir.converter.node_converter import ( + NodeConverter, + Target, +) +from executorch.backends.nxp.backend.ir.lib.tflite.TensorType import TensorType +from executorch.backends.nxp.backend.ir.tflite_generator import tflite_model +from executorch.backends.nxp.backend.ir.tflite_generator.builtin_options import ( + max_pool_2d_options, +) +from torch.fx import Node +from torch.nn import Parameter + + +class MaxPool2dConverter(NodeConverter): + """Convert 'max_pool2d' operator to TFLite 'MaxPool2D'. + NOTE: max_pool2d_with_indices is a different operator and is unsupported. + """ + + supported_targets = [Target.RT700] + + @staticmethod + def _is_supported_in_IR( + node: Node, parameters_mapping: dict[str, Parameter] + ) -> bool: + n_args = len(node.args) + + dilation = node.args[4] if n_args >= 5 else [1, 1] + ceil_mode = node.args[5] if n_args == 6 else False + + if any(dil != 1 for dil in dilation) or ceil_mode: + return False + + if not NodeConverter._has_shared_q_params_if_quantized(node): + return False + + return True + + def _get_pad_constant_value(self, input_type: TensorType) -> np.ndarray: + """ + Get scalar NumPy array with constant value used as constant value for 'Pad' operator. + + :param input_type: Input tensor type. + :return: Scalar array with single minimum value of given type. + """ + + match input_type: + case TensorType.INT8: + return np.asarray([np.iinfo(np.int8).min], dtype=np.int8) + case TensorType.UINT8: + return np.asarray([np.iinfo(np.uint8).min], dtype=np.uint8) + case TensorType.FLOAT32: + return np.asarray([np.finfo(np.float32).min], dtype=np.float32) + case _: + raise RuntimeError("Unexpected input type for MaxPool operator.") + + # noinspection PyMethodMayBeStatic + def _convert_2d_max_pool( + self, kernel_size, stride, padding, t_op: tflite_model.Operator + ) -> list[tflite_model.Operator]: + x = t_op.tmp_inputs[0] + + ops = OpsList(middle_op=t_op) + t_op.builtin_options = max_pool_2d_options.MaxPool2D() + t_op.builtin_options.filter_h = kernel_size[0] + t_op.builtin_options.filter_w = kernel_size[1] + common.assign_2d_strides(t_op.builtin_options, stride) + t_op.builtin_options.padding, explicit_padding = ( + aten_translator.convert_padding(padding) + ) + + if explicit_padding is not None: + # Need to prepend a 'Pad' operator, which adds min values for type. 
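+            # Padding with the type's minimum (rather than zero) guarantees that the padded elements can
+            # never win the max reduction, which would otherwise corrupt results for negative inputs.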
+ constant_value = self._get_pad_constant_value(x.type) + pre_pad_op = self.builder.create_pad_operator_before( + t_op, 0, explicit_padding, constant_value=constant_value + ) + ops.add_pre(pre_pad_op) + + return ops.flatten() + + # Maxpool2d Node format: (Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) + def convert(self, node: Node): + self.assert_convertible(node) + + n_args = len(node.args) + + kernel_size = node.args[1] + stride = node.args[2] + padding = node.args[3] if n_args >= 4 else [0, 0] + + t_op = self._create_tflite_op_with_io_tensors(node) + ops_to_add = self._convert_2d_max_pool(kernel_size, stride, padding, t_op) + self.builder.append_operators(ops_to_add) diff --git a/backends/nxp/backend/ir/converter/node_converters/ops_converters/mm_converter.py b/backends/nxp/backend/ir/converter/node_converters/ops_converters/mm_converter.py new file mode 100644 index 00000000000..fc513240c44 --- /dev/null +++ b/backends/nxp/backend/ir/converter/node_converters/ops_converters/mm_converter.py @@ -0,0 +1,58 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from executorch.backends.nxp.backend.edge_helper import input_rank +from executorch.backends.nxp.backend.ir.converter.conversion.common import OpsList +from executorch.backends.nxp.backend.ir.converter.node_converter import ( + NodeConverter, + Target, +) +from executorch.backends.nxp.backend.ir.tflite_generator.builtin_options import ( + fully_connected_options, +) +from torch.fx import Node +from torch.nn import Parameter + + +class MMConverter(NodeConverter): + supported_targets = [Target.RT700] + + @staticmethod + def _is_supported_in_IR( + node: Node, parameters_mapping: dict[str, Parameter] + ) -> bool: + if len(node.all_input_nodes) != 2: + return False + + # The weights must be 2D. + if input_rank(node, 1) != 2: + return False + + return True + + def convert(self, node: Node): + """Convert the `aten.mm` operator to TFLite `FullyConnected` without a bias input.""" + self.assert_convertible(node) + + t_op = self._create_tflite_op_with_io_tensors(node) + t_op.builtin_options = fully_connected_options.FullyConnected() + + x = t_op.tmp_inputs[0] + w = t_op.tmp_inputs[1] + y = t_op.tmp_outputs[0] + + # Assign the operator its TFLite inputs and outputs + t_op.tmp_inputs = [x, w] + t_op.tmp_outputs = [y] + + ops = OpsList(middle_op=t_op) + + # The `aten.mm` uses main input with shape [M, N] and the weights have the shape [N, O]. + # TFLite `FullyConnected` requires the weights to have shape [O, N] (if the main input has shape [M, N]). + # Insert a `Transpose` operator to permute the weights to achieve correct conversion. (The `Transpose` will not + # be present in the output model if the weights are static.) + ops.add_pre(self.builder.create_transpose_operator_before(t_op, 1, [1, 0])) + + self.builder.append_operators(ops.flatten()) diff --git a/backends/nxp/backend/ir/converter/node_converters/ops_converters/permute_copy_converter.py b/backends/nxp/backend/ir/converter/node_converters/ops_converters/permute_copy_converter.py new file mode 100644 index 00000000000..e24ed4f6863 --- /dev/null +++ b/backends/nxp/backend/ir/converter/node_converters/ops_converters/permute_copy_converter.py @@ -0,0 +1,64 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+
+import numpy as np
+
+from executorch.backends.nxp.backend.ir.converter import quantization_utils
+from executorch.backends.nxp.backend.ir.converter.conversion.common import OpsList
+from executorch.backends.nxp.backend.ir.converter.node_converter import (
+    NodeConverter,
+    Target,
+)
+from executorch.backends.nxp.backend.ir.tflite_generator.builtin_options import (
+    transpose_options,
+)
+from torch.fx import Node
+from torch.nn import Parameter
+
+
+class PermuteCopyConverter(NodeConverter):
+    supported_targets = [Target.RT700]
+
+    @staticmethod
+    def _is_supported_in_IR(
+        node: Node, parameters_mapping: dict[str, Parameter]
+    ) -> bool:
+        return True
+
+    def convert(self, node: Node):
+        """Convert the `aten.permute_copy` operator to TFLite `Transpose`."""
+        self.assert_convertible(node)
+
+        t_op = self._create_tflite_op_with_io_tensors(node)
+        t_op.builtin_options = transpose_options.Transpose()
+
+        x = t_op.tmp_inputs[0]
+        y = t_op.tmp_outputs[0]
+
+        if (
+            x.quantization is not None
+            and y.quantization is None
+            and "cluster" in node.meta
+        ):
+            # We know this node is part of a QDQ cluster, so we can propagate the quantization to the inputs
+            # of the "call_function" node of this cluster.
+            quantization_utils.propagate_quantization(x, y)
+
+            y.type = x.type
+            assert x.quantization == y.quantization, (
+                "PermuteCopyConverter: the Q-params of the input and output do not "
+                "match. This indicates an error in the quantizer."
+            )
+
+        perm = np.array(node.args[1], "int32")
+        perm_tensor = self.builder.create_tensor_for_data(perm, "perm")
+
+        # Assign the operator its TFLite inputs and outputs
+        t_op.tmp_inputs = [x, perm_tensor]
+        t_op.tmp_outputs = [y]
+
+        ops_to_add = OpsList(middle_op=t_op)
+
+        self.builder.append_operators(ops_to_add.flatten())
diff --git a/backends/nxp/backend/ir/converter/node_converters/ops_converters/qdq_dequantize_converter.py b/backends/nxp/backend/ir/converter/node_converters/ops_converters/qdq_dequantize_converter.py
new file mode 100644
index 00000000000..8731b3f6ed2
--- /dev/null
+++ b/backends/nxp/backend/ir/converter/node_converters/ops_converters/qdq_dequantize_converter.py
@@ -0,0 +1,64 @@
+# Copyright 2024 NXP
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import numpy as np
+
+from executorch.backends.nxp.backend.ir.converter.conversion.translator import (
+    torch_type_to_numpy_type,
+)
+from executorch.backends.nxp.backend.ir.converter.node_converter import (
+    NodeConverter,
+    Target,
+)
+from executorch.backends.nxp.backend.ir.converter.quantization_utils import (
+    set_quantization_parameters_to_tensor,
+)
+from torch.fx import Node
+from torch.nn import Parameter
+
+
+class QDQDequantizeConverter(NodeConverter):
+    supported_targets = [Target.RT700]
+
+    @staticmethod
+    def _is_supported_in_IR(
+        node: Node, parameters_mapping: dict[str, Parameter]
+    ) -> bool:
+        zero_point_type = torch_type_to_numpy_type(node.args[5])
+        if "cluster" not in node.meta or zero_point_type not in [np.int8, np.int32]:
+            return False
+
+        return True
+
+    def convert(self, node: Node):
+        self.assert_convertible(node)
+
+        from_tensor = self.builder.tensor_for_name(node.name)
+        to_tensor = self.builder.tensor_for_name(node.args[0].name)
+
+        zero_point_type = torch_type_to_numpy_type(node.args[5])
+
+        scale = np.array(node.args[1], dtype=np.float32)
+        zero_point = np.array(node.args[2], dtype=zero_point_type)
+
+        if self.context.parameters_mapping.get(node.args[0].name, None) is None:
+            # Convert the dequantize node as an identity op (a Transpose, which will later be removed),
+            # because the input tensor is a model input and doesn't have static data. If we did the
+            # redirection here, we would change the input name of the model.
+            t_op = self._create_tflite_op_with_io_tensors(node)
+
+            set_quantization_parameters_to_tensor(to_tensor, scale, zero_point, 0)
+            set_quantization_parameters_to_tensor(from_tensor, scale, zero_point, 0)
+            from_tensor.type = to_tensor.type
+
+            self.builder.turn_operator_to_identity(t_op)
+            self.builder.append_operators([t_op])
+        else:
+            # The dequantize node consumes a tensor with static data -> convert it as a tensor.
+            set_quantization_parameters_to_tensor(to_tensor, scale, zero_point, 0)
+
+            # Change the type, so we pass the tensor similarity check when redirecting.
+            from_tensor.type = to_tensor.type
+            self.builder.redirect_tensor(from_tensor, to_tensor)
diff --git a/backends/nxp/backend/ir/converter/node_converters/ops_converters/qdq_quantize_converter.py b/backends/nxp/backend/ir/converter/node_converters/ops_converters/qdq_quantize_converter.py
new file mode 100644
index 00000000000..b0680e9b949
--- /dev/null
+++ b/backends/nxp/backend/ir/converter/node_converters/ops_converters/qdq_quantize_converter.py
@@ -0,0 +1,45 @@
+# Copyright 2024 NXP
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import numpy as np
+import torch
+
+from executorch.backends.nxp.backend.ir.converter.node_converter import (
+    NodeConverter,
+    Target,
+)
+from executorch.backends.nxp.backend.ir.converter.quantization_utils import (
+    set_quantization_parameters_to_tensor,
+)
+from torch.fx import Node
+from torch.nn import Parameter
+
+
+class QDQQuantizeConverter(NodeConverter):
+    supported_targets = [Target.RT700]
+
+    @staticmethod
+    def _is_supported_in_IR(
+        node: Node, parameters_mapping: dict[str, Parameter]
+    ) -> bool:
+        if "cluster" not in node.meta or node.args[5] != torch.int8:
+            return False
+
+        return True
+
+    def convert(self, node: Node):
+        self.assert_convertible(node)
+
+        from_tensor = self.builder.tensor_for_name(node.name)
+        to_tensor = self.builder.tensor_for_name(node.args[0].name)
+
+        scale = np.array(node.args[1], dtype=np.float32)
+        zero_point = np.array(node.args[2], dtype=np.int8)
+
+        set_quantization_parameters_to_tensor(to_tensor, scale, zero_point, 0)
+
+        # Change the type, so we pass the tensor similarity check when redirecting.
+        to_tensor.type = from_tensor.type
+        self.builder.redirect_tensor(from_tensor, to_tensor)
diff --git a/backends/nxp/backend/ir/converter/node_converters/ops_converters/relu_converter.py b/backends/nxp/backend/ir/converter/node_converters/ops_converters/relu_converter.py
new file mode 100644
index 00000000000..5835667671f
--- /dev/null
+++ b/backends/nxp/backend/ir/converter/node_converters/ops_converters/relu_converter.py
@@ -0,0 +1,31 @@
+# Copyright (c) 2024 NXP
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+from executorch.backends.nxp.backend.ir.converter.node_converter import (
+    NodeConverter,
+    Target,
+)
+from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import (
+    BuiltinOperator,
+)
+from torch.fx import Node
+from torch.nn import Parameter
+
+
+class ReLUConverter(NodeConverter):
+    supported_targets = [Target.RT700]
+
+    @staticmethod
+    def _is_supported_in_IR(
+        node: Node, parameters_mapping: dict[str, Parameter]
+    ) -> bool:
+        return True
+
+    def convert(self, node: Node):
+        t_op = self._create_tflite_op_with_io_tensors(node)
+        t_op.opcode_index = self.builder.op_code_index_for_op_type(BuiltinOperator.RELU)
+
+        self.builder.append_operators([t_op])
diff --git a/backends/nxp/backend/ir/converter/node_converters/ops_converters/softmax_converter.py b/backends/nxp/backend/ir/converter/node_converters/ops_converters/softmax_converter.py
new file mode 100644
index 00000000000..99932602c2f
--- /dev/null
+++ b/backends/nxp/backend/ir/converter/node_converters/ops_converters/softmax_converter.py
@@ -0,0 +1,49 @@
+# Copyright 2024 NXP
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+from executorch.backends.nxp.backend.edge_helper import input_rank
+from executorch.backends.nxp.backend.ir.converter.node_converter import NodeConverter
+from executorch.backends.nxp.backend.ir.tflite_generator.builtin_options import (
+    softmax_options,
+)
+from torch.fx import Node
+from torch.nn import Parameter
+
+
+class SoftmaxConverter(NodeConverter):
+    supported_targets = []
+
+    @staticmethod
+    def _is_supported_in_IR(
+        node: Node, parameters_mapping: dict[str, Parameter]
+    ) -> bool:
+        # The IR only supports the `dim` as the last dimension.
But that depends on the format of the input tensor, + # which is only known after the `Partitioner` has divided the model. So if the input shape can be channels + # first (i.e. is more than 2D), we cannot determine IR support (we assume it's not supported). + x_rank = input_rank(node, 0) + if x_rank > 2: + return False + + dim = SoftmaxConverter._normalize_dim(node.args[1], x_rank) + if dim != x_rank - 1: + return False + + return True + + @staticmethod + def _normalize_dim(dim, rank): + # convert negative index to positive + if dim < 0: + dim += rank + return dim + + def convert(self, node: Node): + self.assert_convertible(node) + + t_op = self._create_tflite_op_with_io_tensors(node) + + t_op.builtin_options = softmax_options.Softmax(beta=1.0) + + self.builder.append_operators([t_op]) diff --git a/backends/nxp/backend/ir/converter/node_converters/ops_converters/view_copy_converter.py b/backends/nxp/backend/ir/converter/node_converters/ops_converters/view_copy_converter.py new file mode 100644 index 00000000000..2eceeba9b24 --- /dev/null +++ b/backends/nxp/backend/ir/converter/node_converters/ops_converters/view_copy_converter.py @@ -0,0 +1,98 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import numpy as np + +from executorch.backends.nxp.backend.edge_helper import ( + input_tensor, + output_tensor, + tensor_rank, +) +from executorch.backends.nxp.backend.ir.converter import quantization_utils +from executorch.backends.nxp.backend.ir.converter.conversion.common import OpsList +from executorch.backends.nxp.backend.ir.converter.node_converter import ( + NodeConverter, + Target, +) +from executorch.backends.nxp.backend.ir.converter.node_converters.shared.reshape_transposition import ( + ensure_reshape_transposition, +) +from executorch.backends.nxp.backend.ir.tflite_generator.builtin_options import ( + reshape_options, +) +from torch.fx import Node +from torch.nn import Parameter + + +class ViewCopyConverter(NodeConverter): + supported_targets = [Target.RT700] + + @staticmethod + def _is_supported_in_IR( + node: Node, parameters_mapping: dict[str, Parameter] + ) -> bool: + x = input_tensor(node, 0) + y = output_tensor(node) + + flat_input_size = ViewCopyConverter._safe_compute_flat_size(list(x.size())) + flat_output_size = ViewCopyConverter._safe_compute_flat_size(list(y.size())) + + if tensor_rank(y) >= 8 or flat_input_size != flat_output_size: + return False + + return True + + @staticmethod + def _safe_compute_flat_size(shape: list[int | str]) -> int: + """Compute the flat size of a tensor with given shape. Strings and negative dimensions are treated as '1'. + + :param shape: Shape of the tensor. Can include integers and strings. + :return: The flat size of the tensor. + """ + flat_size = 1 + for dim in shape: + if isinstance(dim, int) and dim > 1: + flat_size *= dim + + return flat_size + + def convert(self, node: Node): + """Convert the `aten.view_copy` operator to TFLite `Reshape`.""" + self.assert_convertible(node) + + t_op = self._create_tflite_op_with_io_tensors(node) + + x = t_op.tmp_inputs[0] + y = t_op.tmp_outputs[0] + + ops = OpsList(middle_op=t_op) + + if ( + x.quantization is not None + and y.quantization is None + and "cluster" in node.meta + ): + # We know this node is part of QDQ cluster, so we can propagate quantization to inputs of "call_function" + # node of this cluster. 
+            quantization_utils.propagate_quantization(x, y)
+
+            y.type = x.type
+            assert x.quantization == y.quantization, (
+                "ViewCopyConverter: Q-params of input and output do not match. This "
+                "indicates an error in the quantizer."
+            )
+
+        new_shape = ensure_reshape_transposition(self.builder, ops)
+
+        # Create the TFLite Reshape with the new shape.
+        t_op.builtin_options = reshape_options.Reshape(new_shape)
+
+        # Required by the neutron-converter, which will remove this tensor in its optimization phase.
+        new_shape_tensor = self.builder.create_tensor_for_data(
+            np.asarray(new_shape, dtype=np.int32), "new_shape"
+        )
+        t_op.tmp_inputs.append(new_shape_tensor)
+
+        self.builder.append_operators(ops.flatten())
diff --git a/backends/nxp/backend/ir/converter/node_converters/shared/__init__.py b/backends/nxp/backend/ir/converter/node_converters/shared/__init__.py
new file mode 100755
index 00000000000..e69de29bb2d
diff --git a/backends/nxp/backend/ir/converter/node_converters/shared/recurrent_utils.py b/backends/nxp/backend/ir/converter/node_converters/shared/recurrent_utils.py
new file mode 100755
index 00000000000..50b9aef6d18
--- /dev/null
+++ b/backends/nxp/backend/ir/converter/node_converters/shared/recurrent_utils.py
@@ -0,0 +1,112 @@
+# Copyright 2024 NXP
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+from executorch.backends.nxp.backend.ir import logger
+from executorch.backends.nxp.backend.ir.converter.builder import model_builder
+from executorch.backends.nxp.backend.ir.converter.conversion import translator
+from executorch.backends.nxp.backend.ir.converter.conversion.common import (
+    OpsList,
+    try_get_input,
+)
+from executorch.backends.nxp.backend.ir.converter.tensor_utils import tensor_has_data
+from executorch.backends.nxp.backend.ir.lib.tflite.ActivationFunctionType import (
+    ActivationFunctionType,
+)
+from executorch.backends.nxp.backend.ir.tensor_formatting import TensorFormat
+from executorch.backends.nxp.backend.ir.tflite_generator import tflite_model
+
+
+def ensure_correct_tensor_formatting(
+    t_op: tflite_model.Operator, builder: model_builder.ModelBuilder, ops: OpsList
+):
+    """Make sure that all input and output tensors of 't_op' have the correct format. 't_op' is assumed to be an LSTM
+    or RNN operator.
+
+    The LSTM/RNN may be using channels-last tensors, because of the surrounding operators. LSTM/RNN requires its own
+    format; however, I think the input tensors should be marked as 'FORMATLESS', because the main inputs of the
+    TFLite and ONNX versions of the operators have the same shape.
+    I believe that the cleanest and most robust way to solve this is to mark LSTM/RNN as an operator which can
+    change the formats of its tensors, and to solve any format-related issues in this module.
+
+    :param t_op: TFLite operator with inputs and outputs corresponding to the ONNX LSTM/RNN operator.
+    :param builder: ModelBuilder object.
+    :param ops: OpsList object, with operators to add to the model. May already contain some operators.
+    """
+
+    if t_op.tmp_inputs[0].tensor_format == TensorFormat.FORMATLESS:
+        # Nothing to be done. All tensors should be formatless.
+        return
+
+    # Permute the inputs.
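+    # (Illustration, assuming a rank-4 channels-last input: the permutation computed below is
+    # [0, 3, 1, 2], i.e. NHWC -> NCHW.)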
+ for idx, tensor in enumerate(t_op.tmp_inputs.copy()): + if tensor.tensor_format.is_channels_last(): + revert_perm = translator.create_channels_last_to_channels_first_permutation( + tensor.rank, return_list=True + ) + if tensor_has_data(tensor): + translator.permute_static_tensor(tensor, revert_perm) + + else: + # Prepend a Transpose operator. + transpose = builder.create_transpose_operator_before( + t_op, idx, revert_perm + ) + ops.pre_ops.append(transpose) + + t_op.tmp_inputs[idx].tensor_format = TensorFormat.FORMATLESS + + # LSTM/RNN produces 'FORMATLESS' outputs. However, if the output tensors have the 'channels_last' format, Transpose + # operators must be added, to actually make the inputs 'channels_last'. + for idx, tensor in enumerate(t_op.tmp_outputs.copy()): + if tensor.tensor_format.is_channels_last(): + # Append a Transpose operator. + revert_perm = translator.create_channels_first_to_channels_last_permutation( + tensor.rank, return_list=True + ) + transpose = builder.create_transpose_operator_after(t_op, idx, revert_perm) + ops.post_ops.append(transpose) + + t_op.tmp_outputs[idx].tensor_format = TensorFormat.FORMATLESS + + +def get_activation_function_for_name( + name: str, op_type: str = "LSTM" +) -> ActivationFunctionType: + get_activation_function_for_name.map = { + "Tanh": ActivationFunctionType.TANH, + "Relu": ActivationFunctionType.RELU, + } + + if act_fun := get_activation_function_for_name.map.get(name, None): + return act_fun + + # Couldn't find a corresponding activation function + logger.e( + logger.Code.CONVERSION_IMPOSSIBLE, + f"Conversion of ONNX {op_type} with activation function '{name}' is not possible.", + ) + + +def check_sequence_lens( + t_op: tflite_model.Operator, seq_length: int, op_type: str = "LSTM" +): + """Check if the 'sequence_lens' operand of ONNX LSTM/RNN has an effect. If it does, exit with error. + + :param t_op: TFLite operator with inputs and outputs corresponding to the ONNX operator. + :param seq_length: The first dimension of the main LSTM input. + :param op_type: Operator type of 't_op'. Used only for printing a specific error message. + """ + if sequence_lens := try_get_input(t_op, 4): + # 'sequence_lens' allows each sequence to have a different length. As far as I can tell, TFLite doesn't support + # this. + if (not tensor_has_data(sequence_lens)) or any( + elt != seq_length for elt in sequence_lens.tmp_buffer.data + ): + # The 'sequence_lens' is either dynamic, or static with at least one value different from 'seq_length'. + # Conversion most likely impossible. + logger.e( + logger.Code.CONVERSION_IMPOSSIBLE, + f"Conversion of ONNX {op_type} with 'sequence_lens' input is not possible.", + ) diff --git a/backends/nxp/backend/ir/converter/node_converters/shared/reduce_utils.py b/backends/nxp/backend/ir/converter/node_converters/shared/reduce_utils.py new file mode 100755 index 00000000000..fad32edfd26 --- /dev/null +++ b/backends/nxp/backend/ir/converter/node_converters/shared/reduce_utils.py @@ -0,0 +1,200 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +import numpy as np +from executorch.backends.nxp.backend.ir.converter.builder.model_builder import ( + ModelBuilder, +) +from executorch.backends.nxp.backend.ir.converter.conversion import translator +from executorch.backends.nxp.backend.ir.converter.conversion.common import OpsList +from executorch.backends.nxp.backend.ir.tensor_formatting import TensorFormat +from executorch.backends.nxp.backend.ir.tflite_generator import tflite_model + + +def convert_axes_from_attribute( + t_op: tflite_model.Operator, builder: ModelBuilder, axes: list[int] | None +): + """Create an `axes` tensor and assign it as an input to the `t_op`, which is expected to represent an ONNX + reduction operator. + """ + x = t_op.tmp_inputs[0] + rank = x.rank + + if axes is None: + # Default axes -> reduce over all dimensions. + axes = np.arange(rank).astype(np.int32) + + else: + # Axes are initialized. + axes = np.asarray(axes, np.int32) + + # TFLite has `axes` as input tensor -> create it. + axes_tensor = builder.create_tensor_for_data(axes, "axes") + t_op.tmp_inputs.append(axes_tensor) + + +# def convert_axes_from_input_tensor( +# t_op: tflite_model.Operator, +# builder: ModelBuilder, +# inspector: ONNXModelInspector, +# ops: OpsList, +# noop_with_empty_axes: int, +# op_type: str, +# ): +# """Verify the `axes` tensor (on input index 1) of the `t_op`, which is expected to represent an ONNX reduction +# operator. +# """ +# x = t_op.tmp_inputs[0] +# rank = x.rank +# +# if axes_tensor := try_get_input(t_op, 1): +# +# # ONNX uses int64, while TFLite requires int32 for the `axes` tensor. +# if axes_tensor.type != TensorType.INT64: +# logger.e( +# logger.Code.INVALID_ONNX_OPERATOR, +# f"ONNX `{op_type}` has `axes` of type `{name_for_type(axes_tensor.type)}`, instead of INT64.", +# ) +# +# # Try to get the inferred data for the `axes` input. +# if ( +# axes_data := inspector.try_get_inferred_tensor_data(axes_tensor.name) +# ) is not None: +# # The `axes` were inferred during shape inference. +# logger.d( +# f"Using inferred data for the `axes` input tensor of ONNX `{op_type}`." +# ) +# +# # Create a new tensor, in case the original `axes` tensor is used by multiple ops. +# axes_tensor = builder.create_tensor_for_data( +# axes_data.astype(np.int32), "axes" +# ) +# +# # Make sure the `axes` are int32. +# if tensor_has_data(axes_tensor): +# # Cast the `axes` to int32 statically. +# axes_tensor.tmp_buffer.data = axes_tensor.tmp_buffer.data.astype(np.int32) +# axes_tensor.type = TensorType.INT32 +# +# else: +# # The `axes` are dynamic and there is no inferred data for them. The shape inference is not possible in +# # this case, so it must have been skipped. If the `axes` are empty at runtime, ONNX will reduce over +# # all dimensions, whereas TFLite will not reduce at all. So the behavior is different, and it depends +# # on runtime data. Conversion could be implemented by adding multiple extra operators. +# # I don't thing that completely prohibiting the conversion here is ideal, since the issue arises only in +# # an edge case, which is hopefully not very common. Just print a warning message for now. +# logger.w( +# f"Conversion of ONNX `{op_type}` with a dynamic `axes` input will not be correct, if the `axes`" +# "are empty at runtime!" +# ) +# +# # Insert a `Cast` op, to make the `axes` int32. +# cast_op = builder.create_cast_before(t_op, 1, TensorType.INT32) +# ops.add_pre(cast_op) +# +# # For future references. Following code only cares about the final axes tensor. 
+# axes_tensor = cast_op.tmp_outputs[0] +# +# # Assign the new `axes_tensor` to the ReduceX operator. +# t_op.tmp_inputs[1] = axes_tensor +# +# else: +# # No axes specified. +# +# if noop_with_empty_axes == 1: +# # ONNXRT: According to the documentation, the operator should do nothing in this situation. But that's +# # not what happens in ONNX Runtime. ORT seems to simply ignore the `noop_with_empty_axes` attribute. +# # https://github.com/microsoft/onnxruntime/issues/19147 +# # For now, exit with error. If later ORT adds support for this attribute, simply uncomment the +# # following code. +# +# # if self.builder.operator_can_be_skipped(t_op, self.inspector): +# # # Skip the operator. +# # self.builder.redirect_tensor(t_op.tmp_outputs[0], t_op.tmp_inputs[0]) +# # return [] +# # +# # else: +# # # Return an operator which does nothing. +# # self.builder.turn_operator_to_identity(t_op) +# # return [t_op] +# +# logger.e( +# logger.Code.INVALID_ONNX_OPERATOR, +# f"ONNX `{op_type}` has `noop_with_empty_axes` == 1 and the `axes` are not specified, which" +# " indicates that the operator should do nothing. This is however not supported by ONNX" +# " Runtime, and therefore the conversion is also not supported.", +# ) +# +# else: +# # Default is to reduce all axes. +# axes_tensor = builder.create_tensor_for_data( +# np.arange(rank).astype(np.int32), "axes" +# ) +# +# t_op.tmp_inputs[1:] = ( +# [] +# ) # If the optional input was passed with name "", remove it. +# t_op.tmp_inputs.append(axes_tensor) + + +def ensure_reduce_transposition(builder, ops: OpsList): + """ + Ensure transposition of ReduceX operator is defined correctly based on tensor format. + New operators (Transpose) are added into "ops" collection when necessary. + + :param builder: ModelBuilder instance. + :param ops: OpsList instance with operators related to currently converted ReduceX operator. + """ + t_op = ops.middle_op + input_tensor = t_op.tmp_inputs[0] + input_rank = input_tensor.rank + input_format = input_tensor.tensor_format + output_tensor = t_op.tmp_outputs[0] + output_rank = output_tensor.rank + output_format = output_tensor.tensor_format + + if input_format.is_channels_last() and output_format.is_channels_last(): + to_onnx_perm = translator.create_channels_last_to_channels_first_permutation( + input_rank + ) + to_tflite_perm = translator.create_channels_first_to_channels_last_permutation( + output_rank, return_list=True + ) + + transpose_before = builder.create_transpose_operator_before( + t_op, 0, to_onnx_perm + ) + transpose_before.tmp_outputs[0].tensor_format = TensorFormat.CHANNELS_FIRST + ops.add_pre(transpose_before) + + transpose_after = builder.create_transpose_operator_after( + t_op, 0, to_tflite_perm + ) + transpose_after.tmp_inputs[0].tensor_format = TensorFormat.CHANNELS_FIRST + ops.post_ops.insert(0, transpose_after) + + elif input_format.is_channels_last() and not output_format.is_channels_last(): + # The dimensions of the tensor lose their meaning! Insert a transpose op, to change input to match ONNX. + + permutation = list( + translator.create_channels_last_to_channels_first_permutation(input_rank) + ) + transpose = builder.create_transpose_operator_before(t_op, 0, permutation) + transpose.tmp_outputs[0].tensor_format = TensorFormat.CHANNELS_FIRST + + ops.add_pre(transpose) + + elif not input_format.is_channels_last() and output_format.is_channels_last(): + # The ReduceX introduces format to the tensor + # The ONNX ReduceX outputs a 'channels first' tensor. 
This has to stay the same, and then a Transpose operator
+        # must be added, to change the tensor to 'channels last'.
+
+        permutation = list(
+            translator.create_channels_first_to_channels_last_permutation(output_rank)
+        )
+        transpose = builder.create_transpose_operator_after(t_op, 0, permutation)
+        transpose.tmp_inputs[0].tensor_format = TensorFormat.CHANNELS_FIRST
+
+        ops.post_ops.insert(0, transpose)
diff --git a/backends/nxp/backend/ir/converter/node_converters/shared/reshape_transposition.py b/backends/nxp/backend/ir/converter/node_converters/shared/reshape_transposition.py
new file mode 100755
index 00000000000..0e55c27684b
--- /dev/null
+++ b/backends/nxp/backend/ir/converter/node_converters/shared/reshape_transposition.py
@@ -0,0 +1,233 @@
+# Copyright 2023 NXP
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+from enum import Enum
+
+import numpy as np
+
+from executorch.backends.nxp.backend.ir.converter.conversion import translator
+from executorch.backends.nxp.backend.ir.converter.conversion.common import OpsList
+from executorch.backends.nxp.backend.ir.tensor_formatting import TensorFormat
+
+
+class SingleUnitaryDimensionChangeType(Enum):
+    SQUEEZE = 0  # Removing one dimension with value 1.
+    UNSQUEEZE = 1  # Adding one dimension with value 1.
+
+
+def _single_unitary_dimension_change(  # noqa C901
+    from_shape, to_shape
+) -> tuple[int, SingleUnitaryDimensionChangeType] | None:
+    """
+    Get the change details (index of change and type of change) if there is only a single unitary change
+    between the input shapes. Return None otherwise.
+
+    :param from_shape: First compared shape.
+    :param to_shape: Second compared shape.
+    :return: Tuple with change details (changed index and type of change), or None.
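+    Example (illustrative): from_shape=[2, 3, 4], to_shape=[2, 1, 3, 4] returns (1, UNSQUEEZE),
+    while from_shape=[2, 3, 4], to_shape=[2, 3, 5] returns None.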
+ """ + change_type = SingleUnitaryDimensionChangeType.UNSQUEEZE + + if ( + abs(len(from_shape) - len(to_shape)) != 1 + ): # More than one added/removed dimension + return None + elif len(from_shape) > len(to_shape): # Make sure 'from_shape' is a shorter one + from_shape, to_shape = to_shape, from_shape + change_type = SingleUnitaryDimensionChangeType.SQUEEZE + + # All dimensions in both shapes are ones + if np.all(np.array(to_shape) == 1) and np.all(np.array(from_shape) == 1): + return 0, change_type + + # Iterate from the beginning of the shorter shape and find first non-matching dimension + first_non_matching_forward = None + for i in range(len(from_shape)): + if from_shape[i] != to_shape[i]: + first_non_matching_forward = i + break + + # Iterate from the end of the shorter shape and find first non-matching dimension + first_non_matching_backward = None + for i in range(-1, -len(from_shape) - 1, -1): + if from_shape[i] != to_shape[i]: + first_non_matching_backward = i + break + + # Normalize (from negative to positive value) index of non-matching dimension with + # respect to shape with more dims + if first_non_matching_backward is not None: + first_non_matching_backward = first_non_matching_backward + len(to_shape) + + # 'from_shape' completely matched the beginning of 'to_shape', for example: + # from_shape=(2,3,4), to_shape=(2,3,4,1) + if first_non_matching_forward is None and first_non_matching_backward is not None: + if to_shape[first_non_matching_backward] == 1: + return first_non_matching_backward, change_type + # 'from_shape' completely matched the end of 'to_shape', for example: + # from_shape=(2,3,4), to_shape=(1,2,3,4) + elif first_non_matching_forward is not None and first_non_matching_backward is None: + if to_shape[first_non_matching_forward] == 1: + return first_non_matching_forward, change_type + # 'from_shape' matched partially from the beginning and partly from the end of 'to_shape', + # for example: from_shape=(2,3,4), to_shape=(2,1,3,4) + elif (first_non_matching_forward == first_non_matching_backward) and to_shape[ + first_non_matching_forward + ] == 1: + return first_non_matching_forward, change_type + + return None + + +def _get_permutation_for_single_unitary_change_in_NC_dims( + shape_from: list[int], to_shape: list[int] +) -> list[int] | None: + """ + Get permutation used by prepended 'Transpose' operator if there's only single unitary + dimension change (single added/removed dimension with value 1) in batch or channel dimension + done by 'Reshape' operator. + + :param shape_from: Input shape of 'Reshape' operator. + :param to_shape: Output shape of 'Reshape' operator. + :return: Permutation as list of ints, or None if there is no single unitary change in NC dimensions. 
+ """ + + old_shape_channel_first = translator.dims_to_channels_first(shape_from) + new_shape_channel_first = translator.dims_to_channels_first(to_shape) + + change_details = _single_unitary_dimension_change( + old_shape_channel_first, new_shape_channel_first + ) + + # Mapping from dimension change details into permutation used in prepended 'Transpose' op + # in format: permutation_mapping[SQUEEZE/UNSQUEEZE][old_shape dimension][changed index] + permutation_mapping = { + SingleUnitaryDimensionChangeType.SQUEEZE: { + 4: { + 0: [0, 3, 2, 1], + 1: [0, 2, 1, 3], + }, + 5: { + 0: [0, 4, 2, 3, 1], + 1: [0, 2, 3, 1, 4], + }, + }, + SingleUnitaryDimensionChangeType.UNSQUEEZE: { + 3: { + 0: [2, 1, 0], + 1: [0, 2, 1], + }, + 4: { + 0: [3, 1, 2, 0], + 1: [0, 3, 1, 2], + }, + }, + } + + if change_details is not None: + changed_index, change_type = change_details + if changed_index > 1: + # There is single unitary change in other than NC dimensions -> ignoring + return None + return permutation_mapping[change_type][len(shape_from)][changed_index] + + return None + + +def ensure_reshape_transposition(builder, ops: OpsList) -> list[int]: + """ + Ensure transposition of Reshape operator is defined correctly based on tensor format. + New operators (Transpose) are added into "ops" collection when necessary. + + :param builder: ModelBuilder instance. + :param ops: OpsList instance with Reshape as "middle_op". + :return: New shape of Reshape operator. + """ + t_op = ops.middle_op + input_tensor = t_op.tmp_inputs[0] + input_rank = input_tensor.rank + input_format = input_tensor.tensor_format + output_tensor = t_op.tmp_outputs[0] + output_rank = output_tensor.rank + output_format = output_tensor.tensor_format + + # Shapes in TFLite format + input_shape = input_tensor.shape.vector + new_shape = output_tensor.shape.vector + + if input_format.is_channels_last() and not output_format.is_channels_last(): + # The dimensions of the tensor lose their meaning! Insert a transpose op, to change input to match ONNX. + + permutation = list( + translator.create_channels_last_to_channels_first_permutation(input_rank) + ) + transpose = builder.create_transpose_operator_before(t_op, 0, permutation) + transpose.tmp_outputs[0].tensor_format = TensorFormat.CHANNELS_FIRST + + ops.add_pre(transpose) + + elif not input_format.is_channels_last() and output_format.is_channels_last(): + # The Reshape introduces format to the tensor (2D -> 4D for example) + # The ONNX Reshape outputs a 'channels first' tensor. This has to stay the same, and then a Transpose operator + # must be added, to change the tensor to 'channels last'. 
+ + permutation = list( + translator.create_channels_first_to_channels_last_permutation(output_rank) + ) + transpose = builder.create_transpose_operator_after(t_op, 0, permutation) + transpose.tmp_inputs[0].tensor_format = TensorFormat.CHANNELS_FIRST + + new_shape = translator.dims_to_channels_first(new_shape) + + ops.post_ops.insert(0, transpose) + elif input_format.is_channels_last() and output_format.is_channels_last(): + batch_match = input_tensor.shape.vector[0] == output_tensor.shape.vector[0] + channels_match = input_tensor.shape.vector[-1] == output_tensor.shape.vector[-1] + + if batch_match and channels_match: + # It is safe to skip 'Transposition' at all because 'NC' dimensions are the same and + # not mixed with other dimensions + pass + elif permutation := _get_permutation_for_single_unitary_change_in_NC_dims( + input_shape, new_shape + ): + # Single added/removed dimension with value 1 + transpose = builder.create_transpose_operator_before(t_op, 0, permutation) + transpose.tmp_outputs[0].tensor_format = ( + TensorFormat.RESHAPE_SINGLE_UNITARY_TRANSPOSITION + ) + + ops.add_pre(transpose) + else: + # The only way to convert this correctly is to insert a Transpose operator before, to make the input + # channels first, and another Transpose after, to make the output channels last again. + last_to_first_perm = ( + translator.create_channels_last_to_channels_first_permutation( + input_rank + ) + ) + ops.add_pre( + builder.create_transpose_operator_before( + t_op, 0, list(last_to_first_perm) + ) + ) + t_op.tmp_inputs[0].tensor_format = TensorFormat.CHANNELS_FIRST + + new_shape = translator.dims_to_channels_first(new_shape) + + first_to_last_perm = ( + translator.create_channels_first_to_channels_last_permutation( + output_rank + ) + ) + ops.post_ops.insert( + 0, + builder.create_transpose_operator_after( + t_op, 0, list(first_to_last_perm) + ), + ) + t_op.tmp_outputs[0].tensor_format = TensorFormat.CHANNELS_FIRST + + return new_shape diff --git a/backends/nxp/backend/ir/converter/quantization_utils.py b/backends/nxp/backend/ir/converter/quantization_utils.py new file mode 100755 index 00000000000..d9e7674d953 --- /dev/null +++ b/backends/nxp/backend/ir/converter/quantization_utils.py @@ -0,0 +1,484 @@ +# Copyright 2023 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import copy +from typing import Iterable, List, Optional + +import executorch.backends.nxp.backend.ir.converter.builder.model_builder as model_builder + +import numpy as np +from executorch.backends.nxp.backend.ir import logger as logger +from executorch.backends.nxp.backend.ir.converter.conversion.translator import ( + tf_lite_type_to_numpy, +) +from executorch.backends.nxp.backend.ir.lib.tflite import TensorType as tflTensorType +from executorch.backends.nxp.backend.ir.lib.tflite.TensorType import TensorType +from executorch.backends.nxp.backend.ir.tflite_generator import ( + tflite_model as tflite_model, +) + + +def quantization_is_equal( + x_scale: np.ndarray, + x_zp: np.ndarray, + x_type: TensorType, + y_scale: np.ndarray, + y_zp: np.ndarray, + y_type: TensorType, +) -> bool: + """Determine if provided quantization parameters of tensors 'x' and 'y' are the same. + + :param x_scale: Scale of the 'x' tensor. + :param x_zp: Zero point of the 'x' tensor. + :param x_type: TFLite data type of the 'x' tensor. + :param y_scale: Scale of the 'y' tensor. + :param y_zp: Zero point of the 'y' tensor. 
+ :param y_type: TFLite data type of the 'y' tensor. + :return: True, if the quantization parameters are equal. + """ + if x_type != y_type: + return False + + if not (x_scale.size == x_zp.size == y_scale.size == y_zp.size): + return False + + x_scale, x_zp = quantization_params_to_lists(x_scale, x_zp) + y_scale, y_zp = quantization_params_to_lists(y_scale, y_zp) + + return all( + x_s == y_s and x_z == y_z + for x_s, y_s, x_z, y_z in zip(x_scale, y_scale, x_zp, y_zp) + ) + + +def quantization_params_to_lists( + scale: np.ndarray, zero_point: np.ndarray +) -> (List[float], List[int]): + if (scale is None) or (zero_point is None): + logger.e( + logger.Code.INTERNAL_ERROR, + "Missing zero_point and/or scale quantization params when converting to list!", + ) + + if (scale.size == 1) and (zero_point.size == 1): + # Per tensor quantization + scale = [scale.item()] + zero_point = [zero_point.item()] + elif (scale.size != 1) and (zero_point.size != 1): + # Per channel quantization + scale = scale.tolist() + zero_point = zero_point.tolist() + else: + logger.e( + logger.Code.CONVERSION_IMPOSSIBLE, + "TFLite doesn't support combination of per-channel and per-tensor quantization params.", + ) + + return scale, zero_point + + +def is_quantization_valid(scale, zero_point): + return scale.size == zero_point.size + + +def is_per_tensor_quantized(scale, zero_point): + return (scale.size == 1) and (zero_point.size == 1) + + +def is_per_channel_quantized(scale, zero_point): + return is_quantization_valid(scale, zero_point) and not is_per_tensor_quantized( + scale, zero_point + ) + + +def get_symmetric_zero_point_for_type(tensor_type: TensorType): + match tensor_type: + case TensorType.INT8: + return 0 + case TensorType.UINT8: + return 128 + case _: + logger.e( + logger.Code.INTERNAL_ERROR, + f"Attempt to get zero point definition for type: {tensor_type}", + ) + + +def _validate_or_set_quant_params( + tensor: tflite_model.Tensor, quant: tflite_model.Quantization +) -> bool: + """ + Set quantization parameters 'quant' in the tensor. If tensor already has any quantization parameters, + checks if equals to quant + :param tensor: tensor where to set the quantization parameters + :param quant: Quantization parameters + :return: False if validation failed, True otherwise + """ + + if tensor.quantization is not None: + return tensor.quantization == quant + tensor.quantization = copy.copy(quant) + + return True + + +def propagate_quantization( + from_tensor: tflite_model.Tensor, to_tensor: tflite_model.Tensor +): + """ + Propagates quantization parameters from from_tensor to to_tensor. If to_tensor already has the params set + checks the consistency. + :raises: logger.Error - INVALID_ONNX_MODEL + """ + + if ( + from_tensor.quantization is not None + and from_tensor.quantization.is_per_channel() + ): + # Note: For simplicity the quantization propagation is allowed only for per tensor quantized tensors. + # Typically, operator inputs and outputs are per-tensor quantized. Per channel is only for weights. 
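+        # (Illustrative shapes: a per-tensor scale holds a single element, while a per-channel Conv2d
+        # weight scale holds one element per output channel.)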
+ logger.e( + logger.Code.NOT_IMPLEMENTED, + "Propagation of quantization for PerChannel quantized tensors is not yet supported", + ) + + # noinspection PyTypeChecker + if not _validate_or_set_quant_params(to_tensor, from_tensor.quantization): + logger.e( + logger.Code.INVALID_ONNX_MODEL, + f'Mismatched quantization parameters between tensors "{from_tensor.name}" and "{to_tensor.name}"', + ) + + +def set_quantization_parameters_to_tensor( + tflite_tensor: tflite_model.Tensor, + scale: np.ndarray, + zero_point: np.ndarray, + quantized_dimension: int = 0, +): + """Create a TFLite QuantizationParameters object, initialize it from given parameters and add it to the + 'tflite_tensor'. + :param tflite_tensor: The TFLite tensor in the model, to add the quantization to. + :param scale: The data of the tensor, which is an input of a quantized ONNX operator and represents the + quantization scale. + :param zero_point: The data of the tensor, which is an input of a quantized ONNX operator and represents the + quantization zero point. + :param quantized_dimension: The quantized dimension attribute of TFLite QuantizationParameters. + """ + if (scale is None) or (zero_point is None): + logger.e( + logger.Code.NOT_IMPLEMENTED, + "Conversion of ONNX quantized operators is only supported when " + "the quantization parameters are static!", + ) + + if (scale.size == 1) and (zero_point.size == 1): + # Per tensor quantization + scale = [scale.item()] + zero_point = [zero_point.item()] + + elif (scale.size != 1) and (zero_point.size != 1): + # Per channel quantization + + if scale.size != zero_point.size: + logger.e( + logger.Code.INVALID_ONNX_MODEL, + f"The per channel quantization parameters of ONNX tensor " + f"'{tflite_tensor.name}' are of different sizes! ('{scale.size}'" + f" != '{zero_point.size}')", + ) + + quantized_dimension_size = tflite_tensor.shape.get(quantized_dimension) + if scale.size != quantized_dimension_size: + logger.e( + logger.Code.INVALID_ONNX_MODEL, + f"The ONNX per channel quantization parameter vectors do not " + f"match the size of the quantized dimension! ('{scale.size}' != " + f"'{quantized_dimension_size}')", + ) + + scale = scale.tolist() + zero_point = zero_point.tolist() + + else: + # Combination of per tensor and per channel quantization parameters + logger.e( + logger.Code.INVALID_ONNX_MODEL, + f"ONNX tensor '{tflite_tensor.name}' uses a combination of per " + f"tensor and per channel quantization parameters. Conversion to " + f"TFLite is not possible!", + ) + + quant = tflite_model.Quantization( + scale=tflite_model.Scale(scale), + zero_point=tflite_model.ZeroPoint(zero_point), + quantized_dimension=quantized_dimension, + ) + if not _validate_or_set_quant_params(tflite_tensor, quant): + logger.e( + logger.Code.INVALID_ONNX_MODEL, + f'Mismatched quantization parameters between tensors: "{tflite_tensor.name}" already ' + f"has the quantization params set", + ) + + +def calculate_uint_to_int_re_quantization_zero_point( + data_type_byte_size: int, old_zero_point: Iterable[int] +) -> np.ndarray: + """ + Calculate the new zero points, after a quantized tensor with an unsigned int data type is re-quantized to + a signed type. + :param data_type_byte_size: Size of the data type that is used, in Bytes. For example 1 for INT8. + :param old_zero_point: The zero point quantisation parameter, of the original data, before re-quantization. + :return: The new zero point quantisation parameter, after re-quantization. 
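+    Example (illustrative): data_type_byte_size=1 gives a shift of 2**7 = 128, so an unsigned zero
+    point of [128] maps to a signed zero point of [0].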
+ """ + data_type_bit_size = 8 * data_type_byte_size + zero_point_shift = 2 ** (data_type_bit_size - 1) + return np.asarray(np.subtract(np.array(old_zero_point, np.int32), zero_point_shift)) + + +def _re_quantize_uint8_to_int8(tensor_data: np.ndarray) -> np.ndarray: + """Re-quantize static uint8 data to int8.""" + int16_data = np.asarray(tensor_data, np.int16) + return np.array(int16_data - 128, np.int8) + + +def quantize_int8( + data: np.ndarray, scale: List[float], zero_point: List[int] +) -> np.ndarray: + new_data = np.add(np.round(np.divide(data, scale)), zero_point) + return np.clip(new_data, -128, 127).astype(np.int8) + + +def quantize_uint8( + data: np.ndarray, scale: List[float], zero_point: List[int] +) -> np.ndarray: + new_data = np.add(np.round(np.divide(data, scale)), zero_point) + return np.clip(new_data, 0, 255).astype(np.uint8) + + +def quantize_int32( + data: np.ndarray, scale: List[float], zero_point: List[int] +) -> np.ndarray: + new_data = np.add(np.round(np.divide(data, scale)), zero_point) + return np.clip(new_data, -2_147_483_648, 2_147_483_648).astype(np.int32) + + +def dequantize( + data: np.ndarray, scale: List[float], zero_point: List[int] +) -> np.ndarray: + return np.multiply( + np.subtract(np.array(data, dtype=np.float32), zero_point), + scale, + dtype=np.float32, + ) + + +def re_quantize_static_tensor( + builder: "model_builder.ModelBuilder", + tflite_tensor: tflite_model.Tensor, + to_type: tflTensorType.TensorType, + new_scale: Optional[List[float]] = None, + new_zero_point: Optional[List[int]] = None, +) -> tflite_model.Tensor: + """Create a new TFLite Tensor with new quantization parameters, type and data. + + :param builder: A ModelBuilder instance. + :param tflite_tensor: TFLite tensor to re-quantize. + :param to_type: The TFLite TensorType, that the tensor will be re-quantized to. + :param new_scale: New scale quantization parameter. Used only when re-quantizing to the same type. + :param new_zero_point: New zero point quantization parameter. Used only when re-quantizing to the same type. + :return: A new re-quantized tensor. 
+ """ + if tflite_tensor.quantization is None: + logger.e( + logger.Code.INTERNAL_ERROR, + "translator.re_quantize_static_tensor(): Got tensor without quantization!", + ) + + if tflite_tensor.tmp_buffer.data is None: + logger.e( + logger.Code.INTERNAL_ERROR, + "translator.re_quantize_static_tensor(): Got tensor without static data!", + ) + + new_dtype = tf_lite_type_to_numpy(to_type) + re_quantized_tensor = builder.duplicate_tensor(tflite_tensor) + tensor_data = re_quantized_tensor.tmp_buffer.data + + if tensor_data.dtype == np.uint8 and new_dtype == np.int8: # INT8 -> UINT8 + re_quantized_tensor.tmp_buffer.data = _re_quantize_uint8_to_int8(tensor_data) + re_quantized_tensor.type = tflTensorType.TensorType.INT8 + calculated_zero_point = calculate_uint_to_int_re_quantization_zero_point( + 1, re_quantized_tensor.quantization.zero_point.vector + ) + re_quantized_tensor.quantization.zero_point = tflite_model.ZeroPoint( + list(calculated_zero_point) + ) + + elif tensor_data.dtype == np.int32 and new_dtype == np.int8: # INT32 -> INT8 + if new_zero_point is None or new_scale is None: + logger.e( + logger.Code.INTERNAL_ERROR, + "Missing new zero_point or new scale when re-quantizing tensor.", + ) + + old_zp = re_quantized_tensor.quantization.zero_point.vector + old_scale = re_quantized_tensor.quantization.scale.vector + float_data = dequantize(tensor_data, old_scale, old_zp) + int8_data = quantize_int8(float_data, new_scale, new_zero_point) + + re_quantized_tensor.tmp_buffer.data = int8_data + re_quantized_tensor.type = tflTensorType.TensorType.INT8 + re_quantized_tensor.quantization.zero_point = tflite_model.ZeroPoint( + list(new_zero_point) + ) + re_quantized_tensor.quantization.scale = tflite_model.Scale(list(new_scale)) + + elif tensor_data.dtype == np.int32 and new_dtype == np.uint8: # INT32 -> UINT8 + if new_zero_point is None or new_scale is None: + logger.e( + logger.Code.INTERNAL_ERROR, + "Missing new zero_point or new scale when re-quantizing tensor.", + ) + + old_zp = re_quantized_tensor.quantization.zero_point.vector + old_scale = re_quantized_tensor.quantization.scale.vector + float_data = dequantize(tensor_data, old_scale, old_zp) + uint8_data = quantize_uint8(float_data, new_scale, new_zero_point) + + re_quantized_tensor.tmp_buffer.data = uint8_data + re_quantized_tensor.type = tflTensorType.TensorType.UINT8 + re_quantized_tensor.quantization.zero_point = tflite_model.ZeroPoint( + list(new_zero_point) + ) + re_quantized_tensor.quantization.scale = tflite_model.Scale(list(new_scale)) + + elif tensor_data.dtype == np.int8 and new_dtype == np.int8: # INT8 -> INT8 + # Re-quantizing int8 tensor data with different quantization parameters + if new_zero_point is None or new_scale is None: + logger.e( + logger.Code.INTERNAL_ERROR, + "Missing new zero_point or new scale when re-quantizing tensor.", + ) + + zero_point_data = re_quantized_tensor.quantization.zero_point.vector + scale_data = re_quantized_tensor.quantization.scale.vector + new_tensor_data = dequantize(tensor_data, scale_data, zero_point_data) + + re_quantized_tensor.tmp_buffer.data = quantize_int8( + new_tensor_data, new_scale, new_zero_point + ) + re_quantized_tensor.quantization.scale = tflite_model.Scale(new_scale) + re_quantized_tensor.quantization.zero_point = tflite_model.ZeroPoint( + new_zero_point + ) + + elif tensor_data.dtype == np.int32 and new_dtype == np.int32: # INT32 -> INT32 + if new_zero_point is None or new_scale is None: + logger.e( + logger.Code.INTERNAL_ERROR, + "Missing new zero_point or new scale when 
re-quantizing tensor.", + ) + + old_zp = re_quantized_tensor.quantization.zero_point.vector + old_scale = re_quantized_tensor.quantization.scale.vector + float_data = dequantize(tensor_data, old_scale, old_zp) + int32_data = quantize_int32(float_data, new_scale, new_zero_point) + + re_quantized_tensor.tmp_buffer.data = int32_data + re_quantized_tensor.quantization.zero_point = tflite_model.ZeroPoint( + list(new_zero_point) + ) + re_quantized_tensor.quantization.scale = tflite_model.Scale(list(new_scale)) + + else: + logger.e( + logger.Code.NOT_IMPLEMENTED, + f"Re-quantization of static tensors from type '{tensor_data.dtype}' " + f"to type '{to_type}' is not yet implemented!", + ) + + return re_quantized_tensor + + +def quantize_static_float_tensor( + builder: "model_builder.ModelBuilder", + tflite_tensor: tflite_model.Tensor, + to_type: tflTensorType.TensorType, + scale: List[float], + zero_point: List[int], + quantized_dimension: int = 0, +) -> tflite_model.Tensor: + """Quantize tensor 'tflite_tensor' with passed quantization params. + + :param builder: A ModelBuilder instance. + :param tflite_tensor: TFLite tensor to quantize. + :param to_type: The TFLite TensorType, that the tensor will be quantized to. + :param scale: Scale quantization parameter. + :param zero_point: Zero point quantization parameter. + :param quantized_dimension: Quantized dimension. + """ + if tflite_tensor.quantization is not None: + logger.e(logger.Code.INTERNAL_ERROR, "Got tensor with quantization!") + + if tflite_tensor.tmp_buffer.data is None: + logger.e(logger.Code.INTERNAL_ERROR, "Got tensor without static data!") + + quantized_tensor = builder.duplicate_tensor(tflite_tensor) + tensor_data = quantized_tensor.tmp_buffer.data + + if zero_point is None or scale is None: + logger.e( + logger.Code.INTERNAL_ERROR, + "Missing new zero_point or new scale when quantizing tensor.", + ) + + new_dtype = tf_lite_type_to_numpy(to_type) + + if tensor_data.dtype == np.float32 and new_dtype == np.int8: + int8_data = quantize_int8(tensor_data, scale, zero_point) + + quantized_tensor.tmp_buffer.data = int8_data + quantized_tensor.type = tflTensorType.TensorType.INT8 + quantized_tensor.quantization = tflite_model.Quantization() + quantized_tensor.quantization.zero_point = tflite_model.ZeroPoint( + list(zero_point) + ) + quantized_tensor.quantization.scale = tflite_model.Scale(list(scale)) + quantized_tensor.quantization.quantized_dimension = quantized_dimension + + elif tensor_data.dtype == np.float32 and new_dtype == np.uint8: + uint8_data = quantize_uint8(tensor_data, scale, zero_point) + + quantized_tensor.tmp_buffer.data = uint8_data + quantized_tensor.type = tflTensorType.TensorType.UINT8 + quantized_tensor.quantization = tflite_model.Quantization() + quantized_tensor.quantization.zero_point = tflite_model.ZeroPoint( + list(zero_point) + ) + quantized_tensor.quantization.scale = tflite_model.Scale(list(scale)) + quantized_tensor.quantization.quantized_dimension = quantized_dimension + + elif tensor_data.dtype == np.float32 and new_dtype == np.int32: + int32_data = quantize_int32(tensor_data, scale, zero_point) + + quantized_tensor.tmp_buffer.data = int32_data + quantized_tensor.type = tflTensorType.TensorType.INT32 + quantized_tensor.quantization = tflite_model.Quantization() + quantized_tensor.quantization.zero_point = tflite_model.ZeroPoint( + list(zero_point) + ) + quantized_tensor.quantization.scale = tflite_model.Scale(list(scale)) + quantized_tensor.quantization.quantized_dimension = quantized_dimension + + else: + 
logger.e( + logger.Code.NOT_IMPLEMENTED, + f"Quantization of static tensors from type '{tensor_data.dtype}' " + f"to type '{to_type}' is not yet implemented!", + ) + + return quantized_tensor diff --git a/backends/nxp/backend/ir/converter/tensor_utils.py b/backends/nxp/backend/ir/converter/tensor_utils.py new file mode 100755 index 00000000000..efa0bdc2a42 --- /dev/null +++ b/backends/nxp/backend/ir/converter/tensor_utils.py @@ -0,0 +1,50 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from typing import Optional + +from executorch.backends.nxp.backend.ir import logger as logger +from executorch.backends.nxp.backend.ir.tflite_generator import ( + tflite_model as tflite_model, +) + + +def _buffer_has_data(t_buffer: tflite_model.Buffer) -> Optional[bool]: + """Determine if given buffer has any data in it.""" + + try: + if t_buffer.data is None: + return False + + size = t_buffer.data.size + return size != 0 + + except Exception as e: + logger.d("'ModelBuilder.bufferHasData()' failed!") + print(e) + return None + + +def tensor_has_data(t_tensor: tflite_model.Tensor) -> bool: + """Determine if given TFLite tensor has any data.""" + + if t_tensor.tmp_buffer is None: + return False + + res = _buffer_has_data(t_tensor.tmp_buffer) + if res is None: + res = False + + return res + + +def all_tensors_are_static(*list_of_tensors) -> bool: + """Return True, if all tensors in 'list_of_tensors' have data stored in them. + + :param list_of_tensors: List of TFLite tensors to check. + :return: True, if all tensors are static. False, if at least 1 is not static. + """ + + return all(tensor_has_data(t) for t in list_of_tensors) diff --git a/backends/nxp/backend/ir/lib/LICENSE_APACHE_2.0 b/backends/nxp/backend/ir/lib/LICENSE_APACHE_2.0 new file mode 100644 index 00000000000..12d255f8e0f --- /dev/null +++ b/backends/nxp/backend/ir/lib/LICENSE_APACHE_2.0 @@ -0,0 +1,251 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +## Some of TensorFlow's code is derived from Caffe, which is subject to the following copyright notice: + +COPYRIGHT + +All contributions by the University of California: + +Copyright (c) 2014, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: + +Copyright (c) 2014, the respective contributors +All rights reserved. + +Caffe uses a shared copyright model: each contributor holds copyright over +their contributions to Caffe. The project versioning records all such +contribution and copyright details. If a contributor wants to further mark +their specific copyright on a particular contribution, they should indicate +their copyright solely in the commit message of the change when it is +committed. + +LICENSE + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. 
Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +CONTRIBUTION AGREEMENT + +By contributing to the BVLC/caffe repository through pull-request, comment, +or otherwise, the contributor releases their content to the +license and copyright terms herein. \ No newline at end of file diff --git a/backends/nxp/backend/ir/lib/__init__.py b/backends/nxp/backend/ir/lib/__init__.py new file mode 100755 index 00000000000..e69de29bb2d diff --git a/backends/nxp/backend/ir/lib/tflite/ATan2Options.py b/backends/nxp/backend/ir/lib/tflite/ATan2Options.py new file mode 100755 index 00000000000..7418141a593 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/ATan2Options.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class ATan2Options(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ATan2Options() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsATan2Options(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def ATan2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # ATan2Options + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def ATan2OptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + ATan2OptionsStart(builder) + + +def ATan2OptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return ATan2OptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/AbsOptions.py b/backends/nxp/backend/ir/lib/tflite/AbsOptions.py new file mode 100755 index 00000000000..3cd401c07ee --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/AbsOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class AbsOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = AbsOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsAbsOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def AbsOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # AbsOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def AbsOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + AbsOptionsStart(builder) + + +def AbsOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return AbsOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/ActivationFunctionType.py b/backends/nxp/backend/ir/lib/tflite/ActivationFunctionType.py new file mode 100755 index 00000000000..a3235396477 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/ActivationFunctionType.py @@ -0,0 +1,12 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + + +class ActivationFunctionType(object): + NONE = 0 + RELU = 1 + RELU_N1_TO_1 = 2 + RELU6 = 3 + TANH = 4 + SIGN_BIT = 5 diff --git a/backends/nxp/backend/ir/lib/tflite/AddNOptions.py b/backends/nxp/backend/ir/lib/tflite/AddNOptions.py new file mode 100755 index 00000000000..b3a45971094 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/AddNOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class AddNOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = AddNOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsAddNOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def AddNOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # AddNOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def AddNOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + AddNOptionsStart(builder) + + +def AddNOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return AddNOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/AddOptions.py b/backends/nxp/backend/ir/lib/tflite/AddOptions.py new file mode 100755 index 00000000000..1d3625f3c9c --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/AddOptions.py @@ -0,0 +1,82 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class AddOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = AddOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsAddOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def AddOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # AddOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # AddOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # AddOptions + def PotScaleInt16(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return True + + +def AddOptionsStart(builder): + builder.StartObject(2) + + +def Start(builder): + AddOptionsStart(builder) + + +def AddOptionsAddFusedActivationFunction(builder, fusedActivationFunction): + builder.PrependInt8Slot(0, fusedActivationFunction, 0) + + +def AddFusedActivationFunction(builder, fusedActivationFunction): + AddOptionsAddFusedActivationFunction(builder, fusedActivationFunction) + + +def AddOptionsAddPotScaleInt16(builder, potScaleInt16): + builder.PrependBoolSlot(1, potScaleInt16, 1) + + +def AddPotScaleInt16(builder, potScaleInt16): + AddOptionsAddPotScaleInt16(builder, potScaleInt16) + + +def AddOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return AddOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/ArgMaxOptions.py b/backends/nxp/backend/ir/lib/tflite/ArgMaxOptions.py new file mode 100755 index 00000000000..4e055b96710 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/ArgMaxOptions.py @@ -0,0 +1,65 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class ArgMaxOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = 
ArgMaxOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsArgMaxOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def ArgMaxOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # ArgMaxOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ArgMaxOptions + def OutputType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + +def ArgMaxOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + ArgMaxOptionsStart(builder) + + +def ArgMaxOptionsAddOutputType(builder, outputType): + builder.PrependInt8Slot(0, outputType, 0) + + +def AddOutputType(builder, outputType): + ArgMaxOptionsAddOutputType(builder, outputType) + + +def ArgMaxOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return ArgMaxOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/ArgMinOptions.py b/backends/nxp/backend/ir/lib/tflite/ArgMinOptions.py new file mode 100755 index 00000000000..163468c34e9 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/ArgMinOptions.py @@ -0,0 +1,65 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class ArgMinOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ArgMinOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsArgMinOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def ArgMinOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # ArgMinOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ArgMinOptions + def OutputType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + +def ArgMinOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + ArgMinOptionsStart(builder) + + +def ArgMinOptionsAddOutputType(builder, outputType): + builder.PrependInt8Slot(0, outputType, 0) + + +def AddOutputType(builder, outputType): + ArgMinOptionsAddOutputType(builder, outputType) + + +def ArgMinOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return ArgMinOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/AssignVariableOptions.py b/backends/nxp/backend/ir/lib/tflite/AssignVariableOptions.py new file mode 100755 index 00000000000..a0c8365b1f3 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/AssignVariableOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class AssignVariableOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = AssignVariableOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsAssignVariableOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def AssignVariableOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # AssignVariableOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def AssignVariableOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + AssignVariableOptionsStart(builder) + + +def AssignVariableOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return AssignVariableOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/BatchMatMulOptions.py b/backends/nxp/backend/ir/lib/tflite/BatchMatMulOptions.py new file mode 100755 index 00000000000..cbd8f4a198f --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/BatchMatMulOptions.py @@ -0,0 +1,101 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class BatchMatMulOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = BatchMatMulOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsBatchMatMulOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def BatchMatMulOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # BatchMatMulOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # BatchMatMulOptions + def AdjX(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + # BatchMatMulOptions + def AdjY(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + # BatchMatMulOptions + def AsymmetricQuantizeInputs(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + +def BatchMatMulOptionsStart(builder): + builder.StartObject(3) + + +def Start(builder): + BatchMatMulOptionsStart(builder) + + +def BatchMatMulOptionsAddAdjX(builder, adjX): + builder.PrependBoolSlot(0, adjX, 0) + + +def AddAdjX(builder, adjX): + BatchMatMulOptionsAddAdjX(builder, adjX) + + +def BatchMatMulOptionsAddAdjY(builder, adjY): + builder.PrependBoolSlot(1, adjY, 0) + + +def AddAdjY(builder, adjY): + BatchMatMulOptionsAddAdjY(builder, adjY) + + +def BatchMatMulOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): + builder.PrependBoolSlot(2, asymmetricQuantizeInputs, 0) + + +def AddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): + BatchMatMulOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs) + + +def BatchMatMulOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return BatchMatMulOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/BatchToSpaceNDOptions.py b/backends/nxp/backend/ir/lib/tflite/BatchToSpaceNDOptions.py new file mode 100755 index 00000000000..6caac9e63d5 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/BatchToSpaceNDOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class BatchToSpaceNDOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = BatchToSpaceNDOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsBatchToSpaceNDOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def BatchToSpaceNDOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # BatchToSpaceNDOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def BatchToSpaceNDOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + BatchToSpaceNDOptionsStart(builder) + + +def BatchToSpaceNDOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return BatchToSpaceNDOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/BidirectionalSequenceLSTMOptions.py b/backends/nxp/backend/ir/lib/tflite/BidirectionalSequenceLSTMOptions.py new file mode 100755 index 00000000000..3d9bed3ae03 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/BidirectionalSequenceLSTMOptions.py @@ -0,0 +1,160 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class BidirectionalSequenceLSTMOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = BidirectionalSequenceLSTMOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsBidirectionalSequenceLSTMOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def BidirectionalSequenceLSTMOptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # BidirectionalSequenceLSTMOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # BidirectionalSequenceLSTMOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # BidirectionalSequenceLSTMOptions + def CellClip(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get( + flatbuffers.number_types.Float32Flags, o + self._tab.Pos + ) + return 0.0 + + # BidirectionalSequenceLSTMOptions + def ProjClip(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get( + flatbuffers.number_types.Float32Flags, o + self._tab.Pos + ) + return 0.0 + + # BidirectionalSequenceLSTMOptions + def MergeOutputs(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + # BidirectionalSequenceLSTMOptions + def TimeMajor(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return True + + # BidirectionalSequenceLSTMOptions + def AsymmetricQuantizeInputs(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + +def 
BidirectionalSequenceLSTMOptionsStart(builder): + builder.StartObject(6) + + +def Start(builder): + BidirectionalSequenceLSTMOptionsStart(builder) + + +def BidirectionalSequenceLSTMOptionsAddFusedActivationFunction( + builder, fusedActivationFunction +): + builder.PrependInt8Slot(0, fusedActivationFunction, 0) + + +def AddFusedActivationFunction(builder, fusedActivationFunction): + BidirectionalSequenceLSTMOptionsAddFusedActivationFunction( + builder, fusedActivationFunction + ) + + +def BidirectionalSequenceLSTMOptionsAddCellClip(builder, cellClip): + builder.PrependFloat32Slot(1, cellClip, 0.0) + + +def AddCellClip(builder, cellClip): + BidirectionalSequenceLSTMOptionsAddCellClip(builder, cellClip) + + +def BidirectionalSequenceLSTMOptionsAddProjClip(builder, projClip): + builder.PrependFloat32Slot(2, projClip, 0.0) + + +def AddProjClip(builder, projClip): + BidirectionalSequenceLSTMOptionsAddProjClip(builder, projClip) + + +def BidirectionalSequenceLSTMOptionsAddMergeOutputs(builder, mergeOutputs): + builder.PrependBoolSlot(3, mergeOutputs, 0) + + +def AddMergeOutputs(builder, mergeOutputs): + BidirectionalSequenceLSTMOptionsAddMergeOutputs(builder, mergeOutputs) + + +def BidirectionalSequenceLSTMOptionsAddTimeMajor(builder, timeMajor): + builder.PrependBoolSlot(4, timeMajor, 1) + + +def AddTimeMajor(builder, timeMajor): + BidirectionalSequenceLSTMOptionsAddTimeMajor(builder, timeMajor) + + +def BidirectionalSequenceLSTMOptionsAddAsymmetricQuantizeInputs( + builder, asymmetricQuantizeInputs +): + builder.PrependBoolSlot(5, asymmetricQuantizeInputs, 0) + + +def AddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): + BidirectionalSequenceLSTMOptionsAddAsymmetricQuantizeInputs( + builder, asymmetricQuantizeInputs + ) + + +def BidirectionalSequenceLSTMOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return BidirectionalSequenceLSTMOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/BidirectionalSequenceRNNOptions.py b/backends/nxp/backend/ir/lib/tflite/BidirectionalSequenceRNNOptions.py new file mode 100755 index 00000000000..7fb26888b5d --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/BidirectionalSequenceRNNOptions.py @@ -0,0 +1,126 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class BidirectionalSequenceRNNOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = BidirectionalSequenceRNNOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsBidirectionalSequenceRNNOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def BidirectionalSequenceRNNOptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # BidirectionalSequenceRNNOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # BidirectionalSequenceRNNOptions + def TimeMajor(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + # BidirectionalSequenceRNNOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # BidirectionalSequenceRNNOptions + def MergeOutputs(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + # BidirectionalSequenceRNNOptions + def AsymmetricQuantizeInputs(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + +def BidirectionalSequenceRNNOptionsStart(builder): + builder.StartObject(4) + + +def Start(builder): + BidirectionalSequenceRNNOptionsStart(builder) + + +def BidirectionalSequenceRNNOptionsAddTimeMajor(builder, timeMajor): + builder.PrependBoolSlot(0, timeMajor, 0) + + +def AddTimeMajor(builder, timeMajor): + BidirectionalSequenceRNNOptionsAddTimeMajor(builder, timeMajor) + + +def BidirectionalSequenceRNNOptionsAddFusedActivationFunction( + builder, fusedActivationFunction +): + builder.PrependInt8Slot(1, fusedActivationFunction, 0) + + +def AddFusedActivationFunction(builder, fusedActivationFunction): + BidirectionalSequenceRNNOptionsAddFusedActivationFunction( + builder, fusedActivationFunction + ) + + +def BidirectionalSequenceRNNOptionsAddMergeOutputs(builder, mergeOutputs): + builder.PrependBoolSlot(2, mergeOutputs, 0) + + +def AddMergeOutputs(builder, mergeOutputs): + BidirectionalSequenceRNNOptionsAddMergeOutputs(builder, mergeOutputs) + + +def BidirectionalSequenceRNNOptionsAddAsymmetricQuantizeInputs( + builder, asymmetricQuantizeInputs +): + builder.PrependBoolSlot(3, asymmetricQuantizeInputs, 0) + + +def AddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): + BidirectionalSequenceRNNOptionsAddAsymmetricQuantizeInputs( + builder, asymmetricQuantizeInputs + ) + + +def BidirectionalSequenceRNNOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return BidirectionalSequenceRNNOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/BitcastOptions.py b/backends/nxp/backend/ir/lib/tflite/BitcastOptions.py new file mode 100755 index 00000000000..8f8569ab950 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/BitcastOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class BitcastOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = 
BitcastOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsBitcastOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def BitcastOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # BitcastOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def BitcastOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + BitcastOptionsStart(builder) + + +def BitcastOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return BitcastOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/BitwiseXorOptions.py b/backends/nxp/backend/ir/lib/tflite/BitwiseXorOptions.py new file mode 100755 index 00000000000..b17cd8047c6 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/BitwiseXorOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class BitwiseXorOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = BitwiseXorOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsBitwiseXorOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def BitwiseXorOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # BitwiseXorOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def BitwiseXorOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + BitwiseXorOptionsStart(builder) + + +def BitwiseXorOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return BitwiseXorOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/BroadcastToOptions.py b/backends/nxp/backend/ir/lib/tflite/BroadcastToOptions.py new file mode 100755 index 00000000000..dca37ff4b1e --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/BroadcastToOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class BroadcastToOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = BroadcastToOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsBroadcastToOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def BroadcastToOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # BroadcastToOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def BroadcastToOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + BroadcastToOptionsStart(builder) + + +def BroadcastToOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return BroadcastToOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/BucketizeOptions.py b/backends/nxp/backend/ir/lib/tflite/BucketizeOptions.py new file mode 100755 index 00000000000..f64e7c4e64f --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/BucketizeOptions.py @@ -0,0 +1,98 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class BucketizeOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = BucketizeOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsBucketizeOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def BucketizeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # BucketizeOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # BucketizeOptions + def Boundaries(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Float32Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4), + ) + return 0 + + # BucketizeOptions + def BoundariesAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o) + return 0 + + # BucketizeOptions + def BoundariesLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # BucketizeOptions + def BoundariesIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + +def BucketizeOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + BucketizeOptionsStart(builder) + + +def BucketizeOptionsAddBoundaries(builder, boundaries): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(boundaries), 0 + ) + + +def AddBoundaries(builder, boundaries): + BucketizeOptionsAddBoundaries(builder, boundaries) + + +def BucketizeOptionsStartBoundariesVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartBoundariesVector(builder, numElems: int) -> int: + return BucketizeOptionsStartBoundariesVector(builder, numElems) + + +def BucketizeOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return BucketizeOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/Buffer.py b/backends/nxp/backend/ir/lib/tflite/Buffer.py 
new file mode 100755 index 00000000000..5a36140725c --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/Buffer.py @@ -0,0 +1,132 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class Buffer(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = Buffer() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsBuffer(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def BufferBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # Buffer + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # Buffer + def Data(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Uint8Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1), + ) + return 0 + + # Buffer + def DataAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o) + return 0 + + # Buffer + def DataLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Buffer + def DataIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + # Buffer + def Offset(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get( + flatbuffers.number_types.Uint64Flags, o + self._tab.Pos + ) + return 0 + + # Buffer + def Size(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get( + flatbuffers.number_types.Uint64Flags, o + self._tab.Pos + ) + return 0 + + +def BufferStart(builder): + builder.StartObject(3) + + +def Start(builder): + BufferStart(builder) + + +def BufferAddData(builder, data): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(data), 0 + ) + + +def AddData(builder, data): + BufferAddData(builder, data) + + +def BufferStartDataVector(builder, numElems): + return builder.StartVector(1, numElems, 1) + + +def StartDataVector(builder, numElems: int) -> int: + return BufferStartDataVector(builder, numElems) + + +def BufferAddOffset(builder, offset): + builder.PrependUint64Slot(1, offset, 0) + + +def AddOffset(builder, offset): + BufferAddOffset(builder, offset) + + +def BufferAddSize(builder, size): + builder.PrependUint64Slot(2, size, 0) + + +def AddSize(builder, size): + BufferAddSize(builder, size) + + +def BufferEnd(builder): + return builder.EndObject() + + +def End(builder): + return BufferEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/BuiltinOperator.py b/backends/nxp/backend/ir/lib/tflite/BuiltinOperator.py new file mode 100755 index 00000000000..6dbadcc91c3 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/BuiltinOperator.py @@ -0,0 +1,212 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + + +class BuiltinOperator(object): + ADD = 0 + 
AVERAGE_POOL_2D = 1 + CONCATENATION = 2 + CONV_2D = 3 + DEPTHWISE_CONV_2D = 4 + DEPTH_TO_SPACE = 5 + DEQUANTIZE = 6 + EMBEDDING_LOOKUP = 7 + FLOOR = 8 + FULLY_CONNECTED = 9 + HASHTABLE_LOOKUP = 10 + L2_NORMALIZATION = 11 + L2_POOL_2D = 12 + LOCAL_RESPONSE_NORMALIZATION = 13 + LOGISTIC = 14 + LSH_PROJECTION = 15 + LSTM = 16 + MAX_POOL_2D = 17 + MUL = 18 + RELU = 19 + RELU_N1_TO_1 = 20 + RELU6 = 21 + RESHAPE = 22 + RESIZE_BILINEAR = 23 + RNN = 24 + SOFTMAX = 25 + SPACE_TO_DEPTH = 26 + SVDF = 27 + TANH = 28 + CONCAT_EMBEDDINGS = 29 + SKIP_GRAM = 30 + CALL = 31 + CUSTOM = 32 + EMBEDDING_LOOKUP_SPARSE = 33 + PAD = 34 + UNIDIRECTIONAL_SEQUENCE_RNN = 35 + GATHER = 36 + BATCH_TO_SPACE_ND = 37 + SPACE_TO_BATCH_ND = 38 + TRANSPOSE = 39 + MEAN = 40 + SUB = 41 + DIV = 42 + SQUEEZE = 43 + UNIDIRECTIONAL_SEQUENCE_LSTM = 44 + STRIDED_SLICE = 45 + BIDIRECTIONAL_SEQUENCE_RNN = 46 + EXP = 47 + TOPK_V2 = 48 + SPLIT = 49 + LOG_SOFTMAX = 50 + DELEGATE = 51 + BIDIRECTIONAL_SEQUENCE_LSTM = 52 + CAST = 53 + PRELU = 54 + MAXIMUM = 55 + ARG_MAX = 56 + MINIMUM = 57 + LESS = 58 + NEG = 59 + PADV2 = 60 + GREATER = 61 + GREATER_EQUAL = 62 + LESS_EQUAL = 63 + SELECT = 64 + SLICE = 65 + SIN = 66 + TRANSPOSE_CONV = 67 + SPARSE_TO_DENSE = 68 + TILE = 69 + EXPAND_DIMS = 70 + EQUAL = 71 + NOT_EQUAL = 72 + LOG = 73 + SUM = 74 + SQRT = 75 + RSQRT = 76 + SHAPE = 77 + POW = 78 + ARG_MIN = 79 + FAKE_QUANT = 80 + REDUCE_PROD = 81 + REDUCE_MAX = 82 + PACK = 83 + LOGICAL_OR = 84 + ONE_HOT = 85 + LOGICAL_AND = 86 + LOGICAL_NOT = 87 + UNPACK = 88 + REDUCE_MIN = 89 + FLOOR_DIV = 90 + REDUCE_ANY = 91 + SQUARE = 92 + ZEROS_LIKE = 93 + FILL = 94 + FLOOR_MOD = 95 + RANGE = 96 + RESIZE_NEAREST_NEIGHBOR = 97 + LEAKY_RELU = 98 + SQUARED_DIFFERENCE = 99 + MIRROR_PAD = 100 + ABS = 101 + SPLIT_V = 102 + UNIQUE = 103 + CEIL = 104 + REVERSE_V2 = 105 + ADD_N = 106 + GATHER_ND = 107 + COS = 108 + WHERE = 109 + RANK = 110 + ELU = 111 + REVERSE_SEQUENCE = 112 + MATRIX_DIAG = 113 + QUANTIZE = 114 + MATRIX_SET_DIAG = 115 + ROUND = 116 + HARD_SWISH = 117 + IF = 118 + WHILE = 119 + NON_MAX_SUPPRESSION_V4 = 120 + NON_MAX_SUPPRESSION_V5 = 121 + SCATTER_ND = 122 + SELECT_V2 = 123 + DENSIFY = 124 + SEGMENT_SUM = 125 + BATCH_MATMUL = 126 + PLACEHOLDER_FOR_GREATER_OP_CODES = 127 + CUMSUM = 128 + CALL_ONCE = 129 + BROADCAST_TO = 130 + RFFT2D = 131 + CONV_3D = 132 + IMAG = 133 + REAL = 134 + COMPLEX_ABS = 135 + HASHTABLE = 136 + HASHTABLE_FIND = 137 + HASHTABLE_IMPORT = 138 + HASHTABLE_SIZE = 139 + REDUCE_ALL = 140 + CONV_3D_TRANSPOSE = 141 + VAR_HANDLE = 142 + READ_VARIABLE = 143 + ASSIGN_VARIABLE = 144 + BROADCAST_ARGS = 145 + RANDOM_STANDARD_NORMAL = 146 + BUCKETIZE = 147 + RANDOM_UNIFORM = 148 + MULTINOMIAL = 149 + GELU = 150 + DYNAMIC_UPDATE_SLICE = 151 + RELU_0_TO_1 = 152 + UNSORTED_SEGMENT_PROD = 153 + UNSORTED_SEGMENT_MAX = 154 + UNSORTED_SEGMENT_SUM = 155 + ATAN2 = 156 + UNSORTED_SEGMENT_MIN = 157 + SIGN = 158 + BITCAST = 159 + BITWISE_XOR = 160 + RIGHT_SHIFT = 161 + STABLEHLO_LOGISTIC = 162 + STABLEHLO_ADD = 163 + STABLEHLO_DIVIDE = 164 + STABLEHLO_MULTIPLY = 165 + STABLEHLO_MAXIMUM = 166 + STABLEHLO_RESHAPE = 167 + STABLEHLO_CLAMP = 168 + STABLEHLO_CONCATENATE = 169 + STABLEHLO_BROADCAST_IN_DIM = 170 + STABLEHLO_CONVOLUTION = 171 + STABLEHLO_SLICE = 172 + STABLEHLO_CUSTOM_CALL = 173 + STABLEHLO_REDUCE = 174 + STABLEHLO_ABS = 175 + STABLEHLO_AND = 176 + STABLEHLO_COSINE = 177 + STABLEHLO_EXPONENTIAL = 178 + STABLEHLO_FLOOR = 179 + STABLEHLO_LOG = 180 + STABLEHLO_MINIMUM = 181 + STABLEHLO_NEGATE = 182 + STABLEHLO_OR = 183 + STABLEHLO_POWER = 184 + 
STABLEHLO_REMAINDER = 185 + STABLEHLO_RSQRT = 186 + STABLEHLO_SELECT = 187 + STABLEHLO_SUBTRACT = 188 + STABLEHLO_TANH = 189 + STABLEHLO_SCATTER = 190 + STABLEHLO_COMPARE = 191 + STABLEHLO_CONVERT = 192 + STABLEHLO_DYNAMIC_SLICE = 193 + STABLEHLO_DYNAMIC_UPDATE_SLICE = 194 + STABLEHLO_PAD = 195 + STABLEHLO_IOTA = 196 + STABLEHLO_DOT_GENERAL = 197 + STABLEHLO_REDUCE_WINDOW = 198 + STABLEHLO_SORT = 199 + STABLEHLO_WHILE = 200 + STABLEHLO_GATHER = 201 + STABLEHLO_TRANSPOSE = 202 + DILATE = 203 + STABLEHLO_RNG_BIT_GENERATOR = 204 + REDUCE_WINDOW = 205 diff --git a/backends/nxp/backend/ir/lib/tflite/BuiltinOptions.py b/backends/nxp/backend/ir/lib/tflite/BuiltinOptions.py new file mode 100755 index 00000000000..8e416fad7c6 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/BuiltinOptions.py @@ -0,0 +1,133 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + + +class BuiltinOptions(object): + NONE = 0 + Conv2DOptions = 1 + DepthwiseConv2DOptions = 2 + ConcatEmbeddingsOptions = 3 + LSHProjectionOptions = 4 + Pool2DOptions = 5 + SVDFOptions = 6 + RNNOptions = 7 + FullyConnectedOptions = 8 + SoftmaxOptions = 9 + ConcatenationOptions = 10 + AddOptions = 11 + L2NormOptions = 12 + LocalResponseNormalizationOptions = 13 + LSTMOptions = 14 + ResizeBilinearOptions = 15 + CallOptions = 16 + ReshapeOptions = 17 + SkipGramOptions = 18 + SpaceToDepthOptions = 19 + EmbeddingLookupSparseOptions = 20 + MulOptions = 21 + PadOptions = 22 + GatherOptions = 23 + BatchToSpaceNDOptions = 24 + SpaceToBatchNDOptions = 25 + TransposeOptions = 26 + ReducerOptions = 27 + SubOptions = 28 + DivOptions = 29 + SqueezeOptions = 30 + SequenceRNNOptions = 31 + StridedSliceOptions = 32 + ExpOptions = 33 + TopKV2Options = 34 + SplitOptions = 35 + LogSoftmaxOptions = 36 + CastOptions = 37 + DequantizeOptions = 38 + MaximumMinimumOptions = 39 + ArgMaxOptions = 40 + LessOptions = 41 + NegOptions = 42 + PadV2Options = 43 + GreaterOptions = 44 + GreaterEqualOptions = 45 + LessEqualOptions = 46 + SelectOptions = 47 + SliceOptions = 48 + TransposeConvOptions = 49 + SparseToDenseOptions = 50 + TileOptions = 51 + ExpandDimsOptions = 52 + EqualOptions = 53 + NotEqualOptions = 54 + ShapeOptions = 55 + PowOptions = 56 + ArgMinOptions = 57 + FakeQuantOptions = 58 + PackOptions = 59 + LogicalOrOptions = 60 + OneHotOptions = 61 + LogicalAndOptions = 62 + LogicalNotOptions = 63 + UnpackOptions = 64 + FloorDivOptions = 65 + SquareOptions = 66 + ZerosLikeOptions = 67 + FillOptions = 68 + BidirectionalSequenceLSTMOptions = 69 + BidirectionalSequenceRNNOptions = 70 + UnidirectionalSequenceLSTMOptions = 71 + FloorModOptions = 72 + RangeOptions = 73 + ResizeNearestNeighborOptions = 74 + LeakyReluOptions = 75 + SquaredDifferenceOptions = 76 + MirrorPadOptions = 77 + AbsOptions = 78 + SplitVOptions = 79 + UniqueOptions = 80 + ReverseV2Options = 81 + AddNOptions = 82 + GatherNdOptions = 83 + CosOptions = 84 + WhereOptions = 85 + RankOptions = 86 + ReverseSequenceOptions = 87 + MatrixDiagOptions = 88 + QuantizeOptions = 89 + MatrixSetDiagOptions = 90 + HardSwishOptions = 91 + IfOptions = 92 + WhileOptions = 93 + DepthToSpaceOptions = 94 + NonMaxSuppressionV4Options = 95 + NonMaxSuppressionV5Options = 96 + ScatterNdOptions = 97 + SelectV2Options = 98 + DensifyOptions = 99 + SegmentSumOptions = 100 + BatchMatMulOptions = 101 + CumsumOptions = 102 + CallOnceOptions = 103 + BroadcastToOptions = 104 + Rfft2dOptions = 105 + Conv3DOptions = 106 + HashtableOptions = 107 + HashtableFindOptions = 108 + 
HashtableImportOptions = 109 + HashtableSizeOptions = 110 + VarHandleOptions = 111 + ReadVariableOptions = 112 + AssignVariableOptions = 113 + RandomOptions = 114 + BucketizeOptions = 115 + GeluOptions = 116 + DynamicUpdateSliceOptions = 117 + UnsortedSegmentProdOptions = 118 + UnsortedSegmentMaxOptions = 119 + UnsortedSegmentMinOptions = 120 + UnsortedSegmentSumOptions = 121 + ATan2Options = 122 + SignOptions = 123 + BitcastOptions = 124 + BitwiseXorOptions = 125 + RightShiftOptions = 126 diff --git a/backends/nxp/backend/ir/lib/tflite/BuiltinOptions2.py b/backends/nxp/backend/ir/lib/tflite/BuiltinOptions2.py new file mode 100755 index 00000000000..5df8e2fe998 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/BuiltinOptions2.py @@ -0,0 +1,27 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + + +class BuiltinOptions2(object): + NONE = 0 + StablehloConcatenateOptions = 1 + StablehloBroadcastInDimOptions = 2 + StablehloSliceOptions = 3 + StablehloConvolutionOptions = 4 + StablehloCustomCallOptions = 5 + StablehloReduceOptions = 6 + StablehloScatterOptions = 7 + StablehloCompareOptions = 8 + StablehloDynamicSliceOptions = 9 + StablehloPadOptions = 10 + StablehloIotaOptions = 11 + StablehloDotGeneralOptions = 12 + StablehloReduceWindowOptions = 13 + StablehloSortOptions = 14 + StablehloWhileOptions = 15 + StablehloGatherOptions = 16 + StablehloTransposeOptions = 17 + DilateOptions = 18 + StablehloRngBitGeneratorOptions = 19 + ReduceWindowOptions = 20 diff --git a/backends/nxp/backend/ir/lib/tflite/CallOnceOptions.py b/backends/nxp/backend/ir/lib/tflite/CallOnceOptions.py new file mode 100755 index 00000000000..1bb63035716 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/CallOnceOptions.py @@ -0,0 +1,65 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class CallOnceOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = CallOnceOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsCallOnceOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def CallOnceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # CallOnceOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # CallOnceOptions + def InitSubgraphIndex(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + +def CallOnceOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + CallOnceOptionsStart(builder) + + +def CallOnceOptionsAddInitSubgraphIndex(builder, initSubgraphIndex): + builder.PrependInt32Slot(0, initSubgraphIndex, 0) + + +def AddInitSubgraphIndex(builder, initSubgraphIndex): + CallOnceOptionsAddInitSubgraphIndex(builder, initSubgraphIndex) + + +def CallOnceOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return CallOnceOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/CallOptions.py b/backends/nxp/backend/ir/lib/tflite/CallOptions.py new file mode 100755 index 00000000000..4522a53917f --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/CallOptions.py @@ -0,0 +1,67 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class CallOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = CallOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsCallOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def CallOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # CallOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # CallOptions + def Subgraph(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get( + flatbuffers.number_types.Uint32Flags, o + self._tab.Pos + ) + return 0 + + +def CallOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + CallOptionsStart(builder) + + +def CallOptionsAddSubgraph(builder, subgraph): + builder.PrependUint32Slot(0, subgraph, 0) + + +def AddSubgraph(builder, subgraph): + CallOptionsAddSubgraph(builder, subgraph) + + +def CallOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return CallOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/CastOptions.py b/backends/nxp/backend/ir/lib/tflite/CastOptions.py new file mode 100755 index 00000000000..cbec318be5b --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/CastOptions.py @@ -0,0 +1,80 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class CastOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = CastOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsCastOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def CastOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # CastOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # CastOptions + def InDataType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # CastOptions + def OutDataType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + +def CastOptionsStart(builder): + builder.StartObject(2) + + +def Start(builder): + CastOptionsStart(builder) + + +def CastOptionsAddInDataType(builder, inDataType): + builder.PrependInt8Slot(0, inDataType, 0) + + +def AddInDataType(builder, inDataType): + CastOptionsAddInDataType(builder, inDataType) + + +def CastOptionsAddOutDataType(builder, outDataType): + builder.PrependInt8Slot(1, outDataType, 0) + + +def AddOutDataType(builder, outDataType): + CastOptionsAddOutDataType(builder, outDataType) + + +def CastOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return CastOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/CombinerType.py b/backends/nxp/backend/ir/lib/tflite/CombinerType.py new file mode 100755 index 00000000000..dfe8afb9fc8 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/CombinerType.py @@ -0,0 +1,9 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: 
tflite + + +class CombinerType(object): + SUM = 0 + MEAN = 1 + SQRTN = 2 diff --git a/backends/nxp/backend/ir/lib/tflite/ConcatEmbeddingsOptions.py b/backends/nxp/backend/ir/lib/tflite/ConcatEmbeddingsOptions.py new file mode 100755 index 00000000000..43234f666ad --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/ConcatEmbeddingsOptions.py @@ -0,0 +1,163 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class ConcatEmbeddingsOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ConcatEmbeddingsOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsConcatEmbeddingsOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def ConcatEmbeddingsOptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # ConcatEmbeddingsOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ConcatEmbeddingsOptions + def NumChannels(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # ConcatEmbeddingsOptions + def NumColumnsPerChannel(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int32Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4), + ) + return 0 + + # ConcatEmbeddingsOptions + def NumColumnsPerChannelAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # ConcatEmbeddingsOptions + def NumColumnsPerChannelLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # ConcatEmbeddingsOptions + def NumColumnsPerChannelIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + return o == 0 + + # ConcatEmbeddingsOptions + def EmbeddingDimPerChannel(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int32Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4), + ) + return 0 + + # ConcatEmbeddingsOptions + def EmbeddingDimPerChannelAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # ConcatEmbeddingsOptions + def EmbeddingDimPerChannelLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # ConcatEmbeddingsOptions + def EmbeddingDimPerChannelIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + return o == 0 + + +def ConcatEmbeddingsOptionsStart(builder): + builder.StartObject(3) + + +def Start(builder): + ConcatEmbeddingsOptionsStart(builder) + + +def 
ConcatEmbeddingsOptionsAddNumChannels(builder, numChannels): + builder.PrependInt32Slot(0, numChannels, 0) + + +def AddNumChannels(builder, numChannels): + ConcatEmbeddingsOptionsAddNumChannels(builder, numChannels) + + +def ConcatEmbeddingsOptionsAddNumColumnsPerChannel(builder, numColumnsPerChannel): + builder.PrependUOffsetTRelativeSlot( + 1, flatbuffers.number_types.UOffsetTFlags.py_type(numColumnsPerChannel), 0 + ) + + +def AddNumColumnsPerChannel(builder, numColumnsPerChannel): + ConcatEmbeddingsOptionsAddNumColumnsPerChannel(builder, numColumnsPerChannel) + + +def ConcatEmbeddingsOptionsStartNumColumnsPerChannelVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartNumColumnsPerChannelVector(builder, numElems: int) -> int: + return ConcatEmbeddingsOptionsStartNumColumnsPerChannelVector(builder, numElems) + + +def ConcatEmbeddingsOptionsAddEmbeddingDimPerChannel(builder, embeddingDimPerChannel): + builder.PrependUOffsetTRelativeSlot( + 2, flatbuffers.number_types.UOffsetTFlags.py_type(embeddingDimPerChannel), 0 + ) + + +def AddEmbeddingDimPerChannel(builder, embeddingDimPerChannel): + ConcatEmbeddingsOptionsAddEmbeddingDimPerChannel(builder, embeddingDimPerChannel) + + +def ConcatEmbeddingsOptionsStartEmbeddingDimPerChannelVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartEmbeddingDimPerChannelVector(builder, numElems: int) -> int: + return ConcatEmbeddingsOptionsStartEmbeddingDimPerChannelVector(builder, numElems) + + +def ConcatEmbeddingsOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return ConcatEmbeddingsOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/ConcatenationOptions.py b/backends/nxp/backend/ir/lib/tflite/ConcatenationOptions.py new file mode 100755 index 00000000000..b1ec3c98e90 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/ConcatenationOptions.py @@ -0,0 +1,80 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class ConcatenationOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ConcatenationOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsConcatenationOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def ConcatenationOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # ConcatenationOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ConcatenationOptions + def Axis(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # ConcatenationOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + +def ConcatenationOptionsStart(builder): + builder.StartObject(2) + + +def Start(builder): + ConcatenationOptionsStart(builder) + + +def ConcatenationOptionsAddAxis(builder, axis): + builder.PrependInt32Slot(0, axis, 0) + + +def AddAxis(builder, axis): + ConcatenationOptionsAddAxis(builder, axis) + + +def ConcatenationOptionsAddFusedActivationFunction(builder, fusedActivationFunction): + builder.PrependInt8Slot(1, fusedActivationFunction, 0) + + +def AddFusedActivationFunction(builder, fusedActivationFunction): + ConcatenationOptionsAddFusedActivationFunction(builder, fusedActivationFunction) + + +def ConcatenationOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return ConcatenationOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/Conv2DOptions.py b/backends/nxp/backend/ir/lib/tflite/Conv2DOptions.py new file mode 100755 index 00000000000..36ece4ad2db --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/Conv2DOptions.py @@ -0,0 +1,155 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class Conv2DOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = Conv2DOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsConv2DOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def Conv2DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # Conv2DOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # Conv2DOptions + def Padding(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # Conv2DOptions + def StrideW(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # Conv2DOptions + def StrideH(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # Conv2DOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # Conv2DOptions + def DilationWFactor(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 1 + + # Conv2DOptions + def DilationHFactor(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 1 + + # Conv2DOptions + def QuantizedBiasType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + +def Conv2DOptionsStart(builder): + builder.StartObject(7) + + +def Start(builder): + Conv2DOptionsStart(builder) + + +def Conv2DOptionsAddPadding(builder, padding): + builder.PrependInt8Slot(0, padding, 0) + + +def AddPadding(builder, padding): + Conv2DOptionsAddPadding(builder, padding) + + +def Conv2DOptionsAddStrideW(builder, strideW): + builder.PrependInt32Slot(1, strideW, 0) + + +def AddStrideW(builder, strideW): + Conv2DOptionsAddStrideW(builder, strideW) + + +def Conv2DOptionsAddStrideH(builder, strideH): + builder.PrependInt32Slot(2, strideH, 0) + + +def AddStrideH(builder, strideH): + Conv2DOptionsAddStrideH(builder, strideH) + + +def Conv2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction): + builder.PrependInt8Slot(3, fusedActivationFunction, 0) + + +def AddFusedActivationFunction(builder, fusedActivationFunction): + Conv2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction) + + +def Conv2DOptionsAddDilationWFactor(builder, dilationWFactor): + builder.PrependInt32Slot(4, dilationWFactor, 1) + + +def AddDilationWFactor(builder, dilationWFactor): + Conv2DOptionsAddDilationWFactor(builder, dilationWFactor) + + +def Conv2DOptionsAddDilationHFactor(builder, dilationHFactor): + builder.PrependInt32Slot(5, dilationHFactor, 1) + + +def AddDilationHFactor(builder, dilationHFactor): + Conv2DOptionsAddDilationHFactor(builder, dilationHFactor) + + +def Conv2DOptionsAddQuantizedBiasType(builder, quantizedBiasType): + builder.PrependInt8Slot(6, quantizedBiasType, 0) + + +def AddQuantizedBiasType(builder, quantizedBiasType): + 
Conv2DOptionsAddQuantizedBiasType(builder, quantizedBiasType) + + +def Conv2DOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return Conv2DOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/Conv3DOptions.py b/backends/nxp/backend/ir/lib/tflite/Conv3DOptions.py new file mode 100755 index 00000000000..31d7003461c --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/Conv3DOptions.py @@ -0,0 +1,170 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class Conv3DOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = Conv3DOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsConv3DOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def Conv3DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # Conv3DOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # Conv3DOptions + def Padding(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # Conv3DOptions + def StrideD(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # Conv3DOptions + def StrideW(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # Conv3DOptions + def StrideH(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # Conv3DOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # Conv3DOptions + def DilationDFactor(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 1 + + # Conv3DOptions + def DilationWFactor(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 1 + + # Conv3DOptions + def DilationHFactor(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 1 + + +def Conv3DOptionsStart(builder): + builder.StartObject(8) + + +def Start(builder): + Conv3DOptionsStart(builder) + + +def Conv3DOptionsAddPadding(builder, padding): + builder.PrependInt8Slot(0, padding, 0) + + +def AddPadding(builder, padding): + Conv3DOptionsAddPadding(builder, padding) + + +def Conv3DOptionsAddStrideD(builder, strideD): + builder.PrependInt32Slot(1, strideD, 0) + + +def AddStrideD(builder, 
strideD): + Conv3DOptionsAddStrideD(builder, strideD) + + +def Conv3DOptionsAddStrideW(builder, strideW): + builder.PrependInt32Slot(2, strideW, 0) + + +def AddStrideW(builder, strideW): + Conv3DOptionsAddStrideW(builder, strideW) + + +def Conv3DOptionsAddStrideH(builder, strideH): + builder.PrependInt32Slot(3, strideH, 0) + + +def AddStrideH(builder, strideH): + Conv3DOptionsAddStrideH(builder, strideH) + + +def Conv3DOptionsAddFusedActivationFunction(builder, fusedActivationFunction): + builder.PrependInt8Slot(4, fusedActivationFunction, 0) + + +def AddFusedActivationFunction(builder, fusedActivationFunction): + Conv3DOptionsAddFusedActivationFunction(builder, fusedActivationFunction) + + +def Conv3DOptionsAddDilationDFactor(builder, dilationDFactor): + builder.PrependInt32Slot(5, dilationDFactor, 1) + + +def AddDilationDFactor(builder, dilationDFactor): + Conv3DOptionsAddDilationDFactor(builder, dilationDFactor) + + +def Conv3DOptionsAddDilationWFactor(builder, dilationWFactor): + builder.PrependInt32Slot(6, dilationWFactor, 1) + + +def AddDilationWFactor(builder, dilationWFactor): + Conv3DOptionsAddDilationWFactor(builder, dilationWFactor) + + +def Conv3DOptionsAddDilationHFactor(builder, dilationHFactor): + builder.PrependInt32Slot(7, dilationHFactor, 1) + + +def AddDilationHFactor(builder, dilationHFactor): + Conv3DOptionsAddDilationHFactor(builder, dilationHFactor) + + +def Conv3DOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return Conv3DOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/CosOptions.py b/backends/nxp/backend/ir/lib/tflite/CosOptions.py new file mode 100755 index 00000000000..fa3e7c46643 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/CosOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class CosOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = CosOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsCosOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def CosOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # CosOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def CosOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + CosOptionsStart(builder) + + +def CosOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return CosOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/CumsumOptions.py b/backends/nxp/backend/ir/lib/tflite/CumsumOptions.py new file mode 100755 index 00000000000..4d1d9e1eeb2 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/CumsumOptions.py @@ -0,0 +1,84 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class CumsumOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = CumsumOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsCumsumOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def CumsumOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # CumsumOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # CumsumOptions + def Exclusive(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + # CumsumOptions + def Reverse(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + +def CumsumOptionsStart(builder): + builder.StartObject(2) + + +def Start(builder): + CumsumOptionsStart(builder) + + +def CumsumOptionsAddExclusive(builder, exclusive): + builder.PrependBoolSlot(0, exclusive, 0) + + +def AddExclusive(builder, exclusive): + CumsumOptionsAddExclusive(builder, exclusive) + + +def CumsumOptionsAddReverse(builder, reverse): + builder.PrependBoolSlot(1, reverse, 0) + + +def AddReverse(builder, reverse): + CumsumOptionsAddReverse(builder, reverse) + + +def CumsumOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return CumsumOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/CustomOptionsFormat.py b/backends/nxp/backend/ir/lib/tflite/CustomOptionsFormat.py new file mode 100755 index 00000000000..18bc07d023d --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/CustomOptionsFormat.py @@ -0,0 +1,7 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + + +class CustomOptionsFormat(object): + FLEXBUFFERS = 0 diff --git a/backends/nxp/backend/ir/lib/tflite/CustomQuantization.py b/backends/nxp/backend/ir/lib/tflite/CustomQuantization.py new file mode 100755 index 00000000000..79fb359ebe0 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/CustomQuantization.py @@ -0,0 +1,98 @@ +# automatically 
generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class CustomQuantization(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = CustomQuantization() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsCustomQuantization(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def CustomQuantizationBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # CustomQuantization + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # CustomQuantization + def Custom(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Uint8Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1), + ) + return 0 + + # CustomQuantization + def CustomAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o) + return 0 + + # CustomQuantization + def CustomLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # CustomQuantization + def CustomIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + +def CustomQuantizationStart(builder): + builder.StartObject(1) + + +def Start(builder): + CustomQuantizationStart(builder) + + +def CustomQuantizationAddCustom(builder, custom): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(custom), 0 + ) + + +def AddCustom(builder, custom): + CustomQuantizationAddCustom(builder, custom) + + +def CustomQuantizationStartCustomVector(builder, numElems): + return builder.StartVector(1, numElems, 1) + + +def StartCustomVector(builder, numElems: int) -> int: + return CustomQuantizationStartCustomVector(builder, numElems) + + +def CustomQuantizationEnd(builder): + return builder.EndObject() + + +def End(builder): + return CustomQuantizationEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/DensifyOptions.py b/backends/nxp/backend/ir/lib/tflite/DensifyOptions.py new file mode 100755 index 00000000000..1b54a16034a --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/DensifyOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class DensifyOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = DensifyOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsDensifyOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def DensifyOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # DensifyOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def DensifyOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + DensifyOptionsStart(builder) + + +def DensifyOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return DensifyOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/DepthToSpaceOptions.py b/backends/nxp/backend/ir/lib/tflite/DepthToSpaceOptions.py new file mode 100755 index 00000000000..315b2da52e8 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/DepthToSpaceOptions.py @@ -0,0 +1,65 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class DepthToSpaceOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = DepthToSpaceOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsDepthToSpaceOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def DepthToSpaceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # DepthToSpaceOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # DepthToSpaceOptions + def BlockSize(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + +def DepthToSpaceOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + DepthToSpaceOptionsStart(builder) + + +def DepthToSpaceOptionsAddBlockSize(builder, blockSize): + builder.PrependInt32Slot(0, blockSize, 0) + + +def AddBlockSize(builder, blockSize): + DepthToSpaceOptionsAddBlockSize(builder, blockSize) + + +def DepthToSpaceOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return DepthToSpaceOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/DepthwiseConv2DOptions.py b/backends/nxp/backend/ir/lib/tflite/DepthwiseConv2DOptions.py new file mode 100755 index 00000000000..e07ff231563 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/DepthwiseConv2DOptions.py @@ -0,0 +1,157 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class DepthwiseConv2DOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = DepthwiseConv2DOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsDepthwiseConv2DOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def DepthwiseConv2DOptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # DepthwiseConv2DOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # DepthwiseConv2DOptions + def Padding(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # DepthwiseConv2DOptions + def StrideW(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # DepthwiseConv2DOptions + def StrideH(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # DepthwiseConv2DOptions + def DepthMultiplier(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # DepthwiseConv2DOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # DepthwiseConv2DOptions + def DilationWFactor(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 1 + + # DepthwiseConv2DOptions + def DilationHFactor(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 1 + + +def DepthwiseConv2DOptionsStart(builder): + builder.StartObject(7) + + +def Start(builder): + DepthwiseConv2DOptionsStart(builder) + + +def DepthwiseConv2DOptionsAddPadding(builder, padding): + builder.PrependInt8Slot(0, padding, 0) + + +def AddPadding(builder, padding): + DepthwiseConv2DOptionsAddPadding(builder, padding) + + +def DepthwiseConv2DOptionsAddStrideW(builder, strideW): + builder.PrependInt32Slot(1, strideW, 0) + + +def AddStrideW(builder, strideW): + DepthwiseConv2DOptionsAddStrideW(builder, strideW) + + +def DepthwiseConv2DOptionsAddStrideH(builder, strideH): + builder.PrependInt32Slot(2, strideH, 0) + + +def AddStrideH(builder, strideH): + DepthwiseConv2DOptionsAddStrideH(builder, strideH) + + +def DepthwiseConv2DOptionsAddDepthMultiplier(builder, depthMultiplier): + builder.PrependInt32Slot(3, depthMultiplier, 0) + + +def AddDepthMultiplier(builder, depthMultiplier): + DepthwiseConv2DOptionsAddDepthMultiplier(builder, depthMultiplier) + + +def DepthwiseConv2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction): + builder.PrependInt8Slot(4, fusedActivationFunction, 0) + + +def AddFusedActivationFunction(builder, fusedActivationFunction): + DepthwiseConv2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction) + + +def DepthwiseConv2DOptionsAddDilationWFactor(builder, dilationWFactor): + builder.PrependInt32Slot(5, dilationWFactor, 1) + + +def AddDilationWFactor(builder, dilationWFactor): + DepthwiseConv2DOptionsAddDilationWFactor(builder, 
dilationWFactor) + + +def DepthwiseConv2DOptionsAddDilationHFactor(builder, dilationHFactor): + builder.PrependInt32Slot(6, dilationHFactor, 1) + + +def AddDilationHFactor(builder, dilationHFactor): + DepthwiseConv2DOptionsAddDilationHFactor(builder, dilationHFactor) + + +def DepthwiseConv2DOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return DepthwiseConv2DOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/DequantizeOptions.py b/backends/nxp/backend/ir/lib/tflite/DequantizeOptions.py new file mode 100755 index 00000000000..f30ab73727f --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/DequantizeOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class DequantizeOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = DequantizeOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsDequantizeOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def DequantizeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # DequantizeOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def DequantizeOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + DequantizeOptionsStart(builder) + + +def DequantizeOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return DequantizeOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/DilateOptions.py b/backends/nxp/backend/ir/lib/tflite/DilateOptions.py new file mode 100755 index 00000000000..520d3e5957c --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/DilateOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class DilateOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = DilateOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsDilateOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def DilateOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # DilateOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def DilateOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + DilateOptionsStart(builder) + + +def DilateOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return DilateOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/DimensionMetadata.py b/backends/nxp/backend/ir/lib/tflite/DimensionMetadata.py new file mode 100755 index 00000000000..900c3380143 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/DimensionMetadata.py @@ -0,0 +1,152 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class DimensionMetadata(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = DimensionMetadata() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsDimensionMetadata(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def DimensionMetadataBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # DimensionMetadata + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # DimensionMetadata + def Format(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # DimensionMetadata + def DenseSize(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # DimensionMetadata + def ArraySegmentsType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos) + return 0 + + # DimensionMetadata + def ArraySegments(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + from flatbuffers.table import Table + + obj = Table(bytearray(), 0) + self._tab.Union(obj, o) + return obj + return None + + # DimensionMetadata + def ArrayIndicesType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos) + return 0 + + # DimensionMetadata + def ArrayIndices(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + from flatbuffers.table import Table + + obj = Table(bytearray(), 0) + self._tab.Union(obj, o) + return obj + return None + + +def DimensionMetadataStart(builder): + builder.StartObject(6) + + +def Start(builder): + DimensionMetadataStart(builder) + + +def DimensionMetadataAddFormat(builder, format): + builder.PrependInt8Slot(0, format, 0) + + +def AddFormat(builder, format): + DimensionMetadataAddFormat(builder, format) + + +def 
DimensionMetadataAddDenseSize(builder, denseSize): + builder.PrependInt32Slot(1, denseSize, 0) + + +def AddDenseSize(builder, denseSize): + DimensionMetadataAddDenseSize(builder, denseSize) + + +def DimensionMetadataAddArraySegmentsType(builder, arraySegmentsType): + builder.PrependUint8Slot(2, arraySegmentsType, 0) + + +def AddArraySegmentsType(builder, arraySegmentsType): + DimensionMetadataAddArraySegmentsType(builder, arraySegmentsType) + + +def DimensionMetadataAddArraySegments(builder, arraySegments): + builder.PrependUOffsetTRelativeSlot( + 3, flatbuffers.number_types.UOffsetTFlags.py_type(arraySegments), 0 + ) + + +def AddArraySegments(builder, arraySegments): + DimensionMetadataAddArraySegments(builder, arraySegments) + + +def DimensionMetadataAddArrayIndicesType(builder, arrayIndicesType): + builder.PrependUint8Slot(4, arrayIndicesType, 0) + + +def AddArrayIndicesType(builder, arrayIndicesType): + DimensionMetadataAddArrayIndicesType(builder, arrayIndicesType) + + +def DimensionMetadataAddArrayIndices(builder, arrayIndices): + builder.PrependUOffsetTRelativeSlot( + 5, flatbuffers.number_types.UOffsetTFlags.py_type(arrayIndices), 0 + ) + + +def AddArrayIndices(builder, arrayIndices): + DimensionMetadataAddArrayIndices(builder, arrayIndices) + + +def DimensionMetadataEnd(builder): + return builder.EndObject() + + +def End(builder): + return DimensionMetadataEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/DimensionType.py b/backends/nxp/backend/ir/lib/tflite/DimensionType.py new file mode 100755 index 00000000000..53429e9b76e --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/DimensionType.py @@ -0,0 +1,8 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + + +class DimensionType(object): + DENSE = 0 + SPARSE_CSR = 1 diff --git a/backends/nxp/backend/ir/lib/tflite/DivOptions.py b/backends/nxp/backend/ir/lib/tflite/DivOptions.py new file mode 100755 index 00000000000..5aee6be7182 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/DivOptions.py @@ -0,0 +1,65 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class DivOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = DivOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsDivOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def DivOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # DivOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # DivOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + +def DivOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + DivOptionsStart(builder) + + +def DivOptionsAddFusedActivationFunction(builder, fusedActivationFunction): + builder.PrependInt8Slot(0, fusedActivationFunction, 0) + + +def AddFusedActivationFunction(builder, fusedActivationFunction): + DivOptionsAddFusedActivationFunction(builder, fusedActivationFunction) + + +def DivOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return DivOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/DynamicUpdateSliceOptions.py b/backends/nxp/backend/ir/lib/tflite/DynamicUpdateSliceOptions.py new file mode 100755 index 00000000000..7b86b66c466 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/DynamicUpdateSliceOptions.py @@ -0,0 +1,52 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class DynamicUpdateSliceOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = DynamicUpdateSliceOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsDynamicUpdateSliceOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def DynamicUpdateSliceOptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # DynamicUpdateSliceOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def DynamicUpdateSliceOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + DynamicUpdateSliceOptionsStart(builder) + + +def DynamicUpdateSliceOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return DynamicUpdateSliceOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/EmbeddingLookupSparseOptions.py b/backends/nxp/backend/ir/lib/tflite/EmbeddingLookupSparseOptions.py new file mode 100755 index 00000000000..d1f636056e5 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/EmbeddingLookupSparseOptions.py @@ -0,0 +1,67 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class EmbeddingLookupSparseOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = EmbeddingLookupSparseOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsEmbeddingLookupSparseOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def EmbeddingLookupSparseOptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # EmbeddingLookupSparseOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # EmbeddingLookupSparseOptions + def Combiner(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + +def EmbeddingLookupSparseOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + EmbeddingLookupSparseOptionsStart(builder) + + +def EmbeddingLookupSparseOptionsAddCombiner(builder, combiner): + builder.PrependInt8Slot(0, combiner, 0) + + +def AddCombiner(builder, combiner): + EmbeddingLookupSparseOptionsAddCombiner(builder, combiner) + + +def EmbeddingLookupSparseOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return EmbeddingLookupSparseOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/EqualOptions.py b/backends/nxp/backend/ir/lib/tflite/EqualOptions.py new file mode 100755 index 00000000000..aa184e876ba --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/EqualOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class EqualOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = EqualOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsEqualOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def EqualOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # EqualOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def EqualOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + EqualOptionsStart(builder) + + +def EqualOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return EqualOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/ExpOptions.py b/backends/nxp/backend/ir/lib/tflite/ExpOptions.py new file mode 100755 index 00000000000..c36969ecefb --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/ExpOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class ExpOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ExpOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsExpOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def ExpOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # ExpOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def ExpOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + ExpOptionsStart(builder) + + +def ExpOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return ExpOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/ExpandDimsOptions.py b/backends/nxp/backend/ir/lib/tflite/ExpandDimsOptions.py new file mode 100755 index 00000000000..cdaab92767e --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/ExpandDimsOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class ExpandDimsOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ExpandDimsOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsExpandDimsOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def ExpandDimsOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # ExpandDimsOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def ExpandDimsOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + ExpandDimsOptionsStart(builder) + + +def ExpandDimsOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return ExpandDimsOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/FakeQuantOptions.py b/backends/nxp/backend/ir/lib/tflite/FakeQuantOptions.py new file mode 100755 index 00000000000..6b8ad914285 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/FakeQuantOptions.py @@ -0,0 +1,116 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class FakeQuantOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = FakeQuantOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsFakeQuantOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def FakeQuantOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # FakeQuantOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # FakeQuantOptions + def Min(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get( + flatbuffers.number_types.Float32Flags, o + self._tab.Pos + ) + return 0.0 + + # FakeQuantOptions + def Max(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get( + flatbuffers.number_types.Float32Flags, o + self._tab.Pos + ) + return 0.0 + + # FakeQuantOptions + def NumBits(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # FakeQuantOptions + def NarrowRange(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + +def FakeQuantOptionsStart(builder): + builder.StartObject(4) + + +def Start(builder): + FakeQuantOptionsStart(builder) + + +def FakeQuantOptionsAddMin(builder, min): + builder.PrependFloat32Slot(0, min, 0.0) + + +def AddMin(builder, min): + FakeQuantOptionsAddMin(builder, min) + + +def FakeQuantOptionsAddMax(builder, max): + builder.PrependFloat32Slot(1, max, 0.0) + + +def AddMax(builder, max): + FakeQuantOptionsAddMax(builder, max) + + +def FakeQuantOptionsAddNumBits(builder, numBits): + builder.PrependInt32Slot(2, numBits, 0) + + +def AddNumBits(builder, numBits): + FakeQuantOptionsAddNumBits(builder, numBits) + + +def FakeQuantOptionsAddNarrowRange(builder, narrowRange): + builder.PrependBoolSlot(3, narrowRange, 0) + + +def AddNarrowRange(builder, narrowRange): + FakeQuantOptionsAddNarrowRange(builder, narrowRange) + + +def FakeQuantOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return FakeQuantOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/FillOptions.py b/backends/nxp/backend/ir/lib/tflite/FillOptions.py new file mode 100755 index 00000000000..a99e717220b --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/FillOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class FillOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = FillOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsFillOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def FillOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # FillOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def FillOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + FillOptionsStart(builder) + + +def FillOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return FillOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/FloorDivOptions.py b/backends/nxp/backend/ir/lib/tflite/FloorDivOptions.py new file mode 100755 index 00000000000..38b149c5b49 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/FloorDivOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class FloorDivOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = FloorDivOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsFloorDivOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def FloorDivOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # FloorDivOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def FloorDivOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + FloorDivOptionsStart(builder) + + +def FloorDivOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return FloorDivOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/FloorModOptions.py b/backends/nxp/backend/ir/lib/tflite/FloorModOptions.py new file mode 100755 index 00000000000..16fb3dc21f2 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/FloorModOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class FloorModOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = FloorModOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsFloorModOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def FloorModOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # FloorModOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def FloorModOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + FloorModOptionsStart(builder) + + +def FloorModOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return FloorModOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/FullyConnectedOptions.py b/backends/nxp/backend/ir/lib/tflite/FullyConnectedOptions.py new file mode 100755 index 00000000000..190cfd7ff7f --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/FullyConnectedOptions.py @@ -0,0 +1,129 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class FullyConnectedOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = FullyConnectedOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsFullyConnectedOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def FullyConnectedOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # FullyConnectedOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # FullyConnectedOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # FullyConnectedOptions + def WeightsFormat(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # FullyConnectedOptions + def KeepNumDims(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + # FullyConnectedOptions + def AsymmetricQuantizeInputs(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + # FullyConnectedOptions + def QuantizedBiasType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + +def FullyConnectedOptionsStart(builder): + builder.StartObject(5) + + +def Start(builder): + FullyConnectedOptionsStart(builder) + + +def FullyConnectedOptionsAddFusedActivationFunction(builder, fusedActivationFunction): + builder.PrependInt8Slot(0, fusedActivationFunction, 0) + + +def AddFusedActivationFunction(builder, fusedActivationFunction): + FullyConnectedOptionsAddFusedActivationFunction(builder, fusedActivationFunction) + + +def FullyConnectedOptionsAddWeightsFormat(builder, 
weightsFormat): + builder.PrependInt8Slot(1, weightsFormat, 0) + + +def AddWeightsFormat(builder, weightsFormat): + FullyConnectedOptionsAddWeightsFormat(builder, weightsFormat) + + +def FullyConnectedOptionsAddKeepNumDims(builder, keepNumDims): + builder.PrependBoolSlot(2, keepNumDims, 0) + + +def AddKeepNumDims(builder, keepNumDims): + FullyConnectedOptionsAddKeepNumDims(builder, keepNumDims) + + +def FullyConnectedOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): + builder.PrependBoolSlot(3, asymmetricQuantizeInputs, 0) + + +def AddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): + FullyConnectedOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs) + + +def FullyConnectedOptionsAddQuantizedBiasType(builder, quantizedBiasType): + builder.PrependInt8Slot(4, quantizedBiasType, 0) + + +def AddQuantizedBiasType(builder, quantizedBiasType): + FullyConnectedOptionsAddQuantizedBiasType(builder, quantizedBiasType) + + +def FullyConnectedOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return FullyConnectedOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/FullyConnectedOptionsWeightsFormat.py b/backends/nxp/backend/ir/lib/tflite/FullyConnectedOptionsWeightsFormat.py new file mode 100755 index 00000000000..143fc512266 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/FullyConnectedOptionsWeightsFormat.py @@ -0,0 +1,8 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + + +class FullyConnectedOptionsWeightsFormat(object): + DEFAULT = 0 + SHUFFLED4x16INT8 = 1 diff --git a/backends/nxp/backend/ir/lib/tflite/GatherNdOptions.py b/backends/nxp/backend/ir/lib/tflite/GatherNdOptions.py new file mode 100755 index 00000000000..e2aa9292b45 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/GatherNdOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class GatherNdOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = GatherNdOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsGatherNdOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def GatherNdOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # GatherNdOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def GatherNdOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + GatherNdOptionsStart(builder) + + +def GatherNdOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return GatherNdOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/GatherOptions.py b/backends/nxp/backend/ir/lib/tflite/GatherOptions.py new file mode 100755 index 00000000000..6817f57c6dc --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/GatherOptions.py @@ -0,0 +1,80 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class GatherOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = GatherOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsGatherOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def GatherOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # GatherOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # GatherOptions + def Axis(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # GatherOptions + def BatchDims(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + +def GatherOptionsStart(builder): + builder.StartObject(2) + + +def Start(builder): + GatherOptionsStart(builder) + + +def GatherOptionsAddAxis(builder, axis): + builder.PrependInt32Slot(0, axis, 0) + + +def AddAxis(builder, axis): + GatherOptionsAddAxis(builder, axis) + + +def GatherOptionsAddBatchDims(builder, batchDims): + builder.PrependInt32Slot(1, batchDims, 0) + + +def AddBatchDims(builder, batchDims): + GatherOptionsAddBatchDims(builder, batchDims) + + +def GatherOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return GatherOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/GeluOptions.py b/backends/nxp/backend/ir/lib/tflite/GeluOptions.py new file mode 100755 index 00000000000..edd27f3b69b --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/GeluOptions.py @@ -0,0 +1,67 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class GeluOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = GeluOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsGeluOptions(cls, buf, offset=0): + """This 
method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def GeluOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # GeluOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # GeluOptions + def Approximate(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + +def GeluOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + GeluOptionsStart(builder) + + +def GeluOptionsAddApproximate(builder, approximate): + builder.PrependBoolSlot(0, approximate, 0) + + +def AddApproximate(builder, approximate): + GeluOptionsAddApproximate(builder, approximate) + + +def GeluOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return GeluOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/GreaterEqualOptions.py b/backends/nxp/backend/ir/lib/tflite/GreaterEqualOptions.py new file mode 100755 index 00000000000..bedd7fe3d10 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/GreaterEqualOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class GreaterEqualOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = GreaterEqualOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsGreaterEqualOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def GreaterEqualOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # GreaterEqualOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def GreaterEqualOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + GreaterEqualOptionsStart(builder) + + +def GreaterEqualOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return GreaterEqualOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/GreaterOptions.py b/backends/nxp/backend/ir/lib/tflite/GreaterOptions.py new file mode 100755 index 00000000000..6d235140aa8 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/GreaterOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class GreaterOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = GreaterOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsGreaterOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def GreaterOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # GreaterOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def GreaterOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + GreaterOptionsStart(builder) + + +def GreaterOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return GreaterOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/HardSwishOptions.py b/backends/nxp/backend/ir/lib/tflite/HardSwishOptions.py new file mode 100755 index 00000000000..7f1c2034394 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/HardSwishOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class HardSwishOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = HardSwishOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsHardSwishOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def HardSwishOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # HardSwishOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def HardSwishOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + HardSwishOptionsStart(builder) + + +def HardSwishOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return HardSwishOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/HashtableFindOptions.py b/backends/nxp/backend/ir/lib/tflite/HashtableFindOptions.py new file mode 100755 index 00000000000..13d35d93644 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/HashtableFindOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class HashtableFindOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = HashtableFindOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsHashtableFindOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def HashtableFindOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # HashtableFindOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def HashtableFindOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + HashtableFindOptionsStart(builder) + + +def HashtableFindOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return HashtableFindOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/HashtableImportOptions.py b/backends/nxp/backend/ir/lib/tflite/HashtableImportOptions.py new file mode 100755 index 00000000000..6c3f6a571ad --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/HashtableImportOptions.py @@ -0,0 +1,52 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class HashtableImportOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = HashtableImportOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsHashtableImportOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def HashtableImportOptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # HashtableImportOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def HashtableImportOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + HashtableImportOptionsStart(builder) + + +def HashtableImportOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return HashtableImportOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/HashtableOptions.py b/backends/nxp/backend/ir/lib/tflite/HashtableOptions.py new file mode 100755 index 00000000000..c8934965862 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/HashtableOptions.py @@ -0,0 +1,95 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class HashtableOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = HashtableOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsHashtableOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def HashtableOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # HashtableOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # HashtableOptions + def TableId(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # HashtableOptions + def KeyDtype(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # HashtableOptions + def ValueDtype(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + +def HashtableOptionsStart(builder): + builder.StartObject(3) + + +def Start(builder): + HashtableOptionsStart(builder) + + +def HashtableOptionsAddTableId(builder, tableId): + builder.PrependInt32Slot(0, tableId, 0) + + +def AddTableId(builder, tableId): + HashtableOptionsAddTableId(builder, tableId) + + +def HashtableOptionsAddKeyDtype(builder, keyDtype): + builder.PrependInt8Slot(1, keyDtype, 0) + + +def AddKeyDtype(builder, keyDtype): + HashtableOptionsAddKeyDtype(builder, keyDtype) + + +def HashtableOptionsAddValueDtype(builder, valueDtype): + builder.PrependInt8Slot(2, valueDtype, 0) + + +def AddValueDtype(builder, valueDtype): + HashtableOptionsAddValueDtype(builder, valueDtype) + + +def HashtableOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return HashtableOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/HashtableSizeOptions.py b/backends/nxp/backend/ir/lib/tflite/HashtableSizeOptions.py new file mode 100755 index 00000000000..ce585b7d9be --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/HashtableSizeOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class HashtableSizeOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = HashtableSizeOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsHashtableSizeOptions(cls, buf, offset=0): + """This method is deprecated. 
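# --- Editorial aside (not part of the diff): HashtableOptions above carries
# three scalar slots; a hedged sketch of populating them. The dtype codes are
# illustrative int8 TensorType values, not taken from this file.
import flatbuffers

from backends.nxp.backend.ir.lib.tflite import HashtableOptions

builder = flatbuffers.Builder(0)
HashtableOptions.Start(builder)
HashtableOptions.AddTableId(builder, 1)     # int32 slot 0
HashtableOptions.AddKeyDtype(builder, 2)    # int8 slot 1 (a TensorType code)
HashtableOptions.AddValueDtype(builder, 0)  # int8 slot 2
table = HashtableOptions.End(builder)
builder.Finish(table)

opts = HashtableOptions.HashtableOptions.GetRootAs(builder.Output(), 0)
assert (opts.TableId(), opts.KeyDtype(), opts.ValueDtype()) == (1, 2, 0)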
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def HashtableSizeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # HashtableSizeOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def HashtableSizeOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + HashtableSizeOptionsStart(builder) + + +def HashtableSizeOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return HashtableSizeOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/IfOptions.py b/backends/nxp/backend/ir/lib/tflite/IfOptions.py new file mode 100755 index 00000000000..5bd12962429 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/IfOptions.py @@ -0,0 +1,80 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class IfOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = IfOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsIfOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def IfOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # IfOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # IfOptions + def ThenSubgraphIndex(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # IfOptions + def ElseSubgraphIndex(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + +def IfOptionsStart(builder): + builder.StartObject(2) + + +def Start(builder): + IfOptionsStart(builder) + + +def IfOptionsAddThenSubgraphIndex(builder, thenSubgraphIndex): + builder.PrependInt32Slot(0, thenSubgraphIndex, 0) + + +def AddThenSubgraphIndex(builder, thenSubgraphIndex): + IfOptionsAddThenSubgraphIndex(builder, thenSubgraphIndex) + + +def IfOptionsAddElseSubgraphIndex(builder, elseSubgraphIndex): + builder.PrependInt32Slot(1, elseSubgraphIndex, 0) + + +def AddElseSubgraphIndex(builder, elseSubgraphIndex): + IfOptionsAddElseSubgraphIndex(builder, elseSubgraphIndex) + + +def IfOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return IfOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/Int32Vector.py b/backends/nxp/backend/ir/lib/tflite/Int32Vector.py new file mode 100755 index 00000000000..1b87b43784f --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/Int32Vector.py @@ -0,0 +1,98 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class Int32Vector(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = 
Int32Vector() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsInt32Vector(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def Int32VectorBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # Int32Vector + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # Int32Vector + def Values(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int32Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4), + ) + return 0 + + # Int32Vector + def ValuesAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # Int32Vector + def ValuesLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Int32Vector + def ValuesIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + +def Int32VectorStart(builder): + builder.StartObject(1) + + +def Start(builder): + Int32VectorStart(builder) + + +def Int32VectorAddValues(builder, values): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(values), 0 + ) + + +def AddValues(builder, values): + Int32VectorAddValues(builder, values) + + +def Int32VectorStartValuesVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartValuesVector(builder, numElems: int) -> int: + return Int32VectorStartValuesVector(builder, numElems) + + +def Int32VectorEnd(builder): + return builder.EndObject() + + +def End(builder): + return Int32VectorEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/L2NormOptions.py b/backends/nxp/backend/ir/lib/tflite/L2NormOptions.py new file mode 100755 index 00000000000..908c45652f4 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/L2NormOptions.py @@ -0,0 +1,65 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class L2NormOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = L2NormOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsL2NormOptions(cls, buf, offset=0): + """This method is deprecated. 
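# --- Editorial aside (not part of the diff): building the `values` vector of
# Int32Vector above. FlatBuffers vectors are written back-to-front, hence the
# reversed() loop; EndVector() is the no-argument flatbuffers>=2.0 form
# (older releases took the element count).
import flatbuffers

from backends.nxp.backend.ir.lib.tflite import Int32Vector

builder = flatbuffers.Builder(0)
data = [10, 20, 30]
Int32Vector.StartValuesVector(builder, len(data))
for v in reversed(data):
    builder.PrependInt32(v)
values = builder.EndVector()

Int32Vector.Start(builder)
Int32Vector.AddValues(builder, values)
vec = Int32Vector.End(builder)
builder.Finish(vec)

parsed = Int32Vector.Int32Vector.GetRootAs(builder.Output(), 0)
assert parsed.ValuesLength() == 3 and parsed.Values(0) == 10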
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def L2NormOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # L2NormOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # L2NormOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + +def L2NormOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + L2NormOptionsStart(builder) + + +def L2NormOptionsAddFusedActivationFunction(builder, fusedActivationFunction): + builder.PrependInt8Slot(0, fusedActivationFunction, 0) + + +def AddFusedActivationFunction(builder, fusedActivationFunction): + L2NormOptionsAddFusedActivationFunction(builder, fusedActivationFunction) + + +def L2NormOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return L2NormOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/LSHProjectionOptions.py b/backends/nxp/backend/ir/lib/tflite/LSHProjectionOptions.py new file mode 100755 index 00000000000..f0285b144ed --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/LSHProjectionOptions.py @@ -0,0 +1,65 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class LSHProjectionOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = LSHProjectionOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsLSHProjectionOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def LSHProjectionOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # LSHProjectionOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # LSHProjectionOptions + def Type(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + +def LSHProjectionOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + LSHProjectionOptionsStart(builder) + + +def LSHProjectionOptionsAddType(builder, type): + builder.PrependInt8Slot(0, type, 0) + + +def AddType(builder, type): + LSHProjectionOptionsAddType(builder, type) + + +def LSHProjectionOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return LSHProjectionOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/LSHProjectionType.py b/backends/nxp/backend/ir/lib/tflite/LSHProjectionType.py new file mode 100755 index 00000000000..32817911448 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/LSHProjectionType.py @@ -0,0 +1,9 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + + +class LSHProjectionType(object): + UNKNOWN = 0 + SPARSE = 1 + DENSE = 2 diff --git a/backends/nxp/backend/ir/lib/tflite/LSTMKernelType.py b/backends/nxp/backend/ir/lib/tflite/LSTMKernelType.py new file mode 100755 index 00000000000..f0e96f3fc56 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/LSTMKernelType.py @@ -0,0 +1,8 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + + +class LSTMKernelType(object): + FULL = 0 + BASIC = 1 diff --git a/backends/nxp/backend/ir/lib/tflite/LSTMOptions.py b/backends/nxp/backend/ir/lib/tflite/LSTMOptions.py new file mode 100755 index 00000000000..ff845946b40 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/LSTMOptions.py @@ -0,0 +1,131 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class LSTMOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = LSTMOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsLSTMOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def LSTMOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # LSTMOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # LSTMOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # LSTMOptions + def CellClip(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get( + flatbuffers.number_types.Float32Flags, o + self._tab.Pos + ) + return 0.0 + + # LSTMOptions + def ProjClip(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get( + flatbuffers.number_types.Float32Flags, o + self._tab.Pos + ) + return 0.0 + + # LSTMOptions + def KernelType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # LSTMOptions + def AsymmetricQuantizeInputs(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + +def LSTMOptionsStart(builder): + builder.StartObject(5) + + +def Start(builder): + LSTMOptionsStart(builder) + + +def LSTMOptionsAddFusedActivationFunction(builder, fusedActivationFunction): + builder.PrependInt8Slot(0, fusedActivationFunction, 0) + + +def AddFusedActivationFunction(builder, fusedActivationFunction): + LSTMOptionsAddFusedActivationFunction(builder, fusedActivationFunction) + + +def LSTMOptionsAddCellClip(builder, cellClip): + builder.PrependFloat32Slot(1, cellClip, 0.0) + + +def AddCellClip(builder, cellClip): + LSTMOptionsAddCellClip(builder, cellClip) + + +def LSTMOptionsAddProjClip(builder, projClip): + builder.PrependFloat32Slot(2, projClip, 0.0) + + +def AddProjClip(builder, projClip): + LSTMOptionsAddProjClip(builder, projClip) + + +def LSTMOptionsAddKernelType(builder, kernelType): + builder.PrependInt8Slot(3, kernelType, 0) + + +def AddKernelType(builder, kernelType): + LSTMOptionsAddKernelType(builder, kernelType) + + +def LSTMOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): + builder.PrependBoolSlot(4, asymmetricQuantizeInputs, 0) + + +def AddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): + LSTMOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs) + + +def LSTMOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return LSTMOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/LeakyReluOptions.py b/backends/nxp/backend/ir/lib/tflite/LeakyReluOptions.py new file mode 100755 index 00000000000..e940e39dfbb --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/LeakyReluOptions.py @@ -0,0 +1,67 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class LeakyReluOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = LeakyReluOptions() + 
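# --- Editorial aside (not part of the diff): LSTMOptions above shows the
# generated pattern for mixed scalar slots; unset slots read back as their
# schema defaults (0 / 0.0 / False), so only non-default values need writing.
import flatbuffers

from backends.nxp.backend.ir.lib.tflite import LSTMOptions

builder = flatbuffers.Builder(0)
LSTMOptions.Start(builder)
LSTMOptions.AddCellClip(builder, 10.0)  # float32 slot 1; all others default
lstm = LSTMOptions.End(builder)
builder.Finish(lstm)

opts = LSTMOptions.LSTMOptions.GetRootAs(builder.Output(), 0)
assert opts.CellClip() == 10.0
assert opts.KernelType() == 0            # LSTMKernelType.FULL, the default
assert opts.AsymmetricQuantizeInputs() is False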
x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsLeakyReluOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def LeakyReluOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # LeakyReluOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # LeakyReluOptions + def Alpha(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get( + flatbuffers.number_types.Float32Flags, o + self._tab.Pos + ) + return 0.0 + + +def LeakyReluOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + LeakyReluOptionsStart(builder) + + +def LeakyReluOptionsAddAlpha(builder, alpha): + builder.PrependFloat32Slot(0, alpha, 0.0) + + +def AddAlpha(builder, alpha): + LeakyReluOptionsAddAlpha(builder, alpha) + + +def LeakyReluOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return LeakyReluOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/LessEqualOptions.py b/backends/nxp/backend/ir/lib/tflite/LessEqualOptions.py new file mode 100755 index 00000000000..2475f1614a6 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/LessEqualOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class LessEqualOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = LessEqualOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsLessEqualOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def LessEqualOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # LessEqualOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def LessEqualOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + LessEqualOptionsStart(builder) + + +def LessEqualOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return LessEqualOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/LessOptions.py b/backends/nxp/backend/ir/lib/tflite/LessOptions.py new file mode 100755 index 00000000000..651fece38e2 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/LessOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class LessOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = LessOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsLessOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def LessOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # LessOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def LessOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + LessOptionsStart(builder) + + +def LessOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return LessOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/LocalResponseNormalizationOptions.py b/backends/nxp/backend/ir/lib/tflite/LocalResponseNormalizationOptions.py new file mode 100755 index 00000000000..aaf2b5968ee --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/LocalResponseNormalizationOptions.py @@ -0,0 +1,118 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class LocalResponseNormalizationOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = LocalResponseNormalizationOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsLocalResponseNormalizationOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def LocalResponseNormalizationOptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # LocalResponseNormalizationOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # LocalResponseNormalizationOptions + def Radius(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # LocalResponseNormalizationOptions + def Bias(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get( + flatbuffers.number_types.Float32Flags, o + self._tab.Pos + ) + return 0.0 + + # LocalResponseNormalizationOptions + def Alpha(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get( + flatbuffers.number_types.Float32Flags, o + self._tab.Pos + ) + return 0.0 + + # LocalResponseNormalizationOptions + def Beta(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.Get( + flatbuffers.number_types.Float32Flags, o + self._tab.Pos + ) + return 0.0 + + +def LocalResponseNormalizationOptionsStart(builder): + builder.StartObject(4) + + +def Start(builder): + LocalResponseNormalizationOptionsStart(builder) + + +def LocalResponseNormalizationOptionsAddRadius(builder, radius): + builder.PrependInt32Slot(0, radius, 0) + + +def AddRadius(builder, radius): + LocalResponseNormalizationOptionsAddRadius(builder, radius) + + +def LocalResponseNormalizationOptionsAddBias(builder, bias): + builder.PrependFloat32Slot(1, bias, 0.0) + + +def AddBias(builder, bias): + LocalResponseNormalizationOptionsAddBias(builder, bias) + + +def LocalResponseNormalizationOptionsAddAlpha(builder, alpha): + 
builder.PrependFloat32Slot(2, alpha, 0.0) + + +def AddAlpha(builder, alpha): + LocalResponseNormalizationOptionsAddAlpha(builder, alpha) + + +def LocalResponseNormalizationOptionsAddBeta(builder, beta): + builder.PrependFloat32Slot(3, beta, 0.0) + + +def AddBeta(builder, beta): + LocalResponseNormalizationOptionsAddBeta(builder, beta) + + +def LocalResponseNormalizationOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return LocalResponseNormalizationOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/LogSoftmaxOptions.py b/backends/nxp/backend/ir/lib/tflite/LogSoftmaxOptions.py new file mode 100755 index 00000000000..3ede83f2bc0 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/LogSoftmaxOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class LogSoftmaxOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = LogSoftmaxOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsLogSoftmaxOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def LogSoftmaxOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # LogSoftmaxOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def LogSoftmaxOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + LogSoftmaxOptionsStart(builder) + + +def LogSoftmaxOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return LogSoftmaxOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/LogicalAndOptions.py b/backends/nxp/backend/ir/lib/tflite/LogicalAndOptions.py new file mode 100755 index 00000000000..7ce030db964 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/LogicalAndOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class LogicalAndOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = LogicalAndOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsLogicalAndOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def LogicalAndOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # LogicalAndOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def LogicalAndOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + LogicalAndOptionsStart(builder) + + +def LogicalAndOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return LogicalAndOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/LogicalNotOptions.py b/backends/nxp/backend/ir/lib/tflite/LogicalNotOptions.py new file mode 100755 index 00000000000..356ef751c80 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/LogicalNotOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class LogicalNotOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = LogicalNotOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsLogicalNotOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def LogicalNotOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # LogicalNotOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def LogicalNotOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + LogicalNotOptionsStart(builder) + + +def LogicalNotOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return LogicalNotOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/LogicalOrOptions.py b/backends/nxp/backend/ir/lib/tflite/LogicalOrOptions.py new file mode 100755 index 00000000000..f1bd65d16af --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/LogicalOrOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class LogicalOrOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = LogicalOrOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsLogicalOrOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def LogicalOrOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # LogicalOrOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def LogicalOrOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + LogicalOrOptionsStart(builder) + + +def LogicalOrOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return LogicalOrOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/MatrixDiagOptions.py b/backends/nxp/backend/ir/lib/tflite/MatrixDiagOptions.py new file mode 100755 index 00000000000..3e7488ad6af --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/MatrixDiagOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class MatrixDiagOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = MatrixDiagOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsMatrixDiagOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def MatrixDiagOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # MatrixDiagOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def MatrixDiagOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + MatrixDiagOptionsStart(builder) + + +def MatrixDiagOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return MatrixDiagOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/MatrixSetDiagOptions.py b/backends/nxp/backend/ir/lib/tflite/MatrixSetDiagOptions.py new file mode 100755 index 00000000000..7abc0165447 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/MatrixSetDiagOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class MatrixSetDiagOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = MatrixSetDiagOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsMatrixSetDiagOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def MatrixSetDiagOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # MatrixSetDiagOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def MatrixSetDiagOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + MatrixSetDiagOptionsStart(builder) + + +def MatrixSetDiagOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return MatrixSetDiagOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/MaximumMinimumOptions.py b/backends/nxp/backend/ir/lib/tflite/MaximumMinimumOptions.py new file mode 100755 index 00000000000..ea37c25824f --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/MaximumMinimumOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class MaximumMinimumOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = MaximumMinimumOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsMaximumMinimumOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def MaximumMinimumOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # MaximumMinimumOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def MaximumMinimumOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + MaximumMinimumOptionsStart(builder) + + +def MaximumMinimumOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return MaximumMinimumOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/Metadata.py b/backends/nxp/backend/ir/lib/tflite/Metadata.py new file mode 100755 index 00000000000..3816caffa09 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/Metadata.py @@ -0,0 +1,84 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class Metadata(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = Metadata() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsMetadata(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def MetadataBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # Metadata + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # Metadata + def Name(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.String(o + self._tab.Pos) + return None + + # Metadata + def Buffer(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get( + flatbuffers.number_types.Uint32Flags, o + self._tab.Pos + ) + return 0 + + +def MetadataStart(builder): + builder.StartObject(2) + + +def Start(builder): + MetadataStart(builder) + + +def MetadataAddName(builder, name): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0 + ) + + +def AddName(builder, name): + MetadataAddName(builder, name) + + +def MetadataAddBuffer(builder, buffer): + builder.PrependUint32Slot(1, buffer, 0) + + +def AddBuffer(builder, buffer): + MetadataAddBuffer(builder, buffer) + + +def MetadataEnd(builder): + return builder.EndObject() + + +def End(builder): + return MetadataEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/MirrorPadMode.py b/backends/nxp/backend/ir/lib/tflite/MirrorPadMode.py new file mode 100755 index 00000000000..85718ebf244 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/MirrorPadMode.py @@ -0,0 +1,8 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + + +class MirrorPadMode(object): + REFLECT = 0 + SYMMETRIC = 1 diff --git a/backends/nxp/backend/ir/lib/tflite/MirrorPadOptions.py b/backends/nxp/backend/ir/lib/tflite/MirrorPadOptions.py new file mode 100755 index 00000000000..2cf97b85350 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/MirrorPadOptions.py @@ -0,0 +1,65 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class MirrorPadOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = MirrorPadOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsMirrorPadOptions(cls, buf, offset=0): + """This method is deprecated. 
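# --- Editorial aside (not part of the diff): Metadata above pairs a string
# with a buffer index. Strings must be created before Start(); the entry name
# used here is hypothetical.
import flatbuffers

from backends.nxp.backend.ir.lib.tflite import Metadata

builder = flatbuffers.Builder(0)
name = builder.CreateString("min_runtime_version")  # hypothetical entry name
Metadata.Start(builder)
Metadata.AddName(builder, name)
Metadata.AddBuffer(builder, 3)  # index into the model's buffers vector
meta = Metadata.End(builder)
builder.Finish(meta)

parsed = Metadata.Metadata.GetRootAs(builder.Output(), 0)
assert parsed.Name() == b"min_runtime_version" and parsed.Buffer() == 3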
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def MirrorPadOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # MirrorPadOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # MirrorPadOptions + def Mode(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + +def MirrorPadOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + MirrorPadOptionsStart(builder) + + +def MirrorPadOptionsAddMode(builder, mode): + builder.PrependInt8Slot(0, mode, 0) + + +def AddMode(builder, mode): + MirrorPadOptionsAddMode(builder, mode) + + +def MirrorPadOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return MirrorPadOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/Model.py b/backends/nxp/backend/ir/lib/tflite/Model.py new file mode 100755 index 00000000000..767243442db --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/Model.py @@ -0,0 +1,352 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class Model(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = Model() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsModel(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def ModelBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # Model + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # Model + def Version(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get( + flatbuffers.number_types.Uint32Flags, o + self._tab.Pos + ) + return 0 + + # Model + def OperatorCodes(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + x = self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + from .OperatorCode import OperatorCode + + obj = OperatorCode() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # Model + def OperatorCodesLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Model + def OperatorCodesIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + return o == 0 + + # Model + def Subgraphs(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + x = self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + from .SubGraph import SubGraph + + obj = SubGraph() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # Model + def SubgraphsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Model + def 
SubgraphsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + return o == 0 + + # Model + def Description(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.String(o + self._tab.Pos) + return None + + # Model + def Buffers(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + x = self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + from .Buffer import Buffer + + obj = Buffer() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # Model + def BuffersLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Model + def BuffersIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + return o == 0 + + # Model + def MetadataBuffer(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int32Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4), + ) + return 0 + + # Model + def MetadataBufferAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # Model + def MetadataBufferLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Model + def MetadataBufferIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + return o == 0 + + # Model + def Metadata(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) + if o != 0: + x = self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + from .Metadata import Metadata + + obj = Metadata() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # Model + def MetadataLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Model + def MetadataIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) + return o == 0 + + # Model + def SignatureDefs(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) + if o != 0: + x = self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + from .SignatureDef import SignatureDef + + obj = SignatureDef() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # Model + def SignatureDefsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Model + def SignatureDefsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) + return o == 0 + + +def ModelStart(builder): + builder.StartObject(8) + + +def Start(builder): + ModelStart(builder) + + +def ModelAddVersion(builder, version): + builder.PrependUint32Slot(0, version, 0) + + +def AddVersion(builder, version): + ModelAddVersion(builder, version) + + +def ModelAddOperatorCodes(builder, operatorCodes): + builder.PrependUOffsetTRelativeSlot( + 1, flatbuffers.number_types.UOffsetTFlags.py_type(operatorCodes), 0 + ) + + +def 
AddOperatorCodes(builder, operatorCodes): + ModelAddOperatorCodes(builder, operatorCodes) + + +def ModelStartOperatorCodesVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartOperatorCodesVector(builder, numElems: int) -> int: + return ModelStartOperatorCodesVector(builder, numElems) + + +def ModelAddSubgraphs(builder, subgraphs): + builder.PrependUOffsetTRelativeSlot( + 2, flatbuffers.number_types.UOffsetTFlags.py_type(subgraphs), 0 + ) + + +def AddSubgraphs(builder, subgraphs): + ModelAddSubgraphs(builder, subgraphs) + + +def ModelStartSubgraphsVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartSubgraphsVector(builder, numElems: int) -> int: + return ModelStartSubgraphsVector(builder, numElems) + + +def ModelAddDescription(builder, description): + builder.PrependUOffsetTRelativeSlot( + 3, flatbuffers.number_types.UOffsetTFlags.py_type(description), 0 + ) + + +def AddDescription(builder, description): + ModelAddDescription(builder, description) + + +def ModelAddBuffers(builder, buffers): + builder.PrependUOffsetTRelativeSlot( + 4, flatbuffers.number_types.UOffsetTFlags.py_type(buffers), 0 + ) + + +def AddBuffers(builder, buffers): + ModelAddBuffers(builder, buffers) + + +def ModelStartBuffersVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartBuffersVector(builder, numElems: int) -> int: + return ModelStartBuffersVector(builder, numElems) + + +def ModelAddMetadataBuffer(builder, metadataBuffer): + builder.PrependUOffsetTRelativeSlot( + 5, flatbuffers.number_types.UOffsetTFlags.py_type(metadataBuffer), 0 + ) + + +def AddMetadataBuffer(builder, metadataBuffer): + ModelAddMetadataBuffer(builder, metadataBuffer) + + +def ModelStartMetadataBufferVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartMetadataBufferVector(builder, numElems: int) -> int: + return ModelStartMetadataBufferVector(builder, numElems) + + +def ModelAddMetadata(builder, metadata): + builder.PrependUOffsetTRelativeSlot( + 6, flatbuffers.number_types.UOffsetTFlags.py_type(metadata), 0 + ) + + +def AddMetadata(builder, metadata): + ModelAddMetadata(builder, metadata) + + +def ModelStartMetadataVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartMetadataVector(builder, numElems: int) -> int: + return ModelStartMetadataVector(builder, numElems) + + +def ModelAddSignatureDefs(builder, signatureDefs): + builder.PrependUOffsetTRelativeSlot( + 7, flatbuffers.number_types.UOffsetTFlags.py_type(signatureDefs), 0 + ) + + +def AddSignatureDefs(builder, signatureDefs): + ModelAddSignatureDefs(builder, signatureDefs) + + +def ModelStartSignatureDefsVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartSignatureDefsVector(builder, numElems: int) -> int: + return ModelStartSignatureDefsVector(builder, numElems) + + +def ModelEnd(builder): + return builder.EndObject() + + +def End(builder): + return ModelEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/MulOptions.py b/backends/nxp/backend/ir/lib/tflite/MulOptions.py new file mode 100755 index 00000000000..afa33eaaee6 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/MulOptions.py @@ -0,0 +1,65 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class MulOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, 
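# --- Editorial aside (not part of the diff): the Model table above is the
# flatbuffer root, so a serialized LiteRT/TFLite file can be inspected
# directly with the generated readers; "model.tflite" is a hypothetical path.
from backends.nxp.backend.ir.lib.tflite import Model

with open("model.tflite", "rb") as f:
    buf = f.read()

# The file identifier checked here is b"TFL3", as in the generated helpers.
if Model.Model.ModelBufferHasIdentifier(buf, 0):
    model = Model.Model.GetRootAs(buf, 0)
    print(model.Version(), model.SubgraphsLength(), model.OperatorCodesLength())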
offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = MulOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsMulOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def MulOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # MulOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # MulOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + +def MulOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + MulOptionsStart(builder) + + +def MulOptionsAddFusedActivationFunction(builder, fusedActivationFunction): + builder.PrependInt8Slot(0, fusedActivationFunction, 0) + + +def AddFusedActivationFunction(builder, fusedActivationFunction): + MulOptionsAddFusedActivationFunction(builder, fusedActivationFunction) + + +def MulOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return MulOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/NegOptions.py b/backends/nxp/backend/ir/lib/tflite/NegOptions.py new file mode 100755 index 00000000000..892f1883f2a --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/NegOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class NegOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = NegOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsNegOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def NegOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # NegOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def NegOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + NegOptionsStart(builder) + + +def NegOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return NegOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/NonMaxSuppressionV4Options.py b/backends/nxp/backend/ir/lib/tflite/NonMaxSuppressionV4Options.py new file mode 100755 index 00000000000..27b46c178ba --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/NonMaxSuppressionV4Options.py @@ -0,0 +1,52 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class NonMaxSuppressionV4Options(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = NonMaxSuppressionV4Options() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsNonMaxSuppressionV4Options(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def NonMaxSuppressionV4OptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # NonMaxSuppressionV4Options + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def NonMaxSuppressionV4OptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + NonMaxSuppressionV4OptionsStart(builder) + + +def NonMaxSuppressionV4OptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return NonMaxSuppressionV4OptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/NonMaxSuppressionV5Options.py b/backends/nxp/backend/ir/lib/tflite/NonMaxSuppressionV5Options.py new file mode 100755 index 00000000000..3007be79baa --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/NonMaxSuppressionV5Options.py @@ -0,0 +1,52 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class NonMaxSuppressionV5Options(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = NonMaxSuppressionV5Options() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsNonMaxSuppressionV5Options(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def NonMaxSuppressionV5OptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # NonMaxSuppressionV5Options + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def NonMaxSuppressionV5OptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + NonMaxSuppressionV5OptionsStart(builder) + + +def NonMaxSuppressionV5OptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return NonMaxSuppressionV5OptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/NotEqualOptions.py b/backends/nxp/backend/ir/lib/tflite/NotEqualOptions.py new file mode 100755 index 00000000000..910ae01b0aa --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/NotEqualOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class NotEqualOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = NotEqualOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsNotEqualOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def NotEqualOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # NotEqualOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def NotEqualOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + NotEqualOptionsStart(builder) + + +def NotEqualOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return NotEqualOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/OneHotOptions.py b/backends/nxp/backend/ir/lib/tflite/OneHotOptions.py new file mode 100755 index 00000000000..6ef4974a559 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/OneHotOptions.py @@ -0,0 +1,65 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class OneHotOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = OneHotOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsOneHotOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def OneHotOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # OneHotOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # OneHotOptions + def Axis(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + +def OneHotOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + OneHotOptionsStart(builder) + + +def OneHotOptionsAddAxis(builder, axis): + builder.PrependInt32Slot(0, axis, 0) + + +def AddAxis(builder, axis): + OneHotOptionsAddAxis(builder, axis) + + +def OneHotOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return OneHotOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/Operator.py b/backends/nxp/backend/ir/lib/tflite/Operator.py new file mode 100755 index 00000000000..2638b5c2878 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/Operator.py @@ -0,0 +1,428 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class Operator(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = Operator() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsOperator(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def OperatorBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # Operator + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # Operator + def OpcodeIndex(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get( + flatbuffers.number_types.Uint32Flags, o + self._tab.Pos + ) + return 0 + + # Operator + def Inputs(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int32Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4), + ) + return 0 + + # Operator + def InputsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # Operator + def InputsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Operator + def InputsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + return o == 0 + + # Operator + def Outputs(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int32Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4), + ) + return 0 + + # Operator + def OutputsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 
0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # Operator + def OutputsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Operator + def OutputsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + return o == 0 + + # Operator + def BuiltinOptionsType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos) + return 0 + + # Operator + def BuiltinOptions(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + from flatbuffers.table import Table + + obj = Table(bytearray(), 0) + self._tab.Union(obj, o) + return obj + return None + + # Operator + def CustomOptions(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Uint8Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1), + ) + return 0 + + # Operator + def CustomOptionsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o) + return 0 + + # Operator + def CustomOptionsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Operator + def CustomOptionsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + return o == 0 + + # Operator + def CustomOptionsFormat(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # Operator + def MutatingVariableInputs(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.BoolFlags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1), + ) + return 0 + + # Operator + def MutatingVariableInputsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.BoolFlags, o) + return 0 + + # Operator + def MutatingVariableInputsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Operator + def MutatingVariableInputsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) + return o == 0 + + # Operator + def Intermediates(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int32Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4), + ) + return 0 + + # Operator + def IntermediatesAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # Operator + def IntermediatesLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Operator + def 
IntermediatesIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20)) + return o == 0 + + # Operator + def LargeCustomOptionsOffset(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22)) + if o != 0: + return self._tab.Get( + flatbuffers.number_types.Uint64Flags, o + self._tab.Pos + ) + return 0 + + # Operator + def LargeCustomOptionsSize(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24)) + if o != 0: + return self._tab.Get( + flatbuffers.number_types.Uint64Flags, o + self._tab.Pos + ) + return 0 + + # Operator + def BuiltinOptions2Type(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos) + return 0 + + # Operator + def BuiltinOptions2(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(28)) + if o != 0: + from flatbuffers.table import Table + + obj = Table(bytearray(), 0) + self._tab.Union(obj, o) + return obj + return None + + +def OperatorStart(builder): + builder.StartObject(13) + + +def Start(builder): + OperatorStart(builder) + + +def OperatorAddOpcodeIndex(builder, opcodeIndex): + builder.PrependUint32Slot(0, opcodeIndex, 0) + + +def AddOpcodeIndex(builder, opcodeIndex): + OperatorAddOpcodeIndex(builder, opcodeIndex) + + +def OperatorAddInputs(builder, inputs): + builder.PrependUOffsetTRelativeSlot( + 1, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0 + ) + + +def AddInputs(builder, inputs): + OperatorAddInputs(builder, inputs) + + +def OperatorStartInputsVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartInputsVector(builder, numElems: int) -> int: + return OperatorStartInputsVector(builder, numElems) + + +def OperatorAddOutputs(builder, outputs): + builder.PrependUOffsetTRelativeSlot( + 2, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0 + ) + + +def AddOutputs(builder, outputs): + OperatorAddOutputs(builder, outputs) + + +def OperatorStartOutputsVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartOutputsVector(builder, numElems: int) -> int: + return OperatorStartOutputsVector(builder, numElems) + + +def OperatorAddBuiltinOptionsType(builder, builtinOptionsType): + builder.PrependUint8Slot(3, builtinOptionsType, 0) + + +def AddBuiltinOptionsType(builder, builtinOptionsType): + OperatorAddBuiltinOptionsType(builder, builtinOptionsType) + + +def OperatorAddBuiltinOptions(builder, builtinOptions): + builder.PrependUOffsetTRelativeSlot( + 4, flatbuffers.number_types.UOffsetTFlags.py_type(builtinOptions), 0 + ) + + +def AddBuiltinOptions(builder, builtinOptions): + OperatorAddBuiltinOptions(builder, builtinOptions) + + +def OperatorAddCustomOptions(builder, customOptions): + builder.PrependUOffsetTRelativeSlot( + 5, flatbuffers.number_types.UOffsetTFlags.py_type(customOptions), 0 + ) + + +def AddCustomOptions(builder, customOptions): + OperatorAddCustomOptions(builder, customOptions) + + +def OperatorStartCustomOptionsVector(builder, numElems): + return builder.StartVector(1, numElems, 1) + + +def StartCustomOptionsVector(builder, numElems: int) -> int: + return OperatorStartCustomOptionsVector(builder, numElems) + + +def OperatorAddCustomOptionsFormat(builder, customOptionsFormat): + builder.PrependInt8Slot(6, customOptionsFormat, 0) + + +def AddCustomOptionsFormat(builder, customOptionsFormat): + OperatorAddCustomOptionsFormat(builder, customOptionsFormat) + + 
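+# Usage sketch (assumed pattern; `builder`, `inputs_vec`, and the element
+# values are hypothetical, not part of this generated module): vectors are
+# built first, then the table is assembled with the helpers above. Note that
+# FlatBuffers vectors are written back-to-front, so elements are prepended
+# in reverse order.
+#
+#   builder = flatbuffers.Builder(0)
+#   StartInputsVector(builder, 2)
+#   builder.PrependInt32(1)          # becomes inputs[1]
+#   builder.PrependInt32(0)          # becomes inputs[0]
+#   inputs_vec = builder.EndVector()
+#   Start(builder)
+#   AddOpcodeIndex(builder, 0)
+#   AddInputs(builder, inputs_vec)
+#   op = End(builder)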
+def OperatorAddMutatingVariableInputs(builder, mutatingVariableInputs): + builder.PrependUOffsetTRelativeSlot( + 7, flatbuffers.number_types.UOffsetTFlags.py_type(mutatingVariableInputs), 0 + ) + + +def AddMutatingVariableInputs(builder, mutatingVariableInputs): + OperatorAddMutatingVariableInputs(builder, mutatingVariableInputs) + + +def OperatorStartMutatingVariableInputsVector(builder, numElems): + return builder.StartVector(1, numElems, 1) + + +def StartMutatingVariableInputsVector(builder, numElems: int) -> int: + return OperatorStartMutatingVariableInputsVector(builder, numElems) + + +def OperatorAddIntermediates(builder, intermediates): + builder.PrependUOffsetTRelativeSlot( + 8, flatbuffers.number_types.UOffsetTFlags.py_type(intermediates), 0 + ) + + +def AddIntermediates(builder, intermediates): + OperatorAddIntermediates(builder, intermediates) + + +def OperatorStartIntermediatesVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartIntermediatesVector(builder, numElems: int) -> int: + return OperatorStartIntermediatesVector(builder, numElems) + + +def OperatorAddLargeCustomOptionsOffset(builder, largeCustomOptionsOffset): + builder.PrependUint64Slot(9, largeCustomOptionsOffset, 0) + + +def AddLargeCustomOptionsOffset(builder, largeCustomOptionsOffset): + OperatorAddLargeCustomOptionsOffset(builder, largeCustomOptionsOffset) + + +def OperatorAddLargeCustomOptionsSize(builder, largeCustomOptionsSize): + builder.PrependUint64Slot(10, largeCustomOptionsSize, 0) + + +def AddLargeCustomOptionsSize(builder, largeCustomOptionsSize): + OperatorAddLargeCustomOptionsSize(builder, largeCustomOptionsSize) + + +def OperatorAddBuiltinOptions2Type(builder, builtinOptions2Type): + builder.PrependUint8Slot(11, builtinOptions2Type, 0) + + +def AddBuiltinOptions2Type(builder, builtinOptions2Type): + OperatorAddBuiltinOptions2Type(builder, builtinOptions2Type) + + +def OperatorAddBuiltinOptions2(builder, builtinOptions2): + builder.PrependUOffsetTRelativeSlot( + 12, flatbuffers.number_types.UOffsetTFlags.py_type(builtinOptions2), 0 + ) + + +def AddBuiltinOptions2(builder, builtinOptions2): + OperatorAddBuiltinOptions2(builder, builtinOptions2) + + +def OperatorEnd(builder): + return builder.EndObject() + + +def End(builder): + return OperatorEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/OperatorCode.py b/backends/nxp/backend/ir/lib/tflite/OperatorCode.py new file mode 100755 index 00000000000..fa714103125 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/OperatorCode.py @@ -0,0 +1,112 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class OperatorCode(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = OperatorCode() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsOperatorCode(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def OperatorCodeBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # OperatorCode + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # OperatorCode + def DeprecatedBuiltinCode(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # OperatorCode + def CustomCode(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.String(o + self._tab.Pos) + return None + + # OperatorCode + def Version(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 1 + + # OperatorCode + def BuiltinCode(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + +def OperatorCodeStart(builder): + builder.StartObject(4) + + +def Start(builder): + OperatorCodeStart(builder) + + +def OperatorCodeAddDeprecatedBuiltinCode(builder, deprecatedBuiltinCode): + builder.PrependInt8Slot(0, deprecatedBuiltinCode, 0) + + +def AddDeprecatedBuiltinCode(builder, deprecatedBuiltinCode): + OperatorCodeAddDeprecatedBuiltinCode(builder, deprecatedBuiltinCode) + + +def OperatorCodeAddCustomCode(builder, customCode): + builder.PrependUOffsetTRelativeSlot( + 1, flatbuffers.number_types.UOffsetTFlags.py_type(customCode), 0 + ) + + +def AddCustomCode(builder, customCode): + OperatorCodeAddCustomCode(builder, customCode) + + +def OperatorCodeAddVersion(builder, version): + builder.PrependInt32Slot(2, version, 1) + + +def AddVersion(builder, version): + OperatorCodeAddVersion(builder, version) + + +def OperatorCodeAddBuiltinCode(builder, builtinCode): + builder.PrependInt32Slot(3, builtinCode, 0) + + +def AddBuiltinCode(builder, builtinCode): + OperatorCodeAddBuiltinCode(builder, builtinCode) + + +def OperatorCodeEnd(builder): + return builder.EndObject() + + +def End(builder): + return OperatorCodeEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/PackOptions.py b/backends/nxp/backend/ir/lib/tflite/PackOptions.py new file mode 100755 index 00000000000..1c527337eb9 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/PackOptions.py @@ -0,0 +1,80 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class PackOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = PackOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsPackOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def PackOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # PackOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # PackOptions + def ValuesCount(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # PackOptions + def Axis(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + +def PackOptionsStart(builder): + builder.StartObject(2) + + +def Start(builder): + PackOptionsStart(builder) + + +def PackOptionsAddValuesCount(builder, valuesCount): + builder.PrependInt32Slot(0, valuesCount, 0) + + +def AddValuesCount(builder, valuesCount): + PackOptionsAddValuesCount(builder, valuesCount) + + +def PackOptionsAddAxis(builder, axis): + builder.PrependInt32Slot(1, axis, 0) + + +def AddAxis(builder, axis): + PackOptionsAddAxis(builder, axis) + + +def PackOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return PackOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/PadOptions.py b/backends/nxp/backend/ir/lib/tflite/PadOptions.py new file mode 100755 index 00000000000..a35dcba995d --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/PadOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class PadOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = PadOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsPadOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def PadOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # PadOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def PadOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + PadOptionsStart(builder) + + +def PadOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return PadOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/PadV2Options.py b/backends/nxp/backend/ir/lib/tflite/PadV2Options.py new file mode 100755 index 00000000000..b1ebd175be8 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/PadV2Options.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class PadV2Options(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = PadV2Options() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsPadV2Options(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def PadV2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # PadV2Options + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def PadV2OptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + PadV2OptionsStart(builder) + + +def PadV2OptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return PadV2OptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/Padding.py b/backends/nxp/backend/ir/lib/tflite/Padding.py new file mode 100755 index 00000000000..b8b908c0c21 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/Padding.py @@ -0,0 +1,8 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + + +class Padding(object): + SAME = 0 + VALID = 1 diff --git a/backends/nxp/backend/ir/lib/tflite/Pool2DOptions.py b/backends/nxp/backend/ir/lib/tflite/Pool2DOptions.py new file mode 100755 index 00000000000..454a385b6d3 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/Pool2DOptions.py @@ -0,0 +1,140 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class Pool2DOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = Pool2DOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsPool2DOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def Pool2DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # Pool2DOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # Pool2DOptions + def Padding(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # Pool2DOptions + def StrideW(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # Pool2DOptions + def StrideH(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # Pool2DOptions + def FilterWidth(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # Pool2DOptions + def FilterHeight(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # Pool2DOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + +def Pool2DOptionsStart(builder): + builder.StartObject(6) + + 
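+# Reader-side sketch (assumed pattern; `op` is a hypothetical Operator table):
+# in a full model these options usually arrive as the builtin_options union of
+# an Operator, and the union table is resolved manually.
+#
+#   tab = op.BuiltinOptions()         # flatbuffers.table.Table, or None
+#   opts = Pool2DOptions()
+#   opts.Init(tab.Bytes, tab.Pos)     # only valid when BuiltinOptionsType()
+#                                     # identifies Pool2DOptions
+#   stride_w, stride_h = opts.StrideW(), opts.StrideH()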
+def Start(builder): + Pool2DOptionsStart(builder) + + +def Pool2DOptionsAddPadding(builder, padding): + builder.PrependInt8Slot(0, padding, 0) + + +def AddPadding(builder, padding): + Pool2DOptionsAddPadding(builder, padding) + + +def Pool2DOptionsAddStrideW(builder, strideW): + builder.PrependInt32Slot(1, strideW, 0) + + +def AddStrideW(builder, strideW): + Pool2DOptionsAddStrideW(builder, strideW) + + +def Pool2DOptionsAddStrideH(builder, strideH): + builder.PrependInt32Slot(2, strideH, 0) + + +def AddStrideH(builder, strideH): + Pool2DOptionsAddStrideH(builder, strideH) + + +def Pool2DOptionsAddFilterWidth(builder, filterWidth): + builder.PrependInt32Slot(3, filterWidth, 0) + + +def AddFilterWidth(builder, filterWidth): + Pool2DOptionsAddFilterWidth(builder, filterWidth) + + +def Pool2DOptionsAddFilterHeight(builder, filterHeight): + builder.PrependInt32Slot(4, filterHeight, 0) + + +def AddFilterHeight(builder, filterHeight): + Pool2DOptionsAddFilterHeight(builder, filterHeight) + + +def Pool2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction): + builder.PrependInt8Slot(5, fusedActivationFunction, 0) + + +def AddFusedActivationFunction(builder, fusedActivationFunction): + Pool2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction) + + +def Pool2DOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return Pool2DOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/PowOptions.py b/backends/nxp/backend/ir/lib/tflite/PowOptions.py new file mode 100755 index 00000000000..bd1e5f6e0b0 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/PowOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class PowOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = PowOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsPowOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def PowOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # PowOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def PowOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + PowOptionsStart(builder) + + +def PowOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return PowOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/QuantizationDetails.py b/backends/nxp/backend/ir/lib/tflite/QuantizationDetails.py new file mode 100755 index 00000000000..93e322e1865 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/QuantizationDetails.py @@ -0,0 +1,8 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + + +class QuantizationDetails(object): + NONE = 0 + CustomQuantization = 1 diff --git a/backends/nxp/backend/ir/lib/tflite/QuantizationParameters.py b/backends/nxp/backend/ir/lib/tflite/QuantizationParameters.py new file mode 100755 index 00000000000..58b56643af2 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/QuantizationParameters.py @@ -0,0 +1,295 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class QuantizationParameters(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = QuantizationParameters() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsQuantizationParameters(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def QuantizationParametersBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # QuantizationParameters + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # QuantizationParameters + def Min(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Float32Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4), + ) + return 0 + + # QuantizationParameters + def MinAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o) + return 0 + + # QuantizationParameters + def MinLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # QuantizationParameters + def MinIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + # QuantizationParameters + def Max(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Float32Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4), + ) + return 0 + + # QuantizationParameters + def MaxAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o) + return 0 + + # QuantizationParameters + def MaxLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # QuantizationParameters + def MaxIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + return o == 0 + + # QuantizationParameters + def Scale(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Float32Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4), + ) + return 0 + + # QuantizationParameters + def ScaleAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o) + return 0 + + # QuantizationParameters + def ScaleLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # QuantizationParameters + def ScaleIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + return o == 0 + + # QuantizationParameters + def ZeroPoint(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # QuantizationParameters + def ZeroPointAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # QuantizationParameters + def 
ZeroPointLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # QuantizationParameters + def ZeroPointIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + return o == 0 + + # QuantizationParameters + def DetailsType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos) + return 0 + + # QuantizationParameters + def Details(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + from flatbuffers.table import Table + + obj = Table(bytearray(), 0) + self._tab.Union(obj, o) + return obj + return None + + # QuantizationParameters + def QuantizedDimension(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + +def QuantizationParametersStart(builder): + builder.StartObject(7) + + +def Start(builder): + QuantizationParametersStart(builder) + + +def QuantizationParametersAddMin(builder, min): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(min), 0 + ) + + +def AddMin(builder, min): + QuantizationParametersAddMin(builder, min) + + +def QuantizationParametersStartMinVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartMinVector(builder, numElems: int) -> int: + return QuantizationParametersStartMinVector(builder, numElems) + + +def QuantizationParametersAddMax(builder, max): + builder.PrependUOffsetTRelativeSlot( + 1, flatbuffers.number_types.UOffsetTFlags.py_type(max), 0 + ) + + +def AddMax(builder, max): + QuantizationParametersAddMax(builder, max) + + +def QuantizationParametersStartMaxVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartMaxVector(builder, numElems: int) -> int: + return QuantizationParametersStartMaxVector(builder, numElems) + + +def QuantizationParametersAddScale(builder, scale): + builder.PrependUOffsetTRelativeSlot( + 2, flatbuffers.number_types.UOffsetTFlags.py_type(scale), 0 + ) + + +def AddScale(builder, scale): + QuantizationParametersAddScale(builder, scale) + + +def QuantizationParametersStartScaleVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartScaleVector(builder, numElems: int) -> int: + return QuantizationParametersStartScaleVector(builder, numElems) + + +def QuantizationParametersAddZeroPoint(builder, zeroPoint): + builder.PrependUOffsetTRelativeSlot( + 3, flatbuffers.number_types.UOffsetTFlags.py_type(zeroPoint), 0 + ) + + +def AddZeroPoint(builder, zeroPoint): + QuantizationParametersAddZeroPoint(builder, zeroPoint) + + +def QuantizationParametersStartZeroPointVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartZeroPointVector(builder, numElems: int) -> int: + return QuantizationParametersStartZeroPointVector(builder, numElems) + + +def QuantizationParametersAddDetailsType(builder, detailsType): + builder.PrependUint8Slot(4, detailsType, 0) + + +def AddDetailsType(builder, detailsType): + QuantizationParametersAddDetailsType(builder, detailsType) + + +def QuantizationParametersAddDetails(builder, details): + builder.PrependUOffsetTRelativeSlot( + 5, flatbuffers.number_types.UOffsetTFlags.py_type(details), 0 + ) + + +def AddDetails(builder, details): + 
QuantizationParametersAddDetails(builder, details) + + +def QuantizationParametersAddQuantizedDimension(builder, quantizedDimension): + builder.PrependInt32Slot(6, quantizedDimension, 0) + + +def AddQuantizedDimension(builder, quantizedDimension): + QuantizationParametersAddQuantizedDimension(builder, quantizedDimension) + + +def QuantizationParametersEnd(builder): + return builder.EndObject() + + +def End(builder): + return QuantizationParametersEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/QuantizeOptions.py b/backends/nxp/backend/ir/lib/tflite/QuantizeOptions.py new file mode 100755 index 00000000000..225c5fe2814 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/QuantizeOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class QuantizeOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = QuantizeOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsQuantizeOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def QuantizeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # QuantizeOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def QuantizeOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + QuantizeOptionsStart(builder) + + +def QuantizeOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return QuantizeOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/RNNOptions.py b/backends/nxp/backend/ir/lib/tflite/RNNOptions.py new file mode 100755 index 00000000000..889baf3dc0c --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/RNNOptions.py @@ -0,0 +1,82 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class RNNOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = RNNOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsRNNOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def RNNOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # RNNOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # RNNOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # RNNOptions + def AsymmetricQuantizeInputs(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + +def RNNOptionsStart(builder): + builder.StartObject(2) + + +def Start(builder): + RNNOptionsStart(builder) + + +def RNNOptionsAddFusedActivationFunction(builder, fusedActivationFunction): + builder.PrependInt8Slot(0, fusedActivationFunction, 0) + + +def AddFusedActivationFunction(builder, fusedActivationFunction): + RNNOptionsAddFusedActivationFunction(builder, fusedActivationFunction) + + +def RNNOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): + builder.PrependBoolSlot(1, asymmetricQuantizeInputs, 0) + + +def AddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): + RNNOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs) + + +def RNNOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return RNNOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/RandomOptions.py b/backends/nxp/backend/ir/lib/tflite/RandomOptions.py new file mode 100755 index 00000000000..6b5496bac29 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/RandomOptions.py @@ -0,0 +1,80 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class RandomOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = RandomOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsRandomOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def RandomOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # RandomOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # RandomOptions + def Seed(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos) + return 0 + + # RandomOptions + def Seed2(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos) + return 0 + + +def RandomOptionsStart(builder): + builder.StartObject(2) + + +def Start(builder): + RandomOptionsStart(builder) + + +def RandomOptionsAddSeed(builder, seed): + builder.PrependInt64Slot(0, seed, 0) + + +def AddSeed(builder, seed): + RandomOptionsAddSeed(builder, seed) + + +def RandomOptionsAddSeed2(builder, seed2): + builder.PrependInt64Slot(1, seed2, 0) + + +def AddSeed2(builder, seed2): + RandomOptionsAddSeed2(builder, seed2) + + +def RandomOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return RandomOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/RangeOptions.py b/backends/nxp/backend/ir/lib/tflite/RangeOptions.py new file mode 100755 index 00000000000..b585c0332f6 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/RangeOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class RangeOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = RangeOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsRangeOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def RangeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # RangeOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def RangeOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + RangeOptionsStart(builder) + + +def RangeOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return RangeOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/RankOptions.py b/backends/nxp/backend/ir/lib/tflite/RankOptions.py new file mode 100755 index 00000000000..834e81dfe53 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/RankOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class RankOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = RankOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsRankOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def RankOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # RankOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def RankOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + RankOptionsStart(builder) + + +def RankOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return RankOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/ReadVariableOptions.py b/backends/nxp/backend/ir/lib/tflite/ReadVariableOptions.py new file mode 100755 index 00000000000..d33a50cb66a --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/ReadVariableOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class ReadVariableOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ReadVariableOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsReadVariableOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def ReadVariableOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # ReadVariableOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def ReadVariableOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + ReadVariableOptionsStart(builder) + + +def ReadVariableOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return ReadVariableOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/ReduceWindowFunction.py b/backends/nxp/backend/ir/lib/tflite/ReduceWindowFunction.py new file mode 100755 index 00000000000..6d9bf48a692 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/ReduceWindowFunction.py @@ -0,0 +1,13 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + + +class ReduceWindowFunction(object): + UNSUPPORTED = 0 + ADD = 1 + MUL = 2 + MINIMUM = 3 + MAXIMUM = 4 + ALL = 5 + ANY = 6 diff --git a/backends/nxp/backend/ir/lib/tflite/ReduceWindowOptions.py b/backends/nxp/backend/ir/lib/tflite/ReduceWindowOptions.py new file mode 100755 index 00000000000..273e5ddf8ca --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/ReduceWindowOptions.py @@ -0,0 +1,65 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class ReduceWindowOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ReduceWindowOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsReduceWindowOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def ReduceWindowOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # ReduceWindowOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ReduceWindowOptions + def ReduceFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + +def ReduceWindowOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + ReduceWindowOptionsStart(builder) + + +def ReduceWindowOptionsAddReduceFunction(builder, reduceFunction): + builder.PrependInt32Slot(0, reduceFunction, 0) + + +def AddReduceFunction(builder, reduceFunction): + ReduceWindowOptionsAddReduceFunction(builder, reduceFunction) + + +def ReduceWindowOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return ReduceWindowOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/ReducerOptions.py b/backends/nxp/backend/ir/lib/tflite/ReducerOptions.py new file mode 100755 index 00000000000..81b4ca56716 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/ReducerOptions.py @@ -0,0 +1,67 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class ReducerOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ReducerOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsReducerOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def ReducerOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # ReducerOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ReducerOptions + def KeepDims(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + +def ReducerOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + ReducerOptionsStart(builder) + + +def ReducerOptionsAddKeepDims(builder, keepDims): + builder.PrependBoolSlot(0, keepDims, 0) + + +def AddKeepDims(builder, keepDims): + ReducerOptionsAddKeepDims(builder, keepDims) + + +def ReducerOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return ReducerOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/ReshapeOptions.py b/backends/nxp/backend/ir/lib/tflite/ReshapeOptions.py new file mode 100755 index 00000000000..12c67f92668 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/ReshapeOptions.py @@ -0,0 +1,98 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class ReshapeOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ReshapeOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsReshapeOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def ReshapeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # ReshapeOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ReshapeOptions + def NewShape(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int32Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4), + ) + return 0 + + # ReshapeOptions + def NewShapeAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # ReshapeOptions + def NewShapeLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # ReshapeOptions + def NewShapeIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + +def ReshapeOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + ReshapeOptionsStart(builder) + + +def ReshapeOptionsAddNewShape(builder, newShape): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(newShape), 0 + ) + + +def AddNewShape(builder, newShape): + ReshapeOptionsAddNewShape(builder, newShape) + + +def ReshapeOptionsStartNewShapeVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartNewShapeVector(builder, numElems: int) -> int: + return ReshapeOptionsStartNewShapeVector(builder, numElems) + + +def ReshapeOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return ReshapeOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/ResizeBilinearOptions.py b/backends/nxp/backend/ir/lib/tflite/ResizeBilinearOptions.py new file mode 100755 index 00000000000..9c6e22c233a --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/ResizeBilinearOptions.py @@ -0,0 +1,84 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class ResizeBilinearOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ResizeBilinearOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsResizeBilinearOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def ResizeBilinearOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # ResizeBilinearOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ResizeBilinearOptions + def AlignCorners(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + # ResizeBilinearOptions + def HalfPixelCenters(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + +def ResizeBilinearOptionsStart(builder): + builder.StartObject(4) + + +def Start(builder): + ResizeBilinearOptionsStart(builder) + + +def ResizeBilinearOptionsAddAlignCorners(builder, alignCorners): + builder.PrependBoolSlot(2, alignCorners, 0) + + +def AddAlignCorners(builder, alignCorners): + ResizeBilinearOptionsAddAlignCorners(builder, alignCorners) + + +def ResizeBilinearOptionsAddHalfPixelCenters(builder, halfPixelCenters): + builder.PrependBoolSlot(3, halfPixelCenters, 0) + + +def AddHalfPixelCenters(builder, halfPixelCenters): + ResizeBilinearOptionsAddHalfPixelCenters(builder, halfPixelCenters) + + +def ResizeBilinearOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return ResizeBilinearOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/ResizeNearestNeighborOptions.py b/backends/nxp/backend/ir/lib/tflite/ResizeNearestNeighborOptions.py new file mode 100755 index 00000000000..cb75ce03d04 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/ResizeNearestNeighborOptions.py @@ -0,0 +1,86 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class ResizeNearestNeighborOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ResizeNearestNeighborOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsResizeNearestNeighborOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def ResizeNearestNeighborOptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # ResizeNearestNeighborOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ResizeNearestNeighborOptions + def AlignCorners(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + # ResizeNearestNeighborOptions + def HalfPixelCenters(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + +def ResizeNearestNeighborOptionsStart(builder): + builder.StartObject(2) + + +def Start(builder): + ResizeNearestNeighborOptionsStart(builder) + + +def ResizeNearestNeighborOptionsAddAlignCorners(builder, alignCorners): + builder.PrependBoolSlot(0, alignCorners, 0) + + +def AddAlignCorners(builder, alignCorners): + ResizeNearestNeighborOptionsAddAlignCorners(builder, alignCorners) + + +def ResizeNearestNeighborOptionsAddHalfPixelCenters(builder, halfPixelCenters): + builder.PrependBoolSlot(1, halfPixelCenters, 0) + + +def AddHalfPixelCenters(builder, halfPixelCenters): + ResizeNearestNeighborOptionsAddHalfPixelCenters(builder, halfPixelCenters) + + +def ResizeNearestNeighborOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return ResizeNearestNeighborOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/ReverseSequenceOptions.py b/backends/nxp/backend/ir/lib/tflite/ReverseSequenceOptions.py new file mode 100755 index 00000000000..ecf0f8f1069 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/ReverseSequenceOptions.py @@ -0,0 +1,82 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class ReverseSequenceOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ReverseSequenceOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsReverseSequenceOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def ReverseSequenceOptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # ReverseSequenceOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ReverseSequenceOptions + def SeqDim(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # ReverseSequenceOptions + def BatchDim(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + +def ReverseSequenceOptionsStart(builder): + builder.StartObject(2) + + +def Start(builder): + ReverseSequenceOptionsStart(builder) + + +def ReverseSequenceOptionsAddSeqDim(builder, seqDim): + builder.PrependInt32Slot(0, seqDim, 0) + + +def AddSeqDim(builder, seqDim): + ReverseSequenceOptionsAddSeqDim(builder, seqDim) + + +def ReverseSequenceOptionsAddBatchDim(builder, batchDim): + builder.PrependInt32Slot(1, batchDim, 0) + + +def AddBatchDim(builder, batchDim): + ReverseSequenceOptionsAddBatchDim(builder, batchDim) + + +def ReverseSequenceOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return ReverseSequenceOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/ReverseV2Options.py b/backends/nxp/backend/ir/lib/tflite/ReverseV2Options.py new file mode 100755 index 00000000000..1da88bea516 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/ReverseV2Options.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class ReverseV2Options(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ReverseV2Options() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsReverseV2Options(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def ReverseV2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # ReverseV2Options + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def ReverseV2OptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + ReverseV2OptionsStart(builder) + + +def ReverseV2OptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return ReverseV2OptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/Rfft2dOptions.py b/backends/nxp/backend/ir/lib/tflite/Rfft2dOptions.py new file mode 100755 index 00000000000..5090608da39 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/Rfft2dOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class Rfft2dOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = Rfft2dOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsRfft2dOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def Rfft2dOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # Rfft2dOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def Rfft2dOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + Rfft2dOptionsStart(builder) + + +def Rfft2dOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return Rfft2dOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/RightShiftOptions.py b/backends/nxp/backend/ir/lib/tflite/RightShiftOptions.py new file mode 100755 index 00000000000..09a54e862f3 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/RightShiftOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class RightShiftOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = RightShiftOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsRightShiftOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def RightShiftOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # RightShiftOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def RightShiftOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + RightShiftOptionsStart(builder) + + +def RightShiftOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return RightShiftOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/RngAlgorithm.py b/backends/nxp/backend/ir/lib/tflite/RngAlgorithm.py new file mode 100755 index 00000000000..ae8c633fd57 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/RngAlgorithm.py @@ -0,0 +1,9 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + + +class RngAlgorithm(object): + DEFAULT = 0 + PHILOX = 1 + THREEFRY = 2 diff --git a/backends/nxp/backend/ir/lib/tflite/SVDFOptions.py b/backends/nxp/backend/ir/lib/tflite/SVDFOptions.py new file mode 100755 index 00000000000..03e17149167 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/SVDFOptions.py @@ -0,0 +1,97 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class SVDFOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SVDFOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSVDFOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def SVDFOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # SVDFOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # SVDFOptions + def Rank(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # SVDFOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # SVDFOptions + def AsymmetricQuantizeInputs(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + +def SVDFOptionsStart(builder): + builder.StartObject(3) + + +def Start(builder): + SVDFOptionsStart(builder) + + +def SVDFOptionsAddRank(builder, rank): + builder.PrependInt32Slot(0, rank, 0) + + +def AddRank(builder, rank): + SVDFOptionsAddRank(builder, rank) + + +def SVDFOptionsAddFusedActivationFunction(builder, fusedActivationFunction): + builder.PrependInt8Slot(1, fusedActivationFunction, 0) + + +def AddFusedActivationFunction(builder, fusedActivationFunction): + SVDFOptionsAddFusedActivationFunction(builder, fusedActivationFunction) + + +def SVDFOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): + builder.PrependBoolSlot(2, asymmetricQuantizeInputs, 0) + + +def AddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): + SVDFOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs) + + +def SVDFOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return SVDFOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/ScatterNdOptions.py b/backends/nxp/backend/ir/lib/tflite/ScatterNdOptions.py new file mode 100755 index 00000000000..f5c63dfb784 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/ScatterNdOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class ScatterNdOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ScatterNdOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsScatterNdOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def ScatterNdOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # ScatterNdOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def ScatterNdOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + ScatterNdOptionsStart(builder) + + +def ScatterNdOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return ScatterNdOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/SegmentSumOptions.py b/backends/nxp/backend/ir/lib/tflite/SegmentSumOptions.py new file mode 100755 index 00000000000..fa8b723728a --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/SegmentSumOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class SegmentSumOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SegmentSumOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSegmentSumOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def SegmentSumOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # SegmentSumOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def SegmentSumOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + SegmentSumOptionsStart(builder) + + +def SegmentSumOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return SegmentSumOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/SelectOptions.py b/backends/nxp/backend/ir/lib/tflite/SelectOptions.py new file mode 100755 index 00000000000..080702bc489 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/SelectOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class SelectOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SelectOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSelectOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def SelectOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # SelectOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def SelectOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + SelectOptionsStart(builder) + + +def SelectOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return SelectOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/SelectV2Options.py b/backends/nxp/backend/ir/lib/tflite/SelectV2Options.py new file mode 100755 index 00000000000..6853d38059d --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/SelectV2Options.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class SelectV2Options(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SelectV2Options() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSelectV2Options(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def SelectV2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # SelectV2Options + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def SelectV2OptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + SelectV2OptionsStart(builder) + + +def SelectV2OptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return SelectV2OptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/SequenceRNNOptions.py b/backends/nxp/backend/ir/lib/tflite/SequenceRNNOptions.py new file mode 100755 index 00000000000..787e2bfc2a0 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/SequenceRNNOptions.py @@ -0,0 +1,99 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class SequenceRNNOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SequenceRNNOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSequenceRNNOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def SequenceRNNOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # SequenceRNNOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # SequenceRNNOptions + def TimeMajor(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + # SequenceRNNOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # SequenceRNNOptions + def AsymmetricQuantizeInputs(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + +def SequenceRNNOptionsStart(builder): + builder.StartObject(3) + + +def Start(builder): + SequenceRNNOptionsStart(builder) + + +def SequenceRNNOptionsAddTimeMajor(builder, timeMajor): + builder.PrependBoolSlot(0, timeMajor, 0) + + +def AddTimeMajor(builder, timeMajor): + SequenceRNNOptionsAddTimeMajor(builder, timeMajor) + + +def SequenceRNNOptionsAddFusedActivationFunction(builder, fusedActivationFunction): + builder.PrependInt8Slot(1, fusedActivationFunction, 0) + + +def AddFusedActivationFunction(builder, fusedActivationFunction): + SequenceRNNOptionsAddFusedActivationFunction(builder, fusedActivationFunction) + + +def SequenceRNNOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): + builder.PrependBoolSlot(2, asymmetricQuantizeInputs, 0) + + +def AddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): + SequenceRNNOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs) + + +def SequenceRNNOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return SequenceRNNOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/ShapeOptions.py b/backends/nxp/backend/ir/lib/tflite/ShapeOptions.py new file mode 100755 index 00000000000..ee89fcda8a4 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/ShapeOptions.py @@ -0,0 +1,65 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class ShapeOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ShapeOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsShapeOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def ShapeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # ShapeOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ShapeOptions + def OutType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + +def ShapeOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + ShapeOptionsStart(builder) + + +def ShapeOptionsAddOutType(builder, outType): + builder.PrependInt8Slot(0, outType, 0) + + +def AddOutType(builder, outType): + ShapeOptionsAddOutType(builder, outType) + + +def ShapeOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return ShapeOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/SignOptions.py b/backends/nxp/backend/ir/lib/tflite/SignOptions.py new file mode 100755 index 00000000000..7ca65fa478e --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/SignOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class SignOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SignOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSignOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def SignOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # SignOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def SignOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + SignOptionsStart(builder) + + +def SignOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return SignOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/SignatureDef.py b/backends/nxp/backend/ir/lib/tflite/SignatureDef.py new file mode 100755 index 00000000000..b806f44b7ed --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/SignatureDef.py @@ -0,0 +1,172 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class SignatureDef(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SignatureDef() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSignatureDef(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def SignatureDefBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # SignatureDef + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # SignatureDef + def Inputs(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + x = self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + from .TensorMap import TensorMap + + obj = TensorMap() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # SignatureDef + def InputsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # SignatureDef + def InputsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + # SignatureDef + def Outputs(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + x = self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + from .TensorMap import TensorMap + + obj = TensorMap() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # SignatureDef + def OutputsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # SignatureDef + def OutputsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + return o == 0 + + # SignatureDef + def SignatureKey(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.String(o + self._tab.Pos) + return None + + # SignatureDef + def SubgraphIndex(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.Get( + flatbuffers.number_types.Uint32Flags, o + self._tab.Pos + ) + return 0 + + +def SignatureDefStart(builder): + builder.StartObject(5) + + +def Start(builder): + SignatureDefStart(builder) + + +def SignatureDefAddInputs(builder, inputs): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0 + ) + + +def AddInputs(builder, inputs): + SignatureDefAddInputs(builder, inputs) + + +def SignatureDefStartInputsVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartInputsVector(builder, numElems: int) -> int: + return SignatureDefStartInputsVector(builder, numElems) + + +def SignatureDefAddOutputs(builder, outputs): + builder.PrependUOffsetTRelativeSlot( + 1, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0 + ) + + +def AddOutputs(builder, outputs): + SignatureDefAddOutputs(builder, outputs) + + +def SignatureDefStartOutputsVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartOutputsVector(builder, numElems: int) -> int: + return SignatureDefStartOutputsVector(builder, numElems) + + +def SignatureDefAddSignatureKey(builder, signatureKey): + builder.PrependUOffsetTRelativeSlot( + 2, flatbuffers.number_types.UOffsetTFlags.py_type(signatureKey), 0 + ) + + +def AddSignatureKey(builder, signatureKey): + SignatureDefAddSignatureKey(builder, signatureKey) + + +def SignatureDefAddSubgraphIndex(builder, subgraphIndex): + builder.PrependUint32Slot(4, subgraphIndex, 
0) + + +def AddSubgraphIndex(builder, subgraphIndex): + SignatureDefAddSubgraphIndex(builder, subgraphIndex) + + +def SignatureDefEnd(builder): + return builder.EndObject() + + +def End(builder): + return SignatureDefEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/SkipGramOptions.py b/backends/nxp/backend/ir/lib/tflite/SkipGramOptions.py new file mode 100755 index 00000000000..9a655ca7373 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/SkipGramOptions.py @@ -0,0 +1,97 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class SkipGramOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SkipGramOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSkipGramOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def SkipGramOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # SkipGramOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # SkipGramOptions + def NgramSize(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # SkipGramOptions + def MaxSkipSize(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # SkipGramOptions + def IncludeAllNgrams(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + +def SkipGramOptionsStart(builder): + builder.StartObject(3) + + +def Start(builder): + SkipGramOptionsStart(builder) + + +def SkipGramOptionsAddNgramSize(builder, ngramSize): + builder.PrependInt32Slot(0, ngramSize, 0) + + +def AddNgramSize(builder, ngramSize): + SkipGramOptionsAddNgramSize(builder, ngramSize) + + +def SkipGramOptionsAddMaxSkipSize(builder, maxSkipSize): + builder.PrependInt32Slot(1, maxSkipSize, 0) + + +def AddMaxSkipSize(builder, maxSkipSize): + SkipGramOptionsAddMaxSkipSize(builder, maxSkipSize) + + +def SkipGramOptionsAddIncludeAllNgrams(builder, includeAllNgrams): + builder.PrependBoolSlot(2, includeAllNgrams, 0) + + +def AddIncludeAllNgrams(builder, includeAllNgrams): + SkipGramOptionsAddIncludeAllNgrams(builder, includeAllNgrams) + + +def SkipGramOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return SkipGramOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/SliceOptions.py b/backends/nxp/backend/ir/lib/tflite/SliceOptions.py new file mode 100755 index 00000000000..a9257926836 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/SliceOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class SliceOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = 
flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SliceOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSliceOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def SliceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # SliceOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def SliceOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + SliceOptionsStart(builder) + + +def SliceOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return SliceOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/SoftmaxOptions.py b/backends/nxp/backend/ir/lib/tflite/SoftmaxOptions.py new file mode 100755 index 00000000000..b9cf096196e --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/SoftmaxOptions.py @@ -0,0 +1,67 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class SoftmaxOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SoftmaxOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSoftmaxOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def SoftmaxOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # SoftmaxOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # SoftmaxOptions + def Beta(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get( + flatbuffers.number_types.Float32Flags, o + self._tab.Pos + ) + return 0.0 + + +def SoftmaxOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + SoftmaxOptionsStart(builder) + + +def SoftmaxOptionsAddBeta(builder, beta): + builder.PrependFloat32Slot(0, beta, 0.0) + + +def AddBeta(builder, beta): + SoftmaxOptionsAddBeta(builder, beta) + + +def SoftmaxOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return SoftmaxOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/SpaceToBatchNDOptions.py b/backends/nxp/backend/ir/lib/tflite/SpaceToBatchNDOptions.py new file mode 100755 index 00000000000..6fa440d179b --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/SpaceToBatchNDOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class SpaceToBatchNDOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SpaceToBatchNDOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSpaceToBatchNDOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def SpaceToBatchNDOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # SpaceToBatchNDOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def SpaceToBatchNDOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + SpaceToBatchNDOptionsStart(builder) + + +def SpaceToBatchNDOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return SpaceToBatchNDOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/SpaceToDepthOptions.py b/backends/nxp/backend/ir/lib/tflite/SpaceToDepthOptions.py new file mode 100755 index 00000000000..5a66d53a9c5 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/SpaceToDepthOptions.py @@ -0,0 +1,65 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class SpaceToDepthOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SpaceToDepthOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSpaceToDepthOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def SpaceToDepthOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # SpaceToDepthOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # SpaceToDepthOptions + def BlockSize(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + +def SpaceToDepthOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + SpaceToDepthOptionsStart(builder) + + +def SpaceToDepthOptionsAddBlockSize(builder, blockSize): + builder.PrependInt32Slot(0, blockSize, 0) + + +def AddBlockSize(builder, blockSize): + SpaceToDepthOptionsAddBlockSize(builder, blockSize) + + +def SpaceToDepthOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return SpaceToDepthOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/SparseIndexVector.py b/backends/nxp/backend/ir/lib/tflite/SparseIndexVector.py new file mode 100755 index 00000000000..ddd80de7241 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/SparseIndexVector.py @@ -0,0 +1,10 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + + +class SparseIndexVector(object): + NONE = 0 + Int32Vector = 1 + Uint16Vector = 2 + Uint8Vector = 3 diff --git a/backends/nxp/backend/ir/lib/tflite/SparseToDenseOptions.py b/backends/nxp/backend/ir/lib/tflite/SparseToDenseOptions.py new file mode 100755 index 00000000000..8748c665d19 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/SparseToDenseOptions.py @@ -0,0 +1,67 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class SparseToDenseOptions(object): + __slots__ = 
["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SparseToDenseOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSparseToDenseOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def SparseToDenseOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # SparseToDenseOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # SparseToDenseOptions + def ValidateIndices(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + +def SparseToDenseOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + SparseToDenseOptionsStart(builder) + + +def SparseToDenseOptionsAddValidateIndices(builder, validateIndices): + builder.PrependBoolSlot(0, validateIndices, 0) + + +def AddValidateIndices(builder, validateIndices): + SparseToDenseOptionsAddValidateIndices(builder, validateIndices) + + +def SparseToDenseOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return SparseToDenseOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/SparsityParameters.py b/backends/nxp/backend/ir/lib/tflite/SparsityParameters.py new file mode 100755 index 00000000000..a126189eca1 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/SparsityParameters.py @@ -0,0 +1,190 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class SparsityParameters(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SparsityParameters() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSparsityParameters(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def SparsityParametersBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # SparsityParameters + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # SparsityParameters + def TraversalOrder(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int32Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4), + ) + return 0 + + # SparsityParameters + def TraversalOrderAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # SparsityParameters + def TraversalOrderLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # SparsityParameters + def TraversalOrderIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + # SparsityParameters + def BlockMap(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int32Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4), + ) + return 0 + + # SparsityParameters + def BlockMapAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # SparsityParameters + def BlockMapLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # SparsityParameters + def BlockMapIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + return o == 0 + + # SparsityParameters + def DimMetadata(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + x = self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + from .DimensionMetadata import DimensionMetadata + + obj = DimensionMetadata() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # SparsityParameters + def DimMetadataLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # SparsityParameters + def DimMetadataIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + return o == 0 + + +def SparsityParametersStart(builder): + builder.StartObject(3) + + +def Start(builder): + SparsityParametersStart(builder) + + +def SparsityParametersAddTraversalOrder(builder, traversalOrder): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(traversalOrder), 0 + ) + + +def AddTraversalOrder(builder, traversalOrder): + SparsityParametersAddTraversalOrder(builder, traversalOrder) + + +def SparsityParametersStartTraversalOrderVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartTraversalOrderVector(builder, numElems: int) -> int: + return SparsityParametersStartTraversalOrderVector(builder, numElems) + + +def 
SparsityParametersAddBlockMap(builder, blockMap): + builder.PrependUOffsetTRelativeSlot( + 1, flatbuffers.number_types.UOffsetTFlags.py_type(blockMap), 0 + ) + + +def AddBlockMap(builder, blockMap): + SparsityParametersAddBlockMap(builder, blockMap) + + +def SparsityParametersStartBlockMapVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartBlockMapVector(builder, numElems: int) -> int: + return SparsityParametersStartBlockMapVector(builder, numElems) + + +def SparsityParametersAddDimMetadata(builder, dimMetadata): + builder.PrependUOffsetTRelativeSlot( + 2, flatbuffers.number_types.UOffsetTFlags.py_type(dimMetadata), 0 + ) + + +def AddDimMetadata(builder, dimMetadata): + SparsityParametersAddDimMetadata(builder, dimMetadata) + + +def SparsityParametersStartDimMetadataVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartDimMetadataVector(builder, numElems: int) -> int: + return SparsityParametersStartDimMetadataVector(builder, numElems) + + +def SparsityParametersEnd(builder): + return builder.EndObject() + + +def End(builder): + return SparsityParametersEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/SplitOptions.py b/backends/nxp/backend/ir/lib/tflite/SplitOptions.py new file mode 100755 index 00000000000..4d07f046d73 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/SplitOptions.py @@ -0,0 +1,65 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class SplitOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SplitOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSplitOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def SplitOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # SplitOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # SplitOptions + def NumSplits(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + +def SplitOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + SplitOptionsStart(builder) + + +def SplitOptionsAddNumSplits(builder, numSplits): + builder.PrependInt32Slot(0, numSplits, 0) + + +def AddNumSplits(builder, numSplits): + SplitOptionsAddNumSplits(builder, numSplits) + + +def SplitOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return SplitOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/SplitVOptions.py b/backends/nxp/backend/ir/lib/tflite/SplitVOptions.py new file mode 100755 index 00000000000..692296a1a42 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/SplitVOptions.py @@ -0,0 +1,65 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class SplitVOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SplitVOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSplitVOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def SplitVOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # SplitVOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # SplitVOptions + def NumSplits(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + +def SplitVOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + SplitVOptionsStart(builder) + + +def SplitVOptionsAddNumSplits(builder, numSplits): + builder.PrependInt32Slot(0, numSplits, 0) + + +def AddNumSplits(builder, numSplits): + SplitVOptionsAddNumSplits(builder, numSplits) + + +def SplitVOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return SplitVOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/SquareOptions.py b/backends/nxp/backend/ir/lib/tflite/SquareOptions.py new file mode 100755 index 00000000000..01f4f28f4e8 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/SquareOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class SquareOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SquareOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSquareOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def SquareOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # SquareOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def SquareOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + SquareOptionsStart(builder) + + +def SquareOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return SquareOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/SquaredDifferenceOptions.py b/backends/nxp/backend/ir/lib/tflite/SquaredDifferenceOptions.py new file mode 100755 index 00000000000..ffc96ef5179 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/SquaredDifferenceOptions.py @@ -0,0 +1,52 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class SquaredDifferenceOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SquaredDifferenceOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSquaredDifferenceOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def SquaredDifferenceOptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # SquaredDifferenceOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def SquaredDifferenceOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + SquaredDifferenceOptionsStart(builder) + + +def SquaredDifferenceOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return SquaredDifferenceOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/SqueezeOptions.py b/backends/nxp/backend/ir/lib/tflite/SqueezeOptions.py new file mode 100755 index 00000000000..ab3da484564 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/SqueezeOptions.py @@ -0,0 +1,98 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class SqueezeOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SqueezeOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSqueezeOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def SqueezeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # SqueezeOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # SqueezeOptions + def SqueezeDims(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int32Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4), + ) + return 0 + + # SqueezeOptions + def SqueezeDimsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # SqueezeOptions + def SqueezeDimsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # SqueezeOptions + def SqueezeDimsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + +def SqueezeOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + SqueezeOptionsStart(builder) + + +def SqueezeOptionsAddSqueezeDims(builder, squeezeDims): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(squeezeDims), 0 + ) + + +def AddSqueezeDims(builder, squeezeDims): + SqueezeOptionsAddSqueezeDims(builder, squeezeDims) + + +def SqueezeOptionsStartSqueezeDimsVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartSqueezeDimsVector(builder, numElems: int) -> int: + return SqueezeOptionsStartSqueezeDimsVector(builder, numElems) + + +def SqueezeOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return SqueezeOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/StablehloBroadcastInDimOptions.py 
b/backends/nxp/backend/ir/lib/tflite/StablehloBroadcastInDimOptions.py new file mode 100755 index 00000000000..08b1daaec4d --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/StablehloBroadcastInDimOptions.py @@ -0,0 +1,102 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class StablehloBroadcastInDimOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloBroadcastInDimOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloBroadcastInDimOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def StablehloBroadcastInDimOptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # StablehloBroadcastInDimOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloBroadcastInDimOptions + def BroadcastDimensions(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloBroadcastInDimOptions + def BroadcastDimensionsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloBroadcastInDimOptions + def BroadcastDimensionsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloBroadcastInDimOptions + def BroadcastDimensionsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + +def StablehloBroadcastInDimOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + StablehloBroadcastInDimOptionsStart(builder) + + +def StablehloBroadcastInDimOptionsAddBroadcastDimensions(builder, broadcastDimensions): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(broadcastDimensions), 0 + ) + + +def AddBroadcastDimensions(builder, broadcastDimensions): + StablehloBroadcastInDimOptionsAddBroadcastDimensions(builder, broadcastDimensions) + + +def StablehloBroadcastInDimOptionsStartBroadcastDimensionsVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartBroadcastDimensionsVector(builder, numElems: int) -> int: + return StablehloBroadcastInDimOptionsStartBroadcastDimensionsVector( + builder, numElems + ) + + +def StablehloBroadcastInDimOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return StablehloBroadcastInDimOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/StablehloCompareOptions.py b/backends/nxp/backend/ir/lib/tflite/StablehloCompareOptions.py new file mode 100755 index 00000000000..93a67064487 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/StablehloCompareOptions.py @@ -0,0 +1,86 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from 
flatbuffers.compat import import_numpy + +np = import_numpy() + + +class StablehloCompareOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloCompareOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloCompareOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def StablehloCompareOptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # StablehloCompareOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloCompareOptions + def ComparisonDirection(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get( + flatbuffers.number_types.Uint32Flags, o + self._tab.Pos + ) + return 0 + + # StablehloCompareOptions + def CompareType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get( + flatbuffers.number_types.Uint32Flags, o + self._tab.Pos + ) + return 0 + + +def StablehloCompareOptionsStart(builder): + builder.StartObject(2) + + +def Start(builder): + StablehloCompareOptionsStart(builder) + + +def StablehloCompareOptionsAddComparisonDirection(builder, comparisonDirection): + builder.PrependUint32Slot(0, comparisonDirection, 0) + + +def AddComparisonDirection(builder, comparisonDirection): + StablehloCompareOptionsAddComparisonDirection(builder, comparisonDirection) + + +def StablehloCompareOptionsAddCompareType(builder, compareType): + builder.PrependUint32Slot(1, compareType, 0) + + +def AddCompareType(builder, compareType): + StablehloCompareOptionsAddCompareType(builder, compareType) + + +def StablehloCompareOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return StablehloCompareOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/StablehloComparisonDirection.py b/backends/nxp/backend/ir/lib/tflite/StablehloComparisonDirection.py new file mode 100755 index 00000000000..6a9b1ab722a --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/StablehloComparisonDirection.py @@ -0,0 +1,12 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + + +class StablehloComparisonDirection(object): + STABLEHLO_COMPARISON_DIRECTION_EQ = 0 + STABLEHLO_COMPARISON_DIRECTION_NE = 1 + STABLEHLO_COMPARISON_DIRECTION_GE = 2 + STABLEHLO_COMPARISON_DIRECTION_GT = 3 + STABLEHLO_COMPARISON_DIRECTION_LE = 4 + STABLEHLO_COMPARISON_DIRECTION_LT = 5 diff --git a/backends/nxp/backend/ir/lib/tflite/StablehloComparisonType.py b/backends/nxp/backend/ir/lib/tflite/StablehloComparisonType.py new file mode 100755 index 00000000000..8a1206f5450 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/StablehloComparisonType.py @@ -0,0 +1,11 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + + +class StablehloComparisonType(object): + STABLEHLO_COMPARISON_TYPE_NOTYPE = 0 + STABLEHLO_COMPARISON_TYPE_FLOAT = 1 + STABLEHLO_COMPARISON_TYPE_FLOAT_TOTAL_ORDER = 2 + STABLEHLO_COMPARISON_TYPE_SIGNED = 3 + STABLEHLO_COMPARISON_TYPE_UNSIGNED = 4 diff --git a/backends/nxp/backend/ir/lib/tflite/StablehloConcatenateOptions.py 
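The two enum modules just above are plain classes of integer constants, and scalar slots in the options tables read back as their declared default (0) when never written. A short sketch — again not part of the diff, with assumed import paths — of how the constants pair with StablehloCompareOptions:

import flatbuffers

from backends.nxp.backend.ir.lib.tflite.StablehloCompareOptions import (
    StablehloCompareOptions,
    StablehloCompareOptionsAddCompareType,
    StablehloCompareOptionsAddComparisonDirection,
    StablehloCompareOptionsEnd,
    StablehloCompareOptionsStart,
)
from backends.nxp.backend.ir.lib.tflite.StablehloComparisonDirection import (
    StablehloComparisonDirection,
)
from backends.nxp.backend.ir.lib.tflite.StablehloComparisonType import (
    StablehloComparisonType,
)

builder = flatbuffers.Builder(0)
StablehloCompareOptionsStart(builder)
StablehloCompareOptionsAddComparisonDirection(
    builder, StablehloComparisonDirection.STABLEHLO_COMPARISON_DIRECTION_GE
)
StablehloCompareOptionsAddCompareType(
    builder, StablehloComparisonType.STABLEHLO_COMPARISON_TYPE_FLOAT
)
builder.Finish(StablehloCompareOptionsEnd(builder))

opts = StablehloCompareOptions.GetRootAs(builder.Output(), 0)
# An unwritten scalar slot would read back as 0 (i.e. EQ / NOTYPE).
assert opts.ComparisonDirection() == StablehloComparisonDirection.STABLEHLO_COMPARISON_DIRECTION_GE
assert opts.CompareType() == StablehloComparisonType.STABLEHLO_COMPARISON_TYPE_FLOAT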
b/backends/nxp/backend/ir/lib/tflite/StablehloConcatenateOptions.py new file mode 100755 index 00000000000..c556007f15d --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/StablehloConcatenateOptions.py @@ -0,0 +1,67 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class StablehloConcatenateOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloConcatenateOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloConcatenateOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def StablehloConcatenateOptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # StablehloConcatenateOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloConcatenateOptions + def Dimension(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos) + return 0 + + +def StablehloConcatenateOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + StablehloConcatenateOptionsStart(builder) + + +def StablehloConcatenateOptionsAddDimension(builder, dimension): + builder.PrependInt64Slot(0, dimension, 0) + + +def AddDimension(builder, dimension): + StablehloConcatenateOptionsAddDimension(builder, dimension) + + +def StablehloConcatenateOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return StablehloConcatenateOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/StablehloConvolutionOptions.py b/backends/nxp/backend/ir/lib/tflite/StablehloConvolutionOptions.py new file mode 100755 index 00000000000..b3feba712f4 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/StablehloConvolutionOptions.py @@ -0,0 +1,634 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class StablehloConvolutionOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloConvolutionOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloConvolutionOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def StablehloConvolutionOptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # StablehloConvolutionOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloConvolutionOptions + def WindowStrides(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloConvolutionOptions + def WindowStridesAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloConvolutionOptions + def WindowStridesLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloConvolutionOptions + def WindowStridesIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + # StablehloConvolutionOptions + def Padding(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloConvolutionOptions + def PaddingAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloConvolutionOptions + def PaddingLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloConvolutionOptions + def PaddingIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + return o == 0 + + # StablehloConvolutionOptions + def LhsDilation(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloConvolutionOptions + def LhsDilationAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloConvolutionOptions + def LhsDilationLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloConvolutionOptions + def LhsDilationIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + return o == 0 + + # StablehloConvolutionOptions + def RhsDilation(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloConvolutionOptions + def RhsDilationAsNumpy(self): + o = 
flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloConvolutionOptions + def RhsDilationLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloConvolutionOptions + def RhsDilationIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + return o == 0 + + # StablehloConvolutionOptions + def WindowReversal(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.BoolFlags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1), + ) + return 0 + + # StablehloConvolutionOptions + def WindowReversalAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.BoolFlags, o) + return 0 + + # StablehloConvolutionOptions + def WindowReversalLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloConvolutionOptions + def WindowReversalIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + return o == 0 + + # StablehloConvolutionOptions + def InputBatchDimension(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos) + return 0 + + # StablehloConvolutionOptions + def InputFeatureDimension(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos) + return 0 + + # StablehloConvolutionOptions + def InputSpatialDimensions(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloConvolutionOptions + def InputSpatialDimensionsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloConvolutionOptions + def InputSpatialDimensionsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloConvolutionOptions + def InputSpatialDimensionsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) + return o == 0 + + # StablehloConvolutionOptions + def KernelInputFeatureDimension(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos) + return 0 + + # StablehloConvolutionOptions + def KernelOutputFeatureDimension(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos) + return 0 + + # StablehloConvolutionOptions + def KernelSpatialDimensions(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24)) + if o != 0: + 
a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloConvolutionOptions + def KernelSpatialDimensionsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloConvolutionOptions + def KernelSpatialDimensionsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloConvolutionOptions + def KernelSpatialDimensionsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24)) + return o == 0 + + # StablehloConvolutionOptions + def OutputBatchDimension(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos) + return 0 + + # StablehloConvolutionOptions + def OutputFeatureDimension(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(28)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos) + return 0 + + # StablehloConvolutionOptions + def OutputSpatialDimensions(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloConvolutionOptions + def OutputSpatialDimensionsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloConvolutionOptions + def OutputSpatialDimensionsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloConvolutionOptions + def OutputSpatialDimensionsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30)) + return o == 0 + + # StablehloConvolutionOptions + def FeatureGroupCount(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(32)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos) + return 0 + + # StablehloConvolutionOptions + def BatchGroupCount(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(34)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos) + return 0 + + # StablehloConvolutionOptions + def PrecisionConfig(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(36)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Uint32Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4), + ) + return 0 + + # StablehloConvolutionOptions + def PrecisionConfigAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(36)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint32Flags, o) + return 0 + + # StablehloConvolutionOptions + def PrecisionConfigLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(36)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloConvolutionOptions + def 
PrecisionConfigIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(36)) + return o == 0 + + +def StablehloConvolutionOptionsStart(builder): + builder.StartObject(17) + + +def Start(builder): + StablehloConvolutionOptionsStart(builder) + + +def StablehloConvolutionOptionsAddWindowStrides(builder, windowStrides): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(windowStrides), 0 + ) + + +def AddWindowStrides(builder, windowStrides): + StablehloConvolutionOptionsAddWindowStrides(builder, windowStrides) + + +def StablehloConvolutionOptionsStartWindowStridesVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartWindowStridesVector(builder, numElems: int) -> int: + return StablehloConvolutionOptionsStartWindowStridesVector(builder, numElems) + + +def StablehloConvolutionOptionsAddPadding(builder, padding): + builder.PrependUOffsetTRelativeSlot( + 1, flatbuffers.number_types.UOffsetTFlags.py_type(padding), 0 + ) + + +def AddPadding(builder, padding): + StablehloConvolutionOptionsAddPadding(builder, padding) + + +def StablehloConvolutionOptionsStartPaddingVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartPaddingVector(builder, numElems: int) -> int: + return StablehloConvolutionOptionsStartPaddingVector(builder, numElems) + + +def StablehloConvolutionOptionsAddLhsDilation(builder, lhsDilation): + builder.PrependUOffsetTRelativeSlot( + 2, flatbuffers.number_types.UOffsetTFlags.py_type(lhsDilation), 0 + ) + + +def AddLhsDilation(builder, lhsDilation): + StablehloConvolutionOptionsAddLhsDilation(builder, lhsDilation) + + +def StablehloConvolutionOptionsStartLhsDilationVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartLhsDilationVector(builder, numElems: int) -> int: + return StablehloConvolutionOptionsStartLhsDilationVector(builder, numElems) + + +def StablehloConvolutionOptionsAddRhsDilation(builder, rhsDilation): + builder.PrependUOffsetTRelativeSlot( + 3, flatbuffers.number_types.UOffsetTFlags.py_type(rhsDilation), 0 + ) + + +def AddRhsDilation(builder, rhsDilation): + StablehloConvolutionOptionsAddRhsDilation(builder, rhsDilation) + + +def StablehloConvolutionOptionsStartRhsDilationVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartRhsDilationVector(builder, numElems: int) -> int: + return StablehloConvolutionOptionsStartRhsDilationVector(builder, numElems) + + +def StablehloConvolutionOptionsAddWindowReversal(builder, windowReversal): + builder.PrependUOffsetTRelativeSlot( + 4, flatbuffers.number_types.UOffsetTFlags.py_type(windowReversal), 0 + ) + + +def AddWindowReversal(builder, windowReversal): + StablehloConvolutionOptionsAddWindowReversal(builder, windowReversal) + + +def StablehloConvolutionOptionsStartWindowReversalVector(builder, numElems): + return builder.StartVector(1, numElems, 1) + + +def StartWindowReversalVector(builder, numElems: int) -> int: + return StablehloConvolutionOptionsStartWindowReversalVector(builder, numElems) + + +def StablehloConvolutionOptionsAddInputBatchDimension(builder, inputBatchDimension): + builder.PrependInt64Slot(5, inputBatchDimension, 0) + + +def AddInputBatchDimension(builder, inputBatchDimension): + StablehloConvolutionOptionsAddInputBatchDimension(builder, inputBatchDimension) + + +def StablehloConvolutionOptionsAddInputFeatureDimension(builder, inputFeatureDimension): + builder.PrependInt64Slot(6, inputFeatureDimension, 0) + + +def 
AddInputFeatureDimension(builder, inputFeatureDimension): + StablehloConvolutionOptionsAddInputFeatureDimension(builder, inputFeatureDimension) + + +def StablehloConvolutionOptionsAddInputSpatialDimensions( + builder, inputSpatialDimensions +): + builder.PrependUOffsetTRelativeSlot( + 7, flatbuffers.number_types.UOffsetTFlags.py_type(inputSpatialDimensions), 0 + ) + + +def AddInputSpatialDimensions(builder, inputSpatialDimensions): + StablehloConvolutionOptionsAddInputSpatialDimensions( + builder, inputSpatialDimensions + ) + + +def StablehloConvolutionOptionsStartInputSpatialDimensionsVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartInputSpatialDimensionsVector(builder, numElems: int) -> int: + return StablehloConvolutionOptionsStartInputSpatialDimensionsVector( + builder, numElems + ) + + +def StablehloConvolutionOptionsAddKernelInputFeatureDimension( + builder, kernelInputFeatureDimension +): + builder.PrependInt64Slot(8, kernelInputFeatureDimension, 0) + + +def AddKernelInputFeatureDimension(builder, kernelInputFeatureDimension): + StablehloConvolutionOptionsAddKernelInputFeatureDimension( + builder, kernelInputFeatureDimension + ) + + +def StablehloConvolutionOptionsAddKernelOutputFeatureDimension( + builder, kernelOutputFeatureDimension +): + builder.PrependInt64Slot(9, kernelOutputFeatureDimension, 0) + + +def AddKernelOutputFeatureDimension(builder, kernelOutputFeatureDimension): + StablehloConvolutionOptionsAddKernelOutputFeatureDimension( + builder, kernelOutputFeatureDimension + ) + + +def StablehloConvolutionOptionsAddKernelSpatialDimensions( + builder, kernelSpatialDimensions +): + builder.PrependUOffsetTRelativeSlot( + 10, flatbuffers.number_types.UOffsetTFlags.py_type(kernelSpatialDimensions), 0 + ) + + +def AddKernelSpatialDimensions(builder, kernelSpatialDimensions): + StablehloConvolutionOptionsAddKernelSpatialDimensions( + builder, kernelSpatialDimensions + ) + + +def StablehloConvolutionOptionsStartKernelSpatialDimensionsVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartKernelSpatialDimensionsVector(builder, numElems: int) -> int: + return StablehloConvolutionOptionsStartKernelSpatialDimensionsVector( + builder, numElems + ) + + +def StablehloConvolutionOptionsAddOutputBatchDimension(builder, outputBatchDimension): + builder.PrependInt64Slot(11, outputBatchDimension, 0) + + +def AddOutputBatchDimension(builder, outputBatchDimension): + StablehloConvolutionOptionsAddOutputBatchDimension(builder, outputBatchDimension) + + +def StablehloConvolutionOptionsAddOutputFeatureDimension( + builder, outputFeatureDimension +): + builder.PrependInt64Slot(12, outputFeatureDimension, 0) + + +def AddOutputFeatureDimension(builder, outputFeatureDimension): + StablehloConvolutionOptionsAddOutputFeatureDimension( + builder, outputFeatureDimension + ) + + +def StablehloConvolutionOptionsAddOutputSpatialDimensions( + builder, outputSpatialDimensions +): + builder.PrependUOffsetTRelativeSlot( + 13, flatbuffers.number_types.UOffsetTFlags.py_type(outputSpatialDimensions), 0 + ) + + +def AddOutputSpatialDimensions(builder, outputSpatialDimensions): + StablehloConvolutionOptionsAddOutputSpatialDimensions( + builder, outputSpatialDimensions + ) + + +def StablehloConvolutionOptionsStartOutputSpatialDimensionsVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartOutputSpatialDimensionsVector(builder, numElems: int) -> int: + return StablehloConvolutionOptionsStartOutputSpatialDimensionsVector( + 
builder, numElems + ) + + +def StablehloConvolutionOptionsAddFeatureGroupCount(builder, featureGroupCount): + builder.PrependInt64Slot(14, featureGroupCount, 0) + + +def AddFeatureGroupCount(builder, featureGroupCount): + StablehloConvolutionOptionsAddFeatureGroupCount(builder, featureGroupCount) + + +def StablehloConvolutionOptionsAddBatchGroupCount(builder, batchGroupCount): + builder.PrependInt64Slot(15, batchGroupCount, 0) + + +def AddBatchGroupCount(builder, batchGroupCount): + StablehloConvolutionOptionsAddBatchGroupCount(builder, batchGroupCount) + + +def StablehloConvolutionOptionsAddPrecisionConfig(builder, precisionConfig): + builder.PrependUOffsetTRelativeSlot( + 16, flatbuffers.number_types.UOffsetTFlags.py_type(precisionConfig), 0 + ) + + +def AddPrecisionConfig(builder, precisionConfig): + StablehloConvolutionOptionsAddPrecisionConfig(builder, precisionConfig) + + +def StablehloConvolutionOptionsStartPrecisionConfigVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartPrecisionConfigVector(builder, numElems: int) -> int: + return StablehloConvolutionOptionsStartPrecisionConfigVector(builder, numElems) + + +def StablehloConvolutionOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return StablehloConvolutionOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/StablehloCustomCallOptions.py b/backends/nxp/backend/ir/lib/tflite/StablehloCustomCallOptions.py new file mode 100755 index 00000000000..6de9b88cba2 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/StablehloCustomCallOptions.py @@ -0,0 +1,214 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class StablehloCustomCallOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloCustomCallOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloCustomCallOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def StablehloCustomCallOptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # StablehloCustomCallOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloCustomCallOptions + def CallTargetName(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.String(o + self._tab.Pos) + return None + + # StablehloCustomCallOptions + def HasSideEffect(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + # StablehloCustomCallOptions + def BackendConfig(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.String(o + self._tab.Pos) + return None + + # StablehloCustomCallOptions + def ApiVersion(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # StablehloCustomCallOptions + def CalledComputations(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int32Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4), + ) + return 0 + + # StablehloCustomCallOptions + def CalledComputationsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # StablehloCustomCallOptions + def CalledComputationsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloCustomCallOptions + def CalledComputationsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + return o == 0 + + # StablehloCustomCallOptions + def CustomAttributes(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Uint8Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1), + ) + return 0 + + # StablehloCustomCallOptions + def CustomAttributesAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o) + return 0 + + # StablehloCustomCallOptions + def CustomAttributesLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloCustomCallOptions + def CustomAttributesIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + return o == 0 + + +def StablehloCustomCallOptionsStart(builder): + builder.StartObject(6) + + +def Start(builder): + StablehloCustomCallOptionsStart(builder) + + +def StablehloCustomCallOptionsAddCallTargetName(builder, callTargetName): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(callTargetName), 0 + ) + + +def AddCallTargetName(builder, callTargetName): + 
StablehloCustomCallOptionsAddCallTargetName(builder, callTargetName) + + +def StablehloCustomCallOptionsAddHasSideEffect(builder, hasSideEffect): + builder.PrependBoolSlot(1, hasSideEffect, 0) + + +def AddHasSideEffect(builder, hasSideEffect): + StablehloCustomCallOptionsAddHasSideEffect(builder, hasSideEffect) + + +def StablehloCustomCallOptionsAddBackendConfig(builder, backendConfig): + builder.PrependUOffsetTRelativeSlot( + 2, flatbuffers.number_types.UOffsetTFlags.py_type(backendConfig), 0 + ) + + +def AddBackendConfig(builder, backendConfig): + StablehloCustomCallOptionsAddBackendConfig(builder, backendConfig) + + +def StablehloCustomCallOptionsAddApiVersion(builder, apiVersion): + builder.PrependInt32Slot(3, apiVersion, 0) + + +def AddApiVersion(builder, apiVersion): + StablehloCustomCallOptionsAddApiVersion(builder, apiVersion) + + +def StablehloCustomCallOptionsAddCalledComputations(builder, calledComputations): + builder.PrependUOffsetTRelativeSlot( + 4, flatbuffers.number_types.UOffsetTFlags.py_type(calledComputations), 0 + ) + + +def AddCalledComputations(builder, calledComputations): + StablehloCustomCallOptionsAddCalledComputations(builder, calledComputations) + + +def StablehloCustomCallOptionsStartCalledComputationsVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartCalledComputationsVector(builder, numElems: int) -> int: + return StablehloCustomCallOptionsStartCalledComputationsVector(builder, numElems) + + +def StablehloCustomCallOptionsAddCustomAttributes(builder, customAttributes): + builder.PrependUOffsetTRelativeSlot( + 5, flatbuffers.number_types.UOffsetTFlags.py_type(customAttributes), 0 + ) + + +def AddCustomAttributes(builder, customAttributes): + StablehloCustomCallOptionsAddCustomAttributes(builder, customAttributes) + + +def StablehloCustomCallOptionsStartCustomAttributesVector(builder, numElems): + return builder.StartVector(1, numElems, 1) + + +def StartCustomAttributesVector(builder, numElems: int) -> int: + return StablehloCustomCallOptionsStartCustomAttributesVector(builder, numElems) + + +def StablehloCustomCallOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return StablehloCustomCallOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/StablehloDotGeneralOptions.py b/backends/nxp/backend/ir/lib/tflite/StablehloDotGeneralOptions.py new file mode 100755 index 00000000000..e5c936239ea --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/StablehloDotGeneralOptions.py @@ -0,0 +1,304 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class StablehloDotGeneralOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloDotGeneralOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloDotGeneralOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def StablehloDotGeneralOptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # StablehloDotGeneralOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloDotGeneralOptions + def LhsBatchingDimensions(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloDotGeneralOptions + def LhsBatchingDimensionsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloDotGeneralOptions + def LhsBatchingDimensionsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloDotGeneralOptions + def LhsBatchingDimensionsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + # StablehloDotGeneralOptions + def RhsBatchingDimensions(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloDotGeneralOptions + def RhsBatchingDimensionsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloDotGeneralOptions + def RhsBatchingDimensionsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloDotGeneralOptions + def RhsBatchingDimensionsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + return o == 0 + + # StablehloDotGeneralOptions + def LhsContractingDimensions(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloDotGeneralOptions + def LhsContractingDimensionsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloDotGeneralOptions + def LhsContractingDimensionsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloDotGeneralOptions + def LhsContractingDimensionsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + return o == 0 + + # StablehloDotGeneralOptions + def RhsContractingDimensions(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) 
+ return 0 + + # StablehloDotGeneralOptions + def RhsContractingDimensionsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloDotGeneralOptions + def RhsContractingDimensionsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloDotGeneralOptions + def RhsContractingDimensionsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + return o == 0 + + # StablehloDotGeneralOptions + def PrecisionConfig(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Uint32Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4), + ) + return 0 + + # StablehloDotGeneralOptions + def PrecisionConfigAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint32Flags, o) + return 0 + + # StablehloDotGeneralOptions + def PrecisionConfigLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloDotGeneralOptions + def PrecisionConfigIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + return o == 0 + + +def StablehloDotGeneralOptionsStart(builder): + builder.StartObject(5) + + +def Start(builder): + StablehloDotGeneralOptionsStart(builder) + + +def StablehloDotGeneralOptionsAddLhsBatchingDimensions(builder, lhsBatchingDimensions): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(lhsBatchingDimensions), 0 + ) + + +def AddLhsBatchingDimensions(builder, lhsBatchingDimensions): + StablehloDotGeneralOptionsAddLhsBatchingDimensions(builder, lhsBatchingDimensions) + + +def StablehloDotGeneralOptionsStartLhsBatchingDimensionsVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartLhsBatchingDimensionsVector(builder, numElems: int) -> int: + return StablehloDotGeneralOptionsStartLhsBatchingDimensionsVector(builder, numElems) + + +def StablehloDotGeneralOptionsAddRhsBatchingDimensions(builder, rhsBatchingDimensions): + builder.PrependUOffsetTRelativeSlot( + 1, flatbuffers.number_types.UOffsetTFlags.py_type(rhsBatchingDimensions), 0 + ) + + +def AddRhsBatchingDimensions(builder, rhsBatchingDimensions): + StablehloDotGeneralOptionsAddRhsBatchingDimensions(builder, rhsBatchingDimensions) + + +def StablehloDotGeneralOptionsStartRhsBatchingDimensionsVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartRhsBatchingDimensionsVector(builder, numElems: int) -> int: + return StablehloDotGeneralOptionsStartRhsBatchingDimensionsVector(builder, numElems) + + +def StablehloDotGeneralOptionsAddLhsContractingDimensions( + builder, lhsContractingDimensions +): + builder.PrependUOffsetTRelativeSlot( + 2, flatbuffers.number_types.UOffsetTFlags.py_type(lhsContractingDimensions), 0 + ) + + +def AddLhsContractingDimensions(builder, lhsContractingDimensions): + StablehloDotGeneralOptionsAddLhsContractingDimensions( + builder, lhsContractingDimensions + ) + + +def StablehloDotGeneralOptionsStartLhsContractingDimensionsVector(builder, numElems): + return builder.StartVector(8, 
numElems, 8) + + +def StartLhsContractingDimensionsVector(builder, numElems: int) -> int: + return StablehloDotGeneralOptionsStartLhsContractingDimensionsVector( + builder, numElems + ) + + +def StablehloDotGeneralOptionsAddRhsContractingDimensions( + builder, rhsContractingDimensions +): + builder.PrependUOffsetTRelativeSlot( + 3, flatbuffers.number_types.UOffsetTFlags.py_type(rhsContractingDimensions), 0 + ) + + +def AddRhsContractingDimensions(builder, rhsContractingDimensions): + StablehloDotGeneralOptionsAddRhsContractingDimensions( + builder, rhsContractingDimensions + ) + + +def StablehloDotGeneralOptionsStartRhsContractingDimensionsVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartRhsContractingDimensionsVector(builder, numElems: int) -> int: + return StablehloDotGeneralOptionsStartRhsContractingDimensionsVector( + builder, numElems + ) + + +def StablehloDotGeneralOptionsAddPrecisionConfig(builder, precisionConfig): + builder.PrependUOffsetTRelativeSlot( + 4, flatbuffers.number_types.UOffsetTFlags.py_type(precisionConfig), 0 + ) + + +def AddPrecisionConfig(builder, precisionConfig): + StablehloDotGeneralOptionsAddPrecisionConfig(builder, precisionConfig) + + +def StablehloDotGeneralOptionsStartPrecisionConfigVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartPrecisionConfigVector(builder, numElems: int) -> int: + return StablehloDotGeneralOptionsStartPrecisionConfigVector(builder, numElems) + + +def StablehloDotGeneralOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return StablehloDotGeneralOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/StablehloDynamicSliceOptions.py b/backends/nxp/backend/ir/lib/tflite/StablehloDynamicSliceOptions.py new file mode 100755 index 00000000000..ca643402cb7 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/StablehloDynamicSliceOptions.py @@ -0,0 +1,100 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class StablehloDynamicSliceOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloDynamicSliceOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloDynamicSliceOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def StablehloDynamicSliceOptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # StablehloDynamicSliceOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloDynamicSliceOptions + def SliceSizes(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloDynamicSliceOptions + def SliceSizesAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloDynamicSliceOptions + def SliceSizesLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloDynamicSliceOptions + def SliceSizesIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + +def StablehloDynamicSliceOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + StablehloDynamicSliceOptionsStart(builder) + + +def StablehloDynamicSliceOptionsAddSliceSizes(builder, sliceSizes): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(sliceSizes), 0 + ) + + +def AddSliceSizes(builder, sliceSizes): + StablehloDynamicSliceOptionsAddSliceSizes(builder, sliceSizes) + + +def StablehloDynamicSliceOptionsStartSliceSizesVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartSliceSizesVector(builder, numElems: int) -> int: + return StablehloDynamicSliceOptionsStartSliceSizesVector(builder, numElems) + + +def StablehloDynamicSliceOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return StablehloDynamicSliceOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/StablehloGatherOptions.py b/backends/nxp/backend/ir/lib/tflite/StablehloGatherOptions.py new file mode 100755 index 00000000000..cbb1f17249e --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/StablehloGatherOptions.py @@ -0,0 +1,276 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class StablehloGatherOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloGatherOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloGatherOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def StablehloGatherOptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # StablehloGatherOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloGatherOptions + def OffsetDims(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloGatherOptions + def OffsetDimsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloGatherOptions + def OffsetDimsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloGatherOptions + def OffsetDimsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + # StablehloGatherOptions + def CollapsedSliceDims(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloGatherOptions + def CollapsedSliceDimsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloGatherOptions + def CollapsedSliceDimsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloGatherOptions + def CollapsedSliceDimsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + return o == 0 + + # StablehloGatherOptions + def StartIndexMap(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloGatherOptions + def StartIndexMapAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloGatherOptions + def StartIndexMapLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloGatherOptions + def StartIndexMapIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + return o == 0 + + # StablehloGatherOptions + def IndexVectorDim(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos) + return 0 + + # StablehloGatherOptions + def SliceSizes(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a 
+ flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloGatherOptions + def SliceSizesAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloGatherOptions + def SliceSizesLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloGatherOptions + def SliceSizesIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + return o == 0 + + # StablehloGatherOptions + def IndicesAreSorted(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + +def StablehloGatherOptionsStart(builder): + builder.StartObject(6) + + +def Start(builder): + StablehloGatherOptionsStart(builder) + + +def StablehloGatherOptionsAddOffsetDims(builder, offsetDims): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(offsetDims), 0 + ) + + +def AddOffsetDims(builder, offsetDims): + StablehloGatherOptionsAddOffsetDims(builder, offsetDims) + + +def StablehloGatherOptionsStartOffsetDimsVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartOffsetDimsVector(builder, numElems: int) -> int: + return StablehloGatherOptionsStartOffsetDimsVector(builder, numElems) + + +def StablehloGatherOptionsAddCollapsedSliceDims(builder, collapsedSliceDims): + builder.PrependUOffsetTRelativeSlot( + 1, flatbuffers.number_types.UOffsetTFlags.py_type(collapsedSliceDims), 0 + ) + + +def AddCollapsedSliceDims(builder, collapsedSliceDims): + StablehloGatherOptionsAddCollapsedSliceDims(builder, collapsedSliceDims) + + +def StablehloGatherOptionsStartCollapsedSliceDimsVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartCollapsedSliceDimsVector(builder, numElems: int) -> int: + return StablehloGatherOptionsStartCollapsedSliceDimsVector(builder, numElems) + + +def StablehloGatherOptionsAddStartIndexMap(builder, startIndexMap): + builder.PrependUOffsetTRelativeSlot( + 2, flatbuffers.number_types.UOffsetTFlags.py_type(startIndexMap), 0 + ) + + +def AddStartIndexMap(builder, startIndexMap): + StablehloGatherOptionsAddStartIndexMap(builder, startIndexMap) + + +def StablehloGatherOptionsStartStartIndexMapVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartStartIndexMapVector(builder, numElems: int) -> int: + return StablehloGatherOptionsStartStartIndexMapVector(builder, numElems) + + +def StablehloGatherOptionsAddIndexVectorDim(builder, indexVectorDim): + builder.PrependInt64Slot(3, indexVectorDim, 0) + + +def AddIndexVectorDim(builder, indexVectorDim): + StablehloGatherOptionsAddIndexVectorDim(builder, indexVectorDim) + + +def StablehloGatherOptionsAddSliceSizes(builder, sliceSizes): + builder.PrependUOffsetTRelativeSlot( + 4, flatbuffers.number_types.UOffsetTFlags.py_type(sliceSizes), 0 + ) + + +def AddSliceSizes(builder, sliceSizes): + StablehloGatherOptionsAddSliceSizes(builder, sliceSizes) + + +def StablehloGatherOptionsStartSliceSizesVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartSliceSizesVector(builder, numElems: int) -> int: + return StablehloGatherOptionsStartSliceSizesVector(builder, numElems) + + +def 
StablehloGatherOptionsAddIndicesAreSorted(builder, indicesAreSorted): + builder.PrependBoolSlot(5, indicesAreSorted, 0) + + +def AddIndicesAreSorted(builder, indicesAreSorted): + StablehloGatherOptionsAddIndicesAreSorted(builder, indicesAreSorted) + + +def StablehloGatherOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return StablehloGatherOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/StablehloIotaOptions.py b/backends/nxp/backend/ir/lib/tflite/StablehloIotaOptions.py new file mode 100755 index 00000000000..af13f3b28fb --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/StablehloIotaOptions.py @@ -0,0 +1,65 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class StablehloIotaOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloIotaOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloIotaOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def StablehloIotaOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # StablehloIotaOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloIotaOptions + def IotaDimension(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos) + return 0 + + +def StablehloIotaOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + StablehloIotaOptionsStart(builder) + + +def StablehloIotaOptionsAddIotaDimension(builder, iotaDimension): + builder.PrependInt64Slot(0, iotaDimension, 0) + + +def AddIotaDimension(builder, iotaDimension): + StablehloIotaOptionsAddIotaDimension(builder, iotaDimension) + + +def StablehloIotaOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return StablehloIotaOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/StablehloPadOptions.py b/backends/nxp/backend/ir/lib/tflite/StablehloPadOptions.py new file mode 100755 index 00000000000..6b744145299 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/StablehloPadOptions.py @@ -0,0 +1,194 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class StablehloPadOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloPadOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloPadOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def StablehloPadOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # StablehloPadOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloPadOptions + def EdgePaddingLow(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloPadOptions + def EdgePaddingLowAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloPadOptions + def EdgePaddingLowLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloPadOptions + def EdgePaddingLowIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + # StablehloPadOptions + def EdgePaddingHigh(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloPadOptions + def EdgePaddingHighAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloPadOptions + def EdgePaddingHighLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloPadOptions + def EdgePaddingHighIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + return o == 0 + + # StablehloPadOptions + def InteriorPadding(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloPadOptions + def InteriorPaddingAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloPadOptions + def InteriorPaddingLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloPadOptions + def InteriorPaddingIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + return o == 0 + + +def StablehloPadOptionsStart(builder): + builder.StartObject(3) + + +def Start(builder): + StablehloPadOptionsStart(builder) + + +def StablehloPadOptionsAddEdgePaddingLow(builder, edgePaddingLow): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(edgePaddingLow), 0 + ) + + +def AddEdgePaddingLow(builder, edgePaddingLow): + StablehloPadOptionsAddEdgePaddingLow(builder, edgePaddingLow) + + +def StablehloPadOptionsStartEdgePaddingLowVector(builder, numElems): + 
return builder.StartVector(8, numElems, 8) + + +def StartEdgePaddingLowVector(builder, numElems: int) -> int: + return StablehloPadOptionsStartEdgePaddingLowVector(builder, numElems) + + +def StablehloPadOptionsAddEdgePaddingHigh(builder, edgePaddingHigh): + builder.PrependUOffsetTRelativeSlot( + 1, flatbuffers.number_types.UOffsetTFlags.py_type(edgePaddingHigh), 0 + ) + + +def AddEdgePaddingHigh(builder, edgePaddingHigh): + StablehloPadOptionsAddEdgePaddingHigh(builder, edgePaddingHigh) + + +def StablehloPadOptionsStartEdgePaddingHighVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartEdgePaddingHighVector(builder, numElems: int) -> int: + return StablehloPadOptionsStartEdgePaddingHighVector(builder, numElems) + + +def StablehloPadOptionsAddInteriorPadding(builder, interiorPadding): + builder.PrependUOffsetTRelativeSlot( + 2, flatbuffers.number_types.UOffsetTFlags.py_type(interiorPadding), 0 + ) + + +def AddInteriorPadding(builder, interiorPadding): + StablehloPadOptionsAddInteriorPadding(builder, interiorPadding) + + +def StablehloPadOptionsStartInteriorPaddingVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartInteriorPaddingVector(builder, numElems: int) -> int: + return StablehloPadOptionsStartInteriorPaddingVector(builder, numElems) + + +def StablehloPadOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return StablehloPadOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/StablehloPrecisionConfig.py b/backends/nxp/backend/ir/lib/tflite/StablehloPrecisionConfig.py new file mode 100755 index 00000000000..a9d4f41c364 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/StablehloPrecisionConfig.py @@ -0,0 +1,9 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + + +class StablehloPrecisionConfig(object): + DEFAULT = 0 + HIGH = 1 + HIGHEST = 2 diff --git a/backends/nxp/backend/ir/lib/tflite/StablehloReduceOptions.py b/backends/nxp/backend/ir/lib/tflite/StablehloReduceOptions.py new file mode 100755 index 00000000000..a4f42e1988e --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/StablehloReduceOptions.py @@ -0,0 +1,115 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class StablehloReduceOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloReduceOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloReduceOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def StablehloReduceOptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # StablehloReduceOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloReduceOptions + def Dimensions(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloReduceOptions + def DimensionsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloReduceOptions + def DimensionsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloReduceOptions + def DimensionsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + # StablehloReduceOptions + def BodySubgraphIndex(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + +def StablehloReduceOptionsStart(builder): + builder.StartObject(2) + + +def Start(builder): + StablehloReduceOptionsStart(builder) + + +def StablehloReduceOptionsAddDimensions(builder, dimensions): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(dimensions), 0 + ) + + +def AddDimensions(builder, dimensions): + StablehloReduceOptionsAddDimensions(builder, dimensions) + + +def StablehloReduceOptionsStartDimensionsVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartDimensionsVector(builder, numElems: int) -> int: + return StablehloReduceOptionsStartDimensionsVector(builder, numElems) + + +def StablehloReduceOptionsAddBodySubgraphIndex(builder, bodySubgraphIndex): + builder.PrependInt32Slot(1, bodySubgraphIndex, 0) + + +def AddBodySubgraphIndex(builder, bodySubgraphIndex): + StablehloReduceOptionsAddBodySubgraphIndex(builder, bodySubgraphIndex) + + +def StablehloReduceOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return StablehloReduceOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/StablehloReduceWindowOptions.py b/backends/nxp/backend/ir/lib/tflite/StablehloReduceWindowOptions.py new file mode 100755 index 00000000000..d87dd82c2c5 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/StablehloReduceWindowOptions.py @@ -0,0 +1,307 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class StablehloReduceWindowOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloReduceWindowOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloReduceWindowOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def StablehloReduceWindowOptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # StablehloReduceWindowOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloReduceWindowOptions + def WindowDimensions(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloReduceWindowOptions + def WindowDimensionsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloReduceWindowOptions + def WindowDimensionsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloReduceWindowOptions + def WindowDimensionsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + # StablehloReduceWindowOptions + def WindowStrides(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloReduceWindowOptions + def WindowStridesAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloReduceWindowOptions + def WindowStridesLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloReduceWindowOptions + def WindowStridesIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + return o == 0 + + # StablehloReduceWindowOptions + def BaseDilations(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloReduceWindowOptions + def BaseDilationsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloReduceWindowOptions + def BaseDilationsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloReduceWindowOptions + def BaseDilationsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + return o == 0 + + # StablehloReduceWindowOptions + def WindowDilations(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloReduceWindowOptions + def 
WindowDilationsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloReduceWindowOptions + def WindowDilationsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloReduceWindowOptions + def WindowDilationsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + return o == 0 + + # StablehloReduceWindowOptions + def Padding(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloReduceWindowOptions + def PaddingAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloReduceWindowOptions + def PaddingLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloReduceWindowOptions + def PaddingIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + return o == 0 + + # StablehloReduceWindowOptions + def BodySubgraphIndex(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + +def StablehloReduceWindowOptionsStart(builder): + builder.StartObject(6) + + +def Start(builder): + StablehloReduceWindowOptionsStart(builder) + + +def StablehloReduceWindowOptionsAddWindowDimensions(builder, windowDimensions): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(windowDimensions), 0 + ) + + +def AddWindowDimensions(builder, windowDimensions): + StablehloReduceWindowOptionsAddWindowDimensions(builder, windowDimensions) + + +def StablehloReduceWindowOptionsStartWindowDimensionsVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartWindowDimensionsVector(builder, numElems: int) -> int: + return StablehloReduceWindowOptionsStartWindowDimensionsVector(builder, numElems) + + +def StablehloReduceWindowOptionsAddWindowStrides(builder, windowStrides): + builder.PrependUOffsetTRelativeSlot( + 1, flatbuffers.number_types.UOffsetTFlags.py_type(windowStrides), 0 + ) + + +def AddWindowStrides(builder, windowStrides): + StablehloReduceWindowOptionsAddWindowStrides(builder, windowStrides) + + +def StablehloReduceWindowOptionsStartWindowStridesVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartWindowStridesVector(builder, numElems: int) -> int: + return StablehloReduceWindowOptionsStartWindowStridesVector(builder, numElems) + + +def StablehloReduceWindowOptionsAddBaseDilations(builder, baseDilations): + builder.PrependUOffsetTRelativeSlot( + 2, flatbuffers.number_types.UOffsetTFlags.py_type(baseDilations), 0 + ) + + +def AddBaseDilations(builder, baseDilations): + StablehloReduceWindowOptionsAddBaseDilations(builder, baseDilations) + + +def StablehloReduceWindowOptionsStartBaseDilationsVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartBaseDilationsVector(builder, numElems: 
int) -> int: + return StablehloReduceWindowOptionsStartBaseDilationsVector(builder, numElems) + + +def StablehloReduceWindowOptionsAddWindowDilations(builder, windowDilations): + builder.PrependUOffsetTRelativeSlot( + 3, flatbuffers.number_types.UOffsetTFlags.py_type(windowDilations), 0 + ) + + +def AddWindowDilations(builder, windowDilations): + StablehloReduceWindowOptionsAddWindowDilations(builder, windowDilations) + + +def StablehloReduceWindowOptionsStartWindowDilationsVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartWindowDilationsVector(builder, numElems: int) -> int: + return StablehloReduceWindowOptionsStartWindowDilationsVector(builder, numElems) + + +def StablehloReduceWindowOptionsAddPadding(builder, padding): + builder.PrependUOffsetTRelativeSlot( + 4, flatbuffers.number_types.UOffsetTFlags.py_type(padding), 0 + ) + + +def AddPadding(builder, padding): + StablehloReduceWindowOptionsAddPadding(builder, padding) + + +def StablehloReduceWindowOptionsStartPaddingVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartPaddingVector(builder, numElems: int) -> int: + return StablehloReduceWindowOptionsStartPaddingVector(builder, numElems) + + +def StablehloReduceWindowOptionsAddBodySubgraphIndex(builder, bodySubgraphIndex): + builder.PrependInt32Slot(5, bodySubgraphIndex, 0) + + +def AddBodySubgraphIndex(builder, bodySubgraphIndex): + StablehloReduceWindowOptionsAddBodySubgraphIndex(builder, bodySubgraphIndex) + + +def StablehloReduceWindowOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return StablehloReduceWindowOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/StablehloRngBitGeneratorOptions.py b/backends/nxp/backend/ir/lib/tflite/StablehloRngBitGeneratorOptions.py new file mode 100755 index 00000000000..bbeb66c1603 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/StablehloRngBitGeneratorOptions.py @@ -0,0 +1,67 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class StablehloRngBitGeneratorOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloRngBitGeneratorOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloRngBitGeneratorOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def StablehloRngBitGeneratorOptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # StablehloRngBitGeneratorOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloRngBitGeneratorOptions + def Algorithm(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + +def StablehloRngBitGeneratorOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + StablehloRngBitGeneratorOptionsStart(builder) + + +def StablehloRngBitGeneratorOptionsAddAlgorithm(builder, algorithm): + builder.PrependInt8Slot(0, algorithm, 0) + + +def AddAlgorithm(builder, algorithm): + StablehloRngBitGeneratorOptionsAddAlgorithm(builder, algorithm) + + +def StablehloRngBitGeneratorOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return StablehloRngBitGeneratorOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/StablehloScatterOptions.py b/backends/nxp/backend/ir/lib/tflite/StablehloScatterOptions.py new file mode 100755 index 00000000000..19a7feb0a4a --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/StablehloScatterOptions.py @@ -0,0 +1,268 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class StablehloScatterOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloScatterOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloScatterOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def StablehloScatterOptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # StablehloScatterOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloScatterOptions + def IndicesAreSorted(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + # StablehloScatterOptions + def UpdateWindowDims(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloScatterOptions + def UpdateWindowDimsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloScatterOptions + def UpdateWindowDimsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloScatterOptions + def UpdateWindowDimsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + return o == 0 + + # StablehloScatterOptions + def InsertedWindowDims(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloScatterOptions + def InsertedWindowDimsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloScatterOptions + def InsertedWindowDimsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloScatterOptions + def InsertedWindowDimsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + return o == 0 + + # StablehloScatterOptions + def ScatterDimsToOperandDims(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloScatterOptions + def ScatterDimsToOperandDimsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloScatterOptions + def ScatterDimsToOperandDimsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloScatterOptions + def ScatterDimsToOperandDimsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + return o == 0 + + # StablehloScatterOptions + def IndexVectorDim(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + 
if o != 0: + return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos) + return 0 + + # StablehloScatterOptions + def UniqueIndices(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + # StablehloScatterOptions + def UpdateComputationSubgraphIndex(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + +def StablehloScatterOptionsStart(builder): + builder.StartObject(7) + + +def Start(builder): + StablehloScatterOptionsStart(builder) + + +def StablehloScatterOptionsAddIndicesAreSorted(builder, indicesAreSorted): + builder.PrependBoolSlot(0, indicesAreSorted, 0) + + +def AddIndicesAreSorted(builder, indicesAreSorted): + StablehloScatterOptionsAddIndicesAreSorted(builder, indicesAreSorted) + + +def StablehloScatterOptionsAddUpdateWindowDims(builder, updateWindowDims): + builder.PrependUOffsetTRelativeSlot( + 1, flatbuffers.number_types.UOffsetTFlags.py_type(updateWindowDims), 0 + ) + + +def AddUpdateWindowDims(builder, updateWindowDims): + StablehloScatterOptionsAddUpdateWindowDims(builder, updateWindowDims) + + +def StablehloScatterOptionsStartUpdateWindowDimsVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartUpdateWindowDimsVector(builder, numElems: int) -> int: + return StablehloScatterOptionsStartUpdateWindowDimsVector(builder, numElems) + + +def StablehloScatterOptionsAddInsertedWindowDims(builder, insertedWindowDims): + builder.PrependUOffsetTRelativeSlot( + 2, flatbuffers.number_types.UOffsetTFlags.py_type(insertedWindowDims), 0 + ) + + +def AddInsertedWindowDims(builder, insertedWindowDims): + StablehloScatterOptionsAddInsertedWindowDims(builder, insertedWindowDims) + + +def StablehloScatterOptionsStartInsertedWindowDimsVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartInsertedWindowDimsVector(builder, numElems: int) -> int: + return StablehloScatterOptionsStartInsertedWindowDimsVector(builder, numElems) + + +def StablehloScatterOptionsAddScatterDimsToOperandDims( + builder, scatterDimsToOperandDims +): + builder.PrependUOffsetTRelativeSlot( + 3, flatbuffers.number_types.UOffsetTFlags.py_type(scatterDimsToOperandDims), 0 + ) + + +def AddScatterDimsToOperandDims(builder, scatterDimsToOperandDims): + StablehloScatterOptionsAddScatterDimsToOperandDims( + builder, scatterDimsToOperandDims + ) + + +def StablehloScatterOptionsStartScatterDimsToOperandDimsVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartScatterDimsToOperandDimsVector(builder, numElems: int) -> int: + return StablehloScatterOptionsStartScatterDimsToOperandDimsVector(builder, numElems) + + +def StablehloScatterOptionsAddIndexVectorDim(builder, indexVectorDim): + builder.PrependInt64Slot(4, indexVectorDim, 0) + + +def AddIndexVectorDim(builder, indexVectorDim): + StablehloScatterOptionsAddIndexVectorDim(builder, indexVectorDim) + + +def StablehloScatterOptionsAddUniqueIndices(builder, uniqueIndices): + builder.PrependBoolSlot(5, uniqueIndices, 0) + + +def AddUniqueIndices(builder, uniqueIndices): + StablehloScatterOptionsAddUniqueIndices(builder, uniqueIndices) + + +def StablehloScatterOptionsAddUpdateComputationSubgraphIndex( + builder, updateComputationSubgraphIndex +): + builder.PrependInt32Slot(6, 
updateComputationSubgraphIndex, 0) + + +def AddUpdateComputationSubgraphIndex(builder, updateComputationSubgraphIndex): + StablehloScatterOptionsAddUpdateComputationSubgraphIndex( + builder, updateComputationSubgraphIndex + ) + + +def StablehloScatterOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return StablehloScatterOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/StablehloSliceOptions.py b/backends/nxp/backend/ir/lib/tflite/StablehloSliceOptions.py new file mode 100755 index 00000000000..36aa55f6bfa --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/StablehloSliceOptions.py @@ -0,0 +1,194 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class StablehloSliceOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloSliceOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloSliceOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def StablehloSliceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # StablehloSliceOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloSliceOptions + def StartIndices(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloSliceOptions + def StartIndicesAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloSliceOptions + def StartIndicesLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloSliceOptions + def StartIndicesIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + # StablehloSliceOptions + def LimitIndices(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloSliceOptions + def LimitIndicesAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloSliceOptions + def LimitIndicesLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloSliceOptions + def LimitIndicesIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + return o == 0 + + # StablehloSliceOptions + def Strides(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + 
flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloSliceOptions + def StridesAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloSliceOptions + def StridesLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloSliceOptions + def StridesIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + return o == 0 + + +def StablehloSliceOptionsStart(builder): + builder.StartObject(3) + + +def Start(builder): + StablehloSliceOptionsStart(builder) + + +def StablehloSliceOptionsAddStartIndices(builder, startIndices): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(startIndices), 0 + ) + + +def AddStartIndices(builder, startIndices): + StablehloSliceOptionsAddStartIndices(builder, startIndices) + + +def StablehloSliceOptionsStartStartIndicesVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartStartIndicesVector(builder, numElems: int) -> int: + return StablehloSliceOptionsStartStartIndicesVector(builder, numElems) + + +def StablehloSliceOptionsAddLimitIndices(builder, limitIndices): + builder.PrependUOffsetTRelativeSlot( + 1, flatbuffers.number_types.UOffsetTFlags.py_type(limitIndices), 0 + ) + + +def AddLimitIndices(builder, limitIndices): + StablehloSliceOptionsAddLimitIndices(builder, limitIndices) + + +def StablehloSliceOptionsStartLimitIndicesVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartLimitIndicesVector(builder, numElems: int) -> int: + return StablehloSliceOptionsStartLimitIndicesVector(builder, numElems) + + +def StablehloSliceOptionsAddStrides(builder, strides): + builder.PrependUOffsetTRelativeSlot( + 2, flatbuffers.number_types.UOffsetTFlags.py_type(strides), 0 + ) + + +def AddStrides(builder, strides): + StablehloSliceOptionsAddStrides(builder, strides) + + +def StablehloSliceOptionsStartStridesVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartStridesVector(builder, numElems: int) -> int: + return StablehloSliceOptionsStartStridesVector(builder, numElems) + + +def StablehloSliceOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return StablehloSliceOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/StablehloSortOptions.py b/backends/nxp/backend/ir/lib/tflite/StablehloSortOptions.py new file mode 100755 index 00000000000..4b227876468 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/StablehloSortOptions.py @@ -0,0 +1,97 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class StablehloSortOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloSortOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloSortOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def StablehloSortOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # StablehloSortOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloSortOptions + def Dimension(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos) + return 0 + + # StablehloSortOptions + def IsStable(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + # StablehloSortOptions + def ComparatorSubgraphIndex(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + +def StablehloSortOptionsStart(builder): + builder.StartObject(3) + + +def Start(builder): + StablehloSortOptionsStart(builder) + + +def StablehloSortOptionsAddDimension(builder, dimension): + builder.PrependInt64Slot(0, dimension, 0) + + +def AddDimension(builder, dimension): + StablehloSortOptionsAddDimension(builder, dimension) + + +def StablehloSortOptionsAddIsStable(builder, isStable): + builder.PrependBoolSlot(1, isStable, 0) + + +def AddIsStable(builder, isStable): + StablehloSortOptionsAddIsStable(builder, isStable) + + +def StablehloSortOptionsAddComparatorSubgraphIndex(builder, comparatorSubgraphIndex): + builder.PrependInt32Slot(2, comparatorSubgraphIndex, 0) + + +def AddComparatorSubgraphIndex(builder, comparatorSubgraphIndex): + StablehloSortOptionsAddComparatorSubgraphIndex(builder, comparatorSubgraphIndex) + + +def StablehloSortOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return StablehloSortOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/StablehloTransposeOptions.py b/backends/nxp/backend/ir/lib/tflite/StablehloTransposeOptions.py new file mode 100755 index 00000000000..373c40e2f7b --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/StablehloTransposeOptions.py @@ -0,0 +1,100 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class StablehloTransposeOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloTransposeOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloTransposeOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def StablehloTransposeOptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # StablehloTransposeOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloTransposeOptions + def Permutation(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int64Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8), + ) + return 0 + + # StablehloTransposeOptions + def PermutationAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloTransposeOptions + def PermutationLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloTransposeOptions + def PermutationIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + +def StablehloTransposeOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + StablehloTransposeOptionsStart(builder) + + +def StablehloTransposeOptionsAddPermutation(builder, permutation): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(permutation), 0 + ) + + +def AddPermutation(builder, permutation): + StablehloTransposeOptionsAddPermutation(builder, permutation) + + +def StablehloTransposeOptionsStartPermutationVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + + +def StartPermutationVector(builder, numElems: int) -> int: + return StablehloTransposeOptionsStartPermutationVector(builder, numElems) + + +def StablehloTransposeOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return StablehloTransposeOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/StablehloWhileOptions.py b/backends/nxp/backend/ir/lib/tflite/StablehloWhileOptions.py new file mode 100755 index 00000000000..3fe68865e6d --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/StablehloWhileOptions.py @@ -0,0 +1,80 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class StablehloWhileOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloWhileOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloWhileOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def StablehloWhileOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # StablehloWhileOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloWhileOptions + def CondSubgraphIndex(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # StablehloWhileOptions + def BodySubgraphIndex(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + +def StablehloWhileOptionsStart(builder): + builder.StartObject(2) + + +def Start(builder): + StablehloWhileOptionsStart(builder) + + +def StablehloWhileOptionsAddCondSubgraphIndex(builder, condSubgraphIndex): + builder.PrependInt32Slot(0, condSubgraphIndex, 0) + + +def AddCondSubgraphIndex(builder, condSubgraphIndex): + StablehloWhileOptionsAddCondSubgraphIndex(builder, condSubgraphIndex) + + +def StablehloWhileOptionsAddBodySubgraphIndex(builder, bodySubgraphIndex): + builder.PrependInt32Slot(1, bodySubgraphIndex, 0) + + +def AddBodySubgraphIndex(builder, bodySubgraphIndex): + StablehloWhileOptionsAddBodySubgraphIndex(builder, bodySubgraphIndex) + + +def StablehloWhileOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return StablehloWhileOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/StridedSliceOptions.py b/backends/nxp/backend/ir/lib/tflite/StridedSliceOptions.py new file mode 100755 index 00000000000..0d9a84d5644 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/StridedSliceOptions.py @@ -0,0 +1,142 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class StridedSliceOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StridedSliceOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStridedSliceOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def StridedSliceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # StridedSliceOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StridedSliceOptions + def BeginMask(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # StridedSliceOptions + def EndMask(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # StridedSliceOptions + def EllipsisMask(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # StridedSliceOptions + def NewAxisMask(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # StridedSliceOptions + def ShrinkAxisMask(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # StridedSliceOptions + def Offset(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + +def StridedSliceOptionsStart(builder): + builder.StartObject(6) + + +def Start(builder): + StridedSliceOptionsStart(builder) + + +def StridedSliceOptionsAddBeginMask(builder, beginMask): + builder.PrependInt32Slot(0, beginMask, 0) + + +def AddBeginMask(builder, beginMask): + StridedSliceOptionsAddBeginMask(builder, beginMask) + + +def StridedSliceOptionsAddEndMask(builder, endMask): + builder.PrependInt32Slot(1, endMask, 0) + + +def AddEndMask(builder, endMask): + StridedSliceOptionsAddEndMask(builder, endMask) + + +def StridedSliceOptionsAddEllipsisMask(builder, ellipsisMask): + builder.PrependInt32Slot(2, ellipsisMask, 0) + + +def AddEllipsisMask(builder, ellipsisMask): + StridedSliceOptionsAddEllipsisMask(builder, ellipsisMask) + + +def StridedSliceOptionsAddNewAxisMask(builder, newAxisMask): + builder.PrependInt32Slot(3, newAxisMask, 0) + + +def AddNewAxisMask(builder, newAxisMask): + StridedSliceOptionsAddNewAxisMask(builder, newAxisMask) + + +def StridedSliceOptionsAddShrinkAxisMask(builder, shrinkAxisMask): + builder.PrependInt32Slot(4, shrinkAxisMask, 0) + + +def AddShrinkAxisMask(builder, shrinkAxisMask): + StridedSliceOptionsAddShrinkAxisMask(builder, shrinkAxisMask) + + +def StridedSliceOptionsAddOffset(builder, offset): + builder.PrependBoolSlot(5, offset, 0) + + +def AddOffset(builder, offset): + StridedSliceOptionsAddOffset(builder, offset) + + +def StridedSliceOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return StridedSliceOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/SubGraph.py b/backends/nxp/backend/ir/lib/tflite/SubGraph.py new file mode 100755 index 00000000000..3bfc4fd69b2 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/SubGraph.py @@ -0,0 +1,251 @@ +# automatically generated 
by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class SubGraph(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SubGraph() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSubGraph(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def SubGraphBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # SubGraph + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # SubGraph + def Tensors(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + x = self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + from .Tensor import Tensor + + obj = Tensor() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # SubGraph + def TensorsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # SubGraph + def TensorsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + # SubGraph + def Inputs(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int32Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4), + ) + return 0 + + # SubGraph + def InputsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # SubGraph + def InputsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # SubGraph + def InputsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + return o == 0 + + # SubGraph + def Outputs(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int32Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4), + ) + return 0 + + # SubGraph + def OutputsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # SubGraph + def OutputsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # SubGraph + def OutputsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + return o == 0 + + # SubGraph + def Operators(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + x = self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + from .Operator import Operator + + obj = Operator() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # SubGraph + def OperatorsLength(self): + o = 
flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # SubGraph + def OperatorsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + return o == 0 + + # SubGraph + def Name(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.String(o + self._tab.Pos) + return None + + +def SubGraphStart(builder): + builder.StartObject(5) + + +def Start(builder): + SubGraphStart(builder) + + +def SubGraphAddTensors(builder, tensors): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(tensors), 0 + ) + + +def AddTensors(builder, tensors): + SubGraphAddTensors(builder, tensors) + + +def SubGraphStartTensorsVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartTensorsVector(builder, numElems: int) -> int: + return SubGraphStartTensorsVector(builder, numElems) + + +def SubGraphAddInputs(builder, inputs): + builder.PrependUOffsetTRelativeSlot( + 1, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0 + ) + + +def AddInputs(builder, inputs): + SubGraphAddInputs(builder, inputs) + + +def SubGraphStartInputsVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartInputsVector(builder, numElems: int) -> int: + return SubGraphStartInputsVector(builder, numElems) + + +def SubGraphAddOutputs(builder, outputs): + builder.PrependUOffsetTRelativeSlot( + 2, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0 + ) + + +def AddOutputs(builder, outputs): + SubGraphAddOutputs(builder, outputs) + + +def SubGraphStartOutputsVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartOutputsVector(builder, numElems: int) -> int: + return SubGraphStartOutputsVector(builder, numElems) + + +def SubGraphAddOperators(builder, operators): + builder.PrependUOffsetTRelativeSlot( + 3, flatbuffers.number_types.UOffsetTFlags.py_type(operators), 0 + ) + + +def AddOperators(builder, operators): + SubGraphAddOperators(builder, operators) + + +def SubGraphStartOperatorsVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartOperatorsVector(builder, numElems: int) -> int: + return SubGraphStartOperatorsVector(builder, numElems) + + +def SubGraphAddName(builder, name): + builder.PrependUOffsetTRelativeSlot( + 4, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0 + ) + + +def AddName(builder, name): + SubGraphAddName(builder, name) + + +def SubGraphEnd(builder): + return builder.EndObject() + + +def End(builder): + return SubGraphEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/SubOptions.py b/backends/nxp/backend/ir/lib/tflite/SubOptions.py new file mode 100755 index 00000000000..35ce822f376 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/SubOptions.py @@ -0,0 +1,82 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class SubOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SubOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSubOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def SubOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # SubOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # SubOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # SubOptions + def PotScaleInt16(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return True + + +def SubOptionsStart(builder): + builder.StartObject(2) + + +def Start(builder): + SubOptionsStart(builder) + + +def SubOptionsAddFusedActivationFunction(builder, fusedActivationFunction): + builder.PrependInt8Slot(0, fusedActivationFunction, 0) + + +def AddFusedActivationFunction(builder, fusedActivationFunction): + SubOptionsAddFusedActivationFunction(builder, fusedActivationFunction) + + +def SubOptionsAddPotScaleInt16(builder, potScaleInt16): + builder.PrependBoolSlot(1, potScaleInt16, 1) + + +def AddPotScaleInt16(builder, potScaleInt16): + SubOptionsAddPotScaleInt16(builder, potScaleInt16) + + +def SubOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return SubOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/Tensor.py b/backends/nxp/backend/ir/lib/tflite/Tensor.py new file mode 100755 index 00000000000..fdfcdfe5786 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/Tensor.py @@ -0,0 +1,317 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class Tensor(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = Tensor() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsTensor(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def TensorBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # Tensor + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # Tensor + def Shape(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int32Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4), + ) + return 0 + + # Tensor + def ShapeAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # Tensor + def ShapeLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Tensor + def ShapeIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + # Tensor + def Type(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # Tensor + def Buffer(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get( + flatbuffers.number_types.Uint32Flags, o + self._tab.Pos + ) + return 0 + + # Tensor + def Name(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.String(o + self._tab.Pos) + return None + + # Tensor + def Quantization(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + x = self._tab.Indirect(o + self._tab.Pos) + from .QuantizationParameters import QuantizationParameters + + obj = QuantizationParameters() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # Tensor + def IsVariable(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + # Tensor + def Sparsity(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) + if o != 0: + x = self._tab.Indirect(o + self._tab.Pos) + from .SparsityParameters import SparsityParameters + + obj = SparsityParameters() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # Tensor + def ShapeSignature(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int32Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4), + ) + return 0 + + # Tensor + def ShapeSignatureAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # Tensor + def ShapeSignatureLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Tensor + def ShapeSignatureIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) + return o == 0 + + # Tensor + def HasRank(self): + o = 
flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + # Tensor + def VariantTensors(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22)) + if o != 0: + x = self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + from .VariantSubType import VariantSubType + + obj = VariantSubType() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # Tensor + def VariantTensorsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Tensor + def VariantTensorsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22)) + return o == 0 + + +def TensorStart(builder): + builder.StartObject(10) + + +def Start(builder): + TensorStart(builder) + + +def TensorAddShape(builder, shape): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(shape), 0 + ) + + +def AddShape(builder, shape): + TensorAddShape(builder, shape) + + +def TensorStartShapeVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartShapeVector(builder, numElems: int) -> int: + return TensorStartShapeVector(builder, numElems) + + +def TensorAddType(builder, type): + builder.PrependInt8Slot(1, type, 0) + + +def AddType(builder, type): + TensorAddType(builder, type) + + +def TensorAddBuffer(builder, buffer): + builder.PrependUint32Slot(2, buffer, 0) + + +def AddBuffer(builder, buffer): + TensorAddBuffer(builder, buffer) + + +def TensorAddName(builder, name): + builder.PrependUOffsetTRelativeSlot( + 3, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0 + ) + + +def AddName(builder, name): + TensorAddName(builder, name) + + +def TensorAddQuantization(builder, quantization): + builder.PrependUOffsetTRelativeSlot( + 4, flatbuffers.number_types.UOffsetTFlags.py_type(quantization), 0 + ) + + +def AddQuantization(builder, quantization): + TensorAddQuantization(builder, quantization) + + +def TensorAddIsVariable(builder, isVariable): + builder.PrependBoolSlot(5, isVariable, 0) + + +def AddIsVariable(builder, isVariable): + TensorAddIsVariable(builder, isVariable) + + +def TensorAddSparsity(builder, sparsity): + builder.PrependUOffsetTRelativeSlot( + 6, flatbuffers.number_types.UOffsetTFlags.py_type(sparsity), 0 + ) + + +def AddSparsity(builder, sparsity): + TensorAddSparsity(builder, sparsity) + + +def TensorAddShapeSignature(builder, shapeSignature): + builder.PrependUOffsetTRelativeSlot( + 7, flatbuffers.number_types.UOffsetTFlags.py_type(shapeSignature), 0 + ) + + +def AddShapeSignature(builder, shapeSignature): + TensorAddShapeSignature(builder, shapeSignature) + + +def TensorStartShapeSignatureVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartShapeSignatureVector(builder, numElems: int) -> int: + return TensorStartShapeSignatureVector(builder, numElems) + + +def TensorAddHasRank(builder, hasRank): + builder.PrependBoolSlot(8, hasRank, 0) + + +def AddHasRank(builder, hasRank): + TensorAddHasRank(builder, hasRank) + + +def TensorAddVariantTensors(builder, variantTensors): + builder.PrependUOffsetTRelativeSlot( + 9, flatbuffers.number_types.UOffsetTFlags.py_type(variantTensors), 0 + ) + + +def AddVariantTensors(builder, variantTensors): + TensorAddVariantTensors(builder, variantTensors) + + +def 
TensorStartVariantTensorsVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartVariantTensorsVector(builder, numElems: int) -> int: + return TensorStartVariantTensorsVector(builder, numElems) + + +def TensorEnd(builder): + return builder.EndObject() + + +def End(builder): + return TensorEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/TensorMap.py b/backends/nxp/backend/ir/lib/tflite/TensorMap.py new file mode 100755 index 00000000000..d133e462b50 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/TensorMap.py @@ -0,0 +1,84 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class TensorMap(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = TensorMap() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsTensorMap(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def TensorMapBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # TensorMap + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # TensorMap + def Name(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.String(o + self._tab.Pos) + return None + + # TensorMap + def TensorIndex(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get( + flatbuffers.number_types.Uint32Flags, o + self._tab.Pos + ) + return 0 + + +def TensorMapStart(builder): + builder.StartObject(2) + + +def Start(builder): + TensorMapStart(builder) + + +def TensorMapAddName(builder, name): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0 + ) + + +def AddName(builder, name): + TensorMapAddName(builder, name) + + +def TensorMapAddTensorIndex(builder, tensorIndex): + builder.PrependUint32Slot(1, tensorIndex, 0) + + +def AddTensorIndex(builder, tensorIndex): + TensorMapAddTensorIndex(builder, tensorIndex) + + +def TensorMapEnd(builder): + return builder.EndObject() + + +def End(builder): + return TensorMapEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/TensorType.py b/backends/nxp/backend/ir/lib/tflite/TensorType.py new file mode 100755 index 00000000000..95137102b6a --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/TensorType.py @@ -0,0 +1,24 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + + +class TensorType(object): + FLOAT32 = 0 + FLOAT16 = 1 + INT32 = 2 + UINT8 = 3 + INT64 = 4 + STRING = 5 + BOOL = 6 + INT16 = 7 + COMPLEX64 = 8 + INT8 = 9 + FLOAT64 = 10 + COMPLEX128 = 11 + UINT64 = 12 + RESOURCE = 13 + VARIANT = 14 + UINT32 = 15 + UINT16 = 16 + INT4 = 17 diff --git a/backends/nxp/backend/ir/lib/tflite/TileOptions.py b/backends/nxp/backend/ir/lib/tflite/TileOptions.py new file mode 100755 index 00000000000..4ac1010b46a --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/TileOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() 
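+# The generated modules in this package share one flatbuffers object API:
+# child strings and vectors are created first, then the table is built with
+# Start()/Add*()/End(). A minimal, illustrative sketch for the 'Tensor' table
+# above (the tensor name and shape values here are made up for the example):
+#
+#   import flatbuffers
+#   from executorch.backends.nxp.backend.ir.lib.tflite import Tensor, TensorType
+#
+#   builder = flatbuffers.Builder(0)
+#   name = builder.CreateString("my_tensor")  # strings must exist before Start()
+#   Tensor.StartShapeVector(builder, 2)
+#   builder.PrependInt32(3)  # vectors are built back to front -> shape [1, 3]
+#   builder.PrependInt32(1)
+#   shape = builder.EndVector()  # flatbuffers >= 2.0; 1.x expects EndVector(2)
+#   Tensor.Start(builder)
+#   Tensor.AddShape(builder, shape)
+#   Tensor.AddType(builder, TensorType.TensorType.FLOAT32)
+#   Tensor.AddName(builder, name)
+#   tensor = Tensor.End(builder)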
+ + +class TileOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = TileOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsTileOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def TileOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # TileOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def TileOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + TileOptionsStart(builder) + + +def TileOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return TileOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/TopKV2Options.py b/backends/nxp/backend/ir/lib/tflite/TopKV2Options.py new file mode 100755 index 00000000000..08b7c833d15 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/TopKV2Options.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class TopKV2Options(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = TopKV2Options() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsTopKV2Options(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def TopKV2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # TopKV2Options + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def TopKV2OptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + TopKV2OptionsStart(builder) + + +def TopKV2OptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return TopKV2OptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/TransposeConvOptions.py b/backends/nxp/backend/ir/lib/tflite/TransposeConvOptions.py new file mode 100755 index 00000000000..836199a0912 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/TransposeConvOptions.py @@ -0,0 +1,125 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class TransposeConvOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = TransposeConvOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsTransposeConvOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def TransposeConvOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # TransposeConvOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # TransposeConvOptions + def Padding(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # TransposeConvOptions + def StrideW(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # TransposeConvOptions + def StrideH(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # TransposeConvOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # TransposeConvOptions + def QuantizedBiasType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + +def TransposeConvOptionsStart(builder): + builder.StartObject(5) + + +def Start(builder): + TransposeConvOptionsStart(builder) + + +def TransposeConvOptionsAddPadding(builder, padding): + builder.PrependInt8Slot(0, padding, 0) + + +def AddPadding(builder, padding): + TransposeConvOptionsAddPadding(builder, padding) + + +def TransposeConvOptionsAddStrideW(builder, strideW): + builder.PrependInt32Slot(1, strideW, 0) + + +def AddStrideW(builder, strideW): + TransposeConvOptionsAddStrideW(builder, strideW) + + +def TransposeConvOptionsAddStrideH(builder, strideH): + builder.PrependInt32Slot(2, strideH, 0) + + +def AddStrideH(builder, strideH): + TransposeConvOptionsAddStrideH(builder, strideH) + + +def TransposeConvOptionsAddFusedActivationFunction(builder, fusedActivationFunction): + builder.PrependInt8Slot(3, fusedActivationFunction, 0) + + +def AddFusedActivationFunction(builder, fusedActivationFunction): + TransposeConvOptionsAddFusedActivationFunction(builder, fusedActivationFunction) + + +def TransposeConvOptionsAddQuantizedBiasType(builder, quantizedBiasType): + builder.PrependInt8Slot(4, quantizedBiasType, 0) + + +def AddQuantizedBiasType(builder, quantizedBiasType): + TransposeConvOptionsAddQuantizedBiasType(builder, quantizedBiasType) + + +def TransposeConvOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return TransposeConvOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/TransposeOptions.py b/backends/nxp/backend/ir/lib/tflite/TransposeOptions.py new file mode 100755 index 00000000000..eccb306fd88 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/TransposeOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class TransposeOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = 
flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = TransposeOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsTransposeOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def TransposeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # TransposeOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def TransposeOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + TransposeOptionsStart(builder) + + +def TransposeOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return TransposeOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/Uint16Vector.py b/backends/nxp/backend/ir/lib/tflite/Uint16Vector.py new file mode 100755 index 00000000000..0eb7940d32c --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/Uint16Vector.py @@ -0,0 +1,98 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class Uint16Vector(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = Uint16Vector() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsUint16Vector(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def Uint16VectorBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # Uint16Vector + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # Uint16Vector + def Values(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Uint16Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 2), + ) + return 0 + + # Uint16Vector + def ValuesAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint16Flags, o) + return 0 + + # Uint16Vector + def ValuesLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Uint16Vector + def ValuesIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + +def Uint16VectorStart(builder): + builder.StartObject(1) + + +def Start(builder): + Uint16VectorStart(builder) + + +def Uint16VectorAddValues(builder, values): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(values), 0 + ) + + +def AddValues(builder, values): + Uint16VectorAddValues(builder, values) + + +def Uint16VectorStartValuesVector(builder, numElems): + return builder.StartVector(2, numElems, 2) + + +def StartValuesVector(builder, numElems: int) -> int: + return Uint16VectorStartValuesVector(builder, numElems) + + +def Uint16VectorEnd(builder): + return builder.EndObject() + + +def End(builder): + return Uint16VectorEnd(builder) diff --git 
a/backends/nxp/backend/ir/lib/tflite/Uint8Vector.py b/backends/nxp/backend/ir/lib/tflite/Uint8Vector.py new file mode 100755 index 00000000000..869807790c6 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/Uint8Vector.py @@ -0,0 +1,98 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class Uint8Vector(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = Uint8Vector() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsUint8Vector(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def Uint8VectorBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # Uint8Vector + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # Uint8Vector + def Values(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Uint8Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1), + ) + return 0 + + # Uint8Vector + def ValuesAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o) + return 0 + + # Uint8Vector + def ValuesLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Uint8Vector + def ValuesIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + +def Uint8VectorStart(builder): + builder.StartObject(1) + + +def Start(builder): + Uint8VectorStart(builder) + + +def Uint8VectorAddValues(builder, values): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(values), 0 + ) + + +def AddValues(builder, values): + Uint8VectorAddValues(builder, values) + + +def Uint8VectorStartValuesVector(builder, numElems): + return builder.StartVector(1, numElems, 1) + + +def StartValuesVector(builder, numElems: int) -> int: + return Uint8VectorStartValuesVector(builder, numElems) + + +def Uint8VectorEnd(builder): + return builder.EndObject() + + +def End(builder): + return Uint8VectorEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/UnidirectionalSequenceLSTMOptions.py b/backends/nxp/backend/ir/lib/tflite/UnidirectionalSequenceLSTMOptions.py new file mode 100755 index 00000000000..d86806eaa76 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/UnidirectionalSequenceLSTMOptions.py @@ -0,0 +1,164 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class UnidirectionalSequenceLSTMOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = UnidirectionalSequenceLSTMOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsUnidirectionalSequenceLSTMOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def UnidirectionalSequenceLSTMOptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # UnidirectionalSequenceLSTMOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # UnidirectionalSequenceLSTMOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # UnidirectionalSequenceLSTMOptions + def CellClip(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get( + flatbuffers.number_types.Float32Flags, o + self._tab.Pos + ) + return 0.0 + + # UnidirectionalSequenceLSTMOptions + def ProjClip(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get( + flatbuffers.number_types.Float32Flags, o + self._tab.Pos + ) + return 0.0 + + # UnidirectionalSequenceLSTMOptions + def TimeMajor(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + # UnidirectionalSequenceLSTMOptions + def AsymmetricQuantizeInputs(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + # UnidirectionalSequenceLSTMOptions + def DiagonalRecurrentTensors(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + +def UnidirectionalSequenceLSTMOptionsStart(builder): + builder.StartObject(6) + + +def Start(builder): + UnidirectionalSequenceLSTMOptionsStart(builder) + + +def UnidirectionalSequenceLSTMOptionsAddFusedActivationFunction( + builder, fusedActivationFunction +): + builder.PrependInt8Slot(0, fusedActivationFunction, 0) + + +def AddFusedActivationFunction(builder, fusedActivationFunction): + UnidirectionalSequenceLSTMOptionsAddFusedActivationFunction( + builder, fusedActivationFunction + ) + + +def UnidirectionalSequenceLSTMOptionsAddCellClip(builder, cellClip): + builder.PrependFloat32Slot(1, cellClip, 0.0) + + +def AddCellClip(builder, cellClip): + UnidirectionalSequenceLSTMOptionsAddCellClip(builder, cellClip) + + +def UnidirectionalSequenceLSTMOptionsAddProjClip(builder, projClip): + builder.PrependFloat32Slot(2, projClip, 0.0) + + +def AddProjClip(builder, projClip): + UnidirectionalSequenceLSTMOptionsAddProjClip(builder, projClip) + + +def UnidirectionalSequenceLSTMOptionsAddTimeMajor(builder, timeMajor): + builder.PrependBoolSlot(3, timeMajor, 0) + + +def AddTimeMajor(builder, timeMajor): + UnidirectionalSequenceLSTMOptionsAddTimeMajor(builder, timeMajor) + + +def UnidirectionalSequenceLSTMOptionsAddAsymmetricQuantizeInputs( + builder, asymmetricQuantizeInputs +): + builder.PrependBoolSlot(4, asymmetricQuantizeInputs, 0) + + +def AddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): + UnidirectionalSequenceLSTMOptionsAddAsymmetricQuantizeInputs( + builder, asymmetricQuantizeInputs + ) + + +def 
UnidirectionalSequenceLSTMOptionsAddDiagonalRecurrentTensors( + builder, diagonalRecurrentTensors +): + builder.PrependBoolSlot(5, diagonalRecurrentTensors, 0) + + +def AddDiagonalRecurrentTensors(builder, diagonalRecurrentTensors): + UnidirectionalSequenceLSTMOptionsAddDiagonalRecurrentTensors( + builder, diagonalRecurrentTensors + ) + + +def UnidirectionalSequenceLSTMOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return UnidirectionalSequenceLSTMOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/UniqueOptions.py b/backends/nxp/backend/ir/lib/tflite/UniqueOptions.py new file mode 100755 index 00000000000..4e9f1a51088 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/UniqueOptions.py @@ -0,0 +1,65 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class UniqueOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = UniqueOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsUniqueOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def UniqueOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # UniqueOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # UniqueOptions + def IdxOutType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 2 + + +def UniqueOptionsStart(builder): + builder.StartObject(1) + + +def Start(builder): + UniqueOptionsStart(builder) + + +def UniqueOptionsAddIdxOutType(builder, idxOutType): + builder.PrependInt8Slot(0, idxOutType, 2) + + +def AddIdxOutType(builder, idxOutType): + UniqueOptionsAddIdxOutType(builder, idxOutType) + + +def UniqueOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return UniqueOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/UnpackOptions.py b/backends/nxp/backend/ir/lib/tflite/UnpackOptions.py new file mode 100755 index 00000000000..7378e4bc8ab --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/UnpackOptions.py @@ -0,0 +1,80 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class UnpackOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = UnpackOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsUnpackOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def UnpackOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # UnpackOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # UnpackOptions + def Num(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # UnpackOptions + def Axis(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + +def UnpackOptionsStart(builder): + builder.StartObject(2) + + +def Start(builder): + UnpackOptionsStart(builder) + + +def UnpackOptionsAddNum(builder, num): + builder.PrependInt32Slot(0, num, 0) + + +def AddNum(builder, num): + UnpackOptionsAddNum(builder, num) + + +def UnpackOptionsAddAxis(builder, axis): + builder.PrependInt32Slot(1, axis, 0) + + +def AddAxis(builder, axis): + UnpackOptionsAddAxis(builder, axis) + + +def UnpackOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return UnpackOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/UnsortedSegmentMaxOptions.py b/backends/nxp/backend/ir/lib/tflite/UnsortedSegmentMaxOptions.py new file mode 100755 index 00000000000..cf0b2d50952 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/UnsortedSegmentMaxOptions.py @@ -0,0 +1,52 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class UnsortedSegmentMaxOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = UnsortedSegmentMaxOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsUnsortedSegmentMaxOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def UnsortedSegmentMaxOptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # UnsortedSegmentMaxOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def UnsortedSegmentMaxOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + UnsortedSegmentMaxOptionsStart(builder) + + +def UnsortedSegmentMaxOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return UnsortedSegmentMaxOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/UnsortedSegmentMinOptions.py b/backends/nxp/backend/ir/lib/tflite/UnsortedSegmentMinOptions.py new file mode 100755 index 00000000000..88906797bc8 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/UnsortedSegmentMinOptions.py @@ -0,0 +1,52 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class UnsortedSegmentMinOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = UnsortedSegmentMinOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsUnsortedSegmentMinOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def UnsortedSegmentMinOptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # UnsortedSegmentMinOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def UnsortedSegmentMinOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + UnsortedSegmentMinOptionsStart(builder) + + +def UnsortedSegmentMinOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return UnsortedSegmentMinOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/UnsortedSegmentProdOptions.py b/backends/nxp/backend/ir/lib/tflite/UnsortedSegmentProdOptions.py new file mode 100755 index 00000000000..c15dc9533bc --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/UnsortedSegmentProdOptions.py @@ -0,0 +1,52 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class UnsortedSegmentProdOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = UnsortedSegmentProdOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsUnsortedSegmentProdOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def UnsortedSegmentProdOptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # UnsortedSegmentProdOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def UnsortedSegmentProdOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + UnsortedSegmentProdOptionsStart(builder) + + +def UnsortedSegmentProdOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return UnsortedSegmentProdOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/UnsortedSegmentSumOptions.py b/backends/nxp/backend/ir/lib/tflite/UnsortedSegmentSumOptions.py new file mode 100755 index 00000000000..f7394e99089 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/UnsortedSegmentSumOptions.py @@ -0,0 +1,52 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class UnsortedSegmentSumOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = UnsortedSegmentSumOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsUnsortedSegmentSumOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def UnsortedSegmentSumOptionsBufferHasIdentifier( + cls, buf, offset, size_prefixed=False + ): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # UnsortedSegmentSumOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def UnsortedSegmentSumOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + UnsortedSegmentSumOptionsStart(builder) + + +def UnsortedSegmentSumOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return UnsortedSegmentSumOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/VarHandleOptions.py b/backends/nxp/backend/ir/lib/tflite/VarHandleOptions.py new file mode 100755 index 00000000000..0869c9c9055 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/VarHandleOptions.py @@ -0,0 +1,84 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class VarHandleOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = VarHandleOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsVarHandleOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def VarHandleOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # VarHandleOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # VarHandleOptions + def Container(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.String(o + self._tab.Pos) + return None + + # VarHandleOptions + def SharedName(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.String(o + self._tab.Pos) + return None + + +def VarHandleOptionsStart(builder): + builder.StartObject(2) + + +def Start(builder): + VarHandleOptionsStart(builder) + + +def VarHandleOptionsAddContainer(builder, container): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(container), 0 + ) + + +def AddContainer(builder, container): + VarHandleOptionsAddContainer(builder, container) + + +def VarHandleOptionsAddSharedName(builder, sharedName): + builder.PrependUOffsetTRelativeSlot( + 1, flatbuffers.number_types.UOffsetTFlags.py_type(sharedName), 0 + ) + + +def AddSharedName(builder, sharedName): + VarHandleOptionsAddSharedName(builder, sharedName) + + +def VarHandleOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return VarHandleOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/VariantSubType.py b/backends/nxp/backend/ir/lib/tflite/VariantSubType.py new file mode 100755 index 00000000000..aefb3955692 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/VariantSubType.py @@ -0,0 +1,130 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class VariantSubType(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = VariantSubType() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsVariantSubType(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def VariantSubTypeBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # VariantSubType + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # VariantSubType + def Shape(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int32Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4), + ) + return 0 + + # VariantSubType + def ShapeAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # VariantSubType + def ShapeLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # VariantSubType + def ShapeIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + # VariantSubType + def Type(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # VariantSubType + def HasRank(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return bool( + self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos) + ) + return False + + +def VariantSubTypeStart(builder): + builder.StartObject(3) + + +def Start(builder): + VariantSubTypeStart(builder) + + +def VariantSubTypeAddShape(builder, shape): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(shape), 0 + ) + + +def AddShape(builder, shape): + VariantSubTypeAddShape(builder, shape) + + +def VariantSubTypeStartShapeVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartShapeVector(builder, numElems: int) -> int: + return VariantSubTypeStartShapeVector(builder, numElems) + + +def VariantSubTypeAddType(builder, type): + builder.PrependInt8Slot(1, type, 0) + + +def AddType(builder, type): + VariantSubTypeAddType(builder, type) + + +def VariantSubTypeAddHasRank(builder, hasRank): + builder.PrependBoolSlot(2, hasRank, 0) + + +def AddHasRank(builder, hasRank): + VariantSubTypeAddHasRank(builder, hasRank) + + +def VariantSubTypeEnd(builder): + return builder.EndObject() + + +def End(builder): + return VariantSubTypeEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/WhereOptions.py b/backends/nxp/backend/ir/lib/tflite/WhereOptions.py new file mode 100755 index 00000000000..8d731f3a3fa --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/WhereOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class WhereOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = WhereOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsWhereOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def WhereOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # WhereOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def WhereOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + WhereOptionsStart(builder) + + +def WhereOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return WhereOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/WhileOptions.py b/backends/nxp/backend/ir/lib/tflite/WhileOptions.py new file mode 100755 index 00000000000..21fb3072629 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/WhileOptions.py @@ -0,0 +1,80 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class WhileOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = WhileOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsWhileOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def WhileOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # WhileOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # WhileOptions + def CondSubgraphIndex(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # WhileOptions + def BodySubgraphIndex(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + +def WhileOptionsStart(builder): + builder.StartObject(2) + + +def Start(builder): + WhileOptionsStart(builder) + + +def WhileOptionsAddCondSubgraphIndex(builder, condSubgraphIndex): + builder.PrependInt32Slot(0, condSubgraphIndex, 0) + + +def AddCondSubgraphIndex(builder, condSubgraphIndex): + WhileOptionsAddCondSubgraphIndex(builder, condSubgraphIndex) + + +def WhileOptionsAddBodySubgraphIndex(builder, bodySubgraphIndex): + builder.PrependInt32Slot(1, bodySubgraphIndex, 0) + + +def AddBodySubgraphIndex(builder, bodySubgraphIndex): + WhileOptionsAddBodySubgraphIndex(builder, bodySubgraphIndex) + + +def WhileOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return WhileOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/ZerosLikeOptions.py b/backends/nxp/backend/ir/lib/tflite/ZerosLikeOptions.py new file mode 100755 index 00000000000..4ddd6318a15 --- /dev/null +++ b/backends/nxp/backend/ir/lib/tflite/ZerosLikeOptions.py @@ -0,0 +1,50 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class ZerosLikeOptions(object): + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = 
flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ZerosLikeOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsZerosLikeOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + @classmethod + def ZerosLikeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed + ) + + # ZerosLikeOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + +def ZerosLikeOptionsStart(builder): + builder.StartObject(0) + + +def Start(builder): + ZerosLikeOptionsStart(builder) + + +def ZerosLikeOptionsEnd(builder): + return builder.EndObject() + + +def End(builder): + return ZerosLikeOptionsEnd(builder) diff --git a/backends/nxp/backend/ir/lib/tflite/__init__.py b/backends/nxp/backend/ir/lib/tflite/__init__.py new file mode 100755 index 00000000000..e69de29bb2d diff --git a/backends/nxp/backend/ir/logger.py b/backends/nxp/backend/ir/logger.py new file mode 100644 index 00000000000..ce8da2a31df --- /dev/null +++ b/backends/nxp/backend/ir/logger.py @@ -0,0 +1,343 @@ +# +# Copyright 2023 Martin Pavella +# Copyright 2023 NXP +# +# License: MIT +# See the LICENSE_MIT for more details. +# +""" + logger + +Module implements functions for logging, error messages and custom assertions. +""" + +import sys +from collections import defaultdict +from enum import Enum +from typing import NoReturn, Optional + + +class Style: + """Strings used to set a color and other styles to the output printed to console. + + example usage: + logger.w(f'{logger.Style.orange + logger.Style.bold}Some warning. {logger.Style.end}Additional info.') + + """ + + red = "\033[91m" + green = "\033[92m" + orange = "\033[93m" + blue = "\033[94m" + magenta = "\033[95m" + cyan = "\033[96m" + + bold = "\033[1m" + underline = "\033[4m" + + end = "\033[0m" + + +class MessageImportance(Enum): + """Importance levels of messages to print.""" + + DEBUG = 0 + INFO = 1 + WARNING = 2 + ERROR = 3 + + +MIN_OUTPUT_IMPORTANCE = MessageImportance.WARNING + + +class Message: + """Custom messages, that are printed to console from different locations in the code.""" + + ALLOW_SELECT_OPS = ( + "If you want to convert the model using the SELECT_TF_OPS, run the conversion again with " + f"the flag {Style.bold + Style.cyan}--allow-select-ops{Style.end}." + ) + + GUARANTEE_NON_NEGATIVE_INDICES = ( + f"{Style.green}If you know that the indices are always non-negative, you can run" + f" the converter with the flag {Style.bold + Style.cyan}--non-negative-indices" + f"{Style.end}." + ) + + CAST_INT64_TO_INT32 = ( + f"Use option {Style.bold + Style.cyan}--cast-int64-to-int32{Style.end} to disable this " + "check and re-cast input/output to INT32." + ) + + IGNORE_OPSET_VERSION = ( + "If you want to try and convert the model anyway, run the conversion again with the flag " + f"{Style.bold + Style.cyan}--ignore-opset-version{Style.end}. Keep in mind that the output" + " TFLite model may potentially be invalid." 
+ ) + + +class Code(Enum): + """Error codes""" + + INTERNAL_ERROR = 1 + GENERATED_MODEL_INVALID = 2 + INVALID_OPTIMIZATION = 3 + PREPROCESSING_ERROR = 4 + + UNSUPPORTED_OPERATOR = 21 + UNSUPPORTED_ONNX_TYPE = 22 + UNSUPPORTED_OPERATOR_ATTRIBUTES = 23 + NOT_IMPLEMENTED = 24 + + INVALID_TYPE = 31 + INVALID_TENSOR_SHAPE = 32 + INVALID_ONNX_OPERATOR = 33 + INVALID_ONNX_OPERATOR_ATTRIBUTE = 34 + INVALID_ONNX_MODEL = 35 + + CONVERSION_IMPOSSIBLE = 41 + SHAPE_INFERENCE_ERROR = 42 + IO_PRESERVATION_ERROR = 43 + + INVALID_INPUT = 51 + + UNSUPPORTED_NODE = 61 + + +class Error(Exception): + + def __init__(self, err_code: Code, msg, exception: Optional[Exception] = None): + self.error_code = err_code + self.msg = msg + self.exception = exception + + def __str__(self): + output = f"[{self.error_code}] - {self.msg}" + if self.exception is not None: + output += f" - (Parent exception: {self.exception})" + + return output + + +class LoggingContext: + """ + Context that represents the part of the application to which the current logs belong. Contexts are meant + to be nested from most general (global) to most specific (node context etc.). Use the context manager + 'logger.loggingContext()' to enable a specific context. + """ + + def __init__(self, context_name): + self.context_name = context_name + + def __str__(self) -> str: + return self.context_name + + def __repr__(self) -> str: + return self.context_name + + +class BasicLoggingContext(LoggingContext): + """ + Basic logging contexts, each specified by its name. + """ + + GLOBAL = LoggingContext("global") + SHAPE_INFERENCE = LoggingContext("shape_inference") + ONNX_PARSER = LoggingContext("onnx_parser") + OPERATOR_CONVERSION = LoggingContext("operator_conversion") + TFLITE_GENERATOR = LoggingContext("tflite_generator") + QDQ_QUANTIZER = LoggingContext("qdq_quantizer") + + +class NodeLoggingContext(LoggingContext): + """ + ONNX node-specific context. Logs reported within this context relate to the node with index 'node_id'. + """ + + def __init__(self, node_id): + self.node_id = node_id + super().__init__(f"node_{node_id}") + + +class ConversionLog: + """ + Records logs sent within some logging context. A log might belong to multiple contexts. A single log + event consists of: a message, the logging context hierarchy, an importance (logger.MessageImportance) and + an optional error code (logger.Code). Logs added outside any context are ignored. 
+ """ + + _current_logging_context = [] + _log = defaultdict(list) + _log_count = 0 + + def append_context(self, loggingContext: LoggingContext): + if len(self._current_logging_context) == 0: + self._log = defaultdict(list) + self._log_count = 0 + + self._current_logging_context.append(loggingContext.context_name) + + def pop_last_context(self): + self._current_logging_context.pop() + + def reset(self): + self._log = defaultdict(list) + self._current_logging_context = [] + self._log_count = 0 + + def add_log( + self, + importance: MessageImportance, + message: str, + error_code: Code | None = None, + ): + data = { + "message": message, + "logging_context_hierarchy": list(self._current_logging_context), + "importance": importance.value, + "message_id": self._log_count, + } + + if error_code is not None: + data["error_code"] = error_code + + if len(self._current_logging_context) != 0: + self._log[self._current_logging_context[-1]].append(data) + self._log_count += 1 + + def get_logs(self) -> dict: + return self._log + + def _get_node_error(self, node_id: int, dict_item: str) -> Code | str | None: + """ + Return first error log item that belong to node with id 'node_id'. If no error is present + None is returned instead. + + :param node_id: ONNX node id. + :param dict_item: Dictionary item to return from `log` + :return: Error code or None if there's no error related to node. + """ + + node_logs = self._log[f"node_{node_id}"] + for log in node_logs: + if log["importance"] == MessageImportance.ERROR.value: + return log[dict_item] + + return None + + def get_node_error_code(self, node_id: int) -> Code | None: + """ + Return first error code that belong to node with id 'node_id'. If no error is present + None is returned instead. + + :param node_id: ONNX node id. + :return: Error code or None if there's no error related to node. + """ + + return self._get_node_error(node_id, "error_code") + + def get_node_error_message(self, node_id: int) -> str | None: + """ + Return first error message that belong to node with id 'node_id'. If no error is present + None is returned instead. + + :param node_id: ONNX node id + :return: Error message or None if there is no error related to node. + """ + + return self._get_node_error(node_id, "message") + + +conversion_log = ConversionLog() + + +class loggingContext: + """ + Context manager used to nest logging contexts. 
Usage: + + with loggingContext(BasicLoggingContext.GLOBAL): + with loggingContext(BasicLoggingContext.ONNX_PARSER): + logger.i("My log") # this log is automatically assigned to both parent contexts + + """ + + def __init__(self, logging_context: LoggingContext): + self.logging_context = logging_context + + def __enter__(self): + conversion_log.append_context(self.logging_context) + + def __exit__(self, _, __, ___): + conversion_log.pop_last_context() + + +def d(msg: str): + """Log an internal debug message.""" + + if MIN_OUTPUT_IMPORTANCE.value > MessageImportance.DEBUG.value: + return + + print("DEBUG: ", msg) + conversion_log.add_log(MessageImportance.DEBUG, msg) + + +def i(msg: str): + """Log an info message.""" + + if MIN_OUTPUT_IMPORTANCE.value > MessageImportance.INFO.value: + return + + print("INFO: ", msg) + conversion_log.add_log(MessageImportance.INFO, msg) + + +def w(msg: str): + """Log a warning message.""" + + if MIN_OUTPUT_IMPORTANCE.value > MessageImportance.WARNING.value: + return + + print("WARNING: ", msg) + conversion_log.add_log(MessageImportance.WARNING, msg) + + +def e(err_code: Code, msg: str, exception: Optional[Exception] = None) -> NoReturn: + """Print and raise an exception with an error message composed of the provided error code, message and optional exception. + :param err_code: Error code. + :param msg: Error message. + :param exception: (Optional) Exception object to print before the program exits. + """ + + error = Error(err_code, msg, exception) + conversion_log.add_log(MessageImportance.ERROR, str(error), error_code=err_code) + print("ERROR: ", str(error), file=sys.stderr) + + raise error + + +def expect_type(obj, expected_type, msg: str = ""): + if type(obj) is not expected_type: + w( + msg + + f": Object '{obj}' is of type '{type(obj)}' where '{expected_type}' was expected!" + ) + + +def require_type(obj, required_type, msg: str = ""): + if type(obj) is not required_type: + e( + Code.INVALID_TYPE, + msg + + f": Object '{obj}' is of type '{type(obj)}' where '{required_type}' was required!", + ) + + +def internal_assert(truth_value: bool, msg: str = ""): + """Assert that 'truth_value' is True. If not, raise a logger INTERNAL_ERROR with message 'msg'. + + :param truth_value: Boolean to check. + :param msg: Message to raise the Error with. + """ + + if not truth_value: + e(Code.INTERNAL_ERROR, msg) diff --git a/backends/nxp/backend/ir/tensor_formatting.py b/backends/nxp/backend/ir/tensor_formatting.py new file mode 100644 index 00000000000..aab22c3c368 --- /dev/null +++ b/backends/nxp/backend/ir/tensor_formatting.py @@ -0,0 +1,55 @@ +# +# Copyright 2023 Martin Pavella +# Copyright 2023-2024 NXP +# +# License: MIT +# See the LICENSE_MIT for more details. +# +from enum import Enum + +from executorch.backends.nxp.backend.node_format_inference import NodeFormat + + +class TensorFormat(Enum): + CHANNELS_FIRST = 0 + + CHANNELS_LAST = 10 + + # The format of the TFLite Conv3D weights tensor: [output_channels, input_channels, D, H, W] + CONV_3D_WEIGHT_FORMAT = 11 + + # Intermediate format between 'Transpose' and 'Reshape' ops when a single dimension with value 1 + # is added/removed via reshaping + RESHAPE_SINGLE_UNITARY_TRANSPOSITION = 12 + + # The format of the TFLite TransposeConv 2D weights tensor: [M/group, kH, kW, C] + TRANSPOSE_CONV_2D_WEIGHT_FORMAT = 13 + + # No special format (matrices, vectors, shapes etc.). 
All tensors with the FORMATLESS format MUST have EXACTLY + # the same shape and data in the TFLite model and in the ONNX model. + FORMATLESS = 20 + + NONE = 30 # Format has not been identified + + def is_channels_first(self) -> bool: + return self == TensorFormat.CHANNELS_FIRST + + def is_channels_last(self) -> bool: + return self == TensorFormat.CHANNELS_LAST + + @staticmethod + def from_node_format(node_format: NodeFormat): + if node_format.is_channels_first(): + return TensorFormat.CHANNELS_LAST + elif node_format == NodeFormat.FORMATLESS: + return TensorFormat.FORMATLESS + else: + return TensorFormat.NONE + + def to_node_format(self): + if self == TensorFormat.CHANNELS_LAST: + return NodeFormat.CHANNELS_FIRST + elif self == TensorFormat.FORMATLESS: + return NodeFormat.FORMATLESS + else: + return NodeFormat.NONE diff --git a/backends/nxp/backend/ir/tflite_generator/__init__.py b/backends/nxp/backend/ir/tflite_generator/__init__.py new file mode 100755 index 00000000000..e69de29bb2d diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/__init__.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/__init__.py new file mode 100755 index 00000000000..e69de29bb2d diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/abs_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/abs_options.py new file mode 100755 index 00000000000..68bc1ec6dce --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/abs_options.py @@ -0,0 +1,25 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import flatbuffers as fb + +from executorch.backends.nxp.backend.ir.lib.tflite import AbsOptions + +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions +from executorch.backends.nxp.backend.ir.tflite_generator.meta import meta + + +class Abs(meta.BuiltinOptions): + + def __init__(self) -> None: + super().__init__(BuiltinOptions.AbsOptions, BuiltinOperator.ABS) + + def gen_tflite(self, builder: fb.Builder): + AbsOptions.Start(builder) + + return AbsOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/add_n_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/add_n_options.py new file mode 100755 index 00000000000..2646f326852 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/add_n_options.py @@ -0,0 +1,31 @@ +# +# Copyright 2023 Martin Pavella +# +# License: MIT +# See the LICENSE_MIT for more details. +# +""" + AddN + +Representation of the TFLite operator 'AddN'. 
+""" + +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import ( + AddNOptions as libAddNOptions, + BuiltinOperator as libBuiltinOperator, + BuiltinOptions as libBuiltinOptions, +) + + +class AddN(meta.BuiltinOptions): + def __init__(self) -> None: + super().__init__( + libBuiltinOptions.BuiltinOptions.AddNOptions, + libBuiltinOperator.BuiltinOperator.ADD_N, + ) + + def gen_tflite(self, builder: fb.Builder): + libAddNOptions.Start(builder) + return libAddNOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/add_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/add_options.py new file mode 100755 index 00000000000..37c04a84588 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/add_options.py @@ -0,0 +1,45 @@ +# +# Copyright 2023 Martin Pavella +# +# License: MIT +# See the LICENSE_MIT for more details. +# +""" + Add + +Representation of the TFLite operator 'Add'. +""" + +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import ( + ActivationFunctionType as libActivationFunctionType, + AddOptions as libAddOptions, + BuiltinOperator as libBuiltinOperator, + BuiltinOptions as libBuiltinOptions, +) + + +class Add(meta.BuiltinOptions): + fused_activation_function: libActivationFunctionType.ActivationFunctionType + + # TODO potScaleInt16 + + def __init__( + self, + fused_activation_function: libActivationFunctionType.ActivationFunctionType = libActivationFunctionType.ActivationFunctionType.NONE, + ) -> None: + super().__init__( + libBuiltinOptions.BuiltinOptions.AddOptions, + libBuiltinOperator.BuiltinOperator.ADD, + ) + self.fused_activation_function = fused_activation_function + + def gen_tflite(self, builder: fb.Builder): + libAddOptions.Start(builder) + + libAddOptions.AddFusedActivationFunction( + builder, self.fused_activation_function + ) + + return libAddOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/arg_max_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/arg_max_options.py new file mode 100755 index 00000000000..cd826201358 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/arg_max_options.py @@ -0,0 +1,28 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import ArgMaxOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions +from executorch.backends.nxp.backend.ir.lib.tflite.TensorType import TensorType + + +class ArgMax(meta.BuiltinOptions): + output_type: TensorType + + def __init__(self, output_type: TensorType) -> None: + super().__init__(BuiltinOptions.ArgMaxOptions, BuiltinOperator.ARG_MAX) + self.output_type = output_type + + def gen_tflite(self, builder: fb.Builder): + ArgMaxOptions.Start(builder) + + ArgMaxOptions.AddOutputType(builder, self.output_type) + + return ArgMaxOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/arg_min_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/arg_min_options.py new file mode 100755 index 00000000000..2ea3bfe0f55 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/arg_min_options.py @@ -0,0 +1,28 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import ArgMinOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions +from executorch.backends.nxp.backend.ir.lib.tflite.TensorType import TensorType + + +class ArgMin(meta.BuiltinOptions): + output_type: TensorType + + def __init__(self, output_type: TensorType) -> None: + super().__init__(BuiltinOptions.ArgMinOptions, BuiltinOperator.ARG_MIN) + self.output_type = output_type + + def gen_tflite(self, builder: fb.Builder): + ArgMinOptions.Start(builder) + + ArgMinOptions.AddOutputType(builder, self.output_type) + + return ArgMinOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/average_pool_2d_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/average_pool_2d_options.py new file mode 100755 index 00000000000..d3f59b3844d --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/average_pool_2d_options.py @@ -0,0 +1,62 @@ +# +# Copyright 2023 Martin Pavella +# +# License: MIT +# See the LICENSE_MIT for more details. +# +""" + AveragePool2D + +Representation of the TFLite operator 'AveragePool2D'. 
+""" + +import executorch.backends.nxp.backend.ir.lib.tflite.ActivationFunctionType as libActivationFunctionType +import executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator as libBuiltinOperator +import executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions as libBuiltinOptions +import executorch.backends.nxp.backend.ir.lib.tflite.Padding as libPadding +import executorch.backends.nxp.backend.ir.lib.tflite.Pool2DOptions as libPool2DOptions +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb + + +class AveragePool2D(meta.BuiltinOptions): + padding: libPadding.Padding + stride_w: int + stride_h: int + filter_w: int + filter_h: int + fused_activation_function: libActivationFunctionType.ActivationFunctionType + + def __init__( + self, + padding: libPadding.Padding = libPadding.Padding.SAME, + stride_w: int = 1, + stride_h: int = 1, + filter_w: int = 1, + filter_h: int = 1, + fused_activation_function: libActivationFunctionType.ActivationFunctionType = libActivationFunctionType.ActivationFunctionType.NONE, + ) -> None: + super().__init__( + libBuiltinOptions.BuiltinOptions.Pool2DOptions, + libBuiltinOperator.BuiltinOperator.AVERAGE_POOL_2D, + ) + self.padding = padding + self.stride_w = stride_w + self.stride_h = stride_h + self.filter_w = filter_w + self.filter_h = filter_h + self.fused_activation_function = fused_activation_function + + def gen_tflite(self, builder: fb.Builder): + libPool2DOptions.Start(builder) + + libPool2DOptions.AddPadding(builder, self.padding) + libPool2DOptions.AddStrideW(builder, self.stride_w) + libPool2DOptions.AddStrideH(builder, self.stride_h) + libPool2DOptions.AddFilterHeight(builder, self.filter_h) + libPool2DOptions.AddFilterWidth(builder, self.filter_w) + libPool2DOptions.AddFusedActivationFunction( + builder, self.fused_activation_function + ) + + return libPool2DOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/batch_mat_mul_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/batch_mat_mul_options.py new file mode 100755 index 00000000000..48aebd5dce4 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/batch_mat_mul_options.py @@ -0,0 +1,45 @@ +# Copyright 2023 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +""" + batch_mat_mul_options + +Representation of the TFLite operator 'BatchMatMul'. 
+""" +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import ( + BatchMatMulOptions, + BuiltinOperator, + BuiltinOptions, +) + + +class BatchMatMul(meta.BuiltinOptions): + adj_x: bool + adj_y: bool + asymmetric_quantize_inputs: bool + + def __init__( + self, adj_x: bool, adj_y: bool, asymmetric_quantize_inputs: bool + ) -> None: + super().__init__( + BuiltinOptions.BuiltinOptions.BatchMatMulOptions, + BuiltinOperator.BuiltinOperator.BATCH_MATMUL, + ) + self.adj_x = adj_x + self.adj_y = adj_y + self.asymmetric_quantize_inputs = asymmetric_quantize_inputs + + def gen_tflite(self, builder: fb.Builder): + BatchMatMulOptions.Start(builder) + + BatchMatMulOptions.AddAdjX(builder, self.adj_x) + BatchMatMulOptions.AddAdjY(builder, self.adj_y) + BatchMatMulOptions.AddAsymmetricQuantizeInputs( + builder, self.asymmetric_quantize_inputs + ) + + return BatchMatMulOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/bidirectional_sequence_lstm_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/bidirectional_sequence_lstm_options.py new file mode 100755 index 00000000000..a0589d19d3d --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/bidirectional_sequence_lstm_options.py @@ -0,0 +1,65 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import executorch.backends.nxp.backend.ir.lib.tflite.BidirectionalSequenceLSTMOptions as libBSLSTMOptions +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite.ActivationFunctionType import ( + ActivationFunctionType, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions +from executorch.backends.nxp.backend.ir.tflite_generator.meta import meta + + +class BidirectionalSequenceLSTM(meta.BuiltinOptions): + fused_activation_function: ActivationFunctionType + cell_clip: float + proj_clip: float + merge_outputs: bool + + # V2+ + time_major: bool # If True, the first dimension is sequence, otherwise batch. 
+ + # V3+ + asymmetric_quantize_inputs: bool + + def __init__( + self, + cell_clip: float, + proj_clip: float, + time_major: bool = True, + merge_outputs: bool = True, + asymmetric_quantize_inputs: bool = False, + fused_activation_function: ActivationFunctionType = ActivationFunctionType.NONE, + ) -> None: + super().__init__( + BuiltinOptions.BidirectionalSequenceLSTMOptions, + BuiltinOperator.BIDIRECTIONAL_SEQUENCE_LSTM, + ) + + self.fused_activation_function = fused_activation_function + self.cell_clip = cell_clip + self.proj_clip = proj_clip + self.merge_outputs = merge_outputs + self.time_major = time_major + self.asymmetric_quantize_inputs = asymmetric_quantize_inputs + + def gen_tflite(self, builder: fb.Builder): + libBSLSTMOptions.Start(builder) + + libBSLSTMOptions.AddFusedActivationFunction( + builder, self.fused_activation_function + ) + libBSLSTMOptions.AddCellClip(builder, self.cell_clip) + libBSLSTMOptions.AddProjClip(builder, self.proj_clip) + libBSLSTMOptions.AddMergeOutputs(builder, self.merge_outputs) + libBSLSTMOptions.AddTimeMajor(builder, self.time_major) + libBSLSTMOptions.AddAsymmetricQuantizeInputs( + builder, self.asymmetric_quantize_inputs + ) + + return libBSLSTMOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/bidirectional_sequence_rnn_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/bidirectional_sequence_rnn_options.py new file mode 100755 index 00000000000..f55c05197f5 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/bidirectional_sequence_rnn_options.py @@ -0,0 +1,53 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import executorch.backends.nxp.backend.ir.lib.tflite.BidirectionalSequenceRNNOptions as libBRNNOptions +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite.ActivationFunctionType import ( + ActivationFunctionType, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions +from executorch.backends.nxp.backend.ir.tflite_generator.meta import meta + + +class BidirectionalSequenceRNN(meta.BuiltinOptions): + time_major: bool # If True, the first dimension is sequence, otherwise batch. 
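+    # When True, inputs/outputs are laid out as [max_time, batch, depth];
+    # otherwise as [batch, max_time, depth].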
+ fused_activation_function: ActivationFunctionType + merge_outputs: bool + asymmetric_quantize_inputs: bool + + def __init__( + self, + time_major: bool = True, + merge_outputs: bool = True, + asymmetric_quantize_inputs: bool = False, + fused_activation_function: ActivationFunctionType = ActivationFunctionType.NONE, + ) -> None: + super().__init__( + BuiltinOptions.BidirectionalSequenceRNNOptions, + BuiltinOperator.BIDIRECTIONAL_SEQUENCE_RNN, + ) + + self.time_major = time_major + self.fused_activation_function = fused_activation_function + self.merge_outputs = merge_outputs + self.asymmetric_quantize_inputs = asymmetric_quantize_inputs + + def gen_tflite(self, builder: fb.Builder): + libBRNNOptions.Start(builder) + + libBRNNOptions.AddTimeMajor(builder, self.time_major) + libBRNNOptions.AddFusedActivationFunction( + builder, self.fused_activation_function + ) + libBRNNOptions.AddMergeOutputs(builder, self.merge_outputs) + libBRNNOptions.AddAsymmetricQuantizeInputs( + builder, self.asymmetric_quantize_inputs + ) + + return libBRNNOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/bitwise_xor_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/bitwise_xor_options.py new file mode 100755 index 00000000000..dee9032cce6 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/bitwise_xor_options.py @@ -0,0 +1,23 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import BitwiseXorOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions + + +class BitwiseXor(meta.BuiltinOptions): + + def __init__(self) -> None: + super().__init__(BuiltinOptions.BitwiseXorOptions, BuiltinOperator.BITWISE_XOR) + + def gen_tflite(self, builder: fb.Builder): + BitwiseXorOptions.Start(builder) + + return BitwiseXorOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/broadcast_to_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/broadcast_to_options.py new file mode 100755 index 00000000000..6c19aa4229c --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/broadcast_to_options.py @@ -0,0 +1,25 @@ +# Copyright 2023 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
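+
+"""
+    BroadcastTo
+
+Representation of the TFLite operator 'BroadcastTo', which broadcasts a tensor
+to a given shape.
+"""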
+ +import executorch.backends.nxp.backend.ir.lib.tflite.BroadcastToOptions as libBroadcastToOptions +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions + + +class BroadcastTo(meta.BuiltinOptions): + + def __init__(self) -> None: + super().__init__( + BuiltinOptions.BroadcastToOptions, BuiltinOperator.BROADCAST_TO + ) + + def gen_tflite(self, builder: fb.Builder): + libBroadcastToOptions.Start(builder) + + return libBroadcastToOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/cast_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/cast_options.py new file mode 100755 index 00000000000..ff7904f1169 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/cast_options.py @@ -0,0 +1,32 @@ +# Copyright 2023 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + + +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import CastOptions as libCastOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions +from executorch.backends.nxp.backend.ir.lib.tflite.TensorType import TensorType + + +class Cast(meta.BuiltinOptions): + in_data_type: TensorType + out_data_type: TensorType + + def __init__(self, in_data_type: TensorType, out_data_type: TensorType) -> None: + super().__init__(BuiltinOptions.CastOptions, BuiltinOperator.CAST) + self.in_data_type = in_data_type + self.out_data_type = out_data_type + + def gen_tflite(self, builder: fb.Builder): + libCastOptions.Start(builder) + + libCastOptions.AddInDataType(builder, self.in_data_type) + libCastOptions.AddOutDataType(builder, self.out_data_type) + + return libCastOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/concatenation_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/concatenation_options.py new file mode 100755 index 00000000000..c7dee397556 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/concatenation_options.py @@ -0,0 +1,38 @@ +# Copyright 2023 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
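+
+"""
+    Concatenation
+
+Representation of the TFLite operator 'Concatenation', which joins its inputs
+along 'axis' and can fuse an activation function.
+"""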
+ +import executorch.backends.nxp.backend.ir.lib.tflite.ActivationFunctionType as libActivationFunctionType +import executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator as libBuiltinOperator +import executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions as libBuiltinOptions +import executorch.backends.nxp.backend.ir.lib.tflite.ConcatenationOptions as libConcatenationOptions +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb + + +class Concatenation(meta.BuiltinOptions): + axis: int + fused_activation_function: libActivationFunctionType.ActivationFunctionType + + def __init__( + self, + axis: int, + fused_activation_function: libActivationFunctionType.ActivationFunctionType = libActivationFunctionType.ActivationFunctionType.NONE, + ) -> None: + super().__init__( + libBuiltinOptions.BuiltinOptions.ConcatenationOptions, + libBuiltinOperator.BuiltinOperator.CONCATENATION, + ) + self.axis = axis + self.fused_activation_function = fused_activation_function + + def gen_tflite(self, builder: fb.Builder): + libConcatenationOptions.Start(builder) + + libConcatenationOptions.AddAxis(builder, self.axis) + libConcatenationOptions.AddFusedActivationFunction( + builder, self.fused_activation_function + ) + + return libConcatenationOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/conv_2d_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/conv_2d_options.py new file mode 100755 index 00000000000..62709181bf7 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/conv_2d_options.py @@ -0,0 +1,59 @@ +# +# Copyright 2023 Martin Pavella +# Copyright 2023 NXP +# +# License: MIT +# See the LICENSE_MIT for more details. +# + +import executorch.backends.nxp.backend.ir.lib.tflite.Conv2DOptions as libConv2DOptions +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite.ActivationFunctionType import ( + ActivationFunctionType, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions +from executorch.backends.nxp.backend.ir.lib.tflite.Padding import Padding + + +class Conv2D(meta.BuiltinOptions): + padding: Padding + stride_w: int + stride_h: int + dilation_w_factor: int + dilation_h_factor: int + fused_activation_function: ActivationFunctionType + + def __init__( + self, + padding: Padding = Padding.SAME, + stride_w: int = 1, + stride_h: int = 1, + dilation_w_factor: int = 1, + dilation_h_factor: int = 1, + fused_activation_function: ActivationFunctionType = ActivationFunctionType.NONE, + ) -> None: + super().__init__(BuiltinOptions.Conv2DOptions, BuiltinOperator.CONV_2D) + self.padding = padding + self.stride_w = stride_w + self.stride_h = stride_h + self.dilation_w_factor = dilation_w_factor + self.dilation_h_factor = dilation_h_factor + self.fused_activation_function = fused_activation_function + + def gen_tflite(self, builder: fb.Builder): + libConv2DOptions.Start(builder) + + libConv2DOptions.AddPadding(builder, self.padding) + libConv2DOptions.AddStrideW(builder, self.stride_w) + libConv2DOptions.AddStrideH(builder, self.stride_h) + libConv2DOptions.AddFusedActivationFunction( + builder, self.fused_activation_function + ) + libConv2DOptions.AddDilationWFactor(builder, self.dilation_w_factor) + 
libConv2DOptions.AddDilationHFactor(builder, self.dilation_h_factor) + + return libConv2DOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/conv_3d_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/conv_3d_options.py new file mode 100755 index 00000000000..bbd62efbb73 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/conv_3d_options.py @@ -0,0 +1,67 @@ +# Copyright 2023 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import executorch.backends.nxp.backend.ir.lib.tflite.Conv3DOptions as libConv3DOptions +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite.ActivationFunctionType import ( + ActivationFunctionType, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions +from executorch.backends.nxp.backend.ir.lib.tflite.Padding import Padding + + +class Conv3D(meta.BuiltinOptions): + padding: Padding + stride_w: int + stride_h: int + stride_d: int + dilation_w_factor: int + dilation_h_factor: int + dilation_d_factor: int + fused_activation_function: ActivationFunctionType + + def __init__( + self, + padding: Padding = Padding.SAME, + stride_w: int = 1, + stride_h: int = 1, + stride_d: int = 1, + dilation_w_factor: int = 1, + dilation_h_factor: int = 1, + dilation_d_factor: int = 1, + fused_activation_function: ActivationFunctionType = ActivationFunctionType.NONE, + ) -> None: + super().__init__(BuiltinOptions.Conv3DOptions, BuiltinOperator.CONV_3D) + self.padding = padding + self.stride_w = stride_w + self.stride_h = stride_h + self.stride_d = stride_d + self.dilation_w_factor = dilation_w_factor + self.dilation_h_factor = dilation_h_factor + self.dilation_d_factor = dilation_d_factor + self.fused_activation_function = fused_activation_function + + def gen_tflite(self, builder: fb.Builder): + libConv3DOptions.Start(builder) + + libConv3DOptions.AddPadding(builder, self.padding) + + libConv3DOptions.AddStrideW(builder, self.stride_w) + libConv3DOptions.AddStrideH(builder, self.stride_h) + libConv3DOptions.AddStrideD(builder, self.stride_d) + + libConv3DOptions.AddFusedActivationFunction( + builder, self.fused_activation_function + ) + + libConv3DOptions.AddDilationWFactor(builder, self.dilation_w_factor) + libConv3DOptions.AddDilationHFactor(builder, self.dilation_h_factor) + libConv3DOptions.AddDilationDFactor(builder, self.dilation_d_factor) + + return libConv3DOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/cum_sum_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/cum_sum_options.py new file mode 100755 index 00000000000..54b9379a654 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/cum_sum_options.py @@ -0,0 +1,30 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
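+
+"""
+    CumSum
+
+Representation of the TFLite operator 'CumSum'.
+For example, cumsum([1, 2, 3]) = [1, 3, 6]; 'exclusive' and 'reverse' modify
+which elements each partial sum includes and the direction of accumulation.
+"""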
+
+import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta
+import flatbuffers as fb
+from executorch.backends.nxp.backend.ir.lib.tflite import CumsumOptions
+from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import (
+    BuiltinOperator,
+)
+from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions
+
+
+class CumSum(meta.BuiltinOptions):
+    exclusive: bool
+    reverse: bool
+
+    def __init__(self, exclusive: bool, reverse: bool) -> None:
+        super().__init__(BuiltinOptions.CumsumOptions, BuiltinOperator.CUMSUM)
+        self.exclusive = exclusive
+        self.reverse = reverse
+
+    def gen_tflite(self, builder: fb.Builder):
+        CumsumOptions.Start(builder)
+
+        CumsumOptions.AddExclusive(builder, self.exclusive)
+        CumsumOptions.AddReverse(builder, self.reverse)
+
+        return CumsumOptions.End(builder)
diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/depth_to_space_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/depth_to_space_options.py
new file mode 100755
index 00000000000..90033e72196
--- /dev/null
+++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/depth_to_space_options.py
@@ -0,0 +1,29 @@
+# Copyright 2024 NXP
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta
+import flatbuffers as fb
+from executorch.backends.nxp.backend.ir.lib.tflite import DepthToSpaceOptions
+from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import (
+    BuiltinOperator,
+)
+from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions
+
+
+class DepthToSpace(meta.BuiltinOptions):
+    block_size: int
+
+    def __init__(self, block_size: int) -> None:
+        super().__init__(
+            BuiltinOptions.DepthToSpaceOptions, BuiltinOperator.DEPTH_TO_SPACE
+        )
+        self.block_size = block_size
+
+    def gen_tflite(self, builder: fb.Builder):
+        DepthToSpaceOptions.Start(builder)
+
+        DepthToSpaceOptions.AddBlockSize(builder, self.block_size)
+
+        return DepthToSpaceOptions.End(builder)
diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/depthwise_conv_2d_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/depthwise_conv_2d_options.py
new file mode 100755
index 00000000000..66b5fd952b6
--- /dev/null
+++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/depthwise_conv_2d_options.py
@@ -0,0 +1,69 @@
+# Copyright 2023 NXP
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""
+    depthwise_conv_2d_options
+
+Representation of the TFLite operator 'DepthwiseConv2D'.
+""" + +import executorch.backends.nxp.backend.ir.lib.tflite.DepthwiseConv2DOptions as libDepthwiseConv2DOptions +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite.ActivationFunctionType import ( + ActivationFunctionType, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions +from executorch.backends.nxp.backend.ir.lib.tflite.Padding import Padding + + +class DepthwiseConv2D(meta.BuiltinOptions): + padding: Padding + stride_w: int = 1 + stride_h: int = 1 + fused_activation_function: ActivationFunctionType + dilation_w_factor: int = 1 + dilation_h_factor: int = 1 + depth_multiplier: int = 1 # Redundant according to schema.fbs (line 597) + + def __init__( + self, + padding: Padding = Padding.SAME, + stride_w: int = 1, + stride_h: int = 1, + dilation_w_factor: int = 1, + dilation_h_factor: int = 1, + fused_activation_function: ActivationFunctionType = ActivationFunctionType.NONE, + depth_multiplier: int = 1, + ) -> None: + super().__init__( + BuiltinOptions.DepthwiseConv2DOptions, BuiltinOperator.DEPTHWISE_CONV_2D + ) + self.padding = padding + self.stride_w = stride_w + self.stride_h = stride_h + self.fused_activation_function = fused_activation_function + self.dilation_w_factor = dilation_w_factor + self.dilation_h_factor = dilation_h_factor + self.depth_multiplier = depth_multiplier + + def gen_tflite(self, builder: fb.Builder): + libDepthwiseConv2DOptions.Start(builder) + + libDepthwiseConv2DOptions.AddPadding(builder, self.padding) + libDepthwiseConv2DOptions.AddStrideW(builder, self.stride_w) + libDepthwiseConv2DOptions.AddStrideH(builder, self.stride_h) + libDepthwiseConv2DOptions.AddFusedActivationFunction( + builder, self.fused_activation_function + ) + libDepthwiseConv2DOptions.AddDilationWFactor(builder, self.dilation_w_factor) + libDepthwiseConv2DOptions.AddDilationHFactor(builder, self.dilation_h_factor) + + libDepthwiseConv2DOptions.AddDepthMultiplier(builder, self.depth_multiplier) + + return libDepthwiseConv2DOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/dequantize_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/dequantize_options.py new file mode 100755 index 00000000000..aebccb47e1c --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/dequantize_options.py @@ -0,0 +1,24 @@ +# Copyright 2023 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+
+import executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator as libBuiltinOperator
+import executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions as libBuiltinOptions
+import executorch.backends.nxp.backend.ir.lib.tflite.DequantizeOptions as libDequantizeOptions
+import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta
+import flatbuffers as fb
+
+
+class Dequantize(meta.BuiltinOptions):
+
+    def __init__(self) -> None:
+        super().__init__(
+            libBuiltinOptions.BuiltinOptions.DequantizeOptions,
+            libBuiltinOperator.BuiltinOperator.DEQUANTIZE,
+        )
+
+    def gen_tflite(self, builder: fb.Builder):
+        libDequantizeOptions.Start(builder)
+
+        return libDequantizeOptions.End(builder)
diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/div_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/div_options.py
new file mode 100755
index 00000000000..24f35498df1
--- /dev/null
+++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/div_options.py
@@ -0,0 +1,43 @@
+#
+# Copyright 2023 Martin Pavella
+# Copyright 2023 NXP
+#
+# License: MIT
+# See the LICENSE_MIT for more details.
+#
+"""
+    Div
+
+Representation of the TFLite operator 'Div'.
+"""
+
+import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta
+import flatbuffers as fb
+from executorch.backends.nxp.backend.ir.lib.tflite import DivOptions as libDivOptions
+from executorch.backends.nxp.backend.ir.lib.tflite.ActivationFunctionType import (
+    ActivationFunctionType,
+)
+from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import (
+    BuiltinOperator,
+)
+from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions
+
+
+class Div(meta.BuiltinOptions):
+    fused_activation_function: ActivationFunctionType
+
+    def __init__(
+        self,
+        fused_activation_function: ActivationFunctionType = ActivationFunctionType.NONE,
+    ) -> None:
+        super().__init__(BuiltinOptions.DivOptions, BuiltinOperator.DIV)
+        self.fused_activation_function = fused_activation_function
+
+    def gen_tflite(self, builder: fb.Builder):
+        libDivOptions.Start(builder)
+
+        libDivOptions.AddFusedActivationFunction(
+            builder, self.fused_activation_function
+        )
+
+        return libDivOptions.End(builder)
diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/equal_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/equal_options.py
new file mode 100755
index 00000000000..238d334314a
--- /dev/null
+++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/equal_options.py
@@ -0,0 +1,23 @@
+# Copyright 2023 NXP
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
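+
+"""
+    Equal
+
+Representation of the TFLite operator 'Equal' (element-wise comparison yielding
+a boolean tensor).
+"""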
+ +import executorch.backends.nxp.backend.ir.lib.tflite.EqualOptions as libEqualOptions +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions +from executorch.backends.nxp.backend.ir.tflite_generator.meta import meta + + +class Equal(meta.BuiltinOptions): + + def __init__(self) -> None: + super().__init__(BuiltinOptions.EqualOptions, BuiltinOperator.EQUAL) + + def gen_tflite(self, builder: fb.Builder): + libEqualOptions.Start(builder) + + return libEqualOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/exp_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/exp_options.py new file mode 100755 index 00000000000..b6fe351edb2 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/exp_options.py @@ -0,0 +1,23 @@ +# Copyright 2023 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import executorch.backends.nxp.backend.ir.lib.tflite.ExpOptions as libExpOptions +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions + + +class Exp(meta.BuiltinOptions): + + def __init__(self) -> None: + super().__init__(BuiltinOptions.ExpOptions, BuiltinOperator.EXP) + + def gen_tflite(self, builder: fb.Builder): + libExpOptions.Start(builder) + + return libExpOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/floor_mod_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/floor_mod_options.py new file mode 100755 index 00000000000..448a3b77d1b --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/floor_mod_options.py @@ -0,0 +1,23 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import FloorModOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions + + +class FloorMod(meta.BuiltinOptions): + + def __init__(self) -> None: + super().__init__(BuiltinOptions.FloorModOptions, BuiltinOperator.FLOOR_MOD) + + def gen_tflite(self, builder: fb.Builder): + FloorModOptions.Start(builder) + + return FloorModOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/fully_connected_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/fully_connected_options.py new file mode 100755 index 00000000000..1aa2c783032 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/fully_connected_options.py @@ -0,0 +1,57 @@ +# +# Copyright 2023 Martin Pavella +# Copyright 2023 NXP +# +# License: MIT +# See the LICENSE_MIT for more details. 
+# + +import executorch.backends.nxp.backend.ir.lib.tflite.FullyConnectedOptions as libFullyConnectedOptions +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite.ActivationFunctionType import ( + ActivationFunctionType, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions +from executorch.backends.nxp.backend.ir.lib.tflite.FullyConnectedOptionsWeightsFormat import ( + FullyConnectedOptionsWeightsFormat, +) + + +class FullyConnected(meta.BuiltinOptions): + fused_activation_function: ActivationFunctionType + weights_format: FullyConnectedOptionsWeightsFormat + keep_num_dims: bool + asymmetric_quantize_inputs: bool + + def __init__( + self, + fused_activation_function: ActivationFunctionType = ActivationFunctionType.NONE, + weights_format: FullyConnectedOptionsWeightsFormat = FullyConnectedOptionsWeightsFormat.DEFAULT, + keep_num_dims: bool = False, + asymmetric_quantize_inputs: bool = False, + ) -> None: + super().__init__( + BuiltinOptions.FullyConnectedOptions, BuiltinOperator.FULLY_CONNECTED + ) + self.fused_activation_function = fused_activation_function + self.weights_format = weights_format + self.keep_num_dims = keep_num_dims + self.asymmetric_quantize_inputs = asymmetric_quantize_inputs + + def gen_tflite(self, builder: fb.Builder): + libFullyConnectedOptions.Start(builder) + + libFullyConnectedOptions.AddFusedActivationFunction( + builder, self.fused_activation_function + ) + libFullyConnectedOptions.AddWeightsFormat(builder, self.weights_format) + libFullyConnectedOptions.AddKeepNumDims(builder, self.keep_num_dims) + libFullyConnectedOptions.AddAsymmetricQuantizeInputs( + builder, self.asymmetric_quantize_inputs + ) + + return libFullyConnectedOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/gather_nd_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/gather_nd_options.py new file mode 100755 index 00000000000..9198995fb47 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/gather_nd_options.py @@ -0,0 +1,23 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
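+
+"""
+    GatherND
+
+Representation of the TFLite operator 'GatherNd'.
+"""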
+ +import executorch.backends.nxp.backend.ir.lib.tflite.GatherNdOptions as libGatherNDOptions +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions + + +class GatherND(meta.BuiltinOptions): + + def __init__(self) -> None: + super().__init__(BuiltinOptions.GatherNdOptions, BuiltinOperator.GATHER_ND) + + def gen_tflite(self, builder: fb.Builder): + libGatherNDOptions.Start(builder) + + return libGatherNDOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/gather_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/gather_options.py new file mode 100755 index 00000000000..395500a7117 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/gather_options.py @@ -0,0 +1,30 @@ +# Copyright 2023 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import executorch.backends.nxp.backend.ir.lib.tflite.GatherOptions as libGatherOptions +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions + + +class Gather(meta.BuiltinOptions): + axis: int + batch_dims: int + + def __init__(self, axis: int, batch_dims: int = 0) -> None: + super().__init__(BuiltinOptions.GatherOptions, BuiltinOperator.GATHER) + self.axis = axis + self.batch_dims = batch_dims + + def gen_tflite(self, builder: fb.Builder): + libGatherOptions.Start(builder) + + libGatherOptions.AddAxis(builder, self.axis) + libGatherOptions.AddBatchDims(builder, self.batch_dims) + + return libGatherOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/gelu_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/gelu_options.py new file mode 100755 index 00000000000..4a20e787263 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/gelu_options.py @@ -0,0 +1,27 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
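+
+"""
+    Gelu
+
+Representation of the TFLite operator 'Gelu'. GELU(x) = x * P(X <= x) for
+X ~ N(0, 1); 'approximate' selects the faster tanh-based approximation.
+"""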
+ +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import GeluOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions + + +class Gelu(meta.BuiltinOptions): + approximate: bool + + def __init__(self, approximate: bool) -> None: + super().__init__(BuiltinOptions.GeluOptions, BuiltinOperator.GELU) + self.approximate = approximate + + def gen_tflite(self, builder: fb.Builder): + GeluOptions.Start(builder) + + GeluOptions.AddApproximate(builder, self.approximate) + + return GeluOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/greater_equal_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/greater_equal_options.py new file mode 100755 index 00000000000..a5071cd057f --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/greater_equal_options.py @@ -0,0 +1,25 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import GreaterEqualOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions + + +class GreaterEqual(meta.BuiltinOptions): + + def __init__(self) -> None: + super().__init__( + BuiltinOptions.GreaterEqualOptions, BuiltinOperator.GREATER_EQUAL + ) + + def gen_tflite(self, builder: fb.Builder): + GreaterEqualOptions.Start(builder) + + return GreaterEqualOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/greater_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/greater_options.py new file mode 100755 index 00000000000..a129281b86b --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/greater_options.py @@ -0,0 +1,23 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import GreaterOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions + + +class Greater(meta.BuiltinOptions): + + def __init__(self) -> None: + super().__init__(BuiltinOptions.GreaterOptions, BuiltinOperator.GREATER) + + def gen_tflite(self, builder: fb.Builder): + GreaterOptions.Start(builder) + + return GreaterOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/hard_swish_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/hard_swish_options.py new file mode 100755 index 00000000000..be18603715a --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/hard_swish_options.py @@ -0,0 +1,23 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
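+
+"""
+    HardSwish
+
+Representation of the TFLite operator 'HardSwish', computing
+x * relu6(x + 3) / 6 element-wise.
+"""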
+ +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import HardSwishOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions + + +class HardSwish(meta.BuiltinOptions): + + def __init__(self) -> None: + super().__init__(BuiltinOptions.HardSwishOptions, BuiltinOperator.HARD_SWISH) + + def gen_tflite(self, builder: fb.Builder): + HardSwishOptions.Start(builder) + + return HardSwishOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/leaky_relu_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/leaky_relu_options.py new file mode 100755 index 00000000000..6ba7bb65d72 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/leaky_relu_options.py @@ -0,0 +1,31 @@ +# +# Copyright 2023 Martin Pavella +# +# License: MIT +# See the LICENSE_MIT for more details. +# + + +import executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator as libBuiltinOperator +import executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions as libBuiltinOptions +import executorch.backends.nxp.backend.ir.lib.tflite.LeakyReluOptions as libLeakyReluOptions +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb + + +class LeakyRelu(meta.BuiltinOptions): + alpha: float + + def __init__(self, alpha: float) -> None: + super().__init__( + libBuiltinOptions.BuiltinOptions.LeakyReluOptions, + libBuiltinOperator.BuiltinOperator.LEAKY_RELU, + ) + self.alpha = alpha + + def gen_tflite(self, builder: fb.Builder): + libLeakyReluOptions.Start(builder) + + libLeakyReluOptions.AddAlpha(builder, self.alpha) + + return libLeakyReluOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/less_equal_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/less_equal_options.py new file mode 100755 index 00000000000..3bc5e36e721 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/less_equal_options.py @@ -0,0 +1,23 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import LessEqualOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions + + +class LessEqual(meta.BuiltinOptions): + + def __init__(self) -> None: + super().__init__(BuiltinOptions.LessEqualOptions, BuiltinOperator.LESS_EQUAL) + + def gen_tflite(self, builder: fb.Builder): + LessEqualOptions.Start(builder) + + return LessEqualOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/less_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/less_options.py new file mode 100755 index 00000000000..2f7c4696892 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/less_options.py @@ -0,0 +1,23 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
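+
+"""
+    Less
+
+Representation of the TFLite operator 'Less' (element-wise comparison yielding
+a boolean tensor).
+"""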
+ +import executorch.backends.nxp.backend.ir.lib.tflite.LessOptions as libLessOptions +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions + + +class Less(meta.BuiltinOptions): + + def __init__(self) -> None: + super().__init__(BuiltinOptions.LessOptions, BuiltinOperator.LESS) + + def gen_tflite(self, builder: fb.Builder): + libLessOptions.Start(builder) + + return libLessOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/log_softmax_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/log_softmax_options.py new file mode 100755 index 00000000000..163cbfb7cf9 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/log_softmax_options.py @@ -0,0 +1,29 @@ +# +# Copyright 2023 Martin Pavella +# +# License: MIT +# See the LICENSE_MIT for more details. +# +""" + LogSoftmax + +Representation of the TFLite operator 'LogSoftmax'. +""" + +import executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator as libBuiltinOperator +import executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions as libBuiltinOptions +import executorch.backends.nxp.backend.ir.lib.tflite.LogSoftmaxOptions as libLogSoftmaxOptions +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb + + +class LogSoftmax(meta.BuiltinOptions): + def __init__(self) -> None: + super().__init__( + libBuiltinOptions.BuiltinOptions.LogSoftmaxOptions, + libBuiltinOperator.BuiltinOperator.LOG_SOFTMAX, + ) + + def gen_tflite(self, builder: fb.Builder): + libLogSoftmaxOptions.Start(builder) + return libLogSoftmaxOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/logical_and_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/logical_and_options.py new file mode 100755 index 00000000000..95253c5841e --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/logical_and_options.py @@ -0,0 +1,23 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import LogicalAndOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions + + +class LogicalAnd(meta.BuiltinOptions): + + def __init__(self) -> None: + super().__init__(BuiltinOptions.LogicalAndOptions, BuiltinOperator.LOGICAL_AND) + + def gen_tflite(self, builder: fb.Builder): + LogicalAndOptions.Start(builder) + + return LogicalAndOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/logical_not_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/logical_not_options.py new file mode 100755 index 00000000000..05798388f1b --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/logical_not_options.py @@ -0,0 +1,23 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
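+
+"""
+    LogicalNot
+
+Representation of the TFLite operator 'LogicalNot'.
+"""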
+ +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import LogicalNotOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions + + +class LogicalNot(meta.BuiltinOptions): + + def __init__(self) -> None: + super().__init__(BuiltinOptions.LogicalNotOptions, BuiltinOperator.LOGICAL_NOT) + + def gen_tflite(self, builder: fb.Builder): + LogicalNotOptions.Start(builder) + + return LogicalNotOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/logical_or_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/logical_or_options.py new file mode 100755 index 00000000000..ff39fe42ee4 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/logical_or_options.py @@ -0,0 +1,23 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import LogicalOrOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions + + +class LogicalOr(meta.BuiltinOptions): + + def __init__(self) -> None: + super().__init__(BuiltinOptions.LogicalOrOptions, BuiltinOperator.LOGICAL_OR) + + def gen_tflite(self, builder: fb.Builder): + LogicalOrOptions.Start(builder) + + return LogicalOrOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/lrn_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/lrn_options.py new file mode 100755 index 00000000000..8d4fe5d20ff --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/lrn_options.py @@ -0,0 +1,45 @@ +# +# Copyright 2023 Martin Pavella +# Copyright 2023 NXP +# +# License: MIT +# See the LICENSE_MIT for more details. +# +""" + LRN + +Representation of the TFLite operator 'LocalResponseNormalization'. 
+""" + +import executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator as libBuiltinOperator +import executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions as libBuiltinOptions +import executorch.backends.nxp.backend.ir.lib.tflite.LocalResponseNormalizationOptions as libLocalResponseNormalizationOptions +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb + + +class LRN(meta.BuiltinOptions): + radius: int + bias: float + alpha: float + beta: float + + def __init__(self, radius: int, bias: float, alpha: float, beta: float) -> None: + super().__init__( + libBuiltinOptions.BuiltinOptions.LocalResponseNormalizationOptions, + libBuiltinOperator.BuiltinOperator.LOCAL_RESPONSE_NORMALIZATION, + ) + self.radius = radius + self.bias = bias + self.alpha = alpha + self.beta = beta + + def gen_tflite(self, builder: fb.Builder): + libLocalResponseNormalizationOptions.Start(builder) + + libLocalResponseNormalizationOptions.AddRadius(builder, self.radius) + libLocalResponseNormalizationOptions.AddBias(builder, self.bias) + libLocalResponseNormalizationOptions.AddAlpha(builder, self.alpha) + libLocalResponseNormalizationOptions.AddBeta(builder, self.beta) + + return libLocalResponseNormalizationOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/lstm_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/lstm_options.py new file mode 100755 index 00000000000..47a624b4cbd --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/lstm_options.py @@ -0,0 +1,60 @@ +# Copyright 2023 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import executorch.backends.nxp.backend.ir.lib.tflite.LSTMOptions as libLSTMOptions +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite.ActivationFunctionType import ( + ActivationFunctionType, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions +from executorch.backends.nxp.backend.ir.lib.tflite.LSTMKernelType import LSTMKernelType +from executorch.backends.nxp.backend.ir.tflite_generator.meta import meta + + +class LSTM(meta.BuiltinOptions): + # LSTM v1+ + fused_activation_function: ActivationFunctionType + cell_clip: float + proj_clip: float + + # LSTM v2+ + kernel_type: LSTMKernelType + + # LSTM v4+ + asymmetric_quantize_inputs: bool + + def __init__( + self, + cell_clip: float, + proj_clip: float, + kernel_type: LSTMKernelType = LSTMKernelType.FULL, + asymmetric_quantize_inputs: bool = False, + fused_activation_function: ActivationFunctionType = ActivationFunctionType.NONE, + ) -> None: + super().__init__(BuiltinOptions.LSTMOptions, BuiltinOperator.LSTM) + + self.cell_clip = cell_clip + self.proj_clip = proj_clip + self.kernel_type = kernel_type + self.asymmetric_quantize_inputs = asymmetric_quantize_inputs + self.fused_activation_function = fused_activation_function + + def gen_tflite(self, builder: fb.Builder): + libLSTMOptions.Start(builder) + + libLSTMOptions.AddFusedActivationFunction( + builder, self.fused_activation_function + ) + libLSTMOptions.AddCellClip(builder, self.cell_clip) + libLSTMOptions.AddProjClip(builder, self.proj_clip) + libLSTMOptions.AddKernelType(builder, self.kernel_type) + libLSTMOptions.AddAsymmetricQuantizeInputs( + builder, self.asymmetric_quantize_inputs + ) + + 
return libLSTMOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/max_pool_2d_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/max_pool_2d_options.py new file mode 100755 index 00000000000..b87a2f46de2 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/max_pool_2d_options.py @@ -0,0 +1,62 @@ +# +# Copyright 2023 Martin Pavella +# +# License: MIT +# See the LICENSE_MIT for more details. +# +""" + MaxPool2D + +Representation of the TFLite operator 'MaxPool2D'. +""" + +import executorch.backends.nxp.backend.ir.lib.tflite.ActivationFunctionType as libActivationFunctionType +import executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator as libBuiltinOperator +import executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions as libBuiltinOptions +import executorch.backends.nxp.backend.ir.lib.tflite.Padding as libPadding +import executorch.backends.nxp.backend.ir.lib.tflite.Pool2DOptions as libPool2DOptions +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb + + +class MaxPool2D(meta.BuiltinOptions): + padding: libPadding.Padding + stride_w: int + stride_h: int + filter_w: int + filter_h: int + fused_activation_function: libActivationFunctionType.ActivationFunctionType + + def __init__( + self, + padding: libPadding.Padding = libPadding.Padding.SAME, + stride_w: int = 1, + stride_h: int = 1, + filter_w: int = 1, + filter_h: int = 1, + fused_activation_function: libActivationFunctionType.ActivationFunctionType = libActivationFunctionType.ActivationFunctionType.NONE, + ) -> None: + super().__init__( + libBuiltinOptions.BuiltinOptions.Pool2DOptions, + libBuiltinOperator.BuiltinOperator.MAX_POOL_2D, + ) + self.padding = padding + self.stride_w = stride_w + self.stride_h = stride_h + self.filter_w = filter_w + self.filter_h = filter_h + self.fused_activation_function = fused_activation_function + + def gen_tflite(self, builder: fb.Builder): + libPool2DOptions.Start(builder) + + libPool2DOptions.AddPadding(builder, self.padding) + libPool2DOptions.AddStrideW(builder, self.stride_w) + libPool2DOptions.AddStrideH(builder, self.stride_h) + libPool2DOptions.AddFilterHeight(builder, self.filter_h) + libPool2DOptions.AddFilterWidth(builder, self.filter_w) + libPool2DOptions.AddFusedActivationFunction( + builder, self.fused_activation_function + ) + + return libPool2DOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/maximum_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/maximum_options.py new file mode 100755 index 00000000000..1a0c7d9f630 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/maximum_options.py @@ -0,0 +1,21 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
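+
+"""
+    Maximum
+
+Representation of the TFLite operator 'Maximum' (element-wise maximum with
+broadcasting). It shares 'MaximumMinimumOptions' with the 'Minimum' operator.
+"""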
+ +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import MaximumMinimumOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions + + +class Maximum(meta.BuiltinOptions): + def __init__(self) -> None: + super().__init__(BuiltinOptions.MaximumMinimumOptions, BuiltinOperator.MAXIMUM) + + def gen_tflite(self, builder: fb.Builder): + MaximumMinimumOptions.Start(builder) + return MaximumMinimumOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/mean_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/mean_options.py new file mode 100755 index 00000000000..8cf526b72ae --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/mean_options.py @@ -0,0 +1,28 @@ +# Copyright 2023 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import flatbuffers as fb + +from executorch.backends.nxp.backend.ir.lib.tflite import ReducerOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions +from executorch.backends.nxp.backend.ir.tflite_generator.meta import meta + + +class Mean(meta.BuiltinOptions): + keep_dims: bool + + def __init__(self, keep_dims: bool) -> None: + super().__init__(BuiltinOptions.ReducerOptions, BuiltinOperator.MEAN) + self.keep_dims = keep_dims + + def gen_tflite(self, builder: fb.Builder): + ReducerOptions.Start(builder) + + ReducerOptions.AddKeepDims(builder, self.keep_dims) + + return ReducerOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/minimum_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/minimum_options.py new file mode 100755 index 00000000000..02def0d7eb9 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/minimum_options.py @@ -0,0 +1,21 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import MaximumMinimumOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions + + +class Minimum(meta.BuiltinOptions): + def __init__(self) -> None: + super().__init__(BuiltinOptions.MaximumMinimumOptions, BuiltinOperator.MINIMUM) + + def gen_tflite(self, builder: fb.Builder): + MaximumMinimumOptions.Start(builder) + return MaximumMinimumOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/mirror_pad_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/mirror_pad_options.py new file mode 100755 index 00000000000..a7111381734 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/mirror_pad_options.py @@ -0,0 +1,60 @@ +# Copyright 2023 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+
+import flatbuffers
+
+from executorch.backends.nxp.backend.ir.lib.tflite import (
+    BuiltinOperator,
+    BuiltinOptions,
+    MirrorPadOptions,
+)
+from executorch.backends.nxp.backend.ir.lib.tflite.MirrorPadMode import MirrorPadMode
+from executorch.backends.nxp.backend.ir.tflite_generator.meta import meta
+
+
+# SYMMETRIC pads = [2, 2, 2, 2]
+# input:
+# [[0. 1.]
+#  [2. 3.]]
+#
+# output:
+# [[3. 2. 2. 3. 3. 2.]
+#  [1. 0. 0. 1. 1. 0.]
+#  [1. 0. 0. 1. 1. 0.]
+#  [3. 2. 2. 3. 3. 2.]
+#  [3. 2. 2. 3. 3. 2.]
+#  [1. 0. 0. 1. 1. 0.]]
+
+# REFLECT pads = [2, 2, 2, 2]
+# input:
+# [[0. 1.]
+#  [2. 3.]]
+#
+# output: (This output is not meaningful: TFLite's REFLECT mode requires each padding to be
+# smaller than the corresponding input dimension, so pads = [2, 2, 2, 2] on a 2x2 input is
+# out of spec. The element [0][0] is sometimes 9.462417e-28, i.e. the result is
+# non-deterministic, which is consistent with reads of uninitialized memory.)
+# [[0. 0. 0. 0. 0. 0.]
+#  [0. 3. 2. 3. 2. 2.]
+#  [2. 1. 0. 1. 0. 0.]
+#  [0. 3. 2. 3. 2. 2.]
+#  [2. 1. 0. 1. 0. 0.]
+#  [2. 1. 0. 1. 0. 0.]]
+
+
+class MirrorPad(meta.BuiltinOptions):
+    mode: MirrorPadMode
+
+    def __init__(self, mode: MirrorPadMode = MirrorPadMode.REFLECT) -> None:
+        super().__init__(
+            BuiltinOptions.BuiltinOptions.MirrorPadOptions,
+            BuiltinOperator.BuiltinOperator.MIRROR_PAD,
+        )
+        self.mode = mode
+
+    def gen_tflite(self, builder: flatbuffers.Builder):
+        MirrorPadOptions.Start(builder)
+
+        MirrorPadOptions.AddMode(builder, self.mode)
+
+        return MirrorPadOptions.End(builder)
diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/mul_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/mul_options.py
new file mode 100755
index 00000000000..3eef87b2344
--- /dev/null
+++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/mul_options.py
@@ -0,0 +1,36 @@
+#
+# Copyright 2023 Martin Pavella
+# Copyright 2023 NXP
+#
+# License: MIT
+# See the LICENSE_MIT for more details.
+#
+
+import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta
+import flatbuffers as fb
+from executorch.backends.nxp.backend.ir.lib.tflite import MulOptions
+from executorch.backends.nxp.backend.ir.lib.tflite.ActivationFunctionType import (
+    ActivationFunctionType,
+)
+from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import (
+    BuiltinOperator,
+)
+from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions
+
+
+class Mul(meta.BuiltinOptions):
+    fused_activation_function: ActivationFunctionType
+
+    def __init__(
+        self,
+        fused_activation_function: ActivationFunctionType = ActivationFunctionType.NONE,
+    ) -> None:
+        super().__init__(BuiltinOptions.MulOptions, BuiltinOperator.MUL)
+        self.fused_activation_function = fused_activation_function
+
+    def gen_tflite(self, builder: fb.Builder):
+        MulOptions.Start(builder)
+
+        MulOptions.AddFusedActivationFunction(builder, self.fused_activation_function)
+
+        return MulOptions.End(builder)
diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/multinomial_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/multinomial_options.py
new file mode 100755
index 00000000000..cd59fe7f608
--- /dev/null
+++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/multinomial_options.py
@@ -0,0 +1,30 @@
+# Copyright 2024 NXP
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+ +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import RandomOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions + + +class Multinomial(meta.BuiltinOptions): + seed: int + seed2: int + + def __init__(self, seed: int, seed2: int) -> None: + super().__init__(BuiltinOptions.RandomOptions, BuiltinOperator.MULTINOMIAL) + self.seed = seed + self.seed2 = seed2 + + def gen_tflite(self, builder: fb.Builder): + RandomOptions.Start(builder) + + RandomOptions.AddSeed(builder, self.seed) + RandomOptions.AddSeed2(builder, self.seed2) + + return RandomOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/neg_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/neg_options.py new file mode 100755 index 00000000000..5ddea3bfe6e --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/neg_options.py @@ -0,0 +1,23 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import NegOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions + + +class Neg(meta.BuiltinOptions): + + def __init__(self) -> None: + super().__init__(BuiltinOptions.NegOptions, BuiltinOperator.NEG) + + def gen_tflite(self, builder: fb.Builder): + NegOptions.Start(builder) + + return NegOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/not_equal_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/not_equal_options.py new file mode 100755 index 00000000000..98b2819b944 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/not_equal_options.py @@ -0,0 +1,23 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import NotEqualOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions + + +class NotEqual(meta.BuiltinOptions): + + def __init__(self) -> None: + super().__init__(BuiltinOptions.NotEqualOptions, BuiltinOperator.NOT_EQUAL) + + def gen_tflite(self, builder: fb.Builder): + NotEqualOptions.Start(builder) + + return NotEqualOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/one_hot_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/one_hot_options.py new file mode 100755 index 00000000000..c075c96f08d --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/one_hot_options.py @@ -0,0 +1,27 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import OneHotOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions + + +class OneHot(meta.BuiltinOptions): + axis: int + + def __init__(self, axis: int) -> None: + super().__init__(BuiltinOptions.OneHotOptions, BuiltinOperator.ONE_HOT) + self.axis = axis + + def gen_tflite(self, builder: fb.Builder): + OneHotOptions.Start(builder) + + OneHotOptions.AddAxis(builder, self.axis) + + return OneHotOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/pad_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/pad_options.py new file mode 100755 index 00000000000..86d81f20af5 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/pad_options.py @@ -0,0 +1,27 @@ +# Copyright 2023 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import flatbuffers + +from executorch.backends.nxp.backend.ir.lib.tflite import ( + BuiltinOperator, + BuiltinOptions, + PadOptions, +) +from executorch.backends.nxp.backend.ir.tflite_generator.meta import meta + + +class Pad(meta.BuiltinOptions): + + def __init__(self) -> None: + super().__init__( + BuiltinOptions.BuiltinOptions.PadOptions, + BuiltinOperator.BuiltinOperator.PAD, + ) + + def gen_tflite(self, builder: flatbuffers.Builder): + PadOptions.Start(builder) + + return PadOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/pad_v2_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/pad_v2_options.py new file mode 100755 index 00000000000..48b6c404517 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/pad_v2_options.py @@ -0,0 +1,27 @@ +# Copyright 2023 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import flatbuffers + +from executorch.backends.nxp.backend.ir.lib.tflite import ( + BuiltinOperator, + BuiltinOptions, + PadV2Options, +) +from executorch.backends.nxp.backend.ir.tflite_generator.meta import meta + + +class PadV2(meta.BuiltinOptions): + + def __init__(self) -> None: + super().__init__( + BuiltinOptions.BuiltinOptions.PadV2Options, + BuiltinOperator.BuiltinOperator.PADV2, + ) + + def gen_tflite(self, builder: flatbuffers.Builder): + PadV2Options.Start(builder) + + return PadV2Options.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/pow_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/pow_options.py new file mode 100755 index 00000000000..c87204feeaf --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/pow_options.py @@ -0,0 +1,24 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +import flatbuffers + +from executorch.backends.nxp.backend.ir.lib.tflite import PowOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions +from executorch.backends.nxp.backend.ir.tflite_generator.meta import meta + + +class Pow(meta.BuiltinOptions): + + def __init__(self) -> None: + super().__init__(BuiltinOptions.PowOptions, BuiltinOperator.POW) + + def gen_tflite(self, builder: flatbuffers.Builder): + PowOptions.Start(builder) + + return PowOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/quantize_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/quantize_options.py new file mode 100755 index 00000000000..e80e642a09f --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/quantize_options.py @@ -0,0 +1,30 @@ +# Copyright 2023 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +""" + quantize_options + + Representation of a TFLite operator 'Quantize'. +""" + +import executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator as libBuiltinOperator +import executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions as libBuiltinOptions +import executorch.backends.nxp.backend.ir.lib.tflite.QuantizeOptions as libQuantizeOptions +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb + + +class Quantize(meta.BuiltinOptions): + + def __init__(self) -> None: + super().__init__( + libBuiltinOptions.BuiltinOptions.QuantizeOptions, + libBuiltinOperator.BuiltinOperator.QUANTIZE, + ) + + def gen_tflite(self, builder: fb.Builder): + libQuantizeOptions.Start(builder) + + return libQuantizeOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/range_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/range_options.py new file mode 100755 index 00000000000..aaeed7f87b2 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/range_options.py @@ -0,0 +1,23 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import RangeOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions + + +class Range(meta.BuiltinOptions): + + def __init__(self) -> None: + super().__init__(BuiltinOptions.RangeOptions, BuiltinOperator.RANGE) + + def gen_tflite(self, builder: fb.Builder): + RangeOptions.Start(builder) + + return RangeOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/reduce_max_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/reduce_max_options.py new file mode 100755 index 00000000000..6becf2069c4 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/reduce_max_options.py @@ -0,0 +1,28 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +import flatbuffers as fb + +from executorch.backends.nxp.backend.ir.lib.tflite import ReducerOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions +from executorch.backends.nxp.backend.ir.tflite_generator.meta import meta + + +class ReduceMax(meta.BuiltinOptions): + keep_dims: bool + + def __init__(self, keep_dims: bool) -> None: + super().__init__(BuiltinOptions.ReducerOptions, BuiltinOperator.REDUCE_MAX) + self.keep_dims = keep_dims + + def gen_tflite(self, builder: fb.Builder): + ReducerOptions.Start(builder) + + ReducerOptions.AddKeepDims(builder, self.keep_dims) + + return ReducerOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/reduce_min_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/reduce_min_options.py new file mode 100755 index 00000000000..1b36203fefc --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/reduce_min_options.py @@ -0,0 +1,28 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import flatbuffers as fb + +from executorch.backends.nxp.backend.ir.lib.tflite import ReducerOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions +from executorch.backends.nxp.backend.ir.tflite_generator.meta import meta + + +class ReduceMin(meta.BuiltinOptions): + keep_dims: bool + + def __init__(self, keep_dims: bool) -> None: + super().__init__(BuiltinOptions.ReducerOptions, BuiltinOperator.REDUCE_MIN) + self.keep_dims = keep_dims + + def gen_tflite(self, builder: fb.Builder): + ReducerOptions.Start(builder) + + ReducerOptions.AddKeepDims(builder, self.keep_dims) + + return ReducerOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/reduce_prod_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/reduce_prod_options.py new file mode 100755 index 00000000000..102e2b261ac --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/reduce_prod_options.py @@ -0,0 +1,28 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
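
Mean, ReduceMax, and ReduceMin above, together with ReduceProd and Sum further below, all serialize the identical ReducerOptions table; only the BuiltinOperator value differs. A hypothetical refactoring sketch showing how that duplication could be factored into a shared base (ReducerBase is an illustrative name, not part of this patch):

import flatbuffers as fb

from executorch.backends.nxp.backend.ir.lib.tflite import ReducerOptions
from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import BuiltinOperator
from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions
from executorch.backends.nxp.backend.ir.tflite_generator.meta import meta


class ReducerBase(meta.BuiltinOptions):
    """Hypothetical common base for the reducer-style option classes."""

    def __init__(self, operator: BuiltinOperator, keep_dims: bool) -> None:
        super().__init__(BuiltinOptions.ReducerOptions, operator)
        self.keep_dims = keep_dims

    def gen_tflite(self, builder: fb.Builder):
        ReducerOptions.Start(builder)
        ReducerOptions.AddKeepDims(builder, self.keep_dims)
        return ReducerOptions.End(builder)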
+ +import flatbuffers as fb + +from executorch.backends.nxp.backend.ir.lib.tflite import ReducerOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions +from executorch.backends.nxp.backend.ir.tflite_generator.meta import meta + + +class ReduceProd(meta.BuiltinOptions): + keep_dims: bool + + def __init__(self, keep_dims: bool) -> None: + super().__init__(BuiltinOptions.ReducerOptions, BuiltinOperator.REDUCE_PROD) + self.keep_dims = keep_dims + + def gen_tflite(self, builder: fb.Builder): + ReducerOptions.Start(builder) + + ReducerOptions.AddKeepDims(builder, self.keep_dims) + + return ReducerOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/reshape_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/reshape_options.py new file mode 100755 index 00000000000..800bd645b8a --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/reshape_options.py @@ -0,0 +1,52 @@ +# +# Copyright 2023 Martin Pavella +# +# License: MIT +# See the LICENSE_MIT for more details. +# +""" + Reshape + +Representation of the TFLite operator 'Reshape'. +""" + +from typing import List, Optional + +import executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator as libBuiltinOperator +import executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions as libBuiltinOptions +import executorch.backends.nxp.backend.ir.lib.tflite.ReshapeOptions as libReshapeOptions +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta + +import flatbuffers as fb + + +class NewShape(meta.IntVector): + def __init__(self, new_shape: List[int]) -> None: + super().__init__(new_shape, libReshapeOptions.StartNewShapeVector) + + +class Reshape(meta.BuiltinOptions): + new_shape: Optional[NewShape] + + def __init__(self, new_shape: Optional[List[int]]) -> None: + super().__init__( + libBuiltinOptions.BuiltinOptions.ReshapeOptions, + libBuiltinOperator.BuiltinOperator.RESHAPE, + ) + if new_shape is not None: + self.new_shape = NewShape(new_shape) + else: + self.new_shape = None + + def gen_tflite(self, builder: fb.Builder): + if self.new_shape is not None: + tfl_new_shape = self.new_shape.gen_tflite(builder) + else: + tfl_new_shape = None + + libReshapeOptions.Start(builder) + + if tfl_new_shape is not None: + libReshapeOptions.AddNewShape(builder, tfl_new_shape) + + return libReshapeOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/resize_bilinear_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/resize_bilinear_options.py new file mode 100755 index 00000000000..9630941a28a --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/resize_bilinear_options.py @@ -0,0 +1,34 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
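
The Reshape class above illustrates the ordering rule for optional flatbuffers vectors: a child vector must be serialized before Start() is called on the parent table, since flatbuffers forbids nested table construction. A short usage sketch (the builder size is an arbitrary assumption):

import flatbuffers as fb

from executorch.backends.nxp.backend.ir.tflite_generator.builtin_options.reshape_options import (
    Reshape,
)

builder = fb.Builder(256)
with_shape = Reshape(new_shape=[1, -1]).gen_tflite(builder)  # vector first, then the table
without_shape = Reshape(new_shape=None).gen_tflite(builder)  # table with the field omitted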
+ +import flatbuffers as fb + +from executorch.backends.nxp.backend.ir.lib.tflite import ResizeBilinearOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions +from executorch.backends.nxp.backend.ir.tflite_generator.meta import meta + + +# noinspection SpellCheckingInspection +class ResizeBilinear(meta.BuiltinOptions): + align_corners: bool + half_pixel_centers: bool + + def __init__(self, align_corners: bool, half_pixel_centers: bool) -> None: + super().__init__( + BuiltinOptions.ResizeBilinearOptions, BuiltinOperator.RESIZE_BILINEAR + ) + self.align_corners = align_corners + self.half_pixel_centers = half_pixel_centers + + def gen_tflite(self, builder: fb.Builder): + ResizeBilinearOptions.Start(builder) + + ResizeBilinearOptions.AddAlignCorners(builder, self.align_corners) + ResizeBilinearOptions.AddHalfPixelCenters(builder, self.half_pixel_centers) + + return ResizeBilinearOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/resize_nearest_neighbor_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/resize_nearest_neighbor_options.py new file mode 100755 index 00000000000..8837aad0bde --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/resize_nearest_neighbor_options.py @@ -0,0 +1,37 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import flatbuffers as fb + +from executorch.backends.nxp.backend.ir.lib.tflite import ResizeNearestNeighborOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions +from executorch.backends.nxp.backend.ir.tflite_generator.meta import meta + + +# noinspection SpellCheckingInspection +class ResizeNearestNeighbor(meta.BuiltinOptions): + align_corners: bool + half_pixel_centers: bool + + def __init__(self, align_corners: bool, half_pixel_centers: bool) -> None: + super().__init__( + BuiltinOptions.ResizeNearestNeighborOptions, + BuiltinOperator.RESIZE_NEAREST_NEIGHBOR, + ) + self.align_corners = align_corners + self.half_pixel_centers = half_pixel_centers + + def gen_tflite(self, builder: fb.Builder): + ResizeNearestNeighborOptions.Start(builder) + + ResizeNearestNeighborOptions.AddAlignCorners(builder, self.align_corners) + ResizeNearestNeighborOptions.AddHalfPixelCenters( + builder, self.half_pixel_centers + ) + + return ResizeNearestNeighborOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/reverse_sequence_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/reverse_sequence_options.py new file mode 100755 index 00000000000..0896f5137e5 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/reverse_sequence_options.py @@ -0,0 +1,32 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import ReverseSequenceOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions + + +class ReverseSequence(meta.BuiltinOptions): + seq_dim: int + batch_dim: int + + def __init__(self, seq_dim: int, batch_dim: int) -> None: + super().__init__( + BuiltinOptions.ReverseSequenceOptions, BuiltinOperator.REVERSE_SEQUENCE + ) + self.seq_dim = seq_dim + self.batch_dim = batch_dim + + def gen_tflite(self, builder: fb.Builder): + ReverseSequenceOptions.Start(builder) + + ReverseSequenceOptions.AddSeqDim(builder, self.seq_dim) + ReverseSequenceOptions.AddBatchDim(builder, self.batch_dim) + + return ReverseSequenceOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/scatter_nd_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/scatter_nd_options.py new file mode 100755 index 00000000000..c32805d7604 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/scatter_nd_options.py @@ -0,0 +1,23 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import ScatterNdOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions + + +class ScatterND(meta.BuiltinOptions): + + def __init__(self) -> None: + super().__init__(BuiltinOptions.ScatterNdOptions, BuiltinOperator.SCATTER_ND) + + def gen_tflite(self, builder: fb.Builder): + ScatterNdOptions.Start(builder) + + return ScatterNdOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/select_v2_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/select_v2_options.py new file mode 100755 index 00000000000..dd862a83dd2 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/select_v2_options.py @@ -0,0 +1,24 @@ +# Copyright 2023 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ + +import executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator as libBuiltinOperator +import executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions as libBuiltinOptions +import executorch.backends.nxp.backend.ir.lib.tflite.SelectV2Options as libSelectV2Options +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb + + +class SelectV2(meta.BuiltinOptions): + + def __init__(self) -> None: + super().__init__( + libBuiltinOptions.BuiltinOptions.SelectV2Options, + libBuiltinOperator.BuiltinOperator.SELECT_V2, + ) + + def gen_tflite(self, builder: fb.Builder): + libSelectV2Options.Start(builder) + return libSelectV2Options.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/shape_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/shape_options.py new file mode 100755 index 00000000000..b6d4e5ac7b4 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/shape_options.py @@ -0,0 +1,34 @@ +# Copyright 2023 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +""" + shape_options + + Representation of a TFLite operator 'Shape'. +""" + +import executorch.backends.nxp.backend.ir.lib.tflite.ShapeOptions as libShapeOptions +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions +from executorch.backends.nxp.backend.ir.lib.tflite.TensorType import TensorType + + +class Shape(meta.BuiltinOptions): + out_type: TensorType + + def __init__(self, out_type: TensorType) -> None: + super().__init__(BuiltinOptions.ShapeOptions, BuiltinOperator.SHAPE) + self.out_type = out_type + + def gen_tflite(self, builder: fb.Builder): + libShapeOptions.Start(builder) + + libShapeOptions.AddOutType(builder, self.out_type) + + return libShapeOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/sign_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/sign_options.py new file mode 100755 index 00000000000..8881dc0a910 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/sign_options.py @@ -0,0 +1,22 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import SignOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions + + +class Sign(meta.BuiltinOptions): + def __init__(self) -> None: + super().__init__(BuiltinOptions.SignOptions, BuiltinOperator.SIGN) + + def gen_tflite(self, builder: fb.Builder): + SignOptions.Start(builder) + + return SignOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/slice_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/slice_options.py new file mode 100755 index 00000000000..f5e8ef7d43a --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/slice_options.py @@ -0,0 +1,29 @@ +# Copyright 2023 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +""" + slice_options + +Representation of the TFLite operator 'Slice'. +""" + +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import ( + SliceOptions as libSliceOptions, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions + + +class Slice(meta.BuiltinOptions): + def __init__(self) -> None: + super().__init__(BuiltinOptions.SliceOptions, BuiltinOperator.SLICE) + + def gen_tflite(self, builder: fb.Builder): + libSliceOptions.Start(builder) + return libSliceOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/softmax_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/softmax_options.py new file mode 100755 index 00000000000..3001f659d40 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/softmax_options.py @@ -0,0 +1,35 @@ +# +# Copyright 2023 Martin Pavella +# +# License: MIT +# See the LICENSE_MIT for more details. +# +""" + Softmax + +Representation of the TFLite operator 'Softmax'. 
+""" + +import executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator as libBuiltinOperator +import executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions as libBuiltinOptions +import executorch.backends.nxp.backend.ir.lib.tflite.SoftmaxOptions as libSoftmaxOptions +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb + + +class Softmax(meta.BuiltinOptions): + beta: float + + def __init__(self, beta: float) -> None: + super().__init__( + libBuiltinOptions.BuiltinOptions.SoftmaxOptions, + libBuiltinOperator.BuiltinOperator.SOFTMAX, + ) + self.beta = beta + + def gen_tflite(self, builder: fb.Builder): + libSoftmaxOptions.Start(builder) + + libSoftmaxOptions.AddBeta(builder, self.beta) + + return libSoftmaxOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/space_to_depth_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/space_to_depth_options.py new file mode 100755 index 00000000000..b6112247dad --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/space_to_depth_options.py @@ -0,0 +1,29 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import SpaceToDepthOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions + + +class SpaceToDepth(meta.BuiltinOptions): + block_size: int + + def __init__(self, block_size: int) -> None: + super().__init__( + BuiltinOptions.SpaceToDepthOptions, BuiltinOperator.SPACE_TO_DEPTH + ) + self.block_size = block_size + + def gen_tflite(self, builder: fb.Builder): + SpaceToDepthOptions.Start(builder) + + SpaceToDepthOptions.SpaceToDepthOptionsAddBlockSize(builder, self.block_size) + + return SpaceToDepthOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/split_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/split_options.py new file mode 100755 index 00000000000..e975af881f1 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/split_options.py @@ -0,0 +1,27 @@ +# Copyright 2023 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +import executorch.backends.nxp.backend.ir.lib.tflite.SplitOptions as libSplitOptions +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions +from executorch.backends.nxp.backend.ir.tflite_generator.meta import meta + + +class Split(meta.BuiltinOptions): + num_splits: int + + def __init__(self, num_splits: int) -> None: + super().__init__(BuiltinOptions.SplitOptions, BuiltinOperator.SPLIT) + self.num_splits = num_splits + + def gen_tflite(self, builder: fb.Builder): + libSplitOptions.Start(builder) + + libSplitOptions.AddNumSplits(builder, self.num_splits) + + return libSplitOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/split_v_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/split_v_options.py new file mode 100755 index 00000000000..9f6751d6944 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/split_v_options.py @@ -0,0 +1,27 @@ +# Copyright 2023 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import executorch.backends.nxp.backend.ir.lib.tflite.SplitVOptions as libSplitVOptions +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions +from executorch.backends.nxp.backend.ir.tflite_generator.meta import meta + + +class SplitV(meta.BuiltinOptions): + num_splits: int + + def __init__(self, num_splits: int) -> None: + super().__init__(BuiltinOptions.SplitVOptions, BuiltinOperator.SPLIT_V) + self.num_splits = num_splits + + def gen_tflite(self, builder: fb.Builder): + libSplitVOptions.Start(builder) + + libSplitVOptions.AddNumSplits(builder, self.num_splits) + + return libSplitVOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/square_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/square_options.py new file mode 100755 index 00000000000..327e161c167 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/square_options.py @@ -0,0 +1,23 @@ +# Copyright 2023 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
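
Split and SplitV above mirror the TFLite schema: both record only num_splits in their options, while the split axis (and, for SplitV, the per-output sizes) are carried as input tensors of the operator rather than as option fields. A brief sketch, with the surrounding operator wiring omitted:

from executorch.backends.nxp.backend.ir.tflite_generator.builtin_options.split_options import Split
from executorch.backends.nxp.backend.ir.tflite_generator.builtin_options.split_v_options import (
    SplitV,
)

even = Split(num_splits=2)     # the split axis comes from an input tensor of the operator
ragged = SplitV(num_splits=3)  # size_splits and axis also come from input tensors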
+ +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import ( + SquareOptions as libSquareOptions, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions + + +class Square(meta.BuiltinOptions): + def __init__(self) -> None: + super().__init__(BuiltinOptions.SquareOptions, BuiltinOperator.SQUARE) + + def gen_tflite(self, builder: fb.Builder): + libSquareOptions.Start(builder) + return libSquareOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/squared_difference_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/squared_difference_options.py new file mode 100755 index 00000000000..1b073782b93 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/squared_difference_options.py @@ -0,0 +1,25 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import SquaredDifferenceOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions + + +class SquaredDifference(meta.BuiltinOptions): + + def __init__(self) -> None: + super().__init__( + BuiltinOptions.SquaredDifferenceOptions, BuiltinOperator.SQUARED_DIFFERENCE + ) + + def gen_tflite(self, builder: fb.Builder): + SquaredDifferenceOptions.Start(builder) + + return SquaredDifferenceOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/squeeze_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/squeeze_options.py new file mode 100755 index 00000000000..4718917fc60 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/squeeze_options.py @@ -0,0 +1,42 @@ +# Copyright 2023 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+
+import executorch.backends.nxp.backend.ir.lib.tflite.SqueezeOptions as libSqueezeOptions
+import flatbuffers as fb
+from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import (
+    BuiltinOperator,
+)
+from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions
+from executorch.backends.nxp.backend.ir.tflite_generator.meta import meta
+
+
+class SqueezeDims(meta.IntVector):
+    def __init__(self, squeeze_dims: list[int]) -> None:
+        super().__init__(squeeze_dims, libSqueezeOptions.StartSqueezeDimsVector)
+
+
+class Squeeze(meta.BuiltinOptions):
+    squeeze_dims: SqueezeDims | None
+
+    def __init__(self, squeeze_dims: list[int] | None) -> None:
+        super().__init__(BuiltinOptions.SqueezeOptions, BuiltinOperator.SQUEEZE)
+
+        if squeeze_dims is not None:
+            self.squeeze_dims = SqueezeDims(squeeze_dims)
+        else:
+            self.squeeze_dims = None
+
+    def gen_tflite(self, builder: fb.Builder):
+        if self.squeeze_dims is not None:
+            tfl_squeeze_dims = self.squeeze_dims.gen_tflite(builder)
+        else:
+            tfl_squeeze_dims = None
+
+        libSqueezeOptions.Start(builder)
+
+        if tfl_squeeze_dims is not None:
+            libSqueezeOptions.AddSqueezeDims(builder, tfl_squeeze_dims)
+
+        return libSqueezeOptions.End(builder)
diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/strided_slice_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/strided_slice_options.py
new file mode 100755
index 00000000000..6f04b7e1db3
--- /dev/null
+++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/strided_slice_options.py
@@ -0,0 +1,35 @@
+# Copyright 2023 NXP
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""
+    strided_slice_options
+
+Representation of the TFLite operator 'StridedSlice'.
+"""
+
+import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta
+import flatbuffers as fb
+from executorch.backends.nxp.backend.ir.lib.tflite import (
+    StridedSliceOptions as libStridedSliceOptions,
+)
+from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import (
+    BuiltinOperator,
+)
+from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions
+
+
+class StridedSlice(meta.BuiltinOptions):
+    offset: bool
+
+    def __init__(self, offset: bool = False) -> None:
+        super().__init__(
+            BuiltinOptions.StridedSliceOptions, BuiltinOperator.STRIDED_SLICE
+        )
+        self.offset = offset
+
+    def gen_tflite(self, builder: fb.Builder):
+        libStridedSliceOptions.Start(builder)
+        libStridedSliceOptions.AddOffset(builder, self.offset)
+        return libStridedSliceOptions.End(builder)
diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/sub_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/sub_options.py
new file mode 100755
index 00000000000..16dcd1e64ab
--- /dev/null
+++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/sub_options.py
@@ -0,0 +1,45 @@
+#
+# Copyright 2023 Martin Pavella
+#
+# License: MIT
+# See the LICENSE_MIT for more details.
+#
+"""
+    Sub
+
+Representation of the TFLite operator 'Sub'.
+""" + +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import ( + ActivationFunctionType as libActivationFunctionType, + BuiltinOperator as libBuiltinOperator, + BuiltinOptions as libBuiltinOptions, + SubOptions as libSubOptions, +) + + +class Sub(meta.BuiltinOptions): + fused_activation_function: libActivationFunctionType.ActivationFunctionType + + # TODO potScaleInt16 + + def __init__( + self, + fused_activation_function: libActivationFunctionType.ActivationFunctionType = libActivationFunctionType.ActivationFunctionType.NONE, + ) -> None: + super().__init__( + libBuiltinOptions.BuiltinOptions.SubOptions, + libBuiltinOperator.BuiltinOperator.SUB, + ) + self.fused_activation_function = fused_activation_function + + def gen_tflite(self, builder: fb.Builder): + libSubOptions.Start(builder) + + libSubOptions.AddFusedActivationFunction( + builder, self.fused_activation_function + ) + + return libSubOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/sum_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/sum_options.py new file mode 100755 index 00000000000..f309e5f7f54 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/sum_options.py @@ -0,0 +1,28 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import flatbuffers as fb + +from executorch.backends.nxp.backend.ir.lib.tflite import ReducerOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions +from executorch.backends.nxp.backend.ir.tflite_generator.meta import meta + + +class Sum(meta.BuiltinOptions): + keep_dims: bool + + def __init__(self, keep_dims: bool) -> None: + super().__init__(BuiltinOptions.ReducerOptions, BuiltinOperator.SUM) + self.keep_dims = keep_dims + + def gen_tflite(self, builder: fb.Builder): + ReducerOptions.Start(builder) + + ReducerOptions.AddKeepDims(builder, self.keep_dims) + + return ReducerOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/tile_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/tile_options.py new file mode 100755 index 00000000000..92ba780de42 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/tile_options.py @@ -0,0 +1,23 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite import TileOptions +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions + + +class Tile(meta.BuiltinOptions): + + def __init__(self) -> None: + super().__init__(BuiltinOptions.TileOptions, BuiltinOperator.TILE) + + def gen_tflite(self, builder: fb.Builder): + TileOptions.Start(builder) + + return TileOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/transpose_conv_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/transpose_conv_options.py new file mode 100755 index 00000000000..87bc8ea3639 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/transpose_conv_options.py @@ -0,0 +1,50 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import executorch.backends.nxp.backend.ir.lib.tflite.TransposeConvOptions as libTransposeConvOptions +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite.ActivationFunctionType import ( + ActivationFunctionType, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions +from executorch.backends.nxp.backend.ir.lib.tflite.Padding import Padding + + +class TransposeConv(meta.BuiltinOptions): + padding: Padding + stride_w: int + stride_h: int + fused_activation_function: ActivationFunctionType + + def __init__( + self, + padding: Padding = Padding.SAME, + stride_w: int = 1, + stride_h: int = 1, + fused_activation_function: ActivationFunctionType = ActivationFunctionType.NONE, + ) -> None: + super().__init__( + BuiltinOptions.TransposeConvOptions, BuiltinOperator.TRANSPOSE_CONV + ) + self.padding = padding + self.stride_w = stride_w + self.stride_h = stride_h + self.fused_activation_function = fused_activation_function + + def gen_tflite(self, builder: fb.Builder): + libTransposeConvOptions.Start(builder) + + libTransposeConvOptions.AddPadding(builder, self.padding) + libTransposeConvOptions.AddStrideW(builder, self.stride_w) + libTransposeConvOptions.AddStrideH(builder, self.stride_h) + libTransposeConvOptions.AddFusedActivationFunction( + builder, self.fused_activation_function + ) + + return libTransposeConvOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/transpose_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/transpose_options.py new file mode 100755 index 00000000000..5869b1ed315 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/transpose_options.py @@ -0,0 +1,29 @@ +# +# Copyright 2023 Martin Pavella +# +# License: MIT +# See the LICENSE_MIT for more details. +# +""" + Transpose + +Representation of the TFLite operator 'Transpose'. 
+""" + +import executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator as libBuiltinOperator +import executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions as libBuiltinOptions +import executorch.backends.nxp.backend.ir.lib.tflite.TransposeOptions as libTransposeOptions +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta +import flatbuffers as fb + + +class Transpose(meta.BuiltinOptions): + def __init__(self) -> None: + super().__init__( + libBuiltinOptions.BuiltinOptions.TransposeOptions, + libBuiltinOperator.BuiltinOperator.TRANSPOSE, + ) + + def gen_tflite(self, builder: fb.Builder): + libTransposeOptions.Start(builder) + return libTransposeOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/unidirectional_sequence_lstm_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/unidirectional_sequence_lstm_options.py new file mode 100755 index 00000000000..b8921453a62 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/unidirectional_sequence_lstm_options.py @@ -0,0 +1,67 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import executorch.backends.nxp.backend.ir.lib.tflite.UnidirectionalSequenceLSTMOptions as libUSLSTMOptions +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite.ActivationFunctionType import ( + ActivationFunctionType, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions +from executorch.backends.nxp.backend.ir.tflite_generator.meta import meta + + +class UnidirectionalSequenceLSTM(meta.BuiltinOptions): + fused_activation_function: ActivationFunctionType + cell_clip: float + proj_clip: float + time_major: bool # If True, the first dimension is sequence, otherwise batch. 
+ + # Unidirectional Sequence LSTM v3+ + asymmetric_quantize_inputs: bool + + # Unidirectional Sequence LSTM v4+ + diagonal_recurrent_tensors: bool + + def __init__( + self, + cell_clip: float, + proj_clip: float, + time_major: bool = True, + asymmetric_quantize_inputs: bool = False, + diagonal_recurrent_tensors: bool = False, + fused_activation_function: ActivationFunctionType = ActivationFunctionType.NONE, + ) -> None: + super().__init__( + BuiltinOptions.UnidirectionalSequenceLSTMOptions, + BuiltinOperator.UNIDIRECTIONAL_SEQUENCE_LSTM, + ) + + self.fused_activation_function = fused_activation_function + self.cell_clip = cell_clip + self.proj_clip = proj_clip + self.time_major = time_major + self.asymmetric_quantize_inputs = asymmetric_quantize_inputs + self.diagonal_recurrent_tensors = diagonal_recurrent_tensors + + def gen_tflite(self, builder: fb.Builder): + libUSLSTMOptions.Start(builder) + + libUSLSTMOptions.AddFusedActivationFunction( + builder, self.fused_activation_function + ) + libUSLSTMOptions.AddCellClip(builder, self.cell_clip) + libUSLSTMOptions.AddProjClip(builder, self.proj_clip) + libUSLSTMOptions.AddTimeMajor(builder, self.time_major) + libUSLSTMOptions.AddAsymmetricQuantizeInputs( + builder, self.asymmetric_quantize_inputs + ) + libUSLSTMOptions.AddDiagonalRecurrentTensors( + builder, self.diagonal_recurrent_tensors + ) + + return libUSLSTMOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/builtin_options/unidirectional_sequence_rnn_options.py b/backends/nxp/backend/ir/tflite_generator/builtin_options/unidirectional_sequence_rnn_options.py new file mode 100755 index 00000000000..30e16678a36 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/builtin_options/unidirectional_sequence_rnn_options.py @@ -0,0 +1,49 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import executorch.backends.nxp.backend.ir.lib.tflite.SequenceRNNOptions as libUSRNNOptions +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite.ActivationFunctionType import ( + ActivationFunctionType, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions +from executorch.backends.nxp.backend.ir.tflite_generator.meta import meta + + +class UnidirectionalSequenceRNN(meta.BuiltinOptions): + time_major: bool # If True, the first dimension is sequence, otherwise batch. 
+ fused_activation_function: ActivationFunctionType + asymmetric_quantize_inputs: bool + + def __init__( + self, + time_major: bool = True, + asymmetric_quantize_inputs: bool = False, + fused_activation_function: ActivationFunctionType = ActivationFunctionType.NONE, + ) -> None: + super().__init__( + BuiltinOptions.SequenceRNNOptions, + BuiltinOperator.UNIDIRECTIONAL_SEQUENCE_RNN, + ) + + self.time_major = time_major + self.fused_activation_function = fused_activation_function + self.asymmetric_quantize_inputs = asymmetric_quantize_inputs + + def gen_tflite(self, builder: fb.Builder): + libUSRNNOptions.Start(builder) + + libUSRNNOptions.AddTimeMajor(builder, self.time_major) + libUSRNNOptions.AddFusedActivationFunction( + builder, self.fused_activation_function + ) + libUSRNNOptions.AddAsymmetricQuantizeInputs( + builder, self.asymmetric_quantize_inputs + ) + + return libUSRNNOptions.End(builder) diff --git a/backends/nxp/backend/ir/tflite_generator/custom_options/flex_transpose_options.py b/backends/nxp/backend/ir/tflite_generator/custom_options/flex_transpose_options.py new file mode 100755 index 00000000000..fa0624cb852 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/custom_options/flex_transpose_options.py @@ -0,0 +1,78 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from executorch.backends.nxp.backend.ir.tflite_generator.meta.meta import CustomOptions + + +class FlexTranspose(CustomOptions): + + def __init__(self) -> None: + super().__init__( + "FlexTranspose", + bytearray( + [ + 9, + 84, + 114, + 97, + 110, + 115, + 112, + 111, + 115, + 101, + 0, + 39, + 18, + 9, + 84, + 114, + 97, + 110, + 115, + 112, + 111, + 115, + 101, + 26, + 0, + 26, + 0, + 42, + 11, + 10, + 5, + 84, + 112, + 101, + 114, + 109, + 18, + 2, + 48, + 3, + 42, + 7, + 10, + 1, + 84, + 18, + 2, + 48, + 1, + 50, + 0, + 0, + 2, + 52, + 42, + 20, + 20, + 4, + 40, + 1, + ] + ), + ) diff --git a/backends/nxp/backend/ir/tflite_generator/meta/__init__.py b/backends/nxp/backend/ir/tflite_generator/meta/__init__.py new file mode 100755 index 00000000000..e69de29bb2d diff --git a/backends/nxp/backend/ir/tflite_generator/meta/meta.py b/backends/nxp/backend/ir/tflite_generator/meta/meta.py new file mode 100755 index 00000000000..bfa48744351 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/meta/meta.py @@ -0,0 +1,255 @@ +# +# Copyright 2023 Martin Pavella +# Copyright 2024 NXP +# +# License: MIT +# See the LICENSE_MIT for more details. +# +""" + meta + +Implementations of classes that all classes in /src/tflite_generator/ inherit from. +""" +import logging +from typing import Callable, Iterator, List, Union + +import executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator as bOp +import executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions as bOpt + +import flatbuffers as fb + +logger = logging.getLogger(__name__) + +""" This file contains parent classes for simple classes used in the '/model' directory. """ + + +class TFLiteObject: + """Parent class for all tflite objects. That is all objects in the 'tflite_generator' directory.""" + + """ Generates tflite representation for this object. MUST be overridden! """ + + def gen_tflite(self, builder: fb.Builder) -> int: + logger.warning("TFLiteObject: genTFLite() is not defined!") + return 0 + + +class TFLiteVector(TFLiteObject): + """Represents a TFLite vector of TFLiteObjects. 
Provides interface for storing data + and generating output TFLite code.""" + + vector: List[Union[TFLiteObject, int, float, bool]] + + """ Indicates if an empty vector should be generated if 'vector' attribute is + empty, or to not generate anything in that case. """ + gen_empty: bool = True + + """ TFLite 'Start...Vector' function for the exact vector. Takes 2 arguments, + 'flatbuffers.Builder' and number of vector elements """ + start_function: Callable[[fb.Builder, int], None] + + """ TFLite 'Prepend...' function for the exact vector item type. Takes 'flatbuffers.Builder' + as argument """ + prepend_function: Callable[[fb.Builder], Callable[[int], None]] + + def __init__( + self, + vector: List[Union[TFLiteObject, int, float, bool]], + start_function: Callable[[fb.Builder, int], None], + prepend_function: Callable[ + [fb.Builder], Callable[[int], None] + ] = lambda builder: builder.PrependUOffsetTRelative, + gen_empty: bool = True, + ) -> None: + if vector is None: + vector = [] + self.vector = vector + self.start_function = start_function + self.prepend_function = prepend_function + self.gen_empty = gen_empty + + def append(self, item): + self.vector.append(item) + + def insert(self, index: int, item): + self.vector.insert(index, item) + + def index(self, item) -> int: + return self.vector.index(item) + + def remove(self, item): + self.vector.remove(item) + + def get(self, index: int): + return self.vector[index] + + def get_last(self): + if len(self.vector) > 0: + return self.vector[-1] + return None + + def len(self): + return self.vector.__len__() + + def __str__(self): + return self.vector.__str__() + + def __iter__(self) -> Iterator: + return self.vector.__iter__() + + def __getitem__(self, index): + return self.vector[index] + + def gen_tflite(self, builder: fb.Builder): + """Generates TFLite code for the vector""" + + if (not self.gen_empty) and (len(self.vector) == 0): + # Nothing to generate + return + + # IMPORTANT! tflite MUST be generated for list items in REVERSE ORDER! + # Otherwise, the order will be wrong. + tfl_vector = [item.gen_tflite(builder) for item in reversed(self.vector)] + + self.start_function(builder, len(self.vector)) + + for tfl_item in tfl_vector: + self.prepend_function(builder)(tfl_item) + + return builder.EndVector() + + +class TFLiteAtomicVector(TFLiteVector): + def __init__( + self, + vector: List[Union[int, float, bool]], + start_function: Callable[[fb.Builder, int], None], + prepend_function: Callable[[fb.Builder], Callable[[int], None]], + gen_empty: bool = True, + ) -> None: + super().__init__(vector, start_function, prepend_function, gen_empty) + + def __eq__(self, other): + return self.vector == other.vector + + def gen_tflite(self, builder: fb.Builder): + """Generates TFLite code for the vector""" + + if (not self.gen_empty) and (len(self.vector) == 0): + # Nothing to generate + return + + self.start_function(builder, len(self.vector)) + + # IMPORTANT! tflite MUST be generated for list items in REVERSE ORDER! + # Otherwise, the order will be wrong. + for val in reversed(self.vector): + self.prepend_function(builder)(val) + + return builder.EndVector() + + +class FloatVector(TFLiteAtomicVector): + """Class represents a TFLite vector of float values. 
+    Provides interface for storing data
+    and generating output TFLite code."""
+
+    def __init__(
+        self,
+        float_list: List[float],
+        start_function: Callable[[fb.Builder, int], None],
+        prepend_function: Callable[
+            [fb.Builder], Callable[[int], None]
+        ] = lambda builder: builder.PrependFloat32,
+        gen_empty: bool = True,
+    ) -> None:
+        super().__init__(float_list, start_function, prepend_function, gen_empty)
+
+
+class IntVector(TFLiteAtomicVector):
+    """Class represents a TFLite vector of integer values. Provides interface for storing data
+    and generating output TFLite code."""
+
+    vector: List[int]
+
+    def __init__(
+        self,
+        int_list: List[int],
+        start_function: Callable[[fb.Builder, int], None],
+        prepend_function: Callable[
+            [fb.Builder], Callable[[int], None]
+        ] = lambda builder: builder.PrependInt32,
+        gen_empty: bool = True,
+    ) -> None:
+        super().__init__(int_list, start_function, prepend_function, gen_empty)
+
+
+class BoolVector(TFLiteAtomicVector):
+    """Class represents a TFLite vector of boolean values. Provides interface for storing data
+    and generating output TFLite code."""
+
+    vector: List[bool]
+
+    def __init__(
+        self,
+        bool_list: List[bool],
+        start_function: Callable[[fb.Builder, int], None],
+        prepend_function: Callable[
+            [fb.Builder], Callable[[int], None]
+        ] = lambda builder: builder.PrependBool,
+        gen_empty: bool = True,
+    ) -> None:
+        super().__init__(bool_list, start_function, prepend_function, gen_empty)
+
+
+class BuiltinOptions(TFLiteObject):
+    """Class represents 'BuiltinOptions' for an Operator. Used in 'model/Operators.py'.
+    Provides an interface for working with any 'BuiltinOptions' table.
+    This class alone does NOT generate any TFLite.
+    Subclasses do NOT generate TFLite for the 'builtin_options_type', only for the exact options.
+    'builtin_options_type' is merely stored here for convenience, and an 'Operator' object
+    generates its TFLite representation (as it is the child of the 'operator' table in 'operators').
+    """
+
+    """ The type of parameters of this operator. """
+    builtin_options_type: bOpt.BuiltinOptions
+
+    """ The type of this operator. """
+    operator_type: bOp.BuiltinOperator
+
+    def __init__(
+        self,
+        builtin_options_type: bOpt.BuiltinOptions,
+        operator_type: bOp.BuiltinOperator,
+    ) -> None:
+        if builtin_options_type is None:
+            logger.error(
+                "TFLite: An operator inheriting from 'BuiltinOptions' MUST specify the 'builtin_options_type'!"
+            )
+        if operator_type is None:
+            logger.error(
+                "TFLite: An operator inheriting from 'BuiltinOptions' MUST specify the 'operator_type'!"
+            )
+        self.builtin_options_type = builtin_options_type
+        self.operator_type = operator_type
+
+    """ Function has to be overridden. """
+
+    def gen_tflite(self, builder: fb.Builder):
+        logger.warning(
+            f"BuiltinOperator '{self.builtin_options_type}': gen_tflite() is not defined!"
+        )
+
+
+class CustomOptions(bytearray):
+    """Class represents a `custom_options` object in the TFLite model, i.e. a bytearray form of the parameters of a
+    `custom` TFLite operator.
+
+    Currently, this is being used for `Flex Delegate` operators / `SELECT_TF_OPS`.
+ """ + + operator_type = bOp.BuiltinOperator.CUSTOM + custom_code: str + + def __init__(self, custom_code: str, data: bytearray): + super().__init__() + self.custom_code = custom_code + self[:] = data diff --git a/backends/nxp/backend/ir/tflite_generator/meta/types.py b/backends/nxp/backend/ir/tflite_generator/meta/types.py new file mode 100755 index 00000000000..34e1760602e --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/meta/types.py @@ -0,0 +1,198 @@ +# +# Copyright 2023 Martin Pavella +# Copyright 2024 NXP +# +# License: MIT +# See the LICENSE_MIT for more details. +# +""" + types + +Module contains helper functions that work with TFLite data types. +""" +from enum import Enum + +import executorch.backends.nxp.backend.ir.logger as logger + +import flatbuffers as fb +from executorch.backends.nxp.backend.ir.lib.tflite.TensorType import TensorType + +# Lists of types. Used to simplify specification of supported types in conversion modules. +FLOATS = [TensorType.FLOAT16, TensorType.FLOAT32, TensorType.FLOAT64] +INTS = [TensorType.INT8, TensorType.INT16, TensorType.INT32, TensorType.INT64] +UINTS = [TensorType.UINT8, TensorType.UINT16, TensorType.UINT32, TensorType.UINT64] +ALL_TYPES = ( + FLOATS + + INTS + + UINTS + + [TensorType.STRING, TensorType.BOOL, TensorType.COMPLEX64, TensorType.COMPLEX128] +) + + +class TensorFlowDataType(Enum): + # The DataType enum used internally by TensorFlow. + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/core/framework/types.proto#L13-L87 + + DT_INVALID = 0 + DT_FLOAT = 1 + DT_DOUBLE = 2 + DT_INT32 = 3 + DT_UINT8 = 4 + DT_INT16 = 5 + DT_INT8 = 6 + DT_STRING = 7 + DT_COMPLEX64 = 8 + DT_INT64 = 9 + DT_BOOL = 10 + DT_QINT8 = 11 + DT_QUINT8 = 12 + DT_QINT32 = 13 + DT_BFLOAT16 = 14 + DT_QINT16 = 15 + DT_QUINT16 = 16 + DT_UINT16 = 17 + DT_COMPLEX128 = 18 + DT_HALF = 19 + DT_RESOURCE = 20 + DT_VARIANT = 21 + DT_UINT32 = 22 + DT_UINT64 = 23 + DT_FLOAT8_E5M2 = 24 + DT_FLOAT8_E4M3FN = 25 + DT_INT4 = 29 + DT_UINT4 = 30 + + DT_FLOAT_REF = 101 + DT_DOUBLE_REF = 102 + DT_INT32_REF = 103 + DT_UINT8_REF = 104 + DT_INT16_REF = 105 + DT_INT8_REF = 106 + DT_STRING_REF = 107 + DT_COMPLEX64_REF = 108 + DT_INT64_REF = 109 + DT_BOOL_REF = 110 + DT_QINT8_REF = 111 + DT_QUINT8_REF = 112 + DT_QINT32_REF = 113 + DT_BFLOAT16_REF = 114 + DT_QINT16_REF = 115 + DT_QUINT16_REF = 116 + DT_UINT16_REF = 117 + DT_COMPLEX128_REF = 118 + DT_HALF_REF = 119 + DT_RESOURCE_REF = 120 + DT_VARIANT_REF = 121 + DT_UINT32_REF = 122 + DT_UINT64_REF = 123 + DT_FLOAT8_E5M2_REF = 124 + DT_FLOAT8_E4M3FN_REF = 125 + DT_INT4_REF = 129 + DT_UINT4_REF = 130 + + +def is_unsigned(data_type: TensorType) -> bool: + return data_type in { + TensorType.UINT8, + TensorType.UINT16, + TensorType.UINT32, + TensorType.UINT64, + } + + +def is_signed(data_type: TensorType) -> bool: + return data_type in { + TensorType.INT8, + TensorType.INT16, + TensorType.INT32, + TensorType.INT64, + } + + +def name_for_type(data_type: TensorType) -> str: + """Return the name of given TFLite data type.""" + names = [ + "FLOAT32", + "FLOAT16", + "INT32", + "UINT8", + "INT64", + "STRING", + "BOOL", + "INT16", + "COMPLEX64", + "INT8", + "FLOAT64", + "COMPLEX128", + "UINT64", + "RESOURCE", + "VARIANT", + "UINT32", + "UINT16", + "INT4", + ] + + return names[data_type] + + +def type_size(data_type: TensorType): + """Return the memory size in bytes of given TFLite data type.""" + if data_type in {TensorType.UINT8, TensorType.INT8}: + return 1 + elif data_type in {TensorType.UINT16, 
TensorType.INT16, TensorType.FLOAT16}: + return 2 + elif data_type in {TensorType.UINT32, TensorType.INT32, TensorType.FLOAT32}: + return 4 + elif data_type in { + TensorType.UINT64, + TensorType.INT64, + TensorType.FLOAT64, + TensorType.COMPLEX64, + }: + return 8 + elif data_type in {TensorType.COMPLEX128}: + return 16 + + logger.e( + logger.Code.INTERNAL_ERROR, + f"Unexpected type '{data_type}' in types.type_size().", + ) + + +def prepend_function(builder: fb.Builder, data_type: TensorType): # noqa C901 + """Return the flatbuffer 'Prepend()' function for given type.""" + if data_type == TensorType.UINT8: + return builder.PrependUint8 + elif data_type == TensorType.UINT16: + return builder.PrependUint16 + elif data_type == TensorType.UINT32: + return builder.PrependUint32 + elif data_type == TensorType.UINT64: + return builder.PrependUint64 + + elif data_type == TensorType.INT8: + return builder.PrependInt8 + elif data_type == TensorType.INT16: + return builder.PrependInt16 + elif data_type == TensorType.INT32: + return builder.PrependInt32 + elif data_type == TensorType.INT64: + return builder.PrependInt64 + + elif data_type == TensorType.FLOAT16: + logger.w( + "Flatbuffer prepend function for FLOAT16 datatype is not supported! Using default 16b alternative." + ) + return builder.PrependInt16 # Might not work + elif data_type == TensorType.FLOAT32: + return builder.PrependFloat32 + elif data_type == TensorType.FLOAT64: + return builder.PrependFloat64 + + elif data_type == TensorType.BOOL: + return builder.PrependBool + + logger.e( + logger.Code.NOT_IMPLEMENTED, + f"Unsupported flatbuffer prepend function for type '{data_type}'!", + ) diff --git a/backends/nxp/backend/ir/tflite_generator/tflite_model.py b/backends/nxp/backend/ir/tflite_generator/tflite_model.py new file mode 100755 index 00000000000..a9384861178 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_generator/tflite_model.py @@ -0,0 +1,816 @@ +# +# Copyright 2023 Martin Pavella +# Copyright 2023-2024 NXP +# +# License: MIT +# See the LICENSE_MIT for more details. 
+# + +import itertools +import logging +from typing import List, Optional + +import executorch.backends.nxp.backend.ir.lib.tflite.Buffer as libBuffer +import executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator as libBuiltinOperator +import executorch.backends.nxp.backend.ir.lib.tflite.CustomOptionsFormat as libCustomOptionsFormat +import executorch.backends.nxp.backend.ir.lib.tflite.Model as libModel +import executorch.backends.nxp.backend.ir.lib.tflite.Operator as libOperator +import executorch.backends.nxp.backend.ir.lib.tflite.OperatorCode as libOperatorCode +import executorch.backends.nxp.backend.ir.lib.tflite.QuantizationDetails as libQuantizedDetails +import executorch.backends.nxp.backend.ir.lib.tflite.QuantizationParameters as libQuantizedParameters +import executorch.backends.nxp.backend.ir.lib.tflite.SubGraph as libSubGraphs +import executorch.backends.nxp.backend.ir.lib.tflite.Tensor as libTensor +import executorch.backends.nxp.backend.ir.lib.tflite.TensorType as libTensorType +import executorch.backends.nxp.backend.ir.tflite_generator.meta.meta as meta + +import flatbuffers as fb +import numpy as np +from executorch.backends.nxp.backend.ir import tensor_formatting +from executorch.backends.nxp.backend.ir.tflite_generator.meta import types +from executorch.backends.nxp.backend.ir.tflite_generator.meta.types import name_for_type + +logger = logging.getLogger(__name__) + + +def _exactly_one_is_none(obj1: Optional, obj2: Optional): + return (obj1 is not None and obj2 is None) or (obj1 is None and obj2 is not None) + + +class Buffer(meta.TFLiteObject): + """'data' is an array of any type, but MUST have the correct 'dtype' specified!""" + + data: np.ndarray + type: libTensorType.TensorType + + """ IMPORTANT! The following attributes are used only by 'ModelBuilder' + in order to make model creation more efficient. """ + + """ Index to the 'buffers' vector. Used to assign the 'buffer' attribute of the + Tensor, this buffer belongs to.""" + tmp_index: int + + def __init__( + self, + data: np.ndarray = None, + data_type: libTensorType.TensorType = libTensorType.TensorType.INT32, + ) -> None: + self.data = data + self.type = data_type + + def __data_is_empty(self): + """Determine if the buffer data is empty.""" + return (self.data is None) or (self.data.size == 0) + + def get_prepend_function(self, builder: fb.Builder): + return types.prepend_function(builder, self.type) + + def gen_tflite(self, builder: fb.Builder): + if self.__data_is_empty(): + # If there is no data, table is empty + libBuffer.Start(builder) + return libBuffer.End(builder) + + if self.data.dtype.itemsize != 1: + # TFLite Buffer is an array of bytes. Larger datatypes must be reduced to bytes first. + self.data = np.frombuffer(self.data.tobytes(), np.uint8) + else: + # Arrays of bytes must also be flattened. + self.data = self.data.flatten() + + if self.data.dtype.kind in ["b", "i", "u", "f"]: # flatbuffers.builder line 483 + tfl_data = builder.CreateNumpyVector(self.data) + # In case of problems, see 'https://github.com/google/flatbuffers/issues/4668'. + + elif self.data.dtype.kind == "S": + # String tensor. Not sure how to handle this case. I've played around with 'builder.CreateString()' but I + # couldn't quite make it work. As it is not a priority right now, just exit with error. + logger.error( + "Generating a TFLite static string tensor is not yet supported." + ) + raise RuntimeError() + + else: + # Cannot use the 'CreateNumpyVector' method -> use specific prepend functions. 
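+            # Slow path: elements are prepended one by one, in reverse, because
+            # flatbuffers vectors are built back to front.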
+ logger.warning( + f"Creating a static TFLite tensor buffer for type '{name_for_type(self.type)}'. " + "This is not a common case and it has not been tested!" + ) + + prepend = self.get_prepend_function(builder) + + # 'data' length has to be multiplied by item size, because tflite.Buffer is a vector of type 'UINT8'. + # So e.g. one 'INT32' item will take up 4 spaces in the vector. + len_bytes = len(self.data) * types.type_size(self.type) + + libBuffer.StartDataVector(builder, len_bytes) + # Flatbuffer is built in reverse, so for correct order, data must be iterated in reverse. + for val in reversed(self.data): + prepend(val) + tfl_data = builder.EndVector() + + libBuffer.Start(builder) + libBuffer.AddData(builder, tfl_data) + + return libBuffer.End(builder) + + +class Buffers(meta.TFLiteVector): + vector: List[Buffer] + + def __init__(self, vector: List[Buffer] = None) -> None: + super().__init__(vector, libModel.StartBuffersVector) + + +class OperatorCode(meta.TFLiteObject): + """Represents an OperatorCode object, used in the vector 'operator_codes' in the model.""" + + builtin_code: libBuiltinOperator.BuiltinOperator + version: int + custom_code: str + + def __init__( + self, + builtin_code: libBuiltinOperator.BuiltinOperator, + version: int = 1, + custom_code: str = None, + ): + """ + :param builtin_code: Operator code from the 'BuiltinOperator' enum. + :param version: Operator version. Defaults to 1. + :param custom_code: Custom code name. Parameter 'builtin_code' must be set to + 'BuiltinOperator.CUSTOM' when custom code is used. + """ + self.version = version + self.builtin_code = builtin_code + self.custom_code = custom_code + + if ( + self.custom_code is not None + and builtin_code != libBuiltinOperator.BuiltinOperator.CUSTOM + ): + logger.error( + f"Attempt to use custom code with non-CUSTOM builtin code ({builtin_code})." + ) + + def gen_tflite(self, builder: fb.builder): + """Generate TFLite representation for this OperatorCode""" + if self.custom_code is not None: + custom_code = builder.CreateString(self.custom_code) + else: + custom_code = None + + libOperatorCode.Start(builder) + + # The 'deprecated_builtin_code' is a byte. Make sure it doesn't overflow. 
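+        # Opcodes above 127 only fit in the newer 32-bit 'builtin_code' field,
+        # which is written unconditionally below.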
+ # noinspection PyTypeChecker + if self.builtin_code <= 127: + libOperatorCode.AddDeprecatedBuiltinCode(builder, self.builtin_code) + + libOperatorCode.AddVersion(builder, self.version) + libOperatorCode.AddBuiltinCode(builder, self.builtin_code) + if custom_code is not None: + libOperatorCode.AddCustomCode(builder, custom_code) + return libOperatorCode.End(builder) + + +class OperatorCodes(meta.TFLiteVector): + vector: List[OperatorCode] + + def __init__(self, operator_codes: List[OperatorCode] = None) -> None: + super().__init__(operator_codes, libModel.StartOperatorCodesVector) + + +class Min(meta.FloatVector): + def __init__(self, min: List[float] = None) -> None: + super().__init__(min, libQuantizedParameters.StartMinVector, gen_empty=False) + + +class Max(meta.FloatVector): + def __init__(self, max: List[float] = None) -> None: + super().__init__(max, libQuantizedParameters.StartMaxVector, gen_empty=False) + + +class Scale(meta.FloatVector): + def __init__(self, scale: List[float] = None) -> None: + super().__init__(scale, libQuantizedParameters.StartScaleVector) + + +class ZeroPoint(meta.IntVector): + def __init__(self, zero_point: List[int] = None) -> None: + super().__init__( + zero_point, + libQuantizedParameters.StartZeroPointVector, + lambda builder: builder.PrependInt64, + ) + + +class Quantization(meta.TFLiteObject): + min: Min + max: Max + scale: Optional[Scale] + zero_point: ZeroPoint + quantized_dimension: int + details_type: libQuantizedDetails.QuantizationDetails + + # TODO details + + def __init__( + self, + min: Min = Min(), # noqa B008 + max: Max = Max(), # noqa B008 + scale: Scale = None, + zero_point: ZeroPoint = ZeroPoint([0]), # noqa B008 + quantized_dimension: int = 0, + details_type: libQuantizedDetails.QuantizationDetails = libQuantizedDetails.QuantizationDetails.NONE, + ) -> None: + self.min = min + self.max = max + self.scale = scale + self.zero_point = zero_point + self.quantized_dimension = quantized_dimension + self.details_type = details_type + + def __eq__(self, other): + if self is None and other is None: + return True + elif _exactly_one_is_none(self, other): + return False + + if _exactly_one_is_none(self.scale, other.scale): + return False + + if self.scale is not None: + if self.scale != other.scale: + return False + if self.zero_point != other.zero_point: + return False + if self.quantized_dimension != other.quantized_dimension: + return False + if self.min != other.min: + return False + if self.max != other.max: + return False + + return True + + def is_per_channel(self) -> bool: + """Determine if this quantization is per channel, instead of per tensor.""" + if (self.scale is not None and self.zero_point is not None) and ( + self.scale.len() == self.zero_point.len() + ): + return self.scale.len() > 1 + + return False + + def is_per_tensor(self) -> bool: + """Determine if this quantization is per tensor""" + if (self.scale is not None and self.zero_point is not None) and ( + self.scale.len() == self.zero_point.len() + ): + return self.scale.len() == 1 + + return False + + def gen_tflite(self, builder: fb.Builder): + # Sometimes 1D per-tensor quantized tensors can have quantized_dimension != 0 + # (residue from badly defined ONNX models). This would cause TFLite inference to crash. 
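+        # Per-tensor quantization does not depend on a dimension, so resetting it to 0 is safe.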
+ if not self.is_per_channel(): + self.quantized_dimension = 0 + + tfl_min = self.min.gen_tflite(builder) + tfl_max = self.max.gen_tflite(builder) + tfl_scale = self.scale.gen_tflite(builder) + tfl_zero_point = self.zero_point.gen_tflite(builder) + + libQuantizedParameters.Start(builder) + + if tfl_min is not None: + libQuantizedParameters.AddMin(builder, tfl_min) + + if tfl_max is not None: + libQuantizedParameters.AddMax(builder, tfl_max) + + libQuantizedParameters.AddScale(builder, tfl_scale) + + libQuantizedParameters.AddZeroPoint(builder, tfl_zero_point) + + libQuantizedParameters.AddDetailsType(builder, self.details_type) + + libQuantizedParameters.AddQuantizedDimension(builder, self.quantized_dimension) + + return libQuantizedParameters.End(builder) + + +class Shape(meta.IntVector): + __shape_offset: int + + __also_has_signature: bool + __shape_signature_vector: List[int] + __shape_signature_offset: int + + def __init__(self, shape: List[int]) -> None: + super().__init__(shape, libTensor.StartShapeVector) + self.__also_has_signature = False + + @property + def flat_size(self): + return np.prod(self.vector).item() + + def is_symbolic(self) -> bool: + """Determine if the shape uses symbolic dimensions + + :return: True, if at least 1 dimension of the shape is not a positive integer. + """ + + return not all(isinstance(dim, int) and dim >= 0 for dim in self.vector) + + def is_well_defined(self) -> bool: + """Determine if the shape is not empty and also is not symbolic. + + :return: True, if the shape contains just positive integers. + """ + + if self.len() == 0: + return False + + return not self.is_symbolic() + + def __check_dims(self): + """Check if all dimensions are integers. If not, transform this + to 'shape_signature'.""" + + self.__shape_signature_vector = [] + + for val in self.vector: + if (not isinstance(val, int)) or (val < 0): + val = -1 + self.__also_has_signature = True + + self.__shape_signature_vector.append(val) + + if self.__also_has_signature: + self.vector = [abs(val) for val in self.__shape_signature_vector] + + def gen_tflite(self, builder: fb.Builder, tensor): + """Generates TFLite code for the Shape""" + self.__check_dims() + + if self.__also_has_signature: + tensor.has_rank = True + + self.__shape_offset = super().gen_tflite(builder) + if self.__also_has_signature: + self.vector = self.__shape_signature_vector + self.__shape_signature_offset = super().gen_tflite(builder) + + def add_tf_lite(self, builder): + libTensor.AddShape(builder, self.__shape_offset) + + if self.__also_has_signature: + libTensor.AddShapeSignature(builder, self.__shape_signature_offset) + + +class Tensor(meta.TFLiteObject): + is_variable: bool + has_rank: bool + type: libTensorType.TensorType + buffer: int + name: str + shape: Shape + quantization: Quantization + # TODO sparsity + # TODO shapeSignature + # TODO variantTensors + + tensor_format: tensor_formatting.TensorFormat + + # TODO If 'hasRank' is false, "shape" must be []. + + """ IMPORTANT! The following attributes are used only by 'ModelBuilder' + in order to make model creation more efficient. """ + + """ Reference to the 'Buffer' object holding this tensors data. 'tmpBuffer' MUST be + stored a 'Buffers' object and MUST be referenced using the index 'buffer'. """ + tmp_buffer: Buffer + + """ Index to the 'tensors' vector for this tensor. """ + tmp_index: int + + # A boolean indicating, that this tensor should be considered as empty, by the TFLite inference engine. + # Can only be used for optional tensors. 
Whether a tensor is optional is usually indicated in the comments of the + # corresponding TFLite kernel files. + # If set to True, TFLite kernel modules will receive 'nullptr' as the returned value from the + # 'GetOptionalInputTensor()' function. + tmp_null_tensor: bool + + @property + def rank(self): + """Get the number of dimensions of this `Tensor`.""" + return self.shape.len() + + def __init__( + self, + shape: Shape = None, + name: str = None, + buffer: int = None, + data_type: libTensorType.TensorType = libTensorType.TensorType.FLOAT32, + quantization: Quantization = None, + is_variable: bool = False, + has_rank: bool = False, + ) -> None: + self.is_variable = is_variable + self.has_rank = has_rank + self.type = data_type + self.buffer = buffer + self.name = name + self.shape = shape + self.quantization = quantization + + self.tmp_null_tensor = False + + self.tensor_format = tensor_formatting.TensorFormat.NONE + + def gen_tflite(self, builder: fb.Builder): + + if self.shape is not None: + self.shape.gen_tflite(builder, self) + + if self.name is not None: + name = builder.CreateString(self.name) + else: + name = None + + if self.quantization is not None: + tfl_quantization = self.quantization.gen_tflite(builder) + else: + tfl_quantization = None + + libTensor.Start(builder) + + if self.shape is not None: + self.shape.add_tf_lite(builder) + + libTensor.AddType(builder, self.type) + + if self.buffer is not None: + libTensor.AddBuffer(builder, self.buffer) + + if name is not None: + libTensor.AddName(builder, name) + + if tfl_quantization is not None: + libTensor.AddQuantization(builder, tfl_quantization) + + libTensor.AddIsVariable(builder, self.is_variable) + + libTensor.AddHasRank(builder, self.has_rank) + + return libTensor.End(builder) + + +class Tensors(meta.TFLiteVector): + vector: List[Tensor] + + def __init__(self, tensors: List[Tensor] = None) -> None: + super().__init__(tensors, libSubGraphs.StartTensorsVector) + + +class OperatorInputs(meta.IntVector): + def __init__(self, inputs: List[int] = None): + super().__init__(inputs, libOperator.StartInputsVector) + + +class OperatorOutputs(meta.IntVector): + def __init__(self, outputs: List[int] = None): + super().__init__(outputs, libOperator.StartOutputsVector) + + +class MutatingVariableInputs(meta.BoolVector): + def __init__(self, mutating_variable_inputs: List[bool] = None) -> None: + super().__init__( + mutating_variable_inputs, libOperator.StartMutatingVariableInputsVector + ) + + +class Operator(meta.TFLiteObject): + opcode_index: int + custom_options_format: ( + libCustomOptionsFormat.CustomOptionsFormat + ) # Only default value is possible + mutating_variable_inputs: MutatingVariableInputs + inputs: OperatorInputs + outputs: OperatorOutputs + builtin_options: meta.BuiltinOptions + custom_options: meta.CustomOptions + # TODO intermediates + + """ IMPORTANT! The following attributes are used only by 'ModelBuilder' + in order to make model creation more efficient. """ + + """ Lists of references to 'Tensor' objects. Simpler to use when converting + than 'inputs' and 'outputs'. """ + tmp_inputs: List[Tensor] + tmp_outputs: List[Tensor] + tmp_version: int # OperatorConverter uses this to assign the corresponding operator code with correct version. + + # If `True`, this is an extra operator added during conversion. It was not present in the original ONNX model. 
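+    # Checked by the `WasNotInTheOriginalONNXModel` operator rule in the optimizer.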
+ tmp_added_extra: bool + + def __init__( + self, + inputs: OperatorInputs = None, + outputs: OperatorOutputs = None, + builtin_options: meta.BuiltinOptions = None, + opcode_index: int = 0, + mutating_variable_inputs: MutatingVariableInputs = MutatingVariableInputs(), # noqa B008 + custom_options_format: libCustomOptionsFormat.CustomOptionsFormat = libCustomOptionsFormat.CustomOptionsFormat.FLEXBUFFERS, + custom_options: meta.CustomOptions = None, + ) -> None: + self.opcode_index = opcode_index + self.custom_options_format = custom_options_format + self.mutating_variable_inputs = mutating_variable_inputs + self.builtin_options = builtin_options + if inputs is None: + inputs = OperatorInputs() + self.inputs = inputs + if outputs is None: + outputs = OperatorOutputs() + self.outputs = outputs + self.custom_options = custom_options + + self.tmp_inputs = [] + self.tmp_outputs = [] + self.tmp_version = 1 + self.tmp_added_extra = False + + def uses_per_channel_quantization(self) -> bool: + """Determine if this operator uses per-channel quantization.""" + for tensor in itertools.chain(self.tmp_inputs, self.tmp_outputs): + if tensor.quantization is None: + continue + + if tensor.quantization.is_per_channel(): + return True + + return False + + def is_quantized_without_qdq(self) -> bool: + """Determine if the Operator was quantized but not using the QDQ schema. + + ! This only works before quantization parameters are propagated ! + """ + y = self.tmp_outputs[0] + + if y.type not in { + libTensorType.TensorType.INT8, + libTensorType.TensorType.UINT8, + }: + return False + + inputs_quantized = any(x.quantization is not None for x in self.tmp_inputs) + + # Inputs are quantized and output isn't. + return inputs_quantized and y.quantization is None + + def is_qdq_quantized(self) -> bool: + """Determine if the Operator was quantized using the QDQ schema. + + ! This only works before quantization parameters are propagated ! + """ + y = self.tmp_outputs[0] + output_quantized = y.quantization is not None + output_8b_int = y.type in { + libTensorType.TensorType.INT8, + libTensorType.TensorType.UINT8, + } + + if not output_quantized and output_8b_int: + # (U)INT8 but not quantized -> not QDQ + return False + elif output_quantized and not output_8b_int: + # Non-(U)INT8 output, but quantized -> not supported + return False + + # Output quantized + INT8/UINT8 or different type (bool etc.) 
+ + # Check if any of the inputs is quantized + return any(x.quantization is not None for x in self.tmp_inputs) + + def gen_tflite(self, builder: fb.Builder): + if self.inputs is not None: + tfl_inputs = self.inputs.gen_tflite(builder) + else: + tfl_inputs = None + + if self.outputs is not None: + tfl_outputs = self.outputs.gen_tflite(builder) + else: + tfl_outputs = None + + if self.custom_options is not None: + tfl_custom_options = builder.CreateByteVector(self.custom_options) + else: + tfl_custom_options = None + + if self.builtin_options is not None: + tfl_builtin_options = self.builtin_options.gen_tflite(builder) + else: + tfl_builtin_options = None + + if self.mutating_variable_inputs is not None: + tfl_mutating_variable_inputs = self.mutating_variable_inputs.gen_tflite( + builder + ) + else: + tfl_mutating_variable_inputs = None + + libOperator.Start(builder) + + libOperator.AddOpcodeIndex(builder, self.opcode_index) + + if tfl_inputs is not None: + libOperator.AddInputs(builder, tfl_inputs) + + if tfl_outputs is not None: + libOperator.AddOutputs(builder, tfl_outputs) + + if tfl_builtin_options is not None: + libOperator.AddBuiltinOptions(builder, tfl_builtin_options) + libOperator.AddBuiltinOptionsType( + builder, self.builtin_options.builtin_options_type + ) + + if tfl_custom_options is not None: + libOperator.AddBuiltinOptionsType(builder, 0) + libOperator.AddCustomOptionsFormat(builder, self.custom_options_format) + libOperator.AddCustomOptions(builder, tfl_custom_options) + + if tfl_mutating_variable_inputs is not None: + libOperator.AddMutatingVariableInputs(builder, tfl_mutating_variable_inputs) + + return libOperator.End(builder) + + +class Operators(meta.TFLiteVector): + vector: List[Operator] + + def __init__(self, operators: List[Operator] = None) -> None: + super().__init__(operators, libSubGraphs.StartOperatorsVector) + + +class SubGraphInputs(meta.IntVector): + """List of 'Tensor' objects. Easier to use while converting.""" + + tmp_inputs: List[Tensor] + + def __init__(self, inputs: List[int] = None): + """'inputs' is a list of indices into the 'tensors' vector.""" + super().__init__(inputs, libSubGraphs.StartInputsVector) + self.tmp_inputs = [] + + +class SubGraphOutputs(meta.IntVector): + """List of 'Tensor' objects. 
Easier to use while converting.""" + + tmp_outputs: List[Tensor] + + def __init__(self, outputs: List[int] = None): + """'outputs' is a list of indices into the 'tensors' vector.""" + super().__init__(outputs, libSubGraphs.StartOutputsVector) + self.tmp_outputs = [] + + +class SubGraph(meta.TFLiteObject): + inputs: SubGraphInputs + outputs: SubGraphOutputs + tensors: Tensors + operators: Operators + + # TODO name + + def __init__( + self, + inputs: SubGraphInputs = None, + outputs: SubGraphOutputs = None, + tensors: Tensors = None, + operators: Operators = None, + ): + self.inputs = inputs + self.outputs = outputs + self.tensors = tensors + self.operators = operators + + def gen_tflite(self, builder: fb.Builder): + if self.tensors is not None: + tfl_tensors = self.tensors.gen_tflite(builder) + else: + tfl_tensors = None + + if self.inputs is not None: + tfl_inputs = self.inputs.gen_tflite(builder) + else: + tfl_inputs = None + + if self.outputs is not None: + tfl_outputs = self.outputs.gen_tflite(builder) + else: + tfl_outputs = None + + if self.operators is not None: + tfl_operators = self.operators.gen_tflite(builder) + else: + tfl_operators = None + + libSubGraphs.Start(builder) + + if tfl_tensors is not None: + libSubGraphs.AddTensors(builder, tfl_tensors) + + if tfl_inputs is not None: + libSubGraphs.AddInputs(builder, tfl_inputs) + + if tfl_outputs is not None: + libSubGraphs.AddOutputs(builder, tfl_outputs) + + if tfl_operators is not None: + libSubGraphs.AddOperators(builder, tfl_operators) + + return libSubGraphs.End(builder) + + +class SubGraphs(meta.TFLiteVector): + vector: List[SubGraph] + + def __init__(self, sub_graphs: List[SubGraph] = None) -> None: + super().__init__(sub_graphs, libModel.StartSubgraphsVector) + + +class Model(meta.TFLiteObject): + version: int + description: str + operator_codes: OperatorCodes + sub_graphs: SubGraphs + buffers: Buffers + # TODO signatureDefs + # TODO metadata + # TODO metadataBuffer + + __fileIdentifier = "TFL3" # file_identifier from the used TFLite schema + + @classmethod + def __gen_file_identifier(cls): + """Generate byte-like object representing the TFLite format""" + return cls.__fileIdentifier.encode("ascii") + + def __init__( + self, + version: int = 1, + description: str = None, + buffers: Buffers = None, + operator_codes: OperatorCodes = None, + sub_graphs: SubGraphs = None, + ) -> None: + self.version = version + self.description = description + self.operator_codes = operator_codes + self.sub_graphs = sub_graphs + self.buffers = buffers + + def gen_tflite(self, builder: fb.Builder): + if self.operator_codes is not None: + tfl_operator_codes = self.operator_codes.gen_tflite(builder) + else: + tfl_operator_codes = None + + if self.sub_graphs is not None: + tfl_sub_graphs = self.sub_graphs.gen_tflite(builder) + else: + tfl_sub_graphs = None + + if self.description is not None: + tfl_description = builder.CreateString(self.description) + else: + tfl_description = None + + if self.buffers is not None: + tfl_buffers = self.buffers.gen_tflite(builder) + else: + tfl_buffers = None + + libModel.Start(builder) + + libModel.AddVersion(builder, self.version) + + if tfl_operator_codes is not None: + libModel.AddOperatorCodes(builder, tfl_operator_codes) + + if tfl_sub_graphs is not None: + libModel.AddSubgraphs(builder, tfl_sub_graphs) + + if tfl_description is not None: + libModel.AddDescription(builder, tfl_description) + + if tfl_buffers is not None: + libModel.AddBuffers(builder, tfl_buffers) + + builder.Finish(libModel.End(builder), 
Model.__gen_file_identifier())
diff --git a/backends/nxp/backend/ir/tflite_optimizer/README.md b/backends/nxp/backend/ir/tflite_optimizer/README.md
new file mode 100755
index 00000000000..5abbfdd9965
--- /dev/null
+++ b/backends/nxp/backend/ir/tflite_optimizer/README.md
@@ -0,0 +1,166 @@
+# Pattern matching
+
+A tool which takes a symbolic definition of a pattern of operators and yields all matching instances of
+the pattern in the internal TFLite model.
+
+### Example use
+
+```python
+matcher = PatternMatcher(
+    builder,
+    [
+        Op(['Transpose'], ['x', 'perm'], ['y']),
+        Op(['Reshape', 'Squeeze'], ['y', ...], ['z']),
+        Op(['FullyConnected'], ['z', 'w'], ['fc_out'], [
+            HasFusedActivationFunction()
+        ]),
+        MultipleSameOps(['Add'], ['fc_out'])
+    ],
+    [
+        TensorsHaveOneConsumer(['y', 'z']),
+        TensorHasData('perm')
+    ])
+
+for [transpose, reshape, fc, add_ops], tensor_map, input_to_ops, output_to_op in matcher.match_patterns():
+    x = tensor_map['x']
+    ...
+```
+
+The `PatternMatcher` has 3 parameters in its constructor:
+
+* A [`ModelBuilder`](../converter/builder/model_builder.py) object which encapsulates the internal
+  TFLite model. This is the model that the `PatternMatcher` will search.
+* A list of symbolic operators, which describe the pattern the `PatternMatcher` will search for. Its details are
+  described in a [later section](#blocks-to-define-a-pattern) of this document.
+* The last parameter is an optional list of tensor rules defined in [tensor_rules.py](tensor_rules.py). They allow
+  additional restrictions to be placed on the tensors present in the pattern. The yielded pattern will always satisfy
+  all of these rules.
+
+The `PatternMatcher` performs a single pass through the TFLite model encapsulated by the given `ModelBuilder`, and
+gradually yields all matching patterns of operators. Changes made to the TFLite model in the body of the `for` loop
+above can therefore immediately affect the next matched instance of the searched pattern.
+
+The method `match_patterns()` will gradually yield a tuple containing:
+
+* A list of matched operators. Their number and order will exactly match the operators specified in the pattern.
+* A dictionary mapping symbolic tensor names (such as `x` or `perm` in the example above) to the actual TFLite tensors
+  matched in the model.
+* A dictionary mapping the name of a real tensor from the model to a list of operators which use this tensor as their
+  input.
+* A dictionary mapping the name of a real tensor from the model to the operator which produces this tensor as its
+  output.
+
+The first block in the pattern must be an `Op`. The pattern matcher will internally go through the model until it finds
+a match for this first `Op`. It then sets its current position to this `Op` and tries to match the rest of the pattern.
+If it succeeds, it yields the matched operators and returns to the current position (the first `Op`). It then continues
+on its single pass through the model and tries to find another match for the first `Op`.
+
+This also means that every subsequent block must somehow be connected to a previous block. So the following is **not**
+allowed.
+
+```python
+Op(['Sqrt'], ['a'], ['b']),
+Op(['Cast'], ['c'], ['d'])
+```
+
+---
+
+# Blocks to define a pattern
+
+## Op
+
+Block which represents exactly 1 TFLite operator.
+
+The class is defined as follows:
+
+```python
+class Op:
+    ops: list[str] | None = None
+    inputs: list[str | None] | None = None
+    outputs: list[str | None] | None = None
+    op_rules: list[OpRule] | None = None
+```
+
+* The matched TFLite operator will have one of the operator types specified in `ops`. If `ops` is `None`, the operator
+  type will not be considered during the pattern matching.
+
+* The `inputs` and `outputs` contain symbolic names, which uniquely identify the actual matched tensors from the TFLite
+  model. The matched TFLite operator will have exactly the same number of tensors as specified (except for the case
+  of `...`).
+    * Instead of a symbolic name, `None` can be given. It represents an anonymous tensor, which must still be present
+      in the matched operator.
+    * Another alternative to a symbolic name is the ellipsis `...`. It represents any number of tensors (including 0).
+      It can only be used at the beginning and/or at the end of the `inputs`/`outputs`. If it is at the beginning, the
+      matching will be done in reverse, starting with the last tensor.
+    * The `inputs` and/or `outputs` can be omitted altogether (left as `None`). In that case the `PatternMatcher`
+      will not take the `inputs`/`outputs` into consideration while matching the pattern.
+
+* `op_rules` is a list of rules that the operator must satisfy in order to be matched. They are defined
+  in [operator_rules.py](operator_rules.py). The yielded pattern will always satisfy all these rules.
+
+## MultipleSameOps
+
+Block which represents multiple (at least 1) occurrences of similar operators. The similar operators must all fit one
+common definition, which resembles the definition of `Op`.
+
+```python
+class MultipleSameOps:
+    ops: list[str]
+    inputs: list[str | None] | None = None
+    outputs: list[str | None] | None = None
+    op_rules: list[OpRule] | None = None
+```
+
+At least one input of the `MultipleSameOps` must be the output of a previous block. Other inputs/outputs which have not
+been defined by a previous block will represent a set of tensors instead of just one tensor.
+
+The `MultipleSameOps` block will be matched with the list of operators which consume the already matched input tensor.
+All operators consuming this tensor must match the `MultipleSameOps` block in order for the match to be successful.
+
+### Example use
+
+```python
+Op(['Quantize'], outputs=['x']),
+MultipleSameOps(['Dequantize'], ['x'], ['y'])
+```
+
+The first `Op` defines a `Quantize` operator with an output tensor `x`, which is a single tensor. The
+following `MultipleSameOps` represents a set of `N` `Dequantize` operators, which all consume the `x` tensor. These `N`
+operators all produce their own output, so `y` represents `N` tensors, not just one.
+
+Tensor rules can still be used for `y`, and they have to pass for all output tensors of the `Dequantize` operators.
+
+Operator rules can still be used for the `MultipleSameOps`, and they have to pass for all matched operators.
+
+It is **not** possible to use tensor/operator rules to filter the matched operators of `MultipleSameOps`. The pattern
+matcher will find all operators which use the `x` tensor, and the whole pattern is yielded if and only if they **all**
+match the definition.
+
+Sets of tensors (such as `y` in the example above) cannot be used as inputs to following blocks right now.
+
+### Semantics of consuming a set of tensors
+
+Currently, it is not allowed for any block to consume a set of tensors, such as `y` in the example above.
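+
+As a purely illustrative (hypothetical) sketch, the following pattern would therefore be rejected, because the last
+`Op` tries to consume the tensor set `y`:
+
+```python
+Op(['Quantize'], outputs=['x']),
+MultipleSameOps(['Dequantize'], ['x'], ['y']),
+Op(['Add'], ['y', None], ['z'])  # Invalid: 'y' stands for N tensors, not a single one.
+```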
+
+## OneOf
+
+Block which represents a single operator that has to match at least one of the `Op` definitions given in the
+`one_of_ops` list.
+
+```python
+class OneOf:
+    one_of_ops: list[Op]
+```
+
+### Example use
+
+```python
+Op(['FullyConnected'], outputs=['y']),
+OneOf([
+    Op(['Add'], ['y', 'b']),
+    Op(['Add'], ['b', 'y'])
+])
+```
+
+The example above represents a situation where we do not care whether the `Add` uses the output of the
+`FullyConnected` as its first or as its second input.
\ No newline at end of file
diff --git a/backends/nxp/backend/ir/tflite_optimizer/__init__.py b/backends/nxp/backend/ir/tflite_optimizer/__init__.py
new file mode 100755
index 00000000000..e69de29bb2d
diff --git a/backends/nxp/backend/ir/tflite_optimizer/graph_utils.py b/backends/nxp/backend/ir/tflite_optimizer/graph_utils.py
new file mode 100755
index 00000000000..0f2a0d8a447
--- /dev/null
+++ b/backends/nxp/backend/ir/tflite_optimizer/graph_utils.py
@@ -0,0 +1,115 @@
+# Copyright 2024 NXP
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import executorch.backends.nxp.backend.ir.converter.builder.model_builder as model_builder
+from executorch.backends.nxp.backend.ir import logger
+from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import (
+    BuiltinOperator,
+)
+from executorch.backends.nxp.backend.ir.tflite_generator import tflite_model
+
+InputTensorToOpsMap = dict[str, list[tflite_model.Operator]]
+OutputTensorToOpMap = dict[str, tflite_model.Operator]
+NameToTensorMap = dict[str, tflite_model.Tensor | list[tflite_model.Tensor]]
+
+
+def create_tensor_to_operator_dictionaries(
+    builder: "model_builder.ModelBuilder",
+) -> tuple[InputTensorToOpsMap, OutputTensorToOpMap]:
+    """Create and return 2 dictionaries: one maps a tensor name to the operators which consume the tensor as an
+    input, and the other maps a tensor name to the operator which produces the tensor as its output.
+
+    :return: Dictionary mapping a tensor name to a list of operators that use it as an input,
+        and a dictionary mapping a tensor name to the operator which produces it as its output.
+    """
+    input_tensor_to_operators: InputTensorToOpsMap = {}
+    output_tensor_to_operator: OutputTensorToOpMap = {}
+
+    for op in builder.get_operators().vector:
+        for input_tensor in op.tmp_inputs:
+            if input_tensor.name not in input_tensor_to_operators:
+                input_tensor_to_operators[input_tensor.name] = []
+
+            input_tensor_to_operators[input_tensor.name].append(op)
+
+        for output_tensor in op.tmp_outputs:
+            output_tensor_to_operator[output_tensor.name] = op
+
+    return input_tensor_to_operators, output_tensor_to_operator
+
+
+# Extend this map with operators required for future optimizations.
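+# Keys are the symbolic operator type names used in `Op([...])` and `MultipleSameOps([...])`
+# patterns; values are the corresponding TFLite builtin opcodes.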
+op_type_to_builtin_operator_map = { + "Add": BuiltinOperator.ADD, + "AddN": BuiltinOperator.ADD_N, + "AveragePool2D": BuiltinOperator.AVERAGE_POOL_2D, + "BatchMatMul": BuiltinOperator.BATCH_MATMUL, + "BidirectionalSequenceLSTM": BuiltinOperator.BIDIRECTIONAL_SEQUENCE_LSTM, + "BidirectionalSequenceRNN": BuiltinOperator.BIDIRECTIONAL_SEQUENCE_RNN, + "Cast": BuiltinOperator.CAST, + "Concatenation": BuiltinOperator.CONCATENATION, + "Conv2D": BuiltinOperator.CONV_2D, + "Conv3D": BuiltinOperator.CONV_3D, + "DepthwiseConv2D": BuiltinOperator.DEPTHWISE_CONV_2D, + "Dequantize": BuiltinOperator.DEQUANTIZE, + "Div": BuiltinOperator.DIV, + "FullyConnected": BuiltinOperator.FULLY_CONNECTED, + "HardSwish": BuiltinOperator.HARD_SWISH, + "L2Norm": BuiltinOperator.L2_NORMALIZATION, + "LSTM": BuiltinOperator.LSTM, + "LeakyRelu": BuiltinOperator.LEAKY_RELU, + "Logistic": BuiltinOperator.LOGISTIC, + "MaxPool2D": BuiltinOperator.MAX_POOL_2D, + "Maximum": BuiltinOperator.MAXIMUM, + "Mean": BuiltinOperator.MEAN, + "Minimum": BuiltinOperator.MINIMUM, + "Mul": BuiltinOperator.MUL, + "PRelu": BuiltinOperator.PRELU, + "Quantize": BuiltinOperator.QUANTIZE, + "RNN": BuiltinOperator.RNN, + "ReduceProd": BuiltinOperator.REDUCE_PROD, + "Relu": BuiltinOperator.RELU, + "Relu6": BuiltinOperator.RELU6, + "ReluN1To1": BuiltinOperator.RELU_N1_TO_1, + "Reshape": BuiltinOperator.RESHAPE, + "SVDF": BuiltinOperator.SVDF, + "ScatterND": BuiltinOperator.SCATTER_ND, + "SequenceRNN": BuiltinOperator.UNIDIRECTIONAL_SEQUENCE_RNN, + "Sign": BuiltinOperator.SIGN, + "Slice": BuiltinOperator.SLICE, + "Split": BuiltinOperator.SPLIT, + "StridedSlice": BuiltinOperator.STRIDED_SLICE, + "Sub": BuiltinOperator.SUB, + "Sum": BuiltinOperator.SUM, + "Tanh": BuiltinOperator.TANH, + "Transpose": BuiltinOperator.TRANSPOSE, + "TransposeConv": BuiltinOperator.TRANSPOSE_CONV, + "UnidirectionalSequenceLSTM": BuiltinOperator.UNIDIRECTIONAL_SEQUENCE_LSTM, + "Where": BuiltinOperator.WHERE, +} + + +def builtin_operator_for_op_type(op_type: str) -> BuiltinOperator: + builtin_op = op_type_to_builtin_operator_map.get(op_type, None) + if builtin_op is None: + logger.e( + logger.Code.INTERNAL_ERROR, + f"PatternMatcher doesn't support `{op_type}` yet.", + ) + + return builtin_op + + +def operator_is_type( + op: tflite_model.Operator, op_type: str, builder: "model_builder.ModelBuilder" +): + builtin_op = builtin_operator_for_op_type(op_type) + + opcode_indices = builder.op_code_type_index_map.get(builtin_op, None) + if opcode_indices is None: + # The operator is not present in the model at all. + return False + + return op.opcode_index in opcode_indices.values() diff --git a/backends/nxp/backend/ir/tflite_optimizer/operator_rules.py b/backends/nxp/backend/ir/tflite_optimizer/operator_rules.py new file mode 100755 index 00000000000..253dc9c69a1 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_optimizer/operator_rules.py @@ -0,0 +1,122 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
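+
+# Operator rules are small predicates which the `PatternMatcher` evaluates against a candidate
+# operator; a pattern only matches when every rule attached to its `Op` blocks returns `True`.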
+ +from abc import ABC, abstractmethod +from dataclasses import dataclass + +import executorch.backends.nxp.backend.ir.converter.builder.model_builder as model_builder +from executorch.backends.nxp.backend.ir.lib.tflite.ActivationFunctionType import ( + ActivationFunctionType, +) +from executorch.backends.nxp.backend.ir.tflite_generator import tflite_model +from executorch.backends.nxp.backend.ir.tflite_optimizer.graph_utils import ( + NameToTensorMap, + operator_is_type, +) +from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.base_optimization import ( + InputTensorToOpsMap, + OutputTensorToOpMap, +) + + +class OpRule(ABC): + @abstractmethod + def __call__( + self, + op: tflite_model.Operator, + tensor_map: NameToTensorMap, + input_to_ops_map: InputTensorToOpsMap, + output_to_op_map: OutputTensorToOpMap, + builder: "model_builder.ModelBuilder", + ) -> bool: + pass + + +class NoFusedActivationFunction(OpRule): + + def __call__( + self, + op: tflite_model.Operator, + tensor_map: NameToTensorMap, + input_to_ops_map: InputTensorToOpsMap, + output_to_op_map: OutputTensorToOpMap, + builder: "model_builder.ModelBuilder", + ) -> bool: + if not hasattr(op, "builtin_options"): + return False + + if not hasattr(op.builtin_options, "fused_activation_function"): + return False + + # noinspection PyUnresolvedReferences + return ( + op.builtin_options.fused_activation_function == ActivationFunctionType.NONE + ) + + +class HasFusedActivationFunction(OpRule): + + def __call__( + self, + op: tflite_model.Operator, + tensor_map: NameToTensorMap, + input_to_ops_map: InputTensorToOpsMap, + output_to_op_map: OutputTensorToOpMap, + builder: "model_builder.ModelBuilder", + ) -> bool: + if not hasattr(op, "builtin_options"): + return True + + if not hasattr(op.builtin_options, "fused_activation_function"): + return True + + # noinspection PyUnresolvedReferences + return ( + op.builtin_options.fused_activation_function != ActivationFunctionType.NONE + ) + + +@dataclass +class AllInputsComeFrom(OpRule): + """Assures that all input tensors of this operator are produced by operators with op type + `single_preceding_op_type`. + """ + + single_preceding_op_type: str + + def __call__( + self, + op: tflite_model.Operator, + tensor_map: NameToTensorMap, + input_to_ops_map: InputTensorToOpsMap, + output_to_op_map: OutputTensorToOpMap, + builder: "model_builder.ModelBuilder", + ) -> bool: + preceding_ops = [output_to_op_map[inpt.name] for inpt in op.tmp_inputs] + + return all( + operator_is_type(preceding_op, self.single_preceding_op_type, builder) + for preceding_op in preceding_ops + ) + + +@dataclass +class WasNotInTheOriginalONNXModel(OpRule): + """Assures that this operator wasn't created by converting an ONNX operator from the original model, but instead + was added extra in order to convert a different operator. + + This rule is currently only satisfied for operators added by ModelBuilder methods `create_..._before()` and + `create_..._after()`. 
+ """ + + def __call__( + self, + op: tflite_model.Operator, + tensor_map: NameToTensorMap, + input_to_ops_map: InputTensorToOpsMap, + output_to_op_map: OutputTensorToOpMap, + builder: "model_builder.ModelBuilder", + ) -> bool: + return op.tmp_added_extra diff --git a/backends/nxp/backend/ir/tflite_optimizer/optimizations/__init__.py b/backends/nxp/backend/ir/tflite_optimizer/optimizations/__init__.py new file mode 100755 index 00000000000..e69de29bb2d diff --git a/backends/nxp/backend/ir/tflite_optimizer/optimizations/base_optimization.py b/backends/nxp/backend/ir/tflite_optimizer/optimizations/base_optimization.py new file mode 100755 index 00000000000..6001ca961b8 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_optimizer/optimizations/base_optimization.py @@ -0,0 +1,36 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from abc import ABC, abstractmethod + +from executorch.backends.nxp.backend.ir.conversion_config import ConversionConfig +from executorch.backends.nxp.backend.ir.converter.builder import model_builder +from executorch.backends.nxp.backend.ir.tflite_optimizer.graph_utils import ( + create_tensor_to_operator_dictionaries, + InputTensorToOpsMap, + OutputTensorToOpMap, +) + + +class BaseOptimization(ABC): + _builder: "model_builder.ModelBuilder" + + def __init__( + self, builder: "model_builder.ModelBuilder", conversion_config: ConversionConfig + ): + self._builder = builder + self._conversion_config = conversion_config + + def _create_tensor_to_operator_dictionaries( + self, + ) -> tuple[InputTensorToOpsMap, OutputTensorToOpMap]: + return create_tensor_to_operator_dictionaries(self._builder) + + @abstractmethod + def __call__(self) -> bool: + """Execute the optimization and return `True` if the optimization had an effect and the model was modified. + `False` otherwise. + """ + pass diff --git a/backends/nxp/backend/ir/tflite_optimizer/optimizations/combine_hard_sigmoid_and_mul_to_hard_swish.py b/backends/nxp/backend/ir/tflite_optimizer/optimizations/combine_hard_sigmoid_and_mul_to_hard_swish.py new file mode 100755 index 00000000000..dddabfe87f1 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_optimizer/optimizations/combine_hard_sigmoid_and_mul_to_hard_swish.py @@ -0,0 +1,256 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.TensorType import TensorType +from executorch.backends.nxp.backend.ir.tflite_generator import tflite_model +from executorch.backends.nxp.backend.ir.tflite_generator.builtin_options.hard_swish_options import ( + HardSwish, +) +from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.base_optimization import ( + BaseOptimization, +) +from executorch.backends.nxp.backend.ir.tflite_optimizer.pattern_matcher import ( + OneOf, + Op, + PatternMatcher, +) +from executorch.backends.nxp.backend.ir.tflite_optimizer.tensor_rules import ( + RuleOr, + TensorHasNConsumers, + TensorHasStaticValue, + TensorHasType, + TensorsAreQuantized, + TensorsHaveOneConsumer, + TensorsHaveType, +) + + +class CombineHardSigmoidAndMulIntoHardSwish(BaseOptimization): + + def __call__(self) -> bool: + made_changes = self._combine_float_variant() + made_changes |= self._combine_quantized_variant() + + return made_changes + + def _combine_float_variant(self) -> bool: + """Fuse some operators in the following pattern. The ops `Mul`, `Add` `Minimum` and `Relu` compute the + `HardSigmoid` operation, as there is no `HardSigmoid` operator in TFLite. + + ┌─────┴─────┐ `x` + ┌──▼──┐ │ + 1/6 ──► Mul │ │ + └──┬──┘ │ + ┌──▼──┐ │ + 1/2 ──► Add │ │ │ + └──┬──┘ │ ┌─────▼─────┐ + ┌────▼────┐ │ ─────► │ HardSwish │ + 1 ──► Minimum │ │ └─────┬─────┘ + └────┬────┘ │ + ┌──▼───┐ │ + │ Relu │ │ + └──┬───┘ │ + └───┐ ┌───┘ + ┌▼───▼┐ + │ Mul │ + └──┬──┘ + """ + + matcher = PatternMatcher( + self._builder, + [ + Op(["Mul"], ["x", "alpha"], ["mul_o"]), + OneOf( + [ + Op(["Add"], ["mul_o", "beta"], ["add_o"]), + Op(["Add"], ["beta", "mul_o"], ["add_o"]), + ] + ), + OneOf( + [ + Op(["Minimum"], ["add_o", "one"], ["min_o"]), + Op(["Minimum"], ["one", "add_o"], ["min_o"]), + ] + ), + Op(["Relu"], ["min_o"], ["relu_o"]), + OneOf( + [ + Op(["Mul"], ["x", "relu_o"], ["y"]), + Op(["Mul"], ["relu_o", "x"], ["y"]), + ] + ), + ], + [ + TensorHasNConsumers("x", 2), + TensorsHaveOneConsumer(["mul_o", "add_o", "min_o", "relu_o"]), + TensorHasStaticValue("alpha", 1 / 6), + TensorHasStaticValue("beta", 0.5), + TensorHasStaticValue("one", 1), + # `HardSwishConverter` and `HardSigmoidConverter` both only support float32. + TensorHasType("x", TensorType.FLOAT32), + ], + ) + + # The mapped operator (value) will be inserted into the model later, at the position of the `key` operator. + to_add: dict[tflite_model.Operator, tflite_model.Operator] = {} + to_remove = [] + for pattern_ops, tensor_map, _, _ in matcher.match_patterns(): + x, y = tensor_map["x"], tensor_map["y"] + hard_swish = tflite_model.Operator( + builtin_options=HardSwish(), + opcode_index=self._builder.op_code_index_for_op_type( + BuiltinOperator.HARD_SWISH + ), + ) + hard_swish.tmp_inputs = [x] + hard_swish.tmp_outputs = [y] + + to_add[pattern_ops[0]] = hard_swish + + to_remove.extend(pattern_ops) + + ops = self._builder.get_operators() + for k, v in to_add.items(): + idx = ops.index(k) + ops.insert(idx, v) + + for op in to_remove: + ops.remove(op) + + return len(to_remove) != 0 + + def _combine_quantized_variant(self) -> bool: + """Fuse some operators in the following pattern. The ops `Mul`, `Add` `Minimum` and `Relu` compute the + `HardSigmoid` operation, as there is no `HardSigmoid` operator in TFLite. + + The following pattern arises from using the `onnx2quant` on a model with `HardSwish`. 
The quantizer always + runs a pre-processing step which splits the ONNX `HardSwish` into `HardSigmoid` and `Mul`. It seems like it + cannot be turned off. Therefore, we cannot add QDQ quantization of `HardSwish`. But since `HardSigmoid` + gets converted to multiple TFLite operators, we also cannot really add QDQ quantization for that operator. + This means that `HardSwish` will never get fully quantized by the `onnx2quant`, and the following pattern + will be created. + We can, however, convert the entire pattern into a quantized `HardSwish` using this optimization. + + │ (u)int8 `x` + ┌─────▼──────┐ + │ Dequantize │ + └─────┬──────┘ + ┌─────┴─────┐ float32 + ┌──▼──┐ │ + 1/6 ──► Mul │ │ + └──┬──┘ │ + ┌──▼──┐ │ + 1/2 ──► Add │ │ + └──┬──┘ │ + ┌────▼────┐ │ + 1 ──► Minimum │ │ │ (u)int8 `x` + └────┬────┘ │ ┌─────▼─────┐ + ┌──▼───┐ │ ─────► │ HardSwish │ + │ Relu │ │ └─────┬─────┘ + └──┬───┘ │ │ (u)int8 `y` + ┌────▼─────┐ │ + │ Quantize │ │ + └────┬─────┘ │ + ┌─────▼──────┐ │ + │ Dequantize │ │ + └─────┬──────┘ │ + └───┐ ┌───┘ + ┌▼───▼┐ + │ Mul │ + └──┬──┘ + │ float32 + ┌────▼─────┐ + │ Quantize │ + └────┬─────┘ + │ (u)int8 `y` + """ + matcher = PatternMatcher( + self._builder, + [ + Op(["Dequantize"], ["x"], ["deq1_o"]), + OneOf( + [ + Op(["Mul"], ["deq1_o", "alpha"], ["mul1_o"]), + Op(["Mul"], ["alpha", "deq1_o"], ["mul1_o"]), + ] + ), + OneOf( + [ + Op(["Add"], ["mul1_o", "beta"], ["add_o"]), + Op(["Add"], ["beta", "mul1_o"], ["add_o"]), + ] + ), + OneOf( + [ + Op(["Minimum"], ["add_o", "one"], ["min_o"]), + Op(["Minimum"], ["one", "add_o"], ["min_o"]), + ] + ), + Op(["Relu"], ["min_o"], ["relu_o"]), + Op(["Quantize"], ["relu_o"], ["quant1_o"]), + Op(["Dequantize"], ["quant1_o"], ["deq2_o"]), + OneOf( + [ + Op(["Mul"], ["deq1_o", "deq2_o"], ["mul2_o"]), + Op(["Mul"], ["deq2_o", "deq1_o"], ["mul2_o"]), + ] + ), + Op(["Quantize"], ["mul2_o"], ["y"]), + ], + [ + TensorHasNConsumers("deq1_o", 2), + TensorsHaveOneConsumer( + [ + "mul1_o", + "add_o", + "min_o", + "relu_o", + "quant1_o", + "deq2_o", + "mul2_o", + ] + ), + TensorHasStaticValue("alpha", 1 / 6), + TensorHasStaticValue("beta", 0.5), + TensorHasStaticValue("one", 1), + TensorHasType("deq1_o", TensorType.FLOAT32), + TensorsAreQuantized(["x", "y"]), + RuleOr( + TensorsHaveType(["x", "y"], TensorType.INT8), + TensorsHaveType(["x", "y"], TensorType.UINT8), + ), + ], + ) + + # The mapped operator (value) will be inserted into the model later, at the position of the `key` operator. 
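+        # Inserting at the position of the first matched operator keeps the operator
+        # list topologically sorted: the only input of the new `HardSwish` is `x`,
+        # which is already produced before that point in the list.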
+ to_add: dict[tflite_model.Operator, tflite_model.Operator] = {} + to_remove = [] + for pattern_ops, tensor_map, _, _ in matcher.match_patterns(): + x, y = tensor_map["x"], tensor_map["y"] + hard_swish = tflite_model.Operator( + builtin_options=HardSwish(), + opcode_index=self._builder.op_code_index_for_op_type( + BuiltinOperator.HARD_SWISH + ), + ) + hard_swish.tmp_inputs = [x] + hard_swish.tmp_outputs = [y] + + to_add[pattern_ops[0]] = hard_swish + + to_remove.extend(pattern_ops) + + ops = self._builder.get_operators() + for k, v in to_add.items(): + idx = ops.index(k) + ops.insert(idx, v) + + for op in to_remove: + ops.remove(op) + + return len(to_remove) != 0 diff --git a/backends/nxp/backend/ir/tflite_optimizer/optimizations/eliminate_dead_branches.py b/backends/nxp/backend/ir/tflite_optimizer/optimizations/eliminate_dead_branches.py new file mode 100755 index 00000000000..cea179dfb09 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_optimizer/optimizations/eliminate_dead_branches.py @@ -0,0 +1,82 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from executorch.backends.nxp.backend.ir import logger +from executorch.backends.nxp.backend.ir.tflite_generator import tflite_model +from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.base_optimization import ( + BaseOptimization, +) + + +class EliminateDeadBranches(BaseOptimization): + + def __call__(self) -> bool: + _, output_to_ops = self._create_tensor_to_operator_dictionaries() + + output_names = [ + tensor.name for tensor in self._builder.get_sub_graph().outputs.tmp_outputs + ] + + tensor_names_to_process = set(output_names) + tensors_to_keep = set() + ops_to_keep = set() + processed_ops = set() + + # Iterate from output tensors to inputs and mark all visited nodes & tensors + while len(tensor_names_to_process) != 0: + tensor = tensor_names_to_process.pop() + tensors_to_keep.add(tensor) + + if tensor not in output_to_ops: + # Input tensor or already processed + continue + + op: tflite_model.Operator = output_to_ops[tensor] + + if op in processed_ops: + continue + + # Append all inputs and outputs to next processing. Outputs of nodes aren't + # necessarily outputs of the model but must be preserved. + for tensor in op.tmp_inputs + op.tmp_outputs: + tensor_names_to_process.add(tensor.name) + + ops_to_keep.add(op) + processed_ops.add(op) + + if not self._conversion_config.allow_inputs_stripping: + # Keep all inputs (even if they are not used) when prohibited by user + tensors_to_keep.update( + [ + tensor.name + for tensor in self._builder.get_sub_graph().inputs.tmp_inputs + ] + ) + + # Remove unused ops + ops = self._builder.get_operators().vector + i, removed_ops_count = 0, 0 + while i < len(ops): + if ops[i] in ops_to_keep: + i += 1 + else: + removed_ops_count += 1 + del ops[i] + + # Remove unused tensors + tensors = self._builder.get_tensors().vector + i = 0 + while i < len(tensors): + if tensors[i].name in tensors_to_keep: + i += 1 + else: + del tensors[i] + + if removed_ops_count != 0: + logger.i( + f"Dead branch elimination optimization removed {removed_ops_count} unused ops from the graph." 
+ ) + + return removed_ops_count != 0 diff --git a/backends/nxp/backend/ir/tflite_optimizer/optimizations/fuse_activation_functions.py b/backends/nxp/backend/ir/tflite_optimizer/optimizations/fuse_activation_functions.py new file mode 100755 index 00000000000..6b657c4d5b1 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_optimizer/optimizations/fuse_activation_functions.py @@ -0,0 +1,235 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from executorch.backends.nxp.backend.ir import logger +from executorch.backends.nxp.backend.ir.lib.tflite.ActivationFunctionType import ( + ActivationFunctionType, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.tflite_generator import tflite_model +from executorch.backends.nxp.backend.ir.tflite_optimizer.graph_utils import ( + operator_is_type, +) +from executorch.backends.nxp.backend.ir.tflite_optimizer.operator_rules import ( + NoFusedActivationFunction, +) +from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.base_optimization import ( + BaseOptimization, +) +from executorch.backends.nxp.backend.ir.tflite_optimizer.pattern_matcher import ( + Op, + PatternMatcher, +) +from executorch.backends.nxp.backend.ir.tflite_optimizer.tensor_rules import ( + TensorHasOneConsumer, +) + + +class FuseActivationFunctions(BaseOptimization): + ops_with_fused_activation_function = [ + "Conv2D", + "Conv3D", + "DepthwiseConv2D", + "TransposeConv", + "MaxPool2D", + "AveragePool2D", + "SVDF", + "FullyConnected", + "Add", + "Mul", + "Sub", + "Div", + # 'Concatenation', # currently disabled + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/concatenation.cc#L139 + # 'L2Norm', # currently disabled + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/l2norm.cc#L72 + # LSTM operators will always already have fused activation functions. They are assigned in `convert_lstm.py`. + # 'LSTM', 'UnidirectionalSequenceLSTM', 'BidirectionalSequenceLSTM' + # RNN operators will always already have fused activation functions. They are assigned in `convert_rnn.py`. 
+ # 'RNN', 'SequenceRNN', 'BidirectionalSequenceRNN', + ] + + activation_functions = ["Relu", "ReluN1To1", "Relu6", "Tanh", "Sign"] + + supported_activations_for_op: dict[ + BuiltinOperator, list[ActivationFunctionType] + ] = { + BuiltinOperator.CONV_2D: [ + ActivationFunctionType.RELU, + ActivationFunctionType.RELU_N1_TO_1, + ActivationFunctionType.RELU6, + ], + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/conv.cc#L912 + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/kernel_util.h#L285-L300 + BuiltinOperator.CONV_3D: [ + ActivationFunctionType.RELU, + ActivationFunctionType.RELU_N1_TO_1, + ActivationFunctionType.RELU6, + ], + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/conv3d.cc#L213 + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/kernel_util.h#L285-L300 + BuiltinOperator.DEPTHWISE_CONV_2D: [ + ActivationFunctionType.RELU, + ActivationFunctionType.RELU_N1_TO_1, + ActivationFunctionType.RELU6, + ], + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/depthwise_conv.cc#L307 + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/kernel_util.h#L285-L300 + BuiltinOperator.TRANSPOSE_CONV: [ + ActivationFunctionType.RELU, + ActivationFunctionType.RELU_N1_TO_1, + ActivationFunctionType.RELU6, + ], + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/transpose_conv.cc#L516 + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/kernel_util.h#L285-L300 + BuiltinOperator.MAX_POOL_2D: [ + ActivationFunctionType.RELU, + ActivationFunctionType.RELU_N1_TO_1, + ActivationFunctionType.RELU6, + ], + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/pooling.cc#L247 + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/kernel_util.h#L285-L300 + BuiltinOperator.AVERAGE_POOL_2D: [ + ActivationFunctionType.RELU, + ActivationFunctionType.RELU_N1_TO_1, + ActivationFunctionType.RELU6, + ], + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/pooling.cc#L124 + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/kernel_util.h#L285-L300 + BuiltinOperator.FULLY_CONNECTED: [ + ActivationFunctionType.RELU, + ActivationFunctionType.RELU_N1_TO_1, + ActivationFunctionType.RELU6, + ], + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/fully_connected.cc#L627-L630 + BuiltinOperator.ADD: [ + ActivationFunctionType.RELU, + ActivationFunctionType.RELU_N1_TO_1, + ActivationFunctionType.RELU6, + ], + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/add.cc#L246 + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/kernel_util.h#L285-L300 + BuiltinOperator.MUL: [ + ActivationFunctionType.RELU, + ActivationFunctionType.RELU_N1_TO_1, + ActivationFunctionType.RELU6, + ], + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/mul.cc#L159 + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/kernel_util.h#L285-L300 + 
BuiltinOperator.SUB: [ + ActivationFunctionType.RELU, + ActivationFunctionType.RELU_N1_TO_1, + ActivationFunctionType.RELU6, + ], + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/sub.cc#L306 + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/kernel_util.h#L285-L300 + BuiltinOperator.DIV: [ + ActivationFunctionType.RELU, + ActivationFunctionType.RELU_N1_TO_1, + ActivationFunctionType.RELU6, + ], + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/div.cc#L180 + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/kernel_util.h#L285-L300 + BuiltinOperator.SVDF: [ActivationFunctionType.RELU], + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/svdf.cc#L394 + BuiltinOperator.RNN: [ + ActivationFunctionType.RELU, + ActivationFunctionType.RELU_N1_TO_1, + ActivationFunctionType.RELU6, + ActivationFunctionType.TANH, + ActivationFunctionType.SIGN_BIT, + ], + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/basic_rnn.cc#L222 + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/internal/kernel_utils.cc#L71 + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/internal/tensor_utils.h#L58-L77 + BuiltinOperator.UNIDIRECTIONAL_SEQUENCE_RNN: [ + ActivationFunctionType.RELU, + ActivationFunctionType.RELU_N1_TO_1, + ActivationFunctionType.RELU6, + ActivationFunctionType.TANH, + ActivationFunctionType.SIGN_BIT, + ], + # https://github.com/tensorflow/tensorflow/blob/6887368d6d46223f460358323c4b76d61d1558a8/tensorflow/lite/kernels/unidirectional_sequence_rnn.cc#L239 + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/internal/kernel_utils.cc#L71 + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/internal/tensor_utils.h#L58-L77 + BuiltinOperator.BIDIRECTIONAL_SEQUENCE_RNN: [ + ActivationFunctionType.RELU, + ActivationFunctionType.RELU_N1_TO_1, + ActivationFunctionType.RELU6, + ActivationFunctionType.TANH, + ActivationFunctionType.SIGN_BIT, + ], + # https://github.com/tensorflow/tensorflow/blob/6887368d6d46223f460358323c4b76d61d1558a8/tensorflow/lite/kernels/bidirectional_sequence_rnn.cc#L433 + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/internal/kernel_utils.cc#L71 + # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/internal/tensor_utils.h#L58-L77 + } + + ops_that_need_equal_io_quantization = [ + # Documented restrictions from https://www.tensorflow.org/lite/performance/quantization_spec + BuiltinOperator.AVERAGE_POOL_2D, + BuiltinOperator.MAX_POOL_2D, + BuiltinOperator.CONCATENATION, + ] + + def _act_fun_type_for_op(self, op: tflite_model.Operator) -> ActivationFunctionType: + if operator_is_type(op, "Relu", self._builder): + return ActivationFunctionType.RELU + elif operator_is_type(op, "ReluN1To1", self._builder): + return ActivationFunctionType.RELU_N1_TO_1 + elif operator_is_type(op, "Relu6", self._builder): + return ActivationFunctionType.RELU6 + elif operator_is_type(op, "Tanh", self._builder): + return ActivationFunctionType.TANH + elif operator_is_type(op, "Sign", self._builder): + 
return ActivationFunctionType.SIGN_BIT
+
+    def __call__(self) -> bool:
+        matcher = PatternMatcher(
+            self._builder,
+            [
+                Op(
+                    self.ops_with_fused_activation_function,
+                    ["x"],
+                    ["x1"],
+                    [NoFusedActivationFunction()],
+                ),
+                Op(self.activation_functions, ["x1"], ["y"]),
+            ],
+            [TensorHasOneConsumer("x1")],
+        )
+
+        to_remove = []
+        for [leading_op, act_fun_op], tensor_map, _, _ in matcher.match_patterns():
+            builtin_leading_op = leading_op.builtin_options.operator_type
+            logger.internal_assert(
+                builtin_leading_op in self.supported_activations_for_op.keys(),
+                f"FuseActivationFunctions: supported activations for operator `{builtin_leading_op}` "
+                "are not known.",
+            )
+
+            act_fun = self._act_fun_type_for_op(act_fun_op)
+            if act_fun not in self.supported_activations_for_op[builtin_leading_op]:
+                # The leading op doesn't support this activation function.
+                continue
+
+            x, y = tensor_map["x"], tensor_map["y"]
+            if (
+                x.quantization != y.quantization
+                and builtin_leading_op in self.ops_that_need_equal_io_quantization
+            ):
+                # The fusion would result in different input and output quantization of `leading_op`, which would
+                # cause runtime issues for that particular operator.
+                continue
+
+            leading_op.builtin_options.fused_activation_function = act_fun
+            leading_op.tmp_outputs[0] = act_fun_op.tmp_outputs[0]
+            to_remove.append(act_fun_op)
+
+        for op in to_remove:
+            self._builder.get_operators().remove(op)
+
+        return len(to_remove) != 0
diff --git a/backends/nxp/backend/ir/tflite_optimizer/optimizations/fuse_fully_connected_and_add_operators.py b/backends/nxp/backend/ir/tflite_optimizer/optimizations/fuse_fully_connected_and_add_operators.py
new file mode 100755
index 00000000000..b6fd5849551
--- /dev/null
+++ b/backends/nxp/backend/ir/tflite_optimizer/optimizations/fuse_fully_connected_and_add_operators.py
@@ -0,0 +1,80 @@
+# Copyright 2024 NXP
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+from executorch.backends.nxp.backend.ir.lib.tflite.TensorType import TensorType
+from executorch.backends.nxp.backend.ir.tflite_optimizer.operator_rules import (
+    NoFusedActivationFunction,
+)
+from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.base_optimization import (
+    BaseOptimization,
+)
+from executorch.backends.nxp.backend.ir.tflite_optimizer.pattern_matcher import (
+    OneOf,
+    Op,
+    PatternMatcher,
+)
+from executorch.backends.nxp.backend.ir.tflite_optimizer.tensor_rules import (
+    RuleAnd,
+    RuleIf,
+    RuleOr,
+    TensorDimensionsMatch,
+    TensorHasDimensionOfSize,
+    TensorHasOneConsumer,
+    TensorHasRank,
+    TensorHasType,
+    TensorIsQuantized,
+)
+
+
+class FuseFullyConnectedAndAddOperators(BaseOptimization):
+
+    def __call__(self) -> bool:
+        """
+        A FullyConnected -> Add sequence can handle more complicated shapes than just a FullyConnected with bias
+        (due to shape broadcasting).
+        The bias can have shape [N] or [1, N], where N is the first dimension of the FC weights tensor.
+        It could also have shape [1, ..., 1, N], but then the TFLite FullyConnected removes the leading ones,
+        even if 'keep_num_dims' is True. In ONNX, the output tensor keeps the leading ones, so in that case a
+        Reshape would have to be added, and we do not perform the fusion.
+
+        # https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/kernels/fully_connected.cc#L398
+        """
+        matcher = PatternMatcher(
+            self._builder,
+            [
+                # Require exactly 2 inputs.
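+                # (I.e. the `FullyConnected` must not already have a bias. TFLite's
+                # `FullyConnected` takes an optional third `bias` input, which is
+                # what the fusion below appends.)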
+ Op( + ["FullyConnected"], ["x", "w"], ["y"], [NoFusedActivationFunction()] + ), + OneOf([Op(["Add"], ["y", "b"]), Op(["Add"], ["b", "y"])]), + ], + [ + TensorHasOneConsumer("y"), + TensorHasRank("w", 2), + RuleOr( + TensorHasRank("b", 1), + RuleAnd(TensorHasRank("b", 2), TensorHasDimensionOfSize("b", 0, 1)), + ), + TensorDimensionsMatch("w", 0, "b", -1), + RuleIf(TensorIsQuantized("x"), TensorHasType("b", TensorType.INT32)), + ], + ) + + to_remove = [] + for (fc, add), tensor_map, _, _ in matcher.match_patterns(): + b = tensor_map["b"] + fc.tmp_inputs.append(b) + + # Remove the 'Add' operator. + fc.tmp_outputs[0] = add.tmp_outputs[0] + fc.builtin_options.fused_activation_function = ( + add.builtin_options.fused_activation_function + ) + to_remove.append(add) + + for op in to_remove: + self._builder.get_operators().remove(op) + + return len(to_remove) != 0 diff --git a/backends/nxp/backend/ir/tflite_optimizer/optimizations/fuse_quanitze_into_preceding_ops.py b/backends/nxp/backend/ir/tflite_optimizer/optimizations/fuse_quanitze_into_preceding_ops.py new file mode 100755 index 00000000000..6b3bd70cc01 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_optimizer/optimizations/fuse_quanitze_into_preceding_ops.py @@ -0,0 +1,94 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from executorch.backends.nxp.backend.ir.tflite_optimizer.operator_rules import ( + WasNotInTheOriginalONNXModel, +) +from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.base_optimization import ( + BaseOptimization, +) +from executorch.backends.nxp.backend.ir.tflite_optimizer.pattern_matcher import ( + Op, + PatternMatcher, +) +from executorch.backends.nxp.backend.ir.tflite_optimizer.tensor_rules import ( + TensorHasOneConsumer, + TensorsArePerTensorQuantized, + TensorsHaveSameType, +) + + +class FuseQuantizeIntoPrecedingOps(BaseOptimization): + """Remove some `Quantize` operators in the following pattern. + + │ + ┌─▼──┐ + │ Op │ │ + └─┬──┘ ┌─▼──┐ + │ 'x' (same type, quantization params `A`) ─────► │ Op │ + ┌────▼─────┐ └─┬──┘ + │ Quantize │ │ (same type, quantization params `B`) + └────┬─────┘ + │ 'y' (same type, quantization params `B`) + """ + + ops_that_can_have_any_output_quantization = [ + # List of operators which don't have restrictions placed on their output quantization and are currently + # supported by `onnx2quant`. + "Add", + "BatchMatMul", + "FullyConnected", + "HardSwish", + "LeakyRelu", + "Mean", + "Mul", + "PRelu", + "ReduceProd", + "Relu", + "Sub", + "Sum", + ] + + def __call__(self) -> bool: + matcher = PatternMatcher( + self._builder, + [ + Op( + self.ops_that_can_have_any_output_quantization, + outputs=[..., "x", ...], + ), + Op( + ["Quantize"], + ["x"], + ["y"], + [ + # Restrict this optimization to extra `Quantize` operators which were added during conversion. + # Sometimes the `Quantize` operators which are present in the ONNX model can be essential and + # shouldn't be removed. They can for example perform clipping. + WasNotInTheOriginalONNXModel() + ], + ), + ], + [ + TensorHasOneConsumer("x"), + # Make sure the `Quantize` is just changing quantization parameters. Otherwise, it couldn't be fused. 
+ TensorsHaveSameType(["x", "y"]), + TensorsArePerTensorQuantized(["x", "y"]), + ], + ) + + to_remove = [] + for [leading_op, quantize], tensor_map, _, _ in matcher.match_patterns(): + x, y = tensor_map["x"], tensor_map["y"] + + x_idx = leading_op.tmp_outputs.index(x) + leading_op.tmp_outputs[x_idx] = y + + to_remove.append(quantize) + + for op in to_remove: + self._builder.get_operators().remove(op) + + return len(to_remove) != 0 diff --git a/backends/nxp/backend/ir/tflite_optimizer/optimizations/keep_one_empty_buffer.py b/backends/nxp/backend/ir/tflite_optimizer/optimizations/keep_one_empty_buffer.py new file mode 100755 index 00000000000..9809719fad4 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_optimizer/optimizations/keep_one_empty_buffer.py @@ -0,0 +1,39 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from executorch.backends.nxp.backend.ir.converter.tensor_utils import tensor_has_data +from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.base_optimization import ( + BaseOptimization, +) + + +class KeepOneEmptyBuffer(BaseOptimization): + + def __call__(self) -> bool: + """Create a single empty `Buffer` object and assign it to all tensors in the model that don't have static data. + :return: True, if any tensors had their buffer changed. Otherwise, False. + """ + + made_changes = False + empty_buffer = self._builder.get_first_empty_buffer() + + for t in self._builder.get_tensors().vector: + if tensor_has_data(t): + # The buffer of `t` is not empty. + continue + + if t.tmp_buffer == empty_buffer: + # Already optimized. + continue + + if t.is_variable: + # The data of the tensor will change at runtime, so it shouldn't share the buffer with other tensors. + continue + + # It's safe to replace the buffer. + t.tmp_buffer = empty_buffer + made_changes = True + + return made_changes diff --git a/backends/nxp/backend/ir/tflite_optimizer/optimizations/move_relu_before_concat.py b/backends/nxp/backend/ir/tflite_optimizer/optimizations/move_relu_before_concat.py new file mode 100755 index 00000000000..4d10b7c80ae --- /dev/null +++ b/backends/nxp/backend/ir/tflite_optimizer/optimizations/move_relu_before_concat.py @@ -0,0 +1,107 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from collections import defaultdict +from copy import deepcopy + +from executorch.backends.nxp.backend.ir.tflite_generator import tflite_model +from executorch.backends.nxp.backend.ir.tflite_optimizer.operator_rules import ( + AllInputsComeFrom, +) +from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.base_optimization import ( + BaseOptimization, +) +from executorch.backends.nxp.backend.ir.tflite_optimizer.pattern_matcher import ( + Op, + PatternMatcher, +) +from executorch.backends.nxp.backend.ir.tflite_optimizer.tensor_rules import ( + TensorHasOneConsumer, + TensorsHaveSameQuantization, +) + + +class MoveActivationBeforeConcatenation(BaseOptimization): + """ + Move some operators around in the following pattern. + This is a common pattern that emerges from the conversion of separable convolutions. + + │ │ │ │ + ┌───▼────┐ ┌───▼────┐ ┌───▼────┐ ┌───▼────┐ + │ Conv2D │ ... │ Conv2D │ │ Conv2D │ ... │ Conv2D │ + └───┬────┘ └───┬────┘ └───┬────┘ └───┬────┘ + └──┐ ┌──┘ │ │ + ┌──▼──────────▼─┐ ┌──▼───┐ ┌──▼───┐ + │ Concatenation │ ─────► │ Relu │ ... 
│ Relu │ + └───────┬───────┘ └──┬───┘ └──┬───┘ + │ 'x' └──┐ ┌──┘ + ┌──▼───┐ ┌──▼──────────▼─┐ + │ Relu │ │ Concatenation │ + └──┬───┘ └───────┬───────┘ + │ 'y' │ + """ + + activations = ["Relu", "ReluN1To1", "Relu6", "Tanh", "Sign"] + + def __call__(self) -> bool: + matcher = PatternMatcher( + self._builder, + [ + Op(["Concatenation"], None, ["x"], [AllInputsComeFrom("Conv2D")]), + Op(self.activations, ["x"], ["y"]), + ], + [ + TensorHasOneConsumer("x"), + # If the activation function is not changing the quantization parameters, it can be moved without + # messing with the quantization elsewhere. + TensorsHaveSameQuantization(["x", "y"]), + ], + ) + + to_remove = [] + + # Mapping an operator to a list of operators. These operators (value) will later be added into the TFLite + # model's `operators` in front of the specified operator (key). + to_add: dict[tflite_model.Operator, list[tflite_model.Operator]] = defaultdict( + lambda: [] + ) + + for [concat, activation], _, _, _ in matcher.match_patterns(): + new_concat_inputs = [] + for concat_input in concat.tmp_inputs: + # Create a new operator for the activation function. + new_activation = deepcopy(activation) + new_activation.tmp_inputs = [concat_input] + new_activation_output = self._builder.duplicate_tensor(concat_input) + new_activation.tmp_outputs = [new_activation_output] + + to_add[concat].append( + new_activation + ) # Insert the new activation into the model later. + + new_concat_inputs.append( + new_activation_output + ) # Connect the activation with the `Concatenation`. + + concat.tmp_inputs = new_concat_inputs + + # Tensor rule ensures that only the activation functions is using the output of the `Concatenation`. + # It is safe to bypass. + concat.tmp_outputs[0] = activation.tmp_outputs[0] + to_remove.append(activation) + + operators = self._builder.get_operators() + + # Add the new activations into the model. + for concat, activations in to_add.items(): + idx = operators.index(concat) + for activation in activations: + operators.insert(idx, activation) + + # Remove the old activations. + for activation in to_remove: + operators.remove(activation) + + return len(to_remove) != 0 diff --git a/backends/nxp/backend/ir/tflite_optimizer/optimizations/permute_fully_connected_weights_after_reshape.py b/backends/nxp/backend/ir/tflite_optimizer/optimizations/permute_fully_connected_weights_after_reshape.py new file mode 100755 index 00000000000..42eefc1ab56 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_optimizer/optimizations/permute_fully_connected_weights_after_reshape.py @@ -0,0 +1,121 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
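+
+# A rough NumPy sketch of the static weight rewrite performed below (4D case;
+# `X`, `C`, `H`, `W` as in the class docstring, weights `w` of shape [X, C*H*W]):
+#
+#     w4d = w.reshape(X, C, H, W)             # recover the channels first layout
+#     w4d = np.transpose(w4d, (0, 2, 3, 1))   # X, H, W, C (channels last)
+#     w_permuted = w4d.reshape(X, H * W * C)  # flatten back to 2D
+#
+# The actual implementation uses `translator.convert_data_to_channels_last` for
+# the transposition, so this is only an approximation of the idea.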
+
+import numpy as np
+
+from executorch.backends.nxp.backend.ir import logger
+from executorch.backends.nxp.backend.ir.converter.conversion import translator
+from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.base_optimization import (
+    BaseOptimization,
+)
+from executorch.backends.nxp.backend.ir.tflite_optimizer.pattern_matcher import (
+    Op,
+    PatternMatcher,
+)
+from executorch.backends.nxp.backend.ir.tflite_optimizer.tensor_rules import (
+    TensorDimensionsMatch,
+    TensorHasRank,
+    TensorIsChannelsFirst,
+    TensorIsChannelsLast,
+    TensorIsFormatless,
+    TensorsHaveData,
+    TensorsHaveOneConsumer,
+)
+
+
+class PermuteFullyConnectedWeightsAfterReshape(BaseOptimization):
+
+    def __call__(self) -> bool:
+        """Search for the pattern:
+
+                   │ (3D / 4D / 5D, channels last)
+            ┌──────▼──────┐
+            │  Transpose  │
+            └──────┬──────┘
+                   │ (3D / 4D / 5D, channels first)
+             ┌─────▼─────┐
+             │  Reshape  │
+             └─────┬─────┘
+                   │ (2D, formatless)
+          ┌────────▼───────┐
+          │ FullyConnected ◄───── Weights (static)
+          └────────┬───────┘
+                   │ (2D, formatless)
+                   ▼
+
+        In this case, it is possible to permute the `weights` of the `FullyConnected`, and remove the `Transpose`.
+
+        How it works:
+         - The original model doesn't have the `Transpose`. It just has a `Reshape` into a `MatMul` (or `Gemm`...).
+         - The `Transpose` is added because the `Reshape` has a channels last input, which was originally
+            channels first (in the ONNX model), and so the 2D output of the `Reshape` would have the same data,
+            but at different locations. The `Transpose` makes the input channels first, which ensures correct
+            output of the `Reshape`.
+         - In the scenario in the graph above, it is possible to omit the `Transpose`, which causes the `Reshape`
+            output to be "permuted", and then the `weights` of the `FullyConnected` can be statically permuted
+            to match. This will result in correct `FullyConnected` output.
+         - It is required that the `Reshape` output has shape [N, H * W * ... * C] (if the input was
+            [N, H, W, ..., C]). The `weights` will have shape [X, C * H * W * ...] (where X is arbitrary).
+            Since we know the values of C, H, W, ..., we can statically reshape the `weights` to
+            [X, C, H, W, ...], transpose it to [X, H, W, ..., C], and flatten it back to [X, H * W * ... * C].
+        """
+
+        matcher = PatternMatcher(
+            self._builder,
+            [
+                Op(["Transpose"], ["x", "perm"], ["y"]),
+                Op(["Reshape"], ["y", ...], ["z"]),
+                Op(["FullyConnected"], ["z", "w", ...]),
+            ],
+            [
+                TensorsHaveOneConsumer(["y", "z"]),
+                TensorDimensionsMatch("y", 0, "z", 0),
+                TensorDimensionsMatch("z", 1, "w", 1),
+                TensorIsChannelsLast("x"),
+                TensorIsChannelsFirst("y"),
+                TensorIsFormatless("z"),
+                TensorHasRank("z", 2),
+                TensorsHaveData(["perm", "w"]),
+            ],
+        )
+
+        to_remove = []
+        for (transpose, reshape, fc), tensor_map, _, _ in matcher.match_patterns():
+            # Make sure the `Transpose` is applying the expected permutation.
+            y = tensor_map["y"]
+            to_onnx_perm = (
+                translator.create_channels_last_to_channels_first_permutation(
+                    y.shape.len()
+                )
+            )
+            if not np.allclose(to_onnx_perm, tensor_map["perm"].tmp_buffer.data):
+                continue  # The `Transpose` has an unexpected permutation.
+
+            w = tensor_map["w"]
+            tmp_shape = [w.shape[0]] + y.shape[1:]  # H, W, C
+
+            data = w.tmp_buffer.data.reshape(tmp_shape)  # Reshape from 2D.
+            data = translator.convert_data_to_channels_last(
+                data
+            )  # Permute to TFLite format.
+            data = data.reshape(w.shape.vector)  # Flatten to 2D.
+
+            # Create a new tensor for the data, in case it is used by some other operator as well.
+ new_weights = self._builder.duplicate_tensor(w) + new_weights.tmp_buffer.data = data + fc.tmp_inputs[1] = new_weights + + # Remove the `Transpose`. + logger.i( + f"Permuting the `weights`({w.name}) of a FullyConnected operator and removing an artificial " + "Transpose operator." + ) + reshape.tmp_inputs[0] = transpose.tmp_inputs[0] + to_remove.append(transpose) + + for op in to_remove: + self._builder.get_operators().remove(op) + + return len(to_remove) != 0 diff --git a/backends/nxp/backend/ir/tflite_optimizer/optimizations/prune_cast_operators.py b/backends/nxp/backend/ir/tflite_optimizer/optimizations/prune_cast_operators.py new file mode 100755 index 00000000000..8cce0bb61e8 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_optimizer/optimizations/prune_cast_operators.py @@ -0,0 +1,117 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.base_optimization import ( + BaseOptimization, +) +from executorch.backends.nxp.backend.ir.tflite_optimizer.pattern_matcher import ( + MultipleSameOps, + Op, + PatternMatcher, +) +from executorch.backends.nxp.backend.ir.tflite_optimizer.tensor_rules import ( + RuleOr, + TensorIsNotModelOutput, + TensorIsNotQuantized, + TensorsAreNotQuantized, + TensorsHaveSameType, +) + + +class FuseCastOperators(BaseOptimization): + """Remove some `Cast` operators in the following pattern. + + │ 'x' + ┌──▼───┐ + │ Cast │ + └──┬───┘ │ 'x' + ┌─┴─── ... ──────┐ 'y' ─────► ┌──┴── ... ─────┐ ('y' is not in the model anymore) + ┌──▼───┐ ┌──▼───┐ ┌──▼───┐ ┌──▼───┐ + │ Cast │ ... │ Cast │ │ Cast │ ... │ Cast │ + └──┬───┘ └──┬───┘ └──┬───┘ └──┬───┘ + │ │ 'z' │ │ 'z' + """ + + def __call__(self) -> bool: + matcher = PatternMatcher( + self._builder, + [ + Op(["Cast"], outputs=["y"]), + MultipleSameOps(["Cast"], ["y", ...]), # Only `Cast` ops can use `y`. + ], + [TensorIsNotModelOutput("y"), TensorIsNotQuantized("y")], + ) + + to_remove = [] + for [leading_cast, following_cast_ops], _, _, _ in matcher.match_patterns(): + # Remove the leading cast. + for cast in following_cast_ops: + cast.tmp_inputs[0] = leading_cast.tmp_inputs[0] + + to_remove.append(leading_cast) + + for op in to_remove: + self._builder.get_operators().remove(op) + + return len(to_remove) != 0 + + +class RemoveCastOperatorsWithNoEffect(BaseOptimization): + """Remove operators that match the following pattern. + + │ 'x' + ┌──▼───┐ + │ Cast │ + └──┬───┘ + │ 'y' (same type as 'x') + """ + + def __call__(self) -> bool: + matcher = PatternMatcher( + self._builder, + [Op(["Cast"], ["x", ...], ["y"])], + [ + TensorsHaveSameType(["x", "y"]), + TensorsAreNotQuantized(["x", "y"]), + RuleOr( + TensorIsNotModelOutput("x"), + TensorIsNotModelOutput("y"), + # If both 'x' and 'y' are model outputs, the `Cast` cannot be removed. If the op was removed, its + # input and output would be combined into 1 tensor, which would have to represent 2 model outputs + # with 2 different names, which is not possible. + ), + ], + ) + + to_remove = [] + for [cast], tensor_map, input_to_ops, _ in matcher.match_patterns(): + if not self._builder.operator_can_be_skipped(cast): + continue + + x = tensor_map["x"] + y = tensor_map["y"] + model_outputs = self._builder.get_sub_graph().outputs.tmp_outputs + + # Replace `y` with `x` in the inputs of all following operators. 
+            following_ops = input_to_ops.get(y.name, [])
+            for op in following_ops:
+                while y in op.tmp_inputs:
+                    input_idx = op.tmp_inputs.index(y)
+                    op.tmp_inputs[input_idx] = x
+
+            if y in model_outputs:
+                # Replace the output as well.
+                while y in model_outputs:
+                    idx = model_outputs.index(y)
+                    model_outputs[idx] = x
+
+                self._builder.swap_tensor_names(x, y)
+
+            to_remove.append(cast)
+
+        for op in to_remove:
+            self._builder.get_operators().remove(op)
+
+        return len(to_remove) != 0
diff --git a/backends/nxp/backend/ir/tflite_optimizer/optimizations/prune_quantize_operators.py b/backends/nxp/backend/ir/tflite_optimizer/optimizations/prune_quantize_operators.py
new file mode 100755
index 00000000000..317654cde9a
--- /dev/null
+++ b/backends/nxp/backend/ir/tflite_optimizer/optimizations/prune_quantize_operators.py
@@ -0,0 +1,304 @@
+# Copyright 2024 NXP
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import numpy as np
+
+from executorch.backends.nxp.backend.ir.lib.tflite.TensorType import TensorType
+from executorch.backends.nxp.backend.ir.tflite_generator import tflite_model
+from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.base_optimization import (
+    BaseOptimization,
+)
+from executorch.backends.nxp.backend.ir.tflite_optimizer.pattern_matcher import (
+    MultipleSameOps,
+    Op,
+    PatternMatcher,
+)
+from executorch.backends.nxp.backend.ir.tflite_optimizer.tensor_rules import (
+    TensorIsNotModelOutput,
+    TensorsHaveSameQuantization,
+)
+
+
+class FuseParallelQuantizeOperators(BaseOptimization):
+    """Fuse some `Quantize` operators in the following pattern.
+
+          │  'x'                                            │  'x'
+      ┌───┴──── ... ───────┐                           ┌────▼─────┐
+ ┌────▼─────┐         ┌────▼─────┐       ─────►        │ Quantize │
+ │ Quantize │   ...   │ Quantize │                     └────┬─────┘
+ └────┬─────┘         └────┬─────┘                      ┌───┴─ ... ─┐
+      │                    │  'y' (same quantization)   │           │  'y'
+
+
+    The pattern above only shows 2 `Quantize` operators. But the `PatternMatcher` will gradually match all
+    parallel `Quantize` operators which fit the pattern, and remove the unnecessary ones.
+    """
+
+    def __call__(self) -> bool:
+        matcher = PatternMatcher(
+            self._builder,
+            [Op(["Quantize"], ["x"], ["y1"]), Op(["Quantize"], ["x"], ["y2"])],
+            [
+                TensorsHaveSameQuantization(["y1", "y2"]),
+                # 'y2' will be removed from the model, so it cannot be a model output. But thanks to the nature of the
+                # `PatternMatcher`, it doesn't matter which `Quantize` produces the model output. The `PatternMatcher`
+                # will first match 1 `Quantize` as the first `Op` and try to optimize. If it doesn't work, it will then
+                # match the second `Quantize` operator with the first `Op` and try to optimize that way. This will
+                # result in a perfectly optimized pattern every time.
+                TensorIsNotModelOutput("y2"),
+            ],
+        )
+
+        to_remove = []
+        for (
+            [_, quant_to_remove],
+            tensor_map,
+            input_to_ops,
+            _,
+        ) in matcher.match_patterns():
+            to_remove.append(quant_to_remove)
+
+            y1 = tensor_map["y1"]
+            y2 = tensor_map["y2"]
+            next_ops = input_to_ops.get(y2.name, [])
+            for next_op in next_ops:
+                while y2 in next_op.tmp_inputs:
+                    idx = next_op.tmp_inputs.index(y2)
+                    next_op.tmp_inputs[idx] = y1
+
+            quant_to_remove.tmp_inputs = (
+                []
+            )  # To prevent future matches of this operator.
+
+        ops = self._builder.get_operators()
+        for op in to_remove:
+            ops.remove(op)
+
+        return len(to_remove) != 0
+
+
+# noinspection PyMethodMayBeStatic
+class PruneQuantizeOperators(BaseOptimization):
+    """Remove some `Quantize` operators in the following pattern.
+
+          │  'x'
+     ┌────▼─────┐
+     │ Quantize │
+     └────┬─────┘
+       ┌──┴───── ... ──────┐  'y'
+  ┌────▼─────┐        ┌────▼─────┐
+  │ Quantize │  ...   │ Quantize │
+  └────┬─────┘        └────┬─────┘
+       │                   │  'z'
+    """
+
+    def __call__(self) -> bool:
+        matcher = PatternMatcher(
+            self._builder,
+            [
+                Op(["Quantize"], ["x"], ["y"]),
+                MultipleSameOps(
+                    ["Quantize"], ["y"]
+                ),  # Nothing other than `Quantize` ops can use `y`.
+            ],
+            [TensorIsNotModelOutput("y")],
+        )
+
+        to_remove = []
+        for (
+            [leading_quantize, following_quantize_ops],
+            tensor_map,
+            input_to_ops,
+            _,
+        ) in matcher.match_patterns():
+            x = tensor_map["x"]
+
+            if self._is_quantization_recasting_from_float(x, following_quantize_ops):
+                # The first Quantize can be skipped, because it only performs recasting.
+                to_remove.append(leading_quantize)
+
+                for next_quantize in following_quantize_ops:
+                    next_quantize.tmp_inputs[0] = x
+
+            elif self._is_quantization_recasting_from_integer(
+                x, following_quantize_ops
+            ):
+                # The Quantize ops negate each other -> remove them both.
+                to_remove.append(leading_quantize)
+
+                graph_outputs = self._builder.get_sub_graph().outputs.tmp_outputs
+                for next_quantize in following_quantize_ops:
+                    to_remove.append(next_quantize)
+
+                    # Replace the output of the next Quantize with the input of the first Quantize.
+                    next_quantize_output = next_quantize.tmp_outputs[0]
+                    self._bypass_to_next_quantize_ops(
+                        input_to_ops, next_quantize_output, x
+                    )
+
+                    # If the output of the next Quantize is also a graph output -> replace the graph output too.
+                    if next_quantize_output in graph_outputs:
+                        graph_outputs.remove(next_quantize_output)
+                        if x not in graph_outputs:
+                            graph_outputs.append(x)
+
+        for op in to_remove:
+            self._builder.get_operators().remove(op)
+
+        return len(to_remove) != 0
+
+    def _is_quantization_recasting_from_float(
+        self, quantize_input: tflite_model.Tensor, next_ops: list[tflite_model.Operator]
+    ):
+        """
+        Check if the 'next_ops' just recast from one type to another. The scale and the recalculated zero point
+        must be the same for all nodes. The input of the first Quantize op has to be float to match the criteria.
+
+        float          int8            uint8
+        ----> [quant] ----> [quant] ----->
+                zp           zp-128
+
+        OR
+
+        float          uint8           int8
+        ----> [quant] ----> [quant] ----->
+                zp           zp+128
+
+        OR (forked variant with similar restrictions as mentioned above)
+
+                          u/int          u/int
+        float           | ----> [quant] ---->
+        ----> [quant] --|
+                        | ----> [quant] ---->
+                          u/int          u/int
+
+        :param quantize_input: Input tensor of the first QuantizeLinear node.
+        :param next_ops: QuantizeLinear ops that consume the output of 'quantize_input'.
+        :return: True if the pattern with recasting is found.
+ """ + + if not quantize_input.type == TensorType.FLOAT32: + return False + + # All 'next_ops' has the same output type and q-params + next_op_output_match_first = [ + self._same_type_and_quantization( + next_ops[0].tmp_outputs[0], next_op.tmp_outputs[0] + ) + for next_op in next_ops + ] + if not all(next_op_output_match_first): + return False + + # All 'next_ops' are the same, do some additional checks on the first one + + next_op_input = next_ops[0].tmp_inputs[0] + next_op_output = next_ops[0].tmp_outputs[0] + + input_zp = next_op_input.quantization.zero_point.vector + output_zp = next_op_output.quantization.zero_point.vector + + if next_op_input.quantization.scale != next_op_output.quantization.scale: + return False + + if ( + next_op_input.type == TensorType.INT8 + and next_op_output.type == TensorType.UINT8 + ): + return np.equal(input_zp, np.array(output_zp) - 128) + elif ( + next_op_input.type == TensorType.UINT8 + and next_op_output.type == TensorType.INT8 + ): + return np.equal(input_zp, np.array(output_zp) + 128) + + return False + + def _is_quantization_recasting_from_integer( + self, quantize_input: tflite_model.Tensor, next_ops: list[tflite_model.Operator] + ): + """ + Check if 'next_ops' just recast from one type to another. Scale + recalculated zp + must be the same for all nodes. Input of first Quantize op has to be (u)int8 to + match criteria. + + uint8 int8 uint8 + ----> [quant] -----> [quant] ----> + zp zp+128 zp + + OR + + int8 uint8 int8 + ---> [quant] -----> [quant] ---> + zp zp-128 zp + + OR (forked variant with similar restrictions as mentioned above) + + u/int u/int + u/int | ----> [quant] ----> + ----> [quant] --| + | ----> [quant] ----> + u/int u/int + + :param quantize_input: Input tensor of first QuantizeLinear node. + :param next_ops: QuantizeLinear ops that consume output of 'quantize_input'. + :return: True if pattern with recasting is found. 
+ """ + + if quantize_input.type not in [TensorType.INT8, TensorType.UINT8]: + return False + + # All 'next_ops' has the same output type and q-params as input of first Quantize + next_op_output_match_first = [ + self._same_type_and_quantization(quantize_input, next_op.tmp_outputs[0]) + for next_op in next_ops + ] + if not all(next_op_output_match_first): + return False + + # All 'next_ops' are the same, do some additional checks on the first one + + next_op_input = next_ops[0].tmp_inputs[0] + next_op_output = next_ops[0].tmp_outputs[0] + + input_zp = next_op_input.quantization.zero_point.vector + output_zp = next_op_output.quantization.zero_point.vector + + if quantize_input.quantization.scale != next_op_input.quantization.scale: + return False + + if next_op_input.quantization.scale != next_op_output.quantization.scale: + return False + + if ( + next_op_input.type == TensorType.INT8 + and next_op_output.type == TensorType.UINT8 + ): + return np.equal(input_zp, np.array(output_zp) - 128) + elif ( + next_op_input.type == TensorType.UINT8 + and next_op_output.type == TensorType.INT8 + ): + return np.equal(input_zp, np.array(output_zp) + 128) + + return False + + def _same_type_and_quantization( + self, a: tflite_model.Tensor, b: tflite_model.Tensor + ): + same_type = a.type == b.type + same_quantization = a.quantization == b.quantization + + return same_type and same_quantization + + def _bypass_to_next_quantize_ops( + self, input_to_ops, next_quantize_output, quantize_input + ): + ops_after_next_quantize = input_to_ops.get(next_quantize_output.name, []) + for op_after_next_quantize in ops_after_next_quantize: + for index, input_tensor in enumerate(op_after_next_quantize.tmp_inputs): + if input_tensor == next_quantize_output: + # Replace the input + op_after_next_quantize.tmp_inputs[index] = quantize_input diff --git a/backends/nxp/backend/ir/tflite_optimizer/optimizations/prune_reshape_operators.py b/backends/nxp/backend/ir/tflite_optimizer/optimizations/prune_reshape_operators.py new file mode 100755 index 00000000000..229d4747a7c --- /dev/null +++ b/backends/nxp/backend/ir/tflite_optimizer/optimizations/prune_reshape_operators.py @@ -0,0 +1,116 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.base_optimization import ( + BaseOptimization, +) +from executorch.backends.nxp.backend.ir.tflite_optimizer.pattern_matcher import ( + MultipleSameOps, + Op, + PatternMatcher, +) +from executorch.backends.nxp.backend.ir.tflite_optimizer.tensor_rules import ( + RuleOr, + TensorIsNotModelOutput, + TensorsHaveSameShape, +) + + +class FuseReshapeOperators(BaseOptimization): + """Remove some `Reshape` operator in the following pattern. + + │ 'x' + ┌────▼────┐ + │ Reshape │ + └────┬────┘ │ 'x' + ┌───┴─── ... ───────┐ 'y' ─────► ┌───┴─── ... ───────┐ ('y' is not in the model anymore) + ┌────▼────┐ ┌────▼────┐ ┌────▼────┐ ┌────▼────┐ + │ Reshape │ ... │ Reshape │ │ Reshape │ ... │ Reshape │ + └────┬────┘ └────┬────┘ └────┬────┘ └────┬────┘ + │ │ 'z' │ │ 'z' + """ + + def __call__(self) -> bool: + matcher = PatternMatcher( + self._builder, + [ + Op(["Reshape"], outputs=["y"]), + MultipleSameOps( + ["Reshape"], ["y", ...] + ), # Nothing other than `Reshape` ops can use `y`. 
+ ], + [TensorIsNotModelOutput("y")], + ) + + to_remove = [] + for [leading_reshape, following_reshapes], _, _, _ in matcher.match_patterns(): + # Remove the leading reshape. + for r in following_reshapes: + r.tmp_inputs[0] = leading_reshape.tmp_inputs[0] + + to_remove.append(leading_reshape) + + for op in to_remove: + self._builder.get_operators().remove(op) + + return len(to_remove) != 0 + + +class RemoveReshapeOperatorsWithNoEffect(BaseOptimization): + """Remove operators that match the following pattern. + + │ 'x' + ┌────▼────┐ + │ Reshape │ + └────┬────┘ + │ 'y' (same shape as 'x') + """ + + def __call__(self) -> bool: + matcher = PatternMatcher( + self._builder, + [Op(["Reshape"], ["x", ...], ["y"])], + [ + TensorsHaveSameShape(["x", "y"]), + RuleOr( + TensorIsNotModelOutput("x"), + TensorIsNotModelOutput("y"), + # If both 'x' and 'y' are model outputs, the `Reshape` cannot be removed. If the op was removed, its + # input and output would be combined into 1 tensor, which would have to represent 2 model outputs + # with 2 different names, which is not possible. + ), + ], + ) + + to_remove = [] + for [reshape], tensor_map, input_to_ops, _ in matcher.match_patterns(): + if not self._builder.operator_can_be_skipped(reshape): + continue + + x = tensor_map["x"] + y = tensor_map["y"] + model_outputs = self._builder.get_sub_graph().outputs.tmp_outputs + + # Replace `y` with `x` in the inputs of all following operators. + following_ops = input_to_ops.get(y.name, []) + for op in following_ops: + while y in op.tmp_inputs: + input_idx = op.tmp_inputs.index(y) + op.tmp_inputs[input_idx] = x + + if y in model_outputs: + # Replace the output as well. + while y in model_outputs: + idx = model_outputs.index(y) + model_outputs[idx] = x + + self._builder.swap_tensor_names(x, y) + + to_remove.append(reshape) + + for op in to_remove: + self._builder.get_operators().remove(op) + + return len(to_remove) != 0 diff --git a/backends/nxp/backend/ir/tflite_optimizer/optimizations/prune_transpose_operators.py b/backends/nxp/backend/ir/tflite_optimizer/optimizations/prune_transpose_operators.py new file mode 100755 index 00000000000..dc9ad9999b4 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_optimizer/optimizations/prune_transpose_operators.py @@ -0,0 +1,155 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import numpy as np + +from executorch.backends.nxp.backend.ir.converter.conversion.translator import ( + apply_permutation_to, + combine_permutations, +) +from executorch.backends.nxp.backend.ir.tflite_generator import tflite_model +from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.base_optimization import ( + BaseOptimization, +) +from executorch.backends.nxp.backend.ir.tflite_optimizer.pattern_matcher import ( + MultipleSameOps, + Op, + PatternMatcher, +) +from executorch.backends.nxp.backend.ir.tflite_optimizer.tensor_rules import ( + RuleOr, + TensorHasData, + TensorIsNotModelOutput, + TensorsHaveData, +) + + +class FuseTransposeOperators(BaseOptimization): + """Remove some `Transpose` operators in the following pattern. + + │ 'x' + ┌─────▼─────┐ + │ Transpose │ + └─────┬─────┘ │ 'x' + ┌───┴──── ... ────────┐ 'y' ─────► ┌───┴──── ... ────────┐ ('y' is not in the model anymore) + ┌─────▼─────┐ ┌─────▼─────┐ ┌─────▼─────┐ ┌─────▼─────┐ + │ Transpose │ ... │ Transpose │ │ Transpose │ ... 
│ Transpose │ + └─────┬─────┘ └─────┬─────┘ └─────┬─────┘ └─────┬─────┘ + │ │ 'z' │ │ 'z' + """ + + def __call__(self) -> bool: + matcher = PatternMatcher( + self._builder, + [ + Op(["Transpose"], ["x", "perm1"], ["y"]), + MultipleSameOps( + ["Transpose"], ["y", "perm2"] + ), # Nothing other than `Transpose` ops can use `y`. + ], + [TensorsHaveData(["perm1", "perm2"]), TensorIsNotModelOutput("y")], + ) + + to_remove = [] + for ( + [leading_transpose, following_transposes], + tensor_map, + _, + _, + ) in matcher.match_patterns(): + x = tensor_map["x"] + perm1 = tensor_map["perm1"].tmp_buffer.data + + # Remove the leading transpose. + for second_transpose in following_transposes: + # Combine the permutations for a new permutation of the second `Transpose`. + perm2 = second_transpose.tmp_inputs[1].tmp_buffer.data + combined_perm = np.array(combine_permutations(perm1, perm2), np.int32) + second_transpose.tmp_inputs[1] = self._builder.create_tensor_for_data( + combined_perm, "perm" + ) + + # Compute the output shape of the second `Transpose`. + new_output_shape = apply_permutation_to(x.shape.vector, combined_perm) + second_transpose.tmp_outputs[0].shape = tflite_model.Shape( + list(new_output_shape) + ) + + # Bypass the first `Transpose`. + second_transpose.tmp_inputs[0] = leading_transpose.tmp_inputs[0] + + to_remove.append(leading_transpose) + + for op in to_remove: + self._builder.get_operators().remove(op) + + return len(to_remove) != 0 + + +class RemoveIdentityTransposeOperators(BaseOptimization): + """Remove operators that match the following pattern. + + │ 'x' + ┌─────▼─────┐ + │ Transpose ◄───── identity permutation + └─────┬─────┘ + │ 'y' + """ + + def __call__(self) -> bool: + matcher = PatternMatcher( + self._builder, + [Op(["Transpose"], ["x", "perm"], ["y"])], + [ + TensorHasData( + "perm" + ), # Note: identity permutation must be checked later. + RuleOr( + TensorIsNotModelOutput("x"), + TensorIsNotModelOutput("y"), + # If both 'x' and 'y' are model outputs, the `Transpose` cannot be removed. If the op was removed, + # its input and output would be combined into 1 tensor, which would have to represent 2 model + # outputs with 2 different names, which is not possible. + ), + ], + ) + + to_remove = [] + for [transpose], tensor_map, input_to_ops, _ in matcher.match_patterns(): + if not self._builder.operator_can_be_skipped(transpose): + continue + + x = tensor_map["x"] + y = tensor_map["y"] + + # Check if the `Transpose` is doing nothing. + permutation = tensor_map["perm"].tmp_buffer.data + if not np.allclose(permutation, range(x.rank)): + # Not and identity permutation. + continue + + model_outputs = self._builder.get_sub_graph().outputs.tmp_outputs + + # Replace `y` with `x` in the inputs of all following operators. + following_ops = input_to_ops.get(y.name, []) + for op in following_ops: + while y in op.tmp_inputs: + input_idx = op.tmp_inputs.index(y) + op.tmp_inputs[input_idx] = x + + if y in model_outputs: + # Replace the output as well. 
+ while y in model_outputs: + idx = model_outputs.index(y) + model_outputs[idx] = x + + self._builder.swap_tensor_names(x, y) + + to_remove.append(transpose) + + for op in to_remove: + self._builder.get_operators().remove(op) + + return len(to_remove) != 0 diff --git a/backends/nxp/backend/ir/tflite_optimizer/optimizations/remove_unused_tensors_and_buffers.py b/backends/nxp/backend/ir/tflite_optimizer/optimizations/remove_unused_tensors_and_buffers.py new file mode 100755 index 00000000000..105c06c3709 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_optimizer/optimizations/remove_unused_tensors_and_buffers.py @@ -0,0 +1,62 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from executorch.backends.nxp.backend.ir.tflite_generator import tflite_model +from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.base_optimization import ( + BaseOptimization, +) + + +class RemoveUnusedTensorsAndBuffers(BaseOptimization): + + def _get_used_tensors_and_buffers( + self, + ) -> (set[tflite_model.Tensor], set[tflite_model.Buffer]): + """Get a set of all tensors used by the operators in the model, and a set of all buffers used by these tensors.""" + used_tensors = set() + used_buffers = set() + + for op in self._builder.get_operators(): + for tensor in op.tmp_inputs + op.tmp_outputs: + used_tensors.add(tensor) + if tensor.tmp_buffer is not None: + used_buffers.add(tensor.tmp_buffer) + + return used_tensors, used_buffers + + def __call__(self) -> bool: + """Remove all tensors and buffers from the model, that are not used. + :return: True, if any tensors/buffers were removed. Otherwise, False. + """ + + used_tensors, used_buffers = self._get_used_tensors_and_buffers() + + made_changes = False + model_inputs = self._builder.get_sub_graph().inputs.tmp_inputs + to_remove = [] + for tensor in self._builder.get_tensors(): + if tensor not in used_tensors: + if tensor in model_inputs: + # It is possible that an input tensor ended up not being used by any operators. But removing it from + # the model would cause errors at runtime, so it must stay. + pass + + else: + to_remove.append(tensor) + + for tensor in to_remove: + made_changes = True + self._builder.get_tensors().remove(tensor) + + to_remove = [] + for buffer in self._builder.get_buffers().vector: + if buffer not in used_buffers: + to_remove.append(buffer) + + for buffer in to_remove: + made_changes = True + self._builder.get_buffers().remove(buffer) + + return made_changes diff --git a/backends/nxp/backend/ir/tflite_optimizer/optimizations/replace_average_pool_before_fully_connected_with_sum.py b/backends/nxp/backend/ir/tflite_optimizer/optimizations/replace_average_pool_before_fully_connected_with_sum.py new file mode 100755 index 00000000000..0b3926dd8a5 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_optimizer/optimizations/replace_average_pool_before_fully_connected_with_sum.py @@ -0,0 +1,164 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
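+
+# Why the rewrite is valid (illustrative): a global `AveragePool2D` computes
+#     avg(x) = sum(x) / (H * W)
+# over the spatial dimensions, and a following `FullyConnected` is linear, so
+#     W @ avg(x) + b == (W / (H * W)) @ sum(x) + b.
+# Dividing the static weights by the kernel element count therefore lets a
+# `Sum(axes=[1, 2], keep_dims=False)` replace both the pooling and the 2D
+# `Reshape`.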
+ +import numpy as np +from executorch.backends.nxp.backend.ir import logger + +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, +) +from executorch.backends.nxp.backend.ir.lib.tflite.TensorType import TensorType +from executorch.backends.nxp.backend.ir.tflite_generator import tflite_model +from executorch.backends.nxp.backend.ir.tflite_generator.builtin_options.sum_options import ( + Sum, +) +from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.base_optimization import ( + BaseOptimization, +) +from executorch.backends.nxp.backend.ir.tflite_optimizer.pattern_matcher import ( + Op, + PatternMatcher, +) +from executorch.backends.nxp.backend.ir.tflite_optimizer.tensor_rules import ( + RuleOr, + TensorDimensionsMatch, + TensorHasData, + TensorHasRank, + TensorIsChannelsLast, + TensorIsFormatless, + TensorsAreQuantized, + TensorsHaveOneConsumer, + TensorsHaveType, +) + + +class ReplaceAveragePoolBeforeFullyConnectedWithSum(BaseOptimization): + """Replace `AveragePool2D` and `Reshape` with `Sum` in the following pattern. + │ + ┌────────▼────────┐ + │ AveragePool2D │ (global kernel) │ + └────────┬────────┘ ┌───▼───┐ + │ (4D, channels last) │ Sum │ + ┌─────▼─────┐ └───┬───┘ + │ Reshape │ ─────► │ + └─────┬─────┘ ┌────────▼─────────┐ + │ (2D, formatless) │ FullyConnected ◄───── Scaled weights + ┌────────▼───────┐ └────────┬─────────┘ + │ FullyConnected ◄───── Weights (static) + └────────┬───────┘ + │ + + This is possible if the `AveragePool2D` is pooling across the entire input (i.e. global AveragePool). In this + case, it is possible to use a `Sum` operator instead, and then statically divide the `weights` of the + `FullyConnected`. This will effectively compute the average across the input at runtime. + This replacement becomes useful when there is a `Reshape` between, which flattens the tensor to 2D. This + flattening can be done by the `Sum` operator as well (parameter `keep_dims=False`). + As a result, the `Reshape` must simply remove the `1`s in the spatial dimensions, and keep the `batch size` and + `channels` unchanged. + """ + + def __call__(self) -> bool: + matcher = PatternMatcher( + self._builder, + [ + Op(["AveragePool2D"], ["x"], ["ap_out"]), + Op(["Reshape"], ["ap_out", ...], ["resh_out"]), + Op(["FullyConnected"], ["resh_out", "w", ...], ["y"]), + ], + [ + # Require either float32, or quantized tensors. + RuleOr( + TensorsHaveType(["w", "resh_out"], TensorType.FLOAT32), + TensorsAreQuantized(["w", "resh_out"]), + ), + TensorsHaveOneConsumer(["x", "ap_out", "resh_out"]), + TensorIsChannelsLast("ap_out"), + TensorHasRank("resh_out", 2), + TensorIsFormatless("resh_out"), + TensorHasRank("w", 2), + TensorHasData("w"), + TensorDimensionsMatch( + "ap_out", 0, "resh_out", 0 + ), # Batch size unchanged. + TensorDimensionsMatch( + "ap_out", -1, "resh_out", -1 + ), # Channels unchanged. + ], + ) + + # The mapped operator (value) will later be added into the TFLite model, in front of the `key` operator. + to_add: dict[tflite_model.Operator, tflite_model.Operator] = {} + to_remove = [] + for [ap, reshape, fc], tensor_map, _, _ in matcher.match_patterns(): + x, resh_out, w = tensor_map["x"], tensor_map["resh_out"], tensor_map["w"] + + kernel_shape = [ap.builtin_options.filter_h, ap.builtin_options.filter_w] + if kernel_shape != x.shape[1:3]: + continue # Not a global average pool. + + # Divide the static FullyConnected weights by the number of kernel elements. This will transform the `sums` + # to `averages` at runtime. 
+            num_kernel_elements = np.prod(kernel_shape).astype("float32")
+            new_w = self._builder.duplicate_tensor(w)
+            if w.type == TensorType.FLOAT32:
+                # Just divide the weights.
+                new_w.tmp_buffer.data = np.array(
+                    new_w.tmp_buffer.data / num_kernel_elements
+                ).astype("float32")
+
+            elif w.quantization is not None:
+                # Divide the `scale` quantization parameter instead of the data. Since the `weights` are static,
+                # changing the `scale` while keeping the raw quantized data the same changes the actual values
+                # that the tensor represents.
+                new_w.quantization.scale.vector = [
+                    s / num_kernel_elements for s in new_w.quantization.scale.vector
+                ]
+
+                # Since the output of the `Sum` will now contain the `sums` of its input and not the `averages`, its
+                # `scale` quantization parameter is not ideal. Multiply the `scale` by the number of elements of the
+                # kernel to maintain the same accuracy.
+                resh_out.quantization.scale.vector = [
+                    s * num_kernel_elements for s in resh_out.quantization.scale.vector
+                ]
+
+            else:
+                # Should never happen. Raise an exception to notify us just in case.
+                logger.e(
+                    logger.Code.INTERNAL_ERROR,
+                    "ReplaceAveragePoolBeforeFullyConnectedWithSum: Unexpected type.",
+                )
+
+            fc.tmp_inputs[1] = (
+                new_w  # Replace the `weights` of the `FullyConnected` with the scaled version.
+            )
+
+            # Reduce over the spatial dimensions.
+            axes = self._builder.create_tensor_for_data(
+                np.array([1, 2], "int32"), "axes"
+            )
+
+            sum_op = tflite_model.Operator(
+                builtin_options=Sum(keep_dims=False),
+                opcode_index=self._builder.op_code_index_for_op_type(
+                    BuiltinOperator.SUM
+                ),
+            )
+            sum_op.tmp_inputs = [x, axes]
+            sum_op.tmp_outputs = [resh_out]
+
+            to_add[fc] = sum_op
+            to_remove.extend([ap, reshape])
+
+        # Add the new `Sum` operators into the model.
+        ops = self._builder.get_operators()
+        for k, sum_op in to_add.items():
+            idx = ops.index(k)
+            ops.insert(idx, sum_op)
+
+        # Remove the `AveragePool` and `Reshape` operators from the model.
+        for op in to_remove:
+            ops.remove(op)
+
+        return len(to_remove) != 0
diff --git a/backends/nxp/backend/ir/tflite_optimizer/optimizer.py b/backends/nxp/backend/ir/tflite_optimizer/optimizer.py
new file mode 100755
index 00000000000..fc94656ac74
--- /dev/null
+++ b/backends/nxp/backend/ir/tflite_optimizer/optimizer.py
@@ -0,0 +1,229 @@
+#
+# Copyright 2023 Martin Pavella
+# Copyright 2024 NXP
+#
+# License: MIT
+# See the LICENSE_MIT for more details.
+#
+
+from enum import Enum
+from typing import Callable
+
+from executorch.backends.nxp.backend.ir import logger
+from executorch.backends.nxp.backend.ir.conversion_config import ConversionConfig
+from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.combine_hard_sigmoid_and_mul_to_hard_swish import (
+    CombineHardSigmoidAndMulIntoHardSwish,
+)
+from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.eliminate_dead_branches import (
+    EliminateDeadBranches,
+)
+from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.fuse_activation_functions import (
+    FuseActivationFunctions,
+)
+from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.fuse_fully_connected_and_add_operators import (
+    FuseFullyConnectedAndAddOperators,
+)
+from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.fuse_quanitze_into_preceding_ops import (
+    FuseQuantizeIntoPrecedingOps,
+)
+from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.keep_one_empty_buffer import (
+    KeepOneEmptyBuffer,
+)
+from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.move_relu_before_concat import (
+    MoveActivationBeforeConcatenation,
+)
+from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.permute_fully_connected_weights_after_reshape import (
+    PermuteFullyConnectedWeightsAfterReshape,
+)
+from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.prune_cast_operators import (
+    FuseCastOperators,
+    RemoveCastOperatorsWithNoEffect,
+)
+from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.prune_quantize_operators import (
+    FuseParallelQuantizeOperators,
+    PruneQuantizeOperators,
+)
+from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.prune_reshape_operators import (
+    FuseReshapeOperators,
+    RemoveReshapeOperatorsWithNoEffect,
+)
+from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.prune_transpose_operators import (
+    FuseTransposeOperators,
+    RemoveIdentityTransposeOperators,
+)
+from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.remove_unused_tensors_and_buffers import (
+    RemoveUnusedTensorsAndBuffers,
+)
+from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.replace_average_pool_before_fully_connected_with_sum import (
+    ReplaceAveragePoolBeforeFullyConnectedWithSum,
+)
+
+
+class Optimization(Enum):
+    KEEP_ONE_EMPTY_BUFFER = 0
+    FUSE_ACTIVATION_FUNCTIONS = 1
+    FUSE_FULLY_CONNECTED_AND_ADD = 2
+
+    FUSE_RESHAPE_OPERATORS = 3
+    REMOVE_RESHAPE_OPERATORS_WITH_NO_EFFECT = 4
+
+    FUSE_TRANSPOSE_OPERATORS = 5
+    REMOVE_IDENTITY_TRANSPOSE_OPERATORS = 6
+
+    PRUNE_QUANTIZE_OPERATORS = 7
+    FUSE_PARALLEL_QUANTIZE_OPERATORS = 8
+    FUSE_QUANTIZE_INTO_PRECEDING_OPS = 9
+
+    REMOVE_UNUSED_TENSORS = 10
+    ELIMINATE_DEAD_BRANCHES = 11
+    PERMUTE_FULLY_CONNECTED_WEIGHTS_AFTER_RESHAPE = 12
+
+    FUSE_CAST_OPERATORS = 13
+    REMOVE_CAST_OPERATORS_WITH_NO_EFFECT = 14
+
+    MOVE_ACTIVATION_BEFORE_CONCAT = 15
+    COMBINE_HARD_SIGMOID_AND_MUL_INTO_HARD_SWISH = 16
+    REPLACE_AVERAGE_POOL_BEFORE_FULLY_CONNECTED_WITH_SUM = 17
+
+
+class Optimizer:
+    """
+    This class provides methods to optimize a TFLite model. To do so, it uses a ModelBuilder object, which
+    encapsulates the TFLite model.
+
+    A lot of these methods were implemented a while ago and are not very efficient. Some of them may also not cover
+    all edge cases.
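+
+    Typical usage (an illustrative sketch, not the only entry point):
+        Optimizer(builder, conversion_config).optimize()
+    This applies all optimizations repeatedly, until a fixed point or the application limit is reached.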
+ """ + + # avoid circular dependency with importing the model_builder but allow typehints + _builder: "model_builder.ModelBuilder" # noqa F821 + + # Dictionary which maps optimizations to methods which implement them + optimization_map: dict[Optimization, Callable] + + # As long as the model is being modified, optimizations will be applied again and again. This variable is the hard + # limit to the number of times any single optimization is applied. + optimization_application_limit = 10 # Empirical value. + + def __init__( + self, + builder: "model_builder.ModelBuilder", # noqa F821 + conversion_config: ConversionConfig, + ): + self._builder = builder + + self.optimization_map = { + Optimization.KEEP_ONE_EMPTY_BUFFER: KeepOneEmptyBuffer( + builder, conversion_config + ), + Optimization.FUSE_ACTIVATION_FUNCTIONS: FuseActivationFunctions( + builder, conversion_config + ), + Optimization.FUSE_FULLY_CONNECTED_AND_ADD: FuseFullyConnectedAndAddOperators( + builder, conversion_config + ), + Optimization.FUSE_RESHAPE_OPERATORS: FuseReshapeOperators( + builder, conversion_config + ), + Optimization.REMOVE_RESHAPE_OPERATORS_WITH_NO_EFFECT: RemoveReshapeOperatorsWithNoEffect( + builder, conversion_config + ), + Optimization.FUSE_TRANSPOSE_OPERATORS: FuseTransposeOperators( + builder, conversion_config + ), + Optimization.REMOVE_IDENTITY_TRANSPOSE_OPERATORS: RemoveIdentityTransposeOperators( + builder, conversion_config + ), + Optimization.PRUNE_QUANTIZE_OPERATORS: PruneQuantizeOperators( + builder, conversion_config + ), + Optimization.FUSE_PARALLEL_QUANTIZE_OPERATORS: FuseParallelQuantizeOperators( + builder, conversion_config + ), + Optimization.FUSE_QUANTIZE_INTO_PRECEDING_OPS: FuseQuantizeIntoPrecedingOps( + builder, conversion_config + ), + Optimization.REMOVE_UNUSED_TENSORS: RemoveUnusedTensorsAndBuffers( + builder, conversion_config + ), + Optimization.ELIMINATE_DEAD_BRANCHES: EliminateDeadBranches( + builder, conversion_config + ), + Optimization.PERMUTE_FULLY_CONNECTED_WEIGHTS_AFTER_RESHAPE: PermuteFullyConnectedWeightsAfterReshape( + builder, conversion_config + ), + Optimization.FUSE_CAST_OPERATORS: FuseCastOperators( + builder, conversion_config + ), + Optimization.REMOVE_CAST_OPERATORS_WITH_NO_EFFECT: RemoveCastOperatorsWithNoEffect( + builder, conversion_config + ), + Optimization.MOVE_ACTIVATION_BEFORE_CONCAT: MoveActivationBeforeConcatenation( + builder, conversion_config + ), + Optimization.COMBINE_HARD_SIGMOID_AND_MUL_INTO_HARD_SWISH: CombineHardSigmoidAndMulIntoHardSwish( + builder, conversion_config + ), + Optimization.REPLACE_AVERAGE_POOL_BEFORE_FULLY_CONNECTED_WITH_SUM: ReplaceAveragePoolBeforeFullyConnectedWithSum( + builder, conversion_config + ), + } + + def optimize( + self, + optimization_whitelist: list[Optimization] | None = None, + optimization_blacklist: list[Optimization] | None = None, + ): + """Apply optimizations to the TFLite model encapsulated by 'self._builder'. + :param optimization_whitelist: A list of optimizations to apply to the model. + :param optimization_blacklist: A list of optimizations to NOT apply to the model. + + At least one of 'optimization_whitelist' and 'optimization_blacklist' must be 'None'. + If both are 'None', all optimizations are applied. + + The optimizations will be applied multiple times in a loop, until the model is fully optimized. 
+ """ + + if optimization_whitelist is not None and optimization_blacklist is not None: + logger.e( + logger.Code.INVALID_OPTIMIZATION, + "Optimization whitelist and blacklist cannot both be specified.", + ) + + if optimization_whitelist is not None: + optimizations = optimization_whitelist + else: + # Apply all optimizations + optimizations = list(Optimization) + + if optimization_blacklist is not None: + for o in optimization_blacklist: + try: + optimizations.remove(o) + except ValueError: + logger.w( + f"Optimization blacklist contains invalid optimization '{o}'." + ) + + # Execute the optimizations until the model is fully optimized. + for _i in range(self.optimization_application_limit): + run_again = False + + for optimization in optimizations: + if optimization not in self.optimization_map.keys(): + logger.e( + logger.Code.INVALID_OPTIMIZATION, + f"The converter doesn't recognise the '{optimization}' optimization.", + ) + + # Call the optimization + made_changes = self.optimization_map[optimization]() + logger.internal_assert( + type(made_changes) is bool, + f"Optimization `{optimization}` didn't return bool.", + ) + run_again |= made_changes + + if not run_again: + # The model is now fully optimized. + break diff --git a/backends/nxp/backend/ir/tflite_optimizer/pattern_matcher.py b/backends/nxp/backend/ir/tflite_optimizer/pattern_matcher.py new file mode 100755 index 00000000000..84d65d817b2 --- /dev/null +++ b/backends/nxp/backend/ir/tflite_optimizer/pattern_matcher.py @@ -0,0 +1,921 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from abc import ABC, abstractmethod +from collections import defaultdict +from dataclasses import dataclass +from typing import cast, Iterator, Tuple + +import executorch.backends.nxp.backend.ir.converter.builder.model_builder as model_builder +from executorch.backends.nxp.backend.ir import logger +from executorch.backends.nxp.backend.ir.tflite_generator import tflite_model +from executorch.backends.nxp.backend.ir.tflite_optimizer.graph_utils import ( + builtin_operator_for_op_type, + create_tensor_to_operator_dictionaries, + InputTensorToOpsMap, + NameToTensorMap, + operator_is_type, + OutputTensorToOpMap, +) +from executorch.backends.nxp.backend.ir.tflite_optimizer.operator_rules import OpRule +from executorch.backends.nxp.backend.ir.tflite_optimizer.tensor_rules import TensorRule + + +class OperatorBlock(ABC): + @abstractmethod + def validate(self): + pass + + +@dataclass +class OpLikeBlock(OperatorBlock): + ops: list[str] | None = None + inputs: list[str | None] | None = None + outputs: list[str | None] | None = None + op_rules: list[OpRule] | None = None + + def validate(self): + """Check if the `Op` follows the limitations of the PatternMatcher. + If it doesn't exit with error and a corresponding message. + """ + + # `...` can only be used at the start or end of the inputs/outputs. + if len(self.inputs_as_list()) > 2: + logger.internal_assert( + ... not in self.inputs_as_list()[1:-1], + "PatternMatcher: The `...` can only be used " + "at the start and/or end of the inputs.", + ) + if len(self.outputs_as_list()) > 2: + logger.internal_assert( + ... not in self.outputs_as_list()[1:-1], + "PatternMatcher: The `...` can only be used" + " at the start and/or end of the outputs.", + ) + + def inputs_as_list(self) -> list[str | None]: + """Return the `inputs` attribute. 
If it's `None`, return `[]`.""" + if self.inputs is None: + return [] + return self.inputs + + def outputs_as_list(self) -> list[str | None]: + """Return the `outputs` attribute. If it's `None`, return `[]`.""" + if self.outputs is None: + return [] + return self.outputs + + def io_as_list(self) -> list[str | None]: + """Return the `inputs` and `outputs` attributes combined into 1. If they are `None`, return `[]`.""" + return self.inputs_as_list() + self.outputs_as_list() + + +@dataclass +class Op(OpLikeBlock): + """Class represents 1 operator.""" + + def match( + self, + real_op: tflite_model.Operator, + tensor_map: NameToTensorMap, + input_to_ops_map: InputTensorToOpsMap, + output_to_op_map: OutputTensorToOpMap, + builder: "model_builder.ModelBuilder", + ) -> bool: + """Try to match the `Op` with a real TFLite Operator. If the match is successful, add new mappings for matched + tensors into the tensor_map`. + :return: True, if the `Op` was successfully matched. Otherwise, return False. + """ + + # noinspection PyBroadException + try: + if not self._op_type_matches(real_op, builder): + return False + + tensor_map_copy = tensor_map.copy() # Use a copy in case the match fails. + + if not self._match_inputs(real_op, tensor_map_copy): + return False + + if not self._match_outputs(real_op, tensor_map_copy): + return False + + if not self._op_rules_satisfied( + real_op, tensor_map_copy, input_to_ops_map, output_to_op_map, builder + ): + return False + + # Operator matched. + tensor_map.update(tensor_map_copy) + return True + + except Exception: + # Unexpected failure. + return False + + def _op_type_matches( + self, real_op: tflite_model.Operator, builder: "model_builder.ModelBuilder" + ) -> bool: + """Check if the type of the TFLite operator `real_op` matches the types defined in this `Op`.""" + if self.ops is None: + return True + + return any(operator_is_type(real_op, op_type, builder) for op_type in self.ops) + + def _op_rules_satisfied( + self, + real_op: tflite_model.Operator, + tensor_map: NameToTensorMap, + input_to_ops_map: InputTensorToOpsMap, + output_to_op_map: OutputTensorToOpMap, + builder: "model_builder.ModelBuilder", + ) -> bool: + """Check if all operator rules defined for this `Op` are satisfied.""" + if self.op_rules is None: + return True + + return all( + rule(real_op, tensor_map, input_to_ops_map, output_to_op_map, builder) + for rule in self.op_rules + ) + + def _match_inputs( # noqa C901 + self, real_op: tflite_model.Operator, tensor_map: NameToTensorMap + ) -> bool: + """Check if it is possible to match the input tensors of the TFLite operator `real_op` with the ones + defined for this `Op`. + New mappings may be added into the `tensor_map`. + """ + if self.inputs is None: + return True + + num_real_inputs = len(real_op.tmp_inputs) + + step = 1 + real_input_index = 0 + inputs = self.inputs + if inputs[0] is ... and inputs[-1] is not ...: + # The `...` is used only at the start. In this case, iterate through the inputs from the end. + step = -1 + real_input_index = num_real_inputs - 1 + inputs = reversed(inputs) + + def _checked_all_inputs(real_input_idx: int) -> bool: + if step == 1: + return real_input_idx >= num_real_inputs + elif step == -1: + return real_input_idx < 0 + else: + raise ValueError + + can_skip = False + for inpt in inputs: + if _checked_all_inputs(real_input_index) and (inpt is not ...): + return False # The inputs don't match + + if inpt is ...: + can_skip = True + continue + + elif inpt is None: + # The tensor is not named, but must be there. 
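+                # For example (illustrative): with `inputs=["x", None]`, the second input must be present,
+                # but it is not bound to any name in the tensor map.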
+ real_input_index += step + else: + # A tensor name is specified. + real_in = real_op.tmp_inputs[real_input_index] + if inpt in tensor_map.keys(): + # Tensor has already been mapped. + logger.internal_assert( + type(tensor_map[inpt]) is tflite_model.Tensor, + f"PatternMatcher: consuming a set of tensors `{inpt}` is not supported right now.", + ) + if tensor_map[inpt] != real_in: + # The tensor doesn't match the mapped one. + if can_skip: + real_input_index += step + continue + + return False + + else: + # The tensor has been mapped and matches. + real_input_index += step + continue + + # Map the matched tensor. + can_skip = ( + False # Matched a tensor, so the `...` does not apply anymore. + ) + tensor_map[inpt] = real_in + real_input_index += step + + return True + + def _match_outputs( # noqa C901 + self, real_op: tflite_model.Operator, tensor_map: NameToTensorMap + ) -> bool: + """Check if it is possible to match the output tensors of the TFLite operator `real_op` with the ones + defined for this `Op`. + New mappings may be added into the `tensor_map`. + """ + if self.outputs is None: + return True + + num_real_outputs = len(real_op.tmp_outputs) + step = 1 + real_output_index = 0 + outputs = self.outputs + if outputs[0] is ... and outputs[-1] is not ...: + # The `...` is used only at the start. In this case, iterate through the outputs from the end. + step = -1 + real_output_index = num_real_outputs - 1 + outputs = reversed(outputs) + + def _checked_all_outputs(real_output_idx: int) -> bool: + if step == 1: + return real_output_idx >= num_real_outputs + elif step == -1: + return real_output_idx < 0 + else: + raise ValueError + + can_skip = False + for out in outputs: + if _checked_all_outputs(real_output_index) and (out is not ...): + return False # The outputs don't match + + if out is ...: + can_skip = True + continue + + elif out is None: + # The tensor is not named, but must be there. + real_output_index += step + else: + # A tensor name is specified. + real_out = real_op.tmp_outputs[real_output_index] + if out in tensor_map.keys(): + # Tensor has already been mapped. + if tensor_map[out] != real_out: + # The tensor doesn't match. + if can_skip: + real_output_index += step + continue + + return False + else: + # The tensor has been mapped and matches. + real_output_index += step + continue + + # Map the matched tensor. + can_skip = ( + False # Matched a tensor, so the `...` does not apply anymore. + ) + tensor_map[out] = real_out + real_output_index += step + + return True + + +@dataclass +class MultipleSameOps(OpLikeBlock): + """Class represents multiple occurrences of similar operators with the same op type, inputs and outputs.""" + + def match( + self, + real_ops: list[tflite_model.Operator], + tensor_map: NameToTensorMap, + input_to_ops_map: InputTensorToOpsMap, + output_to_op_map: OutputTensorToOpMap, + builder: "model_builder.ModelBuilder", + ) -> bool: + """Try to match the `MultipleSameOps` with real TFLite operators. If the match is successful, add new mappings + for matched tensors into the tensor_map`. + :return: True, if the `MultipleSameOps` was successfully matched. Otherwise, return False. + """ + # noinspection PyBroadException + try: + if len(real_ops) == 0: + return False + + if not self._op_types_match(real_ops, builder): + return False + + tensor_map_copy = tensor_map.copy() # Use a copy in case the match fails. 
+ + if not self._match_inputs(real_ops, tensor_map_copy): + return False + + if not self._match_outputs(real_ops, tensor_map_copy): + return False + + if not self._op_rules_satisfied( + real_ops, tensor_map_copy, input_to_ops_map, output_to_op_map, builder + ): + return False + + # Operator matched. + tensor_map.update(tensor_map_copy) + return True + + except Exception: + # Unexpected failure. + return False + + def validate(self): + super().validate() + logger.internal_assert( + self.ops is not None, + "PatternMatcher: `MultipleSameOps` doesn't support `ops=None` yet.", + ) + + def _op_types_match( + self, + real_ops: list[tflite_model.Operator], + builder: "model_builder.ModelBuilder", + ) -> bool: + """Check if the types of the TFLite operators `real_ops` match the types defined in this `MultipleSameOps`.""" + for real_op in real_ops: + if not any( + operator_is_type(real_op, op_type, builder) for op_type in self.ops + ): + return False + + return True + + def _op_rules_satisfied( + self, + real_ops: list[tflite_model.Operator], + tensor_map: NameToTensorMap, + input_to_ops_map: InputTensorToOpsMap, + output_to_op_map: OutputTensorToOpMap, + builder: "model_builder.ModelBuilder", + ) -> bool: + """Check if all operator rules defined for this `MultipleSameOps` are satisfied for all operators.""" + if self.op_rules is None: + return True + + for real_op in real_ops: + if not all( + rule(real_op, tensor_map, input_to_ops_map, output_to_op_map, builder) + for rule in self.op_rules + ): + return False + + return True + + def _match_inputs( # noqa C901 + self, real_ops: list[tflite_model.Operator], tensor_map: NameToTensorMap + ) -> bool: + """Check if it is possible to match the input tensors of the TFLite operators `real_ops` with the ones + defined for this `MultipleSameOps`. + New mappings may be added into the `tensor_map`. + """ + if self.inputs is None: + return True + + set_of_tensors_map = defaultdict(lambda: []) + + for real_op in real_ops: + num_real_inputs = len(real_op.tmp_inputs) + + step = 1 + real_input_index = 0 + inputs = self.inputs + if inputs[0] is ... and inputs[-1] is not ...: + # The `...` is used only at the start. In this case, iterate through the inputs from the end. + step = -1 + real_input_index = num_real_inputs - 1 + inputs = reversed(inputs) + + def _checked_all_inputs(real_input_idx: int) -> bool: + if step == 1: # noqa B036 + return real_input_idx >= num_real_inputs # noqa B036 + elif step == -1: # noqa B036 + return real_input_idx < 0 + else: + raise ValueError + + can_skip = False + for inpt in inputs: + if _checked_all_inputs(real_input_index) and (inpt is not ...): + return False # The inputs don't match + + if inpt is ...: + can_skip = True + continue + + elif inpt is None: + # The tensor is not named, but must be there. + real_input_index += step + else: + # A tensor name is specified. + real_in = real_op.tmp_inputs[real_input_index] + if inpt in tensor_map.keys(): + # Tensor has already been mapped. + logger.internal_assert( + type(tensor_map[inpt]) is tflite_model.Tensor, + f"PatternMatcher: consuming a set of tensors `{inpt}` is not supported right now.", + ) + if tensor_map[inpt] != real_in: + # The tensor doesn't match the mapped one. + if can_skip: + real_input_index += step + continue + + return False + + else: + # The tensor has been mapped and matches. + real_input_index += step + continue + + # Map the matched tensor. + can_skip = ( + False # Matched a tensor, so the `...` does not apply anymore. 
+ ) + set_of_tensors_map[inpt].append(real_in) + + real_input_index += step + + # The `MultipleSameOps` were matched with `real_ops`. Add the new tensor mappings to the `tensor_map`. + tensor_map.update(set_of_tensors_map) + + return True + + def _match_outputs( + self, real_ops: list[tflite_model.Operator], tensor_map: NameToTensorMap + ) -> bool: + """Check if it is possible to match the output tensors of the TFLite operators `real_ops` with the ones + defined for this `MultipleSameOps`. + New mappings may be added into the `tensor_map`. + """ + if self.outputs is None: + return True + + set_of_tensors_map = defaultdict(lambda: []) + + for real_op in real_ops: + num_real_outputs = len(real_op.tmp_outputs) + + step = 1 + real_output_index = 0 + outputs = self.outputs + if outputs[0] is ... and outputs[-1] is not ...: + # The `...` is used only at the start. In this case, iterate through the outputs from the end. + step = -1 + real_output_index = num_real_outputs - 1 + outputs = reversed(outputs) + + def _checked_all_outputs(real_output_idx: int) -> bool: + if step == 1: # noqa B036 + return real_output_idx >= num_real_outputs # noqa B036 + elif step == -1: # noqa B036 + return real_output_idx < 0 + else: + raise ValueError + + for output in outputs: + if _checked_all_outputs(real_output_index) and (output is not ...): + return False # The outputs don't match + + if output is ...: + continue + + elif output is None: + # The tensor is not named, but must be there. + real_output_index += step + else: + # A tensor name is specified. + real_out = real_op.tmp_outputs[real_output_index] + if output in tensor_map.keys(): + # Tensor has already been mapped. This isn't supported right now. + logger.e( + logger.Code.INTERNAL_ERROR, + "PatternMatcher: MultipleSameOps is producing an already " + f"defined tensor `{output}`, which is not yet supported.", + ) + + # Map the matched tensor. + set_of_tensors_map[output].append(real_out) + + real_output_index += step + + # The `MultipleSameOps` were matched with `real_ops`. Add the new tensor mappings to the `tensor_map`. + tensor_map.update(set_of_tensors_map) + + return True + + +@dataclass() +class OneOf(OperatorBlock): + """Class represents 1 operator, which matches at least 1 of the specified `Op` objects.""" + + # For now, limited to `Op` objects. + one_of_ops: list[Op] + + def validate(self): + for op in self.one_of_ops: + op.validate() + + +# noinspection PyMethodMayBeStatic +class PatternMatcher: + builder: "model_builder.ModelBuilder" + pattern: list[OperatorBlock] + tensor_rules: list[TensorRule] | None + + def __init__( + self, + builder: "model_builder.ModelBuilder", + pattern: list[OperatorBlock], + tensor_rules: list[TensorRule] | None = None, + ): + self.builder = builder + self.pattern = pattern + self.tensor_rules = tensor_rules + + self._validate_pattern() + + def _tensor_rules_satisfied( + self, + tensor_map: NameToTensorMap, + input_to_ops_map: InputTensorToOpsMap, + output_to_op_map: OutputTensorToOpMap, + ) -> bool: + """Check if all currently applicable tensor rules are satisfied.""" + if self.tensor_rules is None: + return True + + for rule in self.tensor_rules: + if rule.is_applicable(tensor_map) and not rule( + tensor_map, input_to_ops_map, output_to_op_map, self.builder + ): + return False # Rule is not satisfied. 
+ + return True + + def _get_opcode_indices_for(self, op_type: str) -> int | None: + builtin_op = builtin_operator_for_op_type(op_type) + return self.builder.op_code_type_index_map.get(builtin_op, None) + + def _validate_pattern(self): + """Make sure the `pattern` is valid according to the limitations of the `PatternMatcher`. + If it isn't, exit with error and a corresponding message. + """ + if len(self.pattern) == 0: + logger.e(logger.Code.INTERNAL_ERROR, "PatternMatcher: empty pattern.") + + if type(self.pattern[0]) is not Op: + logger.e( + logger.Code.INTERNAL_ERROR, + "PatternMatcher: invalid pattern. The first block must be an `Op`.", + ) + + for block in self.pattern: + block.validate() + + def _all_ops_are_in_the_model(self): + """Determine if it is even possible to find a match for the pattern, based on whether the ops in the pattern + are in the model. + """ + + for block in self.pattern: + match block: + case Op(): + op = cast(Op, block) + if op.ops is not None: + if all( + self._get_opcode_indices_for(op_type) is None + for op_type in op.ops + ): + return False + + case MultipleSameOps(): + multiple_same_ops = cast(MultipleSameOps, block) + if all( + self._get_opcode_indices_for(op_type) is None + for op_type in multiple_same_ops.ops + ): + return False + + case OneOf(): + one_of = cast(OneOf, block) + valid = False + for op in one_of.one_of_ops: + if any( + self._get_opcode_indices_for(op_type) is not None + for op_type in op.ops + ): + valid = True + + if not valid: + return False + + return True + + def _extend_pattern_with_op( + self, + op: Op, + real_pattern: list, + tensor_map: NameToTensorMap, + input_to_ops: InputTensorToOpsMap, + output_to_op: OutputTensorToOpMap, + ) -> bool: + """Extend the currently matched pattern in `real_pattern` with an operator represented by `op`. + This function finds a suitable TFLite operator in the model, and adds it to `real_pattern`. + :return: True, if a matching operator was found. Otherwise, False. + """ + if all(tensor not in tensor_map.keys() for tensor in op.io_as_list()): + # The operator is not connected to the already matched part of the pattern. This is not supported. + logger.e( + logger.Code.INTERNAL_ERROR, + f"PatternMatcher: Op on index {len(real_pattern)} is not connected " + "to the preceding operators in the pattern.", + ) + + # The Op is somehow connected to the matched part. + + tensor_map_copy = tensor_map.copy() + + # Check if it is connected via the inputs. + for inpt in op.inputs_as_list(): + if inpt not in tensor_map_copy.keys(): + continue + + # Found connecting input. + connecting_input = tensor_map_copy[inpt] + logger.internal_assert( + type(connecting_input) is tflite_model.Tensor, + f"PatternMatcher: consuming a set of tensors `{inpt}` is not yet supported.", + ) + + following_ops = input_to_ops.get(connecting_input.name, []) + for following_op in following_ops: + if following_op in real_pattern: + continue # This operator has already been matched. + + if op.match( + following_op, + tensor_map_copy, + input_to_ops, + output_to_op, + self.builder, + ) and self._tensor_rules_satisfied( + tensor_map_copy, input_to_ops, output_to_op + ): + # Successful match. + real_pattern.append(following_op) + tensor_map.update(tensor_map_copy) + return True + + else: + tensor_map_copy = ( + tensor_map.copy() + ) # Erase any potential invalid mappings. + + # Try operators connected via the outputs. + for out in op.outputs_as_list(): + if out not in tensor_map_copy.keys(): + continue + + # Found connecting output. 
+            connecting_output = tensor_map_copy[out]
+            preceding_op = output_to_op.get(connecting_output.name, None)
+            if preceding_op is None:
+                continue
+            if preceding_op in real_pattern:
+                continue  # This operator has already been matched.
+            if op.match(
+                preceding_op, tensor_map_copy, input_to_ops, output_to_op, self.builder
+            ) and self._tensor_rules_satisfied(
+                tensor_map_copy, input_to_ops, output_to_op
+            ):
+                # Successful match.
+                real_pattern.append(preceding_op)
+                tensor_map.update(tensor_map_copy)
+                return True
+
+            else:
+                tensor_map_copy = (
+                    tensor_map.copy()
+                )  # Erase any potential invalid mappings.
+
+        return False
+
+    def _extend_pattern_with_multiple_same_ops(
+        self,
+        multiple_same_ops: MultipleSameOps,
+        real_pattern: list,
+        tensor_map: NameToTensorMap,
+        input_to_ops: InputTensorToOpsMap,
+        output_to_op: OutputTensorToOpMap,
+    ) -> bool:
+        """Extend the currently matched pattern in `real_pattern` with multiple operators represented by
+        `multiple_same_ops`.
+        This function finds suitable TFLite operators in the model, and adds them to `real_pattern`.
+        :return: True if matching operators were found. Otherwise, False.
+        """
+        if all(
+            tensor not in tensor_map.keys() for tensor in multiple_same_ops.io_as_list()
+        ):
+            # The `MultipleSameOps` is not connected to the already matched part of the pattern. This is not supported.
+            logger.e(
+                logger.Code.INTERNAL_ERROR,
+                f"PatternMatcher: MultipleSameOps on index {len(real_pattern)} is not "
+                "connected to any preceding Ops in the pattern.",
+            )
+
+        # ---- The MultipleSameOps is somehow connected to the matched part. ----
+
+        tensor_map_copy = tensor_map.copy()
+
+        # Check if it is connected via the inputs.
+        for inpt in multiple_same_ops.inputs_as_list():
+            if inpt not in tensor_map_copy.keys():
+                continue
+
+            # Found connecting input.
+            connecting_input = tensor_map_copy[inpt]
+            following_ops = input_to_ops.get(connecting_input.name, [])
+            logger.internal_assert(
+                type(connecting_input) is tflite_model.Tensor,
+                f"PatternMatcher: consuming a set of tensors `{inpt}` is not yet supported.",
+            )
+
+            # All following ops have to match.
+            if any(following_op in real_pattern for following_op in following_ops):
+                continue  # Some of these operators have already been matched.
+
+            if multiple_same_ops.match(
+                following_ops, tensor_map_copy, input_to_ops, output_to_op, self.builder
+            ) and self._tensor_rules_satisfied(
+                tensor_map_copy, input_to_ops, output_to_op
+            ):
+                # Successful match.
+                real_pattern.append(following_ops)
+                tensor_map.update(tensor_map_copy)
+                return True
+
+            else:
+                tensor_map_copy = (
+                    tensor_map.copy()
+                )  # Erase any potential invalid mappings.
+
+        # `MultipleSameOps` cannot be connected via the outputs.
+        return False
+
+    def _extend_pattern_with_one_of(
+        self,
+        one_of: OneOf,
+        real_pattern: list,
+        tensor_map: NameToTensorMap,
+        input_to_ops: InputTensorToOpsMap,
+        output_to_op: OutputTensorToOpMap,
+    ) -> bool:
+        """Extend the currently matched pattern in `real_pattern` with an operator represented by `one_of`.
+        This function finds a suitable TFLite operator in the model, and adds it to `real_pattern`.
+        :return: True, if a matching operator was found. Otherwise, False.
+        """
+        for op in one_of.one_of_ops:
+            tensor_map_copy = tensor_map.copy()
+            if self._extend_pattern_with_op(
+                op, real_pattern, tensor_map_copy, input_to_ops, output_to_op
+            ):
+                # Successfully matched the `OneOf`.
+                tensor_map.update(tensor_map_copy)
+                return True
+
+        return False
+
+    def _match_rest_of_pattern(
+        self,
+        real_pattern: list,
+        tensor_map: NameToTensorMap,
+        input_to_ops: InputTensorToOpsMap,
+        output_to_op: OutputTensorToOpMap,
+        pattern_idx: int,
+    ):
+        """Provided that a part of the pattern has been matched with operators in the TFLite model, extend this matched
+        `real_pattern` with new TFLite operators that match the rest of the pattern.
+        :param pattern_idx: Index into `self.pattern` of the first block that has not yet been matched.
+        """
+        if pattern_idx >= len(self.pattern):
+            # Successfully matched full pattern.
+            return True
+
+        tensor_map_copy = tensor_map.copy()
+
+        match self.pattern[pattern_idx]:
+            case Op():
+                op = cast(Op, self.pattern[pattern_idx])
+                if self._extend_pattern_with_op(
+                    op, real_pattern, tensor_map_copy, input_to_ops, output_to_op
+                ):
+                    # Successful match.
+                    pattern_idx += 1
+                    tensor_map.update(tensor_map_copy)
+
+                else:
+                    # Failed to match the Op.
+                    return False
+
+            case MultipleSameOps():
+                multiple_same_ops = cast(MultipleSameOps, self.pattern[pattern_idx])
+                if self._extend_pattern_with_multiple_same_ops(
+                    multiple_same_ops,
+                    real_pattern,
+                    tensor_map_copy,
+                    input_to_ops,
+                    output_to_op,
+                ):
+                    # Successful match.
+                    pattern_idx += 1
+                    tensor_map.update(tensor_map_copy)
+
+                else:
+                    # Failed to match the MultipleSameOps.
+                    return False
+
+            case OneOf():
+                one_of = cast(OneOf, self.pattern[pattern_idx])
+                if self._extend_pattern_with_one_of(
+                    one_of, real_pattern, tensor_map_copy, input_to_ops, output_to_op
+                ):
+                    # Successful match.
+                    pattern_idx += 1
+                    tensor_map.update(tensor_map_copy)
+
+                else:
+                    # Failed to match the OneOf.
+                    return False
+
+            case _:
+                logger.e(
+                    logger.Code.INTERNAL_ERROR,
+                    f"PatternMatcher: pattern contains unexpected block `{self.pattern[pattern_idx]}`.",
+                )
+
+        # Matched a block. Recursively match the rest of the pattern.
+        return self._match_rest_of_pattern(
+            real_pattern, tensor_map, input_to_ops, output_to_op, pattern_idx
+        )
+
+    def match_patterns(
+        self,
+    ) -> Iterator[
+        Tuple[
+            list[tflite_model.Operator | list[tflite_model.Operator]],
+            NameToTensorMap,
+            InputTensorToOpsMap,
+            OutputTensorToOpMap,
+        ]
+    ]:
+        """Iterate over the model and yield matched patterns of operators."""
+
+        if not self._all_ops_are_in_the_model():
+            # The model doesn't contain sufficient operators to satisfy the pattern.
+            return
+
+        input_to_ops, output_to_op = create_tensor_to_operator_dictionaries(
+            self.builder
+        )
+
+        real_pattern: list[tflite_model.Operator] = (
+            []
+        )  # List of matched TFLite operators in the TFLite model.
+        tensor_map: NameToTensorMap = {}
+
+        # The first block of a pattern is always an `Op`.
+        first_pattern_op = cast(Op, self.pattern[0])
+
+        for first_real_op in self.builder.get_operators():
+            if first_pattern_op.match(
+                first_real_op, tensor_map, input_to_ops, output_to_op, self.builder
+            ) and self._tensor_rules_satisfied(tensor_map, input_to_ops, output_to_op):
+                # Successful first match.
+                real_pattern.append(first_real_op)
+
+            else:
+                # Mismatch.
+                real_pattern = []
+                tensor_map = {}
+                continue
+
+            # Matched the first `Op`. Now try to match the rest of the pattern.
+            if self._match_rest_of_pattern(
+                real_pattern, tensor_map, input_to_ops, output_to_op, 1
+            ):  # Start from index 1 in the pattern.
+                # Successfully matched full pattern.
+                yield real_pattern, tensor_map, input_to_ops, output_to_op
+
+            # The underlying TFLite model may have been changed.
Re-compute the tensor to operator maps to be safe. + input_to_ops, output_to_op = create_tensor_to_operator_dictionaries( + self.builder + ) + + real_pattern = [] + tensor_map = {} diff --git a/backends/nxp/backend/ir/tflite_optimizer/tensor_rules.py b/backends/nxp/backend/ir/tflite_optimizer/tensor_rules.py new file mode 100755 index 00000000000..270f38f9a0c --- /dev/null +++ b/backends/nxp/backend/ir/tflite_optimizer/tensor_rules.py @@ -0,0 +1,710 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from abc import ABC, abstractmethod +from dataclasses import dataclass + +import executorch.backends.nxp.backend.ir.converter.builder.model_builder as model_builder + +import numpy as np +from executorch.backends.nxp.backend.ir.lib.tflite.TensorType import TensorType +from executorch.backends.nxp.backend.ir.tensor_formatting import TensorFormat +from executorch.backends.nxp.backend.ir.tflite_generator import tflite_model +from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.base_optimization import ( + InputTensorToOpsMap, + OutputTensorToOpMap, +) +from executorch.backends.nxp.backend.ir.tflite_optimizer.pattern_matcher import ( + NameToTensorMap, + operator_is_type, +) + + +class TensorRule(ABC): + @abstractmethod + def __call__( + self, + tensor_map: NameToTensorMap, + input_to_ops: InputTensorToOpsMap, + output_to_op: OutputTensorToOpMap, + builder: "model_builder.ModelBuilder", + ) -> bool: + pass + + @abstractmethod + def is_applicable(self, tensor_map: NameToTensorMap) -> bool: + """Determine if the rule can be tested, based on whether the required tensors have already been mapped.""" + pass + + +class MultipleTensorRule(TensorRule): + @property + @abstractmethod + def rules(self) -> list[TensorRule]: + """The individual tensor rules.""" + pass + + def __call__( + self, + tensor_map: NameToTensorMap, + input_to_ops: InputTensorToOpsMap, + output_to_op: OutputTensorToOpMap, + builder: "model_builder.ModelBuilder", + ) -> bool: + return all( + rule(tensor_map, input_to_ops, output_to_op, builder) for rule in self.rules + ) + + def is_applicable(self, tensor_map: NameToTensorMap) -> bool: + return all(rule.is_applicable(tensor_map) for rule in self.rules) + + +@dataclass +class TensorHasRank(TensorRule): + tensor: str + rank: int + + def __call__( + self, + tensor_map: NameToTensorMap, + input_to_ops: InputTensorToOpsMap, + output_to_op: OutputTensorToOpMap, + builder: "model_builder.ModelBuilder", + ) -> bool: + match tensor_map[self.tensor]: + case tflite_model.Tensor(): + return tensor_map[self.tensor].rank == self.rank + case list(): + return all(t.rank == self.rank for t in tensor_map[self.tensor]) + case _: + raise ValueError + + def is_applicable(self, tensor_map: NameToTensorMap) -> bool: + return self.tensor in tensor_map.keys() + + +@dataclass +class TensorHasData(TensorRule): + tensor: str + + def __call__( + self, + tensor_map: NameToTensorMap, + input_to_ops: InputTensorToOpsMap, + output_to_op: OutputTensorToOpMap, + builder: "model_builder.ModelBuilder", + ) -> bool: + match tensor_map[self.tensor]: + case tflite_model.Tensor(): + return tensor_map[self.tensor].tmp_buffer.data is not None + case list(): + return all( + t.tmp_buffer.data is not None for t in tensor_map[self.tensor] + ) + case _: + raise ValueError + + def is_applicable(self, tensor_map: NameToTensorMap) -> bool: + return self.tensor in tensor_map.keys() + + +@dataclass +class 
TensorsHaveData(MultipleTensorRule):
+    def __init__(self, tensors: list[str]):
+        self._rules = [TensorHasData(t) for t in tensors]
+
+    @property
+    def rules(self) -> list[TensorRule]:
+        return self._rules
+
+
+@dataclass
+class TensorHasStaticValue(TensorRule):
+    # Rule assures that the tensor has a single static value, which is equal to the provided `value`.
+
+    tensor: str
+    value: int | float
+
+    def __call__(
+        self,
+        tensor_map: NameToTensorMap,
+        input_to_ops: InputTensorToOpsMap,
+        output_to_op: OutputTensorToOpMap,
+        builder: "model_builder.ModelBuilder",
+    ) -> bool:
+        match tensor_map[self.tensor]:
+            case tflite_model.Tensor():
+                data = tensor_map[self.tensor].tmp_buffer.data
+                if data is None or data.size > 1:
+                    return False
+
+                return np.allclose(data, np.asarray([self.value], data.dtype))
+
+            case list():
+                for t in tensor_map[self.tensor]:
+                    data = t.tmp_buffer.data
+                    if data is None or data.size > 1:
+                        return False
+
+                    if not np.allclose(data, np.asarray([self.value], data.dtype)):
+                        return False
+
+                return True
+
+            case _:
+                raise ValueError
+
+    def is_applicable(self, tensor_map: NameToTensorMap) -> bool:
+        return self.tensor in tensor_map.keys()
+
+
+@dataclass
+class TensorHasNConsumers(TensorRule):
+    tensor: str
+    n: int
+
+    def __call__(
+        self,
+        tensor_map: NameToTensorMap,
+        input_to_ops: InputTensorToOpsMap,
+        output_to_op: OutputTensorToOpMap,
+        builder: "model_builder.ModelBuilder",
+    ) -> bool:
+        model_outputs = builder.get_sub_graph().outputs.tmp_outputs
+        match tensor_map[self.tensor]:
+            case tflite_model.Tensor():
+                num_consumers = len(input_to_ops.get(tensor_map[self.tensor].name, []))
+                if tensor_map[self.tensor] in model_outputs:
+                    num_consumers += 1
+                return num_consumers == self.n
+
+            case list():
+                for t in tensor_map[self.tensor]:
+                    num_consumers = len(input_to_ops.get(t.name, []))
+                    if t in model_outputs:
+                        num_consumers += 1
+                    if num_consumers != self.n:
+                        return False
+
+                return True
+
+            case _:
+                raise ValueError
+
+    def is_applicable(self, tensor_map: NameToTensorMap) -> bool:
+        return self.tensor in tensor_map.keys()
+
+
+class TensorHasOneConsumer(TensorHasNConsumers):
+    def __init__(self, tensor: str):
+        super().__init__(tensor, 1)
+
+
+class TensorsHaveOneConsumer(MultipleTensorRule):
+    def __init__(self, tensors: list[str]):
+        self._rules = [TensorHasOneConsumer(t) for t in tensors]
+
+    @property
+    def rules(self) -> list[TensorRule]:
+        return self._rules
+
+
+@dataclass
+class TensorConsumedOnlyBy(TensorRule):
+    tensor: str
+    consuming_operator_type: str
+
+    def __call__(
+        self,
+        tensor_map: NameToTensorMap,
+        input_to_ops: InputTensorToOpsMap,
+        output_to_op: OutputTensorToOpMap,
+        builder: "model_builder.ModelBuilder",
+    ) -> bool:
+        match tensor_map[self.tensor]:
+            case tflite_model.Tensor():
+                return all(
+                    operator_is_type(op, self.consuming_operator_type, builder)
+                    for op in input_to_ops.get(tensor_map[self.tensor].name, [])
+                )
+            case list():
+                for t in tensor_map[self.tensor]:
+                    if not all(
+                        operator_is_type(op, self.consuming_operator_type, builder)
+                        for op in input_to_ops.get(t.name, [])
+                    ):
+                        return False
+
+                # All tensors in the set are consumed only by the given operator type.
+                return True
+            case _:
+                raise ValueError
+
+    def is_applicable(self, tensor_map: NameToTensorMap) -> bool:
+        return self.tensor in tensor_map.keys()
+
+
+@dataclass
+class TensorDimensionsMatch(TensorRule):
+    tensor_1: str
+    dim_idx_1: int
+
+    tensor_2: str
+    dim_idx_2: int
+
+    def __call__(
+        self,
+        tensor_map: NameToTensorMap,
+        input_to_ops: InputTensorToOpsMap,
+        output_to_op: OutputTensorToOpMap,
+        builder:
"model_builder.ModelBuilder", + ) -> bool: + t1 = tensor_map[self.tensor_1] + t2 = tensor_map[self.tensor_2] + + if (type(t1), type(t2)) != (tflite_model.Tensor, tflite_model.Tensor): + raise NotImplementedError( + "Tensor rule `TensorDimensionsMatch` is not implemented for sets of tensors." + ) + + if (not t1.shape.is_well_defined()) or (not t2.shape.is_well_defined()): + return False + + return t1.shape[self.dim_idx_1] == t2.shape[self.dim_idx_2] + + def is_applicable(self, tensor_map: NameToTensorMap) -> bool: + return self.tensor_1 in tensor_map.keys() and self.tensor_2 in tensor_map.keys() + + +@dataclass +class TensorHasDimensionOfSize(TensorRule): + tensor: str + dim_index: int + dim_size: int + + def __call__( + self, + tensor_map: NameToTensorMap, + input_to_ops: InputTensorToOpsMap, + output_to_op: OutputTensorToOpMap, + builder: "model_builder.ModelBuilder", + ) -> bool: + match tensor_map[self.tensor]: + case tflite_model.Tensor(): + return tensor_map[self.tensor].shape[self.dim_index] == self.dim_size + + case list(): + return all( + t.shape[self.dim_index] == self.dim_size + for t in tensor_map[self.tensor] + ) + + case _: + raise ValueError + + def is_applicable(self, tensor_map: NameToTensorMap) -> bool: + return self.tensor in tensor_map.keys() + + +@dataclass +class TensorsHaveSameShape(TensorRule): + tensors: list[str] + + def __call__( + self, + tensor_map: NameToTensorMap, + input_to_ops: InputTensorToOpsMap, + output_to_op: OutputTensorToOpMap, + builder: "model_builder.ModelBuilder", + ) -> bool: + mapped_tensors = [tensor_map[tensor] for tensor in self.tensors] + if any(type(t) is not tflite_model.Tensor for t in mapped_tensors): + raise NotImplementedError( + "Tensor rule `TensorsHaveSameShape` is not implemented for sets of tensors." + ) + + if not all(t.shape.is_well_defined() for t in mapped_tensors): + # Not all shapes are known. + return False + + if len(self.tensors) == 0: + return True + + first_shape = mapped_tensors[0].shape + return all(t.shape == first_shape for t in mapped_tensors) + + def is_applicable(self, tensor_map: NameToTensorMap) -> bool: + return all(tensor in tensor_map.keys() for tensor in self.tensors) + + +@dataclass +class TensorsHaveSameType(TensorRule): + tensors: list[str] + + def __call__( + self, + tensor_map: NameToTensorMap, + input_to_ops: InputTensorToOpsMap, + output_to_op: OutputTensorToOpMap, + builder: "model_builder.ModelBuilder", + ) -> bool: + if len(self.tensors) == 0: + return True + + mapped_tensors = [tensor_map[tensor] for tensor in self.tensors] + if any(type(t) is not tflite_model.Tensor for t in mapped_tensors): + raise NotImplementedError( + "Tensor rule `TensorsHaveSameType` is not implemented for sets of tensors." 
+ ) + + first_type = mapped_tensors[0].type + return all(t.type == first_type for t in mapped_tensors) + + def is_applicable(self, tensor_map: NameToTensorMap) -> bool: + return all(tensor in tensor_map.keys() for tensor in self.tensors) + + +@dataclass +class RuleIf(TensorRule): + condition_rule: TensorRule + body_rule: TensorRule + + def __call__( + self, + tensor_map: NameToTensorMap, + input_to_ops: InputTensorToOpsMap, + output_to_op: OutputTensorToOpMap, + builder: "model_builder.ModelBuilder", + ) -> bool: + if self.condition_rule(tensor_map, input_to_ops, output_to_op, builder): + return self.body_rule(tensor_map, input_to_ops, output_to_op, builder) + + return True + + def is_applicable(self, tensor_map: NameToTensorMap) -> bool: + return self.condition_rule.is_applicable( + tensor_map + ) and self.body_rule.is_applicable(tensor_map) + + +class RuleOr(TensorRule): + + def __init__(self, *rules: TensorRule): + self.rules = list(rules) + + def __call__( + self, + tensor_map: NameToTensorMap, + input_to_ops: InputTensorToOpsMap, + output_to_op: OutputTensorToOpMap, + builder: "model_builder.ModelBuilder", + ) -> bool: + return any( + rule(tensor_map, input_to_ops, output_to_op, builder) for rule in self.rules + ) + + def is_applicable(self, tensor_map: NameToTensorMap) -> bool: + return all(rule.is_applicable(tensor_map) for rule in self.rules) + + +class RuleAnd(TensorRule): + + def __init__(self, *rules: TensorRule): + self.rules = list(rules) + + def __call__( + self, + tensor_map: NameToTensorMap, + input_to_ops: InputTensorToOpsMap, + output_to_op: OutputTensorToOpMap, + builder: "model_builder.ModelBuilder", + ) -> bool: + return all( + rule(tensor_map, input_to_ops, output_to_op, builder) for rule in self.rules + ) + + def is_applicable(self, tensor_map: NameToTensorMap) -> bool: + return all(rule.is_applicable(tensor_map) for rule in self.rules) + + +@dataclass +class TensorHasType(TensorRule): + tensor: str + type_: TensorType + + def __call__( + self, + tensor_map: NameToTensorMap, + input_to_ops: InputTensorToOpsMap, + output_to_op: OutputTensorToOpMap, + builder: "model_builder.ModelBuilder", + ) -> bool: + match tensor_map[self.tensor]: + case tflite_model.Tensor(): + return tensor_map[self.tensor].type == self.type_ + case list(): + return all(t.type == self.type_ for t in tensor_map[self.tensor]) + case _: + raise ValueError + + def is_applicable(self, tensor_map: NameToTensorMap) -> bool: + return self.tensor in tensor_map.keys() + + +@dataclass +class TensorsHaveType(MultipleTensorRule): + def __init__(self, tensors: list[str], type_: TensorType): + self._rules = [TensorHasType(t, type_) for t in tensors] + + @property + def rules(self) -> list[TensorRule]: + return self._rules + + +@dataclass +class TensorIsChannelsLast(TensorRule): + tensor: str + + def __call__( + self, + tensor_map: NameToTensorMap, + input_to_ops: InputTensorToOpsMap, + output_to_op: OutputTensorToOpMap, + builder: "model_builder.ModelBuilder", + ) -> bool: + match tensor_map[self.tensor]: + case tflite_model.Tensor(): + return tensor_map[self.tensor].tensor_format.is_channels_last() + case list(): + return all( + t.tensor_format.is_channels_last() for t in tensor_map[self.tensor] + ) + case _: + raise ValueError + + def is_applicable(self, tensor_map: NameToTensorMap) -> bool: + return self.tensor in tensor_map.keys() + + +@dataclass +class TensorIsChannelsFirst(TensorRule): + tensor: str + + def __call__( + self, + tensor_map: NameToTensorMap, + input_to_ops: InputTensorToOpsMap, + 
output_to_op: OutputTensorToOpMap, + builder: "model_builder.ModelBuilder", + ) -> bool: + match tensor_map[self.tensor]: + case tflite_model.Tensor(): + return tensor_map[self.tensor].tensor_format.is_channels_first() + case list(): + return all( + t.tensor_format.is_channels_first() for t in tensor_map[self.tensor] + ) + case _: + raise ValueError + + def is_applicable(self, tensor_map: NameToTensorMap) -> bool: + return self.tensor in tensor_map.keys() + + +@dataclass +class TensorIsFormatless(TensorRule): + tensor: str + + def __call__( + self, + tensor_map: NameToTensorMap, + input_to_ops: InputTensorToOpsMap, + output_to_op: OutputTensorToOpMap, + builder: "model_builder.ModelBuilder", + ) -> bool: + match tensor_map[self.tensor]: + case tflite_model.Tensor(): + return tensor_map[self.tensor].tensor_format == TensorFormat.FORMATLESS + case list(): + return all( + t.tensor_format == TensorFormat.FORMATLESS + for t in tensor_map[self.tensor] + ) + case _: + raise ValueError + + def is_applicable(self, tensor_map: NameToTensorMap) -> bool: + return self.tensor in tensor_map.keys() + + +@dataclass +class TensorIsQuantized(TensorRule): + tensor: str + + def __call__( + self, + tensor_map: NameToTensorMap, + input_to_ops: InputTensorToOpsMap, + output_to_op: OutputTensorToOpMap, + builder: "model_builder.ModelBuilder", + ) -> bool: + match tensor_map[self.tensor]: + case tflite_model.Tensor(): + return tensor_map[self.tensor].quantization is not None + case list(): + return all(t.quantization is not None for t in tensor_map[self.tensor]) + case _: + raise ValueError + + def is_applicable(self, tensor_map: NameToTensorMap) -> bool: + return self.tensor in tensor_map.keys() + + +@dataclass +class TensorIsNotQuantized(TensorRule): + tensor: str + + def __call__( + self, + tensor_map: NameToTensorMap, + input_to_ops_map: InputTensorToOpsMap, + output_to_op_map: OutputTensorToOpMap, + builder: "model_builder.ModelBuilder", + ) -> bool: + match tensor_map[self.tensor]: + case tflite_model.Tensor(): + return tensor_map[self.tensor].quantization is None + case list(): + return all(t.quantization is None for t in tensor_map[self.tensor]) + case _: + raise ValueError + + def is_applicable(self, tensor_map: NameToTensorMap) -> bool: + return self.tensor in tensor_map.keys() + + +@dataclass +class TensorIsPerTensorQuantized(TensorRule): + tensor: str + + def __call__( + self, + tensor_map: NameToTensorMap, + input_to_ops: InputTensorToOpsMap, + output_to_op: OutputTensorToOpMap, + builder: "model_builder.ModelBuilder", + ) -> bool: + match tensor_map[self.tensor]: + case tflite_model.Tensor(): + tensor = tensor_map[self.tensor] + return ( + tensor.quantization is not None + ) and tensor.quantization.is_per_tensor() + case list(): + return all( + (t.quantization is not None) and t.quantization.is_per_tensor() + for t in tensor_map[self.tensor] + ) + case _: + raise ValueError + + def is_applicable(self, tensor_map: NameToTensorMap) -> bool: + return self.tensor in tensor_map.keys() + + +class TensorsAreQuantized(MultipleTensorRule): + def __init__(self, tensors: list[str]): + self._rules = [TensorIsQuantized(t) for t in tensors] + + @property + def rules(self) -> list[TensorRule]: + return self._rules + + +class TensorsAreNotQuantized(MultipleTensorRule): + def __init__(self, tensors: list[str]): + self._rules = [TensorIsNotQuantized(t) for t in tensors] + + @property + def rules(self) -> list[TensorRule]: + return self._rules + + +class TensorsArePerTensorQuantized(MultipleTensorRule): + def 
__init__(self, tensors: list[str]): + self._rules = [TensorIsPerTensorQuantized(t) for t in tensors] + + @property + def rules(self) -> list[TensorRule]: + return self._rules + + +@dataclass +class TensorsHaveSameQuantization(TensorRule): + tensors: list[str] + + def __call__( + self, + tensor_map: NameToTensorMap, + input_to_ops: InputTensorToOpsMap, + output_to_op: OutputTensorToOpMap, + builder: "model_builder.ModelBuilder", + ) -> bool: + if len(self.tensors) == 0: + return True + + all_tensors: list[tflite_model.Tensor] = [] + for mapped_tensor in (tensor_map[tensor] for tensor in self.tensors): + match mapped_tensor: + case tflite_model.Tensor(): + all_tensors.append(mapped_tensor) + case list(): + all_tensors.extend(mapped_tensor) + case _: + raise ValueError + + first_quantization = all_tensors[0].quantization + first_type = all_tensors[0].type + return all(t.quantization == first_quantization for t in all_tensors) and all( + t.type == first_type for t in all_tensors + ) + + def is_applicable(self, tensor_map: NameToTensorMap) -> bool: + return all(tensor in tensor_map.keys() for tensor in self.tensors) + + +@dataclass +class TensorIsNotModelOutput(TensorRule): + tensor: str + + def __call__( + self, + tensor_map: NameToTensorMap, + input_to_ops: InputTensorToOpsMap, + output_to_op: OutputTensorToOpMap, + builder: "model_builder.ModelBuilder", + ) -> bool: + match tensor_map[self.tensor]: + case tflite_model.Tensor(): + return ( + tensor_map[self.tensor] + not in builder.get_sub_graph().outputs.tmp_outputs + ) + case list(): + return all( + t not in builder.get_sub_graph().outputs.tmp_outputs + for t in tensor_map[self.tensor] + ) + case _: + raise ValueError + + def is_applicable(self, tensor_map: NameToTensorMap) -> bool: + return self.tensor in tensor_map.keys() + + +class TensorsAreNotModelOutputs(MultipleTensorRule): + def __init__(self, tensors: list[str]): + self._rules = [TensorIsNotModelOutput(t) for t in tensors] + + @property + def rules(self) -> list[TensorRule]: + return self._rules diff --git a/backends/nxp/backend/neutron_converter_manager.py b/backends/nxp/backend/neutron_converter_manager.py new file mode 100644 index 00000000000..8826c60dc3b --- /dev/null +++ b/backends/nxp/backend/neutron_converter_manager.py @@ -0,0 +1,55 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +import importlib +import pkgutil + +from executorch.backends.nxp.backend.ir.converter.node_converter import Target + + +class NeutronConverterManager: + """ + Manager for conversion of TFLite model in flatbuffers format into TFLite model that + contains NeutronGraph nodes. + """ + + _supported_target_names = [Target.RT700.value] + + def convert( + self, tflite_model: bytes, target: str, neutron_converter_flavor: str + ) -> bytes: + # Neutron converter crashes if we provide invalid target -> verify. + if target not in self._supported_target_names: + raise RuntimeError( + f"Target '{target}' is not supported by NeutronConverterManager." + ) + + neutron_converter_modules = [ + module.name + for module in pkgutil.iter_modules() + if module.name.startswith("neutron_converter") + ] + + requested_module_name = f"neutron_converter_{neutron_converter_flavor}" + if requested_module_name not in neutron_converter_modules: + if len(neutron_converter_modules) > 0: + raise RuntimeError( + f"Neutron Converter module with flavor '{neutron_converter_flavor}' " + f"not found. 
Available modules: {neutron_converter_modules}." + ) + else: + raise RuntimeError( + f"Neutron Converter module with flavor '{neutron_converter_flavor}' " + f"not found. Install 'neutron_converter_[flavor]' Python package." + ) + + neutron_converter = importlib.import_module( + f"{requested_module_name}.neutron_converter" + ) + + cctx = neutron_converter.CompilationContext() + cctx.targetOpts = neutron_converter.getNeutronTarget(target) + model_converted = neutron_converter.convertModel(list(tflite_model), cctx) + + return bytes(model_converted) diff --git a/backends/nxp/backend/node_format_inference.py b/backends/nxp/backend/node_format_inference.py new file mode 100644 index 00000000000..76b05d172a4 --- /dev/null +++ b/backends/nxp/backend/node_format_inference.py @@ -0,0 +1,259 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import logging +from enum import Enum + +from executorch.exir.dialects._ops import ops as exir_ops + +from torch.fx import Node +from torch.export import ExportedProgram + +logger = logging.getLogger(__name__) + + +class NodeFormat(Enum): + # Node's output is in NCHW format + CHANNELS_FIRST = 0 + + # Node's output format has no meaning + FORMATLESS = 1 + + # Format has not been identified + NONE = 2 + + def is_channels_first(self) -> bool: + return self == NodeFormat.CHANNELS_FIRST + + +class NodeFormatInference: + # Dictionary with Edge Aten ops that always use the channels first format. + # Each op in the dictionary is mapped to a dictionary, which holds the indices of the input nodes + # that are always channels first. + ops_with_channels_first_nodes = { + exir_ops.edge.aten.avg_pool2d.default: {"inputs": [0]}, + exir_ops.edge.aten.convolution.default: {"inputs": [0, 1]}, + exir_ops.edge.aten.max_pool2d_with_indices.default: {"inputs": [0]}, + exir_ops.edge.aten.max_pool2d.default: {"inputs": [0]}, + } + + # A set of Edge Aten ops which can change the tensor format (for example - the input nodes + # are channels first but the output is formatless).
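+ # For instance (illustrative): a `view_copy` which flattens a 4D NCHW tensor to 2D + # discards the spatial layout, so its output is treated as FORMATLESS even when its + # input is CHANNELS_FIRST.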
+ ops_that_can_change_tensor_format = {exir_ops.edge.aten.view_copy.default} + + _node_format_mapping: dict[Node, NodeFormat] + + _type_changed_during_last_run: bool + + # Mapping between Node and its ancestors (inputs) + _node_inputs: dict[Node, list[Node]] + + # Mapping between Node and its children (outputs) + _node_outputs: dict[Node, list[Node]] + + def __init__(self, edge_program: ExportedProgram): + self._edge_program = edge_program + + self._nodes = edge_program.graph.nodes + self._node_format_mapping = {} + self._node_inputs = { + node: node.all_input_nodes for node in edge_program.graph.nodes + } + self._node_outputs = { + node: list(node.users.keys()) for node in edge_program.graph.nodes + } + + self._type_changed_during_last_run = False + + def identify_node_formats(self) -> dict[Node, NodeFormat]: + self._type_changed_during_last_run = True + + # Re-run format inference until there are no changes + while self._type_changed_during_last_run: + self._type_changed_during_last_run = False + + for node in self._nodes: + self._infer_format_of_nodes(node) + + return self._node_format_mapping + + def _infer_format_of_nodes(self, node: Node): + op_type = self._get_node_op_type(node) + + if op_type in self.ops_with_channels_first_nodes: + self._handle_node_which_uses_channels_first_format(node) + elif op_type in self.ops_that_can_change_tensor_format: + if op_type == exir_ops.edge.aten.view_copy.default: # view_copy + self._assign_format_to_node( + self._node_outputs[node][0], NodeFormat.FORMATLESS + ) + else: + logger.error( + f"Node format inference for node type: {op_type} not found!" + ) + else: + self._handle_node_which_can_use_any_node_format(node) + + def _infer_format_based_on_io_ranks(self, node: Node): + """Determine the format of the output tensor of given "reshape style operator" based on the ranks of its input + and output. + """ + # noinspection PyBroadException + try: + main_input_rank = len(node.all_input_nodes[0].meta["val"].shape) + main_output_rank = len(node.meta["val"].shape) + + if main_output_rank == main_input_rank: + # Operator maintains the number of dimensions -> try to propagate the format. + self._match_formats_of_nodes(node, node.prev) + + else: + # Either the op 'flattens' the tensor, so output is formatless, or it scales it up, in which case the + # format is assumed to be 'FORMATLESS', and may be back propagated as channels first later. + self._assign_format_to_node(node, NodeFormat.FORMATLESS) + + except: + # Some shape data is not known, so we cannot be extra clever. Just set the output to `FORMATLESS` and + # everything will be alright. + self._assign_format_to_node(node, NodeFormat.FORMATLESS) + + def _match_formats_of_nodes(self, node_1, node_2): + """If one of 'node_1' or 'node_2' is channels first, make the other channels first as well. + If neither is channels first, make them both formatless. 
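+ For example, if 'node_1' is CHANNELS_FIRST while 'node_2' is still FORMATLESS or + unassigned, 'node_2' is promoted to CHANNELS_FIRST as well.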
+ """ + + format_1 = self._get_node_format(node_1) + format_2 = self._get_node_format(node_2) + + if format_1.is_channels_first() or format_2.is_channels_first(): + # At least 1 is channels first + if not format_1.is_channels_first(): + self._assign_format_to_node(node_1, NodeFormat.CHANNELS_FIRST) + elif not format_2.is_channels_first(): + self._assign_format_to_node(node_2, NodeFormat.CHANNELS_FIRST) + + else: + self._assign_format_to_node(node_1, NodeFormat.FORMATLESS) + self._assign_format_to_node(node_2, NodeFormat.FORMATLESS) + + def _assign_format_to_node(self, node: Node, node_format: NodeFormat): + """ + Assign format to node, but only if it's not channels first. + """ + old_node_format = self._get_node_format(node) + + if old_node_format is NodeFormat.CHANNELS_FIRST: + # Once CHANNEL_FIRST was assigned, we don't want to reassign + return + + if old_node_format != node_format: + self._type_changed_during_last_run = True + + self._node_format_mapping[node] = node_format + + def _get_node_op_type(self, node: Node) -> str | None: + """ + Get node's operation type or None if node is not callable function. + """ + if node.op == "call_function": + return node.target + + return None + + def _handle_node_which_uses_channels_first_format(self, node: Node): + """ + Function for assigning format to nodes that require channels first input (Conv, MaxPool etc.) + """ + op_type = self._get_node_op_type(node) + + for index, ancestor_node in enumerate(self._node_inputs[node]): + # Go through input nodes and assign them correct format + if index in self.ops_with_channels_first_nodes[op_type]["inputs"]: + self._assign_format_to_node(ancestor_node, NodeFormat.CHANNELS_FIRST) + + # We need to propagate channels first format up to already visited nodes + self._propagate_channels_first_format_up(ancestor_node) + else: + self._assign_format_to_node(ancestor_node, NodeFormat.FORMATLESS) + + # (TODO Lukas Sztefek): It is expected here, that CHANNELS_FIRST node always produces CHANNELS_FIRST output. + # Validate the assumption. + self._assign_format_to_node(node, NodeFormat.CHANNELS_FIRST) + + def _handle_node_which_can_use_any_node_format(self, node: Node): + """ + Function for assigning format to nodes that don't care about format (Softmax, Abs). + It stays formatless if there is no surrounding channels first ancestor/child node. 
+ """ + if not self._node_produces_or_consumes_channels_first_format(node): + # Nor inputs or current node are channels first -> assign everything to formatless + for processed_node in self._node_inputs[node] + [node]: + self._assign_format_to_node(processed_node, NodeFormat.FORMATLESS) + + else: + # Node produces or consumes channels first content + for processed_node in self._node_inputs[node] + [node]: + is_0d_to_2d = self._node_product_has_0_to_2_dimensions(processed_node) + + if self._get_node_format(processed_node).is_channels_first(): + # Node output already channel first + continue + elif is_0d_to_2d: + # Node has less than 3 dimensions so it cannot be considered CHANNELS_FIRST + self._assign_format_to_node(processed_node, NodeFormat.FORMATLESS) + else: + # Node has more than 2D output -> make it channels first + self._assign_format_to_node( + processed_node, NodeFormat.CHANNELS_FIRST + ) + self._propagate_channels_first_format_up(processed_node) + + def _propagate_channels_first_format_up(self, node: Node): + if self._node_is_placeholder(node): + # Input or buffer node -> there is no parent node so we can end propagation here + self._assign_format_to_node(node, NodeFormat.CHANNELS_FIRST) + return + + if node in self.ops_that_can_change_tensor_format: + # Propagation ends here because processed node changing format. + return + + for ancestor_node in self._node_inputs[node]: + # Propagate channels first to ancestor nodes + self._infer_format_of_nodes(ancestor_node) + + def _node_product_has_0_to_2_dimensions(self, node: Node) -> bool: + assert "val" in node.meta, f"Node '{node.name}' doesn't contain 'val' metadata!" + + node_value_meta = node.meta["val"] + + # (TODO Lukas Sztefek): Some nodes contains multiple value metadata (MaxPool, ...). Find out why. + if isinstance(node_value_meta, tuple): + node_value_meta = node_value_meta[0] + elif isinstance(node_value_meta, list): + node_value_meta = node_value_meta[0] + + node_output_rank = len(node_value_meta.shape) + + return 0 <= node_output_rank <= 2 + + def _node_produces_or_consumes_channels_first_format(self, node) -> bool: + """ + Check if node itself produces output in channels first format or consumes it from ancestor node. + """ + if self._get_node_format(node).is_channels_first(): + return True + + input_nodes = self._node_inputs[node] + return any( + self._get_node_format(ancestor_node).is_channels_first() + for ancestor_node in input_nodes + ) + + def _get_node_format(self, node): + return self._node_format_mapping.get(node, NodeFormat.NONE) + + def _node_is_placeholder(self, node: Node): + return node.op == "placeholder" diff --git a/backends/nxp/neutron_node_extraction.py b/backends/nxp/neutron_node_extraction.py new file mode 100644 index 00000000000..10648b48849 --- /dev/null +++ b/backends/nxp/neutron_node_extraction.py @@ -0,0 +1,102 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ + from dataclasses import dataclass + + import numpy as np + + from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import ( + BuiltinOperator, + ) + from executorch.backends.nxp.backend.ir.lib.tflite.Model import Model + + + @dataclass + class NeutronNodeArtifacts: + microcode: np.ndarray + weights: np.ndarray + kernels: np.ndarray + + + def extract_artifacts_from_neutron_node( + tflite_flatbuffer_or_path: bytes | str, + ) -> NeutronNodeArtifacts: + """Extract the payload (microcode, weights, kernels) from the Neutron Node in the given TFLite model. + The model can be provided as a binary flatbuffer, or a path to a `.tflite` model. + """ + + if isinstance(tflite_flatbuffer_or_path, str): + with open(tflite_flatbuffer_or_path, "rb") as f: + flatbuffer = f.read() + else: + flatbuffer = tflite_flatbuffer_or_path + + model = Model.GetRootAs(flatbuffer, 0) + assert ( + model.SubgraphsLength() == 1 + ), f"The model has `{model.SubgraphsLength()}` SubGraphs instead of `1`." + + sub_graph = model.Subgraphs(0) + + if sub_graph.OperatorsLength() == 0: + raise RuntimeError( + "Model converted with neutron-converter has `0` operators instead of `1`." + ) + elif sub_graph.OperatorsLength() > 1: + builtin_operators_map: dict[int, str] = { + y: x for x, y in BuiltinOperator.__dict__.items() + } + + opcodes = [model.OperatorCodes(i) for i in range(model.OperatorCodesLength())] + nodes = [sub_graph.Operators(i) for i in range(sub_graph.OperatorsLength())] + ops_found = [ + builtin_operators_map[opcodes[node.OpcodeIndex()].BuiltinCode()] + for node in nodes + ] + + raise RuntimeError( + f"Model converted with neutron-converter has `{sub_graph.OperatorsLength()}` operators " + f'instead of `1`. Operators found: {", ".join(ops_found)}.' + ) + + neutron_node = None + opcodes = [model.OperatorCodes(i) for i in range(model.OperatorCodesLength())] + for i in range(sub_graph.OperatorsLength()): + opcode = opcodes[sub_graph.Operators(i).OpcodeIndex()] + if ( + opcode.BuiltinCode() == BuiltinOperator.CUSTOM + and opcode.CustomCode() == b"NeutronGraph" + ): + # Found the NeutronNode. + neutron_node = sub_graph.Operators(i) + break + + if neutron_node is None: + raise RuntimeError( + "Model converted with neutron-converter does not contain a NeutronGraph node." + ) + + # The last 3 input tensors of the Neutron Node contain: + # 1. Neutron Microcode + # 2. Neutron Weights + # 3. Neutron Kernels + assert ( + neutron_node.InputsLength() >= 3 + ), f"The Neutron Node only has `{neutron_node.InputsLength()}` inputs. Expected at least `3`." + microcode_idx, weights_idx, kernels_idx = neutron_node.InputsAsNumpy()[-3:] + + microcode_buffer_idx = sub_graph.Tensors(microcode_idx).Buffer() + weights_buffer_idx = sub_graph.Tensors(weights_idx).Buffer() + kernels_buffer_idx = sub_graph.Tensors(kernels_idx).Buffer() + + microcode = model.Buffers(microcode_buffer_idx).DataAsNumpy() + weights = model.Buffers(weights_buffer_idx).DataAsNumpy() + kernels = model.Buffers(kernels_buffer_idx).DataAsNumpy() + + assert ( + microcode.dtype == weights.dtype == kernels.dtype == np.dtype("uint8") + ), "The Neutron Node uses unexpected data types."
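+ # All three buffers are raw uint8 views into the flatbuffer; they are returned + # as-is, and the 16-byte alignment of the final payload is handled later by the + # PayloadComposer in nxp_backend.py.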
+ + return NeutronNodeArtifacts(microcode, weights, kernels) diff --git a/backends/nxp/neutron_partitioner.py b/backends/nxp/neutron_partitioner.py new file mode 100644 index 00000000000..44863a6344e --- /dev/null +++ b/backends/nxp/neutron_partitioner.py @@ -0,0 +1,328 @@ +# Copyright (c) 2024-2025 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# Partitioner for the NXP Neutron NPU + +import logging +import operator +from dataclasses import dataclass +from typing import Dict, final, List, Mapping + +import torch + +from executorch.backends.nxp.backend.edge_program_converter import ( + EdgeProgramToIRConverter, +) +from executorch.backends.nxp.backend.ir.converter.node_converter import Target +from torch.export.exported_program import ExportedProgram +from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner +from torch.fx.passes.operator_support import OperatorSupportBase +from torch.nn import Parameter +from executorch.backends.nxp.backend.ir.converter.node_converters.ops_converters import * # noqa F403 +from executorch.backends.nxp.nxp_backend import NeutronBackend +from executorch.exir.backend.compile_spec_schema import CompileSpec +from executorch.exir.backend.partitioner import ( + DelegationSpec, + Partitioner, + PartitionResult, +) +from executorch.exir.backend.utils import tag_constant_data +from executorch.exir.dialects._ops import ops as exir_ops + + +class QDQClusterRecognizer: + """ + Implementation of the Quantize - Dequantize (QDQ) clustering. + The quantization is captured in the ExecuTorch program using the QDQ (Quantize - DeQuantize) representation, where + the inputs to a node come from dequantize nodes and its outputs go to quantize nodes. + The QDQClusterRecognizer identifies the operator performing the quantized arithmetic represented in QDQ form, and the + corresponding QDQ cluster. The QDQ cluster consists of: + - the dequantize nodes producing the inputs to the compute node + - the compute node (e.g. conv) + - auxiliary nodes, like getitem, view_copy, ..., which do not perform a core computation + - the quantize nodes processing the output of the compute node. + """ + + @dataclass + class QDQCluster: + """ + Dataclass to hold a QDQ cluster instance. For the purposes of the Partitioner we hold the list of operators + in the QDQ cluster (`ops`) and the compute node that the QDQ cluster is built around. + The compute node is what is represented in the Neutron IR; the rest of the nodes are helpers for data transformation + and define the quantization parameters.
This gives the partitioner the ability to: + - identify if the node is part of a QDQ cluster + - reference the compute node in the QDQ cluster + """ + + compute_node: torch.fx.Node + ops: List[torch.fx.Node] + + QUANTIZE_OPERATORS = [ + exir_ops.edge.quantized_decomposed.quantize_per_channel.default, + exir_ops.edge.quantized_decomposed.quantize_per_tensor.default, + ] + + DEQUANTIZE_OPERATORS = [ + exir_ops.edge.quantized_decomposed.dequantize_per_channel.default, + exir_ops.edge.quantized_decomposed.dequantize_per_tensor.default, + ] + + AUXILIARY_OPS = [ + operator.getitem, + exir_ops.edge.aten.view_copy.default, + exir_ops.edge.aten.permute_copy.default, + ] + + def __init__(self): + self.cluster_map: dict[str, QDQClusterRecognizer.QDQCluster] = {} + + @staticmethod + def is_quant_node(node: torch.fx.Node) -> bool: + return node.target in QDQClusterRecognizer.QUANTIZE_OPERATORS + + @staticmethod + def is_dequant_node(node: torch.fx.Node) -> bool: + return node.target in QDQClusterRecognizer.DEQUANTIZE_OPERATORS + + @staticmethod + def is_auxiliary_node(node: torch.fx.Node) -> bool: + return node.target in QDQClusterRecognizer.AUXILIARY_OPS + + def get_qdq_cluster_input_part(self, node: torch.fx.Node) -> List[torch.fx.Node]: + """ + Return the list of nodes representing the input part of the QDQ cluster of the node `node`. + Those are various dequantize nodes (see DEQUANTIZE_OPERATORS) optionally followed by auxiliary + nodes. + If the `node` does not meet the QDQ cluster schema, an empty list is returned. + """ + + # Iterative search for input nodes of the QDQ Cluster: + nodes_to_check = [node] + qdq_cluster = [] + while len(nodes_to_check) > 0: + n = nodes_to_check.pop() + qdq_cluster.append(n) + if self.is_dequant_node(n): + continue + input_nodes_from_dequant_or_helper = [ + (self.is_dequant_node(i) or self.is_auxiliary_node(i)) + for i in n.all_input_nodes + ] + if all(input_nodes_from_dequant_or_helper): + nodes_to_check.extend(n.all_input_nodes) + else: + return [] + + logging.debug(f"Dequant Cluster for {node} is: {qdq_cluster}") + return qdq_cluster + + def get_qdq_cluster_output_part(self, node: torch.fx.Node) -> List[torch.fx.Node]: + """ + Returns the list of nodes representing the output part of the QDQ cluster of the `node`. + Those are various quantize nodes (see QUANTIZE_OPERATORS) optionally preceded by auxiliary nodes. + If the `node` does not meet the QDQ cluster schema, an empty list is returned. + """ + + # Iterative search for output nodes of the QDQ Cluster: + nodes_to_check = [node] + qdq_cluster = [] + while len(nodes_to_check) > 0: + n = nodes_to_check.pop() + qdq_cluster.append(n) + if self.is_quant_node(n): + continue + consumers = [ + ngn for ngn in list(node.graph.nodes) if n in ngn.all_input_nodes + ] + logging.debug(f"\t Users for node {n} are: {consumers}") + output_nodes_to_quant_or_helper = [ + (self.is_quant_node(i) or self.is_auxiliary_node(i)) for i in consumers + ] + if all(output_nodes_to_quant_or_helper): + nodes_to_check.extend(consumers) + else: + return [] + + logging.debug(f"Quant Cluster for {node} is {qdq_cluster}") + return qdq_cluster + + def get_qdq_cluster(self, node: torch.fx.Node) -> List[torch.fx.Node]: + """ + Returns the QDQ cluster of the operator, if quantized. If the operator is not quantized, an empty list is returned.
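+ Illustrative example: for a quantized convolution, the cluster + dequantize(x), dequantize(w), dequantize(b) -> convolution -> quantize(y) + forms one set of nodes, with `convolution` as the compute node.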
+ """ + logging.debug(node) + input_qdq_cluster = self.get_qdq_cluster_input_part(node) + output_qdq_cluster = self.get_qdq_cluster_output_part(node) + if input_qdq_cluster and output_qdq_cluster: + return list(set(input_qdq_cluster).union(output_qdq_cluster)) + else: + return [] + + def tag_nodes(self, nodes: List[torch.fx.Node], cluster_name: str) -> None: + """ + Tags a node and its related dequant and quant nodes with a specified cluster name + """ + for node in nodes: + logging.info(f"Tagging node {node} as {cluster_name}") + node.meta["cluster"] = cluster_name + + def tag_qdq_clusters(self, nodes: List[torch.fx.Node]): + """ + Identifies QDQ clusters and tag them based on compute operation inside. + """ + + for node in nodes: + if ( + node.op == "call_function" + and not self.is_quant_node(node) + and not self.is_dequant_node(node) + ): + cluster = self.get_qdq_cluster(node) + if cluster: + cluster_name = f"{node.name}_cluster" + self.tag_nodes(cluster, cluster_name) + self.cluster_map[cluster_name] = self.QDQCluster(node, cluster) + + +supported_ops = { + exir_ops.edge.aten.addmm.default: AddMMConverter, # noqa F405 + exir_ops.edge.aten.avg_pool2d.default: AvgPool2dConverter, # noqa F405 + exir_ops.edge.aten.constant_pad_nd.default: ConstantPadNDConverter, # noqa F405 + exir_ops.edge.aten.convolution.default: ConvolutionConverter, # noqa F405 + exir_ops.edge.aten.max_pool2d.default: MaxPool2dConverter, # noqa F405 + exir_ops.edge.aten.max_pool2d_with_indices.default: MaxPool2dConverter, # noqa F405 + exir_ops.edge.aten.mm.default: MMConverter, # noqa F405 + exir_ops.edge.aten.relu.default: ReLUConverter, # noqa F405 + exir_ops.edge.aten._softmax.default: SoftmaxConverter, # noqa F405 + exir_ops.edge.aten.view_copy.default: ViewCopyConverter, # noqa F405 +} + + +class NeutronSupportedOperators(OperatorSupportBase): + + def __init__( + self, + qdq_clusters: Dict[str, QDQClusterRecognizer.QDQCluster], + target: Target, + operators_not_to_delegate: List[str], + parameters_mapping: dict[str, Parameter], + ): + self.qdq_clusters = qdq_clusters + self.target = target + self.operators_not_to_delegate = operators_not_to_delegate + self.parameters_mapping = parameters_mapping + + def _is_node_quantized(self, node: torch.fx.node.Node): + return "cluster" in node.meta + + def _is_node_call_function(self, node: torch.fx.node.Node): + return node.op == "call_function" + + def is_node_delegatable(self, node: torch.fx.node.Node): + if self.operators_not_to_delegate != [""]: + any_non_delegatable = any( + x in node.name for x in self.operators_not_to_delegate + ) + return not any_non_delegatable + return True + + def _is_node_supported_compute(self, node: torch.fx.node.Node) -> bool: + """ + Operator checking function for compute nodes. + """ + if not self.is_node_delegatable(node): + return False + + if (node_converter := supported_ops.get(node.target, None)) is None: + # There is no `NodeConverter` for this `node`. + return False + + return ( + self._is_node_call_function(node) + and self._is_node_quantized(node) + and + # TODO: `view_copy` node should be delegated only if it's not the only operator in the cluster. + node_converter.is_supported(node, self.target, self.parameters_mapping) + ) + + def _is_node_supported_non_compute(self, node: torch.fx.node.Node) -> bool: + """ + If the node is a quantize, dequantize or auxiliary node inside a QDQ cluster, the support on Neutron + is determined by the support of the compute operator. 
+ """ + return self._is_node_quantized(node) and self._is_node_supported_compute( + self.qdq_clusters[node.meta["cluster"]].compute_node + ) + + def is_node_supported( + self, submodules: Mapping[str, torch.nn.Module], node: torch.fx.Node + ) -> bool: + """ + Check if the Edge operator is supported on Neutron. + """ + + if ( + QDQClusterRecognizer.is_quant_node(node) + or QDQClusterRecognizer.is_dequant_node(node) + or QDQClusterRecognizer.is_auxiliary_node(node) + ): + return self._is_node_supported_non_compute(node) + else: + return self._is_node_supported_compute(node) + + +@final +class NeutronPartitioner(Partitioner): + def __init__(self, compile_spec: List[CompileSpec]) -> None: + self.delegation_spec = DelegationSpec(NeutronBackend.__name__, compile_spec) + + def partition(self, exported_program: ExportedProgram) -> PartitionResult: + # Run the CapabilityBasedPartitioner to return the largest possible + # subgraphs containing the nodes with the tags + logging.info("NeutronPartitioner::partition") + partition_tags = {} + + graph_module = exported_program.graph_module + nodes = list(graph_module.graph.nodes) + + qdq_cluster_recognizer = QDQClusterRecognizer() + qdq_cluster_recognizer.tag_qdq_clusters(nodes) + graph_module.recompile() + + target = None + operators_not_to_delegate = "" + for spec in self.delegation_spec.compile_specs: + if spec.key == "target": + target = Target(spec.value.decode()) + if spec.key == "operators_not_to_delegate": + operators_not_to_delegate = spec.value.decode().split(",") + assert target is not None + logging.info(f"Operators not to delegate: {operators_not_to_delegate}") + + parameters_mapping = EdgeProgramToIRConverter.map_inputs_to_parameters( + exported_program + ) + capability_partitioner = CapabilityBasedPartitioner( + exported_program.graph_module, + NeutronSupportedOperators( + qdq_cluster_recognizer.cluster_map, + target, + operators_not_to_delegate, + parameters_mapping, + ), + allows_single_node_partition=True, + ) + + partition_list = capability_partitioner.propose_partitions() + for partition in partition_list: + for node in partition.nodes: + delegation_tag = f"tag{partition.id}" + node.meta["delegation_tag"] = delegation_tag + partition_tags[delegation_tag] = self.delegation_spec + + tag_constant_data(exported_program) + return PartitionResult( + tagged_exported_program=exported_program, partition_tags=partition_tags + ) diff --git a/backends/nxp/neutron_pass_manager.py b/backends/nxp/neutron_pass_manager.py new file mode 100644 index 00000000000..02bcc0079f6 --- /dev/null +++ b/backends/nxp/neutron_pass_manager.py @@ -0,0 +1,50 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# Copyright 2025 NXP +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ + from typing import List, Optional, Type + + from executorch.exir.pass_base import ExportPass + from executorch.exir.program._program import _transform + + from torch._export.pass_base import PassType + from torch.export import ExportedProgram + + + class NeutronPassManager: + def __init__( + self, + exported_program: ExportedProgram, + passes: Optional[List[Type[PassType]]] = None, + ) -> None: + """ + A helper class to run multiple passes on an ExportedProgram. + """ + self._exported_program = exported_program + + if not passes: + self.passes = [] + else: + self.passes = passes + + @property + def exported_program(self) -> ExportedProgram: + return self._exported_program + + def transform(self) -> ExportedProgram: + """ + Returns a transformed ExportedProgram + """ + ep = self.exported_program + for pass_ in self.passes: + if issubclass(pass_, ExportPass): + transform_pass = pass_() + else: + raise RuntimeError( + f"Expecting an ExportPass subclass, but got pass: {pass_} with type: {type(pass_)}" + ) + ep = _transform(ep, transform_pass) + return ep diff --git a/backends/nxp/nxp_backend.py b/backends/nxp/nxp_backend.py new file mode 100644 index 00000000000..3233cf6dbd9 --- /dev/null +++ b/backends/nxp/nxp_backend.py @@ -0,0 +1,333 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# +# Main implementation of AoT flow to partition and preprocess for Neutron target +# backends. +# + +import logging +import struct +from typing import final, List, Optional + +import numpy as np +import torch + +from executorch.backends.nxp.backend.edge_program_converter import ( + EdgeProgramToIRConverter, +) +from executorch.backends.nxp.backend.ir.converter.node_converter import Target +from executorch.backends.nxp.backend.ir.tensor_formatting import TensorFormat +from executorch.backends.nxp.backend.neutron_converter_manager import ( + NeutronConverterManager, +) +from executorch.backends.nxp.neutron_node_extraction import ( + extract_artifacts_from_neutron_node, + NeutronNodeArtifacts, +) +from executorch.backends.nxp.neutron_pass_manager import NeutronPassManager +from executorch.backends.transforms.remove_getitem_op import RemoveGetItemPass +from executorch.exir.backend.backend_details import BackendDetails, PreprocessResult +from executorch.exir.backend.compile_spec_schema import CompileSpec +from executorch.exir.verification.verifier import EXIREdgeDialectVerifier +from torch.export.exported_program import ExportedProgram + + +class NeutronCompileSpecBuilder: + + def __init__(self): + self.config: Target = None + self.compile_spec: List[CompileSpec] = [] + self.compiler_flags = [] + self.output_format = None + self.operators_not_to_delegate: List[str] = [] + self.neutron_converter_flavor = None + + def _replace_colons(self, operator: str) -> str: + """ + Replace '::' with '_' + """ + return operator.replace("::", "_") + + def neutron_compile_spec( + self, + config: str, + neutron_converter_flavor: str, + extra_flags: Optional[str] = None, + operators_not_to_delegate: Optional[List[str]] = None, + ): + """ + Generate compile spec for Neutron NPU + + Args: + config: Neutron accelerator configuration, e.g. "imxrt700" + neutron_converter_flavor: Flavor of the neutron-converter module to use. The neutron-converter module named + 'neutron_converter_SDK_25_03' has flavor 'SDK_25_03'.
extra_flags: Extra flags for the Neutron compiler + operators_not_to_delegate: List of operators that should not be delegated + """ + try: + self.config = Target(config) + except ValueError: + raise ValueError( + f"Config `{config}` is not a valid target. Must be one of `{Target.values()}`." + ) + + self.neutron_converter_flavor = neutron_converter_flavor + + assert ( + self.output_format is None + ), f"Output format already set to {self.output_format}" + self.output_format = "tflite" + self.compiler_flags = [] + + if extra_flags is not None: + self.compiler_flags.append(extra_flags) + + if operators_not_to_delegate is not None: + self.operators_not_to_delegate = [ + self._replace_colons(op) for op in operators_not_to_delegate + ] + + return self + + def build(self): + """ + Generate a list of compile spec objects from the builder + """ + if self.output_format == "tflite": + self.compile_spec += [ + CompileSpec("output_format", "tflite".encode()), + CompileSpec("compile_flags", " ".join(self.compiler_flags).encode()), + CompileSpec("target", self.config.value.encode()), + CompileSpec( + "neutron_converter_flavor", self.neutron_converter_flavor.encode() + ), + CompileSpec( + "operators_not_to_delegate", + ",".join(self.operators_not_to_delegate).encode(), + ), + ] + + return self.compile_spec + + +def generate_neutron_compile_spec( + config: str,  # The target platform. For example "imxrt700". + neutron_converter_flavor: str, + system_config: Optional[str] = None, + extra_flags: Optional[str] = None, + operators_not_to_delegate: Optional[List[str]] = None, +) -> List[CompileSpec]: + return ( + NeutronCompileSpecBuilder() + .neutron_compile_spec( + config, + neutron_converter_flavor, + extra_flags=extra_flags, + operators_not_to_delegate=operators_not_to_delegate, + ) + .build() + ) + + +@final +class NeutronBackend(BackendDetails): + + @staticmethod + def preprocess( + edge_program: ExportedProgram, + compile_spec: List[CompileSpec], + ) -> PreprocessResult: + logging.info("NeutronBackend::preprocess") + + logging.debug(f"NeutronBackend preprocessing graph:\n{edge_program.graph}") + + output_format = "" + compile_flags = [] + binary = bytes() + target = "" + neutron_converter_flavor = "" + for spec in compile_spec: + if spec.key == "output_format": + output_format = spec.value.decode() + if spec.key == "target": + target = spec.value.decode() + if spec.key == "compile_flags": + compile_flags.append(spec.value.decode()) + if spec.key == "neutron_converter_flavor": + neutron_converter_flavor = spec.value.decode() + + # Check that the output format is set in the compile spec + if not output_format: + raise RuntimeError("output format is required") + + for node in edge_program.graph.nodes: + if node.op == "call_function": + logging.debug(f"Operator to be processed: {node.target}") + + # Serialize and return the program. + if output_format == "tflite": + # We need to create a custom model verifier with max_pool2d added as an exception. + # Otherwise, we get a violation that this op is not part of the ATen Core ops. + edge_program._verifiers = [ + EXIREdgeDialectVerifier( + class_only=True, exception_list=[torch.ops.aten.max_pool2d.default] + ) + ] + + # Remove MaxPool-related "getitem" nodes from the graph + edge_program = NeutronPassManager( + edge_program, [RemoveGetItemPass] + ).transform() + + # Convert the edge program to TFLite.
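+ # The lowering is two-step: the Edge program is first converted to a + # LiteRT/TFLite flatbuffer (the backend's IR), which the Neutron Converter + # then compiles into a model holding a single NeutronGraph node.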
+ tflite_model, io_formats = EdgeProgramToIRConverter().convert_program( + edge_program + ) + + neutron_model = NeutronConverterManager().convert( + tflite_model, target, neutron_converter_flavor + ) + + # Dump the tflite file if logging level is enabled + if logging.root.isEnabledFor(logging.WARNING): + import os + + delegation_tag = list(edge_program.graph.nodes)[0].meta[ + "delegation_tag" + ] + logging.debug( + f"Serializing converted graph with tag {delegation_tag} to {os.getcwd()}" + ) + with open(f"{delegation_tag}_pure.et.tflite", "wb") as f: + f.write(bytes(tflite_model)) + with open(f"{delegation_tag}_neutron.et.tflite", "wb") as f: + f.write(bytes(neutron_model)) + + binary = PayloadComposer().get_binary_payload(io_formats, neutron_model) + else: + raise RuntimeError(f"Unknown format {output_format}") + + return PreprocessResult(processed_bytes=binary) + + +class PayloadComposer: + ALIGNMENT = 16 + + def _padding_format_string_for_array(self, array: np.ndarray) -> str: + """Create a padding format string for the given array, which will add 0s at the end for correct alignment. + E.g. the string '10x' represents adding 10 bytes of '0' padding. + """ + assert array.dtype == np.dtype("uint8") + + overflow = array.size % self.ALIGNMENT + if overflow == 0: + return "" + + # Overflow 1 means padding 15, so use `alignment - overflow` padding. + return f"{self.ALIGNMENT - overflow}x" + + def _format_string_for_array(self, array: np.ndarray) -> str: + """Create a format string which will represent the provided array. It also handles the necessary alignment. + E.g. for array [1,2,3] we get '3s13x', because '3s' means string of 3 bytes, and `13x` means adding 13 bytes + of '0' padding at the end (for 16B alignment). + """ + assert array.dtype == np.dtype("uint8") + + return f"{array.size}s{self._padding_format_string_for_array(array)}" + + def _create_payload_header(self, io_formats) -> np.ndarray: + """ + Create bytes header for returned payload. It contains information about + input and output tensor formats. Tensors are ordered based on graph signature + of ExportedProgram. Header schema: + + +----------------------------------+-----------------------------------+ + | Input TensorFormats length (1B) | Output TensorFormats length (1B) | + +----------------------------------+-----------------------------------+ + | 1st input tensor format (1B) | [nth* input tensor format (1B)] | + +----------------------------------+-----------------------------------+ + | 1st output tensor format (1B) | [nth* output tensor format (1B)] | + +----------------------------------+-----------------------------------+ + + :param io_formats: IO tensors formats. + :return: Bytes representation of payload header. + """ + inputs = io_formats["inputs"] + outputs = io_formats["outputs"] + + assert len(inputs) < 256, "Models with more than 255 inputs are not supported." + assert ( + len(outputs) < 256 + ), "Models with more than 255 outputs are not supported." 
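+ # Illustrative example: a model with 2 channels-last inputs and 1 formatless + # output produces the header bytes [2, 1, 1, 1, 0].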
+ + header_data = [len(inputs)] + header_data.append(len(outputs)) + + for _tensor, tensor_format in inputs.items(): + header_data.append(1 if tensor_format == TensorFormat.CHANNELS_LAST else 0) + + for _tensor, tensor_format in outputs.items(): + header_data.append(1 if tensor_format == TensorFormat.CHANNELS_LAST else 0) + + # noinspection PyTypeChecker + return np.array(header_data, dtype=np.uint8) + + def _pack_with_alignment( + self, header: np.ndarray, neutron_artifacts: NeutronNodeArtifacts + ) -> bytes: + """ + Packs provided data into serialized binary data of the following C struct: + struct NeutronBinary { + uint8[] header; + uint8[] microcode; + uint8[] weights; + uint8[] kernels; + } + The individual components must be aligned to 16 bytes. + """ + + return struct.pack( + self._format_string_for_array(header) + + self._format_string_for_array(neutron_artifacts.microcode) + + self._format_string_for_array(neutron_artifacts.weights) + + self._format_string_for_array(neutron_artifacts.kernels), + header.tobytes(), + neutron_artifacts.microcode.tobytes(), + neutron_artifacts.weights.tobytes(), + neutron_artifacts.kernels.tobytes(), + ) + + def get_binary_payload(self, io_formats, neutron_model) -> bytes: + """ + Get binary payload for provided input/output tensor formats and neutron_model. Returned data have + following structure: + + +----------------------------------------------------------------------------------------------------------------+ + | 16 bytes aligned blocks | + +===========================+===========================+============================+===========================+ + | Input formats length (1B) | Output formats length (1B) | [nth* input format (1B)] | [nth* output format (1B)] | + +---------------------------+--------------------------- +---------------------------+---------------------------+ + | Neutron microcode | + +----------------------------------------------------------------------------------------------------------------+ + | Neutron weights | + +----------------------------------------------------------------------------------------------------------------+ + | Neutron kernels | + +----------------------------------------------------------------------------------------------------------------+ + + Tensor format definition: '0x1' == CHANNELS_LAST, '0x0' == FORMATLESS (no format). + + :param io_formats: Dictionary with keys 'inputs' and 'outputs' that contains dictionaries + mapping tensor name to TensorFormat. + :param neutron_model: Neutron model with single NeutronGraph node. + :return: 16 bytes aligned binary payload. + """ + header = self._create_payload_header(io_formats) + + # Extract the Neutron microcode, weights and kernels from the Neutron Node in the `neutron_model`. 
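+ # The Neutron model must contain exactly one NeutronGraph custom operator; + # extraction raises a RuntimeError otherwise (see neutron_node_extraction.py).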
+ neutron_artifacts = extract_artifacts_from_neutron_node(neutron_model) + + return self._pack_with_alignment(header, neutron_artifacts) diff --git a/backends/nxp/requirements-tests.txt b/backends/nxp/requirements-tests.txt new file mode 100644 index 00000000000..513ccefe848 --- /dev/null +++ b/backends/nxp/requirements-tests.txt @@ -0,0 +1,6 @@ +--extra-index-url https://eiq.nxp.com/repository +tensorflow==2.18.0 +pytest-mock +tflite +GvGen +neutron-converter_SDK_25_03 diff --git a/backends/nxp/tests/executorch_pipeline.py b/backends/nxp/tests/executorch_pipeline.py new file mode 100644 index 00000000000..6c452b99baf --- /dev/null +++ b/backends/nxp/tests/executorch_pipeline.py @@ -0,0 +1,91 @@ +# Copyright 2024-2025 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import torch + +from executorch import exir +from executorch.backends.nxp.neutron_partitioner import NeutronPartitioner +from executorch.backends.nxp.nxp_backend import generate_neutron_compile_spec + +# TODO (Robert Kalmar) Uncomment when NXP passes are ported to main +# from executorch.backends.nxp.pytorch_passes.nxp_pytorch_pass_manager import NXPPyTorchPassManager +from executorch.backends.nxp.quantizer.neutron_quantizer import NeutronQuantizer +from executorch.exir import ( + EdgeCompileConfig, + EdgeProgramManager, + ExecutorchBackendConfig, + ExecutorchProgramManager, + to_edge_transform_and_lower, +) +from torch import nn +from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e + + +def _quantize_model(model, calibration_inputs: list[tuple[torch.Tensor]]): + quantizer = NeutronQuantizer() + + m = prepare_pt2e(model, quantizer) + for _i, data in enumerate(calibration_inputs): + m(*data) + m = convert_pt2e(m) + + return m + + +def to_quantized_edge_program( + model: torch.nn.Module, + input_shape: tuple, + operators_not_to_delegate: list[str] | None = None, + target="imxrt700", + neutron_converter_flavor="SDK_25_03", +) -> EdgeProgramManager: + calibration_inputs = [(torch.randn(input_shape),), (torch.randn(input_shape),)] + example_input = (torch.ones(*input_shape),) + + exir_program_aten = torch.export.export_for_training( + model, example_input, strict=True + ) + + # TODO(Robert Kalmar) uncomment when NXP passes are ported to main + # Run pre-processing passes of the float32 aten dialect program. + # pass_manager = NXPPyTorchPassManager(exir_program_aten) + # pass_manager.run() # All passes by default.
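+ # Post-training quantization flow: prepare_pt2e inserts observers, running the + # calibration inputs records activation ranges, and convert_pt2e materializes the + # quantize/dequantize (QDQ) nodes which the NeutronPartitioner later clusters + # around the compute operators.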
+ + exir_program_aten_module = exir_program_aten.module() + exir_program_aten__module_quant = _quantize_model( + exir_program_aten_module, calibration_inputs + ) + + compile_spec = generate_neutron_compile_spec( + target, + operators_not_to_delegate=operators_not_to_delegate, + neutron_converter_flavor=neutron_converter_flavor, + ) + partitioner = NeutronPartitioner(compile_spec) + edge_program_manager = to_edge_transform_and_lower( + torch.export.export( + exir_program_aten__module_quant, example_input, strict=True + ), + partitioner=[partitioner], + compile_config=EdgeCompileConfig(_check_ir_validity=False), + ) + + return edge_program_manager + + +def to_quantized_executorch_program( + model: torch.nn.Module, input_shape: tuple +) -> ExecutorchProgramManager: + edge_program_manager = to_quantized_edge_program(model, input_shape) + + return edge_program_manager.to_executorch( + config=ExecutorchBackendConfig(extract_delegate_segments=False) + ) + + +def to_edge_program(model: nn.Module, input_shape) -> EdgeProgramManager: + example_input = (torch.ones(input_shape),) + exir_program = torch.export.export(model, example_input) + return exir.to_edge(exir_program) diff --git a/backends/nxp/tests/executors.py b/backends/nxp/tests/executors.py new file mode 100644 index 00000000000..2c9fdf69f5a --- /dev/null +++ b/backends/nxp/tests/executors.py @@ -0,0 +1,293 @@ +# Copyright 2023-2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from typing import Dict, Union + +import numpy +import numpy as np +import torch + +from executorch.backends.nxp.backend.edge_program_converter import ( + EdgeProgramToIRConverter, +) +from executorch.backends.nxp.backend.ir import logger +from executorch.backends.nxp.backend.ir.conversion_config import ConversionConfig +from torch.export import ExportedProgram + +# If executed on an i.MX platform, there is no tensorflow module, and typically the intention is to use the +# tflite python interpreter available in tflite_runtime. +try: + import tensorflow.lite as tflite +except ModuleNotFoundError: + import tflite_runtime.interpreter as tflite + + +class EdgeProgramExecutor: + + def __init__(self, edge_program: ExportedProgram): + self.edge_program = edge_program + + def inference( + self, input_data: Union[numpy.ndarray, Dict[int, numpy.ndarray]] + ) -> Union[numpy.ndarray, Dict[str, numpy.ndarray]]: + + if not isinstance(input_data, numpy.ndarray): + raise RuntimeError( + "Edge program inference with multiple inputs not implemented" + ) + + output = self.edge_program.module()(torch.from_numpy(input_data)) + + if isinstance(output, torch.Tensor): + return output.detach().numpy() + elif isinstance(output, tuple) and len(output) == 1: + return output[0].detach().numpy() + + raise RuntimeError( + "Edge program inference with multiple outputs not implemented" + ) + + +class TFLiteExecutor: + _interpreter: tflite.Interpreter + + def __init__( + self, + model_path: str = None, + model_content=None, + save_model=False, + saved_model_name="model.tflite", + delegate_path=None, + num_threads=None, + op_resolver_type=tflite.experimental.OpResolverType.AUTO, + ): + """ + Construct a TFLiteExecutor used to quickly run inference on a TFLite model. + Exactly one of "model_path" and "model_content" must be specified. + + :param model_path: Path to the executed TFLite model. + :param model_content: Byte representation of the TFLite model.
+ :param save_model: If true and model was provided through "model_content", + model is saved to storage with name "saved_model_name". + :param saved_model_name: Model name used when model stored to storage. Default + value is "model.tflite". + :param delegate_path: External delegate to be used for the TFLiteExecutor, see + https://www.tensorflow.org/api_docs/python/tf/lite/Interpreter for details. Default value is None. + :param num_threads: number of threads to be used by the TFLiteExecutor, see + https://www.tensorflow.org/api_docs/python/tf/lite/Interpreter for details. Default value is None. + :param op_resolver_type: Op kernels to be used by the TFLiteExecutor, see + https://www.tensorflow.org/api_docs/python/tf/lite/Interpreter for details. Default value is + tflite.experimental.OpResolverType.AUTO. + """ + assert model_path is not None or model_content is not None + assert model_path is None or model_content is None + + if delegate_path is not None: + delegate = [tflite.load_delegate(delegate_path)] + else: + delegate = None + + if save_model: + with open(saved_model_name, "wb") as f: + f.write(model_content) + + if model_path is not None: + self._interpreter = tflite.Interpreter( + model_path=model_path, + experimental_delegates=delegate, + num_threads=num_threads, + experimental_op_resolver_type=op_resolver_type, + ) + else: + self._interpreter = tflite.Interpreter( + model_content=model_content, + experimental_delegates=delegate, + num_threads=num_threads, + experimental_op_resolver_type=op_resolver_type, + ) + + self._interpreter.allocate_tensors() + + def inference( + self, input_data: Union[numpy.ndarray, Dict[int, numpy.ndarray]] + ) -> Union[numpy.ndarray, Dict[str, numpy.ndarray]]: + input_details = self._interpreter.get_input_details() + output_details = self._interpreter.get_output_details() + + if isinstance(input_data, numpy.ndarray): + self._interpreter.set_tensor(input_details[0]["index"], input_data) + elif isinstance(input_data, Dict): + if len(input_data) != len(input_details): + logger.w( + f"Number of model inputs: '{len(input_details)}', and provided input data: '{len(input_data)}'" + f" is not the same. Using first {len(input_details)} inputs tensors." + ) + for index in range(len(input_details)): + self._interpreter.set_tensor( + input_details[index]["index"], input_data[index] + ) + + self._interpreter.allocate_tensors() + self._interpreter.invoke() + + output_data = {} + + for output_detail in output_details: + output_data[output_detail["name"]] = self._interpreter.get_tensor( + output_detail["index"] + ) + + # Flatten output if there is only one value in output dictionary + if len(output_data) == 1: + return np.asarray(next(iter(output_data.values()))) + else: + return output_data + + def get_output_details(self, index): + return self._interpreter.get_output_details()[index] + + +def compare_output_arrays( + tfl_output: np.ndarray, + edge_output: np.ndarray, + output_name: str, + rtol: float = 1.0e-5, + atol: float = 1.0e-8, +): + """Assert that the provided numpy arrays are equal. + + :param tfl_output: Numpy array holding the output of the TFLite model. + :param edge_output: Numpy array holding the output of the ExportedProgram. + :param output_name: Common name of the above arrays. + :param rtol: Relative tolerance. + :param atol: Absolute tolerance. + """ + if tfl_output.dtype.char == edge_output.dtype.char == "O": + # String types fail in the following checks. Cast them to float32 before comparison. 
+ tfl_output = tfl_output.astype(np.float32) + edge_output = edge_output.astype(np.float32) + + if tfl_output.dtype != np.bool_ and tfl_output.size != 0: + logger.d( + f"Maximum output difference of the `{output_name}` tensor: {np.max(np.abs(tfl_output - edge_output))}" + ) + + assert tfl_output.shape == edge_output.shape, "Output shapes don't match!" + + assert np.allclose( + tfl_output, edge_output, rtol=rtol, atol=atol, equal_nan=True + ), f"Output values of the `{output_name}` tensor don't match!" + + +class TFLiteIOPreprocess: + + def preprocess(self, data: np.ndarray): + return data + + +class ToNHWCPreprocess(TFLiteIOPreprocess): + + def preprocess(self, data: np.ndarray): + assert isinstance( + data, np.ndarray + ), "Only single Numpy array preprocessing is currently supported" + return np.transpose(data, [0, 2, 3, 1]) + + +class ToNCHWPreprocess(TFLiteIOPreprocess): + + def preprocess(self, data: np.ndarray): + assert isinstance( + data, np.ndarray + ), "Only single Numpy array preprocessing is currently supported" + return np.transpose(data, [0, 3, 1, 2]) + + +def convert_run_compare( + edge_program: ExportedProgram, + input_data, + rtol=1.0e-5, + atol=1.0e-8, + save_models=False, + tfl_model: (bytes, dict) = None, + tflite_input_preprocess: TFLiteIOPreprocess = TFLiteIOPreprocess(),  # noqa B008 + tflite_output_preprocess: TFLiteIOPreprocess = TFLiteIOPreprocess(),  # noqa B008 + conversion_config: ConversionConfig = ConversionConfig(),  # noqa B008 + tflite_op_resolver_type=tflite.experimental.OpResolverType.AUTO, +) -> (TFLiteExecutor, EdgeProgramExecutor): + + if tfl_model is None: + tfl_model, _ = EdgeProgramToIRConverter().convert_program( + edge_program, conversion_config + ) + + edge_program_executor = EdgeProgramExecutor(edge_program) + edge_program_output = edge_program_executor.inference(input_data) + + tflite_input_data = tflite_input_preprocess.preprocess(input_data) + tflite_executor = TFLiteExecutor( + model_content=tfl_model, + save_model=save_models, + op_resolver_type=tflite_op_resolver_type, + ) + tflite_output = tflite_executor.inference(tflite_input_data) + tflite_output = tflite_output_preprocess.preprocess(tflite_output) + + if isinstance(tflite_output, dict) and isinstance(edge_program_output, dict): + if ( + len( + set(tflite_output.keys()).symmetric_difference( + set(edge_program_output.keys()) + ) + ) + == 0 + ): + # Both TFLite and ExportedProgram output dictionaries have the same keys. + for output_name, tflite_out in tflite_output.items(): + compare_output_arrays( + tflite_out, + edge_program_output[output_name], + output_name, + rtol, + atol, + ) + + else: + logger.e( + logger.Code.INTERNAL_ERROR, + "Original program and converted TFLite models have different outputs.", + ) + + elif isinstance(tflite_output, np.ndarray) and isinstance( + edge_program_output, np.ndarray + ): + compare_output_arrays( + tflite_output, edge_program_output, "main output", rtol, atol + ) + + else: + # This can happen, for example, if the TFLite model does not have some outputs which are present in the exported program. + logger.e( + logger.Code.NOT_IMPLEMENTED, + "Original ExportedProgram and converted TFLite models have different" + " number of outputs. 
Testing is not implemented for this case.", + ) + + return tflite_executor, edge_program_executor + + +class OverrideSupportedTargets: + + def __init__(self, converter_class, *, new_targets): + self._converter_class = converter_class + self._new_targets = new_targets + + self._old_targets = self._converter_class.supported_targets + + def __enter__(self): + self._converter_class.supported_targets = self._new_targets + + def __exit__(self, exc_type, exc_val, exc_tb): + self._converter_class.supported_targets = self._old_targets diff --git a/backends/nxp/tests/exported_program_vizualize.py b/backends/nxp/tests/exported_program_vizualize.py new file mode 100644 index 00000000000..0f4b8db697c --- /dev/null +++ b/backends/nxp/tests/exported_program_vizualize.py @@ -0,0 +1,90 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import random + +from gvgen import GvGen +from torch.export import ExportedProgram + + +def exported_program_to_dot( # noqa C901 + exported_program: ExportedProgram, dot_file_name="graph.dot", show_tags=True +): + """ + Generate dot file for tagged exported program. + + :param exported_program: Exported program with optional meta values: 'delegation_tag' and 'cluster'. + :param dot_file_name: Produced .dot file name. + :param show_tags: If True, nodes will be shown as a subcomponent of tag nodes. + """ + graph = GvGen() + + def name_color(string): # pseudo-randomization function + h = hash(string) # hash string and int together + if h < 0: # ensure positive number + h = h * -1 + random.seed(h) # set the seed to use for randomization + r = int(random.random() * 255) + g = int(random.random() * 255) + b = int(random.random() * 255) + return "#%02x%02x%02x" % (r, g, b) + + graph_items = {} + delegation_tags = {} + + # Find tags (parent objects) + for node in exported_program.graph.nodes: + if "delegation_tag" in node.meta and show_tags: + tag = node.meta["delegation_tag"] + if tag not in delegation_tags: + item = graph.newItem(tag) + delegation_tags[tag] = item + + for node in exported_program.graph.nodes: + if "delegation_tag" in node.meta and show_tags: + # Delegated node -> add color + tag = node.meta["delegation_tag"] + item = graph.newItem(node.name, delegation_tags[tag]) + + graph.propertyAppend(item, "fillcolor", name_color(tag)) + graph.propertyAppend(item, "style", "filled") + else: + item = graph.newItem(node.name) + + label = graph.propertyGet(item, "label") + if "cluster" in node.meta: + graph.propertyAppend( + item, "label", label + "\n QDQ Cluster: " + node.meta["cluster"] + ) + + # Change shape of node for (de)quantize and rest of nodes + if any(q in label for q in ["_quantize_per_tensor_", "_quantize_per_channel_"]): + graph.propertyAppend(item, "shape", "invhouse") + elif any( + dq in label + for dq in ["_dequantize_per_tensor_", "_dequantize_per_channel_"] + ): + graph.propertyAppend(item, "shape", "house") + else: + graph.propertyAppend(item, "shape", "box") + + graph_items[node.name] = item + + # Add connections between nodes + for node in exported_program.graph.nodes: + for user in node.users: + link = graph.newLink(graph_items[node.name], graph_items[user.name]) + + label = "" + if "val" in node.meta: + tensor = node.meta["val"] + if isinstance(tensor, tuple): + tensor = tensor[0] # Fake tensor + label = f" ({list(tensor.shape)} | {tensor.dtype})" + + graph.propertyAppend(link, "label", label) + + with open(dot_file_name, "w") as f: + 
graph.dot(f) diff --git a/backends/nxp/tests/ir/converter/node_converter/test_avg_pool2d_converter.py b/backends/nxp/tests/ir/converter/node_converter/test_avg_pool2d_converter.py new file mode 100644 index 00000000000..8b6b63bb53f --- /dev/null +++ b/backends/nxp/tests/ir/converter/node_converter/test_avg_pool2d_converter.py @@ -0,0 +1,158 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import numpy as np +import pytest +import torch + +from executorch.backends.nxp.backend.edge_program_converter import ( + EdgeProgramToIRConverter, +) +from executorch.backends.nxp.tests.executorch_pipeline import ( + to_edge_program, + to_quantized_edge_program, +) +from executorch.backends.nxp.tests.executors import ( + convert_run_compare, + ToNCHWPreprocess, + ToNHWCPreprocess, +) +from executorch.backends.nxp.tests.models import AvgPool2dConvModule, AvgPool2dModule +from torch.export import ExportedProgram + + +@pytest.fixture(autouse=True) +def reseed_model_per_test_run(): + torch.manual_seed(23) + np.random.seed(23) + + +@pytest.mark.parametrize( + "input_shape, padding, count_include_pad", + [ + pytest.param( + (1, 4, 8, 8), + (0, 0), + True, + id="No padding, include padding to average calculation.", + ), + pytest.param( + (1, 4, 8, 8), + (0, 0), + False, + id="No padding, don't include padding to average calculation.", + ), + pytest.param( + (1, 4, 8, 8), + (1, 1), + True, + id="Padding, keep the same output tensor size as input, include " + "padding to average calculation.", + ), + pytest.param( + (1, 4, 8, 8), + (1, 0), + True, + id="Padding, change the output tensor size, include padding to " + "average calculation.", + ), + pytest.param( + (1, 4, 9, 9), + (1, 0), + True, + id="Padding, change the output tensor size, include padding to " + "average calculation.", + ), + pytest.param( + (1, 4, 7, 7), + (0, 1), + True, + id="Padding, change the output tensor size, include padding to " + "average calculation.", + ), + ], +) +def test_avg_pool_2d_conversion(input_shape, padding, count_include_pad): + model = AvgPool2dModule(padding=padding, count_include_pad=count_include_pad) + edge_program = to_edge_program(model, input_shape).exported_program() + + input_data = np.random.random(input_shape).astype(np.float32) + + convert_run_compare( + edge_program, + input_data, + tflite_input_preprocess=ToNHWCPreprocess(), + tflite_output_preprocess=ToNCHWPreprocess(), + ) + + +@pytest.mark.parametrize( + "input_shape, padding, count_include_pad", + [ + pytest.param( + (1, 4, 16, 16), + (0, 0), + True, + id="No padding, include padding to average calculation.", + ), + pytest.param( + (1, 4, 16, 16), + (0, 0), + False, + id="No padding, don't include padding to average calculation.", + ), + pytest.param( + (1, 4, 16, 16), + (1, 1), + True, + id="Keep the same output tensor size as input, include padding " + "to average calculation.", + ), + pytest.param( + (1, 4, 16, 16), + (1, 0), + True, + id="Padding, change same tensor size, include padding to average" + " calculation.", + ), + pytest.param( + (1, 4, 11, 11), + (0, 1), + True, + id="Padding, change same tensor size, include padding to average" + " calculation.", + ), + pytest.param( + (1, 4, 11, 11), + (1, 0), + True, + id="Padding, change same tensor size, include padding to average" + " calculation.", + ), + ], +) +def test_avg_pool_2d_quant_conversion(mocker, input_shape, padding, count_include_pad): + model = 
AvgPool2dConvModule(padding=padding, count_include_pad=count_include_pad) + + converter_spy = mocker.spy(EdgeProgramToIRConverter, "convert_program") + + # Run conversion + _ = to_quantized_edge_program(model, input_shape) + + # Capture generated model + tflite_flatbuffers_model, io_formats = converter_spy.spy_return + + # Capture converted program + exported_program: ExportedProgram = converter_spy.call_args.args[1] + + input_data = (np.random.random(input_shape).astype(np.float32) * 50).astype(np.int8) + + convert_run_compare( + exported_program, + tflite_input_preprocess=ToNHWCPreprocess(), + tfl_model=tflite_flatbuffers_model, + tflite_output_preprocess=ToNCHWPreprocess(), + input_data=input_data, + ) diff --git a/backends/nxp/tests/ir/converter/node_converter/test_constant_pad_nd_converter.py b/backends/nxp/tests/ir/converter/node_converter/test_constant_pad_nd_converter.py new file mode 100644 index 00000000000..d6030ebae7f --- /dev/null +++ b/backends/nxp/tests/ir/converter/node_converter/test_constant_pad_nd_converter.py @@ -0,0 +1,147 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import numpy as np +import pytest +import torch + +from executorch.backends.nxp.backend.edge_program_converter import ( + EdgeProgramToIRConverter, +) +from executorch.backends.nxp.tests.executorch_pipeline import ( + to_edge_program, + to_quantized_edge_program, +) +from executorch.backends.nxp.tests.executors import ( + convert_run_compare, + ToNCHWPreprocess, + ToNHWCPreprocess, +) +from executorch.backends.nxp.tests.models import ( + ConstantPadNDConvModule, + ConstantPadNDModule, + Conv2dConstantPadNDModule, +) +from torch.export import ExportedProgram + + +@pytest.fixture(autouse=True) +def reseed_model_per_test_run(): + torch.manual_seed(23) + np.random.seed(23) + + +@pytest.mark.parametrize("constant", [0.0, 42.0, -13.37]) +def test_constant_pad_nd_conversion__specific_constant(constant): + input_shape = [2, 4, 6, 8] + paddings = [1, 2, 3, 4] + + edge_program = to_edge_program( + ConstantPadNDModule(paddings, constant), input_shape + ).exported_program() + + input_data = np.random.random(input_shape).astype(np.float32) + + convert_run_compare(edge_program, input_data) + + +@pytest.mark.parametrize("constant", [0.0, 67.28, 42.0, -13.37]) +@pytest.mark.skip(reason="Neutron Converter does not fully convert for NPU") +def test_constant_pad_nd_quant_conversion__specific_constant(mocker, constant): + input_shape = (2, 4, 12, 12) + paddings = (2, 2, 2, 2) + + converter_spy = mocker.spy(EdgeProgramToIRConverter, "convert_program") + + # Run conversion + _ = to_quantized_edge_program( + Conv2dConstantPadNDModule(paddings, constant), input_shape + ) + + # Capture generated model + tflite_flatbuffers_model, io_formats = converter_spy.spy_return + + # Capture converted program + edge_program: ExportedProgram = converter_spy.call_args.args[1] + + input_data = (np.random.random(input_shape).astype(np.float32) * 50).astype(np.int8) + + convert_run_compare( + edge_program, + input_data, + tfl_model=tflite_flatbuffers_model, + atol=1.0, + tflite_input_preprocess=ToNHWCPreprocess(), + tflite_output_preprocess=ToNCHWPreprocess(), + ) + + +def test_constant_pad_nd_conversion__default_constant(): + input_shape = [2, 4, 6, 8] + paddings = [1, 2, 3, 4] + + edge_program = to_edge_program( + ConstantPadNDModule(paddings), input_shape + ).exported_program() + + input_data = 
np.random.random(input_shape).astype(np.float32) + + convert_run_compare(edge_program, input_data) + + +@pytest.mark.parametrize( + "input_shape, paddings", + [ + pytest.param([2], list(range(2)), id="1D, padding H"), + pytest.param([2, 4], list(range(2)), id="2D, padding H"), + pytest.param([2, 4], list(range(4)), id="2D, padding N, H"), + pytest.param([2, 4, 6], list(range(2)), id="3D, padding H"), + pytest.param([2, 4, 6], list(range(4)), id="3D, padding C, H"), + pytest.param([2, 4, 6], list(range(6)), id="3D, padding N, C, H"), + pytest.param([2, 4, 6, 8], list(range(2)), id="4D, padding W"), + pytest.param([2, 4, 6, 8], list(range(4)), id="4D, padding H, W"), + pytest.param([2, 4, 6, 8], list(range(6)), id="4D, padding C, H, W"), + pytest.param([2, 4, 6, 8], list(range(8)), id="4D, padding N, C, H, W"), + pytest.param([1, 2, 3, 4, 5], list(range(2)), id="5D, padding D"), + pytest.param([1, 2, 3, 4, 5], list(range(4)), id="5D, padding W, D"), + pytest.param([1, 2, 3, 4, 5], list(range(6)), id="5D, padding H, W, D"), + pytest.param([1, 2, 3, 4, 5], list(range(8)), id="5D, padding C, H, W, D"), + pytest.param([1, 2, 3, 4, 5], list(range(10)), id="5D, padding N, C, H, W, D"), + ], +) +def test_constant_pad_nd_conversion__format_less(input_shape, paddings): + edge_program = to_edge_program( + ConstantPadNDModule(paddings), input_shape + ).exported_program() + + input_data = np.random.random(input_shape).astype(np.float32) + + convert_run_compare(edge_program, input_data) + + +@pytest.mark.parametrize( + "input_shape, paddings", + [ + pytest.param([2, 4, 6, 8], list(range(2)), id="4D, padding W"), + pytest.param([2, 4, 6, 8], list(range(4)), id="4D, padding H, W"), + pytest.param([2, 1, 6, 8], [1, 2, 3, 4, 2, 1], id="4D, padding C, H, W"), + pytest.param( + [2, 1, 6, 8], [1, 2, 3, 4, 2, 1, 5, 6], id="4D, padding N, C, H, W" + ), + ], +) +def test_constant_pad_nd_conversion__channels_first(input_shape, paddings): + edge_program = to_edge_program( + ConstantPadNDConvModule(paddings), input_shape + ).exported_program() # Extra `Conv` after the padding. + + input_data = np.random.random(input_shape).astype(np.float32) + + convert_run_compare( + edge_program, + input_data, + tflite_input_preprocess=ToNHWCPreprocess(), + tflite_output_preprocess=ToNCHWPreprocess(), + ) diff --git a/backends/nxp/tests/ir/converter/node_converter/test_conv_converter.py b/backends/nxp/tests/ir/converter/node_converter/test_conv_converter.py new file mode 100644 index 00000000000..1eceacbf060 --- /dev/null +++ b/backends/nxp/tests/ir/converter/node_converter/test_conv_converter.py @@ -0,0 +1,206 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
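+# Tests for converting aten.convolution to LiteRT Conv2D, covering padding and dilation variants in float and int8-quantized pipelines.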
+ +import numpy as np +import pytest +import torch + +from executorch.backends.nxp.backend.edge_program_converter import ( + EdgeProgramToIRConverter, +) +from executorch.backends.nxp.tests.executorch_pipeline import ( + to_edge_program, + to_quantized_edge_program, +) +from executorch.backends.nxp.tests.executors import ( + convert_run_compare, + ToNCHWPreprocess, + ToNHWCPreprocess, +) +from executorch.backends.nxp.tests.models import Conv2dModule +from torch.export import ExportedProgram + + +@pytest.fixture(autouse=True) +def reseed_model_per_test_run(): + torch.manual_seed(23) + np.random.seed(23) + + +@pytest.mark.parametrize( + "input_shape, padding", + [ + pytest.param((1, 4, 32, 32), (0, 0), id="No padding."), + pytest.param( + (1, 4, 32, 32), + (1, 1), + id="Padding, keep the same output tensor size as input.", + ), + pytest.param( + (1, 4, 32, 32), (1, 0), id="Padding, change the output tensor size." + ), + pytest.param( + (1, 4, 31, 31), (1, 0), id="Padding, change the output tensor size." + ), + pytest.param( + (1, 4, 31, 31), (0, 1), id="Padding, change the output tensor size." + ), + ], +) +@pytest.mark.parametrize( + "dilation", + [ + pytest.param(1, id="No dilation."), + pytest.param(2, id="2 dilation."), + pytest.param((1, 3), id="Side-different dilation."), + ], +) +def test_conv2d_conversion(input_shape, padding, dilation: int): + edge_program = to_edge_program( + Conv2dModule(padding=padding, dilation=dilation), input_shape + ).exported_program() + + input_data = np.random.random(input_shape).astype(np.float32) + + convert_run_compare( + edge_program, + input_data, + tflite_input_preprocess=ToNHWCPreprocess(), + tflite_output_preprocess=ToNCHWPreprocess(), + atol=4e-7, + ) + + +@pytest.mark.parametrize( + "model, input_shape", + [ + pytest.param( + Conv2dModule(in_channels=8, out_channels=32, kernel_size=5), + (1, 8, 32, 32), + id="In ch 8, out ch 32, kernel 5", + ), + pytest.param( + Conv2dModule(in_channels=8, out_channels=32, kernel_size=5, padding=3), + (1, 8, 32, 32), + id="In ch 8, out ch 32, kernel 5, padding 3", + ), + pytest.param( + Conv2dModule(in_channels=8, out_channels=32, kernel_size=5, padding=(2, 3)), + (1, 8, 31, 31), + id="In ch 8, out ch 32, kernel 5, padding (2, 3)", + ), + pytest.param( + Conv2dModule( + in_channels=8, + out_channels=32, + kernel_size=5, + padding=(2, 3), + dilation=(1, 2), + ), + (1, 8, 31, 31), + id="In ch 8, out ch 32, kernel 5, padding (2, 3), dilation (1, 2)", + ), + pytest.param( + Conv2dModule( + in_channels=16, out_channels=32, kernel_size=3, padding=2, dilation=2 + ), + (1, 16, 32, 32), + id="In ch 16, out ch 32, kernel 3, padding 2, dilation 2", + ), + pytest.param( + Conv2dModule(in_channels=32, out_channels=32, kernel_size=3, dilation=2), + (1, 32, 32, 32), + id="In ch 32, out ch 32, kernel 3, dilation 2", + ), + pytest.param( + Conv2dModule( + in_channels=32, + out_channels=32, + kernel_size=3, + padding=(0, 1), + dilation=2, + ), + (1, 32, 35, 35), + id="In ch 32, out ch 32, kernel 3, padding (0, 1), dilation 2", + ), + pytest.param( + Conv2dModule( + in_channels=32, + out_channels=32, + kernel_size=3, + padding=(1, 0), + dilation=(3, 1), + ), + (1, 32, 35, 35), + id="In ch 32, out ch 32, kernel 3, padding (1, 0), dilation (3, 1)", + ), + pytest.param( + Conv2dModule( + in_channels=32, out_channels=32, kernel_size=3, dilation=(2, 3) + ), + (1, 32, 32, 32), + id="In ch 32, out ch 32, kernel 3, dilation (2, 3)", + ), + pytest.param( + Conv2dModule(in_channels=32, out_channels=64, kernel_size=4), + (1, 32, 32, 32), + 
id="In ch 32, out ch 32, kernel 4", + ), + pytest.param( + Conv2dModule( + in_channels=32, out_channels=64, kernel_size=4, padding=(1, 2) + ), + (1, 32, 33, 33), + id="In ch 32, out ch 32, kernel 4, padding (1, 2)", + ), + pytest.param( + Conv2dModule( + in_channels=32, out_channels=64, kernel_size=4, padding=(1, 0) + ), + (1, 32, 33, 33), + id="In ch 32, out ch 32, kernel 4, padding (1, 0)", + ), + pytest.param( + Conv2dModule( + in_channels=32, out_channels=64, kernel_size=4, padding=(0, 2) + ), + (1, 32, 32, 32), + id="In ch 32, out ch 32, kernel 4, padding (0, 2)", + ), + pytest.param( + Conv2dModule( + in_channels=32, + out_channels=64, + kernel_size=4, + padding=(0, 2), + dilation=(1, 2), + ), + (1, 32, 32, 32), + id="In ch 32, out ch 32, kernel 4, padding (0, 2), dilation (1, 2)", + ), + ], +) +def test_conv2d_quant_conversion(mocker, model: torch.nn.Module, input_shape): + converter_spy = mocker.spy(EdgeProgramToIRConverter, "convert_program") + + # Run conversion + _ = to_quantized_edge_program(model, input_shape) + + # Capture generated model + tflite_flatbuffers_model, io_formats = converter_spy.spy_return + + # Capture converted program + exported_program: ExportedProgram = converter_spy.call_args.args[1] + + input_data = (np.random.random(input_shape).astype(np.float32) * 50).astype(np.int8) + + convert_run_compare( + exported_program, + tflite_input_preprocess=ToNHWCPreprocess(), + tfl_model=tflite_flatbuffers_model, + tflite_output_preprocess=ToNCHWPreprocess(), + input_data=input_data, + atol=1.0, + ) diff --git a/backends/nxp/tests/ir/converter/node_converter/test_linear_converter.py b/backends/nxp/tests/ir/converter/node_converter/test_linear_converter.py new file mode 100644 index 00000000000..4a19c3d8c4b --- /dev/null +++ b/backends/nxp/tests/ir/converter/node_converter/test_linear_converter.py @@ -0,0 +1,40 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import numpy as np +import pytest +import torch + +from executorch.backends.nxp.tests.executorch_pipeline import to_edge_program +from executorch.backends.nxp.tests.executors import convert_run_compare +from executorch.backends.nxp.tests.models import LinearModule + + +@pytest.fixture(autouse=True) +def reseed_model_per_test_run(): + torch.manual_seed(23) + np.random.seed(23) + + +def test_linear_conversion__with_bias(): + input_shape = (10, 32) + edge_program = to_edge_program( + LinearModule(bias=True), input_shape + ).exported_program() + + input_data = np.random.random(input_shape).astype(np.float32) + + convert_run_compare(edge_program, input_data=input_data, atol=1.0e-6) + + +def test_linear_conversion__without_bias(): + input_shape = (10, 32) + edge_program = to_edge_program( + LinearModule(bias=True), input_shape + ).exported_program() + + input_data = np.random.random(input_shape).astype(np.float32) + + convert_run_compare(edge_program, input_data=input_data, atol=1.0e-6) diff --git a/backends/nxp/tests/ir/converter/node_converter/test_max_pool_2d_converter.py b/backends/nxp/tests/ir/converter/node_converter/test_max_pool_2d_converter.py new file mode 100644 index 00000000000..2618558f7c9 --- /dev/null +++ b/backends/nxp/tests/ir/converter/node_converter/test_max_pool_2d_converter.py @@ -0,0 +1,121 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +import numpy as np +import pytest +import torch + +from executorch.backends.nxp.backend.edge_program_converter import ( + EdgeProgramToIRConverter, +) +from executorch.backends.nxp.neutron_pass_manager import NeutronPassManager +from executorch.backends.nxp.tests.executorch_pipeline import ( + to_edge_program, + to_quantized_edge_program, +) +from executorch.backends.nxp.tests.executors import ( + convert_run_compare, + ToNCHWPreprocess, + ToNHWCPreprocess, +) +from executorch.backends.nxp.tests.models import MaxPool2dConvModule, MaxPool2dModule +from executorch.backends.xnnpack._passes import RemoveGetItemPass +from executorch.exir.verification.verifier import EXIREdgeDialectVerifier +from torch.export import ExportedProgram + + +@pytest.fixture(autouse=True) +def reseed_model_per_test_run(): + torch.manual_seed(23) + np.random.seed(23) + + +@pytest.mark.parametrize( + "input_shape, padding", + [ + pytest.param((1, 4, 8, 8), (0, 0), id="No padding."), + pytest.param( + (1, 4, 8, 8), + (1, 1), + id="Padding, keep the same output tensor size as input.", + ), + pytest.param( + (1, 4, 8, 8), (1, 0), id="Padding, change the output tensor size." + ), + pytest.param( + (1, 4, 9, 9), (1, 0), id="Padding, change the output tensor size." + ), + pytest.param( + (1, 4, 9, 9), (0, 1), id="Padding, change the output tensor size." + ), + ], +) +def test_max_pool_2d_conversion(input_shape, padding): + edge_program = to_edge_program( + MaxPool2dModule(padding=padding), input_shape + ).exported_program() + + # We need to create custom model verifier with max_pool2d added as exception. + # Otherwise, we get violation that this op is not part of ATen Core ops. + edge_program._verifiers = [ + EXIREdgeDialectVerifier( + class_only=True, exception_list=[torch.ops.aten.max_pool2d.default] + ) + ] + + # Remove MaxPool-related "getitem" nodes from graph + edge_program = NeutronPassManager(edge_program, [RemoveGetItemPass]).transform() + + input_data = np.random.random(input_shape).astype(np.float32) + + convert_run_compare( + edge_program, + input_data, + tflite_input_preprocess=ToNHWCPreprocess(), + tflite_output_preprocess=ToNCHWPreprocess(), + ) + + +@pytest.mark.parametrize( + "input_shape, padding", + [ + pytest.param((1, 4, 8, 8), (0, 0), id="No padding."), + pytest.param( + (1, 4, 8, 8), + (1, 1), + id="Padding, keep the same output tensor size as input.", + ), + pytest.param( + (1, 4, 8, 8), (1, 0), id="Padding, change the output tensor size." + ), + pytest.param( + (1, 4, 11, 11), (1, 0), id="Padding, change the output tensor size." + ), + pytest.param( + (1, 4, 11, 11), (0, 1), id="Padding, change the output tensor size." 
+ ), + ], +) +def test_max_pool_2d_quant_conversion(mocker, input_shape, padding): + converter_spy = mocker.spy(EdgeProgramToIRConverter, "convert_program") + + # Run conversion + _ = to_quantized_edge_program(MaxPool2dConvModule(padding=padding), input_shape) + + # Capture generated model + tflite_flatbuffers_model, io_formats = converter_spy.spy_return + + # Capture converted program + exported_program: ExportedProgram = converter_spy.call_args.args[1] + + input_data = (np.random.random(input_shape).astype(np.float32) * 50).astype(np.int8) + + convert_run_compare( + exported_program, + tflite_input_preprocess=ToNHWCPreprocess(), + tfl_model=tflite_flatbuffers_model, + tflite_output_preprocess=ToNCHWPreprocess(), + input_data=input_data, + ) diff --git a/backends/nxp/tests/ir/converter/node_converter/test_permute_copy_converter.py b/backends/nxp/tests/ir/converter/node_converter/test_permute_copy_converter.py new file mode 100644 index 00000000000..d25e2759cc8 --- /dev/null +++ b/backends/nxp/tests/ir/converter/node_converter/test_permute_copy_converter.py @@ -0,0 +1,64 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import numpy as np +import pytest +import torch + +from executorch.backends.nxp.backend.edge_program_converter import ( + EdgeProgramToIRConverter, +) +from executorch.backends.nxp.tests.executorch_pipeline import to_quantized_edge_program +from executorch.backends.nxp.tests.executors import ( + convert_run_compare, + ToNCHWPreprocess, + ToNHWCPreprocess, +) +from executorch.backends.nxp.tests.models import Conv2dModule +from torch.export import ExportedProgram + + +@pytest.fixture(autouse=True) +def reseed_model_per_test_run(): + torch.manual_seed(23) + np.random.seed(23) + + +class Conv2dPermuteCopyModule(torch.nn.Module): + def __init__(self, new_dims: tuple[int, ...]): + super().__init__() + self.new_dims = new_dims + self.conv = Conv2dModule() + + def forward(self, x): + x = self.conv(x) + return torch.permute(x, self.new_dims) + + +def test_permute_copy_quant_conversion__with_bias(mocker): + input_shape = (1, 4, 8, 8) + new_dims = (0, 2, 3, 1) + + converter_spy = mocker.spy(EdgeProgramToIRConverter, "convert_program") + + # Run conversion + _ = to_quantized_edge_program(Conv2dPermuteCopyModule(new_dims), input_shape) + + # Capture generated model + tflite_flatbuffers_model, io_formats = converter_spy.spy_return + + # Capture converted program + edge_program: ExportedProgram = converter_spy.call_args.args[1] + + input_data = (np.random.random(input_shape).astype(np.float32) * 50).astype(np.int8) + + convert_run_compare( + edge_program, + input_data, + tfl_model=tflite_flatbuffers_model, + atol=1.0, + tflite_input_preprocess=ToNHWCPreprocess(), + tflite_output_preprocess=ToNCHWPreprocess(), + ) diff --git a/backends/nxp/tests/ir/converter/node_converter/test_relu_converter.py b/backends/nxp/tests/ir/converter/node_converter/test_relu_converter.py new file mode 100644 index 00000000000..8d903e3e0b5 --- /dev/null +++ b/backends/nxp/tests/ir/converter/node_converter/test_relu_converter.py @@ -0,0 +1,108 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
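+# ReLU conversion tests: a standalone float ReLU, plus ReLU following Conv2d and Linear in the quantized pipeline.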
+ +import numpy as np +import pytest +import torch + +from executorch.backends.nxp.backend.edge_program_converter import ( + EdgeProgramToIRConverter, +) +from executorch.backends.nxp.tests.executorch_pipeline import ( + to_edge_program, + to_quantized_edge_program, +) +from executorch.backends.nxp.tests.executors import ( + convert_run_compare, + ToNCHWPreprocess, + ToNHWCPreprocess, +) +from executorch.backends.nxp.tests.models import Conv2dModule, LinearModule, ReLUModule +from torch.export import ExportedProgram + + +@pytest.fixture(autouse=True) +def reseed_model_per_test_run(): + torch.manual_seed(23) + np.random.seed(23) + + +class ConvReLUModule(torch.nn.Module): + def __init__(self): + super().__init__() + + self.conv = Conv2dModule() + self.relu = torch.nn.ReLU() + + def forward(self, x): + x = self.conv(x) + return self.relu(x) + + +class LinearReLUModule(torch.nn.Module): + def __init__(self): + super().__init__() + + self.linear = LinearModule(bias=True) + self.relu = torch.nn.ReLU() + + def forward(self, x): + x = self.linear(x) + return self.relu(x) + + +def test_relu_conversion(): + input_shape = (10, 4, 32, 32) + edge_program = to_edge_program(ReLUModule(), input_shape).exported_program() + + input_data = 2 * np.random.random(input_shape).astype(np.float32) - 1 + + convert_run_compare(edge_program, input_data=input_data) + + +def test_relu_with_conv_quant_conversion(mocker): + input_shape = (1, 4, 32, 32) + converter_spy = mocker.spy(EdgeProgramToIRConverter, "convert_program") + + # Run conversion + _ = to_quantized_edge_program(ConvReLUModule(), input_shape) + + # Capture generated model + tflite_flatbuffers_model, _ = converter_spy.spy_return + + # Capture converted program + edge_program: ExportedProgram = converter_spy.call_args.args[1] + + input_data = ( + (2 * np.random.random(input_shape).astype(np.float32) - 1) * 50 + ).astype(np.int8) + + convert_run_compare( + edge_program, + input_data, + tfl_model=tflite_flatbuffers_model, + tflite_input_preprocess=ToNHWCPreprocess(), + tflite_output_preprocess=ToNCHWPreprocess(), + ) + + +def test_relu_with_linear_quant_conversion(mocker): + input_shape = (256, 32) + converter_spy = mocker.spy(EdgeProgramToIRConverter, "convert_program") + + # Run conversion + _ = to_quantized_edge_program(LinearReLUModule(), input_shape) + + # Capture generated model + tflite_flatbuffers_model, _ = converter_spy.spy_return + + # Capture converted program + edge_program: ExportedProgram = converter_spy.call_args.args[1] + + input_data = ( + (2 * np.random.random(input_shape).astype(np.float32) - 1) * 50 + ).astype(np.int8) + + convert_run_compare(edge_program, input_data, tfl_model=tflite_flatbuffers_model) diff --git a/backends/nxp/tests/ir/converter/node_converter/test_softmax_converter.py b/backends/nxp/tests/ir/converter/node_converter/test_softmax_converter.py new file mode 100644 index 00000000000..c3eecc04adc --- /dev/null +++ b/backends/nxp/tests/ir/converter/node_converter/test_softmax_converter.py @@ -0,0 +1,111 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
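+# Note: softmax conversion currently succeeds only over the last dimension of low-rank formatless tensors; the remaining tests assert that the convertibility check rejects unsupported dims and formats.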
+
+import numpy as np
+import pytest
+import torch
+
+from executorch.backends.nxp.backend.edge_program_converter import (
+    EdgeProgramToIRConverter,
+)
+from executorch.backends.nxp.backend.ir.conversion_config import ConversionConfig
+from executorch.backends.nxp.tests.executorch_pipeline import to_edge_program
+from executorch.backends.nxp.tests.executors import convert_run_compare
+from executorch.backends.nxp.tests.models import SoftmaxConvModule, SoftmaxModule
+
+
+@pytest.fixture(autouse=True)
+def reseed_model_per_test_run():
+    torch.manual_seed(23)
+    np.random.seed(23)
+
+
+@pytest.mark.parametrize(
+    "input_shape,dim",
+    [
+        pytest.param((10,), -1, id="1D,dim=-1"),
+        pytest.param((10,), 0, id="1D,dim=0"),
+        pytest.param((10, 32), -1, id="2D,dim=-1"),
+        pytest.param((10, 32), 1, id="2D,dim=1"),
+    ],
+)
+def test_softmax_conversion__formatless_input(input_shape, dim: int):
+    model = SoftmaxModule(dim)
+
+    edge_program = to_edge_program(model, input_shape).exported_program()
+
+    input_data = np.random.random(input_shape).astype(np.float32)
+
+    convert_run_compare(edge_program, input_data=input_data)
+
+
+@pytest.mark.parametrize(
+    "input_shape,dim",
+    [
+        pytest.param((10, 32, 32), -1, id="3D,dim=-1"),
+        pytest.param((10, 32, 32), 2, id="3D,dim=2"),
+        pytest.param((10, 32, 32, 8), -1, id="4D,dim=-1"),
+        pytest.param((10, 32, 32, 8), 3, id="4D,dim=3"),
+        pytest.param((10, 32, 32, 8, 8), -1, id="5D,dim=-1"),
+        pytest.param((10, 32, 32, 8, 8), 4, id="5D,dim=4"),
+    ],
+)
+def test_softmax_conversion__unknown_input_format(input_shape, dim: int):
+    model = SoftmaxModule(dim)
+
+    edge_program = to_edge_program(model, input_shape).exported_program()
+
+    # Currently this test does not pass because the convertibility checker doesn't use tensor formats.
+    with pytest.raises(
+        AssertionError, match="`aten__softmax_default` is not convertible"
+    ):
+        EdgeProgramToIRConverter().convert_program(edge_program, ConversionConfig())
+
+    # input_data = np.random.random(input_shape).astype(np.float32)
+    # convert_run_compare(edge_program_manager.exported_program(), input_data=input_data, atol=5e-7)
+
+
+@pytest.mark.parametrize(
+    "input_shape,dim",
+    [
+        pytest.param((10, 4, 32, 32), 1, id="4D,dim=1"),
+        pytest.param((10, 4, 16, 16), -3, id="4D,dim=-3"),
+    ],
+)
+def test_softmax_conversion_channel_last(input_shape, dim: int):
+    model = SoftmaxConvModule(dim)
+
+    edge_program = to_edge_program(model, input_shape).exported_program()
+
+    # TODO (Robert Kalmar) Currently this test does not pass because the convertibility checker doesn't use tensor formats.
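+    # Until format-aware convertibility checking is implemented, conversion of these channels-first inputs is expected to fail: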
+ with pytest.raises( + AssertionError, match="`aten__softmax_default` is not convertible" + ): + EdgeProgramToIRConverter().convert_program(edge_program, ConversionConfig()) + + # input_data = np.random.random(input_shape).astype(np.float32) + # convert_run_compare(edge_program_manager.exported_program(), tflite_input_preprocess=ToNHWCPreprocess(), + # tflite_output_preprocess=ToNCHWPreprocess(), input_data=input_data, atol=5e-7) + + +@pytest.mark.parametrize( + "input_shape,dim", + [ + pytest.param((10, 32), 0, id="2D,dim=0"), + pytest.param((10, 32, 32), 1, id="3D,dim=1"), + pytest.param((10, 32, 32, 8), 2, id="4D,dim=2"), + pytest.param((10, 32, 32, 8, 8), 3, id="5D,dim=3"), + pytest.param((10, 32, 32, 8, 8), 2, id="5D,dim=2"), + ], +) +def test_softmax_conversion_unsupported_dims(input_shape, dim: int): + model = SoftmaxModule(dim) + + edge_program = to_edge_program(model, input_shape).exported_program() + + with pytest.raises( + AssertionError, match="`aten__softmax_default` is not convertible" + ): + EdgeProgramToIRConverter().convert_program(edge_program, ConversionConfig()) diff --git a/backends/nxp/tests/ir/converter/node_converter/test_view_copy_converter.py b/backends/nxp/tests/ir/converter/node_converter/test_view_copy_converter.py new file mode 100644 index 00000000000..9863c8acc41 --- /dev/null +++ b/backends/nxp/tests/ir/converter/node_converter/test_view_copy_converter.py @@ -0,0 +1,237 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from typing import Sequence + +import numpy as np +import pytest +import torch + +from executorch.backends.nxp.backend.edge_program_converter import ( + EdgeProgramToIRConverter, +) +from executorch.backends.nxp.backend.ir.converter.builder.model_builder import ( + ModelBuilder, +) +from executorch.backends.nxp.backend.ir.tflite_generator.builtin_options.conv_2d_options import ( + Conv2D, +) +from executorch.backends.nxp.backend.ir.tflite_generator.builtin_options.reshape_options import ( + Reshape, +) +from executorch.backends.nxp.backend.ir.tflite_generator.builtin_options.transpose_options import ( + Transpose, +) +from executorch.backends.nxp.tests.executorch_pipeline import ( + to_edge_program, + to_quantized_edge_program, +) +from executorch.backends.nxp.tests.executors import ( + convert_run_compare, + ToNCHWPreprocess, + ToNHWCPreprocess, +) +from torch import nn +from torch.export import ExportedProgram + + +@pytest.fixture(autouse=True) +def reseed_model_per_test_run(): + torch.manual_seed(23) + np.random.seed(23) + + +class FormatlessToChannelsFirstModule(nn.Module): + def __init__(self, channels: int, new_shape: Sequence[int]): + super().__init__() + self.conv = nn.Conv2d(channels, channels, 2, bias=True) + self.new_shape = new_shape + + def forward(self, x): + x = torch.reshape(x, self.new_shape) + x = self.conv(x) + return x + + +class FormatlessToFormatlessModule(nn.Module): + def __init__(self, new_shape: Sequence[int]): + super().__init__() + self.new_shape = new_shape + + def forward(self, x): + x = torch.reshape(x, self.new_shape) + return x + + +class ConvReshapeModule(nn.Module): + def __init__(self, channels: int, new_shape: Sequence[int]): + super().__init__() + self.conv = nn.Conv2d(channels, channels, 2, bias=True) + self.new_shape = new_shape + + def forward(self, x): + x = self.conv(x) + x = torch.reshape(x, self.new_shape) + return x + + +class LinearReshapeModule(torch.nn.Module): + def __init__(self, 
new_shape: Sequence[int]): + super().__init__() + self.linear = nn.Linear(64, 32, bias=True) + self.new_shape = new_shape + + def forward(self, x): + x = self.linear(x) + x = torch.reshape(x, self.new_shape) + return x + + +def test__channels_first_to_2d(mocker): + input_shape = [2, 4, 7, 9] + new_shape = [12, 32] # Mix up the dimensions for a thorough test. + + torch_model = ConvReshapeModule(channels=input_shape[1], new_shape=new_shape) + edge_program = to_edge_program(torch_model, input_shape).exported_program() + + input_data = np.random.random(input_shape).astype(np.float32) + + converter_spy = mocker.spy(ModelBuilder, "finish") + + convert_run_compare( + edge_program, input_data, tflite_input_preprocess=ToNHWCPreprocess() + ) + + tflite_model = converter_spy.spy_return + ops = tflite_model.sub_graphs[0].operators.vector + assert len(ops) == 3 + assert isinstance(ops[0].builtin_options, Conv2D) + assert isinstance(ops[1].builtin_options, Transpose) + assert isinstance(ops[2].builtin_options, Reshape) + + +def test__channels_first_to_4d(mocker): + input_shape = [2, 4, 6, 8] + new_shape = [7, 4, 2, 5] + + torch_model = ConvReshapeModule(channels=input_shape[1], new_shape=new_shape) + edge_program = to_edge_program(torch_model, input_shape).exported_program() + + input_data = np.random.random(input_shape).astype(np.float32) + + converter_spy = mocker.spy(ModelBuilder, "finish") + + convert_run_compare( + edge_program, input_data, tflite_input_preprocess=ToNHWCPreprocess() + ) + + tflite_model = converter_spy.spy_return + ops = tflite_model.sub_graphs[0].operators.vector + assert len(ops) == 3 + assert isinstance(ops[0].builtin_options, Conv2D) + assert isinstance(ops[1].builtin_options, Transpose) + assert isinstance(ops[2].builtin_options, Reshape) + + +def test__formatless_to_channels_first(mocker): + input_shape = [12, 32] + new_shape = [2, 4, 6, 8] # Mix up the dimensions for a thorough test. + + torch_model = FormatlessToChannelsFirstModule( + channels=new_shape[1], new_shape=new_shape + ) + edge_program = to_edge_program(torch_model, input_shape).exported_program() + + input_data = np.random.random(input_shape).astype(np.float32) + + converter_spy = mocker.spy(ModelBuilder, "finish") + + convert_run_compare( + edge_program, input_data, tflite_output_preprocess=ToNCHWPreprocess() + ) + + tflite_model = converter_spy.spy_return + ops = tflite_model.sub_graphs[0].operators.vector + assert len(ops) == 3 + assert isinstance(ops[0].builtin_options, Reshape) + assert isinstance(ops[1].builtin_options, Transpose) + assert isinstance(ops[2].builtin_options, Conv2D) + + +def test__formatless_to_formatless(mocker): + input_shape = [12, 32] + new_shape = [2, 4, 6, 8] + + torch_model = FormatlessToFormatlessModule(new_shape=new_shape) + edge_program = to_edge_program(torch_model, input_shape).exported_program() + + input_data = np.random.random(input_shape).astype(np.float32) + + converter_spy = mocker.spy(ModelBuilder, "finish") + + convert_run_compare(edge_program, input_data) + + tflite_model = converter_spy.spy_return + ops = tflite_model.sub_graphs[0].operators.vector + assert len(ops) == 1 # No extra Transpose ops. 
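+    # The single remaining operator must be the Reshape itself.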
+    assert isinstance(ops[0].builtin_options, Reshape)
+
+
+@pytest.mark.parametrize(
+    "input_shape, new_shape",
+    [
+        pytest.param((8, 64), (1, 16, 4, 4), id="2D"),
+    ],
+)
+def test_view_copy_w_linear_quant_conversion(mocker, input_shape, new_shape):
+    converter_spy = mocker.spy(EdgeProgramToIRConverter, "convert_program")
+
+    # Run conversion
+    _ = to_quantized_edge_program(LinearReshapeModule(new_shape=new_shape), input_shape)
+
+    # Capture generated model
+    tflite_flatbuffers_model, io_formats = converter_spy.spy_return
+
+    # Capture converted program
+    edge_program: ExportedProgram = converter_spy.call_args.args[1]
+
+    input_data = (np.random.random(input_shape).astype(np.float32) * 50).astype(np.int8)
+
+    convert_run_compare(
+        edge_program, input_data, tfl_model=tflite_flatbuffers_model, atol=1.0
+    )
+
+
+@pytest.mark.parametrize(
+    "input_shape, new_shape",
+    [
+        pytest.param((1, 4, 16, 16), (50, 18), id="4D, batch_size=1"),
+        pytest.param((10, 4, 16, 16), (500, 18), id="4D, batch_size=10"),
+    ],
+)
+@pytest.mark.skip(reason="Neutron Converter does not fully convert for NPU")
+def test_view_copy_w_conv_quant_conversion(mocker, input_shape, new_shape):
+    converter_spy = mocker.spy(EdgeProgramToIRConverter, "convert_program")
+
+    # Run conversion
+    _ = to_quantized_edge_program(
+        ConvReshapeModule(channels=input_shape[1], new_shape=new_shape), input_shape
+    )
+
+    # Capture generated model
+    tflite_flatbuffers_model, io_formats = converter_spy.spy_return
+
+    # Capture converted program
+    edge_program: ExportedProgram = converter_spy.call_args.args[1]
+
+    input_data = (np.random.random(input_shape).astype(np.float32) * 50).astype(np.int8)
+
+    convert_run_compare(
+        edge_program,
+        input_data,
+        tflite_input_preprocess=ToNHWCPreprocess(),
+        tfl_model=tflite_flatbuffers_model,
+        atol=1.0,
+    )
diff --git a/backends/nxp/tests/test_neutron_backend.py b/backends/nxp/tests/test_neutron_backend.py
new file mode 100644
index 00000000000..45b4ce5ead5
--- /dev/null
+++ b/backends/nxp/tests/test_neutron_backend.py
@@ -0,0 +1,156 @@
+# Copyright 2024 NXP
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
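+# End-to-end Neutron backend checks: header layout of the delegated payload, and parity between the lowered edge program and the intermediate LiteRT model.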
+ +import numpy as np +import torch + +from executorch.backends.nxp.backend.edge_program_converter import ( + EdgeProgramToIRConverter, +) +from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOptions import BuiltinOptions +from executorch.backends.nxp.backend.ir.lib.tflite.Model import Model +from executorch.backends.nxp.tests.executorch_pipeline import to_quantized_edge_program +from executorch.backends.nxp.tests.executors import ( + convert_run_compare, + EdgeProgramExecutor, + TFLiteExecutor, + ToNHWCPreprocess, +) +from executorch.backends.nxp.tests.models import ( + Conv2dModule, + ConvFCSoftmaxModule, + LinearSoftmaxModule, +) +from torch.export import ExportedProgram + + +def test_neutron_backend__single_conv_model(): + edge_program_manager = to_quantized_edge_program( + Conv2dModule(bias=False), (1, 4, 32, 32) + ) + lowered_module = ( + edge_program_manager.exported_program().graph_module.lowered_module_0 + ) + assert ( + len(lowered_module.processed_bytes) != 0 + ) # The Neutron microcode, weights and kernels have been written here + + +def test_neutron_backend__single_conv_model__payload_header_channels_last(): + edge_program_manager = to_quantized_edge_program( + Conv2dModule(bias=False), (1, 4, 32, 32) + ) + payload = ( + edge_program_manager.exported_program().graph_module.lowered_module_0.processed_bytes + ) + + assert payload[0] == 0x1 # Single input + assert payload[1] == 0x1 # Single output + assert payload[2] == 0x1 # Channels last + assert payload[3] == 0x1 # Channels last + assert all(byte == 0x0 for byte in payload[4:16]) # Aligned to 16 bytes + assert payload[17] != 0x0 # Followed by non-zero content + + +def test_neutron_backend__linear_softmax_model__payload_header_formatless(): + edge_program_manager = to_quantized_edge_program(LinearSoftmaxModule(), (1, 12)) + payload = ( + edge_program_manager.exported_program().graph_module.lowered_module_0.processed_bytes + ) + + assert payload[0] == 0x1 # Single input + assert payload[1] == 0x1 # Single output + assert payload[2] == 0x0 # Formatless + assert payload[3] == 0x0 # Formatless + assert all(byte == 0x0 for byte in payload[4:16]) # Aligned to 16 bytes + assert payload[17] != 0x0 # Followed by non-zero content + + +def test_lowered_program_and_tflite_output_match__conv2d__no_bias(mocker): + converter_spy = mocker.spy(EdgeProgramToIRConverter, "convert_program") + + model = Conv2dModule(bias=False) + input_shape = (1, 4, 32, 32) + + # Run conversion + to_quantized_edge_program(model, input_shape) + + # Capture generated model + tflite_flatbuffers_model, io_formats = converter_spy.spy_return + + tflite_model = Model.GetRootAs(tflite_flatbuffers_model) + sub_graph = tflite_model.Subgraphs(0) + + assert sub_graph.OperatorsLength() == 1 + assert sub_graph.Operators(0).BuiltinOptionsType() == BuiltinOptions.Conv2DOptions + + # Capture converted program + exported_program: ExportedProgram = converter_spy.call_args.args[1] + + input_data = ( + (torch.randn(input_shape, dtype=torch.float32) * 50) + .type(torch.int8) + .detach() + .numpy() + ) + input_data_tflite = np.transpose(input_data, [0, 2, 3, 1]) + + # Execute program and TFLite model + program_executor = EdgeProgramExecutor(exported_program) + tflite_executor = TFLiteExecutor(model_content=tflite_flatbuffers_model) + + output_edge = program_executor.inference(input_data) + output_tflite = tflite_executor.inference(input_data_tflite) + + output_tflite = np.transpose(output_tflite, [0, 3, 1, 2]) + + # Outputs difference is smaller than 1 (rounding error in 
quantization) + assert np.max(np.abs(output_edge - output_tflite)) <= 1 + + +def test_conv_fc__lowered_program_and_tflite_output_match(mocker): + converter_spy = mocker.spy(EdgeProgramToIRConverter, "convert_program") + + model = ConvFCSoftmaxModule() + input_shape = (1, 4, 5, 5) + + # Run conversion + _ = to_quantized_edge_program(model, input_shape) + + # Capture converted program + exported_program: ExportedProgram = converter_spy.call_args.args[1] + + # Capture generated model + tflite_flatbuffers_model, _ = converter_spy.spy_return + + # No Transpose ops in produced TFLite model + tflite_subgraph = Model.GetRootAs(tflite_flatbuffers_model).Subgraphs(0) + + assert tflite_subgraph.OperatorsLength() == 3 + assert ( + tflite_subgraph.Operators(0).BuiltinOptionsType() + == BuiltinOptions.Conv2DOptions + ) + assert ( + tflite_subgraph.Operators(1).BuiltinOptionsType() + == BuiltinOptions.ReshapeOptions + ) + assert ( + tflite_subgraph.Operators(2).BuiltinOptionsType() + == BuiltinOptions.FullyConnectedOptions + ) + + # Verify outputs of program and TFLite model + input_data = ( + (torch.randn(input_shape, dtype=torch.float32)) + .type(torch.int8) + .detach() + .numpy() + ) + convert_run_compare( + exported_program, + input_data=input_data, + tflite_input_preprocess=ToNHWCPreprocess(), + ) diff --git a/backends/nxp/tests/test_neutron_converter_manager.py b/backends/nxp/tests/test_neutron_converter_manager.py new file mode 100644 index 00000000000..fb816ef199f --- /dev/null +++ b/backends/nxp/tests/test_neutron_converter_manager.py @@ -0,0 +1,59 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import pytest +import torch + +from executorch import exir +from executorch.backends.nxp.backend.edge_program_converter import ( + EdgeProgramToIRConverter, +) +from executorch.backends.nxp.backend.neutron_converter_manager import ( + NeutronConverterManager, +) +from executorch.backends.nxp.tests.models import Conv2dModule + + +def test_conv2d_neutron_conversion__default_flavor(): + model = Conv2dModule() + + example_input = (torch.ones(1, 4, 32, 32),) + exir_program = torch.export.export(model, example_input) + edge_program_manager = exir.to_edge(exir_program) + + edge_program_converter = EdgeProgramToIRConverter() + tflite_model, _ = edge_program_converter.convert_program( + edge_program_manager.exported_program() + ) + + neutron_converter_manager = NeutronConverterManager() + neutron_model = neutron_converter_manager.convert( + tflite_model, "imxrt700", "SDK_25_03" + ) + + assert len( + neutron_model + ), "Produced NeutronGraph-based TFLite model has zero length!" + + +def test__conv2d_neutron_conversion__invalid_flavor(): + model = Conv2dModule() + + example_input = (torch.ones(1, 4, 32, 32),) + exir_program = torch.export.export(model, example_input) + edge_program_manager = exir.to_edge(exir_program) + + edge_program_converter = EdgeProgramToIRConverter() + tflite_model, _ = edge_program_converter.convert_program( + edge_program_manager.exported_program() + ) + + neutron_converter_manager = NeutronConverterManager() + with pytest.raises(RuntimeError) as excinfo: + _ = neutron_converter_manager.convert(tflite_model, "imxrt700", "bad_flavor") + + assert "Neutron Converter module with flavor 'bad_flavor' not found." 
in str( + excinfo + ) diff --git a/backends/nxp/tests/test_node_format_inference.py b/backends/nxp/tests/test_node_format_inference.py new file mode 100644 index 00000000000..96107efa755 --- /dev/null +++ b/backends/nxp/tests/test_node_format_inference.py @@ -0,0 +1,89 @@ +# Copyright 2024 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import torch + +from executorch import exir +from executorch.backends.nxp.backend.node_format_inference import ( + NodeFormat, + NodeFormatInference, +) +from executorch.backends.nxp.neutron_pass_manager import NeutronPassManager +from executorch.backends.nxp.tests.models import ( + Conv2dModule, + MaxPool2dModule, + SoftmaxModule, +) +from executorch.backends.xnnpack._passes import RemoveGetItemPass +from executorch.exir.verification.verifier import EXIREdgeDialectVerifier + + +def test_convolution(): + model = Conv2dModule() + example_input = (torch.ones(1, 4, 32, 32),) + + exir_program = torch.export.export(model, example_input) + edge_program = exir.to_edge(exir_program).exported_program() + + node_formats = NodeFormatInference(edge_program).identify_node_formats() + + expected_mapping = { + "p_conv_weight": NodeFormat.CHANNELS_FIRST, + "p_conv_bias": NodeFormat.FORMATLESS, + "x": NodeFormat.CHANNELS_FIRST, + "aten_convolution_default": NodeFormat.CHANNELS_FIRST, + "output": NodeFormat.CHANNELS_FIRST, + } + + for node, node_format in node_formats.items(): + assert expected_mapping[node.name] == node_format + + +def test_softmax(): + model = SoftmaxModule(1) + example_input = (torch.ones(1, 4, 32, 32),) + + exir_program = torch.export.export(model, example_input) + edge_program = exir.to_edge(exir_program).exported_program() + + node_formats = NodeFormatInference(edge_program).identify_node_formats() + + expected_mapping = { + "x": NodeFormat.FORMATLESS, + "aten__softmax_default": NodeFormat.FORMATLESS, + "output": NodeFormat.FORMATLESS, + } + + for node, node_format in node_formats.items(): + assert expected_mapping[node.name] == node_format + + +def test_maxpool2d(): + model = MaxPool2dModule() + example_input = (torch.ones(1, 4, 32, 32),) + + exir_program = torch.export.export(model, example_input) + edge_program = exir.to_edge(exir_program).exported_program() + + # We need to create custom model verifier with max_pool2d added as exception. + # Otherwise, we get violation that this op is not part of ATen Core ops. 
+    edge_program._verifiers = [
+        EXIREdgeDialectVerifier(
+            class_only=True, exception_list=[torch.ops.aten.max_pool2d.default]
+        )
+    ]
+
+    # Remove MaxPool-related "getitem" nodes from graph
+    edge_program = NeutronPassManager(edge_program, [RemoveGetItemPass]).transform()
+    node_formats = NodeFormatInference(edge_program).identify_node_formats()
+
+    expected_mapping = {
+        "x": NodeFormat.CHANNELS_FIRST,
+        "aten_max_pool2d_default": NodeFormat.CHANNELS_FIRST,
+        "output": NodeFormat.CHANNELS_FIRST,
+    }
+
+    for node, node_format in node_formats.items():
+        assert expected_mapping[node.name] == node_format
diff --git a/backends/nxp/tests/test_operator_selector.py b/backends/nxp/tests/test_operator_selector.py
new file mode 100644
index 00000000000..ca301daf738
--- /dev/null
+++ b/backends/nxp/tests/test_operator_selector.py
@@ -0,0 +1,24 @@
+# Copyright 2025 NXP
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import torch
+
+from executorch.backends.nxp.tests.executorch_pipeline import to_quantized_edge_program
+from executorch.backends.nxp.tests.models import Conv2dModule
+
+
+def test_operator_selector_mechanism():
+    model = Conv2dModule(bias=False)
+    input_shape = (1, 4, 32, 32)
+
+    operators_not_to_delegate = ["aten::convolution"]
+
+    edge_program_manager = to_quantized_edge_program(
+        model, input_shape, operators_not_to_delegate=operators_not_to_delegate
+    )
+
+    exported_program = edge_program_manager.exported_program()
+
+    for node in exported_program.graph.nodes:
+        if node.name == "aten_convolution_default":
+            assert "delegation_tag" not in node.meta
diff --git a/backends/nxp/tests/test_qdq_clustering_conv.py b/backends/nxp/tests/test_qdq_clustering_conv.py
new file mode 100644
index 00000000000..1713aace1fe
--- /dev/null
+++ b/backends/nxp/tests/test_qdq_clustering_conv.py
@@ -0,0 +1,31 @@
+# Copyright 2024 NXP
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
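+# Checks that the partitioner tags the (de)quantize nodes around a convolution with the same "cluster" meta entry as the convolution itself.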
+ +from executorch.backends.nxp.tests.executorch_pipeline import to_quantized_edge_program +from executorch.backends.nxp.tests.models import Conv2dModule + + +def test_conv2d_partitioner(): + model = Conv2dModule(bias=False) + + edge_program = to_quantized_edge_program(model, (1, 4, 32, 32)) + + # Get subgraph (module) that is delegated to neutron + lowered_module = edge_program.exported_program().graph_module.lowered_module_0 + nodes = list(lowered_module.original_module.graph.nodes) + + assert len(nodes) == 7 + + q_x_node = nodes[1] + dq_w_node = nodes[2] + dq_x_node = nodes[3] + conv_node = nodes[4] + q_y_node = nodes[5] + + assert "cluster" not in q_x_node.meta + assert dq_w_node.meta["cluster"] == "aten_convolution_default_cluster" + assert dq_x_node.meta["cluster"] == "aten_convolution_default_cluster" + assert conv_node.meta["cluster"] == "aten_convolution_default_cluster" + assert q_y_node.meta["cluster"] == "aten_convolution_default_cluster" diff --git a/backends/xnnpack/_passes/remove_getitem_op.py b/backends/transforms/remove_getitem_op.py similarity index 100% rename from backends/xnnpack/_passes/remove_getitem_op.py rename to backends/transforms/remove_getitem_op.py diff --git a/backends/xnnpack/_passes/__init__.py b/backends/xnnpack/_passes/__init__.py index 36a7833dca0..4bf5bdfb079 100644 --- a/backends/xnnpack/_passes/__init__.py +++ b/backends/xnnpack/_passes/__init__.py @@ -6,6 +6,8 @@ from typing import List, Optional, Type +from executorch.backends.transforms.remove_getitem_op import RemoveGetItemPass + from executorch.backends.xnnpack._passes.channels_last_tagged_reshape_pass import ( ChannelsLastTaggedReshapePass, ) @@ -23,7 +25,6 @@ FuseBatchNormWithConvPass, ) from executorch.backends.xnnpack._passes.prelu_reshape_pass import PReLUReshapePass -from executorch.backends.xnnpack._passes.remove_getitem_op import RemoveGetItemPass from executorch.backends.xnnpack._passes.tag_implicit_q_dq_pass import ( TagImplicitQDqPass, ) diff --git a/backends/xnnpack/test/passes/test_remove_get_item_pass.py b/backends/xnnpack/test/passes/test_remove_get_item_pass.py index 4d71d61afd7..f9d98c6a5ff 100644 --- a/backends/xnnpack/test/passes/test_remove_get_item_pass.py +++ b/backends/xnnpack/test/passes/test_remove_get_item_pass.py @@ -7,7 +7,7 @@ import unittest import torch -from executorch.backends.xnnpack._passes.remove_getitem_op import RemoveGetItemPass +from executorch.backends.transforms.remove_getitem_op import RemoveGetItemPass from executorch.backends.xnnpack.test.tester import RunPasses, Tester diff --git a/examples/nxp/setup.sh b/examples/nxp/setup.sh new file mode 100644 index 00000000000..1ef2cc82c2a --- /dev/null +++ b/examples/nxp/setup.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +# Copyright 2025 NXP +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
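+# Installs the eIQ Neutron Converter (flavor SDK_25_03) used by the Neutron backend to compile delegated subgraphs into Neutron microcode.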
+ +set -u + +# Install neutron-converter +pip install --extra-index-url https://eiq.nxp.com/repository neutron-converter_SDK_25_03 diff --git a/setup.py b/setup.py index 44fb9a712a3..85b9563bc05 100644 --- a/setup.py +++ b/setup.py @@ -123,6 +123,7 @@ def pybindings(cls) -> bool: cls.mps(), cls.openvino(), cls.xnnpack(), + cls.neutron(), cls.training(), ] ), @@ -144,6 +145,10 @@ def openvino(cls) -> bool: def xnnpack(cls) -> bool: return cls._is_cmake_arg_enabled("EXECUTORCH_BUILD_XNNPACK", default=True) + @classmethod + def neutron(cls) -> bool: + return cls._is_cmake_arg_enabled("EXECUTORCH_BUILD_NEUTRON", default=False) + @classmethod def training(cls) -> bool: return cls._is_cmake_arg_enabled( @@ -682,7 +687,7 @@ def initialize_options(self): default_parallel = str(os.cpu_count() - 1) self.parallel = os.environ.get("CMAKE_BUILD_PARALLEL_LEVEL", default_parallel) - def run(self): + def run(self): # noqa C901 self.dump_options() cfg = get_build_type(self.debug) @@ -732,6 +737,9 @@ def run(self): if ShouldBuild.xnnpack(): cmake_args += ["-DEXECUTORCH_BUILD_XNNPACK=ON"] + if ShouldBuild.neutron(): + cmake_args += ["-DEXECUTORCH_BUILD_NEUTRON=ON"] + if ShouldBuild.training(): build_args += ["--target", "_training_lib"]
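For orientation, a minimal sketch of the flow the new backend tests exercise, using only helpers added in this diff; the module and input shape are borrowed from test_neutron_backend.py and stand in for any supported model:

    from executorch.backends.nxp.tests.executorch_pipeline import to_quantized_edge_program
    from executorch.backends.nxp.tests.models import Conv2dModule

    # Quantize the model and lower the supported subgraph to the Neutron backend.
    edge_program_manager = to_quantized_edge_program(Conv2dModule(bias=False), (1, 4, 32, 32))

    # The delegated blob: a 16-byte payload header followed by the Neutron
    # microcode, weights and kernels.
    lowered = edge_program_manager.exported_program().graph_module.lowered_module_0
    assert len(lowered.processed_bytes) != 0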