From 6aa886f8a95baa200765c7f4559b91382cefe015 Mon Sep 17 00:00:00 2001
From: shewu-quic
Date: Thu, 4 Apr 2024 02:15:53 +0800
Subject: [PATCH 1/3] Qualcomm AI Engine Direct - Enable custom operator

Summary:
- Support registering op packages in the QNN backend
- Add an example script that runs a torch custom op with a QNN op package
- Allow op packages to override torch built-in operators
- Add an op package example
- Modify the dlopen flags for the QNN library
- Generate custom ops based on the meta and _schema.arguments of torch.fx.Node
- Add a README for the custom op

--- backends/qualcomm/CMakeLists.txt | 5 +- backends/qualcomm/builders/README.md | 23 +- backends/qualcomm/builders/node_visitor.py | 50 +-- .../qualcomm/builders/node_visitor_manager.py | 77 ++++ backends/qualcomm/builders/op_abs.py | 3 +- .../builders/op_adaptive_avg_pool2d.py | 3 +- backends/qualcomm/builders/op_add.py | 3 +- backends/qualcomm/builders/op_arange.py | 3 +- backends/qualcomm/builders/op_argmin.py | 3 +- backends/qualcomm/builders/op_avg_pool2d.py | 3 +- backends/qualcomm/builders/op_batch_norm.py | 3 +- backends/qualcomm/builders/op_bmm.py | 3 +- backends/qualcomm/builders/op_cat.py | 3 +- backends/qualcomm/builders/op_ceil.py | 3 +- backends/qualcomm/builders/op_clamp.py | 3 +- backends/qualcomm/builders/op_conv2d.py | 3 +- backends/qualcomm/builders/op_cos.py | 3 +- backends/qualcomm/builders/op_custom_op.py | 86 +++++ .../qualcomm/builders/op_depth_to_space.py | 3 +- backends/qualcomm/builders/op_dequantize.py | 3 +- backends/qualcomm/builders/op_div.py | 3 +- backends/qualcomm/builders/op_embedding.py | 3 +- backends/qualcomm/builders/op_eq.py | 3 +- backends/qualcomm/builders/op_expand.py | 3 +- backends/qualcomm/builders/op_full.py | 3 +- backends/qualcomm/builders/op_full_like.py | 3 +- backends/qualcomm/builders/op_ge.py | 3 +- backends/qualcomm/builders/op_gelu.py | 3 +- backends/qualcomm/builders/op_group_norm.py | 3 +- backends/qualcomm/builders/op_gt.py | 3 +- backends/qualcomm/builders/op_hardsigmoid.py | 3 +- backends/qualcomm/builders/op_hardswish.py | 3 +- backends/qualcomm/builders/op_hardtanh.py | 3 +- backends/qualcomm/builders/op_index.py | 3 +- backends/qualcomm/builders/op_index_put.py | 3 +- .../qualcomm/builders/op_instance_norm.py | 3 +- backends/qualcomm/builders/op_layer_norm.py | 3 +- backends/qualcomm/builders/op_le.py | 3 +- backends/qualcomm/builders/op_linear.py | 3 +- backends/qualcomm/builders/op_log.py | 3 +- backends/qualcomm/builders/op_log_softmax.py | 3 +- backends/qualcomm/builders/op_logical_not.py | 3 +- backends/qualcomm/builders/op_lt.py | 3 +- backends/qualcomm/builders/op_matmul.py | 3 +- backends/qualcomm/builders/op_max.py | 3 +- backends/qualcomm/builders/op_max_pool2d.py | 3 +- backends/qualcomm/builders/op_mean_dim.py | 3 +- backends/qualcomm/builders/op_min.py | 3 +- backends/qualcomm/builders/op_mul.py | 3 +- backends/qualcomm/builders/op_ne.py | 3 +- backends/qualcomm/builders/op_neg.py | 3 +- backends/qualcomm/builders/op_pad.py | 3 +- backends/qualcomm/builders/op_pow.py | 3 +- backends/qualcomm/builders/op_prelu.py | 3 +- backends/qualcomm/builders/op_quantize.py | 3 +- backends/qualcomm/builders/op_relu.py | 3 +- backends/qualcomm/builders/op_repeat.py | 3 +- backends/qualcomm/builders/op_reshape.py | 3 +- backends/qualcomm/builders/op_rms_norm.py | 3 +- backends/qualcomm/builders/op_rsqrt.py | 3 +- backends/qualcomm/builders/op_select_copy.py | 3 +- backends/qualcomm/builders/op_sigmoid.py | 3 +- backends/qualcomm/builders/op_sin.py | 3 +-
backends/qualcomm/builders/op_skip_ops.py | 3 +- backends/qualcomm/builders/op_slice_copy.py | 3 +- backends/qualcomm/builders/op_softmax.py | 3 +- .../qualcomm/builders/op_space_to_depth.py | 3 +- .../qualcomm/builders/op_split_with_sizes.py | 3 +- backends/qualcomm/builders/op_sqrt.py | 3 +- backends/qualcomm/builders/op_squeeze.py | 3 +- backends/qualcomm/builders/op_sub.py | 3 +- backends/qualcomm/builders/op_sum_int_list.py | 3 +- backends/qualcomm/builders/op_tanh.py | 3 +- backends/qualcomm/builders/op_to.py | 3 +- backends/qualcomm/builders/op_topk.py | 3 +- backends/qualcomm/builders/op_transpose.py | 3 +- backends/qualcomm/builders/op_unsqueeze.py | 3 +- .../builders/op_upsample_bilinear2d.py | 3 +- .../builders/op_upsample_nearest2d.py | 3 +- backends/qualcomm/builders/op_where.py | 3 +- .../qualcomm/partition/qnn_partitioner.py | 13 +- backends/qualcomm/qnn_preprocess.py | 10 +- backends/qualcomm/runtime/QnnManager.cpp | 6 +- .../qualcomm/runtime/backends/CMakeLists.txt | 7 + .../runtime/backends/QnnBackendCommon.cpp | 43 ++- .../runtime/backends/QnnBackendCommon.h | 12 +- .../runtime/backends/QnnBackendFactory.cpp | 11 +- .../runtime/backends/QnnImplementation.cpp | 12 +- .../runtime/backends/QnnOpPackageManager.cpp | 37 ++ .../runtime/backends/QnnOpPackageManager.h | 40 ++ .../serialization/qc_compiler_spec.fbs | 48 +++ backends/qualcomm/serialization/qc_schema.py | 34 +- backends/qualcomm/tests/test_qnn_delegate.py | 43 ++- backends/qualcomm/tests/utils.py | 8 +- backends/qualcomm/utils/utils.py | 7 + examples/qualcomm/custom_op/README.md | 164 ++++++++ examples/qualcomm/custom_op/custom_ops_1.py | 337 ++++++++++++++++ .../ExampleOpPackage/Makefile | 364 ++++++++++++++++++ .../config/example_op_package_htp.xml | 59 +++ .../src/ExampleOpPackageInterface.cpp | 289 ++++++++++++++ .../src/ops/ExampleCustomOp.cpp | 211 ++++++++++ examples/qualcomm/utils.py | 7 +- 102 files changed, 2073 insertions(+), 155 deletions(-) create mode 100644 backends/qualcomm/builders/node_visitor_manager.py create mode 100644 backends/qualcomm/builders/op_custom_op.py create mode 100644 backends/qualcomm/runtime/backends/QnnOpPackageManager.cpp create mode 100644 backends/qualcomm/runtime/backends/QnnOpPackageManager.h create mode 100644 examples/qualcomm/custom_op/README.md create mode 100644 examples/qualcomm/custom_op/custom_ops_1.py create mode 100644 examples/qualcomm/custom_op/example_op_package_htp/ExampleOpPackage/Makefile create mode 100644 examples/qualcomm/custom_op/example_op_package_htp/ExampleOpPackage/config/example_op_package_htp.xml create mode 100644 examples/qualcomm/custom_op/example_op_package_htp/ExampleOpPackage/src/ExampleOpPackageInterface.cpp create mode 100644 examples/qualcomm/custom_op/example_op_package_htp/ExampleOpPackage/src/ops/ExampleCustomOp.cpp diff --git a/backends/qualcomm/CMakeLists.txt b/backends/qualcomm/CMakeLists.txt index aefa929ee9f..f34c4648f2a 100644 --- a/backends/qualcomm/CMakeLists.txt +++ b/backends/qualcomm/CMakeLists.txt @@ -131,6 +131,7 @@ add_library(qnn_implementation STATIC) add_library(qnn_logger STATIC) add_library(qnn_manager STATIC) add_library(qnn_mem_manager STATIC) +add_library(qnn_op_package_manager STATIC) add_library(qnn_profiler STATIC) add_library(qnn_schema INTERFACE ${_qnn_schema__outputs}) add_library(qnn_sys_function_interface INTERFACE) @@ -157,13 +158,13 @@ target_link_libraries( target_link_libraries(qnn_executorch_logging PRIVATE qnn_schema) target_link_libraries(qnn_profiler PRIVATE qnn_executorch_logging) 
target_link_libraries(qnn_logger PRIVATE qnn_implementation ${android_log}) -target_link_libraries(qnn_backend PRIVATE qnn_implementation qnn_logger) +target_link_libraries(qnn_backend PRIVATE qnn_implementation qnn_logger qnn_op_package_manager) target_link_libraries(qnn_custom_protocol PRIVATE qcir_utils) target_link_libraries( qnn_device PRIVATE qnn_executorch_logging qnn_implementation qnn_logger ) target_link_libraries( - qnn_backend_cache PRIVATE qnn_sys_implementation qcir_utils + qnn_backend_cache PRIVATE qnn_sys_implementation qcir_utils qnn_schema ) target_link_libraries( qnn_context PRIVATE qnn_implementation qnn_logger qnn_backend qnn_device diff --git a/backends/qualcomm/builders/README.md b/backends/qualcomm/builders/README.md index 3a97e8d6d6a..d6a294ea843 100644 --- a/backends/qualcomm/builders/README.md +++ b/backends/qualcomm/builders/README.md @@ -2,14 +2,18 @@ Thank you for contributing to Qualcomm AI Engine Direct delegate for ExecuTorch. Reading and following these guidelines will help you quickly get the essentials of implementing operator builder to unblock yourself and land pull requests more efficiently. ## Sections -* [References](#references) -* [Getting Started](#getting-started) - * [Identify Unsupported Operator](#identify-unsupported-operator) - * [Check Operator Spec](#check-operator-spec) - * [Implementation](#implementation) - * [Quantizer Annotation](#quantizer-annotation) -* [Issues](#issues) -* [Pull Requests](#pull-requests) +- [Contribution for More Operators](#contribution-for-more-operators) + - [Sections](#sections) + - [References](#references) + - [Qualcomm AI Engine Direct](#qualcomm-ai-engine-direct) + - [PyTorch](#pytorch) + - [Getting Started](#getting-started) + - [Identify Unsupported Operator](#identify-unsupported-operator) + - [Check Operator Spec](#check-operator-spec) + - [Implementation](#implementation) + - [Quantizer Annotation](#quantizer-annotation) + - [Issues](#issues) + - [Pull Requests](#pull-requests) ## References ### Qualcomm AI Engine Direct @@ -175,7 +179,8 @@ import torch from executorch.backends.qualcomm.utils.constants import QCOM_DATA # op builder will inherit NodeVisitor and have its own implementation # register_node_visitor for book-keeping the dictionary of target name v.s. 
callback -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor # the definitions required to build operator in QNN from .qnn_constants import OpLayerNorm, QNN_OP_PACKAGE_NAME_QTI_AISW # utility to get parameter value when creating tensor in QNN diff --git a/backends/qualcomm/builders/node_visitor.py b/backends/qualcomm/builders/node_visitor.py index 1e0d2039641..0d5c3fbab25 100644 --- a/backends/qualcomm/builders/node_visitor.py +++ b/backends/qualcomm/builders/node_visitor.py @@ -58,7 +58,9 @@ torch.int64: PyQnnWrapper.Qnn_DataType_t.QNN_DATATYPE_INT_64, torch.uint8: PyQnnWrapper.Qnn_DataType_t.QNN_DATATYPE_UINT_8, torch.uint16: PyQnnWrapper.Qnn_DataType_t.QNN_DATATYPE_UINT_16, + torch.uint32: PyQnnWrapper.Qnn_DataType_t.QNN_DATATYPE_UINT_32, float: PyQnnWrapper.Qnn_DataType_t.QNN_DATATYPE_FLOAT_32, + int: PyQnnWrapper.Qnn_DataType_t.QNN_DATATYPE_UINT_32, } PER_CHANNEL_ENCODING = { @@ -382,51 +384,3 @@ def define_node( ) -> PyQnnWrapper.PyQnnOpWrapper: """Convert torch.fx.Node to OpWrapper""" raise NotImplementedError("NodeVisitor must be extended!") - - -# This will hold mapping of all node names to the visitor class -_node_visitor_dict = {} - - -def register_node_visitor(visitor): - """Register node visitor into _node_visitor_dict""" - assert ( - isinstance(visitor, type) - and issubclass(visitor, NodeVisitor) - and hasattr(visitor, "target") - ), f"Illformed NodeVisitor subclass, can't register!, got: {visitor}" - for target in visitor.target: - _node_visitor_dict[target] = visitor - - -def generate_node_to_external_map( - edge_program: torch.export.ExportedProgram, -) -> Dict[torch.fx.Node, int]: - node_to_external_map = {} - for node in edge_program.graph_module.graph.nodes: - # The order in which we visit the placeholder node is same as the *args - # order for the forward(*args) signature for this gm. Using the order of - # the nodes as external_id to extract the right arg from *args at runtime - if is_graph_input(node, edge_program): - node_to_external_map[node] = len(node_to_external_map) - for node in edge_program.graph_module.graph.nodes: - if is_graph_output(node): - node_to_external_map[node] = len(node_to_external_map) - return node_to_external_map - - -def get_node_visitors( - edge_program: torch.export.ExportedProgram, - enable_tensor_dump=False, -) -> Dict[str, NodeVisitor]: - """Create a new class instance at runtime, and put them in a dict""" - node_to_external_map = generate_node_to_external_map(edge_program) - node_visitors = {} - for target, visitor in _node_visitor_dict.items(): - assert callable( - visitor - ), f"Expeting a callable class, but got {visitor} of type {type(visitor)}" - node_visitors[target] = visitor( - node_to_external_map, edge_program, enable_tensor_dump - ) - return node_visitors diff --git a/backends/qualcomm/builders/node_visitor_manager.py b/backends/qualcomm/builders/node_visitor_manager.py new file mode 100644 index 00000000000..fa9d51db1ad --- /dev/null +++ b/backends/qualcomm/builders/node_visitor_manager.py @@ -0,0 +1,77 @@ +# Copyright (c) Qualcomm Innovation Center, Inc. +# All rights reserved +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
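+
+# This module centralizes builder registration, which previously lived in
+# node_visitor.py. register_node_visitor() records a visitor class under each
+# of its declared targets, generate_node_to_external_map() assigns graph
+# inputs and outputs the positional ids used to look them up at runtime, and
+# get_node_visitors() instantiates one visitor per registered target plus a
+# CustomOp builder for each distinct custom_op_name in the op package infos.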
+ +from typing import Dict, List + +import torch +from executorch.backends.qualcomm.serialization.qc_schema import ( + QnnExecuTorchOpPackageInfo, +) + +from .node_visitor import NodeVisitor +from .op_custom_op import CustomOp +from .utils import is_graph_input, is_graph_output + + +# This will hold mapping of all node names to the visitor class +_node_visitor_dict = {} + + +def register_node_visitor(visitor): + """Register node visitor into _node_visitor_dict""" + assert ( + isinstance(visitor, type) + and issubclass(visitor, NodeVisitor) + and hasattr(visitor, "target") + ), f"Ill-formed NodeVisitor subclass, can't register, got: {visitor}" + for target in visitor.target: + _node_visitor_dict[target] = visitor + + +def generate_node_to_external_map( + edge_program: torch.export.ExportedProgram, +) -> Dict[torch.fx.Node, int]: + node_to_external_map = {} + for node in edge_program.graph_module.graph.nodes: + # The order in which we visit the placeholder node is same as the *args + # order for the forward(*args) signature for this gm. Using the order of + # the nodes as external_id to extract the right arg from *args at runtime + if is_graph_input(node, edge_program): + node_to_external_map[node] = len(node_to_external_map) + for node in edge_program.graph_module.graph.nodes: + if is_graph_output(node): + node_to_external_map[node] = len(node_to_external_map) + return node_to_external_map + + +def get_node_visitors( + edge_program: torch.export.ExportedProgram, + enable_tensor_dump=False, + op_package_infos: List[QnnExecuTorchOpPackageInfo] = None, +) -> Dict[str, NodeVisitor]: + """Create new class instances at runtime and put them in a dict""" + node_to_external_map = generate_node_to_external_map(edge_program) + node_visitors = {} + for target, visitor in _node_visitor_dict.items(): + assert callable( + visitor + ), f"Expecting a callable class, but got {visitor} of type {type(visitor)}" + node_visitors[target] = visitor( + node_to_external_map, edge_program, enable_tensor_dump + ) + if op_package_infos: + custom_ops = [] + for op_package_info in op_package_infos: + if op_package_info.custom_op_name not in custom_ops: + custom_op_builder = CustomOp( + op_package_info, + node_to_external_map, + edge_program, + enable_tensor_dump, + ) + node_visitors[op_package_info.custom_op_name] = custom_op_builder + custom_ops.append(op_package_info.custom_op_name) + return node_visitors diff --git a/backends/qualcomm/builders/op_abs.py b/backends/qualcomm/builders/op_abs.py index 002ffe85208..78ac8ea555e 100644 --- a/backends/qualcomm/builders/op_abs.py +++ b/backends/qualcomm/builders/op_abs.py @@ -9,7 +9,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpElementWiseAbs, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_adaptive_avg_pool2d.py b/backends/qualcomm/builders/op_adaptive_avg_pool2d.py index c944e1646e7..d9def327144 100644 --- a/backends/qualcomm/builders/op_adaptive_avg_pool2d.py +++ b/backends/qualcomm/builders/op_adaptive_avg_pool2d.py @@ -11,7 +11,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpPoolAvg2d, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_add.py b/backends/qualcomm/builders/op_add.py index b5edfd7bb52..ef9124a947f
100644 --- a/backends/qualcomm/builders/op_add.py +++ b/backends/qualcomm/builders/op_add.py @@ -9,7 +9,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpElementWiseAdd, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_arange.py b/backends/qualcomm/builders/op_arange.py index 210ab85e506..e8c4c7d5267 100644 --- a/backends/qualcomm/builders/op_arange.py +++ b/backends/qualcomm/builders/op_arange.py @@ -9,7 +9,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor @register_node_visitor diff --git a/backends/qualcomm/builders/op_argmin.py b/backends/qualcomm/builders/op_argmin.py index 5630b02a5cc..76d7ba2a830 100644 --- a/backends/qualcomm/builders/op_argmin.py +++ b/backends/qualcomm/builders/op_argmin.py @@ -10,7 +10,8 @@ import torch from executorch.backends.qualcomm.utils.constants import QCOM_AXIS_ORDER, QCOM_DATA -from .node_visitor import NodeVisitor, QNN_TENSOR_TYPE_MAP, register_node_visitor +from .node_visitor import NodeVisitor, QNN_TENSOR_TYPE_MAP +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpArgmin, OpCast, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_avg_pool2d.py b/backends/qualcomm/builders/op_avg_pool2d.py index 394d4008587..7982d98ddff 100644 --- a/backends/qualcomm/builders/op_avg_pool2d.py +++ b/backends/qualcomm/builders/op_avg_pool2d.py @@ -12,7 +12,8 @@ import torch from executorch.backends.qualcomm.utils.constants import QCOM_DATA -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpPoolAvg2d, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_batch_norm.py b/backends/qualcomm/builders/op_batch_norm.py index 9aed1401875..e2bbd0ce0a4 100644 --- a/backends/qualcomm/builders/op_batch_norm.py +++ b/backends/qualcomm/builders/op_batch_norm.py @@ -18,7 +18,8 @@ ) from executorch.exir.dialects._ops import ops as exir_ops -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpBatchnorm, QNN_OP_PACKAGE_NAME_QTI_AISW from .utils import get_parameter diff --git a/backends/qualcomm/builders/op_bmm.py b/backends/qualcomm/builders/op_bmm.py index 46fbff1cc7e..a8e1148eabf 100644 --- a/backends/qualcomm/builders/op_bmm.py +++ b/backends/qualcomm/builders/op_bmm.py @@ -9,7 +9,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpMatMul, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_cat.py b/backends/qualcomm/builders/op_cat.py index 7f160856390..dd0c321191a 100644 --- a/backends/qualcomm/builders/op_cat.py +++ b/backends/qualcomm/builders/op_cat.py @@ -12,7 +12,8 @@ import torch from executorch.backends.qualcomm.utils.constants import QCOM_AXIS_ORDER, QCOM_DATA -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpConcat, 
QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_ceil.py b/backends/qualcomm/builders/op_ceil.py index 19fe14d6392..c7ef5f007c1 100644 --- a/backends/qualcomm/builders/op_ceil.py +++ b/backends/qualcomm/builders/op_ceil.py @@ -9,7 +9,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpElementWiseCeil, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_clamp.py b/backends/qualcomm/builders/op_clamp.py index 0f9a9ffa196..f8ff1a2fad1 100644 --- a/backends/qualcomm/builders/op_clamp.py +++ b/backends/qualcomm/builders/op_clamp.py @@ -11,7 +11,8 @@ import torch from executorch.backends.qualcomm.utils.constants import QCOM_DATA -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpReluMinMax, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_conv2d.py b/backends/qualcomm/builders/op_conv2d.py index a6051636d3e..411dc836697 100644 --- a/backends/qualcomm/builders/op_conv2d.py +++ b/backends/qualcomm/builders/op_conv2d.py @@ -13,7 +13,8 @@ import torch from executorch.backends.qualcomm.utils.constants import QCOM_DATA -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import ( OpConv2d, OpDepthWiseConv2d, diff --git a/backends/qualcomm/builders/op_cos.py b/backends/qualcomm/builders/op_cos.py index 3858a947d93..8ad406938f5 100644 --- a/backends/qualcomm/builders/op_cos.py +++ b/backends/qualcomm/builders/op_cos.py @@ -10,7 +10,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpElementWiseCos, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_custom_op.py b/backends/qualcomm/builders/op_custom_op.py new file mode 100644 index 00000000000..28e54e96191 --- /dev/null +++ b/backends/qualcomm/builders/op_custom_op.py @@ -0,0 +1,86 @@ +# Copyright (c) Qualcomm Innovation Center, Inc. +# All rights reserved +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
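+
+# CustomOp builds operators that are implemented by a QNN op package. Unlike
+# the other builders, it is not registered for a fixed target: one instance
+# is constructed per QnnExecuTorchOpPackageInfo. define_node() walks
+# node.args in the order given by node.target._schema.arguments, turning
+# fx.Node arguments into input tensors, iterable arguments into tensor
+# params, and remaining scalars into scalar params keyed by argument name.
+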
+from typing import Dict, Iterable + +import executorch.backends.qualcomm.python.PyQnnWrapperAdaptor as PyQnnWrapper + +import numpy as np +import torch + +from executorch.backends.qualcomm.serialization.qc_schema import ( + QnnExecuTorchOpPackageInfo, +) + +from executorch.backends.qualcomm.utils.constants import QCOM_DATA + +from .node_visitor import NodeVisitor, QNN_TENSOR_TYPE_MAP + + +class CustomOp(NodeVisitor): + target = "" + op_package_info = QnnExecuTorchOpPackageInfo() + + def __init__(self, op_package_info: QnnExecuTorchOpPackageInfo, *args) -> None: + super().__init__(*args) + self.target = op_package_info.custom_op_name + self.op_package_info = op_package_info + + def define_node( + self, + node: torch.fx.Node, + nodes_to_wrappers: Dict[torch.fx.Node, PyQnnWrapper.TensorWrapper], + ) -> PyQnnWrapper.PyQnnOpWrapper: + custom_op = PyQnnWrapper.PyQnnOpWrapper( + node.name, + self.op_package_info.op_package_name, + self.op_package_info.qnn_op_type_name, + ) + + custom_input_tensors = [] + custom_attr_keys = [arg.name for arg in node.target._schema.arguments] + for arg, arg_name in zip(node.args, custom_attr_keys): + if arg is None: + continue + if isinstance(arg, torch.fx.Node): + input_tensor = self.get_tensor(arg, node) + input_tensor_wrapper = self.define_tensor( + arg, + node, + input_tensor, + PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE, + nodes_to_wrappers, + ) + custom_input_tensors.append(input_tensor_wrapper) + elif isinstance(arg, Iterable): + tensor_param_shape = [len(arg)] + custom_op.AddTensorParam( + arg_name, + QNN_TENSOR_TYPE_MAP[type(arg[0])], + len(tensor_param_shape), + tensor_param_shape, + np.array(arg), + True, + ) + else: + custom_op.AddScalarParam( + arg_name, + QNN_TENSOR_TYPE_MAP[type(arg)], + {QCOM_DATA: arg}, + ) + + output_tensor = self.get_tensor(node, node) + output_tensor_wrapper = self.define_tensor( + node, + node, + output_tensor, + PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE, + nodes_to_wrappers, + ) + custom_output_tensors = [output_tensor_wrapper] + + custom_op.AddInputTensors(custom_input_tensors) + custom_op.AddOutputTensors(custom_output_tensors) + return custom_op diff --git a/backends/qualcomm/builders/op_depth_to_space.py b/backends/qualcomm/builders/op_depth_to_space.py index 56c57b4bd5e..b4e94c1e52e 100644 --- a/backends/qualcomm/builders/op_depth_to_space.py +++ b/backends/qualcomm/builders/op_depth_to_space.py @@ -12,7 +12,8 @@ import torch from executorch.backends.qualcomm.utils.constants import QCOM_DATA -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpDepthToSpace, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_dequantize.py b/backends/qualcomm/builders/op_dequantize.py index 507ecc4e3e3..cf250b52cb7 100644 --- a/backends/qualcomm/builders/op_dequantize.py +++ b/backends/qualcomm/builders/op_dequantize.py @@ -9,7 +9,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpDequantize, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_div.py b/backends/qualcomm/builders/op_div.py index ce3f96abc7f..b64218398a0 100644 --- a/backends/qualcomm/builders/op_div.py +++ b/backends/qualcomm/builders/op_div.py @@ -9,7 +9,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor
+from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpElementWiseDivide, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_embedding.py b/backends/qualcomm/builders/op_embedding.py index 5b0d1600393..4d78f860dc8 100644 --- a/backends/qualcomm/builders/op_embedding.py +++ b/backends/qualcomm/builders/op_embedding.py @@ -11,7 +11,8 @@ import torch from executorch.backends.qualcomm.utils.constants import QCOM_DATA -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpGather, QNN_OP_PACKAGE_NAME_QTI_AISW from .utils import get_parameter diff --git a/backends/qualcomm/builders/op_eq.py b/backends/qualcomm/builders/op_eq.py index 855c5e13be6..fb2ef37c00e 100644 --- a/backends/qualcomm/builders/op_eq.py +++ b/backends/qualcomm/builders/op_eq.py @@ -9,7 +9,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpElementWiseEqual, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_expand.py b/backends/qualcomm/builders/op_expand.py index c098ed00c94..9af83201156 100644 --- a/backends/qualcomm/builders/op_expand.py +++ b/backends/qualcomm/builders/op_expand.py @@ -11,7 +11,8 @@ import numpy as np import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpTile, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_full.py b/backends/qualcomm/builders/op_full.py index 4d9d8318fce..d58efd77791 100644 --- a/backends/qualcomm/builders/op_full.py +++ b/backends/qualcomm/builders/op_full.py @@ -9,7 +9,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor @register_node_visitor diff --git a/backends/qualcomm/builders/op_full_like.py b/backends/qualcomm/builders/op_full_like.py index 2ffdf0c63a5..69609d887aa 100644 --- a/backends/qualcomm/builders/op_full_like.py +++ b/backends/qualcomm/builders/op_full_like.py @@ -9,7 +9,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor @register_node_visitor diff --git a/backends/qualcomm/builders/op_ge.py b/backends/qualcomm/builders/op_ge.py index 6784167aa5b..057ab15d162 100644 --- a/backends/qualcomm/builders/op_ge.py +++ b/backends/qualcomm/builders/op_ge.py @@ -9,7 +9,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpElementWiseGreaterEqual, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_gelu.py b/backends/qualcomm/builders/op_gelu.py index c178740448e..75c05c6f560 100644 --- a/backends/qualcomm/builders/op_gelu.py +++ b/backends/qualcomm/builders/op_gelu.py @@ -10,7 +10,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpGelu, 
QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_group_norm.py b/backends/qualcomm/builders/op_group_norm.py index 26700216b53..6ec70f54a99 100644 --- a/backends/qualcomm/builders/op_group_norm.py +++ b/backends/qualcomm/builders/op_group_norm.py @@ -12,7 +12,8 @@ import torch from executorch.backends.qualcomm.utils.constants import QCOM_DATA -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpGroupNorm, QNN_OP_PACKAGE_NAME_QTI_AISW from .utils import get_parameter diff --git a/backends/qualcomm/builders/op_gt.py b/backends/qualcomm/builders/op_gt.py index 6c311f42b7f..119ffe152c3 100644 --- a/backends/qualcomm/builders/op_gt.py +++ b/backends/qualcomm/builders/op_gt.py @@ -9,7 +9,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpElementWiseGreater, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_hardsigmoid.py b/backends/qualcomm/builders/op_hardsigmoid.py index 1acc08a387d..45f76c1117e 100644 --- a/backends/qualcomm/builders/op_hardsigmoid.py +++ b/backends/qualcomm/builders/op_hardsigmoid.py @@ -12,7 +12,8 @@ import torch from executorch.backends.qualcomm.utils.constants import QCOM_DATA -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpElementWiseNeuron, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_hardswish.py b/backends/qualcomm/builders/op_hardswish.py index ed28ff95f78..7745d421e74 100644 --- a/backends/qualcomm/builders/op_hardswish.py +++ b/backends/qualcomm/builders/op_hardswish.py @@ -10,7 +10,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpHardSwish, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_hardtanh.py b/backends/qualcomm/builders/op_hardtanh.py index 68bafaaab8b..a02c0d6a88d 100644 --- a/backends/qualcomm/builders/op_hardtanh.py +++ b/backends/qualcomm/builders/op_hardtanh.py @@ -12,7 +12,8 @@ import torch from executorch.backends.qualcomm.utils.constants import QCOM_DATA -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpReluMinMax, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_index.py b/backends/qualcomm/builders/op_index.py index e78284a5e32..e062e3fb1d8 100644 --- a/backends/qualcomm/builders/op_index.py +++ b/backends/qualcomm/builders/op_index.py @@ -11,7 +11,8 @@ import torch from executorch.backends.qualcomm.utils.constants import QCOM_DATA -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpGather, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_index_put.py b/backends/qualcomm/builders/op_index_put.py index c317cc0a8b7..ea7a5dea207 100644 --- a/backends/qualcomm/builders/op_index_put.py +++ b/backends/qualcomm/builders/op_index_put.py @@ -4,7 +4,8 @@ import torch -from 
.node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpScatterNd, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_instance_norm.py b/backends/qualcomm/builders/op_instance_norm.py index e7e7f14a944..1cdc4dedfb4 100644 --- a/backends/qualcomm/builders/op_instance_norm.py +++ b/backends/qualcomm/builders/op_instance_norm.py @@ -18,7 +18,8 @@ ) from executorch.exir.dialects._ops import ops as exir_ops -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpInstanceNorm, QNN_OP_PACKAGE_NAME_QTI_AISW from .utils import get_parameter diff --git a/backends/qualcomm/builders/op_layer_norm.py b/backends/qualcomm/builders/op_layer_norm.py index 06f822014ed..8db80cfa34d 100644 --- a/backends/qualcomm/builders/op_layer_norm.py +++ b/backends/qualcomm/builders/op_layer_norm.py @@ -13,7 +13,8 @@ import torch from executorch.backends.qualcomm.utils.constants import QCOM_DATA -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpLayerNorm, QNN_OP_PACKAGE_NAME_QTI_AISW from .utils import get_parameter diff --git a/backends/qualcomm/builders/op_le.py b/backends/qualcomm/builders/op_le.py index 1dd2a06b777..9905ab60fcf 100644 --- a/backends/qualcomm/builders/op_le.py +++ b/backends/qualcomm/builders/op_le.py @@ -9,7 +9,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpElementWiseLessEqual, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_linear.py b/backends/qualcomm/builders/op_linear.py index 71b6072b9e5..7a9b311e08c 100644 --- a/backends/qualcomm/builders/op_linear.py +++ b/backends/qualcomm/builders/op_linear.py @@ -16,7 +16,8 @@ QCOM_ZERO_POINTS, ) -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpFullyConnected, QNN_OP_PACKAGE_NAME_QTI_AISW from .utils import get_parameter diff --git a/backends/qualcomm/builders/op_log.py b/backends/qualcomm/builders/op_log.py index bcc40aa6268..c0721d26a4d 100644 --- a/backends/qualcomm/builders/op_log.py +++ b/backends/qualcomm/builders/op_log.py @@ -9,7 +9,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpElementWiseLog, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_log_softmax.py b/backends/qualcomm/builders/op_log_softmax.py index d395d5eb66e..198c9be7300 100644 --- a/backends/qualcomm/builders/op_log_softmax.py +++ b/backends/qualcomm/builders/op_log_softmax.py @@ -11,7 +11,8 @@ import torch from executorch.backends.qualcomm.utils.constants import QCOM_AXIS_ORDER, QCOM_DATA -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpLogSoftmax, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_logical_not.py 
b/backends/qualcomm/builders/op_logical_not.py index 457a1007ada..b18a7fd76f7 100644 --- a/backends/qualcomm/builders/op_logical_not.py +++ b/backends/qualcomm/builders/op_logical_not.py @@ -9,7 +9,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpElementWiseNot, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_lt.py b/backends/qualcomm/builders/op_lt.py index b4a080efc38..edc7622002e 100644 --- a/backends/qualcomm/builders/op_lt.py +++ b/backends/qualcomm/builders/op_lt.py @@ -9,7 +9,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpElementWiseLess, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_matmul.py b/backends/qualcomm/builders/op_matmul.py index 577bcb12a42..eab1561eaf5 100644 --- a/backends/qualcomm/builders/op_matmul.py +++ b/backends/qualcomm/builders/op_matmul.py @@ -9,7 +9,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpMatMul, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_max.py b/backends/qualcomm/builders/op_max.py index 7d41358a266..888dfb93a5f 100644 --- a/backends/qualcomm/builders/op_max.py +++ b/backends/qualcomm/builders/op_max.py @@ -9,7 +9,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpElementWiseMaximum, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_max_pool2d.py b/backends/qualcomm/builders/op_max_pool2d.py index 8d0087eb2c6..a88bb9b87d2 100644 --- a/backends/qualcomm/builders/op_max_pool2d.py +++ b/backends/qualcomm/builders/op_max_pool2d.py @@ -12,7 +12,8 @@ import torch from executorch.backends.qualcomm.utils.constants import QCOM_DATA -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpPoolMax2d, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_mean_dim.py b/backends/qualcomm/builders/op_mean_dim.py index 313b24420db..81fed258557 100644 --- a/backends/qualcomm/builders/op_mean_dim.py +++ b/backends/qualcomm/builders/op_mean_dim.py @@ -12,7 +12,8 @@ import torch from executorch.backends.qualcomm.utils.constants import QCOM_AXIS_ORDER, QCOM_DATA -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpReduceMean, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_min.py b/backends/qualcomm/builders/op_min.py index 0df2796974d..c490c527ad0 100644 --- a/backends/qualcomm/builders/op_min.py +++ b/backends/qualcomm/builders/op_min.py @@ -9,7 +9,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpElementWiseMinimum, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_mul.py 
b/backends/qualcomm/builders/op_mul.py index 3138d3b8c9b..9e84f9a1309 100644 --- a/backends/qualcomm/builders/op_mul.py +++ b/backends/qualcomm/builders/op_mul.py @@ -9,7 +9,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpElementWiseMultiply, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_ne.py b/backends/qualcomm/builders/op_ne.py index 0227b02efbf..7c8977097f5 100644 --- a/backends/qualcomm/builders/op_ne.py +++ b/backends/qualcomm/builders/op_ne.py @@ -17,7 +17,8 @@ ) from executorch.exir.dialects._ops import ops as exir_ops -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpElementWiseNotEqual, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_neg.py b/backends/qualcomm/builders/op_neg.py index a950a1887ab..be4ebbcef31 100644 --- a/backends/qualcomm/builders/op_neg.py +++ b/backends/qualcomm/builders/op_neg.py @@ -8,7 +8,8 @@ import executorch.backends.qualcomm.python.PyQnnWrapperAdaptor as PyQnnWrapper import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpElementWiseNeg, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_pad.py b/backends/qualcomm/builders/op_pad.py index 10948859be9..d4b3ffe38ac 100644 --- a/backends/qualcomm/builders/op_pad.py +++ b/backends/qualcomm/builders/op_pad.py @@ -11,7 +11,8 @@ import torch from executorch.backends.qualcomm.utils.constants import QCOM_AXIS_ORDER, QCOM_DATA -from .node_visitor import NodeVisitor, QNN_TENSOR_TYPE_MAP, register_node_visitor +from .node_visitor import NodeVisitor, QNN_TENSOR_TYPE_MAP +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpPad, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_pow.py b/backends/qualcomm/builders/op_pow.py index 3e89bdcfc4d..0db3d777732 100644 --- a/backends/qualcomm/builders/op_pow.py +++ b/backends/qualcomm/builders/op_pow.py @@ -9,7 +9,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpElementWisePower, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_prelu.py b/backends/qualcomm/builders/op_prelu.py index e35839f535e..15a4e495578 100644 --- a/backends/qualcomm/builders/op_prelu.py +++ b/backends/qualcomm/builders/op_prelu.py @@ -10,7 +10,8 @@ import torch from executorch.backends.qualcomm.utils.constants import QCOM_AXIS_ORDER -from .node_visitor import get_parameter, NodeVisitor, register_node_visitor +from .node_visitor import get_parameter, NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpPRelu, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_quantize.py b/backends/qualcomm/builders/op_quantize.py index 4921f96b467..b9ee94c305b 100644 --- a/backends/qualcomm/builders/op_quantize.py +++ b/backends/qualcomm/builders/op_quantize.py @@ -10,7 +10,8 @@ import torch from executorch.backends.qualcomm.utils.constants import QCOM_ENCODING, QCOM_QUANT_ATTRS -from .node_visitor import NodeVisitor, 
register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpQuantize, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_relu.py b/backends/qualcomm/builders/op_relu.py index 29335797e28..b94edabaeec 100644 --- a/backends/qualcomm/builders/op_relu.py +++ b/backends/qualcomm/builders/op_relu.py @@ -9,7 +9,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpRelu, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_repeat.py b/backends/qualcomm/builders/op_repeat.py index 9748f1e9619..983ab01df7d 100644 --- a/backends/qualcomm/builders/op_repeat.py +++ b/backends/qualcomm/builders/op_repeat.py @@ -11,7 +11,8 @@ import numpy as np import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpTile, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_reshape.py b/backends/qualcomm/builders/op_reshape.py index ff4a603fa5b..6c2078e0b90 100644 --- a/backends/qualcomm/builders/op_reshape.py +++ b/backends/qualcomm/builders/op_reshape.py @@ -9,7 +9,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpReshape, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_rms_norm.py b/backends/qualcomm/builders/op_rms_norm.py index e5b4778312e..975bcb2434e 100644 --- a/backends/qualcomm/builders/op_rms_norm.py +++ b/backends/qualcomm/builders/op_rms_norm.py @@ -15,7 +15,8 @@ from executorch.backends.qualcomm.utils.constants import QCOM_DATA, QCOM_QUANT_ATTRS from executorch.exir.dialects._ops import ops as exir_ops -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpRmsNorm, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_rsqrt.py b/backends/qualcomm/builders/op_rsqrt.py index 162b485e9e5..59d3e058390 100644 --- a/backends/qualcomm/builders/op_rsqrt.py +++ b/backends/qualcomm/builders/op_rsqrt.py @@ -9,7 +9,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpElementWiseRsqrt, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_select_copy.py b/backends/qualcomm/builders/op_select_copy.py index 148888f1497..9ea83a18d2a 100644 --- a/backends/qualcomm/builders/op_select_copy.py +++ b/backends/qualcomm/builders/op_select_copy.py @@ -12,7 +12,8 @@ import torch from executorch.backends.qualcomm.utils.constants import QCOM_DATA -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpStridedSlice, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_sigmoid.py b/backends/qualcomm/builders/op_sigmoid.py index ae6e6709c0a..d1fca9e4192 100644 --- a/backends/qualcomm/builders/op_sigmoid.py +++ b/backends/qualcomm/builders/op_sigmoid.py @@ -9,7 +9,8 @@ 
import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpSigmoid, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_sin.py b/backends/qualcomm/builders/op_sin.py index 89fce6bee9c..b6b0d4e6edc 100644 --- a/backends/qualcomm/builders/op_sin.py +++ b/backends/qualcomm/builders/op_sin.py @@ -10,7 +10,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpElementWiseSin, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_skip_ops.py b/backends/qualcomm/builders/op_skip_ops.py index 0d651e80f8a..f52f69d6019 100644 --- a/backends/qualcomm/builders/op_skip_ops.py +++ b/backends/qualcomm/builders/op_skip_ops.py @@ -10,7 +10,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor class OpSkipOps(NodeVisitor): diff --git a/backends/qualcomm/builders/op_slice_copy.py b/backends/qualcomm/builders/op_slice_copy.py index 8d12e03c0bb..7aac34b9d05 100644 --- a/backends/qualcomm/builders/op_slice_copy.py +++ b/backends/qualcomm/builders/op_slice_copy.py @@ -10,7 +10,8 @@ import numpy as np import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpStridedSlice, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_softmax.py b/backends/qualcomm/builders/op_softmax.py index f6f826e2a40..46e5a65267d 100644 --- a/backends/qualcomm/builders/op_softmax.py +++ b/backends/qualcomm/builders/op_softmax.py @@ -11,7 +11,8 @@ import torch from executorch.backends.qualcomm.utils.constants import QCOM_AXIS_ORDER, QCOM_DATA -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpSoftmax, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_space_to_depth.py b/backends/qualcomm/builders/op_space_to_depth.py index 0282cf3f15a..e1789f179ac 100644 --- a/backends/qualcomm/builders/op_space_to_depth.py +++ b/backends/qualcomm/builders/op_space_to_depth.py @@ -12,7 +12,8 @@ import torch from executorch.backends.qualcomm.utils.constants import QCOM_DATA -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpSpaceToDepth, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_split_with_sizes.py b/backends/qualcomm/builders/op_split_with_sizes.py index 629110b3084..e52002078f1 100644 --- a/backends/qualcomm/builders/op_split_with_sizes.py +++ b/backends/qualcomm/builders/op_split_with_sizes.py @@ -11,7 +11,8 @@ import torch from executorch.backends.qualcomm.utils.constants import QCOM_AXIS_ORDER, QCOM_DATA -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpSplit, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_sqrt.py b/backends/qualcomm/builders/op_sqrt.py index 
dc6691460ca..6a775f3c532 100644 --- a/backends/qualcomm/builders/op_sqrt.py +++ b/backends/qualcomm/builders/op_sqrt.py @@ -9,7 +9,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpSqrt, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_squeeze.py b/backends/qualcomm/builders/op_squeeze.py index b828bb7b0b9..4bb3718c8ee 100644 --- a/backends/qualcomm/builders/op_squeeze.py +++ b/backends/qualcomm/builders/op_squeeze.py @@ -9,7 +9,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpReshape, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_sub.py b/backends/qualcomm/builders/op_sub.py index 954ca9d3917..44cedbd0de6 100644 --- a/backends/qualcomm/builders/op_sub.py +++ b/backends/qualcomm/builders/op_sub.py @@ -9,7 +9,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpElementWiseSubtract, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_sum_int_list.py b/backends/qualcomm/builders/op_sum_int_list.py index 74181f46cb3..5e91736b5ea 100644 --- a/backends/qualcomm/builders/op_sum_int_list.py +++ b/backends/qualcomm/builders/op_sum_int_list.py @@ -11,7 +11,8 @@ import torch from executorch.backends.qualcomm.utils.constants import QCOM_AXIS_ORDER, QCOM_DATA -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpReduceSum, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_tanh.py b/backends/qualcomm/builders/op_tanh.py index ddc9fd2a2a6..14720a0691c 100644 --- a/backends/qualcomm/builders/op_tanh.py +++ b/backends/qualcomm/builders/op_tanh.py @@ -10,7 +10,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpTanh, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_to.py b/backends/qualcomm/builders/op_to.py index 5fb016aef95..ffb19128c2c 100644 --- a/backends/qualcomm/builders/op_to.py +++ b/backends/qualcomm/builders/op_to.py @@ -10,7 +10,8 @@ import torch from executorch.backends.qualcomm.utils.constants import QCOM_QUANT_ATTRS -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpCast, OpConvert, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_topk.py b/backends/qualcomm/builders/op_topk.py index 745cf7b9935..7daa25cd6a8 100644 --- a/backends/qualcomm/builders/op_topk.py +++ b/backends/qualcomm/builders/op_topk.py @@ -16,7 +16,8 @@ QCOM_QUANT_ATTRS, ) -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpTopK, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_transpose.py b/backends/qualcomm/builders/op_transpose.py index d29fc73084c..f0852c27535 
100644 --- a/backends/qualcomm/builders/op_transpose.py +++ b/backends/qualcomm/builders/op_transpose.py @@ -12,7 +12,8 @@ import torch from executorch.backends.qualcomm.utils.constants import QCOM_INSERTED_PERMUTE -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpTranspose, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_unsqueeze.py b/backends/qualcomm/builders/op_unsqueeze.py index 55790129462..7e7334be0e8 100644 --- a/backends/qualcomm/builders/op_unsqueeze.py +++ b/backends/qualcomm/builders/op_unsqueeze.py @@ -9,7 +9,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpReshape, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_upsample_bilinear2d.py b/backends/qualcomm/builders/op_upsample_bilinear2d.py index 160f15494d8..eadc695c5d8 100644 --- a/backends/qualcomm/builders/op_upsample_bilinear2d.py +++ b/backends/qualcomm/builders/op_upsample_bilinear2d.py @@ -10,7 +10,8 @@ import torch from executorch.backends.qualcomm.utils.constants import QCOM_DATA -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpResizeBilinear, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_upsample_nearest2d.py b/backends/qualcomm/builders/op_upsample_nearest2d.py index 6b7949716cb..61c4b78ae05 100644 --- a/backends/qualcomm/builders/op_upsample_nearest2d.py +++ b/backends/qualcomm/builders/op_upsample_nearest2d.py @@ -10,7 +10,8 @@ import torch from executorch.backends.qualcomm.utils.constants import QCOM_DATA -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpResizeNearestNeighbor, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/builders/op_where.py b/backends/qualcomm/builders/op_where.py index ecac45a7a6f..df089270a21 100644 --- a/backends/qualcomm/builders/op_where.py +++ b/backends/qualcomm/builders/op_where.py @@ -9,7 +9,8 @@ import torch -from .node_visitor import NodeVisitor, register_node_visitor +from .node_visitor import NodeVisitor +from .node_visitor_manager import register_node_visitor from .qnn_constants import OpElementWiseSelect, QNN_OP_PACKAGE_NAME_QTI_AISW diff --git a/backends/qualcomm/partition/qnn_partitioner.py b/backends/qualcomm/partition/qnn_partitioner.py index 93b1d50f5fe..375b4646345 100644 --- a/backends/qualcomm/partition/qnn_partitioner.py +++ b/backends/qualcomm/partition/qnn_partitioner.py @@ -9,9 +9,14 @@ import executorch.backends.qualcomm.python.PyQnnManagerAdaptor as PyQnnManager import torch -from executorch.backends.qualcomm.builders import node_visitor +from executorch.backends.qualcomm.builders import node_visitor_manager from executorch.backends.qualcomm.builders.qnn_constants import OpContextLoader from executorch.backends.qualcomm.qnn_preprocess import QnnBackend + +from executorch.backends.qualcomm.serialization.qc_schema_serialize import ( + flatbuffer_to_option, +) + from executorch.backends.qualcomm.utils.constants import QCOM_AXIS_ORDER from executorch.exir.backend.backend_details import CompileSpec @@ -44,7 +49,11 @@ def 
__init__( skip_node_id_set: set = None, skip_node_op_set: set = None, ): - self.node_visitors = node_visitor.get_node_visitors(edge_program) + python_options = flatbuffer_to_option(compiler_specs[0].value) + self.node_visitors = node_visitor_manager.get_node_visitors( + edge_program, + op_package_infos=python_options.op_package_options.op_package_infos, + ) self.skip_node_op_set = skip_node_op_set self.skip_node_id_set = skip_node_id_set diff --git a/backends/qualcomm/qnn_preprocess.py b/backends/qualcomm/qnn_preprocess.py index 8afb4851814..3c59acfaa00 100644 --- a/backends/qualcomm/qnn_preprocess.py +++ b/backends/qualcomm/qnn_preprocess.py @@ -17,9 +17,12 @@ InsertRequantize, LayoutTransform, ) -from executorch.backends.qualcomm.builders.node_visitor import get_node_visitors +from executorch.backends.qualcomm.builders.node_visitor_manager import get_node_visitors from executorch.backends.qualcomm.builders.qnn_constants import OpContextLoader from executorch.backends.qualcomm.partition.utils import generate_qnn_executorch_option +from executorch.backends.qualcomm.serialization.qc_schema_serialize import ( + flatbuffer_to_option, +) from executorch.exir.backend.backend_details import ( BackendDetails, CompileSpec, @@ -58,10 +61,13 @@ def preprocess( pass_result = qnn_compiler_passes(edge_program.graph_module) assert pass_result is not None + python_options = flatbuffer_to_option(compile_specs[0].value) enable_tensor_dump = qnn_manager.IsTensorDump() nodes_to_wrappers = defaultdict(dict) node_visitors = get_node_visitors( - edge_program, enable_tensor_dump=enable_tensor_dump + edge_program, + enable_tensor_dump=enable_tensor_dump, + op_package_infos=python_options.op_package_options.op_package_infos, ) py_op_wrapper_list = [] for node in pass_result.graph_module.graph.nodes: diff --git a/backends/qualcomm/runtime/QnnManager.cpp b/backends/qualcomm/runtime/QnnManager.cpp index 994cc1931cc..5381a0d251f 100644 --- a/backends/qualcomm/runtime/QnnManager.cpp +++ b/backends/qualcomm/runtime/QnnManager.cpp @@ -76,6 +76,9 @@ QnnManager::QnnManager( "Is on-device graph construction: %d", options->online_prepare()); QNN_EXECUTORCH_LOG_INFO( "Enable shared buffer: %d", options->shared_buffer()); + QNN_EXECUTORCH_LOG_INFO( + "The number of op packages: %d", + options_->op_package_options()->op_package_infos()->size()); } if (library_path.empty()) { @@ -296,7 +299,8 @@ Error QnnManager::Init() { Internal, "Fail to configure Qnn backend cache"); ET_CHECK_OR_RETURN_ERROR( - backend_params_ptr_->qnn_backend_ptr_->Configure() == Error::Ok, + backend_params_ptr_->qnn_backend_ptr_->Configure( + options_->op_package_options()) == Error::Ok, Internal, "Fail to configure Qnn backend"); ET_CHECK_OR_RETURN_ERROR( diff --git a/backends/qualcomm/runtime/backends/CMakeLists.txt b/backends/qualcomm/runtime/backends/CMakeLists.txt index 81536d26f78..94259f9489d 100644 --- a/backends/qualcomm/runtime/backends/CMakeLists.txt +++ b/backends/qualcomm/runtime/backends/CMakeLists.txt @@ -94,6 +94,13 @@ target_sources( ${HOST_ARCHITECTURE}/HtpGraphCustomConfig.cpp ) +# qnn_op_package_manager +target_sources( + qnn_op_package_manager + PUBLIC ${CMAKE_CURRENT_LIST_DIR}/QnnOpPackageManager.h + PRIVATE ${CMAKE_CURRENT_LIST_DIR}/QnnOpPackageManager.cpp +) + # qnn_backend target_sources( qnn_backend diff --git a/backends/qualcomm/runtime/backends/QnnBackendCommon.cpp b/backends/qualcomm/runtime/backends/QnnBackendCommon.cpp index 0df40ddb4e5..5b3dcce7829 100644 --- a/backends/qualcomm/runtime/backends/QnnBackendCommon.cpp +++ 
b/backends/qualcomm/runtime/backends/QnnBackendCommon.cpp
@@ -30,7 +30,43 @@ QnnBackend::~QnnBackend() {
   }
 }
 
-Error QnnBackend::Configure() {
+void QnnBackend::BackendRegisterOpPackage(
+    const flatbuffers::Vector<
+        flatbuffers::Offset<QnnExecuTorchOpPackageInfo>,
+        flatbuffers::uoffset_t>* op_packages_infos) {
+  const QnnInterface& qnn_interface = implementation_.GetQnnInterface();
+  Qnn_ErrorHandle_t error = QNN_SUCCESS;
+  QnnExecuTorchOpPackagePlatform current_platform =
+      QnnExecuTorchOpPackagePlatform::UNKNOWN;
+#if defined(__x86_64__)
+  current_platform = QnnExecuTorchOpPackagePlatform::X86_64;
+#elif defined(__ANDROID__)
+  current_platform = QnnExecuTorchOpPackagePlatform::AARCH64_ANDROID;
+#endif
+  for (const auto op_package_info : *op_packages_infos) {
+    if (current_platform != op_package_info->platform() ||
+        op_package_manager_.Has(op_package_info->op_package_path()->c_str()))
+      continue;
+
+    error = qnn_interface.qnn_backend_register_op_package(
+        handle_,
+        op_package_info->op_package_path()->c_str(),
+        op_package_info->interface_provider()->c_str(),
+        EnumNameQnnExecuTorchOpPackageTarget(op_package_info->target()));
+    if (error != QNN_SUCCESS) {
+      QNN_EXECUTORCH_LOG_ERROR(
+          "Failed to register op package: "
+          "%s , error=%d",
+          op_package_info->op_package_path()->c_str(),
+          QNN_GET_ERROR_CODE(error));
+    } else {
+      op_package_manager_.Add(op_package_info->op_package_path()->c_str());
+    }
+  }
+}
+
+Error QnnBackend::Configure(
+    const QnnExecuTorchOpPackageOptions* op_package_options) {
   // create qnn backend
   const QnnInterface& qnn_interface = implementation_.GetQnnInterface();
   Qnn_ErrorHandle_t error = QNN_SUCCESS;
@@ -54,6 +90,11 @@ Error QnnBackend::Configure() {
         QNN_GET_ERROR_CODE(error));
     return Error::Internal;
   }
+
+  if (op_package_options->op_package_infos()->size() > 0) {
+    BackendRegisterOpPackage(op_package_options->op_package_infos());
+  }
+
   return Error::Ok;
 }
diff --git a/backends/qualcomm/runtime/backends/QnnBackendCommon.h b/backends/qualcomm/runtime/backends/QnnBackendCommon.h
index 56b5284c537..a8616289a0f 100644
--- a/backends/qualcomm/runtime/backends/QnnBackendCommon.h
+++ b/backends/qualcomm/runtime/backends/QnnBackendCommon.h
@@ -7,10 +7,12 @@
  */
 #pragma once
+#include
 #include
 #include
 #include
-
+#include
+#include
 #include
 #include "HTP/QnnHtpCommon.h"
@@ -34,7 +36,8 @@ class QnnBackend {
     return false;
   }
-  executorch::runtime::Error Configure();
+  executorch::runtime::Error Configure(
+      const QnnExecuTorchOpPackageOptions* op_package_options);
   Qnn_ErrorHandle_t BackendValidateOpConfig(const Qnn_OpConfig_t& op_config) {
     return implementation_.GetQnnInterface().qnn_backend_validate_op_config(
@@ -56,8 +59,13 @@ class QnnBackend {
   };
  private:
+  void BackendRegisterOpPackage(
+      const flatbuffers::Vector<
+          flatbuffers::Offset<QnnExecuTorchOpPackageInfo>,
+          flatbuffers::uoffset_t>* op_packages_info);
   Qnn_BackendHandle_t handle_;
   const QnnImplementation& implementation_;
+  QnnOpPackageManager op_package_manager_;
   QnnLogger* logger_;
   executorch::runtime::Error VersionChecker(
       const Qnn_Version_t& qnn_version,
diff --git a/backends/qualcomm/runtime/backends/QnnBackendFactory.cpp b/backends/qualcomm/runtime/backends/QnnBackendFactory.cpp
index 29e6686740b..2106637ee21 100644
--- a/backends/qualcomm/runtime/backends/QnnBackendFactory.cpp
+++ b/backends/qualcomm/runtime/backends/QnnBackendFactory.cpp
@@ -23,13 +23,12 @@ std::unique_ptr QnnBackendFactory::Create(
   switch (options->backend_options()->backend_type()) {
     case QnnExecuTorchBackendType::kHtpBackend: {
       auto htp_options =
options->backend_options()->htp_options(); + const std::string skel_library_dir = + htp_options->skel_library_dir()->str(); + if (!skel_library_dir.empty()) { + setenv("ADSP_LIBRARY_PATH", skel_library_dir.c_str(), /*overwrite=*/1); + } if (options->log_level() >= QnnExecuTorchLogLevel::kLogLevelInfo) { - const std::string skel_library_dir = - htp_options->skel_library_dir()->str(); - if (!skel_library_dir.empty()) { - setenv( - "ADSP_LIBRARY_PATH", skel_library_dir.c_str(), /*overwrite=*/1); - } QNN_EXECUTORCH_LOG_INFO( "skel_library_dir: %s", skel_library_dir.c_str()); QNN_EXECUTORCH_LOG_INFO( diff --git a/backends/qualcomm/runtime/backends/QnnImplementation.cpp b/backends/qualcomm/runtime/backends/QnnImplementation.cpp index 6baf4cbb411..540256d8ad0 100644 --- a/backends/qualcomm/runtime/backends/QnnImplementation.cpp +++ b/backends/qualcomm/runtime/backends/QnnImplementation.cpp @@ -57,8 +57,16 @@ Error QnnImplementation::StartBackend( const std::string& lib_path, const QnnSaver_Config_t** saver_config) { Qnn_ErrorHandle_t error = QNN_SUCCESS; - void* lib_handle = dlopen(lib_path.c_str(), RTLD_NOW | RTLD_GLOBAL); - + // RTLD_GLOBAL is needed on x86 as HTP op package has a requirement for the + // symbols in backend to be visible. Using RTLD_LOCAL on Android to allow full + // unloading of HTP backend shared library on dlclose() as RTLD_GLOBAL isn't + // letting it happen. + void* lib_handle = nullptr; +#if defined(__ANDROID__) + lib_handle = dlopen(lib_path.c_str(), RTLD_NOW | RTLD_LOCAL); +#else + lib_handle = dlopen(lib_path.c_str(), RTLD_NOW | RTLD_GLOBAL); +#endif if (lib_handle == nullptr) { QNN_EXECUTORCH_LOG_ERROR( "Cannot Open QNN library %s, with error: %s", diff --git a/backends/qualcomm/runtime/backends/QnnOpPackageManager.cpp b/backends/qualcomm/runtime/backends/QnnOpPackageManager.cpp new file mode 100644 index 00000000000..f0fe7ab34de --- /dev/null +++ b/backends/qualcomm/runtime/backends/QnnOpPackageManager.cpp @@ -0,0 +1,37 @@ +/* + * Copyright (c) Qualcomm Innovation Center, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ +#include +namespace executorch { +namespace backends { +namespace qnn { + +bool QnnOpPackageManager::Add(std::string qnn_op_name) { + const std::lock_guard lock(table_mutex_); + std::pair ret = + qnn_op_package_path_set_.emplace(qnn_op_name); + return ret.second; +} + +bool QnnOpPackageManager::Has(std::string qnn_op_name) { + const std::lock_guard lock(table_mutex_); + return qnn_op_package_path_set_.count(qnn_op_name) > 0; +} + +bool QnnOpPackageManager::Erase(std::string qnn_op_name) { + const std::lock_guard lock(table_mutex_); + return qnn_op_package_path_set_.erase(qnn_op_name) > 0; +} + +void QnnOpPackageManager::Clear() { + const std::lock_guard lock(table_mutex_); + qnn_op_package_path_set_.clear(); +}; + +} // namespace qnn +} // namespace backends +} // namespace executorch diff --git a/backends/qualcomm/runtime/backends/QnnOpPackageManager.h b/backends/qualcomm/runtime/backends/QnnOpPackageManager.h new file mode 100644 index 00000000000..02e522db365 --- /dev/null +++ b/backends/qualcomm/runtime/backends/QnnOpPackageManager.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) Qualcomm Innovation Center, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ +#pragma once +#include +#include + +namespace executorch { +namespace backends { +namespace qnn { +class QnnOpPackageManager { + public: + QnnOpPackageManager() = default; + ~QnnOpPackageManager() = default; + + QnnOpPackageManager(const QnnOpPackageManager& rhs) = delete; + QnnOpPackageManager(QnnOpPackageManager&& rhs) = delete; + QnnOpPackageManager& operator=(const QnnOpPackageManager& rhs) = delete; + QnnOpPackageManager& operator=(QnnOpPackageManager&& rhs) = delete; + + bool Add(std::string qnn_op_name); + + bool Has(std::string qnn_op_name); + + bool Erase(std::string qnn_op_name); + + void Clear(); + + private: + std::unordered_set qnn_op_package_path_set_; + std::mutex table_mutex_; +}; + +} // namespace qnn +} // namespace backends +} // namespace executorch diff --git a/backends/qualcomm/serialization/qc_compiler_spec.fbs b/backends/qualcomm/serialization/qc_compiler_spec.fbs index 0ce41736394..9bd05661683 100644 --- a/backends/qualcomm/serialization/qc_compiler_spec.fbs +++ b/backends/qualcomm/serialization/qc_compiler_spec.fbs @@ -150,6 +150,51 @@ enum QnnExecuTorchProfileLevel: int { kProfileOptrace, } +/// The target of the op package library. +enum QnnExecuTorchOpPackageTarget: int { + UNKNOWN = 0, + CPU, + HTP, +} + +/// The platform of the op package library. +enum QnnExecuTorchOpPackagePlatform: int { + UNKNOWN = 0, + X86_64, + AARCH64_ANDROID, +} + + +table QnnExecuTorchOpPackageInfo { + /// The name of the op package. + op_package_name:string; + + /// The path on disk to the op package library. + op_package_path:string; + + /// The name of a function in the op package library which satisfies the + /// QnnOpPackage_InterfaceProvider_t interface. + interface_provider:string; + + /// The target which this op package library was compiled for. + target:QnnExecuTorchOpPackageTarget; + + /// The name of torch operator. + custom_op_name:string; + + /// The corresponding op type name defined in the op package. + qnn_op_type_name:string; + + /// The platform which this op package library was compiled for. + platform:QnnExecuTorchOpPackagePlatform; +} + + +table QnnExecuTorchOpPackageOptions { + /// An array of QnnExecuTorchOpPackageInfo structures. + op_package_infos:[QnnExecuTorchOpPackageInfo]; +} + /// QNN backends currently supported table QnnExecuTorchBackendOptions { /// The backend QNN library to open and execute the graph with. This is a @@ -193,6 +238,9 @@ table QnnExecuTorchOptions { /// True if there exists multiple graphs in one .pte file. multiple_graphs:bool; + + /// Optional structure to specify op packages loaded and used by the backend. 
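+  /// When this list is non-empty, QnnBackend::Configure registers each
+  /// entry matching the current platform with the QNN backend (see
+  /// QnnBackendCommon.cpp above) before any graph is composed.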
+ op_package_options:QnnExecuTorchOpPackageOptions; } root_type QnnExecuTorchOptions; diff --git a/backends/qualcomm/serialization/qc_schema.py b/backends/qualcomm/serialization/qc_schema.py index a1ce2b2f53c..2ca405d935f 100644 --- a/backends/qualcomm/serialization/qc_schema.py +++ b/backends/qualcomm/serialization/qc_schema.py @@ -8,8 +8,9 @@ Please refer to executorch/backends/qualcomm/serialization/schema.fbs for the schema definitions """ -from dataclasses import dataclass +from dataclasses import dataclass, field from enum import IntEnum, unique +from typing import List @dataclass @@ -142,6 +143,36 @@ class QnnExecuTorchBackendOptions: htp_options: QnnExecuTorchHtpBackendOptions +@unique +class QnnExecuTorchOpPackageTarget(IntEnum): + UNKNOWN = 0 + CPU = 1 + HTP = 2 + + +@unique +class QnnExecuTorchOpPackagePlatform(IntEnum): + UNKNOWN = 0 + X86_64 = 1 + AARCH64_ANDROID = 2 + + +@dataclass +class QnnExecuTorchOpPackageInfo: + op_package_name: str = "" + op_package_path: str = "" + interface_provider: str = "" + target: QnnExecuTorchOpPackageTarget = QnnExecuTorchOpPackageTarget.UNKNOWN + custom_op_name: str = "" + qnn_op_type_name: str = "" + platform: QnnExecuTorchOpPackagePlatform = QnnExecuTorchOpPackagePlatform.UNKNOWN + + +@dataclass +class QnnExecuTorchOpPackageOptions: + op_package_infos: List[QnnExecuTorchOpPackageInfo] = field(default_factory=list) + + @dataclass class QnnExecuTorchOptions: soc_info: SocInfo @@ -155,3 +186,4 @@ class QnnExecuTorchOptions: shared_buffer: bool = False is_from_context_binary: bool = False multiple_graphs: bool = False + op_package_options: QnnExecuTorchOpPackageOptions = QnnExecuTorchOpPackageOptions() diff --git a/backends/qualcomm/tests/test_qnn_delegate.py b/backends/qualcomm/tests/test_qnn_delegate.py index 986243d7a9c..611e95fedd0 100644 --- a/backends/qualcomm/tests/test_qnn_delegate.py +++ b/backends/qualcomm/tests/test_qnn_delegate.py @@ -61,7 +61,7 @@ InsertRequantize, LayoutTransform, ) -from executorch.backends.qualcomm.builders.node_visitor import get_node_visitors +from executorch.backends.qualcomm.builders.node_visitor_manager import get_node_visitors from executorch.backends.qualcomm.debugger.utils import DrawGraph from executorch.examples.models.deeplab_v3 import DeepLabV3ResNet101Model from executorch.examples.models.edsr import EdsrModel @@ -4179,6 +4179,38 @@ def test_deeplab_v3(self): self.assertGreaterEqual(msg["MPA"], 0.70) self.assertGreaterEqual(msg["MIoU"], 0.55) + def test_custom_op(self): + if not self.required_envs([self.op_package_dir]): + self.skipTest("missing required envs") + cmds = [ + "python", + f"{self.executorch_root}/examples/qualcomm/custom_op/custom_ops_1.py", + "--artifact", + self.artifact_dir, + "--build_folder", + self.build_folder, + "--device", + self.device, + "--model", + self.model, + "--ip", + self.ip, + "--port", + str(self.port), + "--op_package_dir", + self.op_package_dir, + "--build_op_package", + ] + if self.host: + cmds.extend(["--host", self.host]) + + p = subprocess.Popen(cmds, stdout=subprocess.DEVNULL) + with Listener((self.ip, self.port)) as listener: + conn = listener.accept() + p.communicate() + msg = json.loads(conn.recv()) + self.assertTrue(msg["is_close"]) + @unittest.skip("dynamic shape inputs appear in recent torch.export.export") def test_mobilebert(self): if not self.required_envs([self.pretrained_weight]): @@ -4382,6 +4414,13 @@ def setup_environment(): help="Path to open source software model repository", type=str, ) + parser.add_argument( + "-d", + "--op_package_dir", + 
help="Path to operator package which generates from qnn-op-package-generator", + default="", + type=str, + ) parser.add_argument( "--pre_gen_pte", @@ -4415,7 +4454,7 @@ def setup_environment(): TestQNN.compile_only = args.compile_only TestQNN.pre_gen_pte = args.pre_gen_pte TestQNN.llama_artifacts = args.llama_artifacts - + TestQNN.op_package_dir = args.op_package_dir return sys.argv[:1] + ns_args diff --git a/backends/qualcomm/tests/utils.py b/backends/qualcomm/tests/utils.py index 4908bf889a9..723c9262ff4 100644 --- a/backends/qualcomm/tests/utils.py +++ b/backends/qualcomm/tests/utils.py @@ -181,6 +181,7 @@ class TestQNN(unittest.TestCase): image_dataset: str = "" pretrained_weight: str = "" enable_profile: bool = False + op_package_dir: str = "" online_prepare: bool = False use_8a8w: str = "8a8w" use_16a16w: str = "16a16w" @@ -246,6 +247,7 @@ def verify_output( # noqa: C901 input_encodings: Tuple = (), output_encodings: Tuple = (), check_io_shape: bool = False, + op_package_paths: List[str] = None, ): with tempfile.TemporaryDirectory() as tmp_dir: ( @@ -433,7 +435,11 @@ def validate_intermediate_tensor(): else None ), ) - adb.push(inputs=[processed_inputs], input_list=input_list) + adb.push( + inputs=[processed_inputs], + input_list=input_list, + files=op_package_paths, + ) adb.execute(method_index=method_index) adb.pull(output_path=tmp_dir, callback=post_process) self._assert_outputs_equal(outputs, ref_outputs) diff --git a/backends/qualcomm/utils/utils.py b/backends/qualcomm/utils/utils.py index 5ae640adc6e..f39120fdd48 100644 --- a/backends/qualcomm/utils/utils.py +++ b/backends/qualcomm/utils/utils.py @@ -59,6 +59,7 @@ QnnExecuTorchHtpPerformanceMode, QnnExecuTorchHtpPrecision, QnnExecuTorchLogLevel, + QnnExecuTorchOpPackageOptions, QnnExecuTorchOptions, QnnExecuTorchProfileLevel, ) @@ -1170,6 +1171,7 @@ def generate_qnn_executorch_compiler_spec( multiple_graphs: bool = False, weight_sharing: bool = False, graph_name: str = "forward", + op_package_options: QnnExecuTorchOpPackageOptions = None, ) -> List[CompileSpec]: """ Helper function generating compiler specs for Qualcomm AI Engine Direct @@ -1201,6 +1203,8 @@ def generate_qnn_executorch_compiler_spec( Please see test cases for post-processing example. weight_sharing: Used with multiple_graphs, where model size will be reduced when operations have the same weights across multiple graphs. graph_name: Assign unique graph name if 'multiple_graphs' is used. + op_package_options: Optional structure to specify op packages + loaded and used by the backend. Returns: List[CompileSpec]: Compiler specs for Qualcomm AI Engine Direct. @@ -1273,6 +1277,9 @@ def generate_qnn_executorch_compiler_spec( ): backend_options.htp_options.use_weight_sharing = True + if op_package_options and len(op_package_options.op_package_infos) > 0: + qnn_executorch_options.op_package_options = op_package_options + return [ CompileSpec(QCOM_QNN_COMPILE_SPEC, option_to_flatbuffer(qnn_executorch_options)) ] diff --git a/examples/qualcomm/custom_op/README.md b/examples/qualcomm/custom_op/README.md new file mode 100644 index 00000000000..0d3882efc13 --- /dev/null +++ b/examples/qualcomm/custom_op/README.md @@ -0,0 +1,164 @@ +# Custom Operator Support +The Qualcomm AI Engine Direct Backend in ExecuTorch supports custom PyTorch operators via the Qualcomm AI Engine Direct Op Package mechanism. Custom PyTorch operators, utilizing the torch.library API, can be successfully delegated and supported through user-written op packages. 
Additionally, built-in PyTorch operators can be overridden by these op packages.
+
+Note: The Qualcomm AI Engine Direct SDK is required to compile an op package.
+
+This folder contains examples demonstrating how to register custom operators into PyTorch and how to register their op packages into the Qualcomm AI Engine Direct Backend in ExecuTorch.
+## Prerequisite
+
+- Please complete the tutorial [Setting up ExecuTorch](https://pytorch.org/executorch/stable/getting-started-setup).
+
+- Please complete the tutorial [Setting up the QNN backend](../../docs/source/build-run-qualcomm-ai-engine-direct-backend.md).
+
+- Please follow [the instructions to install the proper versions of the Hexagon SDK and Hexagon Tools](https://docs.qualcomm.com/bundle/publicresource/topics/80-63442-50/linux_setup.html#htp-and-dsp).
+  - This example is verified with SM8650 (Snapdragon 8 Gen 3).
+  - Install hexagon-sdk-5.4.0, hexagon-sdk-6.0.0, and Hexagon tool 8.8.02:
+  ```bash
+  # install hexagon sdk 5.4.0
+  qpm-cli --install hexagonsdk5.x --version 5.4.0.3 --path /path/to/Qualcomm/Hexagon_SDK/hexagon-sdk-5.4.0
+  # install hexagon sdk 6.0.0
+  qpm-cli --install hexagonsdk6.x --version 6.0.0.2 --path /path/to/Qualcomm/Hexagon_SDK/hexagon-sdk-6.0.0
+  # install hexagon tool 8.8.02
+  qpm-cli --extract hexagon8.8 --version 8.8.02.1 --path /path/to/Qualcomm/Hexagon_SDK/hexagon-sdk-6.0.0/tools/HEXAGON_Tools/8.8.02
+  ```
+
+## Setup environment variables
+`$HEXAGON_SDK_ROOT` refers to the root of the specified version of the Hexagon SDK, i.e., the directory containing `readme.txt`.
+
+`$X86_CXX` refers to the clang++ compiler, verified with clang++-9.
+
+```bash
+export HEXAGON_SDK_ROOT=/path/to/Qualcomm/Hexagon_SDK/hexagon-sdk-5.4.0
+export X86_CXX=/path/to/clang-9.0.0/bin/clang++
+```
+
+
+## Instructions to build and run the example
+The following command builds the op package for the custom op `ExampleCustomOp`, then compiles the model containing the custom op `torch.ops.my_ops.mul3.default` into a Qualcomm AI Engine Direct binary that uses the op package.
+
+```bash
+python3 examples/qualcomm/custom_op/custom_ops_1.py --build_folder build-android -s <device_serial> -H <host> -m SM8650 --op_package_dir examples/qualcomm/custom_op/example_op_package_htp/ExampleOpPackage --build_op_package
+```
+
+## How to quantize custom op in Qualcomm AI Engine Direct backend
+Use a custom annotation in the QNN quantizer:
+```python
+quantizer = make_quantizer(
+    quant_dtype=quant_dtype, custom_annotations=(annotate_custom,)
+)
+```
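+
+For reference, a condensed sketch of such an annotation function follows; the complete `annotate_custom` (with validation and source-partition matching) ships in `custom_ops_1.py` in this folder, and the names below come from that script.
+```python
+import torch
+from executorch.backends.qualcomm.quantizer.annotators import QUANT_ANNOTATION_KEY
+from executorch.backends.qualcomm.quantizer.qconfig import (
+    get_ptq_per_channel_quant_config,
+)
+from torch.ao.quantization.quantize_pt2e import QuantizationAnnotation
+
+
+def annotate_custom(gm: torch.fx.GraphModule) -> None:
+    # attach quantization specs to every occurrence of the custom op
+    config = get_ptq_per_channel_quant_config()
+    for node in gm.graph.nodes:
+        if node.op != "call_function" or node.target != torch.ops.my_ops.mul3.default:
+            continue
+        node.meta[QUANT_ANNOTATION_KEY] = QuantizationAnnotation(
+            input_qspec_map={node.args[0]: config.input_activation},
+            output_qspec=config.output_activation,
+            _annotated=True,
+        )
+```
+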
+## Generating Op Packages
+To generate operation (op) packages, follow these steps:
+
+1. Define an XML OpDef Configuration File:
+   - Create an XML file that describes the package information, including the package name, version, and domain.
+   - Specify the operations the package contains. Refer to [the example op package XML file](example_op_package_htp/ExampleOpPackage/config/example_op_package_htp.xml) for guidance.
+2. Generate Skeleton Sample Code:
+   - Once the XML file is fully defined according to the specifications, pass it as an argument to the `qnn-op-package-generator` tool using the `--config_path` or `-p` option.
+   - This will generate the skeleton sample code.
+3. Implement the Operations:
+   - The generated interface generally does not require extra implementation.
+   - The source files will contain empty function bodies that need to be completed by users. Refer to [the example op package for implementation details](example_op_package_htp/ExampleOpPackage/src/ops/ExampleCustomOp.cpp).
+4. Support Custom PyTorch Operators:
+   - To support the parameters of custom PyTorch operators, a custom op builder is generated from the meta and `_schema.arguments` of `torch.fx.Node`.
+   - Ensure that the OpDef of the op package aligns with the schema of the custom PyTorch operators.
+
+## Op package format
+### Inputs
+in[0]…in[m-1]
+
+The same number of input tensors as defined in the PyTorch custom op, where ``m`` is the number of inputs.
+
+* Mandatory: true
+* Data type: backend specific
+* Shape: Any
+
+### Parameters
+
+Optionally, define one or more parameters for the operation. Each declared parameter is:
+* Mandatory: true
+* Data type: backend specific
+* Shape: Any
+
+### Outputs
+out[0]
+
+For now, only one output tensor is supported.
+
+* Mandatory: true
+* Data type: backend specific
+* Shape: Any
+
+Consult the Qualcomm AI Engine Direct documentation for information on [generating op packages](https://docs.qualcomm.com/bundle/publicresource/topics/80-63442-50/op_def_schema.html).
+
+## Registering Op Packages
+After an op package library has been generated, certain information needs to be passed to the `compile_spec` in order to properly delegate the nodes. The following code example shows how to construct the `QnnExecuTorchOpPackageOptions` and register op packages with the `compile_spec`.
+```python
+def prepare_op_package(
+    workspace: str, op_package_dir: str, arch: HtpArch, build_op_package: bool
+):
+    if build_op_package:
+        _run(["rm", "-rf", "build"], cwd=op_package_dir)
+        _run(["make", "htp_x86", "htp_aarch64", f"htp_v{arch}"], cwd=op_package_dir)
+        _run(
+            [
+                "cp",
+                f"{op_package_dir}/build/hexagon-v{arch}/libQnnExampleOpPackage.so",
+                f"{op_package_dir}/build/hexagon-v{arch}/libQnnExampleOpPackage_HTP.so",
+            ]
+        )
+
+    op_package_paths = [
+        f"{op_package_dir}/build/hexagon-v{arch}/libQnnExampleOpPackage_HTP.so",
+        f"{op_package_dir}/build/aarch64-android/libQnnExampleOpPackage.so",
+    ]
+
+    op_package_infos_HTP = QnnExecuTorchOpPackageInfo()
+    op_package_infos_HTP.interface_provider = "ExampleOpPackageInterfaceProvider"
+    op_package_infos_HTP.op_package_name = "ExampleOpPackage"
+    op_package_infos_HTP.op_package_path = f"{workspace}/libQnnExampleOpPackage_HTP.so"
+    op_package_infos_HTP.target = QnnExecuTorchOpPackageTarget.HTP
+    op_package_infos_HTP.custom_op_name = "my_ops.mul3.default"
+    op_package_infos_HTP.qnn_op_type_name = "ExampleCustomOp"
+    op_package_infos_HTP.platform = QnnExecuTorchOpPackagePlatform.AARCH64_ANDROID
+    op_package_infos_aarch64_CPU = QnnExecuTorchOpPackageInfo()
+    op_package_infos_aarch64_CPU.interface_provider = (
+        "ExampleOpPackageInterfaceProvider"
+    )
+    op_package_infos_aarch64_CPU.op_package_name = "ExampleOpPackage"
+    op_package_infos_aarch64_CPU.op_package_path = (
+        f"{workspace}/libQnnExampleOpPackage.so"
+    )
+    op_package_infos_aarch64_CPU.target = QnnExecuTorchOpPackageTarget.CPU
+    op_package_infos_aarch64_CPU.custom_op_name = "my_ops.mul3.default"
+    op_package_infos_aarch64_CPU.qnn_op_type_name = "ExampleCustomOp"
+    op_package_infos_aarch64_CPU.platform = (
+        QnnExecuTorchOpPackagePlatform.AARCH64_ANDROID
+    )
+    op_package_infos_x86_CPU = QnnExecuTorchOpPackageInfo()
+    op_package_infos_x86_CPU.interface_provider = "ExampleOpPackageInterfaceProvider"
+    op_package_infos_x86_CPU.op_package_name = "ExampleOpPackage"
+    op_package_infos_x86_CPU.op_package_path = (
+        f"{op_package_dir}/build/x86_64-linux-clang/libQnnExampleOpPackage.so"
+    )
+    op_package_infos_x86_CPU.target = QnnExecuTorchOpPackageTarget.CPU
+    op_package_infos_x86_CPU.custom_op_name = "my_ops.mul3.default"
+    op_package_infos_x86_CPU.qnn_op_type_name = "ExampleCustomOp"
+    op_package_infos_x86_CPU.platform = QnnExecuTorchOpPackagePlatform.X86_64
+    op_package_options = QnnExecuTorchOpPackageOptions()
+    op_package_options.op_package_infos = [
+        op_package_infos_x86_CPU,
+        op_package_infos_aarch64_CPU,
+        op_package_infos_HTP,
+    ]
+
+    return op_package_options, op_package_paths
+...
+op_package_options, op_package_paths = prepare_op_package(...)
+compile_spec = generate_qnn_executorch_compiler_spec(
+    ...
+    op_package_options=op_package_options,
+)
+```
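+
+Before lowering, the `torch.library` registration itself can be sanity-checked in eager mode. A minimal check, assuming the `my_ops.mul3` definitions from `custom_ops_1.py` below have been imported:
+```python
+import torch
+
+x = torch.randn(1, 32, 28, 28)
+# the custom op should behave exactly like x * 3 in eager mode
+assert torch.allclose(torch.ops.my_ops.mul3(x), x * 3)
+```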
"my_ops.mul3.default" + op_package_infos_x86_CPU.qnn_op_type_name = "ExampleCustomOp" + op_package_infos_x86_CPU.platform = QnnExecuTorchOpPackagePlatform.X86_64 + op_package_options = QnnExecuTorchOpPackageOptions() + op_package_options.op_package_infos = [ + op_package_infos_x86_CPU, + op_package_infos_aarch64_CPU, + op_package_infos_HTP, + ] + + return op_package_options, op_package_paths +... +op_package_options, op_package_paths = prepare_op_package(...) +compile_spec = generate_qnn_executorch_compiler_spec( + ... + op_package_options=op_package_options, +), +``` diff --git a/examples/qualcomm/custom_op/custom_ops_1.py b/examples/qualcomm/custom_op/custom_ops_1.py new file mode 100644 index 00000000000..ed9529bbf25 --- /dev/null +++ b/examples/qualcomm/custom_op/custom_ops_1.py @@ -0,0 +1,337 @@ +# Copyright (c) Qualcomm Innovation Center, Inc. +# All rights reserved +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +"""Example of showcasing registering custom operator through torch library API.""" +import json +import os +import subprocess +import sys +from multiprocessing.connection import Client + +import numpy as np +import torch + +from executorch.backends.qualcomm.quantizer.quantizer import QuantDtype +from executorch.backends.qualcomm.serialization.qc_schema import ( + _soc_info_table, + HtpArch, + QcomChipset, + QnnExecuTorchOpPackageInfo, + QnnExecuTorchOpPackageOptions, + QnnExecuTorchOpPackagePlatform, + QnnExecuTorchOpPackageTarget, +) +from executorch.examples.qualcomm.utils import ( + build_executorch_binary, + make_output_dir, + make_quantizer, + setup_common_args_and_variables, + SimpleADB, +) +from torch.library import impl, Library + +my_op_lib = Library("my_ops", "DEF") + +# registering an operator that multiplies input tensor by 3 and returns it. +my_op_lib.define("mul3(Tensor input) -> Tensor") # should print 'mul3' + + +@impl(my_op_lib, "mul3", dispatch_key="CompositeExplicitAutograd") +def mul3_impl(a: torch.Tensor) -> torch.Tensor: + return a * 3 + + +# registering the out variant. +my_op_lib.define( + "mul3.out(Tensor input, *, Tensor(a!) output) -> Tensor(a!)" +) # should print 'mul3.out' + + +@impl(my_op_lib, "mul3.out", dispatch_key="CompositeExplicitAutograd") +def mul3_out_impl(a: torch.Tensor, *, out: torch.Tensor) -> torch.Tensor: + out.copy_(a) + out.mul_(3) + return out + + +# example model +class Model(torch.nn.Module): + def forward(self, a): + return torch.ops.my_ops.mul3.default(a) + + +def annotate_custom(gm: torch.fx.GraphModule) -> None: + """ + This function is specific for custom op. 
+ The source_fn of the rewritten nn module turns out to be "my_ops.mul3.default" + """ + import itertools + + from executorch.backends.qualcomm.quantizer.annotators import ( + _is_annotated, + QUANT_ANNOTATION_KEY, + ) + + from executorch.backends.qualcomm.quantizer.qconfig import ( + get_ptq_per_channel_quant_config, + ) + from torch.ao.quantization.quantize_pt2e import QuantizationAnnotation + from torch.fx import Node + from torch.fx.passes.utils.source_matcher_utils import get_source_partitions + + custom_partitions = get_source_partitions(gm.graph, [torch.ops.my_ops.mul3.default]) + custom_partitions = list(itertools.chain(*custom_partitions.values())) + quantization_config = get_ptq_per_channel_quant_config() + for custom_partition in custom_partitions: + if len(custom_partition.output_nodes) > 1: + raise ValueError("custom partition has more than one output node") + custom_node = custom_partition.output_nodes[0] + if ( + custom_node.op != "call_function" + or custom_node.target != torch.ops.my_ops.mul3.default + ): + raise ValueError(f"{custom_node} is not a custom operator") + # skip annotation if it is already annotated + if _is_annotated([custom_node]): + continue + + input_qspec_map = {} + input_act = custom_node.args[0] + assert isinstance(input_act, Node) + input_spec = quantization_config.input_activation + input_qspec_map[input_act] = input_spec + + custom_node.meta[QUANT_ANNOTATION_KEY] = QuantizationAnnotation( + input_qspec_map=input_qspec_map, + output_qspec=quantization_config.output_activation, + _annotated=True, + ) + + +def create_device_inputs(example_inputs): + input_list = "" + for idx, _ in enumerate(example_inputs): + input_name = f"input_0_{idx}.raw" + input_list += input_name + " " + input_list = input_list.strip() + "\n" + return input_list + + +def _run(cmd, cwd=None): + subprocess.run(cmd, stdout=sys.stdout, cwd=cwd, check=True) + + +def prepare_op_package( + workspace: str, op_package_dir: str, arch: HtpArch, build_op_package: bool +): + if build_op_package: + _run(["rm", "-rf", "build"], cwd=op_package_dir) + _run(["make", "htp_x86", "htp_aarch64", f"htp_v{arch}"], cwd=op_package_dir) + _run( + [ + "cp", + f"{op_package_dir}/build/hexagon-v{arch}/libQnnExampleOpPackage.so", + f"{op_package_dir}/build/hexagon-v{arch}/libQnnExampleOpPackage_HTP.so", + ] + ) + + op_package_paths = [ + f"{op_package_dir}/build/hexagon-v{arch}/libQnnExampleOpPackage_HTP.so", + f"{op_package_dir}/build/aarch64-android/libQnnExampleOpPackage.so", + ] + + op_package_infos_HTP = QnnExecuTorchOpPackageInfo() + op_package_infos_HTP.interface_provider = "ExampleOpPackageInterfaceProvider" + op_package_infos_HTP.op_package_name = "ExampleOpPackage" + op_package_infos_HTP.op_package_path = f"{workspace}/libQnnExampleOpPackage_HTP.so" + op_package_infos_HTP.target = QnnExecuTorchOpPackageTarget.HTP + op_package_infos_HTP.custom_op_name = "my_ops.mul3.default" + op_package_infos_HTP.qnn_op_type_name = "ExampleCustomOp" + op_package_infos_HTP.platform = QnnExecuTorchOpPackagePlatform.AARCH64_ANDROID + op_package_infos_aarch64_CPU = QnnExecuTorchOpPackageInfo() + op_package_infos_aarch64_CPU.interface_provider = ( + "ExampleOpPackageInterfaceProvider" + ) + op_package_infos_aarch64_CPU.op_package_name = "ExampleOpPackage" + op_package_infos_aarch64_CPU.op_package_path = ( + f"{workspace}/libQnnExampleOpPackage.so" + ) + op_package_infos_aarch64_CPU.target = QnnExecuTorchOpPackageTarget.CPU + op_package_infos_aarch64_CPU.custom_op_name = "my_ops.mul3.default" + 
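    # custom_op_name is the torch operator this entry matches
+    # (my_ops.mul3.default), while qnn_op_type_name must equal the OpDef name
+    # declared in the op package XML config (ExampleCustomOp); see the
+    # QnnExecuTorchOpPackageInfo schema.
+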
op_package_infos_aarch64_CPU.qnn_op_type_name = "ExampleCustomOp" + op_package_infos_aarch64_CPU.platform = ( + QnnExecuTorchOpPackagePlatform.AARCH64_ANDROID + ) + op_package_infos_x86_CPU = QnnExecuTorchOpPackageInfo() + op_package_infos_x86_CPU.interface_provider = "ExampleOpPackageInterfaceProvider" + op_package_infos_x86_CPU.op_package_name = "ExampleOpPackage" + op_package_infos_x86_CPU.op_package_path = ( + f"{op_package_dir}/build/x86_64-linux-clang/libQnnExampleOpPackage.so" + ) + op_package_infos_x86_CPU.target = QnnExecuTorchOpPackageTarget.CPU + op_package_infos_x86_CPU.custom_op_name = "my_ops.mul3.default" + op_package_infos_x86_CPU.qnn_op_type_name = "ExampleCustomOp" + op_package_infos_x86_CPU.platform = QnnExecuTorchOpPackagePlatform.X86_64 + op_package_options = QnnExecuTorchOpPackageOptions() + op_package_options.op_package_infos = [ + op_package_infos_x86_CPU, + op_package_infos_aarch64_CPU, + op_package_infos_HTP, + ] + + return op_package_options, op_package_paths + + +def main(args): + if args.build_op_package: + if "HEXAGON_SDK_ROOT" not in os.environ: + raise RuntimeError("Environment variable HEXAGON_SDK_ROOT must be set") + print(f"HEXAGON_SDK_ROOT={os.getenv('HEXAGON_SDK_ROOT')}") + + if "ANDROID_NDK_ROOT" not in os.environ: + raise RuntimeError("Environment variable ANDROID_NDK_ROOT must be set") + print(f"ANDROID_NDK_ROOT={os.getenv('ANDROID_NDK_ROOT')}") + + # ensure the working directory exist. + os.makedirs(args.artifact, exist_ok=True) + + if not args.compile_only and args.device is None: + raise RuntimeError( + "device serial is required if not compile only. " + "Please specify a device serial by -s/--device argument." + ) + + quant_dtype = QuantDtype.use_8a8w + if args.use_fp16: + quant_dtype = None + + instance = Model() + pte_filename = "custom_qnn" + sample_input = (torch.ones(1, 32, 28, 28),) + workspace = f"/data/local/tmp/executorch/{pte_filename}" + + input_list = create_device_inputs(sample_input) + soc_info = _soc_info_table[getattr(QcomChipset, args.model)] + + op_package_options, op_package_paths = prepare_op_package( + workspace, + args.op_package_dir, + soc_info.htp_info.htp_arch, + args.build_op_package, + ) + quantizer = make_quantizer( + quant_dtype=quant_dtype, custom_annotations=(annotate_custom,) + ) + + build_executorch_binary( + instance, + sample_input, + args.model, + f"{args.artifact}/{pte_filename}", + sample_input, + op_package_options=op_package_options, + quant_dtype=quant_dtype, + custom_quantizer=quantizer, + ) + + if args.compile_only: + sys.exit(0) + + # setup required paths accordingly + # qnn_sdk : QNN SDK path setup in environment variable + # artifact_path : path where artifacts were built + # pte_path : path where executorch binary was stored + # device_id : serial number of android device + # workspace : folder for storing artifacts on android device + adb = SimpleADB( + qnn_sdk=os.getenv("QNN_SDK_ROOT"), + build_path=f"{args.build_folder}", + pte_path=f"{args.artifact}/{pte_filename}.pte", + workspace=workspace, + device_id=args.device, + host_id=args.host, + soc_model=args.model, + ) + adb.push(inputs=sample_input, input_list=input_list, files=op_package_paths) + adb.execute() + + # collect output data + output_data_folder = f"{args.artifact}/outputs" + make_output_dir(output_data_folder) + + adb.pull(output_path=args.artifact) + + x86_golden = instance(*sample_input) + device_output = torch.from_numpy( + np.fromfile( + os.path.join(output_data_folder, "output_0_0.raw"), dtype=np.float32 + ) + 
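        # the runner dumps each output as a raw float32 file; restore the
+        # eager-mode golden's shape before comparing
+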
).reshape(x86_golden.size()) + result = torch.all(torch.isclose(x86_golden, device_output, atol=1e-2)).tolist() + + if args.ip and args.port != -1: + with Client((args.ip, args.port)) as conn: + conn.send( + json.dumps( + { + "is_close": result, + } + ) + ) + else: + print(f"is_close? {result}") + if not result: + print(f"x86_golden {x86_golden}") + print(f"device_out {device_output}") + + +if __name__ == "__main__": + parser = setup_common_args_and_variables() + + parser.add_argument( + "-a", + "--artifact", + help="path for storing generated artifacts by this example. Default ./custom_op", + default="./custom_op", + type=str, + ) + + parser.add_argument( + "-d", + "--op_package_dir", + help="Path to operator package which generates from QNN.", + type=str, + required=True, + ) + + parser.add_argument( + "-F", + "--use_fp16", + help="If specified, will run in fp16 precision and discard ptq setting", + action="store_true", + default=False, + ) + + parser.add_argument( + "--build_op_package", + help="Build op package based on op_package_dir. Please set up " + "`HEXAGON_SDK_ROOT` and `ANDROID_NDK_ROOT` environment variable. " + "And add clang compiler into `PATH`. Please refer to Qualcomm AI Engine " + "Direct SDK document to get more details", + action="store_true", + default=False, + ) + + args = parser.parse_args() + + try: + main(args) + except Exception as e: + if args.ip and args.port != -1: + with Client((args.ip, args.port)) as conn: + conn.send(json.dumps({"Error": str(e)})) + else: + raise Exception(e) diff --git a/examples/qualcomm/custom_op/example_op_package_htp/ExampleOpPackage/Makefile b/examples/qualcomm/custom_op/example_op_package_htp/ExampleOpPackage/Makefile new file mode 100644 index 00000000000..8d37e042640 --- /dev/null +++ b/examples/qualcomm/custom_op/example_op_package_htp/ExampleOpPackage/Makefile @@ -0,0 +1,364 @@ +# Copyright (c) Qualcomm Innovation Center, Inc. +# All rights reserved +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# users should provide locations for QNN_INCLUDE and HEXAGON_SDK_ROOT +# export HEXAGON_SDK_ROOT = /path/to/hexagon-sdk + +# check all setup prerequisites if the command goal is not clean +ifneq ($(MAKECMDGOALS),clean) +ifndef QNN_INCLUDE +$(info "INFO: Qnn include not explicitly defined, attempting to use QNN_SDK_ROOT if it is valid") +QNN_INCLUDE := $(QNN_SDK_ROOT)/include/QNN +endif +ifeq ($(wildcard $(QNN_INCLUDE)),) +$(error "ERROR: QNN_INCLUDE path is not set. QNN include paths must be set to obtain BE headers necessary to compile the package") +endif +ifndef QNN_TARGET_LIB +$(info "INFO: Qnn target not explicitly defined, attempting to use QNN_SDK_ROOT if it is valid") +QNN_TARGET_LIB := $(QNN_SDK_ROOT)/lib/aarch64-android +endif +ifeq ($(wildcard $(QNN_TARGET_LIB)),) +ifeq ($(MAKECMDGOALS),htp_aarch64) +$(error "ERROR: QNN_TARGET_LIB is needed to compile package for aarch64") +else ifeq ($(MAKECMDGOALS),all) +$(info "WARNING:QNN_TARGET_LIB may need to be defined to compile packages") +endif +endif + +ifndef HEXAGON_SDK_ROOT +$(error "ERROR: HEXAGON_SDK_ROOT is not set. Hexagon-SDK path must be set to the latest hexagon-sdk-x.y.z") +endif + +ifeq ($(wildcard $(HEXAGON_SDK_ROOT)),) +$(error "ERROR: HEXAGON_SDK_ROOT is not set correctly. 
Please set HEXAGON_SDK_ROOT to latest hexagon-sdk-X.Y.Z path") +endif + +HEXAGON_SDK_BASE := $(dir $(HEXAGON_SDK_ROOT)) + +$(info "HEXAGON_SDK_ROOT is [${HEXAGON_SDK_ROOT}]") +# Users should note that the tools version may change between hexagon sdk versions +# Following combination of SDK and Tool version is supported +HEXAGON_SDK_ROOT_V68 := $(HEXAGON_SDK_BASE)/hexagon-sdk-4.2.0 +HEXAGON_SDK_ROOT_V69 := $(HEXAGON_SDK_BASE)/hexagon-sdk-4.3.0 +HEXAGON_SDK_ROOT_V73 := $(HEXAGON_SDK_BASE)/hexagon-sdk-5.4.0 +HEXAGON_SDK_ROOT_V75 := $(HEXAGON_SDK_BASE)/hexagon-sdk-5.4.0 +HEXAGON_SDK_ROOT_V79 := $(HEXAGON_SDK_BASE)/hexagon-sdk-6.0.0 + +#Updated to point to latest sdk to match with libQnnHtp.so +HEXAGON_SDK_ROOT_X86 := $(HEXAGON_SDK_BASE)/hexagon-sdk-6.0.0 +HEXAGON_TOOLS_VERSION_V68 := 8.4.09 +HEXAGON_TOOLS_VERSION_V69 := 8.5.03 +HEXAGON_TOOLS_VERSION_V73 := 8.6.02 +HEXAGON_TOOLS_VERSION_V75 := 8.7.03 +HEXAGON_TOOLS_VERSION_V79 := 8.8.02 + +#Updated to point to latest sdk to match with libQnnHtp.so +HEXAGON_TOOLS_VERSION_X86 := 8.8.02 + +ifndef ANDROID_NDK_ROOT +ifeq ($(MAKECMDGOALS),htp_aarch64) +$(error "ERROR: ANDROID_NDK_ROOT is not set. Android NDK path must be set to compile package for aarch64") +else ifeq ($(MAKECMDGOALS),all) +$(info "WARNING: ANDROID_NDK_ROOT is not set. Android NDK path must be set to compile package for aarch64") +endif +endif + +ifndef PACKAGE_NAME +export +PACKAGE_NAME := $(notdir $(shell pwd)) +$(info "INFO: No package name defined. Using current directory name: $(PACKAGE_NAME) as the package name") +endif + +WORK := build +SRC_DIR := src +OP_SRC_DIR := src/ops +OP_INCLUDE_DIR := ./include +OP_INCLUDES = #$(wildcard $(OP_INCLUDE_DIR)/*.h) user defined if any op specific headers are needed, add -I to common flags +LIBRARY_NAME := libQnn$(PACKAGE_NAME).so +SUPPORTED_TARGETS = x86_64-linux-clang hexagon-v68 hexagon-v69 hexagon-v73 hexagon-v75 hexagon-v79 aarch64-android + + +COMMON_CXX_FLAGS = -std=c++17 -I$(QNN_INCLUDE) -fPIC -Wall -Wreorder -Wno-missing-braces -Wno-unused-function +COMMON_CXX_FLAGS += -Werror -Wno-format -Wno-unused-command-line-argument -fvisibility=default -stdlib=libc++ +COMMON_CXX_FLAGS += -DQNN_API="__attribute__((visibility(\"default\")))" -D__QAIC_HEADER_EXPORT="__attribute__((visibility(\"default\")))" + +X86_LIBNATIVE_RELEASE_DIR := $(HEXAGON_SDK_ROOT_X86)/tools/HEXAGON_Tools/$(HEXAGON_TOOLS_VERSION_X86)/Tools + +# Ensure hexagon sdk tool version can be retrieved +ifeq ($(wildcard $(X86_LIBNATIVE_RELEASE_DIR)/.),) +$(error "Cannot retrieve hexagon tools from: $(X86_LIBNATIVE_RELEASE_DIR). \ + \ + Please check that hexagon tools version is correct. Expected: $(HEXAGON_TOOLS_VERSION_X86)") +endif + +#Check tools for hexagon_v68 are present. +ifeq ($(MAKECMDGOALS),htp_v68) +ifeq ($(wildcard $(HEXAGON_SDK_ROOT_V68)),) +$(error "ERROR: HEXAGON_SDK_ROOT_V68 is set incorrectly. Cannot retrieve $(HEXAGON_SDK_ROOT_V68)") +endif +endif + +ifeq ($(MAKECMDGOALS),htp_v69) +ifeq ($(wildcard $(HEXAGON_SDK_ROOT_V69)),) +$(error "ERROR: HEXAGON_SDK_ROOT_V69 is set incorrectly. Cannot retrieve $(HEXAGON_SDK_ROOT_V69)") +endif +endif + +ifeq ($(MAKECMDGOALS),htp_v73) +ifeq ($(wildcard $(HEXAGON_SDK_ROOT_V73)),) +$(error "ERROR: HEXAGON_SDK_ROOT_V73 is set incorrectly. Cannot retrieve $(HEXAGON_SDK_ROOT_V73)") +endif +endif + +ifeq ($(MAKECMDGOALS),htp_v75) +ifeq ($(wildcard $(HEXAGON_SDK_ROOT_V75)),) +$(error "ERROR: HEXAGON_SDK_ROOT_V75 is set incorrectly. Cannot retrieve $(HEXAGON_SDK_ROOT_V75)") +endif +endif + +#Check tools for hexagon_v79 are present. 
+ifeq ($(MAKECMDGOALS),htp_v79) +ifeq ($(wildcard $(HEXAGON_SDK_ROOT_V79)),) +$(error "ERROR: HEXAGON_SDK_ROOT_V79 is set incorrectly. Cannot retrieve $(HEXAGON_SDK_ROOT_V79)") +endif +endif + + + +endif +OP_SOURCES = $(wildcard $(OP_SRC_DIR)/*.cpp) +OTHER_SOURCES = $(wildcard $(SRC_DIR)/*.cpp) +HFILES = $(wildcard $(QNN_INCLUDE)/*.h) +HFILES += $(wildcard $(QNN_INCLUDE)/HTP/*.h) +HFILES += $(wildcard $(QNN_INCLUDE)/HTP/core/*.h) +OP_OBJS = $(patsubst $(SRC_DIR)/%,%,$(patsubst %.cpp,%.o,$(OP_SOURCES))) +OTHER_OBJS = $(patsubst $(SRC_DIR)/%,%,$(patsubst %.cpp,%.o,$(OTHER_SOURCES))) + +#======= Assembly ======== +OP_SOURCES_ASM_X86 += $(wildcard $(OP_SRC_DIR)/x86_asm/*.S) +OP_OBJS_ASM_X86 += $(subst /x86_asm/,/,$(patsubst $(SRC_DIR)/%,%,$(patsubst %.S,%.o,$(OP_SOURCES_ASM_X86)))) +OP_SOURCES_ASM_V68 += $(wildcard $(OP_SRC_DIR)/v68_asm/*.S) +OP_OBJS_ASM_V68 += $(subst /v68_asm/,/,$(patsubst $(SRC_DIR)/%,%,$(patsubst %.S,%.o,$(OP_SOURCES_ASM_V68)))) +OP_SOURCES_ASM_V69 += $(wildcard $(OP_SRC_DIR)/v69_asm/*.S) +OP_OBJS_ASM_V69 += $(subst /v69_asm/,/,$(patsubst $(SRC_DIR)/%,%,$(patsubst %.S,%.o,$(OP_SOURCES_ASM_V69)))) +OP_SOURCES_ASM_V73 += $(wildcard $(OP_SRC_DIR)/v73_asm/*.S) +OP_OBJS_ASM_V73 += $(subst /v73_asm/,/,$(patsubst $(SRC_DIR)/%,%,$(patsubst %.S,%.o,$(OP_SOURCES_ASM_V73)))) +OP_SOURCES_ASM_V75 += $(wildcard $(OP_SRC_DIR)/v75_asm/*.S) +OP_OBJS_ASM_V75 += $(subst /v75_asm/,/,$(patsubst $(SRC_DIR)/%,%,$(patsubst %.S,%.o,$(OP_SOURCES_ASM_V75)))) +OP_SOURCES_ASM_V79 += $(wildcard $(OP_SRC_DIR)/v79_asm/*.S) +OP_OBJS_ASM_V79 += $(subst /v79_asm/,/,$(patsubst $(SRC_DIR)/%,%,$(patsubst %.S,%.o,$(OP_SOURCES_ASM_V79)))) + +OP_SOURCES_ASM_ANDROID += $(wildcard $(OP_SRC_DIR)/android_asm/*.S) +OP_OBJS_ASM_ANDROID += $(subst /android_asm/,/,$(patsubst $(SRC_DIR)/%,%,$(patsubst %.S,%.o,$(OP_SOURCES_ASM_ANDROID)))) + + +all: htp_v73 htp_x86 htp_aarch64 + +#============================================================================================================ +# Setup compiler, compiler instructions and linker for x86 +X86_CXX ?= clang++-9 +# Checking if clang++-9 is present. 
If not switch to clang++ +ifeq ($(shell $(X86_CXX) -v 2>&1 | grep -c "clang version"), 0) + X86_CXX := clang++ +endif +X86_LDFLAGS:= -Wl,--whole-archive -L$(X86_LIBNATIVE_RELEASE_DIR)/libnative/lib -lnative -Wl,--no-whole-archive -lpthread +X86_C_FLAGS := -D__HVXDBL__ -I$(X86_LIBNATIVE_RELEASE_DIR)/libnative/include -ffast-math -DUSE_OS_LINUX +X86_CXX_FLAGS = $(COMMON_CXX_FLAGS) $(X86_C_FLAGS) -fomit-frame-pointer -Wno-invalid-offsetof +linux_objs = +#============================================================================================================ +# Setup compiler, compiler instructions and linker for hexagon +HEXAGON_CXX_FLAGS := $(COMMON_CXX_FLAGS) -mhvx -mhvx-length=128B -mhmx -DUSE_OS_QURT -O2 -Wno-reorder -DPREPARE_DISABLED + +HEXAGON_CXX_FLAGS_V68 := $(HEXAGON_CXX_FLAGS) -mv68 -I$(HEXAGON_SDK_ROOT_V68)/rtos/qurt/computev68/include/qurt -I$(HEXAGON_SDK_ROOT_V68)/rtos/qurt/computev68/include/posix -I$(HEXAGON_SDK_ROOT_V68)/incs -I$(HEXAGON_SDK_ROOT_V68)/incs/stddef +HEXAGON_CXX_FLAGS_V69 := $(HEXAGON_CXX_FLAGS) -mv69 -I$(HEXAGON_SDK_ROOT_V69)/rtos/qurt/computev69/include/qurt -I$(HEXAGON_SDK_ROOT_V69)/rtos/qurt/computev69/include/posix -I$(HEXAGON_SDK_ROOT_V69)/incs -I$(HEXAGON_SDK_ROOT_V69)/incs/stddef +HEXAGON_CXX_FLAGS_V73 := $(HEXAGON_CXX_FLAGS) -mv73 -I$(HEXAGON_SDK_ROOT_V73)/rtos/qurt/computev73/include/qurt -I$(HEXAGON_SDK_ROOT_V73)/rtos/qurt/computev73/include/posix -I$(HEXAGON_SDK_ROOT_V73)/incs -I$(HEXAGON_SDK_ROOT_V73)/incs/stddef +HEXAGON_CXX_FLAGS_V75 := $(HEXAGON_CXX_FLAGS) -mv75 -I$(HEXAGON_SDK_ROOT_V75)/rtos/qurt/computev75/include/qurt -I$(HEXAGON_SDK_ROOT_V75)/rtos/qurt/computev75/include/posix -I$(HEXAGON_SDK_ROOT_V75)/incs -I$(HEXAGON_SDK_ROOT_V75)/incs/stddef +HEXAGON_CXX_FLAGS_V79 := $(HEXAGON_CXX_FLAGS) -mv79 -I$(HEXAGON_SDK_ROOT_V79)/rtos/qurt/computev79/include/qurt -I$(HEXAGON_SDK_ROOT_V79)/rtos/qurt/computev79/include/posix -I$(HEXAGON_SDK_ROOT_V79)/incs -I$(HEXAGON_SDK_ROOT_V79)/incs/stddef + +HEXAGON_CXX_V68 := $(HEXAGON_SDK_ROOT_V68)/tools/HEXAGON_Tools/$(HEXAGON_TOOLS_VERSION_V68)/Tools/bin/hexagon-clang++ +HEXAGON_CXX_V69 := $(HEXAGON_SDK_ROOT_V69)/tools/HEXAGON_Tools/$(HEXAGON_TOOLS_VERSION_V69)/Tools/bin/hexagon-clang++ +HEXAGON_CXX_V73 := $(HEXAGON_SDK_ROOT_V73)/tools/HEXAGON_Tools/$(HEXAGON_TOOLS_VERSION_V73)/Tools/bin/hexagon-clang++ +HEXAGON_CXX_V75 := $(HEXAGON_SDK_ROOT_V75)/tools/HEXAGON_Tools/$(HEXAGON_TOOLS_VERSION_V75)/Tools/bin/hexagon-clang++ +HEXAGON_CXX_V79 := $(HEXAGON_SDK_ROOT_V79)/tools/HEXAGON_Tools/$(HEXAGON_TOOLS_VERSION_V79)/Tools/bin/hexagon-clang++ + + +HEX_LDFLAGS = +hexagon_objs = +#============================================================================================================ +# Setup compiler, compiler instructions and linker for aarch64 +AARCH64_C__FLAGS = -D__HVXDBL__ -I$(X86_LIBNATIVE_RELEASE_DIR)/libnative/include -ffast-math -DUSE_OS_LINUX -DANDROID +AARCH64_CXX_FLAGS = $(COMMON_CXX_FLAGS) $(AARCH64_C__FLAGS) -fomit-frame-pointer -Wno-invalid-offsetof -Wno-unused-variable -Wno-unused-parameter -Wno-missing-braces -Wno-sign-compare -Wno-unused-private-field -Wno-unused-variable -Wno-ignored-qualifiers -Wno-missing-field-initializers +ARM_CLANG_OPTS =--target=aarch64-none-linux-android21 --sysroot=$(ANDROID_NDK_ROOT)/toolchains/llvm/prebuilt/linux-x86_64/sysroot -stdlib=libc++ -static-libstdc++ +AARCH64_CXX = $(ANDROID_NDK_ROOT)/toolchains/llvm/prebuilt/linux-x86_64/bin/clang++ $(ARM_CLANG_OPTS) +AARCH64_LDFLAGS = -L$(QNN_TARGET_LIB) -lQnnHtp -lQnnHtpPrepare +aarch64_objs = 
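+# Typical invocations (see the example README): `make htp_x86 htp_aarch64
+# htp_v75` builds the x86_64-linux-clang, aarch64-android, and hexagon-v75
+# variants of the library; plain `make` builds htp_v73, htp_x86 and htp_aarch64.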
+#============================================================================================================
+# Setup targets and goals
+
+htp_x86: X86_BUILD
+
+htp_v68: HEXAGON_BUILD_V68
+
+htp_v69: HEXAGON_BUILD_V69
+
+htp_v73: HEXAGON_BUILD_V73
+
+htp_v75: HEXAGON_BUILD_V75
+
+htp_v79: HEXAGON_BUILD_V79
+
+
+
+htp_aarch64: AARCH64_BUILD
+
+AARCH64_BUILD: $(WORK)/aarch64-android/$(LIBRARY_NAME)
+
+HEXAGON_BUILD_V68: $(WORK)/hexagon-v68/$(LIBRARY_NAME)
+
+HEXAGON_BUILD_V69: $(WORK)/hexagon-v69/$(LIBRARY_NAME)
+
+HEXAGON_BUILD_V73: $(WORK)/hexagon-v73/$(LIBRARY_NAME)
+
+HEXAGON_BUILD_V75: $(WORK)/hexagon-v75/$(LIBRARY_NAME)
+
+HEXAGON_BUILD_V79: $(WORK)/hexagon-v79/$(LIBRARY_NAME)
+
+
+
+X86_BUILD: $(WORK)/x86_64-linux-clang/$(LIBRARY_NAME)
+
+
+define build_objs =
+ifneq ($(filter $(2),$(SUPPORTED_TARGETS)),)
+$(2)_objs += $(foreach x,$(1),$(WORK)/$(2)/$(x))
+else
+$$(error "Unknown target option provided: $(2): Supported targets are: $(SUPPORTED_TARGETS)")
+endif
+endef
+
+$(eval $(call build_objs,$(OTHER_OBJS),x86_64-linux-clang))
+$(eval $(call build_objs,$(OP_OBJS),x86_64-linux-clang))
+$(eval $(call build_objs,$(OP_OBJS_ASM_X86),x86_64-linux-clang))
+$(eval $(call build_objs,$(OTHER_OBJS),hexagon-v68))
+$(eval $(call build_objs,$(OP_OBJS),hexagon-v68))
+$(eval $(call build_objs,$(OP_OBJS_ASM_V68),hexagon-v68))
+$(eval $(call build_objs,$(OTHER_OBJS),hexagon-v69))
+$(eval $(call build_objs,$(OP_OBJS),hexagon-v69))
+$(eval $(call build_objs,$(OP_OBJS_ASM_V69),hexagon-v69))
+$(eval $(call build_objs,$(OTHER_OBJS),hexagon-v73))
+$(eval $(call build_objs,$(OP_OBJS),hexagon-v73))
+$(eval $(call build_objs,$(OP_OBJS_ASM_V73),hexagon-v73))
+$(eval $(call build_objs,$(OTHER_OBJS),hexagon-v75))
+$(eval $(call build_objs,$(OP_OBJS),hexagon-v75))
+$(eval $(call build_objs,$(OP_OBJS_ASM_V75),hexagon-v75))
+$(eval $(call build_objs,$(OTHER_OBJS),hexagon-v79))
+$(eval $(call build_objs,$(OP_OBJS),hexagon-v79))
+$(eval $(call build_objs,$(OP_OBJS_ASM_V79),hexagon-v79))
+
+$(eval $(call build_objs,$(OTHER_OBJS),aarch64-android))
+$(eval $(call build_objs,$(OP_OBJS),aarch64-android))
+$(eval $(call build_objs,$(OP_OBJS_ASM_ANDROID),aarch64-android))
+
+# x86
+$(WORK)/x86_64-linux-clang $(WORK)/hexagon-v68 $(WORK)/hexagon-v69 $(WORK)/hexagon-v73 $(WORK)/hexagon-v75 $(WORK)/hexagon-v79 $(WORK)/aarch64-android:
+	@mkdir -p $@/ops
+
+$(WORK)/x86_64-linux-clang/%.o: $(SRC_DIR)/%.cpp | $(WORK)/x86_64-linux-clang
+	$(X86_CXX) $(X86_CXX_FLAGS) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@
+
+$(WORK)/x86_64-linux-clang/ops/%.o: $(OP_SRC_DIR)/%.cpp | $(WORK)/x86_64-linux-clang
+	$(X86_CXX) $(X86_CXX_FLAGS) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@
+
+$(WORK)/x86_64-linux-clang/ops/%.o: $(OP_SRC_DIR)/x86_asm/%.S | $(WORK)/x86_64-linux-clang
+	$(X86_CXX) $(X86_CXX_FLAGS) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@
+
+$(WORK)/x86_64-linux-clang/$(LIBRARY_NAME): $(x86_64-linux-clang_objs) | $(HFILES)
+	$(X86_CXX) -fPIC -std=c++17 -g -shared -o $@ $^ $(X86_LDFLAGS)
+
+# v68
+$(WORK)/hexagon-v68/%.o: $(SRC_DIR)/%.cpp | $(WORK)/hexagon-v68
+	$(HEXAGON_CXX_V68) $(HEXAGON_CXX_FLAGS_V68) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@
+
+$(WORK)/hexagon-v68/ops/%.o: $(OP_SRC_DIR)/%.cpp | $(WORK)/hexagon-v68
+	$(HEXAGON_CXX_V68) $(HEXAGON_CXX_FLAGS_V68) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@
+
+$(WORK)/hexagon-v68/ops/%.o: $(OP_SRC_DIR)/v68_asm/%.S | $(WORK)/hexagon-v68
+	$(HEXAGON_CXX_V68) $(HEXAGON_CXX_FLAGS_V68) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@
+
+
+$(WORK)/hexagon-v68/$(LIBRARY_NAME): $(hexagon-v68_objs) | $(HFILES) + $(HEXAGON_CXX_V68) -fPIC -std=c++17 -g -shared -o $@ $^ $(HEX_LDFLAGS) + +# v69 +$(WORK)/hexagon-v69/%.o: $(SRC_DIR)/%.cpp | $(WORK)/hexagon-v69 + $(HEXAGON_CXX_V69) $(HEXAGON_CXX_FLAGS_V69) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ + +$(WORK)/hexagon-v69/ops/%.o: $(OP_SRC_DIR)/%.cpp | $(WORK)/hexagon-v69 + $(HEXAGON_CXX_V69) $(HEXAGON_CXX_FLAGS_V69) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ + +$(WORK)/hexagon-v69/ops/%.o: $(OP_SRC_DIR)/v69_asm/%.S | $(WORK)/hexagon-v69 + $(HEXAGON_CXX_V69) $(HEXAGON_CXX_FLAGS_V69) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ + +$(WORK)/hexagon-v69/$(LIBRARY_NAME): $(hexagon-v69_objs) | $(HFILES) + $(HEXAGON_CXX_V69) -fPIC -std=c++17 -g -shared -o $@ $^ $(HEX_LDFLAGS) + +# v73 +$(WORK)/hexagon-v73/%.o: $(SRC_DIR)/%.cpp | $(WORK)/hexagon-v73 + $(HEXAGON_CXX_V73) $(HEXAGON_CXX_FLAGS_V73) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ + +$(WORK)/hexagon-v73/ops/%.o: $(OP_SRC_DIR)/%.cpp | $(WORK)/hexagon-v73 + $(HEXAGON_CXX_V73) $(HEXAGON_CXX_FLAGS_V73) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ + +$(WORK)/hexagon-v73/ops/%.o: $(OP_SRC_DIR)/v73_asm/%.S | $(WORK)/hexagon-v73 + $(HEXAGON_CXX_V73) $(HEXAGON_CXX_FLAGS_V73) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ + +$(WORK)/hexagon-v73/$(LIBRARY_NAME): $(hexagon-v73_objs) | $(HFILES) + $(HEXAGON_CXX_V73) -fPIC -std=c++17 -g -shared -o $@ $^ $(HEX_LDFLAGS) + +#v75 +$(WORK)/hexagon-v75/%.o: $(SRC_DIR)/%.cpp | $(WORK)/hexagon-v75 + $(HEXAGON_CXX_V75) $(HEXAGON_CXX_FLAGS_V75) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ + +$(WORK)/hexagon-v75/ops/%.o: $(OP_SRC_DIR)/%.cpp | $(WORK)/hexagon-v75 + $(HEXAGON_CXX_V75) $(HEXAGON_CXX_FLAGS_V75) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ + +$(WORK)/hexagon-v75/ops/%.o: $(OP_SRC_DIR)/v75_asm/%.S | $(WORK)/hexagon-v75 + $(HEXAGON_CXX_V75) $(HEXAGON_CXX_FLAGS_V75) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ + +$(WORK)/hexagon-v75/$(LIBRARY_NAME): $(hexagon-v75_objs) | $(HFILES) + $(HEXAGON_CXX_V75) -fPIC -std=c++17 -g -shared -o $@ $^ $(HEX_LDFLAGS) + +#v79 +$(WORK)/hexagon-v79/%.o: $(SRC_DIR)/%.cpp | $(WORK)/hexagon-v79 + $(HEXAGON_CXX_V79) $(HEXAGON_CXX_FLAGS_V79) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ + +$(WORK)/hexagon-v79/ops/%.o: $(OP_SRC_DIR)/%.cpp | $(WORK)/hexagon-v79 + $(HEXAGON_CXX_V79) $(HEXAGON_CXX_FLAGS_V79) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ + +$(WORK)/hexagon-v79/ops/%.o: $(OP_SRC_DIR)/v79_asm/%.S | $(WORK)/hexagon-v79 + $(HEXAGON_CXX_V79) $(HEXAGON_CXX_FLAGS_V79) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ + +$(WORK)/hexagon-v79/$(LIBRARY_NAME): $(hexagon-v79_objs) | $(HFILES) + $(HEXAGON_CXX_V79) -fPIC -std=c++17 -g -shared -o $@ $^ $(HEX_LDFLAGS) + + + +# aarch64 +$(WORK)/aarch64-android/%.o: $(SRC_DIR)/%.cpp | $(WORK)/aarch64-android + $(AARCH64_CXX) $(AARCH64_CXX_FLAGS) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ + +$(WORK)/aarch64-android/ops/%.o: $(OP_SRC_DIR)/%.cpp | $(WORK)/aarch64-android + $(AARCH64_CXX) $(AARCH64_CXX_FLAGS) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ + +$(WORK)/aarch64-android/ops/%.o: $(OP_SRC_DIR)/android_asm/%.S | $(WORK)/aarch64-android + $(AARCH64_CXX) $(AARCH64_CXX_FLAGS) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ + +$(WORK)/aarch64-android/$(LIBRARY_NAME): $(aarch64-android_objs) | $(HFILES) + $(AARCH64_CXX) -fPIC -std=c++17 -g -shared -o $@ $^ $(AARCH64_LDFLAGS) + +clean: + -rm -rf $(WORK) + +.PHONY: all clean diff --git 
a/examples/qualcomm/custom_op/example_op_package_htp/ExampleOpPackage/config/example_op_package_htp.xml b/examples/qualcomm/custom_op/example_op_package_htp/ExampleOpPackage/config/example_op_package_htp.xml new file mode 100644 index 00000000000..24e17100bf8 --- /dev/null +++ b/examples/qualcomm/custom_op/example_op_package_htp/ExampleOpPackage/config/example_op_package_htp.xml @@ -0,0 +1,59 @@ + + + + + + ExampleCustomOp + + + ExampleCustomOp for testing OP package registration functionality. + + + + + input + + input activation + + true + QNN_DATATYPE_FLOAT_32 + QNN_DATATYPE_UFIXED_POINT_8 + + 4D + NHWC + a tensor of 4 dimension + + + + + output + + output activation + + true + QNN_DATATYPE_FLOAT_32 + QNN_DATATYPE_UFIXED_POINT_8 + + 4D + NHWC + a tensor of 4 dimension + + + + + HTP + + + + + diff --git a/examples/qualcomm/custom_op/example_op_package_htp/ExampleOpPackage/src/ExampleOpPackageInterface.cpp b/examples/qualcomm/custom_op/example_op_package_htp/ExampleOpPackage/src/ExampleOpPackageInterface.cpp new file mode 100644 index 00000000000..8eeca16e982 --- /dev/null +++ b/examples/qualcomm/custom_op/example_op_package_htp/ExampleOpPackage/src/ExampleOpPackageInterface.cpp @@ -0,0 +1,289 @@ +//============================================================================== +// Auto Generated Code for ExampleOpPackage +//============================================================================== + +#include "HTP/QnnHtpCommon.h" +#include "HTP/core/constraints.h" +#include "HTP/core/op_package_feature_support.h" +#include "HTP/core/op_register_ext.h" +#include "HTP/core/optimize.h" +#include "HTP/core/simple_reg.h" +#include "HTP/core/unique_types.h" +#include "QnnOpPackage.h" +#include "QnnSdkBuildId.h" + +DEFINE_UNIQ_TY() +BEGIN_PKG_OPS_OPTS_LIST() + +/** Note that the order of declarations given here defines the order in which + * ops and graph optimizations are registered to the HTP Core. Append the latest + * OpName at the bottom + */ +DECLARE_PKG_OPS_OPTS_LIST(PKG_ExampleCustomOp) + +END_PKG_OPS_OPTS_LIST() + +// op package info +static constexpr auto sg_packageName = + THIS_PKG_NAME_STR; // package name passed in as compile flag + +static std::array sg_opNames{{"ExampleCustomOp"}}; + +static Qnn_ApiVersion_t sg_sdkApiVersion = QNN_HTP_API_VERSION_INIT; +static QnnOpPackage_Info_t sg_packageInfo = QNN_OP_PACKAGE_INFO_INIT; + +// global data +static QnnOpPackage_GlobalInfrastructure_t sg_globalInfra = + nullptr; // global infrastructure not in use for now +static bool sg_packageInitialized = false; + +/* + * user provided logging call back function + * currently only supported on linux x86-64 and nonrpc versions + * typedef void (*QnnLog_Callback_t)(const char* fmt, + * QnnLog_Level_t level, + * uint64_t timestamp, + * va_list args); + * usage: if(sg_logInitialized && level <= sg_maxLogLevel) + * sg_logCallback(fmt, level, timestamp, args); + * + * for cross rpc versions, skel side user provided logging call back function + * can be defined as part of op packages. 
maximal log level sg_maxLogLevel
+ * can be set by Qnn_ErrorHandle_t ExampleOpPackageLogSetLevel(QnnLog_Level_t
+ * maxLogLevel)
+ */
+/*
+ * for alternative logging method provided by HTP core, please refer to log.h
+ */
+static QnnLog_Callback_t sg_logCallback =
+    nullptr; // user provided call back function pointer for logging
+static QnnLog_Level_t sg_maxLogLevel =
+    (QnnLog_Level_t)0; // maximal log level used in user provided logging
+static bool sg_logInitialized =
+    false; // tracks whether user provided logging method has been initialized
+
+/*
+ * op initialization
+ * needs to be global in the package
+ * one initialization per package before any op definitions
+ * syntax: INIT_PACKAGE_OP_DEF()
+ */
+INIT_PACKAGE_OP_DEF()
+
+/*
+ * optimization initialization
+ * needs to be global in the package
+ * one initialization per package before any optimization definitions
+ * syntax: INIT_PACKAGE_OPTIMIZATION_DEF()
+ */
+INIT_PACKAGE_OPTIMIZATION_DEF()
+
+/*
+ * op parameter order initialization
+ * needs to be global in the package
+ * one initialization per package before any op parameter order definitions
+ * syntax: INIT_PACKAGE_PARAM_ORDER_DEF()
+ */
+INIT_PACKAGE_PARAM_ORDER_DEF()
+
+/*
+ * axis parameter name list
+ * optional
+ * needs to be global in the package
+ * one list per package
+ * for listing axis parameter names passed into Qnn_AddNode API
+ * HTP backend auto-adjusts values in axis parameters based on HTP backfilling
+ * note: HTP backend backfills tensor dimensions to 4 dimensions
+ * syntax: LIST_PACKAGE_AXIS_PARAMS(...)
+ * e.g. LIST_PACKAGE_AXIS_PARAMS("Axis", "AXIS", "axis")
+ */
+// LIST_PACKAGE_AXIS_PARAMS()
+
+/*
+ * per-channel quantized op name list
+ * optional
+ * needs to be global in the package
+ * one list per package
+ * for listing op names which support per-channel quantization
+ * per-axis quantization info of an op is embedded in axisScaleOffsetEncoding
+ * inside Qnn_Tensor_t types
+ * HTP backend only supports per-channel scale ops
+ * i.e. along last dimension, offset is always zero
+ * if an op name is marked as having per-channel scale support, and in
+ * QNN_AddNode, at least one input, parameter, or output has
+ * QNN_QUANTIZATION_ENCODING_AXIS_SCALE_OFFSET type:
+ * then:
+ *   HTP backend will pass to op implementation function the following:
+ *     output(s), input(s), parameter(s),
+ *     outputPerChannelScale(s), inputPerChannelScale(s),
+ *     paramPerChannelScale(s)
+ *
+ * optimization rules can be used to remove extra perChannelScale tensors
+ *
+ * syntax: LIST_PACKAGE_PER_CHANNEL_QUANTIZED_OPS(...)
+ * e.g. LIST_PACKAGE_PER_CHANNEL_QUANTIZED_OPS(sg_op1Name, sg_op2Name)
+ */
+
+// LIST_PACKAGE_PER_CHANNEL_QUANTIZED_OPS()
+
+/*
+ * Declare and define the special initialize function for HTP Backend to load
+ */
+INIT_PKG_CORE_INIT_FUNC()
+
+/* op package API's */
+
+Qnn_ErrorHandle_t ExampleOpPackageInit(
+    QnnOpPackage_GlobalInfrastructure_t infrastructure) {
+  if (sg_packageInitialized)
+    return QNN_OP_PACKAGE_ERROR_LIBRARY_ALREADY_INITIALIZED;
+
+  /*
+   * op parameter order registration
+   * registers all defined op parameter orders in the package
+   * syntax: REGISTER_PACKAGE_PARAM_ORDERS()
+   */
+  REGISTER_PACKAGE_PARAM_ORDERS()
+
+  /*
+   * op axis parameter name registration
+   * registers all axis parameter names in the package
+   * used with LIST_PACKAGE_AXIS_PARAMS(...)
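+   * e.g. (illustrative only; this package declares no axis parameters)
+   *      a package that declared LIST_PACKAGE_AXIS_PARAMS("axis") at file
+   *      scope would have its "axis" values remapped by the HTP backend here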
+ * syntax: REGISTER_PACKAGE_AXIS_PARAMS() + */ + REGISTER_PACKAGE_AXIS_PARAMS() + + /* + * per-channel scale op name registration + * registers all per-channel scale op names in the package + * used with LIST_PACKAGE_PER_CHANNEL_QUANTIZED_OPS(...) + * syntax: REGISTER_PACKAGE_PER_CHANNEL_QUANTIZED_OPS() + */ + REGISTER_PACKAGE_PER_CHANNEL_QUANTIZED_OPS() + + sg_globalInfra = infrastructure; + sg_packageInitialized = true; + return QNN_SUCCESS; +} + +Qnn_ErrorHandle_t ExampleOpPackageGetInfo(const QnnOpPackage_Info_t** info) { + if (!sg_packageInitialized) + return QNN_OP_PACKAGE_ERROR_LIBRARY_NOT_INITIALIZED; + if (!info) + return QNN_OP_PACKAGE_ERROR_INVALID_INFO; + + sg_packageInfo = QNN_OP_PACKAGE_INFO_INIT; + sg_packageInfo.packageName = sg_packageName; + sg_packageInfo.operationNames = sg_opNames.data(); + sg_packageInfo.numOperations = sg_opNames.size(); + sg_packageInfo.sdkBuildId = QNN_SDK_BUILD_ID; + sg_packageInfo.sdkApiVersion = &sg_sdkApiVersion; + + *info = &sg_packageInfo; + return QNN_SUCCESS; +} + +Qnn_ErrorHandle_t ExampleOpPackageLogInitialize( + QnnLog_Callback_t callback, + QnnLog_Level_t maxLogLevel) { + if (sg_logInitialized) + return QNN_OP_PACKAGE_ERROR_LIBRARY_ALREADY_INITIALIZED; + if (!callback) + return QNN_LOG_ERROR_INVALID_ARGUMENT; + if (maxLogLevel < QNN_LOG_LEVEL_ERROR) + return QNN_LOG_ERROR_INVALID_ARGUMENT; + sg_logCallback = callback; + sg_maxLogLevel = maxLogLevel; + sg_logInitialized = true; + return QNN_SUCCESS; +} + +Qnn_ErrorHandle_t ExampleOpPackageLogSetLevel(QnnLog_Level_t maxLogLevel) { + if (maxLogLevel < QNN_LOG_LEVEL_ERROR) + return QNN_LOG_ERROR_INVALID_ARGUMENT; + sg_maxLogLevel = maxLogLevel; + return QNN_SUCCESS; +} + +Qnn_ErrorHandle_t ExampleOpPackageLogTerminate() { + if (!sg_logInitialized) + return QNN_OP_PACKAGE_ERROR_LIBRARY_NOT_INITIALIZED; + sg_logCallback = nullptr; + sg_maxLogLevel = (QnnLog_Level_t)0; + sg_logInitialized = false; + return QNN_SUCCESS; +} + +Qnn_ErrorHandle_t ExampleOpPackageValidateOpConfig(Qnn_OpConfig_t opConfig) { + if (std::string(sg_packageName) != opConfig.v1.packageName) { + return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; + } + + /* auto-generated validation code below + * Check if op config type matches any registered ops + * If a match is found, check number of inputs, outputs and params + */ + if (std::string(opConfig.v1.typeName) == "ExampleCustomOp") { + if (opConfig.v1.numOfParams != 0 || opConfig.v1.numOfInputs != 1 || + opConfig.v1.numOfOutputs != 1) { + return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; + } + } else { + return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE; + } + + /* + * additional validation code here + * */ + + return QNN_SUCCESS; +} + +/* The following three functions in this comment are not called by HTP backend + *for now, no auto-generated implementations are created. Users should see + *example for full function signatures. 
(version 1.3.0) Qnn_ErrorHandle_t ExampleOpPackageCreateKernels(
+ *     QnnOpPackage_GraphInfrastructure_t graphInfrastructure,
+ *     QnnOpPackage_Node_t node, QnnOpPackage_Kernel_t** kernels,
+ *     uint32_t* numKernels)
+ * (version 1.3.0) Qnn_ErrorHandle_t ExampleOpPackageFreeKernels(
+ *     QnnOpPackage_Kernel_t* kernels)
+ *
+ * (version 1.4.0) Qnn_ErrorHandle_t ExampleOpPackageCreateOpImpl(
+ *     QnnOpPackage_GraphInfrastructure_t graphInfrastructure,
+ *     QnnOpPackage_Node_t node, QnnOpPackage_OpImpl_t* opImpl)
+ * (version 1.4.0) Qnn_ErrorHandle_t ExampleOpPackageFreeOpImpl(
+ *     QnnOpPackage_OpImpl_t opImpl)
+ */
+
+Qnn_ErrorHandle_t ExampleOpPackageTerminate() {
+  if (!sg_packageInitialized)
+    return QNN_OP_PACKAGE_ERROR_LIBRARY_NOT_INITIALIZED;
+
+  sg_globalInfra = nullptr;
+  sg_packageInitialized = false;
+  return QNN_SUCCESS;
+}
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* latest version */
+Qnn_ErrorHandle_t ExampleOpPackageInterfaceProvider(
+    QnnOpPackage_Interface_t* interface) {
+  if (!interface)
+    return QNN_OP_PACKAGE_ERROR_INVALID_ARGUMENT;
+  interface->interfaceVersion = {1, 4, 0};
+  interface->v1_4.init = ExampleOpPackageInit;
+  interface->v1_4.terminate = ExampleOpPackageTerminate;
+  interface->v1_4.getInfo = ExampleOpPackageGetInfo;
+  interface->v1_4.validateOpConfig = ExampleOpPackageValidateOpConfig;
+  interface->v1_4.createOpImpl = nullptr;
+  interface->v1_4.freeOpImpl = nullptr;
+  interface->v1_4.logInitialize = ExampleOpPackageLogInitialize;
+  interface->v1_4.logSetLevel = ExampleOpPackageLogSetLevel;
+  interface->v1_4.logTerminate = ExampleOpPackageLogTerminate;
+  return QNN_SUCCESS;
+}
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/examples/qualcomm/custom_op/example_op_package_htp/ExampleOpPackage/src/ops/ExampleCustomOp.cpp b/examples/qualcomm/custom_op/example_op_package_htp/ExampleOpPackage/src/ops/ExampleCustomOp.cpp
new file mode 100644
index 00000000000..69a1d3f1d57
--- /dev/null
+++ b/examples/qualcomm/custom_op/example_op_package_htp/ExampleOpPackage/src/ops/ExampleCustomOp.cpp
@@ -0,0 +1,211 @@
+//==============================================================================
+// Auto Generated Code for ExampleOpPackage
+//==============================================================================
+
+#include "HTP/core/constraints.h"
+#include "HTP/core/op_package_feature_support.h"
+#include "HTP/core/op_register_ext.h"
+#include "HTP/core/optimize.h"
+#include "HTP/core/simple_reg.h"
+#include "QnnOpPackage.h"
+#ifdef __hexagon__
+#include "HAP_farf.h"
+#else /* __hexagon__ */
+#include <cstdio>
+#define FARF(level, fmt, ...) printf(fmt "\n", ##__VA_ARGS__)
+#endif /* __hexagon__ */
+
+BEGIN_PKG_OP_DEFINITION(PKG_ExampleCustomOp);
+
+// op execute function declarations
+template <typename TensorType>
+GraphStatus examplecustomopImpl(TensorType& out_0, const TensorType& in_0);
+
+// forward declaration of sample cost function
+static float examplecustomopCostFunc(const Op* op);
+
+/*
+ * method 1 for defining op, using default cost value (i.e. GLACIAL) and default
+ * flag (Flags::RESOURCE_HVX) syntax: DEF_PACKAGE_OP(F,OP) e.g.
+ * DEF_PACKAGE_OP((examplecustomopImpl<Tensor>), "ExampleCustomOp")
+ */
+DEF_PACKAGE_OP((examplecustomopImpl<Tensor>), "ExampleCustomOp")
+
+/*
+ * method 2 for defining op with specified cost value (one of GLACIAL, SNAIL,
+ * FAST, FREE) and provided flags syntax:
+ * DEF_PACKAGE_OP_AND_COST_AND_FLAGS(F,OP,COST,...) can use zero or more flags,
+ * FLAG options are IS_CONST, INHIBIT_CONST_PROP, RESOURCE_HVX, RESOURCE_HMX(not
+ * supported in external op packages) e.g.
+ * DEF_PACKAGE_OP_AND_COST_AND_FLAGS((examplecustomopImpl<PlainFloatTensor>),
+ * "ExampleCustomOp", SNAIL)
+ */
+
+/*
+ * method 3 for defining op with cost function pointer and provided flags
+ * cost function pointer type: typedef float (*cost_function) (const Op * op);
+ * syntax: DEF_PACKAGE_OP_AND_COST_F_AND_FLAGS(F,OP,COST_F,...)
+ * e.g.
+ * DEF_PACKAGE_OP_AND_COST_F_AND_FLAGS((examplecustomopImpl<PlainFloatTensor>),
+ * "ExampleCustomOp", examplecustomopCostFunc, Flags::RESOURCE_HVX)
+ */
+
+/*
+ * optimization definitions
+ * need to be global in the package
+ * one definition per optimization
+ * syntax:
+ * DEF_PACKAGE_OPTIMIZATION(PRIORITY,MATCHCODE,CONSTRAINTCODE,REPLACECODE)
+ * PRIORITY predefined values include EARLY(2000), MIDDLE(3000), LATE(4000)
+ * HTP core provides some replacement functions for op package to use
+ * for more information about optimization rules, please refer to HTP core
+ * documentations
+ */
+
+/*
+ * op parameter order definitions
+ * need to be global in the package
+ * one definition per op, and this is optional
+ * syntax:
+ * DEF_PACKAGE_PARAM_ORDER(OP,PARAM1,MANDATORY1,DEFAULT1,PARAM2,MANDATORY2,DEFAULT2...)
+ * one or more parameters can be specified for each op
+ * order of parameters listed determines the order of parameters passed into op
+ * execution functions
+ * if an op does not have a parameter order definition, the parameter order
+ * passed into Qnn_addNode will be passed into op execution functions
+ * if an op has a parameter order definition, any parameter passed into
+ * Qnn_addNode with an unlisted name will be abandoned
+ * if two or more op packages with the same package name are registered, they
+ * cannot list conflicting parameter orders
+ * PARAM refers to parameter name as a string literal
+ * MANDATORY refers to whether this parameter is required to be provided at
+ * Qnn_addNode
+ * DEFAULT is used when MANDATORY is false
+ * if provided as Qnn_Param_t*, DEFAULT will be used for graph construction
+ * when this parameter is not provided at Qnn_addNode
+ * if provided as nullptr, graph construction will skip this parameter
+ * when this parameter is not provided at Qnn_addNode
+ */
+
+/* execute functions for ops */
+
+template <typename TensorType>
+GraphStatus examplecustomopImpl(TensorType& out_0, const TensorType& in_0)
+
+{
+  /*
+   * add code here
+   * */
+  /*
+   * To have good performance and stability, it is required to avoid heap memory
+   * allocation in this function. The heap memory allocation includes but is not
+   * limited to calling malloc, operator new, constructing STL container objects
+   * like std::vector with default allocator, and adding items like calling
+   * std::vector::push_back to STL container objects with default allocator.
+   *
+   * Please check in SDK documentation for more information.
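+   *
+   * As a hedged illustration (not part of the auto-generated template),
+   * scratch space can be kept on the stack instead of the heap:
+   *
+   *   constexpr size_t kMaxScratch = 128; // fixed compile-time bound
+   *   float scratch[kMaxScratch];         // stack storage, no malloc/new
+   *
+   * real kernels would typically use the SDK's HVX/TCM facilities instead.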
+   */
+  const size_t input_num_elements = in_0.total_storage_elements();
+  DTypeScaleOff input_intfc = in_0.get_dtype_intfc();
+
+  FARF(
+      ALWAYS,
+      "[QNN ExecuTorch Op Package test] "
+      "input num_elem: %zu, dtype %d, scale %f, offset %d",
+      input_num_elements,
+      input_intfc.dtype,
+      input_intfc.scale,
+      input_intfc.offset);
+
+  if (input_intfc.dtype != DType::Float32 &&
+      input_intfc.dtype != DType::QUInt8) {
+    FARF(
+        ALWAYS,
+        "[QNN ExecuTorch Op Package test]"
+        "[Error] The datatype of input is %d, not float32(%d) nor uint8(%d)",
+        input_intfc.dtype,
+        DType::Float32,
+        DType::QUInt8);
+    return GraphStatus::ErrorPrecision;
+  }
+
+  const size_t output_num_elements = out_0.total_storage_elements();
+  DTypeScaleOff out_intfc = out_0.get_dtype_intfc();
+  FARF(
+      ALWAYS,
+      "[QNN ExecuTorch Op Package test] "
+      "out num_elem: %zu, dtype %d, scale %f, offset %d",
+      output_num_elements,
+      out_intfc.dtype,
+      out_intfc.scale,
+      out_intfc.offset);
+  if (out_intfc.dtype != DType::Float32 && out_intfc.dtype != DType::QUInt8) {
+    FARF(
+        ALWAYS,
+        "[QNN ExecuTorch Op Package test]"
+        "[Error] The datatype of output is %d, not float32(%d) nor uint8(%d)",
+        out_intfc.dtype,
+        DType::Float32,
+        DType::QUInt8);
+    return GraphStatus::ErrorPrecision;
+  }
+
+  if (input_num_elements != output_num_elements) {
+    FARF(
+        ALWAYS,
+        "[QNN ExecuTorch Op Package test]"
+        "[Error] The number of input and output elements doesn't match. "
+        "input_num_elements: %zu, output_num_elements: %zu",
+        input_num_elements,
+        output_num_elements);
+    return GraphStatus::ErrorDimensions;
+  }
+  if (input_intfc.dtype == DType::Float32) {
+    const float* p_input = static_cast<const float*>(in_0.raw_data_const());
+    float* p_output = static_cast<float*>(out_0.raw_data());
+    const int multiplier = 3;
+    for (size_t i = 0; i < input_num_elements; ++i) {
+      p_output[i] = multiplier * p_input[i];
+
+      FARF(
+          ALWAYS,
+          "[QNN ExecuTorch Op Package test]"
+          "input0[%zu]=%f, multiplier=%d, output[%zu]=%f",
+          i,
+          p_input[i],
+          multiplier,
+          i,
+          p_output[i]);
+    }
+  } else if (input_intfc.dtype == DType::QUInt8) {
+    const uint8_t* p_input =
+        static_cast<const uint8_t*>(in_0.raw_data_const());
+    uint8_t* p_output = static_cast<uint8_t*>(out_0.raw_data());
+    const int multiplier = 3 * input_intfc.scale / out_intfc.scale;
+    for (size_t i = 0; i < input_num_elements; ++i) {
+      p_output[i] = multiplier * p_input[i];
+
+      FARF(
+          ALWAYS,
+          "[QNN ExecuTorch Op Package test]"
+          "input0[%zu]=%d, multiplier=%d, output[%zu]=%d",
+          i,
+          p_input[i],
+          multiplier,
+          i,
+          p_output[i]);
+    }
+  }
+
+  return GraphStatus::Success;
+}
+
+__attribute__((unused)) static float examplecustomopCostFunc(const Op* op) {
+  /*
+   * add code here
+   * */
+
+  float cost = 0.0; // add cost computation here
+  return cost;
+}
+
+/* At the bottom of the op file, call END_PKG_OP_DEFINITION(<name>),
+   where <name> is as in BEGIN_PKG_OP_DEFINITION(<name>)
+*/
+END_PKG_OP_DEFINITION(PKG_ExampleCustomOp);
diff --git a/examples/qualcomm/utils.py b/examples/qualcomm/utils.py
index 5ecce30078e..ca06e6487e6 100755
--- a/examples/qualcomm/utils.py
+++ b/examples/qualcomm/utils.py
@@ -21,7 +21,10 @@
 import torch
 from executorch.backends.qualcomm.partition.qnn_partitioner import QnnPartitioner
 from executorch.backends.qualcomm.quantizer.quantizer import QnnQuantizer, QuantDtype
-from executorch.backends.qualcomm.serialization.qc_schema import QcomChipset
+from executorch.backends.qualcomm.serialization.qc_schema import (
+    QcomChipset,
+    QnnExecuTorchOpPackageOptions,
+)
 from executorch.backends.qualcomm.utils.utils import (
     capture_program,
     generate_htp_compiler_spec,
@@
-301,6 +304,7 @@ def build_executorch_binary( dump_intermediate_outputs=False, passes_job=None, qat_training_data=None, + op_package_options: QnnExecuTorchOpPackageOptions = None, ): """ A function to generate an ExecuTorch binary for Qualcomm platforms. @@ -352,6 +356,7 @@ def build_executorch_binary( backend_options=backend_options, shared_buffer=shared_buffer, dump_intermediate_outputs=dump_intermediate_outputs, + op_package_options=op_package_options, ), skip_node_id_set, skip_node_op_set, From 79be8668ec5427c30fe2c6802ac84df32b2bf68b Mon Sep 17 00:00:00 2001 From: shewu-quic Date: Wed, 26 Feb 2025 09:33:03 +0800 Subject: [PATCH 2/3] move test_custom_op to TestUtilScript --- backends/qualcomm/tests/test_qnn_delegate.py | 106 ++++++------------- backends/qualcomm/tests/utils.py | 10 ++ 2 files changed, 44 insertions(+), 72 deletions(-) diff --git a/backends/qualcomm/tests/test_qnn_delegate.py b/backends/qualcomm/tests/test_qnn_delegate.py index 611e95fedd0..77fce0d3de6 100644 --- a/backends/qualcomm/tests/test_qnn_delegate.py +++ b/backends/qualcomm/tests/test_qnn_delegate.py @@ -3118,16 +3118,6 @@ def test_qnn_backend_draw_graph(self): class TestExampleLLMScript(TestQNN): - def required_envs(self, conditions=None) -> bool: - conditions = [] if conditions is None else conditions - return all( - [ - self.executorch_root, - self.artifact_dir, - *conditions, - ] - ) - def test_llama3_2_1b(self): if not self.required_envs(): self.skipTest("missing required envs") @@ -3285,16 +3275,6 @@ def test_llama_stories_110m(self): class TestExampleOssScript(TestQNN): - def required_envs(self, conditions=None) -> bool: - conditions = [] if conditions is None else conditions - return all( - [ - self.executorch_root, - self.artifact_dir, - *conditions, - ] - ) - def test_conv_former(self): if not self.required_envs([self.image_dataset]): self.skipTest("missing required envs") @@ -3666,16 +3646,6 @@ def test_ssd300_vgg16(self): class TestExampleQaihubScript(TestQNN): - def required_envs(self, conditions=None) -> bool: - conditions = [] if conditions is None else conditions - return all( - [ - self.executorch_root, - self.artifact_dir, - *conditions, - ] - ) - def test_utils_export(self): with tempfile.TemporaryDirectory() as tmp_dir: module = ContextBinaryExample() # noqa: F405 @@ -3904,16 +3874,6 @@ def test_stable_diffusion(self): class TestExampleScript(TestQNN): - def required_envs(self, conditions=None) -> bool: - conditions = [] if conditions is None else conditions - return all( - [ - self.executorch_root, - self.artifact_dir, - *conditions, - ] - ) - def test_mobilenet_v2(self): if not self.required_envs([self.image_dataset]): self.skipTest("missing required envs") @@ -4179,38 +4139,6 @@ def test_deeplab_v3(self): self.assertGreaterEqual(msg["MPA"], 0.70) self.assertGreaterEqual(msg["MIoU"], 0.55) - def test_custom_op(self): - if not self.required_envs([self.op_package_dir]): - self.skipTest("missing required envs") - cmds = [ - "python", - f"{self.executorch_root}/examples/qualcomm/custom_op/custom_ops_1.py", - "--artifact", - self.artifact_dir, - "--build_folder", - self.build_folder, - "--device", - self.device, - "--model", - self.model, - "--ip", - self.ip, - "--port", - str(self.port), - "--op_package_dir", - self.op_package_dir, - "--build_op_package", - ] - if self.host: - cmds.extend(["--host", self.host]) - - p = subprocess.Popen(cmds, stdout=subprocess.DEVNULL) - with Listener((self.ip, self.port)) as listener: - conn = listener.accept() - p.communicate() - msg = json.loads(conn.recv()) 
- self.assertTrue(msg["is_close"]) - @unittest.skip("dynamic shape inputs appear in recent torch.export.export") def test_mobilebert(self): if not self.required_envs([self.pretrained_weight]): @@ -4357,6 +4285,40 @@ def test_export_example(self): ) +class TestUtilScript(TestQNN): + def test_custom_op(self): + if not self.required_envs([self.op_package_dir]): + self.skipTest("missing required envs") + cmds = [ + "python", + f"{self.executorch_root}/examples/qualcomm/custom_op/custom_ops_1.py", + "--artifact", + self.artifact_dir, + "--build_folder", + self.build_folder, + "--device", + self.device, + "--model", + self.model, + "--ip", + self.ip, + "--port", + str(self.port), + "--op_package_dir", + self.op_package_dir, + "--build_op_package", + ] + if self.host: + cmds.extend(["--host", self.host]) + + p = subprocess.Popen(cmds, stdout=subprocess.DEVNULL) + with Listener((self.ip, self.port)) as listener: + conn = listener.accept() + p.communicate() + msg = json.loads(conn.recv()) + self.assertTrue(msg["is_close"]) + + def setup_environment(): parser = setup_common_args_and_variables() diff --git a/backends/qualcomm/tests/utils.py b/backends/qualcomm/tests/utils.py index 723c9262ff4..d0ef4ba6a98 100644 --- a/backends/qualcomm/tests/utils.py +++ b/backends/qualcomm/tests/utils.py @@ -235,6 +235,16 @@ def _save_model_and_expected_output( return input_list, ref_outputs, pte_fname + def required_envs(self, conditions=None) -> bool: + conditions = [] if conditions is None else conditions + return all( + [ + self.executorch_root, + self.artifact_dir, + *conditions, + ] + ) + def verify_output( # noqa: C901 self, module: torch.nn.Module, From 8e200b7f55f953e777ba9eab47a679a0198c878c Mon Sep 17 00:00:00 2001 From: haowhsu-quic Date: Tue, 4 Mar 2025 23:14:55 +0800 Subject: [PATCH 3/3] enable 4-bit embedding on llama.py --- .../qualcomm/partition/qnn_partitioner.py | 15 +- .../qualcomm/quantizer/custom_annotation.py | 29 ++ .../llama/custom_ops/embedding/Makefile | 364 ++++++++++++++++++ .../config/EmbeddingOpPackageHtp.xml | 90 +++++ .../llama/custom_ops/embedding/op.py | 66 ++++ .../src/EmbeddingOpPackageInterface.cpp | 293 ++++++++++++++ .../embedding/src/ops/Embedding.cpp | 111 ++++++ examples/qualcomm/oss_scripts/llama/llama.py | 140 ++++++- 8 files changed, 1097 insertions(+), 11 deletions(-) create mode 100644 examples/qualcomm/oss_scripts/llama/custom_ops/embedding/Makefile create mode 100644 examples/qualcomm/oss_scripts/llama/custom_ops/embedding/config/EmbeddingOpPackageHtp.xml create mode 100644 examples/qualcomm/oss_scripts/llama/custom_ops/embedding/op.py create mode 100644 examples/qualcomm/oss_scripts/llama/custom_ops/embedding/src/EmbeddingOpPackageInterface.cpp create mode 100644 examples/qualcomm/oss_scripts/llama/custom_ops/embedding/src/ops/Embedding.cpp diff --git a/backends/qualcomm/partition/qnn_partitioner.py b/backends/qualcomm/partition/qnn_partitioner.py index 375b4646345..61d80b4eaaf 100644 --- a/backends/qualcomm/partition/qnn_partitioner.py +++ b/backends/qualcomm/partition/qnn_partitioner.py @@ -111,7 +111,13 @@ def is_node_supported(self, _, node: torch.fx.Node) -> bool: return supported def __del__(self): - self.qnn_manager.Destroy() + # HTP op package contains some static data structures + # which will trigger preparation failure in qnn_preprocess + # if libQnnHtp.so is not fully unloaded + # --- + # currently we'll just keep manager alive for simplicity + #self.qnn_manager.Destroy() + pass class QnnPartitioner(Partitioner): @@ -179,7 +185,12 @@ def partition(self, 
edge_program: torch.export.ExportedProgram) -> PartitionResu # pop certain keys in meta for not affecting the passes in compilation # TODO: need to put property name in common definitions node.meta.pop(QCOM_AXIS_ORDER, "") - del self.op_support_checker + # HTP op package contains some static data structures + # which will trigger preparation failure in qnn_preprocess + # if libQnnHtp.so is not fully unloaded + # --- + # currently we'll just keep manager alive for simplicity + #del self.op_support_checker return PartitionResult( tagged_exported_program=edge_program, partition_tags=self.partition_tags ) diff --git a/backends/qualcomm/quantizer/custom_annotation.py b/backends/qualcomm/quantizer/custom_annotation.py index 33237f3bebe..7d9a5e571f9 100644 --- a/backends/qualcomm/quantizer/custom_annotation.py +++ b/backends/qualcomm/quantizer/custom_annotation.py @@ -9,6 +9,7 @@ from executorch.backends.qualcomm.quantizer.annotators import QUANT_ANNOTATION_KEY from executorch.backends.qualcomm.quantizer.quantizer import ( get_16a8w_qnn_ptq_config, + get_16a4w_qnn_ptq_config, get_8a8w_qnn_ptq_config, get_ptq_per_channel_quant_config, QuantizationConfig, @@ -53,6 +54,34 @@ def annotate_conv2d(node: Node, quantization_config: QuantizationConfig) -> None ) +def annotate_linear_16a4w_in_affine_layer(gm: torch.fx.GraphModule) -> None: + def annotate_conv2d(node: Node, quantization_config: QuantizationConfig) -> None: + input_qspec_map = {} + input_act = node.args[0] + input_spec = quantization_config.input_activation + input_qspec_map[input_act] = input_spec + + weight = node.args[1] + input_qspec_map[weight] = quantization_config.weight + + node.meta[QUANT_ANNOTATION_KEY] = QuantizationAnnotation( + input_qspec_map=input_qspec_map, + output_qspec=quantization_config.output_activation, + _annotated=True, + ) + + quantization_config_16a4w = get_16a4w_qnn_ptq_config(act_observer=MinMaxObserver) + for node in gm.graph.nodes: + if node.op == "call_function" and node.target == torch.ops.aten.conv2d.default: + if "nn_module_stack" in node.meta: + module_values_list = list(node.meta["nn_module_stack"].values()) + full_qualified_name = module_values_list[-1][0] + if full_qualified_name == "output.conv": + annotate_conv2d( + node, quantization_config=quantization_config_16a4w + ) + + def annotate_prefill_kv_output(gm: torch.fx.GraphModule, kv_quant_attrs: dict): for node in gm.graph.nodes: if node.op == "output": diff --git a/examples/qualcomm/oss_scripts/llama/custom_ops/embedding/Makefile b/examples/qualcomm/oss_scripts/llama/custom_ops/embedding/Makefile new file mode 100644 index 00000000000..8d37e042640 --- /dev/null +++ b/examples/qualcomm/oss_scripts/llama/custom_ops/embedding/Makefile @@ -0,0 +1,364 @@ +# Copyright (c) Qualcomm Innovation Center, Inc. +# All rights reserved +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# users should provide locations for QNN_INCLUDE and HEXAGON_SDK_ROOT +# export HEXAGON_SDK_ROOT = /path/to/hexagon-sdk + +# check all setup prerequisites if the command goal is not clean +ifneq ($(MAKECMDGOALS),clean) +ifndef QNN_INCLUDE +$(info "INFO: Qnn include not explicitly defined, attempting to use QNN_SDK_ROOT if it is valid") +QNN_INCLUDE := $(QNN_SDK_ROOT)/include/QNN +endif +ifeq ($(wildcard $(QNN_INCLUDE)),) +$(error "ERROR: QNN_INCLUDE path is not set. 
QNN include paths must be set to obtain BE headers necessary to compile the package") +endif +ifndef QNN_TARGET_LIB +$(info "INFO: Qnn target not explicitly defined, attempting to use QNN_SDK_ROOT if it is valid") +QNN_TARGET_LIB := $(QNN_SDK_ROOT)/lib/aarch64-android +endif +ifeq ($(wildcard $(QNN_TARGET_LIB)),) +ifeq ($(MAKECMDGOALS),htp_aarch64) +$(error "ERROR: QNN_TARGET_LIB is needed to compile package for aarch64") +else ifeq ($(MAKECMDGOALS),all) +$(info "WARNING:QNN_TARGET_LIB may need to be defined to compile packages") +endif +endif + +ifndef HEXAGON_SDK_ROOT +$(error "ERROR: HEXAGON_SDK_ROOT is not set. Hexagon-SDK path must be set to the latest hexagon-sdk-x.y.z") +endif + +ifeq ($(wildcard $(HEXAGON_SDK_ROOT)),) +$(error "ERROR: HEXAGON_SDK_ROOT is not set correctly. Please set HEXAGON_SDK_ROOT to latest hexagon-sdk-X.Y.Z path") +endif + +HEXAGON_SDK_BASE := $(dir $(HEXAGON_SDK_ROOT)) + +$(info "HEXAGON_SDK_ROOT is [${HEXAGON_SDK_ROOT}]") +# Users should note that the tools version may change between hexagon sdk versions +# Following combination of SDK and Tool version is supported +HEXAGON_SDK_ROOT_V68 := $(HEXAGON_SDK_BASE)/hexagon-sdk-4.2.0 +HEXAGON_SDK_ROOT_V69 := $(HEXAGON_SDK_BASE)/hexagon-sdk-4.3.0 +HEXAGON_SDK_ROOT_V73 := $(HEXAGON_SDK_BASE)/hexagon-sdk-5.4.0 +HEXAGON_SDK_ROOT_V75 := $(HEXAGON_SDK_BASE)/hexagon-sdk-5.4.0 +HEXAGON_SDK_ROOT_V79 := $(HEXAGON_SDK_BASE)/hexagon-sdk-6.0.0 + +#Updated to point to latest sdk to match with libQnnHtp.so +HEXAGON_SDK_ROOT_X86 := $(HEXAGON_SDK_BASE)/hexagon-sdk-6.0.0 +HEXAGON_TOOLS_VERSION_V68 := 8.4.09 +HEXAGON_TOOLS_VERSION_V69 := 8.5.03 +HEXAGON_TOOLS_VERSION_V73 := 8.6.02 +HEXAGON_TOOLS_VERSION_V75 := 8.7.03 +HEXAGON_TOOLS_VERSION_V79 := 8.8.02 + +#Updated to point to latest sdk to match with libQnnHtp.so +HEXAGON_TOOLS_VERSION_X86 := 8.8.02 + +ifndef ANDROID_NDK_ROOT +ifeq ($(MAKECMDGOALS),htp_aarch64) +$(error "ERROR: ANDROID_NDK_ROOT is not set. Android NDK path must be set to compile package for aarch64") +else ifeq ($(MAKECMDGOALS),all) +$(info "WARNING: ANDROID_NDK_ROOT is not set. Android NDK path must be set to compile package for aarch64") +endif +endif + +ifndef PACKAGE_NAME +export +PACKAGE_NAME := $(notdir $(shell pwd)) +$(info "INFO: No package name defined. Using current directory name: $(PACKAGE_NAME) as the package name") +endif + +WORK := build +SRC_DIR := src +OP_SRC_DIR := src/ops +OP_INCLUDE_DIR := ./include +OP_INCLUDES = #$(wildcard $(OP_INCLUDE_DIR)/*.h) user defined if any op specific headers are needed, add -I to common flags +LIBRARY_NAME := libQnn$(PACKAGE_NAME).so +SUPPORTED_TARGETS = x86_64-linux-clang hexagon-v68 hexagon-v69 hexagon-v73 hexagon-v75 hexagon-v79 aarch64-android + + +COMMON_CXX_FLAGS = -std=c++17 -I$(QNN_INCLUDE) -fPIC -Wall -Wreorder -Wno-missing-braces -Wno-unused-function +COMMON_CXX_FLAGS += -Werror -Wno-format -Wno-unused-command-line-argument -fvisibility=default -stdlib=libc++ +COMMON_CXX_FLAGS += -DQNN_API="__attribute__((visibility(\"default\")))" -D__QAIC_HEADER_EXPORT="__attribute__((visibility(\"default\")))" + +X86_LIBNATIVE_RELEASE_DIR := $(HEXAGON_SDK_ROOT_X86)/tools/HEXAGON_Tools/$(HEXAGON_TOOLS_VERSION_X86)/Tools + +# Ensure hexagon sdk tool version can be retrieved +ifeq ($(wildcard $(X86_LIBNATIVE_RELEASE_DIR)/.),) +$(error "Cannot retrieve hexagon tools from: $(X86_LIBNATIVE_RELEASE_DIR). \ + \ + Please check that hexagon tools version is correct. Expected: $(HEXAGON_TOOLS_VERSION_X86)") +endif + +#Check tools for hexagon_v68 are present. 
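+# e.g. (illustrative paths) with the side-by-side SDK layout assumed above:
+#   export HEXAGON_SDK_ROOT=/opt/qcom/hexagon-sdk-4.2.0
+#   make htp_v68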
+ifeq ($(MAKECMDGOALS),htp_v68) +ifeq ($(wildcard $(HEXAGON_SDK_ROOT_V68)),) +$(error "ERROR: HEXAGON_SDK_ROOT_V68 is set incorrectly. Cannot retrieve $(HEXAGON_SDK_ROOT_V68)") +endif +endif + +ifeq ($(MAKECMDGOALS),htp_v69) +ifeq ($(wildcard $(HEXAGON_SDK_ROOT_V69)),) +$(error "ERROR: HEXAGON_SDK_ROOT_V69 is set incorrectly. Cannot retrieve $(HEXAGON_SDK_ROOT_V69)") +endif +endif + +ifeq ($(MAKECMDGOALS),htp_v73) +ifeq ($(wildcard $(HEXAGON_SDK_ROOT_V73)),) +$(error "ERROR: HEXAGON_SDK_ROOT_V73 is set incorrectly. Cannot retrieve $(HEXAGON_SDK_ROOT_V73)") +endif +endif + +ifeq ($(MAKECMDGOALS),htp_v75) +ifeq ($(wildcard $(HEXAGON_SDK_ROOT_V75)),) +$(error "ERROR: HEXAGON_SDK_ROOT_V75 is set incorrectly. Cannot retrieve $(HEXAGON_SDK_ROOT_V75)") +endif +endif + +#Check tools for hexagon_v79 are present. +ifeq ($(MAKECMDGOALS),htp_v79) +ifeq ($(wildcard $(HEXAGON_SDK_ROOT_V79)),) +$(error "ERROR: HEXAGON_SDK_ROOT_V79 is set incorrectly. Cannot retrieve $(HEXAGON_SDK_ROOT_V79)") +endif +endif + + + +endif +OP_SOURCES = $(wildcard $(OP_SRC_DIR)/*.cpp) +OTHER_SOURCES = $(wildcard $(SRC_DIR)/*.cpp) +HFILES = $(wildcard $(QNN_INCLUDE)/*.h) +HFILES += $(wildcard $(QNN_INCLUDE)/HTP/*.h) +HFILES += $(wildcard $(QNN_INCLUDE)/HTP/core/*.h) +OP_OBJS = $(patsubst $(SRC_DIR)/%,%,$(patsubst %.cpp,%.o,$(OP_SOURCES))) +OTHER_OBJS = $(patsubst $(SRC_DIR)/%,%,$(patsubst %.cpp,%.o,$(OTHER_SOURCES))) + +#======= Assembly ======== +OP_SOURCES_ASM_X86 += $(wildcard $(OP_SRC_DIR)/x86_asm/*.S) +OP_OBJS_ASM_X86 += $(subst /x86_asm/,/,$(patsubst $(SRC_DIR)/%,%,$(patsubst %.S,%.o,$(OP_SOURCES_ASM_X86)))) +OP_SOURCES_ASM_V68 += $(wildcard $(OP_SRC_DIR)/v68_asm/*.S) +OP_OBJS_ASM_V68 += $(subst /v68_asm/,/,$(patsubst $(SRC_DIR)/%,%,$(patsubst %.S,%.o,$(OP_SOURCES_ASM_V68)))) +OP_SOURCES_ASM_V69 += $(wildcard $(OP_SRC_DIR)/v69_asm/*.S) +OP_OBJS_ASM_V69 += $(subst /v69_asm/,/,$(patsubst $(SRC_DIR)/%,%,$(patsubst %.S,%.o,$(OP_SOURCES_ASM_V69)))) +OP_SOURCES_ASM_V73 += $(wildcard $(OP_SRC_DIR)/v73_asm/*.S) +OP_OBJS_ASM_V73 += $(subst /v73_asm/,/,$(patsubst $(SRC_DIR)/%,%,$(patsubst %.S,%.o,$(OP_SOURCES_ASM_V73)))) +OP_SOURCES_ASM_V75 += $(wildcard $(OP_SRC_DIR)/v75_asm/*.S) +OP_OBJS_ASM_V75 += $(subst /v75_asm/,/,$(patsubst $(SRC_DIR)/%,%,$(patsubst %.S,%.o,$(OP_SOURCES_ASM_V75)))) +OP_SOURCES_ASM_V79 += $(wildcard $(OP_SRC_DIR)/v79_asm/*.S) +OP_OBJS_ASM_V79 += $(subst /v79_asm/,/,$(patsubst $(SRC_DIR)/%,%,$(patsubst %.S,%.o,$(OP_SOURCES_ASM_V79)))) + +OP_SOURCES_ASM_ANDROID += $(wildcard $(OP_SRC_DIR)/android_asm/*.S) +OP_OBJS_ASM_ANDROID += $(subst /android_asm/,/,$(patsubst $(SRC_DIR)/%,%,$(patsubst %.S,%.o,$(OP_SOURCES_ASM_ANDROID)))) + + +all: htp_v73 htp_x86 htp_aarch64 + +#============================================================================================================ +# Setup compiler, compiler instructions and linker for x86 +X86_CXX ?= clang++-9 +# Checking if clang++-9 is present. 
If not switch to clang++ +ifeq ($(shell $(X86_CXX) -v 2>&1 | grep -c "clang version"), 0) + X86_CXX := clang++ +endif +X86_LDFLAGS:= -Wl,--whole-archive -L$(X86_LIBNATIVE_RELEASE_DIR)/libnative/lib -lnative -Wl,--no-whole-archive -lpthread +X86_C_FLAGS := -D__HVXDBL__ -I$(X86_LIBNATIVE_RELEASE_DIR)/libnative/include -ffast-math -DUSE_OS_LINUX +X86_CXX_FLAGS = $(COMMON_CXX_FLAGS) $(X86_C_FLAGS) -fomit-frame-pointer -Wno-invalid-offsetof +linux_objs = +#============================================================================================================ +# Setup compiler, compiler instructions and linker for hexagon +HEXAGON_CXX_FLAGS := $(COMMON_CXX_FLAGS) -mhvx -mhvx-length=128B -mhmx -DUSE_OS_QURT -O2 -Wno-reorder -DPREPARE_DISABLED + +HEXAGON_CXX_FLAGS_V68 := $(HEXAGON_CXX_FLAGS) -mv68 -I$(HEXAGON_SDK_ROOT_V68)/rtos/qurt/computev68/include/qurt -I$(HEXAGON_SDK_ROOT_V68)/rtos/qurt/computev68/include/posix -I$(HEXAGON_SDK_ROOT_V68)/incs -I$(HEXAGON_SDK_ROOT_V68)/incs/stddef +HEXAGON_CXX_FLAGS_V69 := $(HEXAGON_CXX_FLAGS) -mv69 -I$(HEXAGON_SDK_ROOT_V69)/rtos/qurt/computev69/include/qurt -I$(HEXAGON_SDK_ROOT_V69)/rtos/qurt/computev69/include/posix -I$(HEXAGON_SDK_ROOT_V69)/incs -I$(HEXAGON_SDK_ROOT_V69)/incs/stddef +HEXAGON_CXX_FLAGS_V73 := $(HEXAGON_CXX_FLAGS) -mv73 -I$(HEXAGON_SDK_ROOT_V73)/rtos/qurt/computev73/include/qurt -I$(HEXAGON_SDK_ROOT_V73)/rtos/qurt/computev73/include/posix -I$(HEXAGON_SDK_ROOT_V73)/incs -I$(HEXAGON_SDK_ROOT_V73)/incs/stddef +HEXAGON_CXX_FLAGS_V75 := $(HEXAGON_CXX_FLAGS) -mv75 -I$(HEXAGON_SDK_ROOT_V75)/rtos/qurt/computev75/include/qurt -I$(HEXAGON_SDK_ROOT_V75)/rtos/qurt/computev75/include/posix -I$(HEXAGON_SDK_ROOT_V75)/incs -I$(HEXAGON_SDK_ROOT_V75)/incs/stddef +HEXAGON_CXX_FLAGS_V79 := $(HEXAGON_CXX_FLAGS) -mv79 -I$(HEXAGON_SDK_ROOT_V79)/rtos/qurt/computev79/include/qurt -I$(HEXAGON_SDK_ROOT_V79)/rtos/qurt/computev79/include/posix -I$(HEXAGON_SDK_ROOT_V79)/incs -I$(HEXAGON_SDK_ROOT_V79)/incs/stddef + +HEXAGON_CXX_V68 := $(HEXAGON_SDK_ROOT_V68)/tools/HEXAGON_Tools/$(HEXAGON_TOOLS_VERSION_V68)/Tools/bin/hexagon-clang++ +HEXAGON_CXX_V69 := $(HEXAGON_SDK_ROOT_V69)/tools/HEXAGON_Tools/$(HEXAGON_TOOLS_VERSION_V69)/Tools/bin/hexagon-clang++ +HEXAGON_CXX_V73 := $(HEXAGON_SDK_ROOT_V73)/tools/HEXAGON_Tools/$(HEXAGON_TOOLS_VERSION_V73)/Tools/bin/hexagon-clang++ +HEXAGON_CXX_V75 := $(HEXAGON_SDK_ROOT_V75)/tools/HEXAGON_Tools/$(HEXAGON_TOOLS_VERSION_V75)/Tools/bin/hexagon-clang++ +HEXAGON_CXX_V79 := $(HEXAGON_SDK_ROOT_V79)/tools/HEXAGON_Tools/$(HEXAGON_TOOLS_VERSION_V79)/Tools/bin/hexagon-clang++ + + +HEX_LDFLAGS = +hexagon_objs = +#============================================================================================================ +# Setup compiler, compiler instructions and linker for aarch64 +AARCH64_C__FLAGS = -D__HVXDBL__ -I$(X86_LIBNATIVE_RELEASE_DIR)/libnative/include -ffast-math -DUSE_OS_LINUX -DANDROID +AARCH64_CXX_FLAGS = $(COMMON_CXX_FLAGS) $(AARCH64_C__FLAGS) -fomit-frame-pointer -Wno-invalid-offsetof -Wno-unused-variable -Wno-unused-parameter -Wno-missing-braces -Wno-sign-compare -Wno-unused-private-field -Wno-unused-variable -Wno-ignored-qualifiers -Wno-missing-field-initializers +ARM_CLANG_OPTS =--target=aarch64-none-linux-android21 --sysroot=$(ANDROID_NDK_ROOT)/toolchains/llvm/prebuilt/linux-x86_64/sysroot -stdlib=libc++ -static-libstdc++ +AARCH64_CXX = $(ANDROID_NDK_ROOT)/toolchains/llvm/prebuilt/linux-x86_64/bin/clang++ $(ARM_CLANG_OPTS) +AARCH64_LDFLAGS = -L$(QNN_TARGET_LIB) -lQnnHtp -lQnnHtpPrepare +aarch64_objs = 
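+# (illustrative) the empty per-target *_objs variables above are populated by
+# the build_objs macro in the next section, roughly:
+#   aarch64-android_objs += build/aarch64-android/EmbeddingOpPackageInterface.o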
+#============================================================================================================
+# Setup targets and goals
+
+htp_x86: X86_BUILD
+
+htp_v68: HEXAGON_BUILD_V68
+
+htp_v69: HEXAGON_BUILD_V69
+
+htp_v73: HEXAGON_BUILD_V73
+
+htp_v75: HEXAGON_BUILD_V75
+
+htp_v79: HEXAGON_BUILD_V79
+
+htp_aarch64: AARCH64_BUILD
+
+AARCH64_BUILD: $(WORK)/aarch64-android/$(LIBRARY_NAME)
+
+HEXAGON_BUILD_V68: $(WORK)/hexagon-v68/$(LIBRARY_NAME)
+
+HEXAGON_BUILD_V69: $(WORK)/hexagon-v69/$(LIBRARY_NAME)
+
+HEXAGON_BUILD_V73: $(WORK)/hexagon-v73/$(LIBRARY_NAME)
+
+HEXAGON_BUILD_V75: $(WORK)/hexagon-v75/$(LIBRARY_NAME)
+
+HEXAGON_BUILD_V79: $(WORK)/hexagon-v79/$(LIBRARY_NAME)
+
+X86_BUILD: $(WORK)/x86_64-linux-clang/$(LIBRARY_NAME)
+
+
+define build_objs =
+ifneq ($(filter $(2),$(SUPPORTED_TARGETS)),)
+$(2)_objs += $(foreach x,$(1),$(WORK)/$(2)/$(x))
+else
+$$(error "Unknown target option provided: $(2): Supported targets are: $(SUPPORTED_TARGETS)")
+endif
+endef
+
+$(eval $(call build_objs,$(OTHER_OBJS),x86_64-linux-clang))
+$(eval $(call build_objs,$(OP_OBJS),x86_64-linux-clang))
+$(eval $(call build_objs,$(OP_OBJS_ASM_X86),x86_64-linux-clang))
+$(eval $(call build_objs,$(OTHER_OBJS),hexagon-v68))
+$(eval $(call build_objs,$(OP_OBJS),hexagon-v68))
+$(eval $(call build_objs,$(OP_OBJS_ASM_V68),hexagon-v68))
+$(eval $(call build_objs,$(OTHER_OBJS),hexagon-v69))
+$(eval $(call build_objs,$(OP_OBJS),hexagon-v69))
+$(eval $(call build_objs,$(OP_OBJS_ASM_V69),hexagon-v69))
+$(eval $(call build_objs,$(OTHER_OBJS),hexagon-v73))
+$(eval $(call build_objs,$(OP_OBJS),hexagon-v73))
+$(eval $(call build_objs,$(OP_OBJS_ASM_V73),hexagon-v73))
+$(eval $(call build_objs,$(OTHER_OBJS),hexagon-v75))
+$(eval $(call build_objs,$(OP_OBJS),hexagon-v75))
+$(eval $(call build_objs,$(OP_OBJS_ASM_V75),hexagon-v75))
+$(eval $(call build_objs,$(OTHER_OBJS),hexagon-v79))
+$(eval $(call build_objs,$(OP_OBJS),hexagon-v79))
+$(eval $(call build_objs,$(OP_OBJS_ASM_V79),hexagon-v79))
+
+$(eval $(call build_objs,$(OTHER_OBJS),aarch64-android))
+$(eval $(call build_objs,$(OP_OBJS),aarch64-android))
+$(eval $(call build_objs,$(OP_OBJS_ASM_ANDROID),aarch64-android))
+
+# x86
+$(WORK)/x86_64-linux-clang $(WORK)/hexagon-v68 $(WORK)/hexagon-v69 $(WORK)/hexagon-v73 $(WORK)/hexagon-v75 $(WORK)/hexagon-v79 $(WORK)/aarch64-android:
+	@mkdir -p $@/ops
+
+$(WORK)/x86_64-linux-clang/%.o: $(SRC_DIR)/%.cpp | $(WORK)/x86_64-linux-clang
+	$(X86_CXX) $(X86_CXX_FLAGS) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@
+
+$(WORK)/x86_64-linux-clang/ops/%.o: $(OP_SRC_DIR)/%.cpp | $(WORK)/x86_64-linux-clang
+	$(X86_CXX) $(X86_CXX_FLAGS) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@
+
+$(WORK)/x86_64-linux-clang/ops/%.o: $(OP_SRC_DIR)/x86_asm/%.S | $(WORK)/x86_64-linux-clang
+	$(X86_CXX) $(X86_CXX_FLAGS) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@
+
+$(WORK)/x86_64-linux-clang/$(LIBRARY_NAME): $(x86_64-linux-clang_objs) | $(HFILES)
+	$(X86_CXX) -fPIC -std=c++17 -g -shared -o $@ $^ $(X86_LDFLAGS)
+
+# v68
+$(WORK)/hexagon-v68/%.o: $(SRC_DIR)/%.cpp | $(WORK)/hexagon-v68
+	$(HEXAGON_CXX_V68) $(HEXAGON_CXX_FLAGS_V68) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@
+
+$(WORK)/hexagon-v68/ops/%.o: $(OP_SRC_DIR)/%.cpp | $(WORK)/hexagon-v68
+	$(HEXAGON_CXX_V68) $(HEXAGON_CXX_FLAGS_V68) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@
+
+$(WORK)/hexagon-v68/ops/%.o: $(OP_SRC_DIR)/v68_asm/%.S | $(WORK)/hexagon-v68
+	$(HEXAGON_CXX_V68) $(HEXAGON_CXX_FLAGS_V68) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@
+
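+# e.g. (illustrative) rebuild just the v68 op objects via the pattern rule:
+#   make build/hexagon-v68/ops/Embedding.o
+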
+$(WORK)/hexagon-v68/$(LIBRARY_NAME): $(hexagon-v68_objs) | $(HFILES) + $(HEXAGON_CXX_V68) -fPIC -std=c++17 -g -shared -o $@ $^ $(HEX_LDFLAGS) + +# v69 +$(WORK)/hexagon-v69/%.o: $(SRC_DIR)/%.cpp | $(WORK)/hexagon-v69 + $(HEXAGON_CXX_V69) $(HEXAGON_CXX_FLAGS_V69) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ + +$(WORK)/hexagon-v69/ops/%.o: $(OP_SRC_DIR)/%.cpp | $(WORK)/hexagon-v69 + $(HEXAGON_CXX_V69) $(HEXAGON_CXX_FLAGS_V69) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ + +$(WORK)/hexagon-v69/ops/%.o: $(OP_SRC_DIR)/v69_asm/%.S | $(WORK)/hexagon-v69 + $(HEXAGON_CXX_V69) $(HEXAGON_CXX_FLAGS_V69) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ + +$(WORK)/hexagon-v69/$(LIBRARY_NAME): $(hexagon-v69_objs) | $(HFILES) + $(HEXAGON_CXX_V69) -fPIC -std=c++17 -g -shared -o $@ $^ $(HEX_LDFLAGS) + +# v73 +$(WORK)/hexagon-v73/%.o: $(SRC_DIR)/%.cpp | $(WORK)/hexagon-v73 + $(HEXAGON_CXX_V73) $(HEXAGON_CXX_FLAGS_V73) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ + +$(WORK)/hexagon-v73/ops/%.o: $(OP_SRC_DIR)/%.cpp | $(WORK)/hexagon-v73 + $(HEXAGON_CXX_V73) $(HEXAGON_CXX_FLAGS_V73) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ + +$(WORK)/hexagon-v73/ops/%.o: $(OP_SRC_DIR)/v73_asm/%.S | $(WORK)/hexagon-v73 + $(HEXAGON_CXX_V73) $(HEXAGON_CXX_FLAGS_V73) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ + +$(WORK)/hexagon-v73/$(LIBRARY_NAME): $(hexagon-v73_objs) | $(HFILES) + $(HEXAGON_CXX_V73) -fPIC -std=c++17 -g -shared -o $@ $^ $(HEX_LDFLAGS) + +#v75 +$(WORK)/hexagon-v75/%.o: $(SRC_DIR)/%.cpp | $(WORK)/hexagon-v75 + $(HEXAGON_CXX_V75) $(HEXAGON_CXX_FLAGS_V75) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ + +$(WORK)/hexagon-v75/ops/%.o: $(OP_SRC_DIR)/%.cpp | $(WORK)/hexagon-v75 + $(HEXAGON_CXX_V75) $(HEXAGON_CXX_FLAGS_V75) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ + +$(WORK)/hexagon-v75/ops/%.o: $(OP_SRC_DIR)/v75_asm/%.S | $(WORK)/hexagon-v75 + $(HEXAGON_CXX_V75) $(HEXAGON_CXX_FLAGS_V75) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ + +$(WORK)/hexagon-v75/$(LIBRARY_NAME): $(hexagon-v75_objs) | $(HFILES) + $(HEXAGON_CXX_V75) -fPIC -std=c++17 -g -shared -o $@ $^ $(HEX_LDFLAGS) + +#v79 +$(WORK)/hexagon-v79/%.o: $(SRC_DIR)/%.cpp | $(WORK)/hexagon-v79 + $(HEXAGON_CXX_V79) $(HEXAGON_CXX_FLAGS_V79) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ + +$(WORK)/hexagon-v79/ops/%.o: $(OP_SRC_DIR)/%.cpp | $(WORK)/hexagon-v79 + $(HEXAGON_CXX_V79) $(HEXAGON_CXX_FLAGS_V79) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ + +$(WORK)/hexagon-v79/ops/%.o: $(OP_SRC_DIR)/v79_asm/%.S | $(WORK)/hexagon-v79 + $(HEXAGON_CXX_V79) $(HEXAGON_CXX_FLAGS_V79) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ + +$(WORK)/hexagon-v79/$(LIBRARY_NAME): $(hexagon-v79_objs) | $(HFILES) + $(HEXAGON_CXX_V79) -fPIC -std=c++17 -g -shared -o $@ $^ $(HEX_LDFLAGS) + + + +# aarch64 +$(WORK)/aarch64-android/%.o: $(SRC_DIR)/%.cpp | $(WORK)/aarch64-android + $(AARCH64_CXX) $(AARCH64_CXX_FLAGS) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ + +$(WORK)/aarch64-android/ops/%.o: $(OP_SRC_DIR)/%.cpp | $(WORK)/aarch64-android + $(AARCH64_CXX) $(AARCH64_CXX_FLAGS) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ + +$(WORK)/aarch64-android/ops/%.o: $(OP_SRC_DIR)/android_asm/%.S | $(WORK)/aarch64-android + $(AARCH64_CXX) $(AARCH64_CXX_FLAGS) -DTHIS_PKG_NAME=$(PACKAGE_NAME) -MMD -c $< -o $@ + +$(WORK)/aarch64-android/$(LIBRARY_NAME): $(aarch64-android_objs) | $(HFILES) + $(AARCH64_CXX) -fPIC -std=c++17 -g -shared -o $@ $^ $(AARCH64_LDFLAGS) + +clean: + -rm -rf $(WORK) + +.PHONY: all clean diff --git 
a/examples/qualcomm/oss_scripts/llama/custom_ops/embedding/config/EmbeddingOpPackageHtp.xml b/examples/qualcomm/oss_scripts/llama/custom_ops/embedding/config/EmbeddingOpPackageHtp.xml
new file mode 100644
index 00000000000..c6a7696fb28
--- /dev/null
+++ b/examples/qualcomm/oss_scripts/llama/custom_ops/embedding/config/EmbeddingOpPackageHtp.xml
@@ -0,0 +1,90 @@
+
+
+
+
+
+ Embedding
+
+ implementation of torch.nn.Embedding
+
+
+
+
+
+ input
+
+ data table
+
+ true
+ BACKEND_SPECIFIC
+
+ 2D
+ a tensor of 2 dimensions
+
+
+
+
+ indices
+
+ indices to extract data
+
+ true
+ QNN_DATATYPE_INT_32
+
+ ND
+ a tensor of N dimensions
+
+
+
+
+ output
+
+ output activation
+
+ true
+ BACKEND_SPECIFIC
+
+ ND
+ a tensor of N dimensions
+
+
+
+
+ HTP
+
+
+
+
+
+
+ Embedding
+
+
+
+
+ Embedding
+
+
+ input
+ QNN_DATATYPE_SFIXED_POINT_8
+
+
+
+ output
+ QNN_DATATYPE_SFIXED_POINT_8
+
+
+
+
+
diff --git a/examples/qualcomm/oss_scripts/llama/custom_ops/embedding/op.py b/examples/qualcomm/oss_scripts/llama/custom_ops/embedding/op.py
new file mode 100644
index 00000000000..98ee5e6086e
--- /dev/null
+++ b/examples/qualcomm/oss_scripts/llama/custom_ops/embedding/op.py
@@ -0,0 +1,66 @@
+# Copyright (c) Qualcomm Innovation Center, Inc.
+# All rights reserved
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import torch
+from torch.library import impl, Library
+
+op_lib = Library("qaisw", "DEF")
+op_lib.define("embedding(Tensor table, Tensor indices) -> Tensor")
+
+
+@impl(op_lib, "embedding", dispatch_key="CompositeExplicitAutograd")
+def embedding_impl(table: torch.Tensor, indices: torch.Tensor) -> torch.Tensor:
+    return table[indices]
+
+
+class CustomEmbedding(torch.nn.Module):
+    def __init__(self, weight):
+        super(CustomEmbedding, self).__init__()
+        self.weight = weight
+
+    def forward(self, indices):
+        return torch.ops.qaisw.embedding.default(self.weight, indices)
+
+
+def custom_embedding_annotation(gm: torch.fx.GraphModule) -> None:
+    import itertools
+    from executorch.backends.qualcomm.quantizer.annotators import (
+        _is_annotated,
+        QUANT_ANNOTATION_KEY,
+    )
+    from executorch.backends.qualcomm.quantizer.qconfig import (
+        get_16a4w_qnn_ptq_config,
+    )
+    from torch.ao.quantization.quantizer import (
+        QuantizationAnnotation,
+        SharedQuantizationSpec,
+    )
+    from torch.fx import Node
+    from torch.fx.passes.utils.source_matcher_utils import get_source_partitions
+
+    custom_partitions = get_source_partitions(
+        gm.graph, [torch.ops.qaisw.embedding.default]
+    )
+    custom_partitions = list(itertools.chain(*custom_partitions.values()))
+    quantization_config = get_16a4w_qnn_ptq_config()
+    for custom_partition in custom_partitions:
+        if len(custom_partition.output_nodes) > 1:
+            raise ValueError("custom partition has more than one output node")
+        custom_node = custom_partition.output_nodes[0]
+        if (
+            custom_node.op != "call_function"
+            or custom_node.target != torch.ops.qaisw.embedding.default
+        ):
+            raise ValueError(f"{custom_node} is not a custom operator")
+        # skip annotation if it is already annotated
+        if _is_annotated([custom_node]):
+            continue
+
+        input_qspec_map = {}
+        input_act = custom_node.args[0]
+        assert isinstance(input_act, Node)
+        input_spec = quantization_config.weight
+        input_qspec_map[input_act] = input_spec
+
+        custom_node.meta[QUANT_ANNOTATION_KEY] = QuantizationAnnotation(
+            input_qspec_map=input_qspec_map,
+            output_qspec=SharedQuantizationSpec((input_act, custom_node)),
+            _annotated=True,
+        )
diff --git
a/examples/qualcomm/oss_scripts/llama/custom_ops/embedding/src/EmbeddingOpPackageInterface.cpp b/examples/qualcomm/oss_scripts/llama/custom_ops/embedding/src/EmbeddingOpPackageInterface.cpp
new file mode 100644
index 00000000000..b9109c6b2f2
--- /dev/null
+++ b/examples/qualcomm/oss_scripts/llama/custom_ops/embedding/src/EmbeddingOpPackageInterface.cpp
@@ -0,0 +1,293 @@
+//=============================================================================
+//
+// Copyright (c) Qualcomm Innovation Center, Inc.
+// All rights reserved
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+//
+//============================================================================
+
+#include "HTP/QnnHtpCommon.h"
+#include "HTP/core/constraints.h"
+#include "HTP/core/op_package_feature_support.h"
+#include "HTP/core/op_register_ext.h"
+#include "HTP/core/optimize.h"
+#include "HTP/core/simple_reg.h"
+#include "HTP/core/unique_types.h"
+#include "QnnOpPackage.h"
+#include "QnnSdkBuildId.h"
+
+DEFINE_UNIQ_TY()
+BEGIN_PKG_OPS_OPTS_LIST()
+
+/** Note that the order of declarations given here defines the order in which ops and graph optimizations are
+ *  registered to the HTP Core.
+ *  Append the latest OpName at the bottom
+ */
+DECLARE_PKG_OPS_OPTS_LIST(PKG_Embedding)
+
+END_PKG_OPS_OPTS_LIST()
+
+// op package info
+static constexpr auto sg_packageName = THIS_PKG_NAME_STR; // package name passed in as compile flag
+
+static std::array<const char*, 1> sg_opNames{{"Embedding"}};
+
+static Qnn_ApiVersion_t sg_sdkApiVersion = QNN_HTP_API_VERSION_INIT;
+static QnnOpPackage_Info_t sg_packageInfo = QNN_OP_PACKAGE_INFO_INIT;
+
+// global data
+static QnnOpPackage_GlobalInfrastructure_t sg_globalInfra =
+    nullptr; // global infrastructure not in use for now
+static bool sg_packageInitialized = false;
+
+/*
+ * user provided logging call back function
+ * currently only supported on linux x86-64 and nonrpc versions
+ * typedef void (*QnnLog_Callback_t)(const char* fmt,
+ *                                   QnnLog_Level_t level,
+ *                                   uint64_t timestamp,
+ *                                   va_list args);
+ * usage: if(sg_logInitialized && level <= sg_maxLogLevel)
+ *            sg_logCallback(fmt, level, timestamp, args);
+ *
+ * for cross rpc versions, skel side user provided logging call back function
+ * can be defined as part of op packages.
maximal log level sg_maxLogLevel
+ * can be set by Qnn_ErrorHandle_t EmbeddingOpPackageLogSetLevel(QnnLog_Level_t maxLogLevel)
+ */
+/*
+ * for alternative logging method provided by HTP core, please refer to log.h
+ */
+static QnnLog_Callback_t sg_logCallback =
+    nullptr; // user provided call back function pointer for logging
+static QnnLog_Level_t sg_maxLogLevel =
+    (QnnLog_Level_t)0; // maximal log level used in user provided logging
+static bool sg_logInitialized =
+    false; // tracks whether user provided logging method has been initialized
+
+
+/*
+* op initialization
+* needs to be global in the package
+* one initialization per package before any op definitions
+* syntax: INIT_PACKAGE_OP_DEF()
+*/
+INIT_PACKAGE_OP_DEF()
+
+/*
+* optimization initialization
+* needs to be global in the package
+* one initialization per package before any optimization definitions
+* syntax: INIT_PACKAGE_OPTIMIZATION_DEF()
+*/
+INIT_PACKAGE_OPTIMIZATION_DEF()
+
+/*
+ * op parameter order initialization
+ * needs to be global in the package
+ * one initialization per package before any op parameter order definitions
+ * syntax: INIT_PACKAGE_PARAM_ORDER_DEF()
+ */
+INIT_PACKAGE_PARAM_ORDER_DEF()
+
+/*
+ * axis parameter name list
+ * optional
+ * needs to be global in the package
+ * one list per package
+ * for listing axis parameter names passed into Qnn_AddNode API
+ * HTP backend auto-adjusts values in axis parameters based on HTP backfilling
+ * note: HTP backend backfills tensor dimensions to 4 dimensions
+ * syntax: LIST_PACKAGE_AXIS_PARAMS(...)
+ * e.g. LIST_PACKAGE_AXIS_PARAMS("Axis", "AXIS", "axis")
+ */
+// LIST_PACKAGE_AXIS_PARAMS()
+
+/*
+ * per-channel quantized op name list
+ * optional
+ * needs to be global in the package
+ * one list per package
+ * for listing op names which support per-channel quantization
+ * per-axis quantization info of an op is embedded in axisScaleOffsetEncoding
+ * inside Qnn_Tensor_t types
+ * HTP backend only supports per-channel scale ops
+ * i.e. along last dimension, offset is always zero
+ * if an op name is marked as having per-channel scale support, and in
+ * QNN_AddNode, at least one input, parameter, or output has
+ * QNN_QUANTIZATION_ENCODING_AXIS_SCALE_OFFSET type:
+ * then:
+ *   HTP backend will pass to op implementation function the following:
+ *     output(s), input(s), parameter(s),
+ *     outputPerChannelScale(s), inputPerChannelScale(s), paramPerChannelScale(s)
+ *
+ * optimization rules can be used to remove extra perChannelScale tensors
+ *
+ * syntax: LIST_PACKAGE_PER_CHANNEL_QUANTIZED_OPS(...)
+ * e.g. LIST_PACKAGE_PER_CHANNEL_QUANTIZED_OPS(sg_op1Name, sg_op2Name)
+ */
+
+// LIST_PACKAGE_PER_CHANNEL_QUANTIZED_OPS()
+
+/*
+* Declare and define the special initialize function for HTP Backend to load
+*/
+INIT_PKG_CORE_INIT_FUNC()
+
+/* op package API's */
+
+Qnn_ErrorHandle_t EmbeddingOpPackageInit(QnnOpPackage_GlobalInfrastructure_t infrastructure) {
+  if (sg_packageInitialized) return QNN_OP_PACKAGE_ERROR_LIBRARY_ALREADY_INITIALIZED;
+
+  /*
+   * op parameter order registration
+   * registers all defined op parameter orders in the package
+   * syntax: REGISTER_PACKAGE_PARAM_ORDERS()
+   */
+  REGISTER_PACKAGE_PARAM_ORDERS()
+
+  /*
+   * op axis parameter name registration
+   * registers all axis parameter names in the package
+   * used with LIST_PACKAGE_AXIS_PARAMS(...)
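+   * (illustrative note) Embedding takes no axis parameter, so this call
+   *   registers nothing here; an op with an "axis" attribute would pair it
+   *   with LIST_PACKAGE_AXIS_PARAMS("axis") declared at file scope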
+   * syntax: REGISTER_PACKAGE_AXIS_PARAMS()
+   */
+  REGISTER_PACKAGE_AXIS_PARAMS()
+
+  /*
+   * per-channel scale op name registration
+   * registers all per-channel scale op names in the package
+   * used with LIST_PACKAGE_PER_CHANNEL_QUANTIZED_OPS(...)
+   * syntax: REGISTER_PACKAGE_PER_CHANNEL_QUANTIZED_OPS()
+   */
+  REGISTER_PACKAGE_PER_CHANNEL_QUANTIZED_OPS()
+
+  sg_globalInfra = infrastructure;
+  sg_packageInitialized = true;
+  return QNN_SUCCESS;
+}
+
+Qnn_ErrorHandle_t EmbeddingOpPackageGetInfo(const QnnOpPackage_Info_t** info) {
+  if (!sg_packageInitialized) return QNN_OP_PACKAGE_ERROR_LIBRARY_NOT_INITIALIZED;
+  if (!info) return QNN_OP_PACKAGE_ERROR_INVALID_INFO;
+
+  sg_packageInfo = QNN_OP_PACKAGE_INFO_INIT;
+  sg_packageInfo.packageName = sg_packageName;
+  sg_packageInfo.operationNames = sg_opNames.data();
+  sg_packageInfo.numOperations = sg_opNames.size();
+  sg_packageInfo.sdkBuildId = QNN_SDK_BUILD_ID;
+  sg_packageInfo.sdkApiVersion = &sg_sdkApiVersion;
+
+  *info = &sg_packageInfo;
+  return QNN_SUCCESS;
+}
+
+Qnn_ErrorHandle_t EmbeddingOpPackageLogInitialize(QnnLog_Callback_t callback, QnnLog_Level_t maxLogLevel) {
+  if (sg_logInitialized) return QNN_OP_PACKAGE_ERROR_LIBRARY_ALREADY_INITIALIZED;
+  if (!callback) return QNN_LOG_ERROR_INVALID_ARGUMENT;
+  if (maxLogLevel < QNN_LOG_LEVEL_ERROR) return QNN_LOG_ERROR_INVALID_ARGUMENT;
+  sg_logCallback = callback;
+  sg_maxLogLevel = maxLogLevel;
+  sg_logInitialized = true;
+  return QNN_SUCCESS;
+}
+
+Qnn_ErrorHandle_t EmbeddingOpPackageLogSetLevel(QnnLog_Level_t maxLogLevel) {
+  if (maxLogLevel < QNN_LOG_LEVEL_ERROR) return QNN_LOG_ERROR_INVALID_ARGUMENT;
+  sg_maxLogLevel = maxLogLevel;
+  return QNN_SUCCESS;
+}
+
+Qnn_ErrorHandle_t EmbeddingOpPackageLogTerminate() {
+  if (!sg_logInitialized) return QNN_OP_PACKAGE_ERROR_LIBRARY_NOT_INITIALIZED;
+  sg_logCallback = nullptr;
+  sg_maxLogLevel = (QnnLog_Level_t)0;
+  sg_logInitialized = false;
+  return QNN_SUCCESS;
+}
+
+Qnn_ErrorHandle_t EmbeddingOpPackageValidateOpConfig(Qnn_OpConfig_t opConfig) {
+  if (std::string(sg_packageName) != opConfig.v1.packageName) {
+    return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE;
+  }
+
+  /* auto-generated validation code below
+   * Check if op config type matches any registered ops
+   * If a match is found, check number of inputs, outputs and params
+   */
+  if (std::string(opConfig.v1.typeName) == "Embedding") {
+    // check function signature
+    if (opConfig.v1.numOfParams != 0 || opConfig.v1.numOfInputs != 2 || opConfig.v1.numOfOutputs != 1) {
+      return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE;
+    }
+    // check implementation constraints
+    if (opConfig.v1.inputTensors[0].v1.rank != 2 ||
+        opConfig.v1.inputTensors[1].v1.rank > 3) {
+      return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE;
+    }
+    // check IO tensor information
+    if (opConfig.v1.inputTensors[0].v1.type != QNN_TENSOR_TYPE_STATIC ||
+        opConfig.v1.inputTensors[0].v1.dataType != QNN_DATATYPE_SFIXED_POINT_8 ||
+        opConfig.v1.inputTensors[1].v1.dataType != QNN_DATATYPE_INT_32 ||
+        opConfig.v1.inputTensors[0].v1.dataType != opConfig.v1.outputTensors[0].v1.dataType) {
+      return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE;
+    }
+  } else {
+    return QNN_OP_PACKAGE_ERROR_VALIDATION_FAILURE;
+  }
+
+  /*
+   * additional validation code here
+   * */
+
+  return QNN_SUCCESS;
+}
+
+/* The following three functions in this comment are not called by HTP backend for now,
+ * no auto-generated implementations are created. Users should see example for full function signatures.
+ * (version 1.3.0) Qnn_ErrorHandle_t EmbeddingOpPackageCreateKernels (QnnOpPackage_GraphInfrastructure_t
+ *     graphInfrastructure, QnnOpPackage_Node_t node, QnnOpPackage_Kernel_t** kernels, uint32_t*
+ *     numKernels)
+ * (version 1.3.0) Qnn_ErrorHandle_t EmbeddingOpPackageFreeKernels (QnnOpPackage_Kernel_t* kernels)
+ *
+ * (version 1.4.0) Qnn_ErrorHandle_t EmbeddingOpPackageCreateOpImpl (QnnOpPackage_GraphInfrastructure_t
+ *     graphInfrastructure, QnnOpPackage_Node_t node, QnnOpPackage_OpImpl_t* opImpl)
+ * (version 1.4.0) Qnn_ErrorHandle_t EmbeddingOpPackageFreeOpImpl (QnnOpPackage_OpImpl_t opImpl)
+ */
+
+Qnn_ErrorHandle_t EmbeddingOpPackageTerminate() {
+  if (!sg_packageInitialized) return QNN_OP_PACKAGE_ERROR_LIBRARY_NOT_INITIALIZED;
+
+  sg_globalInfra = nullptr;
+  sg_packageInitialized = false;
+  return QNN_SUCCESS;
+}
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* latest version */
+Qnn_ErrorHandle_t EmbeddingOpPackageInterfaceProvider(QnnOpPackage_Interface_t* interface) {
+  if (!interface) return QNN_OP_PACKAGE_ERROR_INVALID_ARGUMENT;
+  interface->interfaceVersion = {1, 4, 0};
+  interface->v1_4.init = EmbeddingOpPackageInit;
+  interface->v1_4.terminate = EmbeddingOpPackageTerminate;
+  interface->v1_4.getInfo = EmbeddingOpPackageGetInfo;
+  interface->v1_4.validateOpConfig = EmbeddingOpPackageValidateOpConfig;
+  interface->v1_4.createOpImpl = nullptr;
+  interface->v1_4.freeOpImpl = nullptr;
+  interface->v1_4.logInitialize = EmbeddingOpPackageLogInitialize;
+  interface->v1_4.logSetLevel = EmbeddingOpPackageLogSetLevel;
+  interface->v1_4.logTerminate = EmbeddingOpPackageLogTerminate;
+  return QNN_SUCCESS;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+
diff --git a/examples/qualcomm/oss_scripts/llama/custom_ops/embedding/src/ops/Embedding.cpp b/examples/qualcomm/oss_scripts/llama/custom_ops/embedding/src/ops/Embedding.cpp
new file mode 100644
index 00000000000..ee45664d094
--- /dev/null
+++ b/examples/qualcomm/oss_scripts/llama/custom_ops/embedding/src/ops/Embedding.cpp
@@ -0,0 +1,111 @@
+//=============================================================================
+//
+// Copyright (c) Qualcomm Innovation Center, Inc.
+// All rights reserved
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+//
+//============================================================================
+
+#include "HTP/core/constraints.h"
+#include "HTP/core/op_package_feature_support.h"
+#include "HTP/core/op_register_ext.h"
+#include "HTP/core/optimize.h"
+#include "QnnOpPackage.h"
+#include "HTP/core/simple_reg.h"
+
+BEGIN_PKG_OP_DEFINITION(PKG_Embedding);
+
+// op execute function declarations
+template <typename TensorType, typename TensorType1>
+int embeddingImpl(TensorType &output, const TensorType1 &input, const Int32Tensor_TCM &indices);
+
+/*
+ * method for defining op with specified cost value (one of GLACIAL, SNAIL, FAST, FREE)
+ * and provided flags
+ * syntax: DEF_PACKAGE_OP_AND_COST_AND_FLAGS(F,OP,COST,...)
+
+#ifdef __cplusplus
+}
+#endif
+
diff --git a/examples/qualcomm/oss_scripts/llama/custom_ops/embedding/src/ops/Embedding.cpp b/examples/qualcomm/oss_scripts/llama/custom_ops/embedding/src/ops/Embedding.cpp
new file mode 100644
index 00000000000..ee45664d094
--- /dev/null
+++ b/examples/qualcomm/oss_scripts/llama/custom_ops/embedding/src/ops/Embedding.cpp
@@ -0,0 +1,111 @@
+//=============================================================================
+//
+// Copyright (c) Qualcomm Innovation Center, Inc.
+// All rights reserved
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+//
+//=============================================================================
+
+#include "HTP/core/constraints.h"
+#include "HTP/core/op_package_feature_support.h"
+#include "HTP/core/op_register_ext.h"
+#include "HTP/core/optimize.h"
+#include "QnnOpPackage.h"
+#include "HTP/core/simple_reg.h"
+
+BEGIN_PKG_OP_DEFINITION(PKG_Embedding);
+
+// op execute function declarations
+template <typename TensorType, typename TensorType1>
+int embeddingImpl(TensorType& output, const TensorType1& input, const Int32Tensor_TCM& indices);
+
+/*
+ * op definition
+ * syntax: DEF_PACKAGE_OP(F,OP)
+ * to attach a cost value (one of GLACIAL, SNAIL, FAST, FREE) and flags,
+ * use DEF_PACKAGE_OP_AND_COST_AND_FLAGS(F,OP,COST,...) instead
+ * can use zero or more flags, FLAG options are IS_CONST, INHIBIT_CONST_PROP,
+ * RESOURCE_HVX, RESOURCE_HMX (not supported in external op packages)
+ */
+DEF_PACKAGE_OP((embeddingImpl<Tensor, Tensor>), "Embedding")
+
+DEF_TENSOR_PROPERTIES(
+    Op("Embedding", "input", "indices"),
+    Flat("input", "indices")
+)
+
+/*
+ * optimization definitions
+ * need to be global in the package
+ * one definition per optimization
+ * syntax: DEF_PACKAGE_OPTIMIZATION(PRIORITY,MATCHCODE,CONSTRAINTCODE,REPLACECODE)
+ * PRIORITY predefined values include EARLY(2000), MIDDLE(3000), LATE(4000)
+ * HTP core provides some replacement functions for op package to use
+ * for more information about optimization rules, please refer to HTP core documentations
+ */
+DEF_PACKAGE_OPTIMIZATION(
+    PRE_TRANSLATE,
+    Op(
+        "Embedding",
+        "table",
+        LET("InputOp",
+            Op(FROM_DEFAULT_PACKAGE("*Input"), "indices", "original", "effective")
+        )
+    ),
+    AND(
+        IS_INT32("InputOp"),
+        IS_SHAPE_1x1x1xd("InputOp"),
+        EQ(DIM_HEIGHT("table"), 1),
+        EQ(DIM_BATCHES("table"), 1),
+        EQ(DIM_WIDTH("*"), DIM_DEPTH("InputOp")),
+        EQ(DIM_DEPTH("*"), DIM_DEPTH("table")),
+        SAME_SHAPE("original", "effective"),
+        SAME_SHAPE("original", "InputOp"),
+        NE(RANK_OF("InputOp"), 5),
+        NE(RANK_OF("*"), 5),
+        IS_QUINT8("table")
+    ),
+    Op(
+        FROM_DEFAULT_PACKAGE("*InputGather2DDMAQuant"),
+        "table",
+        "indices",
+        gen_Shape(1, 1, DIM_DEPTH("InputOp"), 1),
+        gen_Shape(1, 1, DIM_DEPTH("InputOp"), 1),
+        gen_Shape(0, 0, 0, 0),
+        gen_ShapeOf("*"),
+        gen_Shape(0, 0, 0, 0)
+    )
+)
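+
+/*
+ * In effect, the rule above rewrites an "Embedding" op whose int32 indices
+ * arrive through the default package's *Input op with shape (1, 1, 1, d), and
+ * whose quint8 table has batch and height equal to 1, into the default
+ * package's *InputGather2DDMAQuant op so the gather runs through the
+ * HTP-optimized path. The reference embeddingImpl below is then only a
+ * fallback definition.
+ */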
+
+/* execute functions for ops */
+template <typename TensorType, typename TensorType1>
+int embeddingImpl(TensorType& output, const TensorType1& input, const Int32Tensor_TCM& indices) {
+  // NOTE: this implementation is not intended to be used
+  // op should be replaced with HTP optimized version
+  // input dim: (1, 1, n, d)
+  // indices dim: (1, m, k, l)
+  // output dim: (m, k, l, d)
+  size_t out_dims[4] = {indices.dim(1), indices.dim(2), indices.dim(3), input.dim(3)};
+  output.set_dims(out_dims);
+  uint8_t* out_ptr = (uint8_t*)output.get_raw_addr(0, 0, 0, 0);
+  const uint8_t* in_base = (const uint8_t*)input.get_raw_addr(0, 0, 0, 0);
+  const int32_t* indices_base = (const int32_t*)indices.get_raw_addr(0, 0, 0, 0);
+  size_t indices_len = 1, input_stride = input.dim(3);
+  for (size_t i = 0; i < 4; ++i) {
+    indices_len *= indices.dim(i);
+  }
+  for (size_t i = 0; i < indices_len; ++i) {
+    memcpy(out_ptr, in_base + indices_base[i] * input_stride, input_stride);
+    out_ptr += input_stride;
+  }
+  return GraphStatus::Success;
+}
+
+/* At the bottom of the op file, call END_PKG_OP_DEFINITION(<name>),
+   where <name> is as BEGIN_PKG_OP_DEFINITION(<name>)
+*/
+END_PKG_OP_DEFINITION(PKG_Embedding);
diff --git a/examples/qualcomm/oss_scripts/llama/llama.py b/examples/qualcomm/oss_scripts/llama/llama.py
index 0829d99d57a..abe27a357b9 100755
--- a/examples/qualcomm/oss_scripts/llama/llama.py
+++ b/examples/qualcomm/oss_scripts/llama/llama.py
@@ -25,13 +25,21 @@
 from executorch.backends.qualcomm.partition.qnn_partitioner import QnnPartitioner
 from executorch.backends.qualcomm.quantizer.custom_annotation import (
+    annotate_linear_16a4w_in_affine_layer,
     annotate_linear_16a8w_in_affine_layer,
     annotate_matmul_16a8w,
     annotate_prefill_kv_output,
 )
 from executorch.backends.qualcomm.quantizer.quantizer import QuantDtype
-from executorch.backends.qualcomm.serialization.qc_schema import QcomChipset
+from executorch.backends.qualcomm.serialization.qc_schema import (
+    HtpArch,
+    QcomChipset,
+    QnnExecuTorchOpPackageInfo,
+    QnnExecuTorchOpPackageOptions,
+    QnnExecuTorchOpPackagePlatform,
+    QnnExecuTorchOpPackageTarget,
+)
 from executorch.backends.qualcomm.serialization.qc_schema_serialize import (
     flatbuffer_to_option,
@@ -49,6 +57,7 @@
     generate_multi_graph_program,
     generate_qnn_executorch_compiler_spec,
     get_capture_program_passes,
+    get_soc_to_arch_map,
     get_soc_to_chipset_map,
     update_spill_fill_size,
 )
@@ -62,6 +71,10 @@
     LlamaModel,
     ModelArgs,
 )
+from executorch.examples.qualcomm.oss_scripts.llama.custom_ops.embedding.op import (
+    CustomEmbedding,
+    custom_embedding_annotation,
+)
 from executorch.examples.qualcomm.utils import (
     make_output_dir,
     make_quantizer,
@@ -414,6 +427,7 @@ def lowering_modules(
         passes_job=OrderedDict(),
         shared_buffer=False,
         verbose=False,
+        op_package_options=None,
     ):
         executorch_config = ExecutorchBackendConfig(
             # For shared buffer, user must pass the memory address
@@ -435,6 +449,7 @@ def lowering_modules(
             soc_model=soc_model,
             backend_options=backend_options,
             shared_buffer=shared_buffer,
+            op_package_options=op_package_options,
         )
         skip_node_op_set = {"llama.fallback.default"}
         partitioner = QnnPartitioner(
@@ -480,6 +495,74 @@ def get_quant_attrs(self):
         return self.quant_attrs
 
 
+def prepare_op_package(
+    workspace: str, op_package_dir: str, arch: HtpArch, build_op_package: bool
+):
+    if build_op_package:
+        cur_env = os.environ.copy()
+        cur_env["PACKAGE_NAME"] = "Embedding"
+
+        def _run(cmd, cwd=None):
+            subprocess.run(cmd, stdout=sys.stdout, cwd=cwd, check=True, env=cur_env)
+
+        _run(["rm", "-rf", "build"], cwd=op_package_dir)
+        _run(["make", "htp_x86", "htp_aarch64", f"htp_v{arch}"], cwd=op_package_dir)
+        _run(
+            [
+                "cp",
+                f"{op_package_dir}/build/hexagon-v{arch}/libQnnEmbedding.so",
+                f"{op_package_dir}/build/hexagon-v{arch}/libQnnEmbedding_HTP.so",
+            ]
+        )
+
+    op_package_paths = [
+        f"{op_package_dir}/build/hexagon-v{arch}/libQnnEmbedding_HTP.so",
+        f"{op_package_dir}/build/aarch64-android/libQnnEmbedding.so",
+    ]
+
+    op_package_infos_HTP = QnnExecuTorchOpPackageInfo()
+    op_package_infos_HTP.interface_provider = "EmbeddingOpPackageInterfaceProvider"
+    op_package_infos_HTP.op_package_name = "Embedding"
+    op_package_infos_HTP.op_package_path = f"{workspace}/libQnnEmbedding_HTP.so"
+    op_package_infos_HTP.target = QnnExecuTorchOpPackageTarget.HTP
+    op_package_infos_HTP.custom_op_name = "qaisw.embedding.default"
+    op_package_infos_HTP.qnn_op_type_name = "Embedding"
+    op_package_infos_HTP.platform = QnnExecuTorchOpPackagePlatform.AARCH64_ANDROID
+
+    op_package_infos_aarch64_CPU = QnnExecuTorchOpPackageInfo()
+    op_package_infos_aarch64_CPU.interface_provider = (
+        "EmbeddingOpPackageInterfaceProvider"
+    )
+    op_package_infos_aarch64_CPU.op_package_name = "Embedding"
+    op_package_infos_aarch64_CPU.op_package_path = f"{workspace}/libQnnEmbedding.so"
+    op_package_infos_aarch64_CPU.target = QnnExecuTorchOpPackageTarget.CPU
+    op_package_infos_aarch64_CPU.custom_op_name = "qaisw.embedding.default"
+    op_package_infos_aarch64_CPU.qnn_op_type_name = "Embedding"
+    op_package_infos_aarch64_CPU.platform = (
+        QnnExecuTorchOpPackagePlatform.AARCH64_ANDROID
+    )
+
+    op_package_infos_x86_CPU = QnnExecuTorchOpPackageInfo()
+    op_package_infos_x86_CPU.interface_provider = "EmbeddingOpPackageInterfaceProvider"
+    op_package_infos_x86_CPU.op_package_name = "Embedding"
+    op_package_infos_x86_CPU.op_package_path = (
+        f"{op_package_dir}/build/x86_64-linux-clang/libQnnEmbedding.so"
+    )
+    op_package_infos_x86_CPU.target = QnnExecuTorchOpPackageTarget.CPU
+    op_package_infos_x86_CPU.custom_op_name = "qaisw.embedding.default"
+    op_package_infos_x86_CPU.qnn_op_type_name = "Embedding"
+    op_package_infos_x86_CPU.platform = QnnExecuTorchOpPackagePlatform.X86_64
+    op_package_options = QnnExecuTorchOpPackageOptions()
+    op_package_options.op_package_infos = [
+        op_package_infos_x86_CPU,
+        op_package_infos_aarch64_CPU,
+        op_package_infos_HTP,
+    ]
+
+    return op_package_options, op_package_paths
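+
+# Note on the three QnnExecuTorchOpPackageInfo entries above: the X86_64 CPU
+# entry appears to serve host-side compilation and validation (its library is
+# not included in op_package_paths, which are pushed to the device), while the
+# AARCH64_ANDROID CPU and HTP entries are consumed on device. All three expose
+# the same "Embedding" QNN op type behind the torch custom op
+# "qaisw.embedding.default".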
"Embedding" + op_package_infos_x86_CPU.platform = QnnExecuTorchOpPackagePlatform.X86_64 + op_package_options = QnnExecuTorchOpPackageOptions() + op_package_options.op_package_infos = [ + op_package_infos_x86_CPU, + op_package_infos_aarch64_CPU, + op_package_infos_HTP, + ] + + return op_package_options, op_package_paths + + def compile(args, pte_filename, tokenizer): os.makedirs(args.artifact, exist_ok=True) start_ts = time.time() @@ -555,6 +638,15 @@ def compile(args, pte_filename, tokenizer): if getattr(layer.feed_forward, "prepare_feedfoward_conv", None): layer.feed_forward.prepare_feedfoward_conv() + op_package_options, op_package_paths = None, None + if args.use_custom_embedding: + op_package_options, op_package_paths = prepare_op_package( + f"/data/local/tmp/{getpass.getuser()}/executorch/single_llama", + f"{os.path.dirname(os.path.abspath(__file__))}/custom_ops/embedding", + get_soc_to_arch_map()[args.model], + True, + ) + use_fp16 = True fixed_point_type = {"kv_type": torch.float32, "io_type": torch.float32} if args.ptq: @@ -589,6 +681,11 @@ def compile(args, pte_filename, tokenizer): passes_job[ConstantI64toI32][QCOM_PASS_ARGS_KWARGS_DEFAULTS_KEY][ "skip_node" ] = {"tokens"} + elif args.use_custom_embedding: + llama_instance_list[i].tok_embeddings = CustomEmbedding( + llama_instance_list[i].tok_embeddings.weight + ) + llama_instance_list[i] = convert_linear_to_conv2d(llama_instance_list[i]) llama_instance_list[i] = SingleLlama( llama_instance_list[i].eval(), pte_filename @@ -597,7 +694,12 @@ def compile(args, pte_filename, tokenizer): if args.ptq: start_quantize_ts = time.time() custom_annotations = (annotate_matmul_16a8w,) - if args.llama_model == "stories110m": + if args.use_custom_embedding: + custom_annotations = custom_annotations + ( + custom_embedding_annotation, + annotate_linear_16a4w_in_affine_layer, + ) + elif args.llama_model == "stories110m": custom_annotations = custom_annotations + ( annotate_linear_16a8w_in_affine_layer, ) @@ -640,6 +742,7 @@ def compile(args, pte_filename, tokenizer): num_sharding=args.num_sharding, passes_job=passes_job, shared_buffer=args.shared_buffer, + op_package_options=op_package_options, ) quant_attrs = llama_instance_list[0].get_quant_attrs() elif args.model_mode == "hybrid": @@ -682,14 +785,16 @@ def compile(args, pte_filename, tokenizer): multiple_graphs=True, weight_sharing=not args.enable_x86_64, # x86 emulator does not support weight sharing graph_name=graph_name, + op_package_options=op_package_options, ) for graph_name in graph_names ] skip_node_op_set = {"llama.fallback.default"} + partitioner = QnnPartitioner(compiler_specs[i], skip_node_op_set=skip_node_op_set) exported_programs = [ to_backend( edge_prog.exported_program, - QnnPartitioner(compiler_specs[i], skip_node_op_set=skip_node_op_set), + partitioner, ) for i, edge_prog in enumerate(edge_progs) ] @@ -802,10 +907,16 @@ def compile(args, pte_filename, tokenizer): end_lowering_ts = time.time() logging.info(f"Time for compiling: {end_lowering_ts - start_lowering_ts}") - return quant_attrs + return quant_attrs, op_package_paths -def inference(args, quant_attrs, pte_filename, runtime_tokenizer_path, pre_gen_pte=""): +def inference( + args, + quant_attrs, + pte_filename, + runtime_tokenizer_path, + pre_gen_pte="", + op_package_paths=None): workspace = f"/data/local/tmp/{getpass.getuser()}/executorch/single_llama" if args.model_mode == "kv": @@ -902,7 +1013,10 @@ def post_process(): runner=f"examples/qualcomm/oss_scripts/llama/qnn_llama_runner", ) # No pregen inputs, input_list is 
+    adb.push(inputs=[], input_list="", files=files)
     adb.execute(custom_runner_cmd=runner_cmd)
     adb.pull(output_path=args.artifact, callback=post_process)
@@ -1060,6 +1174,12 @@ def _build_parser():
         help="Fallback to cpu embedding operator and type of embedding quantization, '<bitwidth>,<groupsize>', e.g., '4,32'.",
     )
 
+    parser.add_argument(
+        "--use_custom_embedding",
+        action="store_true",
+        help="Flag to use the custom 4-bit per-tensor embedding layer.",
+    )
+
     parser.add_argument("-v", "--verbose", action="store_true")
 
     return parser
@@ -1115,7 +1235,7 @@ def export_llama(args) -> None:
         exit(f"Finish the running pre_gen_pte from {args.pre_gen_pte}")
 
     if args.compile_only:
-        quant_attrs = compile(args, pte_filename, tokenizer)
+        quant_attrs, _ = compile(args, pte_filename, tokenizer)
         if quant_attrs:
             json.dump(
                 {
@@ -1141,7 +1261,7 @@ def export_llama(args) -> None:
         exit(f"Finish compile_only and save to {args.artifact}")
 
     try:
-        quant_attrs = compile(args, pte_filename, tokenizer)
+        quant_attrs, op_package_paths = compile(args, pte_filename, tokenizer)
         if quant_attrs:
             logging.info(
                 f"Logit scale: {quant_attrs['scale']}; Logit offset: {quant_attrs['zero_point']}"
@@ -1155,7 +1275,9 @@ def export_llama(args) -> None:
             )
         else:
             logging.warning("Quant attributes of the logit are None.")
-        inference(args, quant_attrs, pte_filename, runtime_tokenizer_path)
+        inference(
+            args, quant_attrs, pte_filename, runtime_tokenizer_path, op_package_paths=op_package_paths
+        )
     except Exception as e:
         if args.ip and args.port != -1:
             with Client((args.ip, args.port)) as conn: