diff --git a/.ci/scripts/test_model.sh b/.ci/scripts/test_model.sh
index 38c45dc3fb7..28029b33e7e 100755
--- a/.ci/scripts/test_model.sh
+++ b/.ci/scripts/test_model.sh
@@ -111,8 +111,8 @@ test_model() {
   run_portable_executor_runner
 }
 
-build_cmake_xnn_executor_runner() {
-  echo "Building xnn_executor_runner"
+build_cmake_executor_runner() {
+  echo "Building executor_runner"
   (rm -rf ${CMAKE_OUTPUT_DIR} \
     && mkdir ${CMAKE_OUTPUT_DIR} \
@@ -148,12 +148,12 @@ test_model_with_xnnpack() {
 
   # Run test model
   if [[ "${BUILD_TOOL}" == "buck2" ]]; then
-    buck2 run //examples/xnnpack:xnn_executor_runner -- --model_path "${OUTPUT_MODEL_PATH}"
+    buck2 run //examples/portable/executor_runner:executor_runner_opt -- --model_path "${OUTPUT_MODEL_PATH}"
   elif [[ "${BUILD_TOOL}" == "cmake" ]]; then
-    if [[ ! -f ${CMAKE_OUTPUT_DIR}/backends/xnnpack/xnn_executor_runner ]]; then
-      build_cmake_xnn_executor_runner
+    if [[ ! -f ${CMAKE_OUTPUT_DIR}/executor_runner ]]; then
+      build_cmake_executor_runner
     fi
-    ./${CMAKE_OUTPUT_DIR}/backends/xnnpack/xnn_executor_runner --model_path "${OUTPUT_MODEL_PATH}"
+    ./${CMAKE_OUTPUT_DIR}/executor_runner --model_path "${OUTPUT_MODEL_PATH}"
   else
     echo "Invalid build tool ${BUILD_TOOL}. Only buck2 and cmake are supported atm"
     exit 1
diff --git a/backends/xnnpack/CMakeLists.txt b/backends/xnnpack/CMakeLists.txt
index 670f1fba6df..a2efc9cdad7 100644
--- a/backends/xnnpack/CMakeLists.txt
+++ b/backends/xnnpack/CMakeLists.txt
@@ -132,43 +132,6 @@ target_include_directories(
 target_compile_options(xnnpack_backend PUBLIC ${_common_compile_options})
 target_link_options_shared_lib(xnnpack_backend)
 
-if(EXECUTORCH_BUILD_KERNELS_OPTIMIZED)
-  list(APPEND xnn_executor_runner_libs optimized_native_cpu_ops_lib)
-else()
-  list(APPEND xnn_executor_runner_libs portable_ops_lib)
-endif()
-
-if(EXECUTORCH_BUILD_KERNELS_CUSTOM)
-  list(APPEND xnn_executor_runner_libs $)
-endif()
-
-if(EXECUTORCH_BUILD_KERNELS_QUANTIZED)
-  list(APPEND xnn_executor_runner_libs quantized_ops_lib)
-endif()
-
-list(APPEND xnn_executor_runner_libs xnnpack_backend executorch)
-
-# ios can only build library but not binary
-if(NOT CMAKE_TOOLCHAIN_FILE MATCHES ".*(iOS|ios\.toolchain)\.cmake$")
-  #
-  # xnn_executor_runner: Like executor_runner but with XNNPACK, the binary will
-  # be at ${CMAKE_BINARY_DIR}/backends/xnnpack
-  #
-  list(TRANSFORM _xnn_executor_runner__srcs PREPEND "${EXECUTORCH_ROOT}/")
-  add_executable(xnn_executor_runner ${_xnn_executor_runner__srcs})
-
-  if(EXECUTORCH_ENABLE_EVENT_TRACER)
-    list(APPEND xnn_executor_runner_libs etdump)
-  endif()
-
-  target_link_libraries(xnn_executor_runner gflags ${xnn_executor_runner_libs})
-  target_compile_options(xnn_executor_runner PUBLIC ${_common_compile_options})
-  if(EXECUTORCH_BUILD_PTHREADPOOL)
-    target_link_libraries(xnn_executor_runner extension_threadpool pthreadpool)
-    target_compile_definitions(xnn_executor_runner PRIVATE ET_USE_THREADPOOL)
-  endif()
-endif()
-
 install(
   TARGETS xnnpack_backend
   DESTINATION lib
diff --git a/backends/xnnpack/README.md b/backends/xnnpack/README.md
index 2328f8e4b90..2de674069eb 100644
--- a/backends/xnnpack/README.md
+++ b/backends/xnnpack/README.md
@@ -92,7 +92,7 @@ After lowering to the XNNPACK Program, we can then prepare it for executorch and
 
 ### Running the XNNPACK Model with CMake
 
-After exporting the XNNPACK Delegated model, we can now try running it with example inputs using CMake. We can build and use the xnn_executor_runner, which is a sample wrapper for the ExecuTorch Runtime and XNNPACK Backend. We first begin by configuring the CMake build like such:
+After exporting the XNNPACK Delegated model, we can now try running it with example inputs using CMake. We can build and use the `executor_runner`, which is a sample wrapper for the ExecuTorch Runtime and XNNPACK Backend. We first begin by configuring the CMake build as follows:
 ```bash
 # cd to the root of executorch repo
 cd executorch
@@ -119,9 +119,9 @@ Then you can build the runtime componenets with
 cmake --build cmake-out -j9 --target install --config Release
 ```
 
-Now you should be able to find the executable built at `./cmake-out/backends/xnnpack/xnn_executor_runner` you can run the executable with the model you generated as such
+Now you should be able to find the executable built at `./cmake-out/executor_runner`. You can run it with the model you generated as follows:
 ```bash
-./cmake-out/backends/xnnpack/xnn_executor_runner --model_path=./mv2_xnnpack_fp32.pte
+./cmake-out/executor_runner --model_path=./mv2_xnnpack_fp32.pte
 ```
 
 ## Help & Improvements
diff --git a/docs/source/backend-delegates-xnnpack-reference.md b/docs/source/backend-delegates-xnnpack-reference.md
index 8fe346680d4..d38c5af60fa 100644
--- a/docs/source/backend-delegates-xnnpack-reference.md
+++ b/docs/source/backend-delegates-xnnpack-reference.md
@@ -70,7 +70,7 @@ Since weight packing creates an extra copy of the weights inside XNNPACK, We fre
 When executing the XNNPACK subgraphs, we prepare the tensor inputs and outputs and feed them to the XNNPACK runtime graph. After executing the runtime graph, the output pointers are filled with the computed tensors.
 
 #### **Profiling**
-We have enabled basic profiling for the XNNPACK delegate that can be enabled with the compiler flag `-DEXECUTORCH_ENABLE_EVENT_TRACER` (add `-DENABLE_XNNPACK_PROFILING` for additional details). With ExecuTorch's Developer Tools integration, you can also now use the Developer Tools to profile the model. You can follow the steps in [Using the ExecuTorch Developer Tools to Profile a Model](https://pytorch.org/executorch/main/tutorials/devtools-integration-tutorial) on how to profile ExecuTorch models and use Developer Tools' Inspector API to view XNNPACK's internal profiling information. An example implementation is available in the `xnn_executor_runner` (see [tutorial here](tutorial-xnnpack-delegate-lowering.md#profiling)).
+Basic profiling for the XNNPACK delegate can be enabled with the compiler flag `-DEXECUTORCH_ENABLE_EVENT_TRACER` (add `-DENABLE_XNNPACK_PROFILING` for additional details). With ExecuTorch's Developer Tools integration, you can also use the Developer Tools to profile the model. You can follow the steps in [Using the ExecuTorch Developer Tools to Profile a Model](https://pytorch.org/executorch/main/tutorials/devtools-integration-tutorial) on how to profile ExecuTorch models and use the Developer Tools' Inspector API to view XNNPACK's internal profiling information. An example implementation is available in the `executor_runner` (see [tutorial here](tutorial-xnnpack-delegate-lowering.md#profiling)).
 [comment]: <> (TODO: Refactor quantizer to a more official quantization doc)
diff --git a/docs/source/tutorial-xnnpack-delegate-lowering.md b/docs/source/tutorial-xnnpack-delegate-lowering.md
index add60a12deb..5117065b6fd 100644
--- a/docs/source/tutorial-xnnpack-delegate-lowering.md
+++ b/docs/source/tutorial-xnnpack-delegate-lowering.md
@@ -141,7 +141,7 @@ Note in the example above,
 The generated model file will be named `[model_name]_xnnpack_[qs8/fp32].pte` depending on the arguments supplied.
 
 ## Running the XNNPACK Model with CMake
-After exporting the XNNPACK Delegated model, we can now try running it with example inputs using CMake. We can build and use the xnn_executor_runner, which is a sample wrapper for the ExecuTorch Runtime and XNNPACK Backend. We first begin by configuring the CMake build like such:
+After exporting the XNNPACK Delegated model, we can now try running it with example inputs using CMake. We can build and use the `executor_runner`, which is a sample wrapper for the ExecuTorch Runtime and XNNPACK Backend. We first begin by configuring the CMake build as follows:
 ```bash
 # cd to the root of executorch repo
 cd executorch
@@ -168,15 +168,15 @@ Then you can build the runtime componenets with
 cmake --build cmake-out -j9 --target install --config Release
 ```
 
-Now you should be able to find the executable built at `./cmake-out/backends/xnnpack/xnn_executor_runner` you can run the executable with the model you generated as such
+Now you should be able to find the executable built at `./cmake-out/executor_runner`. You can run it with the model you generated as follows:
 ```bash
-./cmake-out/backends/xnnpack/xnn_executor_runner --model_path=./mv2_xnnpack_fp32.pte
+./cmake-out/executor_runner --model_path=./mv2_xnnpack_fp32.pte
 # or to run the quantized variant
-./cmake-out/backends/xnnpack/xnn_executor_runner --model_path=./mv2_xnnpack_q8.pte
+./cmake-out/executor_runner --model_path=./mv2_xnnpack_q8.pte
 ```
 
 ## Building and Linking with the XNNPACK Backend
 You can build the XNNPACK backend [CMake target](https://github.com/pytorch/executorch/blob/main/backends/xnnpack/CMakeLists.txt#L83), and link it with your application binary such as an Android or iOS application. For more information on this you may take a look at this [resource](using-executorch-android.md) next.
 
 ## Profiling
-To enable profiling in the `xnn_executor_runner` pass the flags `-DEXECUTORCH_ENABLE_EVENT_TRACER=ON` and `-DEXECUTORCH_BUILD_DEVTOOLS=ON` to the build command (add `-DENABLE_XNNPACK_PROFILING=ON` for additional details). This will enable ETDump generation when running the inference and enables command line flags for profiling (see `xnn_executor_runner --help` for details).
+To enable profiling in the `executor_runner`, pass the flags `-DEXECUTORCH_ENABLE_EVENT_TRACER=ON` and `-DEXECUTORCH_BUILD_DEVTOOLS=ON` to the build command (add `-DENABLE_XNNPACK_PROFILING=ON` for additional details). This will enable ETDump generation when running inference and enable command-line flags for profiling (see `executor_runner --help` for details).
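Reviewer's note: as a quick sanity check of the profiling path above with the renamed runner, the flow might look like the sketch below. The three profiling flags are the ones named in the tutorial; the `-DEXECUTORCH_BUILD_XNNPACK=ON` configure flag and the runner's `--etdump_path` flag are assumptions drawn from the surrounding docs, not changes made by this diff.

```bash
# Minimal profiling sketch (flags beyond the three documented ones are assumptions).
cd executorch
cmake . -Bcmake-out \
  -DEXECUTORCH_BUILD_XNNPACK=ON \
  -DEXECUTORCH_BUILD_DEVTOOLS=ON \
  -DEXECUTORCH_ENABLE_EVENT_TRACER=ON \
  -DENABLE_XNNPACK_PROFILING=ON
cmake --build cmake-out -j9 --target install --config Release

# The runner now builds at the top of cmake-out, not under backends/xnnpack/.
# --etdump_path is assumed from the Developer Tools docs; check --help.
./cmake-out/executor_runner \
  --model_path=./mv2_xnnpack_fp32.pte \
  --etdump_path=./mv2_xnnpack_fp32.etdump
```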
diff --git a/examples/xnnpack/README.md b/examples/xnnpack/README.md
index 5c307d34717..4dc51b3a6bb 100644
--- a/examples/xnnpack/README.md
+++ b/examples/xnnpack/README.md
@@ -24,7 +24,7 @@ The following command will produce a floating-point XNNPACK delegated model `mv2
 python3 -m examples.xnnpack.aot_compiler --model_name="mv2" --delegate
 ```
 
-Once we have the model binary (pte) file, then let's run it with ExecuTorch runtime using the `xnn_executor_runner`. With cmake, you first configure your cmake with the following:
+Once we have the model binary (`.pte`) file, let's run it with the ExecuTorch runtime using the `executor_runner`. With CMake, you first configure the build with the following:
 ```bash
 # cd to the root of executorch repo
 cd executorch
@@ -56,7 +56,7 @@ cmake --build cmake-out -j9 --target install --config Release
 
 Now finally you should be able to run this model with the following command
 ```bash
-./cmake-out/backends/xnnpack/xnn_executor_runner --model_path ./mv2_xnnpack_fp32.pte
+./cmake-out/executor_runner --model_path ./mv2_xnnpack_fp32.pte
 ```
 
 ## Quantization
@@ -80,7 +80,7 @@ python3 -m examples.xnnpack.quantization.example --help
 ```
 
 ## Running the XNNPACK Model with CMake
-After exporting the XNNPACK Delegated model, we can now try running it with example inputs using CMake. We can build and use the xnn_executor_runner, which is a sample wrapper for the ExecuTorch Runtime and XNNPACK Backend. We first begin by configuring the CMake build like such:
+After exporting the XNNPACK Delegated model, we can now try running it with example inputs using CMake. We can build and use the `executor_runner`, which is a sample wrapper for the ExecuTorch Runtime and XNNPACK Backend. We first begin by configuring the CMake build as follows:
 ```bash
 # cd to the root of executorch repo
 cd executorch
@@ -107,9 +107,9 @@ Then you can build the runtime componenets with
 cmake --build cmake-out -j9 --target install --config Release
 ```
 
-Now you should be able to find the executable built at `./cmake-out/backends/xnnpack/xnn_executor_runner` you can run the executable with the model you generated as such
+Now you should be able to find the executable built at `./cmake-out/executor_runner`. You can run it with the model you generated as follows:
 ```bash
-./cmake-out/backends/xnnpack/xnn_executor_runner --model_path=./mv2_quantized.pte
+./cmake-out/executor_runner --model_path=./mv2_quantized.pte
 ```
 
 ## Delegating a Quantized Model
diff --git a/examples/xnnpack/executor_runner/TARGETS b/examples/xnnpack/executor_runner/TARGETS
deleted file mode 100644
index 2341af9282f..00000000000
--- a/examples/xnnpack/executor_runner/TARGETS
+++ /dev/null
@@ -1,8 +0,0 @@
-# Any targets that should be shared between fbcode and xplat must be defined in
-# targets.bzl. This file can contain fbcode-only targets.
-
-load(":targets.bzl", "define_common_targets")
-
-oncall("executorch")
-
-define_common_targets()
diff --git a/examples/xnnpack/executor_runner/targets.bzl b/examples/xnnpack/executor_runner/targets.bzl
deleted file mode 100644
index f9c333d5b47..00000000000
--- a/examples/xnnpack/executor_runner/targets.bzl
+++ /dev/null
@@ -1,20 +0,0 @@
-load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "get_oss_build_kwargs", "runtime")
-
-def define_common_targets():
-    """Defines targets that should be shared between fbcode and xplat.
-
-    The directory containing this targets.bzl file should also contain both
-    TARGETS and BUCK files that call this function.
-    """
-
-    # executor_runner for XNNPACK Backend and portable kernels.
-    runtime.cxx_binary(
-        name = "xnn_executor_runner",
-        deps = [
-            "//executorch/examples/portable/executor_runner:executor_runner_lib",
-            "//executorch/backends/xnnpack:xnnpack_backend",
-            "//executorch/kernels/portable:generated_lib",
-        ],
-        define_static_target = True,
-        **get_oss_build_kwargs()
-    )
diff --git a/examples/xnnpack/targets.bzl b/examples/xnnpack/targets.bzl
index ce9575e8cca..c1e8d7946f1 100644
--- a/examples/xnnpack/targets.bzl
+++ b/examples/xnnpack/targets.bzl
@@ -49,15 +49,3 @@ def define_common_targets():
             "@EXECUTORCH_CLIENTS",
         ],
     )
-
-    # executor_runner for XNNPACK Backend and portable kernels.
-    runtime.cxx_binary(
-        name = "xnn_executor_runner",
-        deps = [
-            "//executorch/examples/portable/executor_runner:executor_runner_lib",
-            "//executorch/backends/xnnpack:xnnpack_backend",
-            "//executorch/kernels/portable:generated_lib",
-        ],
-        define_static_target = True,
-        **get_oss_build_kwargs()
-    )
diff --git a/tools/cmake/cmake_deps.toml b/tools/cmake/cmake_deps.toml
index 6f12c9d4413..50f1e83ba7d 100644
--- a/tools/cmake/cmake_deps.toml
+++ b/tools/cmake/cmake_deps.toml
@@ -390,26 +390,6 @@ filters = [
 # ---------------------------------- MPS end ----------------------------------
 
 # ---------------------------------- XNNPACK start ----------------------------------
-[targets.xnn_executor_runner]
-buck_targets = [
-  "//examples/xnnpack:xnn_executor_runner",
-]
-filters = [
-  ".cpp$",
-]
-excludes = [
-  "^codegen",
-]
-deps = [
-  "executorch",
-  "executorch_core",
-  "extension_threadpool",
-  "kernels_util_all_deps",
-  "xnnpack_backend",
-  "portable_kernels",
-  "etdump_flatcc",
-]
-
 [targets.xnnpack_backend]
 buck_targets = [
   "//backends/xnnpack:xnnpack_backend",
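Reviewer's note: an end-to-end check of the rename, mirroring the docs updated above, might look like the sketch below. The export command and the `mv2_xnnpack_fp32.pte` output name come from examples/xnnpack/README.md; the single `-DEXECUTORCH_BUILD_XNNPACK=ON` configure flag is an assumption standing in for the full configure block, which this diff leaves unchanged.

```bash
# Export an XNNPACK-delegated MobileNetV2, then run it with the generic
# runner at its new location (the XNNPACK-specific binary no longer exists).
cd executorch
python3 -m examples.xnnpack.aot_compiler --model_name="mv2" --delegate

cmake . -Bcmake-out -DEXECUTORCH_BUILD_XNNPACK=ON  # assumed flag; see note above
cmake --build cmake-out -j9 --target install --config Release

# Old path, removed by this PR: ./cmake-out/backends/xnnpack/xnn_executor_runner
./cmake-out/executor_runner --model_path=./mv2_xnnpack_fp32.pte
```

With buck2, the equivalent is `buck2 run //examples/portable/executor_runner:executor_runner_opt -- --model_path ./mv2_xnnpack_fp32.pte`, as exercised by the updated CI script above.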