[ExecuTorch] Remove xnn_executor_runner #9292

Open · wants to merge 3 commits into base branch `gh/swolchok/368/base`
12 changes: 6 additions & 6 deletions .ci/scripts/test_model.sh
@@ -115,8 +115,8 @@ test_model() {
run_portable_executor_runner
}

-build_cmake_xnn_executor_runner() {
-echo "Building xnn_executor_runner"
+build_cmake_executor_runner() {
+echo "Building executor_runner"

(rm -rf ${CMAKE_OUTPUT_DIR} \
&& mkdir ${CMAKE_OUTPUT_DIR} \
@@ -152,12 +152,12 @@ test_model_with_xnnpack() {

# Run test model
if [[ "${BUILD_TOOL}" == "buck2" ]]; then
-buck2 run //examples/xnnpack:xnn_executor_runner -- --model_path "${OUTPUT_MODEL_PATH}"
+buck2 run //examples/portable/executor_runner:executor_runner_opt -- --model_path "${OUTPUT_MODEL_PATH}"
elif [[ "${BUILD_TOOL}" == "cmake" ]]; then
-if [[ ! -f ${CMAKE_OUTPUT_DIR}/backends/xnnpack/xnn_executor_runner ]]; then
-build_cmake_xnn_executor_runner
+if [[ ! -f ${CMAKE_OUTPUT_DIR}/executor_runner ]]; then
+build_cmake_executor_runner
fi
-./${CMAKE_OUTPUT_DIR}/backends/xnnpack/xnn_executor_runner --model_path "${OUTPUT_MODEL_PATH}"
+./${CMAKE_OUTPUT_DIR}/executor_runner --model_path "${OUTPUT_MODEL_PATH}"
else
echo "Invalid build tool ${BUILD_TOOL}. Only buck2 and cmake are supported atm"
exit 1
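For context while reviewing: the hunk above collapses the body of the renamed helper. A minimal sketch of what it plausibly contains, assuming it mirrors the configure-and-build flow of the deleted `build_cmake_xnn_executor_runner` but targets the generic runner; the cmake flags below are assumptions, not copied from this PR:

```bash
# Plausible body for the renamed helper (sketch only; flags are assumptions).
build_cmake_executor_runner() {
  echo "Building executor_runner"
  (rm -rf "${CMAKE_OUTPUT_DIR}" \
    && mkdir "${CMAKE_OUTPUT_DIR}" \
    && cd "${CMAKE_OUTPUT_DIR}" \
    && cmake -DCMAKE_BUILD_TYPE=Release \
             -DEXECUTORCH_BUILD_XNNPACK=ON \
             -DPYTHON_EXECUTABLE="${PYTHON_EXECUTABLE}" ..)
  # Build everything configured above, including the executor_runner binary.
  cmake --build "${CMAKE_OUTPUT_DIR}" -j4
}
```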
40 changes: 0 additions & 40 deletions backends/xnnpack/CMakeLists.txt
@@ -136,46 +136,6 @@ target_include_directories(
target_compile_options(xnnpack_backend PUBLIC ${_common_compile_options})
target_link_options_shared_lib(xnnpack_backend)

-if(EXECUTORCH_BUILD_KERNELS_OPTIMIZED)
-  list(APPEND xnn_executor_runner_libs optimized_native_cpu_ops_lib)
-else()
-  list(APPEND xnn_executor_runner_libs portable_ops_lib)
-endif()
-
-if(EXECUTORCH_BUILD_KERNELS_CUSTOM)
-  list(APPEND xnn_executor_runner_libs $<LINK_LIBRARY:WHOLE_ARCHIVE,custom_ops>)
-endif()
-
-list(APPEND xnn_executor_runner_libs xnnpack_backend executorch)
-
-# ios can only build library but not binary
-if(NOT CMAKE_TOOLCHAIN_FILE MATCHES ".*(iOS|ios\.toolchain)\.cmake$")
-  #
-  # xnn_executor_runner: Like executor_runner but with XNNPACK, the binary will
-  # be at ${CMAKE_BINARY_DIR}/backends/xnnpack
-  #
-  list(TRANSFORM _xnn_executor_runner__srcs PREPEND "${EXECUTORCH_ROOT}/")
-  add_executable(xnn_executor_runner ${_xnn_executor_runner__srcs})
-
-  if(EXECUTORCH_ENABLE_EVENT_TRACER)
-    if(EXECUTORCH_BUILD_DEVTOOLS)
-      list(APPEND xnn_executor_runner_libs etdump)
-    else()
-      message(
-        SEND_ERROR
-          "Use of 'EXECUTORCH_ENABLE_EVENT_TRACER' requires 'EXECUTORCH_BUILD_DEVTOOLS' to be enabled."
-      )
-    endif()
-  endif()
-
-  target_link_libraries(xnn_executor_runner gflags ${xnn_executor_runner_libs})
-  target_compile_options(xnn_executor_runner PUBLIC ${_common_compile_options})
-  if(EXECUTORCH_BUILD_PTHREADPOOL)
-    target_link_libraries(xnn_executor_runner extension_threadpool pthreadpool)
-    target_compile_definitions(xnn_executor_runner PRIVATE ET_USE_THREADPOOL)
-  endif()
-endif()

install(
TARGETS xnnpack_backend
DESTINATION lib
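Note that the `install(TARGETS xnnpack_backend ...)` rule above is untouched, so applications still link the backend library; only the bundled runner target goes away. A quick post-install sanity check, assuming a default static build and an install prefix of `cmake-out` (both assumptions):

```bash
# Build and install, then confirm the backend library is still produced.
# The lib/ destination comes from the install() rule above; the prefix and
# the .a extension are assumptions for a default static build.
cmake --build cmake-out -j9 --target install --config Release
ls cmake-out/lib | grep xnnpack_backend
```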
6 changes: 3 additions & 3 deletions backends/xnnpack/README.md
@@ -92,7 +92,7 @@ After lowering to the XNNPACK Program, we can then prepare it for executorch and


### Running the XNNPACK Model with CMake
-After exporting the XNNPACK Delegated model, we can now try running it with example inputs using CMake. We can build and use the xnn_executor_runner, which is a sample wrapper for the ExecuTorch Runtime and XNNPACK Backend. We first begin by configuring the CMake build like such:
+After exporting the XNNPACK Delegated model, we can now try running it with example inputs using CMake. We can build and use the `executor_runner`, which is a sample wrapper for the ExecuTorch Runtime and XNNPACK Backend. We first begin by configuring the CMake build like such:
```bash
# cd to the root of executorch repo
cd executorch
@@ -119,9 +119,9 @@ Then you can build the runtime componenets with
cmake --build cmake-out -j9 --target install --config Release
```

-Now you should be able to find the executable built at `./cmake-out/backends/xnnpack/xnn_executor_runner` you can run the executable with the model you generated as such
+Now you should be able to find the executable built at `./cmake-out/executor_runner` you can run the executable with the model you generated as such
```bash
-./cmake-out/backends/xnnpack/xnn_executor_runner --model_path=./mv2_xnnpack_fp32.pte
+./cmake-out/executor_runner --model_path=./mv2_xnnpack_fp32.pte
```

## Help & Improvements
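Putting the README hunks above together, the end-to-end flow after this change looks like the following; the commands come from the surrounding docs, and the model name and output path mirror the doc's MobileNetV2 example:

```bash
# Export an XNNPACK-delegated model, then run it with the generic runner
# (replacing the removed xnn_executor_runner).
python3 -m examples.xnnpack.aot_compiler --model_name="mv2" --delegate
./cmake-out/executor_runner --model_path=./mv2_xnnpack_fp32.pte
```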
18 changes: 0 additions & 18 deletions build/cmake_deps.toml
@@ -353,24 +353,6 @@ filters = [
# ---------------------------------- MPS end ----------------------------------
# ---------------------------------- XNNPACK start ----------------------------------

-[targets.xnn_executor_runner]
-buck_targets = [
-  "//examples/xnnpack:xnn_executor_runner",
-]
-filters = [
-  ".cpp$",
-]
-excludes = [
-  "^codegen",
-]
-deps = [
-  "executorch",
-  "executorch_core",
-  "extension_threadpool",
-  "xnnpack_backend",
-  "portable_kernels",
-]

[targets.xnnpack_backend]
buck_targets = [
"//backends/xnnpack:xnnpack_backend",
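With the `[targets.xnn_executor_runner]` entry removed, a reviewer may want to confirm no other build files still reference the deleted target. One hypothetical spot check, not part of this PR (file globs are illustrative):

```bash
# List any leftover references to the removed target; advisory only.
grep -rn "xnn_executor_runner" \
  --include="*.toml" --include="*.bzl" --include="*.sh" --include="*.md" .
```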
2 changes: 1 addition & 1 deletion docs/source/backend-delegates-xnnpack-reference.md
@@ -70,7 +70,7 @@ Since weight packing creates an extra copy of the weights inside XNNPACK, We fre
When executing the XNNPACK subgraphs, we prepare the tensor inputs and outputs and feed them to the XNNPACK runtime graph. After executing the runtime graph, the output pointers are filled with the computed tensors.

#### **Profiling**
-We have enabled basic profiling for the XNNPACK delegate that can be enabled with the compiler flag `-DEXECUTORCH_ENABLE_EVENT_TRACER` (add `-DENABLE_XNNPACK_PROFILING` for additional details). With ExecuTorch's Developer Tools integration, you can also now use the Developer Tools to profile the model. You can follow the steps in [Using the ExecuTorch Developer Tools to Profile a Model](./tutorials/devtools-integration-tutorial) on how to profile ExecuTorch models and use Developer Tools' Inspector API to view XNNPACK's internal profiling information. An example implementation is available in the `xnn_executor_runner` (see [tutorial here](tutorial-xnnpack-delegate-lowering.md#profiling)).
+We have enabled basic profiling for the XNNPACK delegate that can be enabled with the compiler flag `-DEXECUTORCH_ENABLE_EVENT_TRACER` (add `-DENABLE_XNNPACK_PROFILING` for additional details). With ExecuTorch's Developer Tools integration, you can also now use the Developer Tools to profile the model. You can follow the steps in [Using the ExecuTorch Developer Tools to Profile a Model](./tutorials/devtools-integration-tutorial) on how to profile ExecuTorch models and use Developer Tools' Inspector API to view XNNPACK's internal profiling information. An example implementation is available in the `executor_runner` (see [tutorial here](tutorial-xnnpack-delegate-lowering.md#profiling)).


[comment]: <> (TODO: Refactor quantizer to a more official quantization doc)
10 changes: 5 additions & 5 deletions docs/source/tutorial-xnnpack-delegate-lowering.md
@@ -141,7 +141,7 @@ Note in the example above,
The generated model file will be named `[model_name]_xnnpack_[qs8/fp32].pte` depending on the arguments supplied.

## Running the XNNPACK Model with CMake
-After exporting the XNNPACK Delegated model, we can now try running it with example inputs using CMake. We can build and use the xnn_executor_runner, which is a sample wrapper for the ExecuTorch Runtime and XNNPACK Backend. We first begin by configuring the CMake build like such:
+After exporting the XNNPACK Delegated model, we can now try running it with example inputs using CMake. We can build and use the `executor_runner`, which is a sample wrapper for the ExecuTorch Runtime and XNNPACK Backend. We first begin by configuring the CMake build like such:
```bash
# cd to the root of executorch repo
cd executorch
@@ -168,15 +168,15 @@ Then you can build the runtime componenets with
cmake --build cmake-out -j9 --target install --config Release
```

-Now you should be able to find the executable built at `./cmake-out/backends/xnnpack/xnn_executor_runner` you can run the executable with the model you generated as such
+Now you should be able to find the executable built at `./cmake-out/executor_runner` you can run the executable with the model you generated as such
```bash
-./cmake-out/backends/xnnpack/xnn_executor_runner --model_path=./mv2_xnnpack_fp32.pte
+./cmake-out/executor_runner --model_path=./mv2_xnnpack_fp32.pte
# or to run the quantized variant
-./cmake-out/backends/xnnpack/xnn_executor_runner --model_path=./mv2_xnnpack_q8.pte
+./cmake-out/executor_runner --model_path=./mv2_xnnpack_q8.pte
```

## Building and Linking with the XNNPACK Backend
You can build the XNNPACK backend [CMake target](https://github.com/pytorch/executorch/blob/main/backends/xnnpack/CMakeLists.txt#L83), and link it with your application binary such as an Android or iOS application. For more information on this you may take a look at this [resource](demo-apps-android.md) next.

## Profiling
-To enable profiling in the `xnn_executor_runner` pass the flags `-DEXECUTORCH_ENABLE_EVENT_TRACER=ON` and `-DEXECUTORCH_BUILD_DEVTOOLS=ON` to the build command (add `-DENABLE_XNNPACK_PROFILING=ON` for additional details). This will enable ETDump generation when running the inference and enables command line flags for profiling (see `xnn_executor_runner --help` for details).
+To enable profiling in the `executor_runner` pass the flags `-DEXECUTORCH_ENABLE_EVENT_TRACER=ON` and `-DEXECUTORCH_BUILD_DEVTOOLS=ON` to the build command (add `-DENABLE_XNNPACK_PROFILING=ON` for additional details). This will enable ETDump generation when running the inference and enables command line flags for profiling (see `executor_runner --help` for details).
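Concretely, the profiling workflow the updated doc describes would look roughly like this; the three `-D` flags are the ones named above, while the rest of the configure invocation is assumed:

```bash
# Reconfigure with event tracing and devtools enabled, rebuild, then run.
# Profiling-related runtime flags can be discovered via --help, per the doc.
cmake -DEXECUTORCH_ENABLE_EVENT_TRACER=ON \
      -DEXECUTORCH_BUILD_DEVTOOLS=ON \
      -DENABLE_XNNPACK_PROFILING=ON \
      -Bcmake-out .
cmake --build cmake-out -j9 --target install --config Release
./cmake-out/executor_runner --model_path=./mv2_xnnpack_fp32.pte
./cmake-out/executor_runner --help
```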
10 changes: 5 additions & 5 deletions examples/xnnpack/README.md
@@ -24,7 +24,7 @@ The following command will produce a floating-point XNNPACK delegated model `mv2
python3 -m examples.xnnpack.aot_compiler --model_name="mv2" --delegate
```

-Once we have the model binary (pte) file, then let's run it with ExecuTorch runtime using the `xnn_executor_runner`. With cmake, you first configure your cmake with the following:
+Once we have the model binary (pte) file, then let's run it with ExecuTorch runtime using the `executor_runner`. With cmake, you first configure your cmake with the following:

```bash
# cd to the root of executorch repo
@@ -56,7 +56,7 @@ cmake --build cmake-out -j9 --target install --config Release
Now finally you should be able to run this model with the following command

```bash
-./cmake-out/backends/xnnpack/xnn_executor_runner --model_path ./mv2_xnnpack_fp32.pte
+./cmake-out/executor_runner --model_path ./mv2_xnnpack_fp32.pte
```

## Quantization
Expand All @@ -80,7 +80,7 @@ python3 -m examples.xnnpack.quantization.example --help
```

## Running the XNNPACK Model with CMake
-After exporting the XNNPACK Delegated model, we can now try running it with example inputs using CMake. We can build and use the xnn_executor_runner, which is a sample wrapper for the ExecuTorch Runtime and XNNPACK Backend. We first begin by configuring the CMake build like such:
+After exporting the XNNPACK Delegated model, we can now try running it with example inputs using CMake. We can build and use the `executor_runner`, which is a sample wrapper for the ExecuTorch Runtime and XNNPACK Backend. We first begin by configuring the CMake build like such:
```bash
# cd to the root of executorch repo
cd executorch
@@ -107,9 +107,9 @@ Then you can build the runtime componenets with
cmake --build cmake-out -j9 --target install --config Release
```

-Now you should be able to find the executable built at `./cmake-out/backends/xnnpack/xnn_executor_runner` you can run the executable with the model you generated as such
+Now you should be able to find the executable built at `./cmake-out/executor_runner` you can run the executable with the model you generated as such
```bash
-./cmake-out/backends/xnnpack/xnn_executor_runner --model_path=./mv2_quantized.pte
+./cmake-out/executor_runner --model_path=./mv2_quantized.pte
```

## Delegating a Quantized Model
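For the quantized variant referenced in the hunks above, a sketch of the flow; the quantization example's exact flag is an assumption (its options are listed by the `--help` invocation mentioned earlier in this README), while the output name matches the doc:

```bash
# Produce the quantized model, then run it with the generic runner.
# --model_name is an assumed flag; check the module's --help output.
python3 -m examples.xnnpack.quantization.example --model_name="mv2"
./cmake-out/executor_runner --model_path=./mv2_quantized.pte
```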
8 changes: 0 additions & 8 deletions examples/xnnpack/executor_runner/TARGETS

This file was deleted.

20 changes: 0 additions & 20 deletions examples/xnnpack/executor_runner/targets.bzl

This file was deleted.

12 changes: 0 additions & 12 deletions examples/xnnpack/targets.bzl
@@ -49,15 +49,3 @@ def define_common_targets():
"@EXECUTORCH_CLIENTS",
],
)

-# executor_runner for XNNPACK Backend and portable kernels.
-runtime.cxx_binary(
-    name = "xnn_executor_runner",
-    deps = [
-        "//executorch/examples/portable/executor_runner:executor_runner_lib",
-        "//executorch/backends/xnnpack:xnnpack_backend",
-        "//executorch/kernels/portable:generated_lib",
-    ],
-    define_static_target = True,
-    **get_oss_build_kwargs()
-)
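With the Buck2 target gone, the replacement invocation is the one the CI change at the top of this diff now uses; the model path below is illustrative:

```bash
# Generic optimized runner replaces //examples/xnnpack:xnn_executor_runner;
# per the CI change, it now executes XNNPACK-delegated models directly.
buck2 run //examples/portable/executor_runner:executor_runner_opt -- --model_path ./mv2_xnnpack_fp32.pte
```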