diff --git a/.github/workflows/backend-ncnn.yml b/.github/workflows/backend-ncnn.yml index e1584f826f..4a068ea720 100644 --- a/.github/workflows/backend-ncnn.yml +++ b/.github/workflows/backend-ncnn.yml @@ -58,6 +58,7 @@ jobs: echo $(pwd) ln -s build/bin/mmdeploy_onnx2ncnn ./ python .github/scripts/test_onnx2ncnn.py --run 1 + build_ncnn: runs-on: ubuntu-20.04 strategy: diff --git a/.github/workflows/backend-pplnn.yml b/.github/workflows/backend-pplnn.yml index 660e0cf8ae..399bd499b0 100644 --- a/.github/workflows/backend-pplnn.yml +++ b/.github/workflows/backend-pplnn.yml @@ -42,11 +42,22 @@ jobs: -Dpplnn_DIR=${pplnn_DIR} ls build/lib - name: Install mmdeploy with pplnn + id: badge_status run: | rm -rf .eggs && python3 -m pip install -e . export LD_LIBRARY_PATH="/root/workspace/mmdeploy/build/lib:${LD_LIBRARY_PATH}" python3 tools/check_env.py python3 -c 'import mmdeploy.apis.pplnn as pplnn_api; assert pplnn_api.is_available()' + - name: create badge + if: always() + uses: RubbaBoy/BYOB@v1.2.1 + with: + NAME: build_pplnn_cuda + LABEL: 'build' + STATUS: ${{ steps.badge_status.conclusion == 'success' && 'passing' || 'failing' }} + COLOR: ${{ steps.badge_status.conclusion == 'success' && 'green' || 'red' }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + build_pplnn_cpu: runs-on: ubuntu-20.04 @@ -56,8 +67,18 @@ jobs: with: submodules: 'recursive' - name: Install mmdeploy with pplnn + id: badge_status run: | python -m pip install torch==1.8.2 torchvision==0.9.2 --extra-index-url https://download.pytorch.org/whl/lts/1.8/cpu python -m pip install mmcv-lite protobuf==3.20.2 python tools/scripts/build_ubuntu_x64_pplnn.py 8 python -c 'import mmdeploy.apis.pplnn as pplnn_api; assert pplnn_api.is_available()' + - name: create badge + if: always() + uses: RubbaBoy/BYOB@v1.2.1 + with: + NAME: build_pplnn_cpu + LABEL: 'build' + STATUS: ${{ steps.badge_status.conclusion == 'success' && 'passing' || 'failing' }} + COLOR: ${{ steps.badge_status.conclusion == 'success' && 'green' || 
'red' }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 4801f0839e..2c477fe8cd 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -64,10 +64,20 @@ jobs: coverage xml coverage report -m - name: Run mmyolo deploy unittests + id: badge_status run: | python -m pip install xdoctest cd /home/runner/work/mmyolo pytest tests/test_deploy + - name: create badge + if: always() + uses: RubbaBoy/BYOB@v1.2.1 + with: + NAME: build_cpu_model_convert + LABEL: 'build' + STATUS: ${{ steps.badge_status.conclusion == 'success' && 'passing' || 'failing' }} + COLOR: ${{ steps.badge_status.conclusion == 'success' && 'green' || 'red' }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} build_cpu_sdk: runs-on: ubuntu-20.04 @@ -85,6 +95,7 @@ jobs: sudo apt install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libxrender-dev libc++1-9 libc++abi1-9 sudo apt install libopencv-dev lcov wget - name: Build and run SDK unit test without backend + id: badge_status run: | mkdir -p build && pushd build cmake .. -DCMAKE_CXX_COMPILER=g++ -DMMDEPLOY_CODEBASES=all -DMMDEPLOY_BUILD_SDK=ON -DMMDEPLOY_BUILD_SDK_PYTHON_API=OFF -DMMDEPLOY_TARGET_DEVICES=cpu -DMMDEPLOY_COVERAGE=ON -DMMDEPLOY_BUILD_TEST=ON @@ -95,6 +106,15 @@ jobs: lcov --capture --directory . 
--output-file coverage.info ls -lah coverage.info cp coverage.info ../ + - name: create badge + if: always() + uses: RubbaBoy/BYOB@v1.2.1 + with: + NAME: build_cpu_sdk + LABEL: 'build' + STATUS: ${{ steps.badge_status.conclusion == 'success' && 'passing' || 'failing' }} + COLOR: ${{ steps.badge_status.conclusion == 'success' && 'green' || 'red' }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} cross_build_aarch64: runs-on: ubuntu-20.04 @@ -110,8 +130,18 @@ jobs: with: python-version: 3.8 - name: gcc-multilib + id: badge_status run: | sh -ex tools/scripts/ubuntu_cross_build_aarch64.sh + - name: create badge + if: always() + uses: RubbaBoy/BYOB@v1.2.1 + with: + NAME: cross_build_aarch64 + LABEL: 'build' + STATUS: ${{ steps.badge_status.conclusion == 'success' && 'passing' || 'failing' }} + COLOR: ${{ steps.badge_status.conclusion == 'success' && 'green' || 'red' }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} build_cuda102: runs-on: ubuntu-20.04 @@ -155,10 +185,20 @@ jobs: rm -rf .eggs && python -m pip install -e . 
python tools/check_env.py - name: Run unittests and generate coverage report + id: badge_status run: | coverage run --branch --source mmdeploy -m pytest -rsE tests coverage xml coverage report -m + - name: create badge + if: always() + uses: RubbaBoy/BYOB@v1.2.1 + with: + NAME: build_cuda102 + LABEL: 'build' + STATUS: ${{ steps.badge_status.conclusion == 'success' && 'passing' || 'failing' }} + COLOR: ${{ steps.badge_status.conclusion == 'success' && 'green' || 'red' }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} build_cuda113: runs-on: ubuntu-20.04 @@ -205,6 +245,7 @@ jobs: coverage xml coverage report -m - name: Upload coverage to Codecov + id: badge_status uses: codecov/codecov-action@v2 with: file: ./coverage.xml,./coverage.info @@ -212,6 +253,15 @@ jobs: env_vars: OS,PYTHON,CPLUS name: codecov-umbrella fail_ci_if_error: false + - name: create badge + if: always() + uses: RubbaBoy/BYOB@v1.2.1 + with: + NAME: build_cuda113 + LABEL: 'build' + STATUS: ${{ steps.badge_status.conclusion == 'success' && 'passing' || 'failing' }} + COLOR: ${{ steps.badge_status.conclusion == 'success' && 'green' || 'red' }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} build_cuda113_linux: runs-on: [self-hosted, linux-3090] @@ -251,10 +301,20 @@ jobs: export LD_LIBRARY_PATH="/root/workspace/mmdeploy/build/lib:${LD_LIBRARY_PATH}" python3 tools/check_env.py - name: Test TensorRT pipeline + id: badge_status run: | export LD_LIBRARY_PATH="/root/workspace/mmdeploy/build/lib:${LD_LIBRARY_PATH}" export LD_LIBRARY_PATH="/root/workspace/mmdeploy/mmdeploy/lib:${LD_LIBRARY_PATH}" bash .github/scripts/linux/test_full_pipeline.sh trt cuda + - name: create badge + if: always() + uses: RubbaBoy/BYOB@v1.2.1 + with: + NAME: build_cuda113_linux + LABEL: 'build' + STATUS: ${{ steps.badge_status.conclusion == 'success' && 'passing' || 'failing' }} + COLOR: ${{ steps.badge_status.conclusion == 'success' && 'green' || 'red' }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} build_cuda113_windows: runs-on: 
[self-hosted, win10-3080] @@ -316,3 +376,17 @@ jobs: conda activate $pwd\tmp_env $env:path = "$pwd\build\bin\Release;" + $env:path .github\scripts\windows\test_full_pipeline.ps1 -Backend trt -Device cuda + + badge_build_cuda113_windows: + needs: build_cuda113_windows + if: always() + runs-on: ubuntu-20.04 + steps: + - name: create badge + uses: RubbaBoy/BYOB@v1.2.1 + with: + NAME: build_cuda113_windows + LABEL: 'build' + STATUS: ${{ needs.build_cuda113_windows.result == 'success' && 'passing' || needs.build_cuda113_windows.result }} + COLOR: ${{ needs.build_cuda113_windows.result == 'success' && 'green' || 'red' }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/README.md b/README.md index c305282d3f..584103d484 100644 --- a/README.md +++ b/README.md @@ -83,18 +83,188 @@ The supported Device-Platform-InferenceBackend matrix is presented as following, The benchmark can be found from [here](docs/en/03-benchmark/benchmark.md) -| Device / Platform | Linux | Windows | macOS | Android | -| ----------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| x86_64 CPU | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-ort.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-ort.yml)ONNXRuntime
[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-pplnn.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-pplnn.yml)pplnn
[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-ncnn.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-ncnn.yml)ncnn
[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-torchscript.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-torchscript.yml)LibTorch
![](https://img.shields.io/badge/build-no%20status-lightgrey)OpenVINO
![](https://img.shields.io/badge/build-no%20status-lightgrey)TVM
| [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/build.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml)ONNXRuntime
![](https://img.shields.io/badge/build-no%20status-lightgrey)OpenVINO
[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/build.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml)ncnn
| - | - | -| ARM CPU | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/build.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml)ncnn
| - | - | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-rknn.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-rknn.yml)ncnn
| -| RISC-V | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/linux-riscv64-gcc.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/linux-riscv64-gcc.yml)ncnn
| - | - | - | -| NVIDIA GPU | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/build.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml)ONNXRuntime
[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/build.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml)TensorRT
![](https://img.shields.io/badge/build-no%20status-lightgrey)LibTorch
[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-pplnn.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-pplnn.yml)pplnn
| [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/build.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml)ONNXRuntime
[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/build.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml)TensorRT
| - | - | -| NVIDIA Jetson | ![](https://img.shields.io/badge/build-no%20status-lightgrey)TensorRT
| - | - | - | -| Huawei ascend310 | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-ascend.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-ascend.yml)CANN
| - | - | - | -| Rockchip | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-rknn.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-rknn.yml)RKNN
| - | - | - | -| Apple M1 | - | - | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-coreml.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-coreml.yml)CoreML
| - | -| Adreno GPU | - | - | - | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-snpe.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-snpe.yml)SNPE
[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-rknn.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-rknn.yml)ncnn
| -| Hexagon DSP | - | - | - | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-snpe.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-snpe.yml)SNPE
| +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Device /
Platform
LinuxWindowsmacOSAndroid
x86_64
CPU
+ onnxruntime
+ pplnn
+ ncnn
+ LibTorch
+ OpenVINO
+ TVM
+
+ onnxruntime
+ OpenVINO
+ ncnn
+
+ - + + - +
ARM
CPU
+ ncnn
+
+ - + + - + + ncnn
+
RISC-V + ncnn
+
+ - + + - + + - +
NVIDIA
GPU
+ onnxruntime
+ TensorRT
+ LibTorch
+ pplnn
+
+ onnxruntime
+ TensorRT
+
+ - + + - +
NVIDIA
Jetson
+ TensorRT
+
+ - + + - + + - +
Huawei
ascend310
+ CANN
+
+ - + + - + + - +
Rockchip + RKNN
+
+ - + + - + + - +
Apple M1 + - + + - + + CoreML
+
+ - +
Adreno
GPU
+ - + + - + + - + + SNPE
+ ncnn
+
Hexagon
DSP
+ - + + - + + - + + SNPE
+
+
### Efficient and scalable C/C++ SDK Framework @@ -130,7 +300,7 @@ Please read [getting_started](docs/en/get_started.md) for the basic usage of MMD - [How to do regression test](docs/en/07-developer-guide/regression_test.md) - Custom Backend Ops - [ncnn](docs/en/06-custom-ops/ncnn.md) - - [onnxruntime](docs/en/06-custom-ops/onnxruntime.md) + - [ONNXRuntime](docs/en/06-custom-ops/onnxruntime.md) - [tensorrt](docs/en/06-custom-ops/tensorrt.md) - [FAQ](docs/en/faq.md) - [Contributing](.github/CONTRIBUTING.md) diff --git a/README_zh-CN.md b/README_zh-CN.md index b9f8309943..4eeb06030b 100644 --- a/README_zh-CN.md +++ b/README_zh-CN.md @@ -66,18 +66,188 @@ MMDeploy 是 [OpenMMLab](https://openmmlab.com/) 模型部署工具箱,**为 支持的设备平台和推理引擎如下表所示。benchmark请参考[这里](docs/zh_cn/03-benchmark/benchmark.md) -| Device / Platform | Linux | Windows | macOS | Android | -| ----------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| x86_64 CPU | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-ort.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-ort.yml)ONNXRuntime
[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-pplnn.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-pplnn.yml)pplnn
[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-ncnn.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-ncnn.yml)ncnn
[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-torchscript.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-torchscript.yml)LibTorch
![](https://img.shields.io/badge/build-no%20status-lightgrey)OpenVINO
![](https://img.shields.io/badge/build-no%20status-lightgrey)TVM
| [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/build.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml)ONNXRuntime
![](https://img.shields.io/badge/build-no%20status-lightgrey)OpenVINO
[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/build.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml)ncnn
| - | - | -| ARM CPU | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/build.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml)ncnn
| - | - | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-rknn.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-rknn.yml)ncnn
| -| RISC-V | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/linux-riscv64-gcc.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/linux-riscv64-gcc.yml)ncnn
| - | - | - | -| NVIDIA GPU | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/build.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml)ONNXRuntime
[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/build.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml)TensorRT
![](https://img.shields.io/badge/build-no%20status-lightgrey)LibTorch
[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-pplnn.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-pplnn.yml)pplnn
| [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/build.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml)ONNXRuntime
[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/build.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml)TensorRT
| - | - | -| NVIDIA Jetson | ![](https://img.shields.io/badge/build-no%20status-lightgrey)TensorRT
| - | - | - | -| Huawei ascend310 | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-ascend.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-ascend.yml)CANN
| - | - | - | -| Rockchip | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-rknn.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-rknn.yml)RKNN
| - | - | - | -| Apple M1 | - | - | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-coreml.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-coreml.yml)CoreML
| - | -| Adreno GPU | - | - | - | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-snpe.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-snpe.yml)SNPE
[![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-rknn.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-rknn.yml)ncnn
| -| Hexagon DSP | - | - | - | [![](https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-snpe.yml)](https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-snpe.yml)SNPE
| +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Device /
Platform
LinuxWindowsmacOSAndroid
x86_64
CPU
+ onnxruntime
+ pplnn
+ ncnn
+ LibTorch
+ OpenVINO
+ TVM
+
+ onnxruntime
+ OpenVINO
+ ncnn
+
+ - + + - +
ARM
CPU
+ ncnn
+
+ - + + - + + ncnn
+
RISC-V + ncnn
+
+ - + + - + + - +
NVIDIA
GPU
+ onnxruntime
+ TensorRT
+ LibTorch
+ pplnn
+
+ onnxruntime
+ TensorRT
+
+ - + + - +
NVIDIA
Jetson
+ TensorRT
+
+ - + + - + + - +
Huawei
ascend310
+ CANN
+
+ - + + - + + - +
Rockchip + RKNN
+
+ - + + - + + - +
Apple M1 + - + + - + + CoreML
+
+ - +
Adreno
GPU
+ - + + - + + - + + SNPE
+ ncnn
+
Hexagon
DSP
+ - + + - + + - + + SNPE
+
+
### SDK 可高度定制化