From abf89159d8f39d5c8908315093531ecee90ad1a5 Mon Sep 17 00:00:00 2001
From: RunningLeon
Date: Tue, 8 Nov 2022 15:22:05 +0800
Subject: [PATCH 1/6] add md link check action

---
 .github/workflows/lint.yml | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 07320fe5a2..369474e9e5 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -27,6 +27,14 @@ jobs:
         run: |
           python .github/scripts/check_index_rst.py docs/en/index.rst
           python .github/scripts/check_index_rst.py docs/zh_cn/index.rst
+      - name: Check markdown link
+        uses: gaurav-nelson/github-action-markdown-link-check@v1
+        with:
+          use-quiet-mode: 'yes'
+          use-verbose-mode: 'yes'
+#          check-modified-files-only: 'yes'
+          folder-path: 'docs/en, docs/zh_cn'
+          file-path: './README.md, ./LICENSE, ./README_zh-CN.md'
      - name: Check doc link
        run: |
          python .github/scripts/doc_link_checker.py --target docs/zh_cn
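The new CI step above delegates the actual checking to the third-party `gaurav-nelson/github-action-markdown-link-check` action. For a rough sense of what that step does, here is a minimal local sketch; it is not part of the patch, and the file globbing and 20-second timeout are assumptions. It extracts inline Markdown links from the same `folder-path` targets and probes each HTTP(S) URL with only the Python standard library:

```python
import re
import urllib.request
from pathlib import Path

# Matches inline Markdown links such as [text](https://example.com).
LINK_RE = re.compile(r"\[[^\]]*\]\((https?://[^)\s]+)\)")

def check_markdown_links(root: str) -> None:
    for md in Path(root).rglob("*.md"):
        for url in LINK_RE.findall(md.read_text(encoding="utf-8")):
            req = urllib.request.Request(url, method="HEAD")
            try:
                urllib.request.urlopen(req, timeout=20)
            except Exception as err:  # HTTPError, URLError, timeout, ...
                print(f"{md}: {url} -> {err}")

for folder in ("docs/en", "docs/zh_cn"):
    check_markdown_links(folder)
```

Unlike this sketch, the real action also retries on HTTP 429 and honors ignore rules; that is exactly what the configuration file introduced in the next patch is for.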
From 6c9575f783ae8fd62800b07eb1e54a356f722fcd Mon Sep 17 00:00:00 2001
From: RunningLeon
Date: Tue, 8 Nov 2022 15:44:44 +0800
Subject: [PATCH 2/6] add config

---
 .github/md-link-config.json | 23 +++++++++++++++++++++++
 .github/workflows/lint.yml  |  1 +
 2 files changed, 24 insertions(+)
 create mode 100644 .github/md-link-config.json

diff --git a/.github/md-link-config.json b/.github/md-link-config.json
new file mode 100644
index 0000000000..e68593a030
--- /dev/null
+++ b/.github/md-link-config.json
@@ -0,0 +1,23 @@
+{
+  "ignorePatterns": [
+    {
+      "pattern": "^https://developer.nvidia.com//"
+    },
+    {
+      "pattern": "^http://localhost"
+    }
+  ],
+  "httpHeaders": [
+    {
+      "urls": ["https://github.com/", "https://guides.github.com/", "https://help.github.com/", "https://docs.github.com/"],
+      "headers": {
+        "Accept-Encoding": "zstd, br, gzip, deflate"
+      }
+    }
+  ],
+  "timeout": "20s",
+  "retryOn429": true,
+  "retryCount": 5,
+  "fallbackRetryDelay": "30s",
+  "aliveStatusCodes": [200, 206, 429]
+}
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 369474e9e5..c3045171ef 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -33,6 +33,7 @@ jobs:
           use-quiet-mode: 'yes'
           use-verbose-mode: 'yes'
 #          check-modified-files-only: 'yes'
+          config-file: '.github/md-link-config.json'
           folder-path: 'docs/en, docs/zh_cn'
           file-path: './README.md, ./LICENSE, ./README_zh-CN.md'
       - name: Check doc link
         run: |
           python .github/scripts/doc_link_checker.py --target docs/zh_cn
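Each entry in `ignorePatterns` is a regular expression matched against the link URL, so the effect of the rules above can be previewed directly. The snippet below is a hypothetical check, not part of the patch. Note the double slash in the NVIDIA pattern: as written it only skips URLs that literally contain `developer.nvidia.com//`, which is why a later patch in this series relaxes it to a single slash:

```python
import re

ignore_patterns = [
    r"^https://developer.nvidia.com//",  # double slash: matches very little
    r"^http://localhost",
]

urls = [
    "https://developer.nvidia.com/cuda-toolkit",   # NOT ignored: single slash
    "https://developer.nvidia.com//cuda-toolkit",  # ignored
    "http://localhost:8080/docs",                  # ignored
    "https://github.com/open-mmlab/mmdeploy",       # still checked
]

for url in urls:
    ignored = any(re.match(p, url) for p in ignore_patterns)
    print("ignore" if ignored else "check ", url)
```

The `aliveStatusCodes` list plays a similar role on the response side: 429 ("too many requests") is treated as alive, and together with `retryOn429` it keeps rate-limited hosts such as GitHub from failing the build.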
From 1f7f8837837612ea717848cced7c4019ae5382aa Mon Sep 17 00:00:00 2001
From: RunningLeon
Date: Tue, 8 Nov 2022 16:27:46 +0800
Subject: [PATCH 3/6] fix doc link

---
 .github/md-link-config.json                     | 6 ++++++
 docs/en/01-how-to-build/jetsons.md              | 2 +-
 docs/en/04-supported-codebases/mmedit.md        | 4 ++--
 docs/en/05-supported-backends/onnxruntime.md    | 2 +-
 docs/zh_cn/05-supported-backends/onnxruntime.md | 2 +-
 docs/zh_cn/05-supported-backends/openvino.md    | 2 +-
 6 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/.github/md-link-config.json b/.github/md-link-config.json
index e68593a030..34d874e98e 100644
--- a/.github/md-link-config.json
+++ b/.github/md-link-config.json
@@ -3,6 +3,12 @@
     {
       "pattern": "^https://developer.nvidia.com//"
     },
+    {
+      "pattern": "^https://developer.android.com/"
+    },
+    {
+      "pattern": "^https://developer.qualcomm.com/"
+    },
     {
       "pattern": "^http://localhost"
     }

diff --git a/docs/en/01-how-to-build/jetsons.md b/docs/en/01-how-to-build/jetsons.md
index 9786d04c97..185e7380ab 100644
--- a/docs/en/01-how-to-build/jetsons.md
+++ b/docs/en/01-how-to-build/jetsons.md
@@ -289,7 +289,7 @@ pip install -r requirements/build.txt
 pip install -v -e . # or "python setup.py develop"
 ```

-1. Follow [this document](../02-how-to-run/convert_model.md) on how to convert model files.
+2. Follow [this document](docs/en/02-how-to-run/convert_model.md) on how to convert model files.

 For this example, we have used [retinanet_r18_fpn_1x_coco.py](https://github.com/open-mmlab/mmdetection/blob/3.x/configs/retinanet/retinanet_r18_fpn_1x_coco.py) as the model config, and [this file](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x_coco/retinanet_r18_fpn_1x_coco_20220407_171055-614fd399.pth) as the corresponding checkpoint file. Also for deploy config, we have used [detection_tensorrt_dynamic-320x320-1344x1344.py](https://github.com/open-mmlab/mmdeploy/blob/master/configs/mmdet/detection/detection_tensorrt_dynamic-320x320-1344x1344.py)

diff --git a/docs/en/04-supported-codebases/mmedit.md b/docs/en/04-supported-codebases/mmedit.md
index 7abd08a2e7..c72a1d201e 100644
--- a/docs/en/04-supported-codebases/mmedit.md
+++ b/docs/en/04-supported-codebases/mmedit.md
@@ -188,8 +188,8 @@ Besides python API, mmdeploy SDK also provides other FFI (Foreign Function Inter
 | [SRCNN](https://github.com/open-mmlab/mmediting/tree/1.x/configs/srcnn) | super-resolution | Y | Y | Y | Y | Y |
 | [ESRGAN](https://github.com/open-mmlab/mmediting/tree/1.x/configs/esrgan) | super-resolution | Y | Y | Y | Y | Y |
 | [ESRGAN-PSNR](https://github.com/open-mmlab/mmediting/tree/1.x/configs/esrgan) | super-resolution | Y | Y | Y | Y | Y |
-| [SRGAN](https://github.com/open-mmlab/mmediting/tree/1.x/configs/srresnet_srgan) | super-resolution | Y | Y | Y | Y | Y |
-| [SRResNet](https://github.com/open-mmlab/mmediting/tree/1.x/configs/srresnet_srgan) | super-resolution | Y | Y | Y | Y | Y |
+| [SRGAN](https://github.com/open-mmlab/mmediting/tree/1.x/configs/srgan_resnet) | super-resolution | Y | Y | Y | Y | Y |
+| [SRResNet](https://github.com/open-mmlab/mmediting/tree/1.x/configs/srgan_resnet) | super-resolution | Y | Y | Y | Y | Y |
 | [Real-ESRGAN](https://github.com/open-mmlab/mmediting/tree/1.x/configs/real_esrgan) | super-resolution | Y | Y | Y | Y | Y |
 | [EDSR](https://github.com/open-mmlab/mmediting/tree/1.x/configs/edsr) | super-resolution | Y | Y | Y | N | Y |
 | [RDN](https://github.com/open-mmlab/mmediting/tree/1.x/configs/rdn) | super-resolution | Y | Y | Y | Y | Y |

diff --git a/docs/en/05-supported-backends/onnxruntime.md b/docs/en/05-supported-backends/onnxruntime.md
index 10b1714bd2..51a134bd04 100644
--- a/docs/en/05-supported-backends/onnxruntime.md
+++ b/docs/en/05-supported-backends/onnxruntime.md
@@ -63,4 +63,4 @@ Take custom operator `roi_align` for example.
 ## References

 - [How to export Pytorch model with custom op to ONNX and run it in ONNX Runtime](https://github.com/onnx/tutorials/blob/master/PyTorchCustomOperator/README.md)
-- [How to add a custom operator/kernel in ONNX Runtime](https://github.com/microsoft/onnxruntime/blob/master/docs/AddingCustomOp.md)
+- [How to add a custom operator/kernel in ONNX Runtime](https://onnxruntime.ai/docs/reference/operators/add-custom-op.html)

diff --git a/docs/zh_cn/05-supported-backends/onnxruntime.md b/docs/zh_cn/05-supported-backends/onnxruntime.md
index 2d109dcfe6..4b3a25e4d0 100644
--- a/docs/zh_cn/05-supported-backends/onnxruntime.md
+++ b/docs/zh_cn/05-supported-backends/onnxruntime.md
@@ -63,4 +63,4 @@ Take custom operator `roi_align` for example.
 ## References

 - [How to export Pytorch model with custom op to ONNX and run it in ONNX Runtime](https://github.com/onnx/tutorials/blob/master/PyTorchCustomOperator/README.md)
-- [How to add a custom operator/kernel in ONNX Runtime](https://github.com/microsoft/onnxruntime/blob/master/docs/AddingCustomOp.md)
+- [How to add a custom operator/kernel in ONNX Runtime](https://onnxruntime.ai/docs/reference/operators/add-custom-op.html)

diff --git a/docs/zh_cn/05-supported-backends/openvino.md b/docs/zh_cn/05-supported-backends/openvino.md
index 9eccc3cc44..dc4188fcb0 100644
--- a/docs/zh_cn/05-supported-backends/openvino.md
+++ b/docs/zh_cn/05-supported-backends/openvino.md
@@ -17,7 +17,7 @@ pip install openvino-dev
 3. Install MMDeploy following the [instructions](../01-how-to-build/build_from_source.md).

-To work with models from [MMDetection](https://github.com/open-mmlab/mmdetection/blob/master/docs/get_started.md), you may need to install it additionally.
+To work with models from [MMDetection](https://github.com/open-mmlab/mmdetection/blob/3.x/docs/en/get_started.md), you may need to install it additionally.

 ## Usage
From 281414f454319915cca5981229650d2a0b9c1d33 Mon Sep 17 00:00:00 2001
From: RunningLeon
Date: Thu, 1 Dec 2022 17:57:53 +0800
Subject: [PATCH 4/6] fix dead links

---
 .github/ISSUE_TEMPLATE/1-bug-report.yml            | 2 +-
 .github/ISSUE_TEMPLATE/config.yml                  | 2 +-
 .github/md-link-config.json                        | 6 +++++-
 README.md                                          | 2 +-
 README_zh-CN.md                                    | 4 ++--
 demo/tutorials/tutorials_1.ipynb                   | 2 +-
 docs/en/01-how-to-build/jetsons.md                 | 4 ++--
 docs/en/01-how-to-build/rockchip.md                | 4 ++--
 docs/en/03-benchmark/supported_models.md           | 4 ++--
 docs/en/04-supported-codebases/mmaction2.md        | 6 +++---
 docs/en/04-supported-codebases/mmcls.md            | 6 +++---
 docs/en/04-supported-codebases/mmdet.md            | 6 +++---
 docs/en/04-supported-codebases/mmedit.md           | 6 +++---
 docs/en/04-supported-codebases/mmocr.md            | 8 ++++----
 docs/en/04-supported-codebases/mmpose.md           | 6 +++---
 docs/en/04-supported-codebases/mmseg.md            | 8 ++++----
 docs/en/05-supported-backends/openvino.md          | 2 +-
 docs/en/05-supported-backends/rknn.md              | 2 +-
 docs/en/get_started.md                             | 6 +++---
 docs/zh_cn/01-how-to-build/rockchip.md             | 4 ++--
 docs/zh_cn/03-benchmark/supported_models.md        | 4 ++--
 docs/zh_cn/04-supported-codebases/mmaction2.md     | 6 +++---
 docs/zh_cn/04-supported-codebases/mmcls.md         | 4 ++--
 docs/zh_cn/04-supported-codebases/mmdet.md         | 4 ++--
 docs/zh_cn/04-supported-codebases/mmedit.md        | 8 ++++----
 docs/zh_cn/04-supported-codebases/mmocr.md         | 6 +++---
 docs/zh_cn/04-supported-codebases/mmpose.md        | 4 ++--
 docs/zh_cn/04-supported-codebases/mmseg.md         | 6 +++---
 docs/zh_cn/05-supported-backends/rknn.md           | 2 +-
 docs/zh_cn/get_started.md                          | 6 +++---
 docs/zh_cn/tutorial/02_challenges.md               | 8 ++++----
 docs/zh_cn/tutorial/06_introduction_to_tensorrt.md | 2 +-
 32 files changed, 77 insertions(+), 73 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/1-bug-report.yml b/.github/ISSUE_TEMPLATE/1-bug-report.yml
index bf69247892..b1ed89775e 100644
--- a/.github/ISSUE_TEMPLATE/1-bug-report.yml
+++ b/.github/ISSUE_TEMPLATE/1-bug-report.yml
@@ -9,7 +9,7 @@ body:
       label: Checklist
       options:
         - label: I have searched related issues but cannot get the expected help.
-        - label: 2. I have read the [FAQ documentation](https://github.com/open-mmlab/mmdeploy/blob/master/docs/en/faq.md) but cannot get the expected help.
+        - label: 2. I have read the [FAQ documentation](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/faq.md) but cannot get the expected help.
         - label: 3. The bug has not been fixed in the latest version.
    - type: textarea
      attributes:

diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
index bcf07914af..94287d30f6 100644
--- a/.github/ISSUE_TEMPLATE/config.yml
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -2,7 +2,7 @@ blank_issues_enabled: false
 contact_links:
   - name: 💥 FAQ
-    url: https://github.com/open-mmlab/mmdeploy/blob/master/docs/en/faq.md
+    url: https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/faq.md
     about: Check if your issue already has solutions
   - name: 💬 Forum
     url: https://github.com/open-mmlab/mmdeploy/discussions

diff --git a/.github/md-link-config.json b/.github/md-link-config.json
index 34d874e98e..76986cbd01 100644
--- a/.github/md-link-config.json
+++ b/.github/md-link-config.json
@@ -1,7 +1,11 @@
 {
   "ignorePatterns": [
+
     {
-      "pattern": "^https://developer.nvidia.com//"
+      "pattern": "^https://developer.nvidia.com/"
+    },
+    {
+      "pattern": "^https://docs.openvino.ai/"
     },
     {
       "pattern": "^https://developer.android.com/"
     }

diff --git a/README.md b/README.md
index e2a8f72a9f..bf968502a0 100644
--- a/README.md
+++ b/README.md
@@ -22,7 +22,7 @@
 [![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmdeploy.readthedocs.io/en/latest/)
 [![badge](https://github.com/open-mmlab/mmdeploy/workflows/build/badge.svg)](https://github.com/open-mmlab/mmdeploy/actions)
 [![codecov](https://codecov.io/gh/open-mmlab/mmdeploy/branch/master/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmdeploy)
-[![license](https://img.shields.io/github/license/open-mmlab/mmdeploy.svg)](https://github.com/open-mmlab/mmdeploy/blob/master/LICENSE)
+[![license](https://img.shields.io/github/license/open-mmlab/mmdeploy.svg)](https://github.com/open-mmlab/mmdeploy/tree/1.x/LICENSE)
 [![issue resolution](https://img.shields.io/github/issues-closed-raw/open-mmlab/mmdeploy)](https://github.com/open-mmlab/mmdeploy/issues)
 [![open issues](https://img.shields.io/github/issues-raw/open-mmlab/mmdeploy)](https://github.com/open-mmlab/mmdeploy/issues)

diff --git a/README_zh-CN.md b/README_zh-CN.md
index e71cdf4ee0..aa6f6f2c4e 100644
--- a/README_zh-CN.md
+++ b/README_zh-CN.md
@@ -22,7 +22,7 @@
 [![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmdeploy.readthedocs.io/zh_CN/latest/)
 [![badge](https://github.com/open-mmlab/mmdeploy/workflows/build/badge.svg)](https://github.com/open-mmlab/mmdeploy/actions)
 [![codecov](https://codecov.io/gh/open-mmlab/mmdeploy/branch/master/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmdeploy)
-[![license](https://img.shields.io/github/license/open-mmlab/mmdeploy.svg)](https://github.com/open-mmlab/mmdeploy/blob/master/LICENSE)
+[![license](https://img.shields.io/github/license/open-mmlab/mmdeploy.svg)](https://github.com/open-mmlab/mmdeploy/tree/1.x/LICENSE)
 [![issue resolution](https://img.shields.io/github/issues-closed-raw/open-mmlab/mmdeploy)](https://github.com/open-mmlab/mmdeploy/issues)
 [![open issues](https://img.shields.io/github/issues-raw/open-mmlab/mmdeploy)](https://github.com/open-mmlab/mmdeploy/issues)
@@ -119,7 +119,7 @@ MMDeploy 是 [OpenMMLab](https://openmmlab.com/) 模型部署工具箱,**为
 ## 基准与模型库

-基准和支持的模型列表可以在[基准](https://mmdeploy.readthedocs.io/zh_CN/latest/benchmark.html)和[模型列表](https://mmdeploy.readthedocs.io/en/latest/supported_models.html)中获得。
+基准和支持的模型列表可以在[基准](https://mmdeploy.readthedocs.io/zh_CN/dev-1.x/03-benchmark/benchmark.html)和[模型列表](https://mmdeploy.readthedocs.io/en/dev-1.x/03-benchmark/supported_models.html)中获得。

 ## 贡献指南

diff --git a/demo/tutorials/tutorials_1.ipynb b/demo/tutorials/tutorials_1.ipynb
index 1ea0a5fafa..c5644ae00e 100755
--- a/demo/tutorials/tutorials_1.ipynb
+++ b/demo/tutorials/tutorials_1.ipynb
@@ -6,7 +6,7 @@
    "id": "mAWHDEbr6Q2i"
   },
   "source": [
-    "[![Open in colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/open-mmlab/mmdeploy/blob/master/demo/tutorials_1.ipynb)\n",
+    "[![Open in colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/open-mmlab/mmdeploy/tree/1.x/demo/tutorials_1.ipynb)\n",
     "# 前言\n",
     "OpenMMLab 的算法如何部署?是很多社区用户的困惑。而模型部署工具箱 [MMDeploy](https://zhuanlan.zhihu.com/p/450342651) 的开源,强势打通了从算法模型到应用程序这 \"最后一公里\"!\n",
     "今天我们将开启模型部署入门系列教程,在模型部署开源库 MMDeploy 的辅助下,介绍以下内容:\n",

diff --git a/docs/en/01-how-to-build/jetsons.md b/docs/en/01-how-to-build/jetsons.md
index 185e7380ab..a193b05714 100644
--- a/docs/en/01-how-to-build/jetsons.md
+++ b/docs/en/01-how-to-build/jetsons.md
@@ -289,9 +289,9 @@ pip install -r requirements/build.txt
 pip install -v -e . # or "python setup.py develop"
 ```

-2. Follow [this document](docs/en/02-how-to-run/convert_model.md) on how to convert model files.
+2. Follow [this document](../02-how-to-run/convert_model.md) on how to convert model files.

-For this example, we have used [retinanet_r18_fpn_1x_coco.py](https://github.com/open-mmlab/mmdetection/blob/3.x/configs/retinanet/retinanet_r18_fpn_1x_coco.py) as the model config, and [this file](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x_coco/retinanet_r18_fpn_1x_coco_20220407_171055-614fd399.pth) as the corresponding checkpoint file. Also for deploy config, we have used [detection_tensorrt_dynamic-320x320-1344x1344.py](https://github.com/open-mmlab/mmdeploy/blob/master/configs/mmdet/detection/detection_tensorrt_dynamic-320x320-1344x1344.py)
+For this example, we have used [retinanet_r18_fpn_1x_coco.py](https://github.com/open-mmlab/mmdetection/blob/3.x/configs/retinanet/retinanet_r18_fpn_1x_coco.py) as the model config, and [this file](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x_coco/retinanet_r18_fpn_1x_coco_20220407_171055-614fd399.pth) as the corresponding checkpoint file. Also for deploy config, we have used [detection_tensorrt_dynamic-320x320-1344x1344.py](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmdet/detection/detection_tensorrt_dynamic-320x320-1344x1344.py)

 ```shell
 python ./tools/deploy.py \

diff --git a/docs/en/01-how-to-build/rockchip.md b/docs/en/01-how-to-build/rockchip.md
index 74b4b40d4a..ae86bbf101 100644
--- a/docs/en/01-how-to-build/rockchip.md
+++ b/docs/en/01-how-to-build/rockchip.md
@@ -158,7 +158,7 @@ label: 65, score: 0.95
 - MMDet models.

-  YOLOV3 & YOLOX: you may paste the following partition configuration into [detection_rknn_static-320x320.py](https://github.com/open-mmlab/mmdeploy/blob/master/configs/mmdet/detection/detection_rknn_static-320x320.py):
+  YOLOV3 & YOLOX: you may paste the following partition configuration into [detection_rknn_static-320x320.py](https://github.com/open-mmlab/mmdeploy/blob/1.x/configs/mmdet/detection/detection_rknn_static-320x320.py):

   ```python
   # yolov3, yolox
   partition_config = dict(
       type='rknn',  # the partition policy name
       apply_marks=True,  # should always be set to True
       partition_cfg=[
           dict(
               save_file='model.onnx',  # name to save the partitioned onnx
       ])
   ```

-  RetinaNet & SSD & FSAF with rknn-toolkit2, you may paste the following partition configuration into [detection_rknn_static-320x320.py](https://github.com/open-mmlab/mmdeploy/blob/master/configs/mmdet/detection/detection_rknn_static-320x320.py). Users with rknn-toolkit can directly use default config.
+  RetinaNet & SSD & FSAF with rknn-toolkit2, you may paste the following partition configuration into [detection_rknn_static-320x320.py](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmdet/detection/detection_rknn_static-320x320.py). Users with rknn-toolkit can directly use default config.

   ```python
   # retinanet, ssd

diff --git a/docs/en/03-benchmark/supported_models.md b/docs/en/03-benchmark/supported_models.md
index 46383d6092..140da9a383 100644
--- a/docs/en/03-benchmark/supported_models.md
+++ b/docs/en/03-benchmark/supported_models.md
@@ -61,8 +61,8 @@ The table below lists the models that are guaranteed to be exportable to other b
 | [Segmenter](https://github.com/open-mmlab/mmsegmentation/tree/1.x/configs/segmenter) [\*static](#note) | MMSegmentation | Y | Y | Y | Y | N | Y | N | N |
 | [SRCNN](https://github.com/open-mmlab/mmediting/tree/1.x/configs/srcnn) | MMEditing | Y | Y | Y | Y | Y | Y | N | N |
 | [ESRGAN](https://github.com/open-mmlab/mmediting/tree/1.x/configs/esrgan) | MMEditing | Y | Y | Y | Y | Y | Y | N | N |
-| [SRGAN](https://github.com/open-mmlab/mmediting/tree/1.x/configs/srresnet_srgan) | MMEditing | Y | Y | Y | Y | Y | Y | N | N |
-| [SRResNet](https://github.com/open-mmlab/mmediting/tree/1.x/configs/srresnet_srgan) | MMEditing | Y | Y | Y | Y | Y | Y | N | N |
+| [SRGAN](https://github.com/open-mmlab/mmediting/tree/1.x/configs/srgan_resnet) | MMEditing | Y | Y | Y | Y | Y | Y | N | N |
+| [SRResNet](https://github.com/open-mmlab/mmediting/tree/1.x/configs/srgan_resnet) | MMEditing | Y | Y | Y | Y | Y | Y | N | N |
 | [Real-ESRGAN](https://github.com/open-mmlab/mmediting/tree/1.x/configs/real_esrgan) | MMEditing | Y | Y | Y | Y | Y | Y | N | N |
 | [EDSR](https://github.com/open-mmlab/mmediting/tree/1.x/configs/edsr) | MMEditing | Y | Y | Y | Y | N | Y | N | N |
 | [RDN](https://github.com/open-mmlab/mmediting/tree/1.x/configs/rdn) | MMEditing | Y | Y | Y | Y | Y | Y | N | N |

diff --git a/docs/en/04-supported-codebases/mmaction2.md b/docs/en/04-supported-codebases/mmaction2.md
index b7f489212d..fbd4202772 100644
--- a/docs/en/04-supported-codebases/mmaction2.md
+++ b/docs/en/04-supported-codebases/mmaction2.md
@@ -50,9 +50,9 @@ If neither **I** nor **II** meets your requirements, [building mmdeploy from sou
 ## Convert model

-You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/tools/deploy.py) to convert mmaction2 models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/blob/master/docs/en/02-how-to-run/convert_model.md#usage).
+You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/tools/deploy.py) to convert mmaction2 models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage).

-When using `tools/deploy.py`, it is crucial to specify the correct deployment config. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/configs/mmaction) of all supported backends for mmaction2, under which the config file path follows the pattern:
+When using `tools/deploy.py`, it is crucial to specify the correct deployment config. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmaction) of all supported backends for mmaction2, under which the config file path follows the pattern:

 ```
 {task}/{task}_{backend}-{precision}_{static | dynamic}_{shape}.py
@@ -178,7 +178,7 @@ for label_id, score in result:
     print(label_id, score)
 ```

-Besides python API, mmdeploy SDK also provides other FFI (Foreign Function Interface), such as C, C++, C#, Java and so on. You can learn their usage from [demos](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/demo).
+Besides python API, mmdeploy SDK also provides other FFI (Foreign Function Interface), such as C, C++, C#, Java and so on. You can learn their usage from [demos](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo).

 > MMAction2 only API of c, c++ and python for now.

diff --git a/docs/en/04-supported-codebases/mmcls.md b/docs/en/04-supported-codebases/mmcls.md
index 6cdf17ad15..cb31337bdc 100644
--- a/docs/en/04-supported-codebases/mmcls.md
+++ b/docs/en/04-supported-codebases/mmcls.md
@@ -70,7 +70,7 @@ python tools/deploy.py \
     --dump-info
 ```

-It is crucial to specify the correct deployment config during model conversion. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/configs/mmcls) of all supported backends for mmclassification. The config filename pattern is:
+It is crucial to specify the correct deployment config during model conversion. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmcls) of all supported backends for mmclassification. The config filename pattern is:

 ```
 classification_{backend}-{precision}_{static | dynamic}_{shape}.py
@@ -81,7 +81,7 @@ classification_{backend}-{precision}_{static | dynamic}_{shape}.py
 - **{static | dynamic}:** static shape or dynamic shape
 - **{shape}:** input shape or shape range of a model

-Therefore, in the above example, you can also convert `resnet18` to other backend models by changing the deployment config file `classification_onnxruntime_dynamic.py` to [others](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/configs/mmcls), e.g., converting to tensorrt-fp16 model by `classification_tensorrt-fp16_dynamic-224x224-224x224.py`.
+Therefore, in the above example, you can also convert `resnet18` to other backend models by changing the deployment config file `classification_onnxruntime_dynamic.py` to [others](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmcls), e.g., converting to tensorrt-fp16 model by `classification_tensorrt-fp16_dynamic-224x224-224x224.py`.

 ```{tip}
 When converting mmcls models to tensorrt models, --device should be set to "cuda"
@@ -168,7 +168,7 @@ for label_id, score in result:
     print(label_id, score)
 ```

-Besides python API, mmdeploy SDK also provides other FFI (Foreign Function Interface), such as C, C++, C#, Java and so on. You can learn their usage from [demos](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/demo).
+Besides python API, mmdeploy SDK also provides other FFI (Foreign Function Interface), such as C, C++, C#, Java and so on. You can learn their usage from [demos](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo).

 ## Supported models

diff --git a/docs/en/04-supported-codebases/mmdet.md b/docs/en/04-supported-codebases/mmdet.md
index 2ce007346e..21aa12dbff 100644
--- a/docs/en/04-supported-codebases/mmdet.md
+++ b/docs/en/04-supported-codebases/mmdet.md
@@ -68,7 +68,7 @@ python tools/deploy.py \
     --dump-info
 ```

-It is crucial to specify the correct deployment config during model conversion. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/configs/mmdet) of all supported backends for mmdetection, under which the config file path follows the pattern:
+It is crucial to specify the correct deployment config during model conversion. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmdet) of all supported backends for mmdetection, under which the config file path follows the pattern:

 ```
 {task}/{task}_{backend}-{precision}_{static | dynamic}_{shape}.py
@@ -90,7 +90,7 @@ It is crucial to specify the correct deployment config during model conversion.
 - **{shape}:** input shape or shape range of a model

-Therefore, in the above example, you can also convert `faster r-cnn` to other backend models by changing the deployment config file `detection_onnxruntime_dynamic.py` to [others](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/configs/mmdet/detection), e.g., converting to tensorrt-fp16 model by `detection_tensorrt-fp16_dynamic-320x320-1344x1344.py`.
+Therefore, in the above example, you can also convert `faster r-cnn` to other backend models by changing the deployment config file `detection_onnxruntime_dynamic.py` to [others](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmdet/detection), e.g., converting to tensorrt-fp16 model by `detection_tensorrt-fp16_dynamic-320x320-1344x1344.py`.

 ```{tip}
 When converting mmdet models to tensorrt models, --device should be set to "cuda"
@@ -185,7 +185,7 @@ for index, bbox, label_id in zip(indices, bboxes, labels):
 cv2.imwrite('output_detection.png', img)
 ```

-Besides python API, mmdeploy SDK also provides other FFI (Foreign Function Interface), such as C, C++, C#, Java and so on. You can learn their usage from [demos](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/demo).
+Besides python API, mmdeploy SDK also provides other FFI (Foreign Function Interface), such as C, C++, C#, Java and so on. You can learn their usage from [demos](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo).

 ## Supported models

diff --git a/docs/en/04-supported-codebases/mmedit.md b/docs/en/04-supported-codebases/mmedit.md
index c72a1d201e..0137dd385d 100644
--- a/docs/en/04-supported-codebases/mmedit.md
+++ b/docs/en/04-supported-codebases/mmedit.md
@@ -50,7 +50,7 @@ If neither **I** nor **II** meets your requirements, [building mmdeploy from sou
 You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/tools/deploy.py) to convert mmedit models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/docs/en/02-how-to-run/convert_model.md#usage).

-When using `tools/deploy.py`, it is crucial to specify the correct deployment config. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/configs/mmedit) of all supported backends for mmedit, under which the config file path follows the pattern:
+When using `tools/deploy.py`, it is crucial to specify the correct deployment config. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmedit) of all supported backends for mmedit, under which the config file path follows the pattern:

 ```
 {task}/{task}_{backend}-{precision}_{static | dynamic}_{shape}.py
@@ -90,7 +90,7 @@ python tools/deploy.py \
     --dump-info
 ```

-You can also convert the above model to other backend models by changing the deployment config file `*_onnxruntime_dynamic.py` to [others](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/configs/mmedit), e.g., converting to tensorrt model by `super-resolution/super-resolution_tensorrt-_dynamic-32x32-512x512.py`.
+You can also convert the above model to other backend models by changing the deployment config file `*_onnxruntime_dynamic.py` to [others](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmedit), e.g., converting to tensorrt model by `super-resolution/super-resolution_tensorrt-_dynamic-32x32-512x512.py`.

 ```{tip}
 When converting mmedit models to tensorrt models, --device should be set to "cuda"
@@ -179,7 +179,7 @@ result = result[..., ::-1]
 cv2.imwrite('output_restorer.bmp', result)
 ```

-Besides python API, mmdeploy SDK also provides other FFI (Foreign Function Interface), such as C, C++, C#, Java and so on. You can learn their usage from [demos](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/demo).
+Besides python API, mmdeploy SDK also provides other FFI (Foreign Function Interface), such as C, C++, C#, Java and so on. You can learn their usage from [demos](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo).

 ## Supported models

diff --git a/docs/en/04-supported-codebases/mmocr.md b/docs/en/04-supported-codebases/mmocr.md
index 883322527e..23ef1d0ab1 100644
--- a/docs/en/04-supported-codebases/mmocr.md
+++ b/docs/en/04-supported-codebases/mmocr.md
@@ -49,9 +49,9 @@ If neither **I** nor **II** meets your requirements, [building mmdeploy from sou
 ## Convert model

-You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/tools/deploy.py) to convert mmocr models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/blob/master/docs/en/02-how-to-run/convert_model.md#usage).
+You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/tools/deploy.py) to convert mmocr models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage).

-When using `tools/deploy.py`, it is crucial to specify the correct deployment config. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/configs/mmocr) of all supported backends for mmocr, under which the config file path follows the pattern:
+When using `tools/deploy.py`, it is crucial to specify the correct deployment config. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmocr) of all supported backends for mmocr, under which the config file path follows the pattern:

 ```
 {task}/{task}_{backend}-{precision}_{static | dynamic}_{shape}.py
@@ -109,7 +109,7 @@ python tools/deploy.py \
     --dump-info
 ```

-You can also convert the above models to other backend models by changing the deployment config file `*_onnxruntime_dynamic.py` to [others](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/configs/mmocr), e.g., converting `dbnet` to tensorrt-fp32 model by `text-detection/text-detection_tensorrt-_dynamic-320x320-2240x2240.py`.
+You can also convert the above models to other backend models by changing the deployment config file `*_onnxruntime_dynamic.py` to [others](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmocr), e.g., converting `dbnet` to tensorrt-fp32 model by `text-detection/text-detection_tensorrt-_dynamic-320x320-2240x2240.py`.

 ```{tip}
 When converting mmocr models to tensorrt models, --device should be set to "cuda"
@@ -226,7 +226,7 @@ texts = recognizer(img)
 print(texts)
 ```

-Besides python API, mmdeploy SDK also provides other FFI (Foreign Function Interface), such as C, C++, C#, Java and so on. You can learn their usage from [demos](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/demo).
+Besides python API, mmdeploy SDK also provides other FFI (Foreign Function Interface), such as C, C++, C#, Java and so on. You can learn their usage from [demos](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo).

 ## Supported models

diff --git a/docs/en/04-supported-codebases/mmpose.md b/docs/en/04-supported-codebases/mmpose.md
index 690ce91f25..4521697da9 100644
--- a/docs/en/04-supported-codebases/mmpose.md
+++ b/docs/en/04-supported-codebases/mmpose.md
@@ -48,7 +48,7 @@ If neither **I** nor **II** meets your requirements, [building mmdeploy from sou
 ## Convert model

-You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/tools/deploy.py) to convert mmpose models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/blob/master/docs/en/02-how-to-run/convert_model.md#usage).
+You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/tools/deploy.py) to convert mmpose models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage).

 The command below shows an example about converting `hrnet` model to onnx model that can be inferred by ONNX Runtime.
@@ -67,7 +67,7 @@ python tools/deploy.py \
     --show
 ```

-It is crucial to specify the correct deployment config during model conversion. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/configs/mmpose) of all supported backends for mmpose. The config filename pattern is:
+It is crucial to specify the correct deployment config during model conversion. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmpose) of all supported backends for mmpose. The config filename pattern is:

 ```
 pose-detection_{backend}-{precision}_{static | dynamic}_{shape}.py
@@ -78,7 +78,7 @@ pose-detection_{backend}-{precision}_{static | dynamic}_{shape}.py
 - **{static | dynamic}:** static shape or dynamic shape
 - **{shape}:** input shape or shape range of a model

-Therefore, in the above example, you can also convert `hrnet` to other backend models by changing the deployment config file `pose-detection_onnxruntime_static.py` to [others](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/configs/mmpose), e.g., converting to tensorrt model by `pose-detection_tensorrt_static-256x192.py`.
+Therefore, in the above example, you can also convert `hrnet` to other backend models by changing the deployment config file `pose-detection_onnxruntime_static.py` to [others](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmpose), e.g., converting to tensorrt model by `pose-detection_tensorrt_static-256x192.py`.

 ```{tip}
 When converting mmpose models to tensorrt models, --device should be set to "cuda"

diff --git a/docs/en/04-supported-codebases/mmseg.md b/docs/en/04-supported-codebases/mmseg.md
index 75ff516291..72375797d2 100644
--- a/docs/en/04-supported-codebases/mmseg.md
+++ b/docs/en/04-supported-codebases/mmseg.md
@@ -54,7 +54,7 @@ If neither **I** nor **II** meets your requirements, [building mmdeploy from sou
 ## Convert model

-You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/tools/deploy.py) to convert mmseg models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/blob/master/docs/en/02-how-to-run/convert_model.md#usage).
+You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/tools/deploy.py) to convert mmseg models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage).

 The command below shows an example about converting `unet` model to onnx model that can be inferred by ONNX Runtime.
@@ -76,7 +76,7 @@ python tools/deploy.py \
     --dump-info
 ```

-It is crucial to specify the correct deployment config during model conversion. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/configs/mmseg) of all supported backends for mmsegmentation. The config filename pattern is:
+It is crucial to specify the correct deployment config during model conversion. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmseg) of all supported backends for mmsegmentation. The config filename pattern is:

 ```
 segmentation_{backend}-{precision}_{static | dynamic}_{shape}.py
@@ -87,7 +87,7 @@ segmentation_{backend}-{precision}_{static | dynamic}_{shape}.py
 - **{static | dynamic}:** static shape or dynamic shape
 - **{shape}:** input shape or shape range of a model

-Therefore, in the above example, you can also convert `unet` to other backend models by changing the deployment config file `segmentation_onnxruntime_dynamic.py` to [others](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/configs/mmseg), e.g., converting to tensorrt-fp16 model by `segmentation_tensorrt-fp16_dynamic-512x1024-2048x2048.py`.
+Therefore, in the above example, you can also convert `unet` to other backend models by changing the deployment config file `segmentation_onnxruntime_dynamic.py` to [others](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmseg), e.g., converting to tensorrt-fp16 model by `segmentation_tensorrt-fp16_dynamic-512x1024-2048x2048.py`.

 ```{tip}
 When converting mmseg models to tensorrt models, --device should be set to "cuda"
@@ -184,7 +184,7 @@ img = img.astype(np.uint8)
 cv2.imwrite('output_segmentation.png', img)
 ```

-Besides python API, mmdeploy SDK also provides other FFI (Foreign Function Interface), such as C, C++, C#, Java and so on. You can learn their usage from [demos](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/demo).
+Besides python API, mmdeploy SDK also provides other FFI (Foreign Function Interface), such as C, C++, C#, Java and so on. You can learn their usage from [demos](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo).

 ## Supported models

diff --git a/docs/en/05-supported-backends/openvino.md b/docs/en/05-supported-backends/openvino.md
index 44034b4e8a..e034cdf5ea 100644
--- a/docs/en/05-supported-backends/openvino.md
+++ b/docs/en/05-supported-backends/openvino.md
@@ -17,7 +17,7 @@ pip install openvino-dev
 3. Install MMDeploy following the [instructions](../01-how-to-build/build_from_source.md).

-To work with models from [MMDetection](https://github.com/open-mmlab/mmdetection/blob/master/docs/get_started.md), you may need to install it additionally.
+To work with models from [MMDetection](https://mmdetection.readthedocs.io/en/3.x/get_started.html), you may need to install it additionally.

 ## Usage

diff --git a/docs/en/05-supported-backends/rknn.md b/docs/en/05-supported-backends/rknn.md
index cc4b8cbe94..28cb10e948 100644
--- a/docs/en/05-supported-backends/rknn.md
+++ b/docs/en/05-supported-backends/rknn.md
@@ -2,7 +2,7 @@
 Currently, MMDeploy only tests rk3588 and rv1126 with linux platform.

-The following features cannot be automatically enabled by mmdeploy and you need to manually modify the configuration in MMDeploy like [here](https://github.com/open-mmlab/mmdeploy/blob/master/configs/_base_/backends/rknn.py).
+The following features cannot be automatically enabled by mmdeploy and you need to manually modify the configuration in MMDeploy like [here](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/_base_/backends/rknn.py).

 - target_platform other than default
 - quantization settings

diff --git a/docs/en/get_started.md b/docs/en/get_started.md
index 71059f2923..7698e84594 100644
--- a/docs/en/get_started.md
+++ b/docs/en/get_started.md
@@ -269,7 +269,7 @@ for index, bbox, label_id in zip(indices, bboxes, labels):
 cv2.imwrite('output_detection.png', img)
 ```

-You can find more examples from [here](https://github.com/open-mmlab/mmdeploy/tree/master/demo/python).
+You can find more examples from [here](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo/python).

 #### C++ API
@@ -321,9 +321,9 @@ find_package(MMDeploy REQUIRED)
 target_link_libraries(${name} PRIVATE mmdeploy ${OpenCV_LIBS})
 ```

-For more SDK C++ API usages, please read these [samples](https://github.com/open-mmlab/mmdeploy/tree/master/demo/csrc/cpp).
+For more SDK C++ API usages, please read these [samples](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo/csrc/cpp).

-For the rest C, C# and Java API usages, please read [C demos](https://github.com/open-mmlab/mmdeploy/tree/master/demo/csrc/c), [C# demos](https://github.com/open-mmlab/mmdeploy/tree/master/demo/csharp) and [Java demos](https://github.com/open-mmlab/mmdeploy/tree/master/demo/java) respectively.
+For the rest C, C# and Java API usages, please read [C demos](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo/csrc/c), [C# demos](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo/csharp) and [Java demos](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo/java) respectively.
 We'll talk about them more in our next release.

 #### Accelerate preprocessing(Experimental)

diff --git a/docs/zh_cn/01-how-to-build/rockchip.md b/docs/zh_cn/01-how-to-build/rockchip.md
index 111745c31a..55651ffc31 100644
--- a/docs/zh_cn/01-how-to-build/rockchip.md
+++ b/docs/zh_cn/01-how-to-build/rockchip.md
@@ -102,7 +102,7 @@ python tools/deploy.py \
 - YOLOV3 & YOLOX

-将下面的模型拆分配置写入到 [detection_rknn_static.py](https://github.com/open-mmlab/mmdeploy/blob/master/configs/mmdet/detection/detection_rknn_static.py)
+将下面的模型拆分配置写入到 [detection_rknn_static.py](https://github.com/open-mmlab/mmdeploy/blob/1.x/configs/mmdet/detection/detection_rknn_static-320x320.py)

 ```python
 # yolov3, yolox
 partition_config = dict(
     type='rknn',  # the partition policy name
     apply_marks=True,  # should always be set to True
     partition_cfg=[
         dict(
             save_file='model.onnx',  # name to save the partitioned onnx
@@ -132,7 +132,7 @@ python tools/deploy.py \
 - RetinaNet & SSD & FSAF with rknn-toolkit2

-将下面的模型拆分配置写入到 [detection_rknn_static.py](https://github.com/open-mmlab/mmdeploy/blob/master/configs/mmdet/detection/detection_rknn_static.py)。使用 rknn-toolkit 的用户则不用。
+将下面的模型拆分配置写入到 [detection_rknn_static.py](https://github.com/open-mmlab/mmdeploy/blob/1.x/configs/mmdet/detection/detection_rknn_static-320x320.py)。使用 rknn-toolkit 的用户则不用。

 ```python
 # retinanet, ssd

diff --git a/docs/zh_cn/03-benchmark/supported_models.md b/docs/zh_cn/03-benchmark/supported_models.md
index 69ed2cdb6e..bcbfaf2f6a 100644
--- a/docs/zh_cn/03-benchmark/supported_models.md
+++ b/docs/zh_cn/03-benchmark/supported_models.md
@@ -61,8 +61,8 @@
 | [Segmenter](https://github.com/open-mmlab/mmsegmentation/tree/1.x/configs/segmenter) [\*static](#note) | MMSegmentation | Y | Y | Y | Y | N | Y | N | N |
 | [SRCNN](https://github.com/open-mmlab/mmediting/tree/1.x/configs/srcnn) | MMEditing | Y | Y | Y | Y | Y | Y | N | N |
 | [ESRGAN](https://github.com/open-mmlab/mmediting/tree/1.x/configs/esrgan) | MMEditing | Y | Y | Y | Y | Y | Y | N | N |
-| [SRGAN](https://github.com/open-mmlab/mmediting/tree/1.x/configs/srresnet_srgan) | MMEditing | Y | Y | Y | Y | Y | Y | N | N |
-| [SRResNet](https://github.com/open-mmlab/mmediting/tree/1.x/configs/srresnet_srgan) | MMEditing | Y | Y | Y | Y | Y | Y | N | N |
+| [SRGAN](https://github.com/open-mmlab/mmediting/tree/1.x/configs/srgan_resnet) | MMEditing | Y | Y | Y | Y | Y | Y | N | N |
+| [SRResNet](https://github.com/open-mmlab/mmediting/tree/1.x/configs/srgan_resnet) | MMEditing | Y | Y | Y | Y | Y | Y | N | N |
 | [Real-ESRGAN](https://github.com/open-mmlab/mmediting/tree/1.x/configs/real_esrgan) | MMEditing | Y | Y | Y | Y | Y | Y | N | N |
 | [EDSR](https://github.com/open-mmlab/mmediting/tree/1.x/configs/edsr) | MMEditing | Y | Y | Y | Y | N | Y | N | N |
 | [RDN](https://github.com/open-mmlab/mmediting/tree/1.x/configs/rdn) | MMEditing | Y | Y | Y | Y | Y | Y | N | N |

diff --git a/docs/zh_cn/04-supported-codebases/mmaction2.md b/docs/zh_cn/04-supported-codebases/mmaction2.md
index c2f23626db..ff1901bb7d 100644
--- a/docs/zh_cn/04-supported-codebases/mmaction2.md
+++ b/docs/zh_cn/04-supported-codebases/mmaction2.md
@@ -51,9 +51,9 @@ export LD_LIBRARY_PATH=$(pwd)/../mmdeploy-dep/onnxruntime-linux-x64-1.8.1/lib/:$
 ## 模型转换

 你可以使用 [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/tools/deploy.py) 把 mmaction2 模型一键式转换为推理后端模型。
-该工具的详细使用说明请参考[这里](https://github.com/open-mmlab/mmdeploy/blob/master/docs/en/02-how-to-run/convert_model.md#usage).
+该工具的详细使用说明请参考[这里](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage).

-转换的关键之一是使用正确的配置文件。项目中已内置了各后端部署[配置文件](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/configs/mmaction)。
+转换的关键之一是使用正确的配置文件。项目中已内置了各后端部署[配置文件](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmaction)。
 文件的命名模式是:

 ```
 {task}/{task}_{backend}-{precision}_{static | dynamic}_{shape}.py
@@ -181,7 +181,7 @@ for label_id, score in result:
 ```

 除了python API,mmdeploy SDK 还提供了诸如 C、C++、C#、Java等多语言接口。
-你可以参考[样例](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/demo)学习其他语言接口的使用方法。
+你可以参考[样例](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo)学习其他语言接口的使用方法。

 > mmaction2 的 C#,Java接口待开发

diff --git a/docs/zh_cn/04-supported-codebases/mmcls.md b/docs/zh_cn/04-supported-codebases/mmcls.md
index e498749e88..e59b6ed169 100644
--- a/docs/zh_cn/04-supported-codebases/mmcls.md
+++ b/docs/zh_cn/04-supported-codebases/mmcls.md
@@ -71,7 +71,7 @@ python tools/deploy.py \
     --dump-info
 ```

-转换的关键之一是使用正确的配置文件。项目中已内置了各后端部署[配置文件](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/configs/mmcls)。
+转换的关键之一是使用正确的配置文件。项目中已内置了各后端部署[配置文件](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmcls)。
 文件的命名模式是:

 ```
@@ -173,7 +173,7 @@ for label_id, score in result:
 ```

 除了python API,mmdeploy SDK 还提供了诸如 C、C++、C#、Java等多语言接口。
-你可以参考[样例](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/demo)学习其他语言接口的使用方法。
+你可以参考[样例](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo)学习其他语言接口的使用方法。

 ## 模型支持列表

diff --git a/docs/zh_cn/04-supported-codebases/mmdet.md b/docs/zh_cn/04-supported-codebases/mmdet.md
index f6c43e43ac..600e0f2749 100644
--- a/docs/zh_cn/04-supported-codebases/mmdet.md
+++ b/docs/zh_cn/04-supported-codebases/mmdet.md
@@ -49,7 +49,7 @@ export LD_LIBRARY_PATH=$(pwd)/../mmdeploy-dep/onnxruntime-linux-x64-1.8.1/lib/:$
 ## 模型转换

 你可以使用 [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/1.x/tools/deploy.py) 把 mmdet 模型一键式转换为推理后端模型。
-该工具的详细使用说明请参考[这里](https://github.com/open-mmlab/mmdeploy/blob/master/docs/en/02-how-to-run/convert_model.md#usage).
+该工具的详细使用说明请参考[这里](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage).

 以下,我们将演示如何把 `Faster R-CNN` 转换为 onnx 模型。
@@ -188,7 +188,7 @@ cv2.imwrite('output_detection.png', img)
 ```

 除了python API,mmdeploy SDK 还提供了诸如 C、C++、C#、Java等多语言接口。
-你可以参考[样例](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/demo)学习其他语言接口的使用方法。
+你可以参考[样例](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo)学习其他语言接口的使用方法。

 ## 模型支持列表

diff --git a/docs/zh_cn/04-supported-codebases/mmedit.md b/docs/zh_cn/04-supported-codebases/mmedit.md
index b690adde94..68060e996a 100644
--- a/docs/zh_cn/04-supported-codebases/mmedit.md
+++ b/docs/zh_cn/04-supported-codebases/mmedit.md
@@ -52,7 +52,7 @@ export LD_LIBRARY_PATH=$(pwd)/../mmdeploy-dep/onnxruntime-linux-x64-1.8.1/lib/:$
 你可以使用 [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/tools/deploy.py) 把 mmedit 模型一键式转换为推理后端模型。
 该工具的详细使用说明请参考[这里](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/docs/zh_cn/02-how-to-run/convert_model.md#使用方法).

-转换的关键之一是使用正确的配置文件。项目中已内置了各后端部署[配置文件](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/configs/mmedit)。
+转换的关键之一是使用正确的配置文件。项目中已内置了各后端部署[配置文件](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmedit)。
 文件的命名模式是:

 ```
@@ -185,7 +185,7 @@ cv2.imwrite('output_restorer.bmp', result)
 ```

 除了python API,mmdeploy SDK 还提供了诸如 C、C++、C#、Java等多语言接口。
-你可以参考[样例](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/demo)学习其他语言接口的使用方法。
+你可以参考[样例](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo)学习其他语言接口的使用方法。

 ## 模型支持列表
@@ -194,8 +194,8 @@
 | [SRCNN](https://github.com/open-mmlab/mmediting/tree/1.x/configs/srcnn) | super-resolution | Y | Y | Y | Y | Y |
 | [ESRGAN](https://github.com/open-mmlab/mmediting/tree/1.x/configs/esrgan) | super-resolution | Y | Y | Y | Y | Y |
 | [ESRGAN-PSNR](https://github.com/open-mmlab/mmediting/tree/1.x/configs/esrgan) | super-resolution | Y | Y | Y | Y | Y |
-| [SRGAN](https://github.com/open-mmlab/mmediting/tree/1.x/configs/srresnet_srgan) | super-resolution | Y | Y | Y | Y | Y |
-| [SRResNet](https://github.com/open-mmlab/mmediting/tree/1.x/configs/srresnet_srgan) | super-resolution | Y | Y | Y | Y | Y |
+| [SRGAN](https://github.com/open-mmlab/mmediting/tree/1.x/configs/srgan_resnet) | super-resolution | Y | Y | Y | Y | Y |
+| [SRResNet](https://github.com/open-mmlab/mmediting/tree/1.x/configs/srgan_resnet) | super-resolution | Y | Y | Y | Y | Y |
 | [Real-ESRGAN](https://github.com/open-mmlab/mmediting/tree/1.x/configs/real_esrgan) | super-resolution | Y | Y | Y | Y | Y |
 | [EDSR](https://github.com/open-mmlab/mmediting/tree/1.x/configs/edsr) | super-resolution | Y | Y | Y | N | Y |
 | [RDN](https://github.com/open-mmlab/mmediting/tree/1.x/configs/rdn) | super-resolution | Y | Y | Y | Y | Y |

diff --git a/docs/zh_cn/04-supported-codebases/mmocr.md b/docs/zh_cn/04-supported-codebases/mmocr.md
index 65a082ade2..be68e70f62 100644
--- a/docs/zh_cn/04-supported-codebases/mmocr.md
+++ b/docs/zh_cn/04-supported-codebases/mmocr.md
@@ -52,9 +52,9 @@ export LD_LIBRARY_PATH=$(pwd)/../mmdeploy-dep/onnxruntime-linux-x64-1.8.1/lib/:$
 ## 模型转换

 你可以使用 [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/tools/deploy.py) 把 mmocr 模型一键式转换为推理后端模型。
-该工具的详细使用说明请参考[这里](https://github.com/open-mmlab/mmdeploy/blob/master/docs/en/02-how-to-run/convert_model.md#usage).
+该工具的详细使用说明请参考[这里](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage).

-转换的关键之一是使用正确的配置文件。项目中已内置了各后端部署[配置文件](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/configs/mmocr)。
+转换的关键之一是使用正确的配置文件。项目中已内置了各后端部署[配置文件](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmocr)。
 文件的命名模式是:

 ```
@@ -232,7 +232,7 @@ print(texts)
 ```

 除了python API,mmdeploy SDK 还提供了诸如 C、C++、C#、Java等多语言接口。
-你可以参考[样例](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/demo)学习其他语言接口的使用方法。
+你可以参考[样例](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo)学习其他语言接口的使用方法。

 ## 模型支持列表

diff --git a/docs/zh_cn/04-supported-codebases/mmpose.md b/docs/zh_cn/04-supported-codebases/mmpose.md
index 97c4933da0..67854a05a0 100644
--- a/docs/zh_cn/04-supported-codebases/mmpose.md
+++ b/docs/zh_cn/04-supported-codebases/mmpose.md
@@ -49,7 +49,7 @@ export LD_LIBRARY_PATH=$(pwd)/../mmdeploy-dep/onnxruntime-linux-x64-1.8.1/lib/:$
 ## 模型转换

 你可以使用 [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/tools/deploy.py) 把 mmpose 模型一键式转换为推理后端模型。
-该工具的详细使用说明请参考[这里](https://github.com/open-mmlab/mmdeploy/blob/master/docs/en/02-how-to-run/convert_model.md#usage).
+该工具的详细使用说明请参考[这里](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage).

 以下,我们将演示如何把 `hrnet` 转换为 onnx 模型。
@@ -68,7 +68,7 @@ python tools/deploy.py \
     --show
 ```

-转换的关键之一是使用正确的配置文件。项目中已内置了各后端部署[配置文件](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/configs/mmpose)。
+转换的关键之一是使用正确的配置文件。项目中已内置了各后端部署[配置文件](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmpose)。
 文件的命名模式是:

 ```

diff --git a/docs/zh_cn/04-supported-codebases/mmseg.md b/docs/zh_cn/04-supported-codebases/mmseg.md
index c96ea6d56c..d27bc6935e 100644
--- a/docs/zh_cn/04-supported-codebases/mmseg.md
+++ b/docs/zh_cn/04-supported-codebases/mmseg.md
@@ -54,7 +54,7 @@ export LD_LIBRARY_PATH=$(pwd)/../mmdeploy-dep/onnxruntime-linux-x64-1.8.1/lib/:$
 ## 模型转换

 你可以使用 [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/tools/deploy.py) 把 mmseg 模型一键式转换为推理后端模型。
-该工具的详细使用说明请参考[这里](https://github.com/open-mmlab/mmdeploy/blob/master/docs/en/02-how-to-run/convert_model.md#usage).
+该工具的详细使用说明请参考[这里](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage).

 以下,我们将演示如何把 `unet` 转换为 onnx 模型。
@@ -76,7 +76,7 @@ python tools/deploy.py \
     --dump-info
 ```

-转换的关键之一是使用正确的配置文件。项目中已内置了各后端部署[配置文件](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/configs/mmseg)。
+转换的关键之一是使用正确的配置文件。项目中已内置了各后端部署[配置文件](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmseg)。
 文件的命名模式是:

 ```
@@ -188,7 +188,7 @@ cv2.imwrite('output_segmentation.png', img)
 ```

 除了python API,mmdeploy SDK 还提供了诸如 C、C++、C#、Java等多语言接口。
-你可以参考[样例](https://github.com/open-mmlab/mmdeploy/tree/dev-1.x/demo)学习其他语言接口的使用方法。
+你可以参考[样例](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo)学习其他语言接口的使用方法。

 ## 模型支持列表

diff --git a/docs/zh_cn/05-supported-backends/rknn.md b/docs/zh_cn/05-supported-backends/rknn.md
index 66f6469062..165029e5b9 100644
--- a/docs/zh_cn/05-supported-backends/rknn.md
+++ b/docs/zh_cn/05-supported-backends/rknn.md
@@ -2,7 +2,7 @@
 目前, MMDeploy 只在 rk3588 和 rv1126 的 linux 平台上测试过.

-以下特性需要手动在 MMDeploy 自行配置,如[这里](https://github.com/open-mmlab/mmdeploy/blob/master/configs/_base_/backends/rknn.py).
+以下特性需要手动在 MMDeploy 自行配置,如[这里](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/_base_/backends/rknn.py).

 - target_platform != default
 - quantization settings

diff --git a/docs/zh_cn/get_started.md b/docs/zh_cn/get_started.md
index 07f18bceeb..1e729d8995 100644
--- a/docs/zh_cn/get_started.md
+++ b/docs/zh_cn/get_started.md
@@ -268,7 +268,7 @@ for index, bbox, label_id in zip(indices, bboxes, labels):
 cv2.imwrite('output_detection.png', img)
 ```

-更多示例,请查阅[这里](https://github.com/open-mmlab/mmdeploy/tree/master/demo/python)。
+更多示例,请查阅[这里](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo/python)。

 #### C++ API
@@ -322,9 +322,9 @@ target_link_libraries(${name} PRIVATE mmdeploy ${OpenCV_LIBS})
 ```

 编译时,使用 -DMMDeploy_DIR,传入MMDeloyConfig.cmake所在的路径。它在预编译包中的sdk/lib/cmake/MMDeloy下。
-更多示例,请查阅[此处](https://github.com/open-mmlab/mmdeploy/tree/master/demo/csrc/cpp)。
+更多示例,请查阅[此处](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo/csrc/cpp)。

-对于 C API、C# API、Java API 的使用方法,请分别阅读代码[C demos](https://github.com/open-mmlab/mmdeploy/tree/master/demo/csrc/c), [C# demos](https://github.com/open-mmlab/mmdeploy/tree/master/demo/csharp) 和 [Java demos](https://github.com/open-mmlab/mmdeploy/tree/master/demo/java)。
+对于 C API、C# API、Java API 的使用方法,请分别阅读代码[C demos](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo/csrc/c), [C# demos](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo/csharp) 和 [Java demos](https://github.com/open-mmlab/mmdeploy/tree/1.x/demo/java)。
 我们将在后续版本中详细讲述它们的用法。

 #### 加速预处理(实验性功能)

diff --git a/docs/zh_cn/tutorial/02_challenges.md b/docs/zh_cn/tutorial/02_challenges.md
index b52151bd1d..c7d4ebb7a4 100644
--- a/docs/zh_cn/tutorial/02_challenges.md
+++ b/docs/zh_cn/tutorial/02_challenges.md
@@ -1,6 +1,6 @@
 # 第二章:解决模型部署中的难题

-在[第一章](https://mmdeploy.readthedocs.io/zh_CN/latest/tutorials/chapter_01_introduction_to_model_deployment.html)中,我们部署了一个简单的超分辨率模型,一切都十分顺利。但是,上一个模型还有一些缺陷——图片的放大倍数固定是 4,我们无法让图片放大任意的倍数。现在,我们来尝试部署一个支持动态放大倍数的模型,体验一下在模型部署中可能会碰到的困难。
+在[第一章](https://mmdeploy.readthedocs.io/zh_CN/dev-1.x/tutorial/01_introduction_to_model_deployment.html)中,我们部署了一个简单的超分辨率模型,一切都十分顺利。但是,上一个模型还有一些缺陷——图片的放大倍数固定是 4,我们无法让图片放大任意的倍数。现在,我们来尝试部署一个支持动态放大倍数的模型,体验一下在模型部署中可能会碰到的困难。

 ## 模型部署中常见的难题
@@ -10,7 +10,7 @@
 - 新算子的实现。深度学习技术日新月异,提出新算子的速度往往快于 ONNX 维护者支持的速度。为了部署最新的模型,部署工程师往往需要自己在 ONNX 和推理引擎中支持新算子。
 - 中间表示与推理引擎的兼容问题。由于各推理引擎的实现不同,对 ONNX 难以形成统一的支持。为了确保模型在不同的推理引擎中有同样的运行效果,部署工程师往往得为某个推理引擎定制模型代码,这为模型部署引入了许多工作量。

-我们会在后续教程详细讲述解决这些问题的方法。如果对前文中 ONNX、推理引擎、中间表示、算子等名词感觉陌生,不用担心,可以阅读[第一章](https://mmdeploy.readthedocs.io/zh_CN/latest/tutorials/chapter_01_introduction_to_model_deployment.html),了解有关概念。
+我们会在后续教程详细讲述解决这些问题的方法。如果对前文中 ONNX、推理引擎、中间表示、算子等名词感觉陌生,不用担心,可以阅读[第一章](https://mmdeploy.readthedocs.io/zh_CN/dev-1.x/tutorial/01_introduction_to_model_deployment.html),了解有关概念。

 现在,让我们对原来的 SRCNN 模型做一些小的修改,体验一下模型动态化对模型部署造成的困难,并学习解决该问题的一种方法。
@@ -38,7 +38,7 @@ def init_torch_model():

 现在,假设我们要做一个超分辨率的应用。我们的用户希望图片的放大倍数能够自由设置。而我们交给用户的,只有一个 .onnx 文件和运行超分辨率模型的应用程序。我们在不修改 .onnx 文件的前提下改变放大倍数。

-因此,我们必须修改原来的模型,令模型的放大倍数变成推理时的输入。在[第一章](https://mmdeploy.readthedocs.io/zh_CN/latest/tutorials/chapter_01_introduction_to_model_deployment.html)中的 Python 脚本的基础上,我们做一些修改,得到这样的脚本:
+因此,我们必须修改原来的模型,令模型的放大倍数变成推理时的输入。在[第一章](https://mmdeploy.readthedocs.io/zh_CN/dev-1.x/tutorial/01_introduction_to_model_deployment.html)中的 Python 脚本的基础上,我们做一些修改,得到这样的脚本:

 ```python
 import torch
@@ -75,7 +75,7 @@ def init_torch_model():
     torch_model = SuperResolutionNet()

     # Please read the code about downloading 'srcnn.pth' and 'face.png' in
-    # https://mmdeploy.readthedocs.io/zh_CN/latest/tutorials/chapter_01_introduction_to_model_deployment.html#pytorch
+    # https://mmdeploy.readthedocs.io/zh_CN/dev-1.x/tutorial/01_introduction_to_model_deployment.html#pytorch
     state_dict = torch.load('srcnn.pth')['state_dict']

     # Adapt the checkpoint

diff --git a/docs/zh_cn/tutorial/06_introduction_to_tensorrt.md b/docs/zh_cn/tutorial/06_introduction_to_tensorrt.md
index 2f330e525f..dca251f127 100644
--- a/docs/zh_cn/tutorial/06_introduction_to_tensorrt.md
+++ b/docs/zh_cn/tutorial/06_introduction_to_tensorrt.md
@@ -44,7 +44,7 @@ python -c "import tensorrt;print(tensorrt.__version__)"
 ### Jetson

-对于 Jetson 平台,我们有非常详细的安装环境配置教程,可参考 [MMDeploy 安装文档](https://github.com/open-mmlab/mmdeploy/blob/master/docs/zh_cn/01-how-to-build/jetsons.md)。需要注意的是,在 Jetson 上配置的 CUDA 版本 TensorRT 版本与 JetPack 强相关的,我们选择适配硬件的版本即可。配置好环境后,通过 `python -c "import tensorrt;print(tensorrt.__version__)"` 查看TensorRT版本是否正确。
+对于 Jetson 平台,我们有非常详细的安装环境配置教程,可参考 [MMDeploy 安装文档](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/zh_cn/01-how-to-build/jetsons.md)。需要注意的是,在 Jetson 上配置的 CUDA 版本 TensorRT 版本与 JetPack 强相关的,我们选择适配硬件的版本即可。配置好环境后,通过 `python -c "import tensorrt;print(tensorrt.__version__)"` 查看TensorRT版本是否正确。

 ## 模型构建
 
-[![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmdeploy.readthedocs.io/en/latest/)
+[![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmdeploy.readthedocs.io/en/1.x/)
 [![badge](https://github.com/open-mmlab/mmdeploy/workflows/build/badge.svg)](https://github.com/open-mmlab/mmdeploy/actions)
-[![codecov](https://codecov.io/gh/open-mmlab/mmdeploy/branch/master/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmdeploy)
+[![codecov](https://codecov.io/gh/open-mmlab/mmdeploy/branch/1.x/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmdeploy)
 [![license](https://img.shields.io/github/license/open-mmlab/mmdeploy.svg)](https://github.com/open-mmlab/mmdeploy/tree/1.x/LICENSE)
 [![issue resolution](https://img.shields.io/github/issues-closed-raw/open-mmlab/mmdeploy)](https://github.com/open-mmlab/mmdeploy/issues)
 [![open issues](https://img.shields.io/github/issues-raw/open-mmlab/mmdeploy)](https://github.com/open-mmlab/mmdeploy/issues)
@@ -75,7 +75,7 @@ The benchmark can be found from [here](docs/en/03-benchmark/benchmark.md)

   All kinds of modules in the SDK can be extended, such as `Transform` for image processing, `Net` for Neural Network inference, `Module` for postprocessing and so on

-## [Documentation](https://mmdeploy.readthedocs.io/en/latest/)
+## [Documentation](https://mmdeploy.readthedocs.io/en/1.x/)

 Please read [getting_started](docs/en/get_started.md) for the basic usage of MMDeploy. We also provide tutorials about:

diff --git a/README_zh-CN.md b/README_zh-CN.md
index aa6f6f2c4e..f81539d1eb 100644
--- a/README_zh-CN.md
+++ b/README_zh-CN.md
@@ -19,9 +19,9 @@
 
-[![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmdeploy.readthedocs.io/zh_CN/latest/) +[![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmdeploy.readthedocs.io/zh_CN/1.x/) [![badge](https://github.com/open-mmlab/mmdeploy/workflows/build/badge.svg)](https://github.com/open-mmlab/mmdeploy/actions) -[![codecov](https://codecov.io/gh/open-mmlab/mmdeploy/branch/master/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmdeploy) +[![codecov](https://codecov.io/gh/open-mmlab/mmdeploy/branch/1.x/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmdeploy) [![license](https://img.shields.io/github/license/open-mmlab/mmdeploy.svg)](https://github.com/open-mmlab/mmdeploy/tree/1.x/LICENSE) [![issue resolution](https://img.shields.io/github/issues-closed-raw/open-mmlab/mmdeploy)](https://github.com/open-mmlab/mmdeploy/issues) [![open issues](https://img.shields.io/github/issues-raw/open-mmlab/mmdeploy)](https://github.com/open-mmlab/mmdeploy/issues) @@ -75,7 +75,7 @@ MMDeploy 是 [OpenMMLab](https://openmmlab.com/) 模型部署工具箱,**为 - Net 推理 - Module 后处理 -## [中文文档](https://mmdeploy.readthedocs.io/zh_CN/latest/) +## [中文文档](https://mmdeploy.readthedocs.io/zh_CN/1.x/) - [快速上手](docs/zh_cn/get_started.md) - [编译](docs/zh_cn/01-how-to-build/build_from_source.md) @@ -119,7 +119,7 @@ MMDeploy 是 [OpenMMLab](https://openmmlab.com/) 模型部署工具箱,**为 ## 基准与模型库 -基准和支持的模型列表可以在[基准](https://mmdeploy.readthedocs.io/zh_CN/dev-1.x/03-benchmark/benchmark.html)和[模型列表](https://mmdeploy.readthedocs.io/en/dev-1.x/03-benchmark/supported_models.html)中获得。 +基准和支持的模型列表可以在[基准](https://mmdeploy.readthedocs.io/zh_CN/1.x/03-benchmark/benchmark.html)和[模型列表](https://mmdeploy.readthedocs.io/en/1.x/03-benchmark/supported_models.html)中获得。 ## 贡献指南 diff --git a/docker/CPU/Dockerfile b/docker/CPU/Dockerfile index 02be005705..fe884f52ff 100644 --- a/docker/CPU/Dockerfile +++ b/docker/CPU/Dockerfile @@ -76,9 +76,9 @@ ENV PATH="/root/workspace/ncnn/build/tools/quantize/:${PATH}" ### install mmdeploy WORKDIR /root/workspace ARG VERSION -RUN git clone -b dev-1.x https://github.com/open-mmlab/mmdeploy.git &&\ +RUN git clone -b 1.x https://github.com/open-mmlab/mmdeploy.git &&\ cd mmdeploy &&\ - if [ -z ${VERSION} ] ; then echo "No MMDeploy version passed in, building on dev-1.x" ; else git checkout tags/v${VERSION} -b tag_v${VERSION} ; fi &&\ + if [ -z ${VERSION} ] ; then echo "No MMDeploy version passed in, building on 1.x" ; else git checkout tags/v${VERSION} -b tag_v${VERSION} ; fi &&\ git submodule update --init --recursive &&\ rm -rf build &&\ mkdir build &&\ diff --git a/docker/GPU/Dockerfile b/docker/GPU/Dockerfile index a8d2509b4f..1dbc7dce3c 100644 --- a/docker/GPU/Dockerfile +++ b/docker/GPU/Dockerfile @@ -57,9 +57,9 @@ RUN cp -r /usr/local/lib/python${PYTHON_VERSION}/dist-packages/tensorrt* /opt/co ENV ONNXRUNTIME_DIR=/root/workspace/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION} ENV TENSORRT_DIR=/workspace/tensorrt ARG VERSION -RUN git clone -b dev-1.x https://github.com/open-mmlab/mmdeploy &&\ +RUN git clone -b 1.x https://github.com/open-mmlab/mmdeploy &&\ cd mmdeploy &&\ - if [ -z ${VERSION} ] ; then echo "No MMDeploy version passed in, building on dev-1.x" ; else git checkout tags/v${VERSION} -b tag_v${VERSION} ; fi &&\ + if [ -z ${VERSION} ] ; then echo "No MMDeploy version passed in, building on 1.x" ; else git checkout tags/v${VERSION} -b 
tag_v${VERSION} ; fi &&\ git submodule update --init --recursive &&\ mkdir -p build &&\ cd build &&\ diff --git a/docs/en/01-how-to-build/android.md b/docs/en/01-how-to-build/android.md index 4799bb6385..c67c73f5d7 100644 --- a/docs/en/01-how-to-build/android.md +++ b/docs/en/01-how-to-build/android.md @@ -97,7 +97,7 @@ make -j$(nproc) install OpenJDK It is necessary for building Java API.
- See Java API build for building tutorials. + See Java API build for building tutorials. diff --git a/docs/en/01-how-to-build/build_from_source.md b/docs/en/01-how-to-build/build_from_source.md index 8851995c1a..e3c1d63582 100644 --- a/docs/en/01-how-to-build/build_from_source.md +++ b/docs/en/01-how-to-build/build_from_source.md @@ -26,7 +26,7 @@ Note: - If it fails when `git clone` via `SSH`, you can try the `HTTPS` protocol like this: ```shell - git clone -b dev-1.x https://github.com/open-mmlab/mmdeploy.git --recursive + git clone -b 1.x https://github.com/open-mmlab/mmdeploy.git --recursive ``` ## Build diff --git a/docs/en/01-how-to-build/jetsons.md b/docs/en/01-how-to-build/jetsons.md index a193b05714..593cdba89a 100644 --- a/docs/en/01-how-to-build/jetsons.md +++ b/docs/en/01-how-to-build/jetsons.md @@ -223,7 +223,7 @@ It takes about 15 minutes to install ppl.cv on a Jetson Nano. So, please be pati ## Install MMDeploy ```shell -git clone -b dev-1.x --recursive https://github.com/open-mmlab/mmdeploy.git +git clone -b 1.x --recursive https://github.com/open-mmlab/mmdeploy.git cd mmdeploy export MMDEPLOY_DIR=$(pwd) ``` diff --git a/docs/en/02-how-to-run/prebuilt_package_windows.md b/docs/en/02-how-to-run/prebuilt_package_windows.md index 5bd27f05c4..c5e8cfc573 100644 --- a/docs/en/02-how-to-run/prebuilt_package_windows.md +++ b/docs/en/02-how-to-run/prebuilt_package_windows.md @@ -47,7 +47,7 @@ In order to use the prebuilt package, you need to install some third-party depen 2. Clone the mmdeploy repository ```bash - git clone -b dev-1.x https://github.com/open-mmlab/mmdeploy.git + git clone -b 1.x https://github.com/open-mmlab/mmdeploy.git ``` :point_right: The main purpose here is to use the configs, so there is no need to compile `mmdeploy`. diff --git a/docs/en/04-supported-codebases/mmaction2.md b/docs/en/04-supported-codebases/mmaction2.md index fbd4202772..d30748cf85 100644 --- a/docs/en/04-supported-codebases/mmaction2.md +++ b/docs/en/04-supported-codebases/mmaction2.md @@ -37,7 +37,7 @@ If your target platform is **Ubuntu 18.04 or later version**, we encourage you t [scripts](../01-how-to-build/build_from_script.md). For example, the following commands install mmdeploy as well as inference engine - `ONNX Runtime`. ```shell -git clone --recursive -b dev-1.x https://github.com/open-mmlab/mmdeploy.git +git clone --recursive -b 1.x https://github.com/open-mmlab/mmdeploy.git cd mmdeploy python3 tools/scripts/build_ubuntu_x64_ort.py $(nproc) export PYTHONPATH=$(pwd)/build/lib:$PYTHONPATH @@ -50,7 +50,7 @@ If neither **I** nor **II** meets your requirements, [building mmdeploy from sou ## Convert model -You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/tools/deploy.py) to convert mmaction2 models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage). +You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/1.x/tools/deploy.py) to convert mmaction2 models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage). 
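For readers who script their pipelines, a minimal sketch of driving the converter from Python follows. It simply shells out to `tools/deploy.py` with the flags these docs already use (`--work-dir`, `--device`, `--dump-info`); every path in the sketch is an illustrative placeholder, not a file shipped with mmdeploy.

```python
# Minimal sketch: invoke the mmdeploy converter from a Python script.
# All four positional paths are placeholders -- substitute your own
# deploy config, model config, checkpoint, and test input.
import subprocess

cmd = [
    "python", "tools/deploy.py",
    "DEPLOY_CFG.py",    # placeholder: a backend deploy config under configs/
    "MODEL_CFG.py",     # placeholder: the upstream codebase model config
    "CHECKPOINT.pth",   # placeholder: trained weights
    "INPUT_SAMPLE",     # placeholder: image/video used to trace the model
    "--work-dir", "mmdeploy_models/ort",  # output directory
    "--device", "cpu",
    "--dump-info",      # also write the meta files the SDK consumes
]
subprocess.run(cmd, check=True)
```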
When using `tools/deploy.py`, it is crucial to specify the correct deployment config. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmaction) of all supported backends for mmaction2, under which the config file path follows the pattern: diff --git a/docs/en/04-supported-codebases/mmcls.md b/docs/en/04-supported-codebases/mmcls.md index cb31337bdc..4b52fb7732 100644 --- a/docs/en/04-supported-codebases/mmcls.md +++ b/docs/en/04-supported-codebases/mmcls.md @@ -27,7 +27,7 @@ There are several methods to install mmdeploy, among which you can choose an app **Method I:** Install precompiled package -> **TODO**. MMDeploy hasn't released based on dev-1.x branch. +> **TODO**. MMDeploy hasn't released based on 1.x branch. **Method II:** Build using scripts @@ -35,7 +35,7 @@ If your target platform is **Ubuntu 18.04 or later version**, we encourage you t [scripts](../01-how-to-build/build_from_script.md). For example, the following commands install mmdeploy as well as inference engine - `ONNX Runtime`. ```shell -git clone --recursive -b dev-1.x https://github.com/open-mmlab/mmdeploy.git +git clone --recursive -b 1.x https://github.com/open-mmlab/mmdeploy.git cd mmdeploy python3 tools/scripts/build_ubuntu_x64_ort.py $(nproc) export PYTHONPATH=$(pwd)/build/lib:$PYTHONPATH @@ -48,7 +48,7 @@ If neither **I** nor **II** meets your requirements, [building mmdeploy from sou ## Convert model -You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/tools/deploy.py) to convert mmcls models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/docs/en/02-how-to-run/convert_model.md#usage). +You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/1.x/tools/deploy.py) to convert mmcls models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage). The command below shows an example about converting `resnet18` model to onnx model that can be inferred by ONNX Runtime. diff --git a/docs/en/04-supported-codebases/mmdet.md b/docs/en/04-supported-codebases/mmdet.md index 21aa12dbff..c2356ef899 100644 --- a/docs/en/04-supported-codebases/mmdet.md +++ b/docs/en/04-supported-codebases/mmdet.md @@ -27,7 +27,7 @@ There are several methods to install mmdeploy, among which you can choose an app **Method I:** Install precompiled package -> **TODO**. MMDeploy hasn't released based on dev-1.x branch. +> **TODO**. MMDeploy hasn't released based on 1.x branch. **Method II:** Build using scripts @@ -35,7 +35,7 @@ If your target platform is **Ubuntu 18.04 or later version**, we encourage you t [scripts](../01-how-to-build/build_from_script.md). For example, the following commands install mmdeploy as well as inference engine - `ONNX Runtime`. 
```shell -git clone --recursive -b dev-1.x https://github.com/open-mmlab/mmdeploy.git +git clone --recursive -b 1.x https://github.com/open-mmlab/mmdeploy.git cd mmdeploy python3 tools/scripts/build_ubuntu_x64_ort.py $(nproc) export PYTHONPATH=$(pwd)/build/lib:$PYTHONPATH @@ -48,7 +48,7 @@ If neither **I** nor **II** meets your requirements, [building mmdeploy from sou ## Convert model -You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/tools/deploy.py) to convert mmdet models to the specified backend models. Its detailed usage can be learned from [here](../02-how-to-run/convert_model.md). +You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/1.x/tools/deploy.py) to convert mmdet models to the specified backend models. Its detailed usage can be learned from [here](../02-how-to-run/convert_model.md). The command below shows an example about converting `Faster R-CNN` model to onnx model that can be inferred by ONNX Runtime. diff --git a/docs/en/04-supported-codebases/mmedit.md b/docs/en/04-supported-codebases/mmedit.md index 0137dd385d..888b9798b3 100644 --- a/docs/en/04-supported-codebases/mmedit.md +++ b/docs/en/04-supported-codebases/mmedit.md @@ -27,7 +27,7 @@ There are several methods to install mmdeploy, among which you can choose an app **Method I:** Install precompiled package -> **TODO**. MMDeploy hasn't released based on dev-1.x branch. +> **TODO**. MMDeploy hasn't released based on 1.x branch. **Method II:** Build using scripts @@ -35,7 +35,7 @@ If your target platform is **Ubuntu 18.04 or later version**, we encourage you t [scripts](../01-how-to-build/build_from_script.md). For example, the following commands install mmdeploy as well as inference engine - `ONNX Runtime`. ```shell -git clone --recursive -b dev-1.x https://github.com/open-mmlab/mmdeploy.git +git clone --recursive -b 1.x https://github.com/open-mmlab/mmdeploy.git cd mmdeploy python3 tools/scripts/build_ubuntu_x64_ort.py $(nproc) export PYTHONPATH=$(pwd)/build/lib:$PYTHONPATH @@ -48,7 +48,7 @@ If neither **I** nor **II** meets your requirements, [building mmdeploy from sou ## Convert model -You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/tools/deploy.py) to convert mmedit models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/docs/en/02-how-to-run/convert_model.md#usage). +You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/1.x/tools/deploy.py) to convert mmedit models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage). When using `tools/deploy.py`, it is crucial to specify the correct deployment config. 
We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmedit) of all supported backends for mmedit, under which the config file path follows the pattern: diff --git a/docs/en/04-supported-codebases/mmocr.md b/docs/en/04-supported-codebases/mmocr.md index 23ef1d0ab1..592eca36d1 100644 --- a/docs/en/04-supported-codebases/mmocr.md +++ b/docs/en/04-supported-codebases/mmocr.md @@ -28,7 +28,7 @@ There are several methods to install mmdeploy, among which you can choose an app **Method I:** Install precompiled package -> **TODO**. MMDeploy hasn't released based on dev-1.x branch. +> **TODO**. MMDeploy hasn't released based on 1.x branch. **Method II:** Build using scripts @@ -36,7 +36,7 @@ If your target platform is **Ubuntu 18.04 or later version**, we encourage you t [scripts](../01-how-to-build/build_from_script.md). For example, the following commands install mmdeploy as well as inference engine - `ONNX Runtime`. ```shell -git clone --recursive -b dev-1.x https://github.com/open-mmlab/mmdeploy.git +git clone --recursive -b 1.x https://github.com/open-mmlab/mmdeploy.git cd mmdeploy python3 tools/scripts/build_ubuntu_x64_ort.py $(nproc) export PYTHONPATH=$(pwd)/build/lib:$PYTHONPATH @@ -49,7 +49,7 @@ If neither **I** nor **II** meets your requirements, [building mmdeploy from sou ## Convert model -You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/tools/deploy.py) to convert mmocr models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage). +You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/1.x/tools/deploy.py) to convert mmocr models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage). When using `tools/deploy.py`, it is crucial to specify the correct deployment config. We've already provided builtin deployment config [files](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmocr) of all supported backends for mmocr, under which the config file path follows the pattern: diff --git a/docs/en/04-supported-codebases/mmpose.md b/docs/en/04-supported-codebases/mmpose.md index 4521697da9..350e45d809 100644 --- a/docs/en/04-supported-codebases/mmpose.md +++ b/docs/en/04-supported-codebases/mmpose.md @@ -27,7 +27,7 @@ There are several methods to install mmdeploy, among which you can choose an app **Method I:** Install precompiled package -> **TODO**. MMDeploy hasn't released based on dev-1.x branch. +> **TODO**. MMDeploy hasn't released based on 1.x branch. **Method II:** Build using scripts @@ -35,7 +35,7 @@ If your target platform is **Ubuntu 18.04 or later version**, we encourage you t [scripts](../01-how-to-build/build_from_script.md). For example, the following commands install mmdeploy as well as inference engine - `ONNX Runtime`. 
```shell -git clone --recursive -b dev-1.x https://github.com/open-mmlab/mmdeploy.git +git clone --recursive -b 1.x https://github.com/open-mmlab/mmdeploy.git cd mmdeploy python3 tools/scripts/build_ubuntu_x64_ort.py $(nproc) export PYTHONPATH=$(pwd)/build/lib:$PYTHONPATH @@ -48,7 +48,7 @@ If neither **I** nor **II** meets your requirements, [building mmdeploy from sou ## Convert model -You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/tools/deploy.py) to convert mmpose models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage). +You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/1.x/tools/deploy.py) to convert mmpose models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage). The command below shows an example about converting `hrnet` model to onnx model that can be inferred by ONNX Runtime. diff --git a/docs/en/04-supported-codebases/mmseg.md b/docs/en/04-supported-codebases/mmseg.md index 72375797d2..cecf6e570c 100644 --- a/docs/en/04-supported-codebases/mmseg.md +++ b/docs/en/04-supported-codebases/mmseg.md @@ -28,7 +28,7 @@ There are several methods to install mmdeploy, among which you can choose an app **Method I:** Install precompiled package -> **TODO**. MMDeploy hasn't released based on dev-1.x branch. +> **TODO**. MMDeploy hasn't released based on 1.x branch. **Method II:** Build using scripts @@ -36,7 +36,7 @@ If your target platform is **Ubuntu 18.04 or later version**, we encourage you t [scripts](../01-how-to-build/build_from_script.md). For example, the following commands install mmdeploy as well as inference engine - `ONNX Runtime`. ```shell -git clone --recursive -b dev-1.x https://github.com/open-mmlab/mmdeploy.git +git clone --recursive -b 1.x https://github.com/open-mmlab/mmdeploy.git cd mmdeploy python3 tools/scripts/build_ubuntu_x64_ort.py $(nproc) export PYTHONPATH=$(pwd)/build/lib:$PYTHONPATH @@ -54,7 +54,7 @@ If neither **I** nor **II** meets your requirements, [building mmdeploy from sou ## Convert model -You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/tools/deploy.py) to convert mmseg models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage). +You can use [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/1.x/tools/deploy.py) to convert mmseg models to the specified backend models. Its detailed usage can be learned from [here](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage). The command below shows an example about converting `unet` model to onnx model that can be inferred by ONNX Runtime. diff --git a/docs/en/conf.py b/docs/en/conf.py index 6dcaeb0c18..c130810b74 100644 --- a/docs/en/conf.py +++ b/docs/en/conf.py @@ -105,7 +105,7 @@ # documentation. 
# html_theme_options = { - 'logo_url': 'https://mmdeploy.readthedocs.io/en/latest/', + 'logo_url': 'https://mmdeploy.readthedocs.io/en/1.x/', 'menu': [{ 'name': 'GitHub', 'url': 'https://github.com/open-mmlab/mmdeploy' diff --git a/docs/en/switch_language.md b/docs/en/switch_language.md index 1b5df42929..03bf778b72 100644 --- a/docs/en/switch_language.md +++ b/docs/en/switch_language.md @@ -1,3 +1,3 @@ -## English +## English -## 简体中文 +## 简体中文 diff --git a/docs/zh_cn/01-how-to-build/android.md b/docs/zh_cn/01-how-to-build/android.md index c00efaf5d4..a7f3fe5875 100644 --- a/docs/zh_cn/01-how-to-build/android.md +++ b/docs/zh_cn/01-how-to-build/android.md @@ -98,7 +98,7 @@ make -j$(nproc) install OpenJDK 编译Java API之前需要先准备OpenJDK开发环境
- 请参考 Java API 编译 进行构建. + 请参考 Java API 编译 进行构建. diff --git a/docs/zh_cn/01-how-to-build/build_from_source.md b/docs/zh_cn/01-how-to-build/build_from_source.md index c22447219a..d8397a2461 100644 --- a/docs/zh_cn/01-how-to-build/build_from_source.md +++ b/docs/zh_cn/01-how-to-build/build_from_source.md @@ -27,7 +27,7 @@ git clone -b master git@github.com:open-mmlab/mmdeploy.git --recursive - 如果以 `SSH` 方式 `git clone` 代码失败,您可以尝试使用 `HTTPS` 协议下载代码: ```bash - git clone -b dev-1.x https://github.com/open-mmlab/mmdeploy.git MMDeploy + git clone -b 1.x https://github.com/open-mmlab/mmdeploy.git MMDeploy cd MMDeploy git submodule update --init --recursive ``` diff --git a/docs/zh_cn/01-how-to-build/jetsons.md b/docs/zh_cn/01-how-to-build/jetsons.md index 5ccb91c0c4..e55a890b97 100644 --- a/docs/zh_cn/01-how-to-build/jetsons.md +++ b/docs/zh_cn/01-how-to-build/jetsons.md @@ -199,7 +199,7 @@ conda activate mmdeploy ## 安装 MMDeploy ```shell -git clone -b dev-1.x --recursive https://github.com/open-mmlab/mmdeploy.git +git clone -b 1.x --recursive https://github.com/open-mmlab/mmdeploy.git cd mmdeploy export MMDEPLOY_DIR=$(pwd) ``` diff --git a/docs/zh_cn/02-how-to-run/prebuilt_package_windows.md b/docs/zh_cn/02-how-to-run/prebuilt_package_windows.md index 1acf1c247e..76d0760fea 100644 --- a/docs/zh_cn/02-how-to-run/prebuilt_package_windows.md +++ b/docs/zh_cn/02-how-to-run/prebuilt_package_windows.md @@ -55,7 +55,7 @@ ______________________________________________________________________ 2. 克隆mmdeploy仓库 ```bash - git clone -b dev-1.x https://github.com/open-mmlab/mmdeploy.git + git clone -b 1.x https://github.com/open-mmlab/mmdeploy.git ``` :point_right: 这里主要为了使用configs文件,所以没有加`--recursive`来下载submodule,也不需要编译`mmdeploy` diff --git a/docs/zh_cn/04-supported-codebases/mmaction2.md b/docs/zh_cn/04-supported-codebases/mmaction2.md index ff1901bb7d..99cde57e50 100644 --- a/docs/zh_cn/04-supported-codebases/mmaction2.md +++ b/docs/zh_cn/04-supported-codebases/mmaction2.md @@ -37,7 +37,7 @@ mmdeploy 有以下几种安装方式: 比如,以下命令可以安装 mmdeploy 以及配套的推理引擎——`ONNX Runtime`. ```shell -git clone --recursive -b dev-1.x https://github.com/open-mmlab/mmdeploy.git +git clone --recursive -b 1.x https://github.com/open-mmlab/mmdeploy.git cd mmdeploy python3 tools/scripts/build_ubuntu_x64_ort.py $(nproc) export PYTHONPATH=$(pwd)/build/lib:$PYTHONPATH @@ -50,7 +50,7 @@ export LD_LIBRARY_PATH=$(pwd)/../mmdeploy-dep/onnxruntime-linux-x64-1.8.1/lib/:$ ## 模型转换 -你可以使用 [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/tools/deploy.py) 把 mmaction2 模型一键式转换为推理后端模型。 +你可以使用 [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/1.x/tools/deploy.py) 把 mmaction2 模型一键式转换为推理后端模型。 该工具的详细使用说明请参考[这里](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage). 
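Because the install recipes on these pages all build against ONNX Runtime, a converted model can be smoke-tested straight from Python. The sketch below is illustrative only: `end2end.onnx` is the converter's usual output name, but the work directory and the input shape are placeholders whose real values come from the deploy config actually used.

```python
# Minimal sketch: smoke-test a converted ONNX model with ONNX Runtime.
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession(
    "mmdeploy_models/ort/end2end.onnx",  # placeholder path to the converted model
    providers=["CPUExecutionProvider"],
)
input_name = session.get_inputs()[0].name
dummy = np.random.rand(1, 3, 224, 224).astype(np.float32)  # placeholder shape
outputs = session.run(None, {input_name: dummy})
print([out.shape for out in outputs])  # basic sanity check of output shapes
```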
转换的关键之一是使用正确的配置文件。项目中已内置了各后端部署[配置文件](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmaction)。 diff --git a/docs/zh_cn/04-supported-codebases/mmcls.md b/docs/zh_cn/04-supported-codebases/mmcls.md index e59b6ed169..a278f3c8e0 100644 --- a/docs/zh_cn/04-supported-codebases/mmcls.md +++ b/docs/zh_cn/04-supported-codebases/mmcls.md @@ -27,7 +27,7 @@ mmdeploy 有以下几种安装方式: **方式一:** 安装预编译包 -> 待 mmdeploy 正式发布 dev-1.x,再补充 +> 待 mmdeploy 正式发布 1.x,再补充 **方式二:** 一键式脚本安装 @@ -35,7 +35,7 @@ mmdeploy 有以下几种安装方式: 比如,以下命令可以安装 mmdeploy 以及配套的推理引擎——`ONNX Runtime`. ```shell -git clone --recursive -b dev-1.x https://github.com/open-mmlab/mmdeploy.git +git clone --recursive -b 1.x https://github.com/open-mmlab/mmdeploy.git cd mmdeploy python3 tools/scripts/build_ubuntu_x64_ort.py $(nproc) export PYTHONPATH=$(pwd)/build/lib:$PYTHONPATH @@ -48,8 +48,8 @@ export LD_LIBRARY_PATH=$(pwd)/../mmdeploy-dep/onnxruntime-linux-x64-1.8.1/lib/:$ ## 模型转换 -你可以使用 [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/tools/deploy.py) 把 mmcls 模型一键式转换为推理后端模型。 -该工具的详细使用说明请参考[这里](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/docs/zh_cn/02-how-to-run/convert_model.md#使用方法). +你可以使用 [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/1.x/tools/deploy.py) 把 mmcls 模型一键式转换为推理后端模型。 +该工具的详细使用说明请参考[这里](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/zh_cn/02-how-to-run/convert_model.md#使用方法). 以下,我们将演示如何把 `resnet18` 转换为 onnx 模型。 diff --git a/docs/zh_cn/04-supported-codebases/mmedit.md b/docs/zh_cn/04-supported-codebases/mmedit.md index 68060e996a..c1d3267d2b 100644 --- a/docs/zh_cn/04-supported-codebases/mmedit.md +++ b/docs/zh_cn/04-supported-codebases/mmedit.md @@ -28,7 +28,7 @@ mmdeploy 有以下几种安装方式: **方式一:** 安装预编译包 -> 待 mmdeploy 正式发布 dev-1.x,再补充 +> 待 mmdeploy 正式发布 1.x,再补充 **方式二:** 一键式脚本安装 @@ -36,7 +36,7 @@ mmdeploy 有以下几种安装方式: 比如,以下命令可以安装 mmdeploy 以及配套的推理引擎——`ONNX Runtime`. ```shell -git clone --recursive -b dev-1.x https://github.com/open-mmlab/mmdeploy.git +git clone --recursive -b 1.x https://github.com/open-mmlab/mmdeploy.git cd mmdeploy python3 tools/scripts/build_ubuntu_x64_ort.py $(nproc) export PYTHONPATH=$(pwd)/build/lib:$PYTHONPATH @@ -49,8 +49,8 @@ export LD_LIBRARY_PATH=$(pwd)/../mmdeploy-dep/onnxruntime-linux-x64-1.8.1/lib/:$ ## 模型转换 -你可以使用 [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/tools/deploy.py) 把 mmedit 模型一键式转换为推理后端模型。 -该工具的详细使用说明请参考[这里](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/docs/zh_cn/02-how-to-run/convert_model.md#使用方法). +你可以使用 [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/1.x/tools/deploy.py) 把 mmedit 模型一键式转换为推理后端模型。 +该工具的详细使用说明请参考[这里](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/zh_cn/02-how-to-run/convert_model.md#使用方法). 转换的关键之一是使用正确的配置文件。项目中已内置了各后端部署[配置文件](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmedit)。 文件的命名模式是: diff --git a/docs/zh_cn/04-supported-codebases/mmocr.md b/docs/zh_cn/04-supported-codebases/mmocr.md index be68e70f62..14cddcad09 100644 --- a/docs/zh_cn/04-supported-codebases/mmocr.md +++ b/docs/zh_cn/04-supported-codebases/mmocr.md @@ -22,7 +22,7 @@ ______________________________________________________________________ ### 安装 mmocr -请参考[官网安装指南](https://mmocr.readthedocs.io/en/dev-1.x/get_started/install.html). 
+请参考[官网安装指南](https://mmocr.readthedocs.io/en/1.x/get_started/install.html). ### 安装 mmdeploy @@ -30,7 +30,7 @@ mmdeploy 有以下几种安装方式: **方式一:** 安装预编译包 -> 待 mmdeploy 正式发布 dev-1.x,再补充 +> 待 mmdeploy 正式发布 1.x,再补充 **方式二:** 一键式脚本安装 @@ -38,7 +38,7 @@ mmdeploy 有以下几种安装方式: 比如,以下命令可以安装 mmdeploy 以及配套的推理引擎——`ONNX Runtime`. ```shell -git clone --recursive -b dev-1.x https://github.com/open-mmlab/mmdeploy.git +git clone --recursive -b 1.x https://github.com/open-mmlab/mmdeploy.git cd mmdeploy python3 tools/scripts/build_ubuntu_x64_ort.py $(nproc) export PYTHONPATH=$(pwd)/build/lib:$PYTHONPATH @@ -51,7 +51,7 @@ export LD_LIBRARY_PATH=$(pwd)/../mmdeploy-dep/onnxruntime-linux-x64-1.8.1/lib/:$ ## 模型转换 -你可以使用 [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/tools/deploy.py) 把 mmocr 模型一键式转换为推理后端模型。 +你可以使用 [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/1.x/tools/deploy.py) 把 mmocr 模型一键式转换为推理后端模型。 该工具的详细使用说明请参考[这里](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage). 转换的关键之一是使用正确的配置文件。项目中已内置了各后端部署[配置文件](https://github.com/open-mmlab/mmdeploy/tree/1.x/configs/mmocr)。 diff --git a/docs/zh_cn/04-supported-codebases/mmpose.md b/docs/zh_cn/04-supported-codebases/mmpose.md index 67854a05a0..ce5ea46574 100644 --- a/docs/zh_cn/04-supported-codebases/mmpose.md +++ b/docs/zh_cn/04-supported-codebases/mmpose.md @@ -27,7 +27,7 @@ mmdeploy 有以下几种安装方式: **方式一:** 安装预编译包 -> 待 mmdeploy 正式发布 dev-1.x,再补充 +> 待 mmdeploy 正式发布 1.x,再补充 **方式二:** 一键式脚本安装 @@ -35,7 +35,7 @@ mmdeploy 有以下几种安装方式: 比如,以下命令可以安装 mmdeploy 以及配套的推理引擎——`ONNX Runtime`. ```shell -git clone --recursive -b dev-1.x https://github.com/open-mmlab/mmdeploy.git +git clone --recursive -b 1.x https://github.com/open-mmlab/mmdeploy.git cd mmdeploy python3 tools/scripts/build_ubuntu_x64_ort.py $(nproc) export PYTHONPATH=$(pwd)/build/lib:$PYTHONPATH @@ -48,7 +48,7 @@ export LD_LIBRARY_PATH=$(pwd)/../mmdeploy-dep/onnxruntime-linux-x64-1.8.1/lib/:$ ## 模型转换 -你可以使用 [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/tools/deploy.py) 把 mmpose 模型一键式转换为推理后端模型。 +你可以使用 [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/1.x/tools/deploy.py) 把 mmpose 模型一键式转换为推理后端模型。 该工具的详细使用说明请参考[这里](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage). 以下,我们将演示如何把 `hrnet` 转换为 onnx 模型。 diff --git a/docs/zh_cn/04-supported-codebases/mmseg.md b/docs/zh_cn/04-supported-codebases/mmseg.md index d27bc6935e..a7b673f345 100644 --- a/docs/zh_cn/04-supported-codebases/mmseg.md +++ b/docs/zh_cn/04-supported-codebases/mmseg.md @@ -28,7 +28,7 @@ mmdeploy 有以下几种安装方式: **方式一:** 安装预编译包 -> 待 mmdeploy 正式发布 dev-1.x,再补充 +> 待 mmdeploy 正式发布 1.x,再补充 **方式二:** 一键式脚本安装 @@ -36,7 +36,7 @@ mmdeploy 有以下几种安装方式: 比如,以下命令可以安装 mmdeploy 以及配套的推理引擎——`ONNX Runtime`. 
```shell -git clone --recursive -b dev-1.x https://github.com/open-mmlab/mmdeploy.git +git clone --recursive -b 1.x https://github.com/open-mmlab/mmdeploy.git cd mmdeploy python3 tools/scripts/build_ubuntu_x64_ort.py $(nproc) export PYTHONPATH=$(pwd)/build/lib:$PYTHONPATH @@ -53,7 +53,7 @@ export LD_LIBRARY_PATH=$(pwd)/../mmdeploy-dep/onnxruntime-linux-x64-1.8.1/lib/:$ ## 模型转换 -你可以使用 [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/blob/dev-1.x/tools/deploy.py) 把 mmseg 模型一键式转换为推理后端模型。 +你可以使用 [tools/deploy.py](https://github.com/open-mmlab/mmdeploy/tree/1.x/tools/deploy.py) 把 mmseg 模型一键式转换为推理后端模型。 该工具的详细使用说明请参考[这里](https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/02-how-to-run/convert_model.md#usage). 以下,我们将演示如何把 `unet` 转换为 onnx 模型。 diff --git a/docs/zh_cn/conf.py b/docs/zh_cn/conf.py index c060165f55..fd8ab6b95f 100644 --- a/docs/zh_cn/conf.py +++ b/docs/zh_cn/conf.py @@ -106,7 +106,7 @@ # documentation. # html_theme_options = { - 'logo_url': 'https://mmdeploy.readthedocs.io/zh_CN/latest/', + 'logo_url': 'https://mmdeploy.readthedocs.io/zh_CN/1.x/', 'menu': [{ 'name': 'GitHub', 'url': 'https://github.com/open-mmlab/mmdeploy' diff --git a/docs/zh_cn/switch_language.md b/docs/zh_cn/switch_language.md index 1b5df42929..03bf778b72 100644 --- a/docs/zh_cn/switch_language.md +++ b/docs/zh_cn/switch_language.md @@ -1,3 +1,3 @@ -## English +## English -## 简体中文 +## 简体中文 diff --git a/docs/zh_cn/tutorial/02_challenges.md b/docs/zh_cn/tutorial/02_challenges.md index c7d4ebb7a4..e3d0e37970 100644 --- a/docs/zh_cn/tutorial/02_challenges.md +++ b/docs/zh_cn/tutorial/02_challenges.md @@ -1,6 +1,6 @@ # 第二章:解决模型部署中的难题 -在[第一章](https://mmdeploy.readthedocs.io/zh_CN/dev-1.x/tutorial/01_introduction_to_model_deployment.html)中,我们部署了一个简单的超分辨率模型,一切都十分顺利。但是,上一个模型还有一些缺陷——图片的放大倍数固定是 4,我们无法让图片放大任意的倍数。现在,我们来尝试部署一个支持动态放大倍数的模型,体验一下在模型部署中可能会碰到的困难。 +在[第一章](https://mmdeploy.readthedocs.io/zh_CN/1.x/tutorial/01_introduction_to_model_deployment.html)中,我们部署了一个简单的超分辨率模型,一切都十分顺利。但是,上一个模型还有一些缺陷——图片的放大倍数固定是 4,我们无法让图片放大任意的倍数。现在,我们来尝试部署一个支持动态放大倍数的模型,体验一下在模型部署中可能会碰到的困难。 ## 模型部署中常见的难题 @@ -10,7 +10,7 @@ - 新算子的实现。深度学习技术日新月异,提出新算子的速度往往快于 ONNX 维护者支持的速度。为了部署最新的模型,部署工程师往往需要自己在 ONNX 和推理引擎中支持新算子。 - 中间表示与推理引擎的兼容问题。由于各推理引擎的实现不同,对 ONNX 难以形成统一的支持。为了确保模型在不同的推理引擎中有同样的运行效果,部署工程师往往得为某个推理引擎定制模型代码,这为模型部署引入了许多工作量。 -我们会在后续教程详细讲述解决这些问题的方法。如果对前文中 ONNX、推理引擎、中间表示、算子等名词感觉陌生,不用担心,可以阅读[第一章](https://mmdeploy.readthedocs.io/zh_CN/dev-1.x/tutorial/01_introduction_to_model_deployment.html),了解有关概念。 +我们会在后续教程详细讲述解决这些问题的方法。如果对前文中 ONNX、推理引擎、中间表示、算子等名词感觉陌生,不用担心,可以阅读[第一章](https://mmdeploy.readthedocs.io/zh_CN/1.x/tutorial/01_introduction_to_model_deployment.html),了解有关概念。 现在,让我们对原来的 SRCNN 模型做一些小的修改,体验一下模型动态化对模型部署造成的困难,并学习解决该问题的一种方法。 @@ -38,7 +38,7 @@ def init_torch_model(): 现在,假设我们要做一个超分辨率的应用。我们的用户希望图片的放大倍数能够自由设置。而我们交给用户的,只有一个 .onnx 文件和运行超分辨率模型的应用程序。我们在不修改 .onnx 文件的前提下改变放大倍数。 -因此,我们必须修改原来的模型,令模型的放大倍数变成推理时的输入。在[第一章](https://mmdeploy.readthedocs.io/zh_CN/dev-1.x/tutorial/01_introduction_to_model_deployment.html)中的 Python 脚本的基础上,我们做一些修改,得到这样的脚本: +因此,我们必须修改原来的模型,令模型的放大倍数变成推理时的输入。在[第一章](https://mmdeploy.readthedocs.io/zh_CN/1.x/tutorial/01_introduction_to_model_deployment.html)中的 Python 脚本的基础上,我们做一些修改,得到这样的脚本: ```python import torch @@ -75,7 +75,7 @@ def init_torch_model(): torch_model = SuperResolutionNet() # Please read the code about downloading 'srcnn.pth' and 'face.png' in - # 
https://mmdeploy.readthedocs.io/zh_CN/dev-1.x/tutorial/01_introduction_to_model_deployment.html#pytorch
+    # https://mmdeploy.readthedocs.io/zh_CN/1.x/tutorial/01_introduction_to_model_deployment.html#pytorch
     state_dict = torch.load('srcnn.pth')['state_dict']

     # Adapt the checkpoint
diff --git a/mmdeploy/apis/onnx/passes/optimize_onnx.py b/mmdeploy/apis/onnx/passes/optimize_onnx.py
index 19e14bc292..485f1695bf 100644
--- a/mmdeploy/apis/onnx/passes/optimize_onnx.py
+++ b/mmdeploy/apis/onnx/passes/optimize_onnx.py
@@ -18,6 +18,6 @@ def optimize_onnx(ctx, graph, params_dict, torch_out):
         logger.warning(
             'Can not optimize model, please build torchscript extension.\n'
             'More details: '
-            'https://github.com/open-mmlab/mmdeploy/blob/master/docs/en/experimental/onnx_optimizer.md'  # noqa
+            'https://github.com/open-mmlab/mmdeploy/tree/1.x/docs/en/experimental/onnx_optimizer.md'  # noqa
         )
     return graph, params_dict, torch_out

From a393af680ff9cf36c5afc82fd86ce9ed6553e0a7 Mon Sep 17 00:00:00 2001
From: RunningLeon
Date: Thu, 1 Dec 2022 19:16:35 +0800
Subject: [PATCH 6/6] fix mmocr url

---
 docs/zh_cn/04-supported-codebases/mmocr.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/zh_cn/04-supported-codebases/mmocr.md b/docs/zh_cn/04-supported-codebases/mmocr.md
index 14cddcad09..73aea75d48 100644
--- a/docs/zh_cn/04-supported-codebases/mmocr.md
+++ b/docs/zh_cn/04-supported-codebases/mmocr.md
@@ -22,7 +22,7 @@ ______________________________________________________________________

 ### 安装 mmocr

-请参考[官网安装指南](https://mmocr.readthedocs.io/en/1.x/get_started/install.html).
+请参考[官网安装指南](https://mmocr.readthedocs.io/en/dev-1.x/get_started/install.html).

 ### 安装 mmdeploy
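The hand-made URL fixes in patches 3 to 6 are exactly the kind of drift the link-check action from patch 1 is meant to catch in CI. As a local complement, here is a small sketch (an illustration, not part of the patch series) that lists branch-pinned GitHub links under the same paths the workflow's `folder-path`/`file-path` options cover, so stale `master` or `dev-1.x` references can be audited before pushing:

```python
# Minimal sketch: audit docs for GitHub links pinned to a stale branch.
import re
from pathlib import Path

# Mirrors the folder-path/file-path values in .github/workflows/lint.yml.
ROOTS = ["docs/en", "docs/zh_cn", "README.md", "README_zh-CN.md"]
BRANCH_LINK = re.compile(
    r"https://github\.com/open-mmlab/mmdeploy/(?:blob|tree)/(?:master|dev-1\.x)/[^)\s]*"
)

def iter_markdown(paths):
    """Yield every markdown file under the given files/directories."""
    for root in map(Path, paths):
        if root.is_file():
            yield root
        elif root.is_dir():
            yield from root.rglob("*.md")

for md_file in iter_markdown(ROOTS):
    for lineno, line in enumerate(md_file.read_text(encoding="utf-8").splitlines(), 1):
        for match in BRANCH_LINK.finditer(line):
            print(f"{md_file}:{lineno}: {match.group(0)}")
```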