Commit 6857029

add docstring to main_export
1 parent 1985e21 commit 6857029

File tree

3 files changed: +65 -2 lines changed


optimum/exporters/openvino/__main__.py

Lines changed: 64 additions & 0 deletions
@@ -57,6 +57,70 @@ def main_export(
     fn_get_submodels: Optional[Callable] = None,
     **kwargs_shapes,
 ):
+    """
+    Full-suite OpenVINO export.
+
+    Args:
+        > Required parameters
+
+        model_name_or_path (`str`):
+            Model ID on huggingface.co or path on disk to the model repository to export.
+        output (`Union[str, Path]`):
+            Path indicating the directory where to store the generated OpenVINO model.
+
+        > Optional parameters
+
+        task (`Optional[str]`, defaults to `None`):
+            The task to export the model for. If not specified, the task will be auto-inferred based on the model. For decoder models,
+            use `xxx-with-past` to export the model using past key values in the decoder.
+        device (`str`, defaults to `"cpu"`):
+            The device to use to do the export. Defaults to `"cpu"`.
+        fp16 (`Optional[bool]`, defaults to `False`):
+            Use half precision during the export. PyTorch-only, requires `device="cuda"`.
+        framework (`Optional[str]`, defaults to `None`):
+            The framework to use for the export (`"pt"` or `"tf"`). If not provided, will attempt to automatically detect
+            the framework for the checkpoint.
+        cache_dir (`Optional[str]`, defaults to `None`):
+            Path indicating where to store the cache. The default Hugging Face cache path will be used by default.
+        trust_remote_code (`bool`, defaults to `False`):
+            Allows using custom code for the modeling hosted in the model repository. This option should only be set for repositories
+            you trust and in which you have read the code, as it will execute arbitrary code present in the model repository
+            on your local machine.
+        pad_token_id (`Optional[int]`, defaults to `None`):
+            This is needed by some models, for some tasks. If not provided, will attempt to use the tokenizer to guess it.
+        subfolder (`str`, defaults to `""`):
+            In case the relevant files are located inside a subfolder of the model repo, either locally or on huggingface.co, you can
+            specify the folder name here.
+        revision (`str`, defaults to `"main"`):
+            The specific model version to use. It can be a branch name, a tag name, or a commit id.
+        force_download (`bool`, defaults to `False`):
+            Whether or not to force the (re-)download of the model weights and configuration files, overriding the
+            cached versions if they exist.
+        local_files_only (`Optional[bool]`, defaults to `False`):
+            Whether or not to only look at local files (i.e., do not try to download the model).
+        use_auth_token (`Optional[str]`, defaults to `None`):
+            The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
+            when running `transformers-cli login` (stored in `~/.huggingface`).
+        model_kwargs (`Optional[Dict[str, Any]]`, defaults to `None`):
+            Experimental usage: keyword arguments to pass to the model during
+            the export. This argument should be used along with the `custom_onnx_configs` argument
+            in case, for example, the model inputs/outputs are changed (for example, if
+            `model_kwargs={"output_attentions": True}` is passed).
+        custom_onnx_configs (`Optional[Dict[str, OnnxConfig]]`, defaults to `None`):
+            Experimental usage: override the default ONNX config used for the given model. This argument may be useful for advanced users that desire a finer-grained control on the export. An example is available [here](https://huggingface.co/docs/optimum/main/en/exporters/onnx/usage_guides/export_a_model).
+        fn_get_submodels (`Optional[Callable]`, defaults to `None`):
+            Experimental usage: override the default submodels that are used at the export. This is
+            especially useful when exporting a custom architecture that needs to split the ONNX (e.g. encoder-decoder). If unspecified with custom models, optimum will try to use the default submodels used for the given task, with no guarantee of success.
+        **kwargs_shapes (`Dict`):
+            Shapes to use during inference. This argument allows overriding the default shapes used during the export.
+
+    Example usage:
+    ```python
+    >>> from optimum.exporters.openvino import main_export
+
+    >>> main_export("gpt2", output="gpt2_onnx/")
+    ```
+    """
     output = Path(output)
     if not output.exists():
         output.mkdir(parents=True)
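For context, a slightly fuller invocation sketch of the API documented above. The task name, device, and the `batch_size`/`sequence_length` shape keys below are illustrative assumptions mirroring the analogous `optimum.exporters.onnx.main_export` interface, not values taken from this commit:

```python
from optimum.exporters.openvino import main_export

# Export a decoder-only checkpoint, keeping past key values in the decoder.
# The checkpoint name, task suffix, and shape keyword names are assumptions.
main_export(
    "gpt2",
    output="gpt2_ov/",
    task="text-generation-with-past",  # `xxx-with-past`, as the docstring describes
    device="cpu",                      # the default; `"cuda"` would be needed for fp16=True
    batch_size=1,                      # forwarded through **kwargs_shapes
    sequence_length=64,                # forwarded through **kwargs_shapes
)
```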

optimum/intel/openvino/modeling_base.py

Lines changed: 0 additions & 1 deletion
@@ -30,7 +30,6 @@
 from optimum.modeling_base import OptimizedModel
 
 from ...exporters.openvino import export
-from ...exporters.openvino.utils import is_torch_model
 from ..utils.import_utils import is_transformers_version
 from .utils import ONNX_WEIGHTS_NAME, OV_XML_FILE_NAME
 
setup.py

Lines changed: 1 addition & 1 deletion
@@ -42,7 +42,7 @@
         "onnxruntime<1.15.0",
     ],
     "openvino": ["openvino==2023.1.0.dev20230811", "onnx", "onnxruntime"],
-    "nncf": ["nncf @ git+https://github.com/openvinotoolkit/nncf.gitt@release_v260"],
+    "nncf": ["nncf @ git+https://github.com/openvinotoolkit/nncf.git@release_v260"],
    "ipex": ["transformers<4.32.0", "intel-extension-for-pytorch", "onnx"],
    "diffusers": ["diffusers", "invisible-watermark>=0.2.0"],
    "quality": QUALITY_REQUIRE,
