
Commit 2348020

resolve dictionary as input
1 parent a98a161 commit 2348020

9 files changed, +42 -30 lines changed

optimum/exporters/openvino/__main__.py

Lines changed: 0 additions & 1 deletion
@@ -148,7 +148,6 @@ def main_export(
         else:
             possible_synonyms = ""
         logger.info(f"Automatic task detection to {task}{possible_synonyms}.")
-
     onnx_config, models_and_onnx_configs = optimum_main._get_submodels_and_onnx_configs(
         model=model,
         task=task,

optimum/exporters/openvino/convert.py

Lines changed: 31 additions & 19 deletions
@@ -21,7 +21,7 @@
 
 from transformers.utils import is_tf_available, is_torch_available
 
-from openvino.runtime import PartialShape, serialize
+from openvino.runtime import PartialShape, save_model
 from openvino.runtime.utils.types import get_element_type
 from openvino.tools.ovc import convert_model
 from optimum.exporters.onnx.base import OnnxConfig
@@ -151,8 +151,6 @@ def export_pytorch(
 
     with torch.no_grad():
         model.config.return_dict = True
-        custom_patcher = type(config).patch_model_for_export != OnnxConfig.patch_model_for_export
-        model.config.torchscript = not custom_patcher
         model.eval()
 
         # Check if we need to override certain configuration item
@@ -182,24 +180,30 @@ def export_pytorch(
         else:
             sig = inspect.signature(model.call)
 
-        dummy_inputs = remove_none_from_dummy_inputs(dummy_inputs)
+        dummy_inputs, dict_inputs = remove_none_from_dummy_inputs(dummy_inputs)
         input_info = get_input_shapes(dummy_inputs, inputs)
         try:
-            if custom_patcher:
-                patcher = config.patch_model_for_export(model, model_kwargs=model_kwargs)
-                patched_forward = patcher.patched_forward
-
-                @functools.wraps(patched_forward)
-                def ts_patched_forward(*args, **kwargs):
-                    outputs = patched_forward(*args, **kwargs)
-                    return tuple(outputs.values())
-
-                patcher.patched_forward = ts_patched_forward
-                with patcher:
-                    ov_model = convert_model(model, example_input=dummy_inputs, input=input_info)
-            else:
+            patcher = config.patch_model_for_export(model, model_kwargs=model_kwargs)
+            patched_forward = patcher.patched_forward
+
+            @functools.wraps(patched_forward)
+            def ts_patched_forward(*args, **kwargs):
+                for i in range(len(dict_inputs)):
+                    input_name = dict_inputs[i][0]
+                    keys = dict_inputs[i][1]
+                    tuple_input = kwargs[input_name]
+                    input_dict = dict(zip(keys, tuple_input))
+                    kwargs[input_name] = input_dict
+                outputs = patched_forward(*args, **kwargs)
+                return tuple(outputs.values())
+
+            patcher.patched_forward = ts_patched_forward
+            with patcher:
                 ov_model = convert_model(model, example_input=dummy_inputs, input=input_info)
         except Exception:
+            orig_torch_onnx_export = torch.onnx.export
+
+            torch.onnx.export = functools.partial(orig_torch_onnx_export, do_constant_folding=True)
             model.config.torchscript = False
             model.config.return_dict = True
             onnx_output = (
@@ -210,13 +214,19 @@ def ts_patched_forward(*args, **kwargs):
             input_names, output_names = export_pytorch_to_onnx(
                 model, config, opset, onnx_output, device, input_shapes, model_kwargs
             )
+            torch.onnx.export = orig_torch_onnx_export
             ov_model = convert_model(str(onnx_output))
-            serialize(ov_model, output.parent / OV_XML_FILE_NAME if output.suffix != ".xml" else output)
+            save_model(
+                ov_model,
+                output.parent / OV_XML_FILE_NAME if output.suffix != ".xml" else output,
+                compress_to_fp16=False,
+            )
             return input_names, output_names, True
         clear_class_registry()
         ordered_dummy_inputs = {param: dummy_inputs[param] for param in sig.parameters if param in dummy_inputs}
         ordered_input_names = list(inputs)
         flatten_inputs = flattenize_inputs(ordered_dummy_inputs.values())
+        ov_model.validate_nodes_and_infer_types()
         for idx, out_tensor in enumerate(ov_model.outputs):
             if idx < len(output_names):
                 out_tensor.get_tensor().set_names({output_names[idx]})
@@ -233,7 +243,9 @@ def ts_patched_forward(*args, **kwargs):
             inp_tensor.get_node().set_partial_shape(static_shape)
             inp_tensor.get_node().set_element_type(get_element_type(inp_data.cpu().numpy().dtype))
         ov_model.validate_nodes_and_infer_types()
-        serialize(ov_model, output.parent / OV_XML_FILE_NAME if output.suffix != ".xml" else output)
+        save_model(
+            ov_model, output.parent / OV_XML_FILE_NAME if output.suffix != ".xml" else output, compress_to_fp16=False
+        )
         del model
         gc.collect()
         return input_names, output_names, False
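
The heart of this commit is the dict round trip above: dummy inputs that are themselves dictionaries cannot be traced directly, so remove_none_from_dummy_inputs flattens them to tuples and returns a dict_inputs record, and ts_patched_forward rebuilds the dictionaries just before calling the patched forward. A minimal standalone sketch of that round trip (the tensor and the "encoder_outputs" name are hypothetical; only the dict_inputs/kwargs shapes mirror the diff):

    import torch

    # A dict-valued dummy input, as a seq2seq model might take for encoder_outputs.
    hidden = torch.zeros(1, 4, 8)

    # remove_none_from_dummy_inputs (see utils.py below) flattens the dict to a
    # tuple and records (input name, original keys) for reconstruction.
    dict_inputs = [("encoder_outputs", ["last_hidden_state"])]
    kwargs = {"encoder_outputs": (hidden,)}

    # The loop ts_patched_forward runs before delegating to the patched forward:
    for input_name, keys in dict_inputs:
        kwargs[input_name] = dict(zip(keys, kwargs[input_name]))

    assert kwargs["encoder_outputs"]["last_hidden_state"] is hidden

The except branch additionally rebinds torch.onnx.export to functools.partial(orig_torch_onnx_export, do_constant_folding=True) for the duration of the ONNX fallback export, then restores the original binding once convert_model has consumed the ONNX file.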

optimum/exporters/openvino/utils.py

Lines changed: 4 additions & 3 deletions
@@ -46,18 +46,19 @@ def remove_none_from_list_tuple(item):
         return type(item)(new_item)
 
     upd_dummy = {}
+    dict_dummy = []
    for k, v in dummy_inputs.items():
        if v is None:
            continue
        if isinstance(v, dict):
-            for kk, vv in v.items():
-                upd_dummy[kk] = vv
+            dict_dummy.append((k, list(v.keys())))
+            upd_dummy[k] = remove_none_from_list_tuple(tuple(v.values()))
            continue
        if isinstance(v, (tuple, list)):
            upd_dummy[k] = remove_none_from_list_tuple(v)
            continue
        upd_dummy[k] = v
-    return upd_dummy
+    return upd_dummy, dict_dummy
 
 
 def get_input_shapes(dummy_inputs, inputs):
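
For reference, a hedged illustration of the new return contract (a hypothetical call; the function is imported from this module and the tensors are placeholders):

    import torch

    from optimum.exporters.openvino.utils import remove_none_from_dummy_inputs

    t0 = torch.zeros(1, 3)
    t1 = torch.zeros(1, 3, 8)
    dummy_inputs = {
        "input_ids": t0,
        "attention_mask": None,                        # None values are dropped
        "encoder_outputs": {"last_hidden_state": t1},  # dicts become tuples
    }

    upd_dummy, dict_dummy = remove_none_from_dummy_inputs(dummy_inputs)
    # upd_dummy  == {"input_ids": t0, "encoder_outputs": (t1,)}
    # dict_dummy == [("encoder_outputs", ["last_hidden_state"])]

Previously the dict branch hoisted nested keys into the top-level dummy inputs; now the values travel as a tuple and dict_dummy carries exactly what ts_patched_forward in convert.py needs to rebuild the original structure.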

optimum/intel/openvino/modeling_base.py

Lines changed: 2 additions & 2 deletions
@@ -150,7 +150,7 @@ def _save_pretrained(self, save_directory: Union[str, Path]):
                 The directory where to save the model files.
         """
         dst_path = os.path.join(save_directory, OV_XML_FILE_NAME)
-        openvino.runtime.serialize(self.model, dst_path)
+        openvino.save_model(self.model, dst_path, compress_to_fp16=False)
 
     @classmethod
     def _from_pretrained(
@@ -201,7 +201,7 @@ def _from_pretrained(
             model_save_dir = model_id
         # Download the model from the hub
         else:
-            model_file_names = [file_name]
+            model_file_names = [file_name] if from_onnx else []
             # If not ONNX then OpenVINO IR
             if not from_onnx:
                 model_file_names.append(file_name.replace(".xml", ".bin"))
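
All of the serialize-to-save_model swaps in this commit follow one pattern: openvino.save_model compresses weights to FP16 by default, whereas the old openvino.runtime.serialize wrote them at their original precision, so compress_to_fp16=False is passed everywhere to keep the on-disk precision unchanged. A minimal sketch with a throwaway one-node model (hypothetical, just to have something to save):

    import openvino
    from openvino.runtime import opset8 as ops

    # Build a trivial Model: one parameter feeding a ReLU.
    x = ops.parameter([1, 4], name="x")
    model = openvino.runtime.Model([ops.relu(x)], [x], "demo")

    # serialize() kept weights as-is; save_model() defaults to FP16 compression,
    # so pass compress_to_fp16=False to preserve the old behavior.
    openvino.save_model(model, "demo.xml", compress_to_fp16=False)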

optimum/intel/openvino/modeling_base_seq2seq.py

Lines changed: 1 addition & 1 deletion
@@ -105,7 +105,7 @@ def _save_pretrained(self, save_directory: Union[str, Path]):
 
         for src_file, dst_file_name in zip(src_files, dst_file_names):
             dst_path = os.path.join(save_directory, dst_file_name)
-            openvino.runtime.serialize(src_file, dst_path)
+            openvino.save_model(src_file, dst_path, compress_to_fp16=False)
 
     @classmethod
     def _from_pretrained(

optimum/intel/openvino/modeling_decoder.py

Lines changed: 1 addition & 1 deletion
@@ -191,7 +191,7 @@ def _save_pretrained(self, save_directory: Union[str, Path]):
         """
         model_to_save = self.model if self._pkv_precision == Type.f32 else self._original_model
         dst_path = os.path.join(save_directory, OV_XML_FILE_NAME)
-        openvino.runtime.serialize(model_to_save, dst_path)
+        openvino.save_model(model_to_save, dst_path, compress_to_fp16=False)
 
     @classmethod
     def _from_transformers(

optimum/intel/openvino/modeling_diffusion.py

Lines changed: 1 addition & 1 deletion
@@ -155,7 +155,7 @@ def _save_pretrained(self, save_directory: Union[str, Path]):
             if ov_model is not None:
                 dst_path = save_directory / dst_path / OV_XML_FILE_NAME
                 dst_path.parent.mkdir(parents=True, exist_ok=True)
-                openvino.runtime.serialize(ov_model.model, dst_path)
+                openvino.save_model(ov_model.model, dst_path, compress_to_fp16=False)
                 model_dir = ov_model.config.get("_name_or_path", None) or ov_model._model_dir / ov_model._model_name
                 config_path = Path(model_dir) / ov_model.CONFIG_NAME
                 if config_path.is_file():

optimum/intel/openvino/quantization.py

Lines changed: 1 addition & 1 deletion
@@ -414,7 +414,7 @@ def _quantize_torchmodel(
     @staticmethod
     def _save_pretrained(model: openvino.runtime.Model, output_path: str):
         compress_quantize_weights_transformation(model)
-        openvino.runtime.serialize(model, output_path)
+        openvino.save_model(model, output_path, compress_to_fp16=False)
 
     def _set_task(self):
         if self.task is None:

optimum/intel/openvino/trainer.py

Lines changed: 1 addition & 1 deletion
@@ -772,7 +772,7 @@ def _save(self, output_dir: Optional[str] = None, state_dict=None):
         compress_quantize_weights_transformation(ov_model)
 
         # Serialize IR xml and bin
-        save_model(ov_model, output_path)
+        save_model(ov_model, output_path, compress_to_fp16=False)
 
     def _get_compression_controller_by_cls(
         self, controller_cls: Type[PTCompressionAlgorithmController]
