Skip to content

Commit d5ab484

Browse files
committed
Update on "[Migration][DO NOT MERGE] Separate old ir into _legacy_ir folder"
All tests except for linter are expected to pass. [ghstack-poisoned]
2 parents 49c4104 + 60b2348 commit d5ab484

28 files changed

+1173
-298
lines changed

noxfile.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
"jinja2",
1414
"numpy==1.24.4",
1515
"typing_extensions",
16-
"beartype!=0.16.0",
16+
"beartype==0.17.2",
1717
"types-PyYAML",
1818
"expecttest==0.1.6",
1919
"hypothesis",
@@ -26,7 +26,7 @@
2626
"pytest!=7.1.0",
2727
"pyyaml",
2828
)
29-
ONNX = "onnx==1.15.0"
29+
ONNX = "onnx==1.15"
3030
ONNX_RUNTIME = "onnxruntime==1.16.1"
3131
PYTORCH = "torch==2.1.0"
3232
TORCHVISON = "torchvision==0.16"

onnxscript/_internal/utils.py

Lines changed: 7 additions & 57 deletions
Original file line numberDiff line numberDiff line change
@@ -5,17 +5,14 @@
55
from __future__ import annotations
66

77
import numbers
8-
from typing import Any, Iterable, Optional, Sequence
8+
from typing import Optional, Sequence
99

1010
import numpy as np
1111
import onnx
1212
import onnx.helper
13-
from onnx import FunctionProto, ModelProto, TensorProto, ValueInfoProto
1413

1514
from onnxscript import tensor
1615

17-
# pylint: enable=unused-import, ungrouped-imports
18-
1916

2017
def external_tensor(
2118
name: str,
@@ -26,7 +23,7 @@ def external_tensor(
2623
length: Optional[int] = None,
2724
checksum: Optional[str] = None,
2825
basepath: Optional[str] = None,
29-
) -> TensorProto:
26+
) -> onnx.TensorProto:
3027
"""Create a TensorProto referencing externally stored tensor-data.
3128
3229
Args:
@@ -44,11 +41,11 @@ def external_tensor(
4441
4542
See https://github.com/onnx/onnx/blob/main/docs/ExternalData.md for more details.
4643
"""
47-
tensor_proto = TensorProto()
44+
tensor_proto = onnx.TensorProto()
4845
tensor_proto.name = name
4946
tensor_proto.data_type = data_type
5047
tensor_proto.dims.extend(dims)
51-
tensor_proto.data_location = TensorProto.EXTERNAL
48+
tensor_proto.data_location = onnx.TensorProto.EXTERNAL
5249

5350
def add(k, v):
5451
entry = tensor_proto.external_data.add()
@@ -74,17 +71,17 @@ def value_to_type_proto(val):
7471
shape = val.shape
7572
return onnx.helper.make_tensor_type_proto(elem_type, shape)
7673
if isinstance(val, int):
77-
return onnx.helper.make_tensor_type_proto(TensorProto.INT32, [])
74+
return onnx.helper.make_tensor_type_proto(onnx.TensorProto.INT32, [])
7875
if isinstance(val, (float, np.float32)):
79-
return onnx.helper.make_tensor_type_proto(TensorProto.FLOAT, [])
76+
return onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [])
8077
if isinstance(val, list):
8178
if len(val) > 0:
8279
return onnx.helper.make_sequence_type_proto(value_to_type_proto(val[0]))
8380
# Edge-case. Cannot determine a suitable ONNX type for an empty list.
8481
# Should be using a typed-value instead.
8582
# Treated as a sequence of tensors of float-type.
8683
return onnx.helper.make_sequence_type_proto(
87-
onnx.helper.make_tensor_type_proto(TensorProto.FLOAT, None)
84+
onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, None)
8885
)
8986
if isinstance(val, numbers.Number):
9087
nparray = np.array(val)
@@ -102,50 +99,3 @@ def values_to_value_infos(name_values):
10299
for (name, val) in name_values
103100
if val is not None
104101
]
105-
106-
107-
def make_model_from_function_proto(
108-
function_proto: FunctionProto,
109-
function_opset_version: int,
110-
input_value_infos: Sequence[ValueInfoProto],
111-
output_value_infos: Sequence[ValueInfoProto],
112-
**attrs: Any,
113-
) -> ModelProto:
114-
"""Creates a model containing a single call to a given
115-
function with input and output value_infos, etc.
116-
117-
Args:
118-
function_proto (FunctionProto): function proto
119-
representing a single call
120-
function_opset_version (int): function_proto's version
121-
input_value_infos (list of ValueInfoProto): function's input
122-
output_value_infos (list of ValueInfoProto): function's output
123-
**attrs (dict): the attributes of the node for the function
124-
125-
Returns:
126-
ModelProto
127-
"""
128-
129-
input_names = [vi.name for vi in input_value_infos]
130-
output_names = [vi.name for vi in output_value_infos]
131-
node = onnx.helper.make_node(
132-
function_proto.name,
133-
input_names,
134-
output_names,
135-
domain=function_proto.domain,
136-
**attrs,
137-
)
138-
graph = onnx.helper.make_graph([node], "node_graph", input_value_infos, output_value_infos)
139-
model_proto_opset: Iterable[onnx.OperatorSetIdProto] = function_proto.opset_import
140-
if all(o.domain != function_proto.domain for o in model_proto_opset):
141-
model_proto_opset = [
142-
*model_proto_opset,
143-
onnx.helper.make_opsetid(function_proto.domain, function_opset_version),
144-
]
145-
model = onnx.helper.make_model(
146-
graph,
147-
functions=[function_proto],
148-
producer_name="onnxscript",
149-
opset_imports=model_proto_opset,
150-
)
151-
return model

onnxscript/backend/onnx_export_test.py

Lines changed: 2 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -88,6 +88,7 @@ def skip(pattern: str | Pattern, reason: str, *, condition: bool = True):
8888
r"^test_range_int32_type_negative_delta_expanded",
8989
"Change when the converter supports support something like 'while i < n and cond:'",
9090
),
91+
skip(r"^test_ai_onnx_ml_label_encoder", "ONNX Runtime does not support Opset 21 at 1.17"),
9192
)
9293

9394

@@ -247,37 +248,7 @@ def test_export2python_produces_correct_onnx_script_model(
247248
functions = extract_functions(backend_test.name, code, self.test_folder)
248249
main_function = functions[f"bck_{backend_test.name}"]
249250
self.assertIsNotNone(main_function)
250-
proto = main_function.to_model_proto()
251-
252-
# Opset may be different when a binary operator is used.
253-
if backend_test.onnx_model.ir_version != proto.ir_version:
254-
if (
255-
not backend_test.name.startswith( # pylint: disable=too-many-boolean-expressions
256-
"test_add"
257-
)
258-
and not backend_test.name.startswith("test_and")
259-
and not backend_test.name.startswith("test_div")
260-
and not backend_test.name.startswith("test_equal")
261-
and not backend_test.name.startswith("test_greater")
262-
and not backend_test.name.startswith("test_less")
263-
and not backend_test.name.startswith("test_matmul")
264-
and not backend_test.name.startswith("test_mod")
265-
and not backend_test.name.startswith("test_mul")
266-
and not backend_test.name.startswith("test_not")
267-
and not backend_test.name.startswith("test_or")
268-
and not backend_test.name.startswith("test_pow")
269-
and not backend_test.name.startswith("test_sub")
270-
and (backend_test.onnx_model.ir_version, proto.ir_version)
271-
not in {(3, 4), (5, 6)}
272-
):
273-
# Unexpected behavior for old opsets
274-
raise AssertionError(
275-
f"Incompatible ir_version {(backend_test.onnx_model.ir_version)} !="
276-
f" {(proto.ir_version)}\n"
277-
f"{backend_test.onnx_model}\n"
278-
f"-----\n"
279-
f"{proto}"
280-
)
251+
proto = main_function.to_model_proto(ir_version=backend_test.onnx_model.ir_version)
281252

282253
try:
283254
session = ort.InferenceSession(

onnxscript/function_libs/torch_lib/graph_building_test.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
# mypy: disable-error-code="arg-type,type-arg,valid-type"
44
from __future__ import annotations
55

6+
import os
67
import unittest
78

89
import torch
@@ -11,8 +12,11 @@
1112
import onnxscript.testing
1213
from onnxscript import FLOAT, evaluator
1314
from onnxscript import opset18 as op
15+
from onnxscript._internal import version_utils
1416
from onnxscript.function_libs.torch_lib import graph_building, ops
1517

18+
IS_WINDOWS = os.name == "nt"
19+
1620

1721
class TestTorchScriptTracingEvaluator(unittest.TestCase):
1822
def setUp(self):
@@ -138,6 +142,10 @@ def test_add_initializer_allows_adding_the_same_tensor_twice_using_same_name(sel
138142
graph.add_initializer("x", x_tensor)
139143

140144

145+
@unittest.skipIf(
146+
IS_WINDOWS and version_utils.torch_older_than("2.3"),
147+
"dynamo_export not supported on Windows in PyTorch<2.3",
148+
)
141149
class TestModelSaving(unittest.TestCase):
142150
def test_save_initializer_to_files_for_large_model(self):
143151
class MLP(torch.nn.Module):

onnxscript/irbuilder.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ def _format(seq: Sequence[Any], prefix: str, sep: str, suffix: str, formatter=st
3131
return prefix + sep.join([formatter(x) for x in seq]) + suffix
3232

3333

34-
def select_ir_version(version: int, domain: str = ""):
34+
def select_ir_version(version: int, domain: str = "") -> int:
3535
"""Selects a suitable ONNX ir_version for a given opset version."""
3636
if domain == "":
3737
domain = "ai.onnx"

onnxscript/onnx_opset/__init__.py

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -36,9 +36,11 @@
3636
from onnxscript.onnx_opset._impl.opset17 import Opset17
3737
from onnxscript.onnx_opset._impl.opset18 import Opset18
3838
from onnxscript.onnx_opset._impl.opset19 import Opset19
39+
from onnxscript.onnx_opset._impl.opset20 import Opset20
3940
from onnxscript.onnx_opset._impl.opset_ai_onnx_ml1 import Opset_ai_onnx_ml1
4041
from onnxscript.onnx_opset._impl.opset_ai_onnx_ml2 import Opset_ai_onnx_ml2
4142
from onnxscript.onnx_opset._impl.opset_ai_onnx_ml3 import Opset_ai_onnx_ml3
43+
from onnxscript.onnx_opset._impl.opset_ai_onnx_ml4 import Opset_ai_onnx_ml4
4244
from onnxscript.onnx_opset._impl.opset_ai_onnx_preview_training1 import (
4345
Opset_ai_onnx_preview_training1,
4446
)
@@ -65,9 +67,11 @@
6567
"opset17",
6668
"opset18",
6769
"opset19",
70+
"opset20",
6871
"opset_ai_onnx_ml1",
6972
"opset_ai_onnx_ml2",
7073
"opset_ai_onnx_ml3",
74+
"opset_ai_onnx_ml4",
7175
"opset_ai_onnx_preview_training1",
7276
]
7377

@@ -97,9 +101,11 @@
97101
opset17 = Opset17()
98102
opset18 = Opset18()
99103
opset19 = Opset19()
104+
opset20 = Opset20()
100105
opset_ai_onnx_ml1 = Opset_ai_onnx_ml1()
101106
opset_ai_onnx_ml2 = Opset_ai_onnx_ml2()
102107
opset_ai_onnx_ml3 = Opset_ai_onnx_ml3()
108+
opset_ai_onnx_ml4 = Opset_ai_onnx_ml4()
103109
opset_ai_onnx_preview_training1 = Opset_ai_onnx_preview_training1()
104110
all_opsets: Mapping[Tuple[str, int], Opset] = {
105111
(
@@ -178,6 +184,10 @@
178184
"",
179185
19,
180186
): opset19,
187+
(
188+
"",
189+
20,
190+
): opset20,
181191
(
182192
"ai.onnx.ml",
183193
1,
@@ -190,6 +200,10 @@
190200
"ai.onnx.ml",
191201
3,
192202
): opset_ai_onnx_ml3,
203+
(
204+
"ai.onnx.ml",
205+
4,
206+
): opset_ai_onnx_ml4,
193207
(
194208
"ai.onnx.preview.training",
195209
1,

0 commit comments

Comments
 (0)