Skip to content

Commit 2abeab6

Browse files
committed
Skip full model shape inference if model > 2GB | feat(optimizer)
ghstack-source-id: 5179892 Pull Request resolved: #1340
1 parent aeac99c commit 2abeab6

File tree

1 file changed

+9
-3
lines changed

1 file changed

+9
-3
lines changed

onnxscript/optimizer/__init__.py

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -56,9 +56,15 @@ def optimize(
         )
     for _ in range(num_iterations):
         if onnx_shape_inference:
-            model = onnx.shape_inference.infer_shapes(
-                model, check_type=True, strict_mode=True, data_prop=True
-            )
+            if model.ByteSize() < 1024 * 1024 * 1024 * 2:
+                model = onnx.shape_inference.infer_shapes(
+                    model, check_type=True, strict_mode=True, data_prop=True
+                )
+            else:
+                logger.warning(
+                    "The model size is too large for full model shape inference. "
+                    "Skipping this step."
+                )

         inline_simple_functions(model)
         modified = fold_constants(

0 commit comments

Comments (0)