Skip to content

Commit b9e410c

Browse files
committed
[Test] Hack to enable optimizer/rewriter integration into dynamo_export
- To test in torchbench.
- Somehow lintrunner changed unrelated files in this commit.

ghstack-source-id: bb25cfc
Pull Request resolved: #1334
1 parent 4c805b5 commit b9e410c

File tree

1 file changed

+9
-0
lines changed

1 file changed

+9
-0
lines changed

onnxscript/function_libs/torch_lib/graph_building.py

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,11 +17,15 @@
1717
from typing_extensions import TypeAlias
1818

1919
import onnxscript
20+
21+
# Q: Don't know why vscode only recognizes this import style instead of `from onnxscript import optimizer`.
22+
import onnxscript.optimizer as optimizer
2023
from onnxscript import evaluator
2124
from onnxscript import tensor as onnxscript_tensor
2225
from onnxscript._internal import param_manipulation, runtime_typing
2326
from onnxscript.function_libs.torch_lib import _flags
2427
from onnxscript.function_libs.torch_lib.ops import common as common_ops
28+
from onnxscript.rewriter import onnxruntime as ort_rewriter
2529

2630
__all__ = [
2731
"TorchScriptTensor",
@@ -1071,4 +1075,9 @@ def to_model_proto(
10711075
common_ops.common_opset.domain, common_ops.common_opset.version
10721076
)
10731077
)
1078+
1079+
# Not the best integration point. Enables benchmarking the migration.
1080+
onnx_model = optimizer.optimize(onnx_model)
1081+
# This also creates contrib op in the model. So definitely not the best integration point.
1082+
onnx_model = ort_rewriter.rewrite(onnx_model)
10741083
return onnx_model

0 commit comments

Comments (0)