Skip to content

Commit c513e3f

Browse files
committed
[Test] Hack to enable optimizer/rewriter integration into dynamo_export
- To test in torchbench. - Somehow lintrunner changed unrelated files in this commit. ghstack-source-id: 6f1664a Pull Request resolved: #1334
1 parent 5b7f5f5 commit c513e3f

File tree

1 file changed

+9
-0
lines changed

1 file changed

+9
-0
lines changed

onnxscript/function_libs/torch_lib/graph_building.py

Lines changed: 9 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -17,11 +17,15 @@
 from typing_extensions import TypeAlias

 import onnxscript
+
+# Q: Don't know why vscode only recognizes this import style instead of `from onnxscript import optimizer`.
+import onnxscript.optimizer as optimizer
 from onnxscript import evaluator
 from onnxscript import tensor as onnxscript_tensor
 from onnxscript._internal import param_manipulation, runtime_typing
 from onnxscript.function_libs.torch_lib import _flags
 from onnxscript.function_libs.torch_lib.ops import common as common_ops
+from onnxscript.rewriter import onnxruntime as ort_rewriter

 __all__ = [
     "TorchScriptTensor",
@@ -1073,4 +1077,9 @@ def to_model_proto(
             common_ops.common_opset.domain, common_ops.common_opset.version
         )
     )
+
+    # Not the best integration point. Enables benchmarking the migration.
+    onnx_model = optimizer.optimize(onnx_model)
+    # This also creates contrib op in the model. So definitely not the best integration point.
+    onnx_model = ort_rewriter.rewrite(onnx_model)
     return onnx_model

0 commit comments

Comments
 (0)