Skip to content

Commit f7d8e7a

Browse files
authored
Fix the AOTI example (#3306)
Summary: The compiled model's run takes the same inputs as the eager model, so there is no need to explicitly wrap the args in a tuple.
1 parent 81efd5f commit f7d8e7a

File tree

2 files changed

+5
-5
lines changed

2 files changed

+5
-5
lines changed

intermediate_source/torch_export_tutorial.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -995,7 +995,7 @@ def forward(self, x):
995995
# with torch.no_grad():
996996
# pt2_path = torch._inductor.aoti_compile_and_package(ep)
997997
#
998-
# # Load and run the .so file in Python.
998+
# # Load and run the .pt2 file in Python.
999999
# # To load and run it in a C++ environment, see:
10001000
# # https://pytorch.org/docs/main/torch.compiler_aot_inductor.html
10011001
# aoti_compiled = torch._inductor.aoti_load_package(pt2_path)

recipes_source/torch_export_aoti_python.py

+4-4
Original file line numberDiff line numberDiff line change
@@ -176,7 +176,7 @@
176176
model_path = os.path.join(os.getcwd(), "resnet18.pt2")
177177

178178
compiled_model = torch._inductor.aoti_load_package(model_path)
179-
example_inputs = (torch.randn(2, 3, 224, 224, device=device),)
179+
example_inputs = torch.randn(2, 3, 224, 224, device=device)
180180

181181
with torch.inference_mode():
182182
output = compiled_model(example_inputs)
@@ -238,11 +238,11 @@ def timed(fn):
238238

239239
torch._dynamo.reset()
240240

241-
model = torch._inductor.aoti_load_package(model_path)
242-
example_inputs = (torch.randn(1, 3, 224, 224, device=device),)
241+
compiled_model = torch._inductor.aoti_load_package(model_path)
242+
example_inputs = torch.randn(1, 3, 224, 224, device=device)
243243

244244
with torch.inference_mode():
245-
_, time_taken = timed(lambda: model(example_inputs))
245+
_, time_taken = timed(lambda: compiled_model(example_inputs))
246246
print(f"Time taken for first inference for AOTInductor is {time_taken:.2f} ms")
247247

248248

0 commit comments

Comments (0)