From a55b2e62e1dd41bda90862d2f78a849653549f38 Mon Sep 17 00:00:00 2001
From: "Yanan Cao (PyTorch)"
Date: Wed, 5 Feb 2025 11:48:05 -0800
Subject: [PATCH] executorch (#8190)

Summary:
Pull Request resolved: https://github.com/pytorch/executorch/pull/8190

Reviewed By: avikchaudhuri, iseeyuan

Differential Revision: D69068797
---
 backends/qualcomm/utils/utils.py                   | 3 +--
 devtools/visualization/visualization_utils_test.py | 4 ++--
 exir/emit/test/test_emit.py                        | 2 +-
 exir/program/test/test_program.py                  | 2 +-
 extension/llm/modules/test/test_kv_cache.py        | 1 +
 5 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/backends/qualcomm/utils/utils.py b/backends/qualcomm/utils/utils.py
index f94e22c5306..6b9e6baf0f6 100644
--- a/backends/qualcomm/utils/utils.py
+++ b/backends/qualcomm/utils/utils.py
@@ -873,7 +873,6 @@ def generate_multi_graph_program(
     backend_config: ExecutorchBackendConfig = None,
     constant_methods: Optional[Dict[str, Any]] = None,
 ) -> ExecutorchProgramManager:
-
     # compile multiple graphs in qcir into single context binary
     (
         graph_inputs,
@@ -1060,7 +1059,7 @@ def forward(
                 outputs_dict[graph_name],
                 embedding_quantize,
             )
-            prog = torch.export.export(composite_llama_module, sample_inputs)
+            prog = torch.export.export(composite_llama_module, sample_inputs, strict=True)
             progs_dict[graph_name] = prog
     # leverage ExecutorchProgramManager for generating pte with multi-methods
     edge_prog_mgr = to_edge(
diff --git a/devtools/visualization/visualization_utils_test.py b/devtools/visualization/visualization_utils_test.py
index d49c6d2f72d..4f44241518f 100644
--- a/devtools/visualization/visualization_utils_test.py
+++ b/devtools/visualization/visualization_utils_test.py
@@ -84,7 +84,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
 def test_visualize_manual_export(server):
     with server():
         model = Linear(20, 30)
-        exported_program = torch.export.export(model, model.get_inputs())
+        exported_program = torch.export.export(model, model.get_inputs(), strict=True)
         visualize(exported_program)
         time.sleep(3.0)
@@ -150,7 +150,7 @@ def test_visualize_to_executorch(server):
 def test_visualize_graph(server):
     with server():
         model = Linear(20, 30)
-        exported_program = torch.export.export(model, model.get_inputs())
+        exported_program = torch.export.export(model, model.get_inputs(), strict=True)
         exported_program = to_edge_transform_and_lower(
             exported_program
         ).exported_program()
diff --git a/exir/emit/test/test_emit.py b/exir/emit/test/test_emit.py
index 10aba2f0a77..c2561d06487 100644
--- a/exir/emit/test/test_emit.py
+++ b/exir/emit/test/test_emit.py
@@ -265,7 +265,7 @@ def forward(self, x):
         m = TestModule()
         example_inputs = (torch.ones(10),)

-        ep = torch.export.export(m, example_inputs)
+        ep = torch.export.export(m, example_inputs, strict=True)
         edge = to_edge(
             ep,
             compile_config=EdgeCompileConfig(
diff --git a/exir/program/test/test_program.py b/exir/program/test/test_program.py
index d5e0d15d4ad..d96e8a24143 100644
--- a/exir/program/test/test_program.py
+++ b/exir/program/test/test_program.py
@@ -343,7 +343,7 @@ def forward(self, a, b, c):
         inp = (torch.randn(10), torch.randn(10), torch.tensor(3))

-        ep = export(M(), inp)
+        ep = export(M(), inp, strict=True)
         edge = to_edge(ep)
         self.assertTrue(
             torch.allclose(
diff --git a/extension/llm/modules/test/test_kv_cache.py b/extension/llm/modules/test/test_kv_cache.py
index 4ed088c58f3..1e02072de7f 100644
--- a/extension/llm/modules/test/test_kv_cache.py
+++ b/extension/llm/modules/test/test_kv_cache.py
@@ -143,6 +143,7 @@ def forward(self, k_val: torch.Tensor, v_val: torch.Tensor):
                     3: torch.export.Dim.STATIC,
                 },
             },
+            strict=True,
        )
        return exported_kv_cache
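The change is mechanical across all five files: every torch.export.export call gains an
explicit strict=True argument instead of relying on the parameter's default, which keeps
these call sites stable if the default were ever to change. A minimal sketch of the
before/after pattern, using a hypothetical TinyModule that is not taken from this patch:

    import torch


    class TinyModule(torch.nn.Module):
        # Hypothetical stand-in for the modules exported in this patch.
        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return x + 1


    example_inputs = (torch.ones(10),)

    # Before: the call relies on whatever default `strict` the installed
    # torch version provides.
    ep_default = torch.export.export(TinyModule(), example_inputs)

    # After: strict-mode tracing is requested explicitly, matching the
    # call sites updated in this patch.
    ep_strict = torch.export.export(TinyModule(), example_inputs, strict=True)
    print(ep_strict)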