From c5b34bf29618dcf9f45216b3a9ae2a6ac3519943 Mon Sep 17 00:00:00 2001
From: "Tugsbayasgalan (Tugsuu) Manlaibaatar"
Date: Sat, 12 Aug 2023 12:36:28 -0700
Subject: [PATCH] Remove redundant pt2_mode=True (#54)

Summary:
X-link: https://github.com/facebookincubator/AITemplate/pull/896

Pull Request resolved: https://github.com/pytorch/executorch/pull/54

Differential Revision: D48181683

fbshipit-source-id: 47b415e67c75931f67682d5367d980238f7719fe
---
 .../qnnpack/partition/support_patterns.py     |  4 +-
 backends/qnnpack/test/test_qnnpack.py         | 26 +++---
 .../qnnpack/test/test_qnnpack_partitioner.py  |  2 +-
 backends/vulkan/test/test_vulkan_delegate.py  |  8 +-
 .../xnnpack/partition/support_patterns.py     |  4 +-
 backends/xnnpack/test/test_xnnpack.py         |  4 +-
 backends/xnnpack/utils/configs.py             |  6 +-
 bundled_program/tests/common.py               |  2 +-
 exir/backend/test/demos/rpc/test_rpc.py       | 10 +--
 .../test/demos/test_delegate_aten_mode.py     |  6 +-
 .../test/demos/test_xnnpack_qnnpack.py        |  2 +-
 exir/backend/test/hta_partitioner_demo.py     | 12 +--
 exir/backend/test/test_backends.py            | 79 +++++++------------
 exir/backend/test/test_backends_lifted.py     |  3 +-
 exir/backend/test/test_backends_nested.py     |  2 +-
 exir/backend/test/test_graph_partition.py     |  4 +-
 exir/backend/test/test_utils.py               | 12 ++-
 exir/capture/_capture.py                      |  4 +-
 exir/emit/test/test_emit.py                   | 71 ++++++-----------
 exir/experimental/export_pt2.py               |  2 +-
 exir/tests/models.py                          |  2 +-
 exir/tests/test_arg_validator.py              |  4 +-
 exir/tests/test_capture.py                    |  4 +-
 exir/tests/test_delegate.py                   | 12 +--
 exir/tests/test_dynamic_shape_propagation.py  |  5 +-
 exir/tests/test_experimental.py               |  4 +-
 exir/tests/test_fixtures.py                   |  2 +-
 exir/tests/test_memory_format_ops_pass.py     |  2 +-
 exir/tests/test_memory_planning.py            |  4 +-
 exir/tests/test_pass_infra.py                 |  6 +-
 exir/tests/test_passes.py                     | 75 +++++++-----------
 exir/tests/test_quant_fusion_pass.py          | 16 +---
 ...test_quant_lowering_custom_backend_pass.py | 14 ++--
 exir/tests/test_serde.py                      | 12 ++-
 exir/tests/test_tracer.py                     | 44 +++++------
 exir/tests/test_verification.py               | 20 ++---
 exir/verification/verifier.py                 |  2 +-
 extension/pybindings/test/test.py             |  4 +-
 sdk/edir/tests/exported_op_graph_test.py      |  6 +-
 sdk/etrecord/tests/etrecord_test.py           |  4 +-
 test/end2end/test_end2end.py                  | 10 ---
 test/models/export_delegated_program.py       |  2 +-
 42 files changed, 195 insertions(+), 322 deletions(-)

diff --git a/backends/qnnpack/partition/support_patterns.py b/backends/qnnpack/partition/support_patterns.py
index e13f7f94ed2..2be813d4568 100644
--- a/backends/qnnpack/partition/support_patterns.py
+++ b/backends/qnnpack/partition/support_patterns.py
@@ -48,9 +48,9 @@ def get_dynamic_quantized_graph(f, example_inputs, dynamic_shape=False):
     # Convert module
     converted_mod = _convert_to_reference_decomposed_fx(prepared_mod)
     if dynamic_shape:
-        capture_config = CaptureConfig(pt2_mode=True, enable_dynamic_shape=True)
+        capture_config = CaptureConfig(enable_dynamic_shape=True)
     else:
-        capture_config = CaptureConfig(pt2_mode=True)
+        capture_config = CaptureConfig()
     # EXIR trace
     gm = (
         exir.capture(converted_mod, example_inputs, capture_config)
diff --git a/backends/qnnpack/test/test_qnnpack.py b/backends/qnnpack/test/test_qnnpack.py
index a520a71194a..09cb20e9ceb 100644
--- a/backends/qnnpack/test/test_qnnpack.py
+++ b/backends/qnnpack/test/test_qnnpack.py
@@ -83,7 +83,7 @@ def test_qnnpack_per_channel_dynamic_mm(self):
         )

         # Step 2: EXIR capturing
-        capture_config = CaptureConfig(pt2_mode=True, enable_dynamic_shape=False)
+
capture_config = CaptureConfig(enable_dynamic_shape=False) captured_mod = exir.capture( converted_mod, example_inputs, config=capture_config ).to_edge(EDGE_COMPILE_CONFIG) @@ -115,9 +115,7 @@ def forward(self, x): composite_model(*example_inputs) program = ( - exir.capture( - composite_model, example_inputs, exir.CaptureConfig(pt2_mode=True) - ) + exir.capture(composite_model, example_inputs, exir.CaptureConfig()) .to_edge(EDGE_COMPILE_CONFIG) .to_executorch(config=EXECUTORCH_BACKEND_CONFIG) .program @@ -159,7 +157,7 @@ def test_qnnpack_per_channel_dynamic_qlinear(self): ) # Step 2: EXIR capturing - capture_config = CaptureConfig(pt2_mode=True, enable_dynamic_shape=False) + capture_config = CaptureConfig(enable_dynamic_shape=False) captured_mod = exir.capture( converted_mod, example_inputs, config=capture_config ).to_edge(EDGE_COMPILE_CONFIG) @@ -202,9 +200,7 @@ def forward(self, x): composite_model(*example_inputs) program = ( - exir.capture( - composite_model, example_inputs, exir.CaptureConfig(pt2_mode=True) - ) + exir.capture(composite_model, example_inputs, exir.CaptureConfig()) .to_edge(EDGE_COMPILE_CONFIG) .to_executorch(config=EXECUTORCH_BACKEND_CONFIG) .program @@ -246,7 +242,7 @@ def test_qnnpack_per_tensor_dynamic_mm(self): ) # Step 2: EXIR capturing - capture_config = CaptureConfig(pt2_mode=True, enable_dynamic_shape=False) + capture_config = CaptureConfig(enable_dynamic_shape=False) captured_mod = exir.capture( converted_mod, example_inputs, config=capture_config ).to_edge(EDGE_COMPILE_CONFIG) @@ -321,7 +317,7 @@ def test_qnnpack_per_tensor_dynamic_qlinear(self): ) # Step 2: EXIR capturing - capture_config = CaptureConfig(pt2_mode=True, enable_dynamic_shape=False) + capture_config = CaptureConfig(enable_dynamic_shape=False) captured_mod = exir.capture( converted_mod, example_inputs, config=capture_config ).to_edge(EDGE_COMPILE_CONFIG) @@ -406,7 +402,7 @@ def test_qnnpack_per_channel_dynamic_mm_with_dynamic_shape(self): ) # Step 2: EXIR capturing - capture_config = CaptureConfig(pt2_mode=True, enable_dynamic_shape=True) + capture_config = CaptureConfig(enable_dynamic_shape=True) captured_mod = exir.capture( converted_mod, example_inputs, config=capture_config ).to_edge(EDGE_COMPILE_CONFIG) @@ -438,9 +434,7 @@ def forward(self, x): composite_model(*example_inputs) program = ( - exir.capture( - composite_model, example_inputs, exir.CaptureConfig(pt2_mode=True) - ) + exir.capture(composite_model, example_inputs, exir.CaptureConfig()) .to_edge(EDGE_COMPILE_CONFIG) .to_executorch(config=EXECUTORCH_BACKEND_CONFIG) .program @@ -483,7 +477,7 @@ def test_qnnpack_per_channel_dynamic_qlinear_via_partitioner(self): ) # Step 2: EXIR capturing - capture_config = CaptureConfig(pt2_mode=True, enable_dynamic_shape=False) + capture_config = CaptureConfig(enable_dynamic_shape=False) captured_mod = exir.capture( converted_mod, example_inputs, config=capture_config ).to_edge(EDGE_COMPILE_CONFIG) @@ -538,7 +532,7 @@ def test_qnnpack_per_channel_dynamic_qlinear_via_partitioner(self): # composite_model(*example_inputs) # program = ( # exir.capture( - # composite_model, example_inputs, exir.CaptureConfig(pt2_mode=True) + # composite_model, example_inputs, exir.CaptureConfig() # ) # .to_edge(EDGE_COMPILE_CONFIG) # .to_executorch() diff --git a/backends/qnnpack/test/test_qnnpack_partitioner.py b/backends/qnnpack/test/test_qnnpack_partitioner.py index 4c98fd0eba9..e067036e0eb 100644 --- a/backends/qnnpack/test/test_qnnpack_partitioner.py +++ b/backends/qnnpack/test/test_qnnpack_partitioner.py @@ -54,7 
+54,7 @@ def get_actual_dyanmic_quantized_graph(
     converted_mod = _convert_to_reference_decomposed_fx(prepared_mod)

     # Step 2: EXIR capturing
-    capture_config = CaptureConfig(pt2_mode=True, enable_dynamic_shape=dynamic_shape)
+    capture_config = CaptureConfig(enable_dynamic_shape=dynamic_shape)
     dynamic_quantized_exir_graph = (
         exir.capture(converted_mod, example_inputs, config=capture_config)
         .to_edge(exir.EdgeCompileConfig(_check_ir_validity=False))
diff --git a/backends/vulkan/test/test_vulkan_delegate.py b/backends/vulkan/test/test_vulkan_delegate.py
index 22b4a44844d..1320d7bfc86 100644
--- a/backends/vulkan/test/test_vulkan_delegate.py
+++ b/backends/vulkan/test/test_vulkan_delegate.py
@@ -61,9 +61,7 @@ def lower_module_and_test_output(
         the given sample inputs. It then runs the lowered module and compares its
         outputs with the outputs of the eager module.
         """
-        edgeir_m = exir.capture(
-            module, sample_inputs, exir.CaptureConfig(pt2_mode=True)
-        ).to_edge()
+        edgeir_m = exir.capture(module, sample_inputs, exir.CaptureConfig()).to_edge()
         lowered_module = to_backend("VulkanBackend", edgeir_m.exported_program, [])

         class WrappedModule(torch.nn.Module):
@@ -75,9 +73,7 @@ def forward(self, *args):
                 return self.one_module(*args)

         program = (
-            exir.capture(
-                WrappedModule(), sample_inputs, exir.CaptureConfig(pt2_mode=True)
-            )
+            exir.capture(WrappedModule(), sample_inputs, exir.CaptureConfig())
             .to_edge()
             .to_executorch()
             .program
diff --git a/backends/xnnpack/partition/support_patterns.py b/backends/xnnpack/partition/support_patterns.py
index b7a744fb0b4..57976ae0371 100644
--- a/backends/xnnpack/partition/support_patterns.py
+++ b/backends/xnnpack/partition/support_patterns.py
@@ -41,7 +41,7 @@
     exir.capture(
         add,
         model_inputs,
-        config=CaptureConfig(pt2_mode=True, enable_dynamic_shape=True),
+        config=CaptureConfig(enable_dynamic_shape=True),
     )
     .to_edge().module
     .graph
@@ -68,7 +68,7 @@ def _capture(module, example_inputs, pt_mode=True) -> torch.fx.GraphModule:
-    capture_config = CaptureConfig(pt2_mode=pt_mode, enable_dynamic_shape=False)
+    capture_config = CaptureConfig(enable_dynamic_shape=False)
     edge_config = exir.EdgeCompileConfig(
         _check_ir_validity=False,
         passes=[DuplicateDequantNodePass()],
diff --git a/backends/xnnpack/test/test_xnnpack.py b/backends/xnnpack/test/test_xnnpack.py
index 2c91665b6ba..94ae49ecc0e 100644
--- a/backends/xnnpack/test/test_xnnpack.py
+++ b/backends/xnnpack/test/test_xnnpack.py
@@ -236,7 +236,7 @@ def test_xnnpack_backend_conv2d_dw(self):
         conv.eval()
         self.lower_and_test_with_partitioner(conv, example_inputs)

-    @torch.inference_mode() # TODO Use pt2_mode=True for capturing.
+    @torch.inference_mode() # TODO Use the default CaptureConfig for capturing.
     def test_xnnpack_backend_mm(self):
         in_sizes = [1, 4, 4]
         input_sizes = [4, 37, 17]
@@ -329,7 +329,7 @@ def forward(self, x, y):
         )
         self.lower_and_test_with_partitioner(module, model_inputs)

-    @torch.inference_mode() # TODO Use pt2_mode=True for capturing.
+    @torch.inference_mode() # TODO Use the default CaptureConfig for capturing.
def test_xnnpack_backend_linear(self): in_size = 2 input_size = 3 diff --git a/backends/xnnpack/utils/configs.py b/backends/xnnpack/utils/configs.py index 3945e6f7be3..b6114f939d9 100644 --- a/backends/xnnpack/utils/configs.py +++ b/backends/xnnpack/utils/configs.py @@ -34,8 +34,6 @@ def get_xnnpack_executorch_backend_config( def get_xnnpack_capture_config(dynamic_shape=False, enable_aot: Optional[bool] = None): if enable_aot is None: - return CaptureConfig(pt2_mode=True, enable_dynamic_shape=dynamic_shape) + return CaptureConfig(enable_dynamic_shape=dynamic_shape) else: - return CaptureConfig( - pt2_mode=True, enable_dynamic_shape=dynamic_shape, enable_aot=enable_aot - ) + return CaptureConfig(enable_dynamic_shape=dynamic_shape, enable_aot=enable_aot) diff --git a/bundled_program/tests/common.py b/bundled_program/tests/common.py index 185a8bd481c..a5c09a00a72 100644 --- a/bundled_program/tests/common.py +++ b/bundled_program/tests/common.py @@ -229,7 +229,7 @@ def get_common_program() -> Tuple[Program, BundledConfig]: DEFAULT_INT_INPUT, ) program = ( - exir.capture(eager_model, capture_input, CaptureConfig(pt2_mode=True)) + exir.capture(eager_model, capture_input, CaptureConfig()) .to_edge() .to_executorch() .program diff --git a/exir/backend/test/demos/rpc/test_rpc.py b/exir/backend/test/demos/rpc/test_rpc.py index 47bb3c8d4d8..f53d72b1b5e 100644 --- a/exir/backend/test/demos/rpc/test_rpc.py +++ b/exir/backend/test/demos/rpc/test_rpc.py @@ -103,7 +103,7 @@ def test_delegate_whole_program(self): simple_net = self.get_a_simple_net() simple_net_input = simple_net.get_example_inputs() exported_program = exir.capture( - simple_net, simple_net_input, exir.CaptureConfig(pt2_mode=True) + simple_net, simple_net_input, exir.CaptureConfig() ).to_edge( exir.EdgeCompileConfig( _check_ir_validity=False, @@ -125,9 +125,7 @@ def forward(self, *args): composite_model = CompositeModule() exec_prog = ( - exir.capture( - composite_model, simple_net_input, exir.CaptureConfig(pt2_mode=True) - ) + exir.capture(composite_model, simple_net_input, exir.CaptureConfig()) .to_edge() .to_executorch() ) @@ -165,9 +163,7 @@ def forward(self, a, x, b): model = Model() inputs = (torch.ones(2, 2), torch.ones(2, 2), torch.ones(2, 2)) - exported_program = exir.capture( - model, inputs, exir.CaptureConfig(pt2_mode=True) - ).to_edge() + exported_program = exir.capture(model, inputs, exir.CaptureConfig()).to_edge() # First lower to demo backend demo_backend_lowered = exported_program diff --git a/exir/backend/test/demos/test_delegate_aten_mode.py b/exir/backend/test/demos/test_delegate_aten_mode.py index 27a4c499e06..9198de3989b 100644 --- a/exir/backend/test/demos/test_delegate_aten_mode.py +++ b/exir/backend/test/demos/test_delegate_aten_mode.py @@ -37,7 +37,7 @@ def forward(self, a, x, b): add_mul_module = AddMulModule() model_inputs = (torch.ones(2, 2), 2 * torch.ones(2, 2), 3 * torch.ones(2, 2)) edge_graph_module = exir.capture( - add_mul_module, model_inputs, exir.CaptureConfig(pt2_mode=True) + add_mul_module, model_inputs, exir.CaptureConfig() ).to_edge() max_value = model_inputs[0].shape[0] compile_specs = [CompileSpec("max_value", bytes([max_value]))] @@ -60,9 +60,7 @@ def forward(self, a, x, b): composite_model(*model_inputs) exec_prog = ( - exir.capture( - composite_model, model_inputs, exir.CaptureConfig(pt2_mode=True) - ) + exir.capture(composite_model, model_inputs, exir.CaptureConfig()) .to_edge() .to_executorch() ) diff --git a/exir/backend/test/demos/test_xnnpack_qnnpack.py 
b/exir/backend/test/demos/test_xnnpack_qnnpack.py index 8d514c70d09..f7df6c22605 100644 --- a/exir/backend/test/demos/test_xnnpack_qnnpack.py +++ b/exir/backend/test/demos/test_xnnpack_qnnpack.py @@ -81,7 +81,7 @@ def forward(self, x, y): ) # Step 2: EXIR capturing - capture_config = CaptureConfig(pt2_mode=True, enable_dynamic_shape=False) + capture_config = CaptureConfig(enable_dynamic_shape=False) captured_mod = exir.capture( converted_mod, example_inputs, config=capture_config ).to_edge( diff --git a/exir/backend/test/hta_partitioner_demo.py b/exir/backend/test/hta_partitioner_demo.py index 97e05b90797..dc905785117 100644 --- a/exir/backend/test/hta_partitioner_demo.py +++ b/exir/backend/test/hta_partitioner_demo.py @@ -62,7 +62,7 @@ def forward(self, x_raw, h, c): exir.capture( LSTMConvPattern(), (input_x, input_h, input_c), - exir.CaptureConfig(pt2_mode=True, enable_aot=True), + exir.CaptureConfig(enable_aot=True), ) .to_edge( # torch._export.verifier.SpecViolationError: Operator torch._ops.aten.mkldnn_rnn_layer.default is not Aten Canonical. @@ -74,7 +74,7 @@ def forward(self, x_raw, h, c): exir.capture( LSTMConvPattern(), (input_x, input_h, input_c), - exir.CaptureConfig(pt2_mode=True), + exir.CaptureConfig(), ) .to_edge( # torch._export.verifier.SpecViolationError: Operator torch._ops.aten.mkldnn_rnn_layer.default is not Aten Canonical. @@ -90,7 +90,7 @@ def sub(x, y): exir.capture( sub, (input_x, input_h), - exir.CaptureConfig(pt2_mode=True, enable_aot=True, _unlift=False), + exir.CaptureConfig(enable_aot=True, _unlift=False), ) .to_edge(exir.EdgeCompileConfig(_use_edge_ops=True)) .exported_program.graph_module @@ -99,7 +99,7 @@ def sub(x, y): exir.capture( sub, (input_x, input_h), - exir.CaptureConfig(pt2_mode=True), + exir.CaptureConfig(), ) .to_edge() .exported_program.graph_module @@ -236,7 +236,7 @@ def forward(self, x_raw, h, c): exir.capture( LSTMConvPattern(), (input_x, input_h, input_c), - exir.CaptureConfig(pt2_mode=True, enable_aot=True), + exir.CaptureConfig(enable_aot=True), ) .to_edge( # torch._export.verifier.SpecViolationError: Operator torch._ops.aten.mkldnn_rnn_layer.default is not Aten Canonical. @@ -248,7 +248,7 @@ def forward(self, x_raw, h, c): exir.capture( LSTMConvPattern(), (input_x, input_h, input_c), - exir.CaptureConfig(pt2_mode=True), + exir.CaptureConfig(), ) .to_edge( # torch._export.verifier.SpecViolationError: Operator torch._ops.aten.mkldnn_rnn_layer.default is not Aten Canonical. 
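Note on the pattern: every hunk in this patch makes the same mechanical substitution, dropping the now-default pt2_mode=True argument from CaptureConfig. As a minimal sketch of the capture pipeline before and after the change, assuming the exir API of this revision (the Toy module, its inputs, and the import path are hypothetical, invented for illustration):

import torch

import executorch.exir as exir  # import path assumed; not shown in this diff


class Toy(torch.nn.Module):  # hypothetical module, for illustration only
    def forward(self, x):
        return torch.sin(x)


inputs = (torch.ones(1),)

# Before this patch, pt2_mode had to be spelled out even though PT2 capture
# was the only mode these call sites exercised:
#     config = exir.CaptureConfig(pt2_mode=True, enable_dynamic_shape=False)
# After this patch, PT2 capture is the default, so only the remaining
# options are passed:
config = exir.CaptureConfig(enable_dynamic_shape=False)

program = (
    exir.capture(Toy(), inputs, config)  # trace the module
    .to_edge()  # lower to the Edge dialect
    .to_executorch()  # emit the ExecuTorch program
    .program
)

Omitting the config argument entirely is equivalent to passing CaptureConfig(), which is why some hunks below (e.g. in exir/tests/test_memory_planning.py) simply delete the argument.
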
diff --git a/exir/backend/test/test_backends.py b/exir/backend/test/test_backends.py index 930376ce6f9..9c71062cc64 100644 --- a/exir/backend/test/test_backends.py +++ b/exir/backend/test/test_backends.py @@ -128,7 +128,7 @@ def forward(self, x): model_inputs = (torch.ones(1),) expected_res = sin_module(*model_inputs) edgeir_m = exir.capture( - sin_module, model_inputs, exir.CaptureConfig(pt2_mode=True) + sin_module, model_inputs, exir.CaptureConfig() ).to_edge() lowered_sin_module = to_backend( @@ -155,7 +155,7 @@ def forward(self, x): sin_module = SinModule() model_inputs = (torch.ones(1),) edgeir_m = exir.capture( - sin_module, model_inputs, exir.CaptureConfig(pt2_mode=True) + sin_module, model_inputs, exir.CaptureConfig() ).to_edge() max_value = model_inputs[0].shape[0] compile_specs = [CompileSpec("max_value", bytes([max_value]))] @@ -177,9 +177,7 @@ def forward(self, x): composite_model(*model_inputs) exec_prog = ( - exir.capture( - composite_model, model_inputs, exir.CaptureConfig(pt2_mode=True) - ) + exir.capture(composite_model, model_inputs, exir.CaptureConfig()) .to_edge() .to_executorch( config=exir.ExecutorchBackendConfig(extract_segments=extract_segments) @@ -254,7 +252,7 @@ def forward(self, a, x, b): add_mul_module = AddMulModule() model_inputs = (torch.ones(2, 2), 2 * torch.ones(2, 2), 3 * torch.ones(2, 2)) edge_graph_module = exir.capture( - add_mul_module, model_inputs, exir.CaptureConfig(pt2_mode=True) + add_mul_module, model_inputs, exir.CaptureConfig() ).to_edge() max_value = model_inputs[0].shape[0] compile_specs = [CompileSpec("max_value", bytes([max_value]))] @@ -275,9 +273,7 @@ def forward(self, a, x, b): composite_model(*model_inputs) exec_prog = ( - exir.capture( - composite_model, model_inputs, exir.CaptureConfig(pt2_mode=True) - ) + exir.capture(composite_model, model_inputs, exir.CaptureConfig()) .to_edge() .to_executorch( config=exir.ExecutorchBackendConfig(extract_segments=extract_segments) @@ -309,7 +305,7 @@ def forward(self, x): # the backend only accepts shape <= 4 model_inputs = (torch.ones(6),) edgeir_m = exir.capture( - sin_module, model_inputs, exir.CaptureConfig(pt2_mode=True) + sin_module, model_inputs, exir.CaptureConfig() ).to_edge() max_value = model_inputs[0].shape[0] compile_specs = [CompileSpec("max_value", bytes([max_value]))] @@ -331,9 +327,7 @@ def forward(self, x): composite_model(*model_inputs) exec_prog = ( - exir.capture( - composite_model, model_inputs, exir.CaptureConfig(pt2_mode=True) - ) + exir.capture(composite_model, model_inputs, exir.CaptureConfig()) .to_edge() .to_executorch( config=exir.ExecutorchBackendConfig(extract_segments=extract_segments), @@ -375,7 +369,7 @@ def forward(self, x): sin_module = SinModule() model_inputs = (torch.ones(1),) edgeir_m = exir.capture( - sin_module, model_inputs, exir.CaptureConfig(pt2_mode=True) + sin_module, model_inputs, exir.CaptureConfig() ).to_edge() max_value = model_inputs[0].shape[0] compile_specs = [CompileSpec("max_value", bytes([max_value]))] @@ -399,9 +393,7 @@ def forward(self, x): composite_model(*model_inputs) exec_prog = ( - exir.capture( - composite_model, model_inputs, exir.CaptureConfig(pt2_mode=True) - ) + exir.capture(composite_model, model_inputs, exir.CaptureConfig()) .to_edge() .to_executorch( config=exir.ExecutorchBackendConfig(extract_segments=extract_segments), @@ -471,7 +463,7 @@ def forward(self, x): sin_module = SinModule() model_inputs = (torch.ones(1),) edgeir_m = exir.capture( - sin_module, model_inputs, exir.CaptureConfig(pt2_mode=True) + sin_module, 
model_inputs, exir.CaptureConfig() ).to_edge() error_msg = r"call_function aten.cos.default is not supported in backend BackendWithCompilerDemo" @@ -494,7 +486,7 @@ def forward(self, x): sin_module = SinModule() model_inputs = (torch.ones(1),) edgeir_m = exir.capture( - sin_module, model_inputs, exir.CaptureConfig(pt2_mode=True) + sin_module, model_inputs, exir.CaptureConfig() ).to_edge() error_msg = r"Backend FakeBackendWithCompilerDemo was not found." @@ -520,7 +512,7 @@ def forward(self, x): to_be_lowered = LowerableSubModel() example_input = (torch.ones(1),) to_be_lowered_exir_submodule = exir.capture( - to_be_lowered, example_input, exir.CaptureConfig(pt2_mode=True) + to_be_lowered, example_input, exir.CaptureConfig() ).to_edge() max_value = example_input[0].shape[0] @@ -561,9 +553,7 @@ def forward(self, x): composite_model(*model_inputs) exec_prog = ( - exir.capture( - composite_model, model_inputs, exir.CaptureConfig(pt2_mode=True) - ) + exir.capture(composite_model, model_inputs, exir.CaptureConfig()) .to_edge() .to_executorch( config=exir.ExecutorchBackendConfig(extract_segments=extract_segments), @@ -624,9 +614,7 @@ def forward(self, x_raw, h, c): composite_m = CompositeModel(3) orig_res = composite_m(*inputs) - traced = exir.capture( - composite_m, inputs, exir.CaptureConfig(pt2_mode=True) - ).to_edge( + traced = exir.capture(composite_m, inputs, exir.CaptureConfig()).to_edge( # torch._export.verifier.SpecViolationError: Operator torch._ops.aten.mkldnn_rnn_layer.default is not Aten Canonical. exir.EdgeCompileConfig(_check_ir_validity=False) ) @@ -735,11 +723,7 @@ def forward(self, x_raw, h, c): composite_m = CompositeModel(3) orig_res = composite_m(*inputs) - traced = exir.capture( - composite_m, - inputs, - exir.CaptureConfig(pt2_mode=True), - ).to_edge( + traced = exir.capture(composite_m, inputs, exir.CaptureConfig(),).to_edge( # torch._export.verifier.SpecViolationError: Operator torch._ops.aten.mkldnn_rnn_layer.default is not Aten Canonical. exir.EdgeCompileConfig(_check_ir_validity=False) ) @@ -748,7 +732,7 @@ def forward(self, x_raw, h, c): exir.capture( CompositeModel(3), (input_x, input_h, input_c), - exir.CaptureConfig(pt2_mode=True), + exir.CaptureConfig(), ) .to_edge( # torch._export.verifier.SpecViolationError: Operator torch._ops.aten.mkldnn_rnn_layer.default is not Aten Canonical. 
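The test_backends.py hunks above all exercise the same lower-then-recapture flow: capture a module, lower it to a demo backend, wrap the lowered module, and capture the wrapper again. A condensed sketch of that flow under the new default config, mirroring the calls visible in these hunks (the import path for to_backend is an assumption, since the file's import block is not part of this diff):

import torch

import executorch.exir as exir  # import path assumed
from executorch.exir.backend.backend_api import to_backend  # import path assumed


class SinModule(torch.nn.Module):  # same toy module these tests use
    def forward(self, x):
        return torch.sin(x)


model_inputs = (torch.ones(1),)

# Capture and lower the whole program to the demo backend, as the hunks
# above do, but with the now-default CaptureConfig().
edgeir_m = exir.capture(SinModule(), model_inputs, exir.CaptureConfig()).to_edge()
lowered = to_backend("BackendWithCompilerDemo", edgeir_m.exported_program, [])


class CompositeModule(torch.nn.Module):
    """Wraps the lowered module so it can be re-captured in a larger graph."""

    def __init__(self):
        super().__init__()
        self.lowered_sin = lowered

    def forward(self, x):
        return self.lowered_sin(x)


exec_prog = (
    exir.capture(CompositeModule(), model_inputs, exir.CaptureConfig())
    .to_edge()
    .to_executorch()
)
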
@@ -779,7 +763,7 @@ def forward(self, x_raw, h, c): # graph_module_with_delegate = exir.capture( # traced_with_delegate, # (input_x, input_h, input_c), - # exir.CaptureConfig(pt2_mode=True), + # exir.CaptureConfig(), # ).to_edge() # program_with_delegates = graph_module_with_delegate.to_executorch( @@ -854,7 +838,7 @@ def forward(self, a, x, b): inputs = (torch.randn(2, 2), torch.randn(2, 2), torch.randn(2, 2)) orig_res = m(*inputs) - ep = exir.capture(m, inputs, exir.CaptureConfig(pt2_mode=True)).to_edge() + ep = exir.capture(m, inputs, exir.CaptureConfig()).to_edge() executorch_prog = ep executorch_prog.exported_program = to_backend( ep.exported_program, AddMulPartitionerDemo @@ -912,7 +896,7 @@ def forward(self, x, y): inputs = (torch.randn(1, 3), torch.randn(1, 3)) orig_res = Model()(*inputs) - ep = exir.capture(Model(), inputs, exir.CaptureConfig(pt2_mode=True)).to_edge() + ep = exir.capture(Model(), inputs, exir.CaptureConfig()).to_edge() executorch_prog = ep executorch_prog.exported_program = to_backend( ep.exported_program, AddAttributePartitionerDemo @@ -966,7 +950,7 @@ def partition(self, edge_graph_module): node.target = exir_ops.edge.aten.mul.Tensor return edge_graph_module - ep = exir.capture(Model(), inputs, exir.CaptureConfig(pt2_mode=True)).to_edge() + ep = exir.capture(Model(), inputs, exir.CaptureConfig()).to_edge() with self.assertRaises(AssertionError): _ = to_backend(ep.exported_program, BadPartitioner) @@ -995,7 +979,6 @@ def test_quantized_with_delegate(self) -> None: converted_linear, example_inputs, exir.CaptureConfig( - pt2_mode=True, enable_aot=True, ), ).to_edge(exir.EdgeCompileConfig(_check_ir_validity=False)) @@ -1027,7 +1010,7 @@ def f(x, y): orig = exir.capture( f, inputs, - exir.CaptureConfig(pt2_mode=True), + exir.CaptureConfig(), ).to_edge() partitioned = orig partitioned.exported_program = to_backend( @@ -1080,7 +1063,7 @@ def f(xs, y): orig = exir.capture( f, inputs, - exir.CaptureConfig(pt2_mode=True), + exir.CaptureConfig(), ).to_edge() partitioned = orig partitioned.exported_program = to_backend( @@ -1153,7 +1136,7 @@ def f(xs, pred1, pred2, y): orig = exir.capture( f, inputs, - exir.CaptureConfig(pt2_mode=True), + exir.CaptureConfig(), ).to_edge() partitioned = orig partitioned.exported_program = to_backend( @@ -1217,7 +1200,7 @@ def f(x: List[torch.Tensor]): return y inputs = ([torch.randn(2, 2), torch.randn(2, 2)],) - edge_prog = exir.capture(f, inputs, exir.CaptureConfig(pt2_mode=True)).to_edge() + edge_prog = exir.capture(f, inputs, exir.CaptureConfig()).to_edge() lowered_gm = to_backend( BackendWithCompilerDemo.__name__, edge_prog.exported_program, [] ) @@ -1230,9 +1213,7 @@ def __init__(self): def forward(self, x: List[torch.Tensor]): return self.lowered(x) - gm = exir.capture( - ComposedM(), inputs, exir.CaptureConfig(pt2_mode=True) - ).to_edge() + gm = exir.capture(ComposedM(), inputs, exir.CaptureConfig()).to_edge() gm(*inputs) def test_dict_input(self): @@ -1241,7 +1222,7 @@ def f(x: Dict[str, torch.Tensor]): return y inputs = ({"a": torch.randn(2, 2), "b": torch.randn(2, 2)},) - edge_prog = exir.capture(f, inputs, exir.CaptureConfig(pt2_mode=True)).to_edge() + edge_prog = exir.capture(f, inputs, exir.CaptureConfig()).to_edge() lowered_gm = to_backend( BackendWithCompilerDemo.__name__, edge_prog.exported_program, [] ) @@ -1254,9 +1235,7 @@ def __init__(self): def forward(self, x: List[torch.Tensor]): return self.lowered(x) - gm = exir.capture( - ComposedM(), inputs, exir.CaptureConfig(pt2_mode=True) - ).to_edge() + gm = 
exir.capture(ComposedM(), inputs, exir.CaptureConfig()).to_edge() gm(*inputs) def test_lower_multiple(self) -> None: @@ -1283,7 +1262,7 @@ def method2( } multi_method_prog = exir.capture_multiple( - module, method_name_to_args, exir.CaptureConfig(pt2_mode=True) + module, method_name_to_args, exir.CaptureConfig() ).to_edge() lowered_multi_method_prog = to_backend_multiple( @@ -1338,7 +1317,7 @@ def method2( } multi_method_prog = exir.capture_multiple( - module, method_name_to_args, exir.CaptureConfig(pt2_mode=True) + module, method_name_to_args, exir.CaptureConfig() ).to_edge() method_name_to_partitioners = { diff --git a/exir/backend/test/test_backends_lifted.py b/exir/backend/test/test_backends_lifted.py index a98130aa3f8..14abef9b349 100644 --- a/exir/backend/test/test_backends_lifted.py +++ b/exir/backend/test/test_backends_lifted.py @@ -68,7 +68,7 @@ def get_testing_capture_config(): - return exir.CaptureConfig(pt2_mode=True, enable_aot=True, _unlift=False) + return exir.CaptureConfig(enable_aot=True, _unlift=False) def vary_segments(test_method): @@ -994,7 +994,6 @@ def test_quantized_with_delegate(self) -> None: converted_linear, example_inputs, exir.CaptureConfig( - pt2_mode=True, enable_aot=True, _unlift=True, ), diff --git a/exir/backend/test/test_backends_nested.py b/exir/backend/test/test_backends_nested.py index 41a8fba0317..d99b65aa6e0 100644 --- a/exir/backend/test/test_backends_nested.py +++ b/exir/backend/test/test_backends_nested.py @@ -200,7 +200,7 @@ def test(self) -> None: orig = exir.capture( m, m.get_example_inputs(), - exir.CaptureConfig(pt2_mode=True), + exir.CaptureConfig(), ).to_edge(exir.EdgeCompileConfig(_check_ir_validity=False)) partitioned = orig diff --git a/exir/backend/test/test_graph_partition.py b/exir/backend/test/test_graph_partition.py index 7966ce60d80..c1d7599685d 100644 --- a/exir/backend/test/test_graph_partition.py +++ b/exir/backend/test/test_graph_partition.py @@ -23,9 +23,7 @@ class TestGraphPartition(unittest.TestCase): def get_graph_module( self, module: torch.nn.Module, inputs: Tuple[torch.Tensor] ) -> torch.fx.GraphModule: - capture_config = CaptureConfig( - pt2_mode=True, - ) + capture_config = CaptureConfig() graph_module = ( exir.capture(module, inputs, capture_config) .to_edge( diff --git a/exir/backend/test/test_utils.py b/exir/backend/test/test_utils.py index 971eededff0..d486c79d31d 100644 --- a/exir/backend/test/test_utils.py +++ b/exir/backend/test/test_utils.py @@ -108,7 +108,7 @@ def forward(self, x, y): exir.capture( MyModule1(), (torch.rand(3, 4), torch.rand(3, 4)), - CaptureConfig(pt2_mode=True), + CaptureConfig(), ) .to_edge() .exported_program.graph_module @@ -117,7 +117,7 @@ def forward(self, x, y): exir.capture( MyModule2(), (torch.rand(3, 4), torch.rand(3, 4)), - CaptureConfig(pt2_mode=True), + CaptureConfig(), ) .to_edge() .exported_program.graph_module @@ -154,7 +154,7 @@ def forward(self, x): exir.capture( LargeModel(), inputs, - CaptureConfig(pt2_mode=True), + CaptureConfig(), ) .to_edge(exir.EdgeCompileConfig(_check_ir_validity=False)) .exported_program.graph_module @@ -170,7 +170,7 @@ def forward(self, x): # output output output ([addmm_default],) {} pattern = ( - exir.capture(torch.nn.Linear(3, 3), inputs, CaptureConfig(pt2_mode=True)) + exir.capture(torch.nn.Linear(3, 3), inputs, CaptureConfig()) .to_edge(exir.EdgeCompileConfig(_check_ir_validity=False)) .exported_program.graph_module.graph ) @@ -202,7 +202,6 @@ def test_remove_first_quant_and_last_dequant(self): converted_linear, example_inputs, CaptureConfig( - 
pt2_mode=True,
                 enable_functionalization=False,
             ),
         )
@@ -264,7 +263,7 @@ def partition(
         exported_program = exir.capture(
             torch.nn.Linear(3, 3),
             (torch.randn(3, 3),),
-            CaptureConfig(pt2_mode=True),
+            CaptureConfig(),
         ).to_edge(
             exir.EdgeCompileConfig(
                 _check_ir_validity=False,
@@ -317,7 +316,6 @@ def forward(self, input):
             converted_linear,
             example_inputs,
             CaptureConfig(
-                pt2_mode=True,
                 enable_functionalization=False,
             ),
         )
diff --git a/exir/capture/_capture.py b/exir/capture/_capture.py
index 7ace36f0e3b..ef70376c71b 100644
--- a/exir/capture/_capture.py
+++ b/exir/capture/_capture.py
@@ -184,7 +184,7 @@ def convert_to_fake(x):

     else:
         warnings.warn(
-            "exir.capture with pt2_mode=False is deprecated. Please use the default (pt2_mode=True) instead."
+            "exir.capture with pt2_mode=False is deprecated. Please use the default config instead."
         )
         if not config.enable_functionalization:
             raise InternalError(
@@ -196,7 +196,7 @@ def convert_to_fake(x):
             )
         if config.enable_aot:
             raise InternalError(
-                "Using AOT mode is not supported for leagacy capture mode, please use pt2_mode=True instead."
+                "Using AOT mode is not supported for legacy capture mode, please use the default capture mode instead."
             )
         graph_module = dispatch_trace(f, args)
         in_spec, out_spec = graph_module.in_spec, graph_module.out_spec
diff --git a/exir/emit/test/test_emit.py b/exir/emit/test/test_emit.py
index c45567cd1a2..31f52c9bc3a 100644
--- a/exir/emit/test/test_emit.py
+++ b/exir/emit/test/test_emit.py
@@ -107,7 +107,7 @@ def f(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
             exir.capture(
                 f,
                 (torch.ones(3, 2), torch.zeros(3, 2)),
-                exir.CaptureConfig(pt2_mode=True),
+                exir.CaptureConfig(),
             )
             .to_edge()
             .to_executorch()
@@ -130,7 +130,7 @@ def f(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
     def test_basic_end_to_end(self) -> None:
         f = models.BasicSinMax()
         program = (
-            exir.capture(f, f.get_random_inputs(), exir.CaptureConfig(pt2_mode=True))
+            exir.capture(f, f.get_random_inputs(), exir.CaptureConfig())
             .to_edge()
             .to_executorch()
             .program
@@ -159,10 +159,7 @@ def f(x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, List[torch.Tensor]]:
         x = (torch.randn(100),)
         program = (
-            exir.capture(f, x, exir.CaptureConfig(pt2_mode=True))
-            .to_edge()
-            .to_executorch()
-            .program
+            exir.capture(f, x, exir.CaptureConfig()).to_edge().to_executorch().program
         )
         exec_plan = program.execution_plan[0]
         self.assertEqual(len(exec_plan.outputs), 4)
@@ -182,7 +179,7 @@ def f(x: torch.Tensor) -> torch.Tensor:
             return torch.ones(100) + x + (torch.ones(100) * 2)

         program = (
-            exir.capture(f, (torch.randn(100),), exir.CaptureConfig(pt2_mode=True))
+            exir.capture(f, (torch.randn(100),), exir.CaptureConfig())
             .to_edge(exir.EdgeCompileConfig(passes=[ConstPropPass()]))
             .to_executorch()
             .program
@@ -224,7 +221,7 @@ def f(x: torch.Tensor) -> torch.Tensor:
             return z.max()

         inputs = (torch.ones((10, 10)),)
-        edge = exir.capture(f, inputs, exir.CaptureConfig(pt2_mode=True)).to_edge()
+        edge = exir.capture(f, inputs, exir.CaptureConfig()).to_edge()

         removed_ops = ["aten::relu_", "aten::view"]
         expected_ops = ["aten::sin", "aten::relu", "aten::max", "aten::view_copy"]
@@ -266,7 +263,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:

         inputs = (torch.ones(2, 2),)
         program = (
-            exir.capture(model, inputs, exir.CaptureConfig(pt2_mode=True))
+            exir.capture(model, inputs, exir.CaptureConfig())
             .to_edge()
             .to_executorch()
             .program
@@ -281,7 +278,7 @@ def f(x: torch.Tensor) -> torch.Tensor:
             return torch.permute(x, (2, 0, 1))

         program = (
-            exir.capture(f, (torch.randn(2, 3, 5),),
exir.CaptureConfig(pt2_mode=True)) + exir.capture(f, (torch.randn(2, 3, 5),), exir.CaptureConfig()) .to_edge() .to_executorch() .program @@ -305,7 +302,7 @@ def f(x: torch.Tensor) -> torch.Tensor: return torch.addbmm(x, batch1, batch2, alpha=2, beta=3) program = ( - exir.capture(f, (torch.randn(3, 5),), exir.CaptureConfig(pt2_mode=True)) + exir.capture(f, (torch.randn(3, 5),), exir.CaptureConfig()) .to_edge() .to_executorch() .program @@ -325,7 +322,7 @@ def f(x: torch.Tensor) -> torch.Tensor: x, _ = torch.sort(torch.randn(3, 4)) program = ( - exir.capture(f, (x,), exir.CaptureConfig(pt2_mode=True)) + exir.capture(f, (x,), exir.CaptureConfig()) .to_edge() .to_executorch() .program @@ -338,7 +335,6 @@ def f(x: torch.Tensor) -> torch.Tensor: def test_no_input(self) -> None: capture_config = CaptureConfig( - pt2_mode=True, enable_functionalization=True, enable_dynamic_shape=False, ) @@ -374,9 +370,7 @@ def f_2(x: torch.Tensor, mem_format: torch.memory_format) -> torch.Tensor: return y program_supported_without_mem_format = ( - exir.capture( - f_1, (torch.ones([4, 4, 4, 4]),), exir.CaptureConfig(pt2_mode=True) - ) + exir.capture(f_1, (torch.ones([4, 4, 4, 4]),), exir.CaptureConfig()) .to_edge() .to_executorch() .program @@ -385,7 +379,7 @@ def f_2(x: torch.Tensor, mem_format: torch.memory_format) -> torch.Tensor: exir.capture( f_2, (torch.ones([4, 4, 4, 4]), torch.contiguous_format), - exir.CaptureConfig(pt2_mode=True), + exir.CaptureConfig(), ) .to_edge() .to_executorch() @@ -398,7 +392,7 @@ def f_2(x: torch.Tensor, mem_format: torch.memory_format) -> torch.Tensor: exir.capture( f_2, (torch.ones([4, 4, 4, 4]), torch.channels_last), - exir.CaptureConfig(pt2_mode=True), + exir.CaptureConfig(), ).to_edge().to_executorch().program # Get the indexes at which the memory_format values are present in the values list @@ -434,9 +428,7 @@ def f(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: return torch.mul(x, y, out=z) program = ( - exir.capture( - f, (torch.ones(3), torch.ones(3)), exir.CaptureConfig(pt2_mode=True) - ) + exir.capture(f, (torch.ones(3), torch.ones(3)), exir.CaptureConfig()) .to_edge() .to_executorch() .program @@ -467,7 +459,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: # Trace to FX Graph. 
program = ( - exir.capture(model_out, inputs, exir.CaptureConfig(pt2_mode=True)) + exir.capture(model_out, inputs, exir.CaptureConfig()) .to_edge() .to_executorch() .program @@ -493,7 +485,7 @@ def h(x: torch.Tensor) -> torch.Tensor: x = (torch.randn(3, 2),) exec_prog = ( - exir.capture(h, x, exir.CaptureConfig(pt2_mode=True)) + exir.capture(h, x, exir.CaptureConfig()) .to_edge() .to_executorch(exir.ExecutorchBackendConfig(emit_stacktrace=True)) ) @@ -534,10 +526,7 @@ def h(x: torch.Tensor) -> torch.Tensor: x = (torch.randn(3, 2),) program = ( - exir.capture(h, x, exir.CaptureConfig(pt2_mode=True)) - .to_edge() - .to_executorch() - .program + exir.capture(h, x, exir.CaptureConfig()).to_edge().to_executorch().program ) # Check the stacktrace is None since we did not specify to get the stacktrace @@ -550,7 +539,7 @@ def f(x: torch.Tensor, n: torch.Tensor) -> torch.Tensor: x = torch.randn(3, 2) program = ( - exir.capture(f, (x, x), exir.CaptureConfig(pt2_mode=True)) + exir.capture(f, (x, x), exir.CaptureConfig()) .to_edge(self.compile_config) # TODO(larryliu): fix cat .to_executorch() .program @@ -566,7 +555,6 @@ def test_model(eager_module): inputs = eager_module.get_random_inputs() eager_output = eager_module.forward(*inputs) capture_config = exir.CaptureConfig( - pt2_mode=True, enable_functionalization=True, enable_aot=True, _unlift=False, @@ -599,7 +587,7 @@ def f(x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: x = (torch.randn(10),) program = ( - exir.capture(f, x, exir.CaptureConfig(pt2_mode=True)) + exir.capture(f, x, exir.CaptureConfig()) .to_edge(self.compile_config) # TODO(larryliu): fix topk .to_executorch() .program @@ -616,10 +604,7 @@ def f(x: torch.Tensor) -> torch.Tensor: x = (torch.randn(3, 2),) program = ( - exir.capture(f, x, exir.CaptureConfig(pt2_mode=True)) - .to_edge() - .to_executorch() - .program + exir.capture(f, x, exir.CaptureConfig()).to_edge().to_executorch().program ) vals = program.execution_plan[0].values @@ -635,7 +620,7 @@ def f(x: torch.Tensor) -> torch.Tensor: return b x = (torch.randn(3, 2),) - config = CaptureConfig(pt2_mode=True, enable_dynamic_shape=True) + config = CaptureConfig(enable_dynamic_shape=True) program = exir.capture(f, x, config=config).to_edge().to_executorch().program self.assertEqual( @@ -656,7 +641,7 @@ def forward(self, x): x = (torch.randn(1, 1, 2, 2),) program = ( - exir.capture(M(), x, exir.CaptureConfig(pt2_mode=True)) + exir.capture(M(), x, exir.CaptureConfig()) .to_edge(exir.EdgeCompileConfig(passes=[ConstPropPass()])) .to_executorch() .program @@ -673,7 +658,6 @@ def map_fn(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: return control_flow.map(map_fn, x, y) capture_config = CaptureConfig( - pt2_mode=True, enable_functionalization=False, enable_dynamic_shape=True, ) @@ -751,7 +735,6 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: model = SimpleLinear() inputs = (torch.ones(10, 5),) capture_config = CaptureConfig( - pt2_mode=True, enable_dynamic_shape=True, ) program = ( @@ -855,7 +838,6 @@ def forward_sigmoid(self, x: torch.Tensor) -> torch.Tensor: model = SimpleLinear() inputs = (torch.ones(10, 5),) capture_config = CaptureConfig( - pt2_mode=True, enable_dynamic_shape=True, ) program_relu = ( @@ -922,7 +904,6 @@ def forward_sigmoid(self, x: torch.Tensor) -> torch.Tensor: model = SimpleLinear() inputs = (torch.ones(10, 5),) capture_config = CaptureConfig( - pt2_mode=True, enable_dynamic_shape=True, ) program_relu = ( @@ -987,7 +968,6 @@ def make_program( fn, inputs, CaptureConfig( - pt2_mode=True, 
enable_dynamic_shape=True, ), ) @@ -1048,7 +1028,6 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: model = Simple() inputs = (torch.ones(10, 5),) capture_config = CaptureConfig( - pt2_mode=True, enable_dynamic_shape=True, ) program = ( @@ -1120,9 +1099,7 @@ def test_emit_debug_handle_map(self) -> None: exir.capture( mul_model, mul_model.get_random_inputs(), - CaptureConfig( - pt2_mode=True, - ), + CaptureConfig(), ) .to_edge(exir.EdgeCompileConfig(_check_ir_validity=False)) .to_executorch() @@ -1145,9 +1122,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: exir.capture( mul_model, (torch.ones(2, 2),), - CaptureConfig( - pt2_mode=True, - ), + CaptureConfig(), ) .to_edge(exir.EdgeCompileConfig(_check_ir_validity=False)) .to_executorch() diff --git a/exir/experimental/export_pt2.py b/exir/experimental/export_pt2.py index e50d678c27a..d3d50f04ba9 100644 --- a/exir/experimental/export_pt2.py +++ b/exir/experimental/export_pt2.py @@ -139,7 +139,7 @@ def trace(root: Callable[..., Value], concrete_args: Tuple[Value, ...]) -> Trace graph_module = exir.capture( root, concrete_args, - CaptureConfig(pt2_mode=True, enable_functionalization=False), + CaptureConfig(enable_functionalization=False), ).graph_module # TODO convert torchdynamo guards to our own guards def _convert_dynamo_guard_to_exir_guard( diff --git a/exir/tests/models.py b/exir/tests/models.py index 671dc1c7dc8..1c7cc826914 100644 --- a/exir/tests/models.py +++ b/exir/tests/models.py @@ -151,7 +151,7 @@ def get_random_inputs(self) -> Tuple[Tensor, Tensor]: edge_ir_m = exir.capture( delegated_m, delegated_m.get_random_inputs(), - exir.CaptureConfig(pt2_mode=True), + exir.CaptureConfig(), ).to_edge() lowered_module = LoweredBackendModule( edge_program=edge_ir_m, diff --git a/exir/tests/test_arg_validator.py b/exir/tests/test_arg_validator.py index a7b9e946f2d..14c255bf775 100644 --- a/exir/tests/test_arg_validator.py +++ b/exir/tests/test_arg_validator.py @@ -32,7 +32,7 @@ def forward(self, x): m = TestModel() inputs = (torch.randn(1, 3, 100, 100).to(dtype=torch.int),) egm = ( - exir.capture(m, inputs, exir.CaptureConfig(pt2_mode=True)) + exir.capture(m, inputs, exir.CaptureConfig()) .to_edge(EdgeCompileConfig(_check_ir_validity=False)) .exported_program.graph_module ) @@ -52,7 +52,7 @@ def forward(self, x): inputs = (torch.randn(1, 3, 100, 100).to(dtype=torch.bfloat16),) egm = ( - exir.capture(M(), inputs, exir.CaptureConfig(pt2_mode=True)) + exir.capture(M(), inputs, exir.CaptureConfig()) .to_edge(EdgeCompileConfig(_check_ir_validity=False)) .exported_program.graph_module ) diff --git a/exir/tests/test_capture.py b/exir/tests/test_capture.py index ed40506f3ef..7663ed59080 100644 --- a/exir/tests/test_capture.py +++ b/exir/tests/test_capture.py @@ -23,9 +23,7 @@ def test_module_call(self, model_name: str, model: torch.nn.Module) -> None: inputs = model.get_random_inputs() expected = model(*inputs) # TODO(ycao): Replace it with capture_multiple - exported_program = exir.capture( - model, inputs, exir.CaptureConfig(pt2_mode=True) - ) + exported_program = exir.capture(model, inputs, exir.CaptureConfig()) self.assertTrue(torch.allclose(expected, exported_program(*inputs))) diff --git a/exir/tests/test_delegate.py b/exir/tests/test_delegate.py index e6c9060dfc7..f32070cfe8f 100644 --- a/exir/tests/test_delegate.py +++ b/exir/tests/test_delegate.py @@ -38,7 +38,7 @@ def g(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: return x + y inputs = (torch.ones(1, 3), torch.ones(1, 3)) - edge_ir_m = exir.capture(g, inputs, 
CaptureConfig(pt2_mode=True)).to_edge() + edge_ir_m = exir.capture(g, inputs, CaptureConfig()).to_edge() lowered_module: LoweredBackendModule = LoweredBackendModule( edge_ir_m, "BackendWithCompilerDemo", b"moo", [] ) @@ -51,7 +51,7 @@ def f(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: f, inputs, exir.CaptureConfig( - pt2_mode=True, enable_functionalization=True, enable_dynamic_shape=True + enable_functionalization=True, enable_dynamic_shape=True ), ) FileCheck().check("lowered_module_0").check( @@ -65,7 +65,7 @@ def test_to_backend(self) -> None: m = models.CompositeDelegateModule() exec_prog = ( - exir.capture(m, m.get_random_inputs(), exir.CaptureConfig(pt2_mode=True)) + exir.capture(m, m.get_random_inputs(), exir.CaptureConfig()) .to_edge( EdgeCompileConfig(_check_ir_validity=False) ) # TODO(larryliu): fix split_copy.Tensor @@ -164,7 +164,7 @@ def forward(self, x, y): return x orig_res = Model()(*inputs) - prog = exir.capture(Model(), inputs, CaptureConfig(pt2_mode=True)).to_edge() + prog = exir.capture(Model(), inputs, CaptureConfig()).to_edge() gm = prog.exported_program.graph_module node_list = [] @@ -224,7 +224,7 @@ def forward(self, x, y): return x orig_res = Model()(*inputs) - prog = exir.capture(Model(), inputs, CaptureConfig(pt2_mode=True)).to_edge() + prog = exir.capture(Model(), inputs, CaptureConfig()).to_edge() gm = prog.exported_program.graph_module node_list = [] @@ -283,7 +283,7 @@ def forward(self, x, y): return x orig_res = Model()(*inputs) - prog = exir.capture(Model(), inputs, CaptureConfig(pt2_mode=True)).to_edge() + prog = exir.capture(Model(), inputs, CaptureConfig()).to_edge() gm = prog.exported_program.graph_module node_list = [] diff --git a/exir/tests/test_dynamic_shape_propagation.py b/exir/tests/test_dynamic_shape_propagation.py index d294c9859e6..1a817ca02ab 100644 --- a/exir/tests/test_dynamic_shape_propagation.py +++ b/exir/tests/test_dynamic_shape_propagation.py @@ -20,10 +20,7 @@ def test_repeat(self): prog = exir.capture( eager_model, inputs, - exir.CaptureConfig( - pt2_mode=True, - enable_dynamic_shape=True, - ), + exir.CaptureConfig(enable_dynamic_shape=True), ).to_edge(exir.EdgeCompileConfig(_check_ir_validity=False)) new_prog = prog.transform(SpecPropPass(), SymShapeEvalPass()) diff --git a/exir/tests/test_experimental.py b/exir/tests/test_experimental.py index f856b3635dd..834d0b7cc99 100644 --- a/exir/tests/test_experimental.py +++ b/exir/tests/test_experimental.py @@ -40,9 +40,7 @@ def f(x: torch.Tensor) -> torch.Tensor: x = (torch.randn(100),) edge_gm = ( - exir.capture(f, x, CaptureConfig(pt2_mode=True)) - .to_edge() - .exported_program.graph_module + exir.capture(f, x, CaptureConfig()).to_edge().exported_program.graph_module ) validation_f = add_assertions(edge_gm) diff --git a/exir/tests/test_fixtures.py b/exir/tests/test_fixtures.py index 93e5bbc1ad5..f8c5c634039 100644 --- a/exir/tests/test_fixtures.py +++ b/exir/tests/test_fixtures.py @@ -53,7 +53,7 @@ def export_to_file(m: Any, inputs: Any) -> bytes: Given a module and its inputs, return the json flatbuffer of that module. 
""" exec_prog = ( - exir.capture(m, inputs, exir.CaptureConfig(pt2_mode=True)) + exir.capture(m, inputs, exir.CaptureConfig()) .to_edge(exir.EdgeCompileConfig(_check_ir_validity=False)) .to_executorch() ) diff --git a/exir/tests/test_memory_format_ops_pass.py b/exir/tests/test_memory_format_ops_pass.py index 290d4f4f54e..0c87f18b6af 100644 --- a/exir/tests/test_memory_format_ops_pass.py +++ b/exir/tests/test_memory_format_ops_pass.py @@ -46,7 +46,7 @@ def forward( before = exir.capture( module, sample_input, - CaptureConfig(pt2_mode=True, enable_dynamic_shape=True), + CaptureConfig(enable_dynamic_shape=True), ) # check op strings before diff --git a/exir/tests/test_memory_planning.py b/exir/tests/test_memory_planning.py index bcbf5c04b11..965acca441d 100644 --- a/exir/tests/test_memory_planning.py +++ b/exir/tests/test_memory_planning.py @@ -229,7 +229,7 @@ def wrapper(self: "TestMemoryPlanning") -> None: exir.capture( eager_module, inputs, - exir.CaptureConfig(pt2_mode=True), + exir.CaptureConfig(), ) # torch._ops.aten.t.default .to_edge( @@ -466,7 +466,6 @@ def test_asr_joiner(self) -> None: eager_model, inputs, exir.CaptureConfig( - pt2_mode=True, enable_dynamic_shape=True, ), ).to_edge( @@ -531,7 +530,6 @@ def test_multiple_pools( edge_program = exir.capture( MultiplePoolsToyModel(), (torch.ones(1),), - exir.CaptureConfig(pt2_mode=True), ).to_edge(exir.EdgeCompileConfig(_check_ir_validity=False)) program = edge_program.to_executorch( diff --git a/exir/tests/test_pass_infra.py b/exir/tests/test_pass_infra.py index c35365f0b97..2fcf4fb77f7 100644 --- a/exir/tests/test_pass_infra.py +++ b/exir/tests/test_pass_infra.py @@ -104,7 +104,7 @@ def f(x: torch.Tensor) -> torch.Tensor: return z f = ( - exir.capture(f, (torch.randn(10),), exir.CaptureConfig(pt2_mode=True)) + exir.capture(f, (torch.randn(10),), exir.CaptureConfig()) .to_edge() .exported_program.graph_module ) @@ -149,7 +149,7 @@ def f(x: torch.Tensor) -> torch.Tensor: return z traced_f1 = ( - exir.capture(f, (torch.randn(10),), exir.CaptureConfig(pt2_mode=True)) + exir.capture(f, (torch.randn(10),), exir.CaptureConfig()) .to_edge() .exported_program.graph_module ) @@ -166,7 +166,7 @@ def f(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: sample_inputs = (torch.randn(1, 3), torch.randn(1, 3)) gm = exir.capture( - f, sample_inputs, exir.CaptureConfig(pt2_mode=True) + f, sample_inputs, exir.CaptureConfig() ).exported_program.graph_module pass_result = ScalarToTensorPass()(gm) diff --git a/exir/tests/test_passes.py b/exir/tests/test_passes.py index ce27a6dc5d1..0c8c4748e23 100644 --- a/exir/tests/test_passes.py +++ b/exir/tests/test_passes.py @@ -104,7 +104,7 @@ def add(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: int_tensor = torch.tensor([[1, 2, 3]]) float_tensor = torch.tensor([[1.0, 2.0, 3.0]]) edge_prog = exir.capture( - add, (int_tensor, float_tensor), exir.CaptureConfig(pt2_mode=True) + add, (int_tensor, float_tensor), exir.CaptureConfig() ).to_edge() new_prog = edge_prog.transform(RemoveMixedTypeOperators()) @@ -129,7 +129,7 @@ def add(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: double_tensor = double_tensor.to(torch.double) double_prog = exir.capture( - add, (int_tensor, double_tensor), exir.CaptureConfig(pt2_mode=True) + add, (int_tensor, double_tensor), exir.CaptureConfig() ).to_edge() double_prog.transform(RemoveMixedTypeOperators()) @@ -155,7 +155,7 @@ def mult(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: float_tensor_vert = float_tensor.T mult_prog = exir.capture( - mult, (int_tensor, 
float_tensor_vert), exir.CaptureConfig(pt2_mode=True) + mult, (int_tensor, float_tensor_vert), exir.CaptureConfig() ).to_edge() # graph_module_mult.graph.print_tabular() @@ -186,7 +186,7 @@ def foo(x: torch.Tensor) -> torch.Tensor: edge_prog = exir.capture( foo, (torch.ones(1, dtype=torch.float32),), - exir.CaptureConfig(pt2_mode=True), + exir.CaptureConfig(), ).to_edge() edge_prog = edge_prog.transform(RemoveNoopPass()) self.assertIsNotNone(edge_prog.exported_program.graph_module) @@ -207,9 +207,7 @@ def foo_with_all_slices(x: torch.Tensor) -> torch.Tensor: # Turn off functionalization so that we can get the actual to.dtype op x = torch.ones((3, 8, 8)) - prog = exir.capture( - foo_with_no_slice, (x,), exir.CaptureConfig(pt2_mode=True) - ).to_edge() + prog = exir.capture(foo_with_no_slice, (x,), exir.CaptureConfig()).to_edge() prog = prog.transform(RemoveNoopPass()) new_graph_module = prog.exported_program.graph_module FileCheck().check_count( @@ -219,7 +217,7 @@ def foo_with_all_slices(x: torch.Tensor) -> torch.Tensor: prog = exir.capture( foo_with_one_slice, (x,), - exir.CaptureConfig(pt2_mode=True), + exir.CaptureConfig(), ).to_edge() prog = prog.transform(RemoveNoopPass()) new_graph_module = prog.exported_program.graph_module @@ -230,7 +228,7 @@ def foo_with_all_slices(x: torch.Tensor) -> torch.Tensor: prog = exir.capture( foo_with_all_slices, (x,), - exir.CaptureConfig(pt2_mode=True), + exir.CaptureConfig(), ).to_edge() prog = prog.transform(RemoveNoopPass()) new_graph_module = prog.exported_program.graph_module @@ -244,9 +242,7 @@ def f(x: torch.Tensor) -> torch.Tensor: x = (torch.randn(2, 3),) - exir.capture( - f, x, exir.CaptureConfig(pt2_mode=True) - ).to_edge().exported_program.graph_module + exir.capture(f, x, exir.CaptureConfig()).to_edge().exported_program.graph_module # TODO(angelayi): Add a utility function that verifies a model is in # the edge dialect @@ -274,7 +270,7 @@ def forward(self, x_raw, h, c): composite_m = CompositeModel(3) edge_prog = ( - exir.capture(composite_m, inputs, exir.CaptureConfig(pt2_mode=True)) + exir.capture(composite_m, inputs, exir.CaptureConfig()) # torch._ops.aten.t.default .to_edge(exir.EdgeCompileConfig(_check_ir_validity=False)) ) @@ -308,7 +304,7 @@ def get_random_inputs(self): model = MyModel() inputs = model.get_random_inputs() - prog = exir.capture(model, inputs, exir.CaptureConfig(pt2_mode=True)).to_edge( + prog = exir.capture(model, inputs, exir.CaptureConfig()).to_edge( EdgeCompileConfig(_check_ir_validity=False) ) # TODO(larryliu): fix split_copy new_prog = prog.transform(ToOutVarPass()) @@ -338,7 +334,7 @@ def get_random_inputs(self): model = MyModel() inputs = model.get_random_inputs() - prog = exir.capture(model, inputs, exir.CaptureConfig(pt2_mode=True)).to_edge( + prog = exir.capture(model, inputs, exir.CaptureConfig()).to_edge( EdgeCompileConfig(_check_ir_validity=False) ) # TODO(larryliu): fix topk @@ -367,9 +363,7 @@ def forward(self, x): inputs = torch.tensor(1.0, dtype=torch.float) model_res = model(inputs) - edge_dialect = exir.capture( - model, (inputs,), exir.CaptureConfig(pt2_mode=True) - ).to_edge() + edge_dialect = exir.capture(model, (inputs,), exir.CaptureConfig()).to_edge() edge_res = edge_dialect(inputs) self.assertTrue(torch.allclose(model_res, edge_res)) @@ -381,9 +375,7 @@ def f(x: torch.Tensor) -> List[torch.Tensor]: class NullPass(ExportPass): pass - prog = exir.capture( - f, (torch.ones(3, 2),), exir.CaptureConfig(pt2_mode=True) - ).to_edge( + prog = exir.capture(f, (torch.ones(3, 2),), 
exir.CaptureConfig()).to_edge( EdgeCompileConfig(_check_ir_validity=False) ) # TODO(larryliu): fix cat new_prog = prog.transform(NullPass()) @@ -412,7 +404,7 @@ class NullPass(ExportPass): prog = exir.capture( f, (torch.ones(3, 2),), - CaptureConfig(pt2_mode=True, enable_functionalization=False), + CaptureConfig(enable_functionalization=False), ).to_edge(exir.EdgeCompileConfig(_check_ir_validity=False)) new_prog = prog.transform(NullPass()) new_nodes = new_prog.exported_program.graph_module.graph.nodes @@ -448,9 +440,7 @@ def count_additions(gm: torch.fx.GraphModule) -> int: graph_module = exir.capture( M(), (torch.zeros(2, 2, 3),), - CaptureConfig( - pt2_mode=True, enable_dynamic_shape=True, enable_functionalization=True - ), + CaptureConfig(enable_dynamic_shape=True, enable_functionalization=True), ).exported_program.graph_module self.assertEqual(count_additions(graph_module), 3) @@ -463,9 +453,7 @@ def test_export_scalar_to_tensor_pass(self) -> None: def mul(x: torch.Tensor) -> torch.Tensor: return x * 3.14 - expo_prog = exir.capture( - mul, (torch.ones(1),), exir.CaptureConfig(pt2_mode=True) - ) + expo_prog = exir.capture(mul, (torch.ones(1),), exir.CaptureConfig()) new_prog = expo_prog.transform(ScalarToTensorPass()) self.assertIsNotNone(new_prog.exported_program.graph_module) new_graph_module = new_prog.exported_program.graph_module @@ -492,7 +480,7 @@ def f(x: torch.Tensor) -> torch.Tensor: gm = exir.capture( f, example_inputs, - exir.CaptureConfig(pt2_mode=True, enable_dynamic_shape=True), + exir.CaptureConfig(enable_dynamic_shape=True), ) new_gm = gm.transform( ReplaceSymSizeOpPass(), ScalarToTensorPass(), RemoveMixedTypeOperators() @@ -506,7 +494,7 @@ def f(x: torch.Tensor) -> torch.Tensor: return x + x gm = exir.capture( - f, (torch.ones(3, 2),), exir.CaptureConfig(pt2_mode=True) + f, (torch.ones(3, 2),), exir.CaptureConfig() ).exported_program.graph_module new_gm = SpecPropPass()(gm) self.assertIsNotNone(new_gm) @@ -525,7 +513,7 @@ def f(x: torch.Tensor) -> Tuple[torch.Tensor]: return (x + x,) gm = exir.capture( - f, (torch.ones(3, 2),), exir.CaptureConfig(pt2_mode=True) + f, (torch.ones(3, 2),), exir.CaptureConfig() ).exported_program.graph_module new_gm = SpecPropPass()(gm) self.assertIsNotNone(new_gm) @@ -549,7 +537,7 @@ def f(inp: torch.Tensor) -> torch.Tensor: return model(inp) # ReplaceBrokenOpsWithFunctionalOpsPass is used in to_edge() - prog = exir.capture(f, (x,), exir.CaptureConfig(pt2_mode=True)).to_edge( + prog = exir.capture(f, (x,), exir.CaptureConfig()).to_edge( exir.EdgeCompileConfig(_check_ir_validity=False) ) gm = prog.exported_program.graph_module @@ -570,7 +558,6 @@ def f(x: torch.Tensor) -> torch.Tensor: f, (torch.ones(3, 2),), exir.CaptureConfig( - pt2_mode=True, enable_functionalization=False, enable_dynamic_shape=True, ), @@ -594,7 +581,6 @@ def test_alloc_node_spec(self) -> None: eager_model, inputs, exir.CaptureConfig( - pt2_mode=True, enable_dynamic_shape=True, enable_functionalization=False, ), @@ -643,7 +629,7 @@ def test_dce_recursive(self) -> None: eager_model, inputs, exir.CaptureConfig( - pt2_mode=True, enable_dynamic_shape=True, enable_functionalization=False + enable_dynamic_shape=True, enable_functionalization=False ), ).exported_program.graph_module @@ -664,7 +650,6 @@ def f(x: torch.Tensor) -> torch.Tensor: f, (torch.rand(5),), config=CaptureConfig( - pt2_mode=True, enable_dynamic_shape=True, ), ) @@ -699,7 +684,6 @@ def f(x: torch.Tensor) -> torch.Tensor: f, (inp,), config=CaptureConfig( - pt2_mode=True, enable_dynamic_shape=True, ), 
).to_edge( @@ -730,7 +714,7 @@ def f(x: torch.Tensor) -> torch.Tensor: return x + x gm = ( - exir.capture(f, (x,), exir.CaptureConfig(pt2_mode=True)) + exir.capture(f, (x,), exir.CaptureConfig()) .to_edge() .exported_program.graph_module ) @@ -753,7 +737,7 @@ def f(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: torch.randn(2, 2), torch.randn(2, 2), ), - exir.CaptureConfig(pt2_mode=True), + exir.CaptureConfig(), ) # should look like: # graph(): @@ -816,7 +800,7 @@ def call_operator(self, op, args, kwargs, meta): torch.randn(2, 2), torch.randn(2, 2), ), - exir.CaptureConfig(pt2_mode=True), + exir.CaptureConfig(), ) # Retrace-able, the graph "promote" back to ATen dialect, showing up add and relu, which is expected. FileCheck().check("torch.ops.aten.add.Tensor").check( @@ -831,7 +815,6 @@ def test_debug_handle_generator_pass(self) -> None: eager_model, inputs, exir.CaptureConfig( - pt2_mode=True, enable_dynamic_shape=False, enable_functionalization=True, ), @@ -849,7 +832,7 @@ def f(x: torch.Tensor) -> torch.Tensor: inputs = (torch.ones(6),) prog = exir.capture( - f, inputs, exir.CaptureConfig(pt2_mode=True, enable_dynamic_shape=True) + f, inputs, exir.CaptureConfig(enable_dynamic_shape=True) ).to_edge(exir.EdgeCompileConfig(_check_ir_validity=False)) prog = prog.transform(SymIntToTensorPass()) @@ -867,7 +850,7 @@ def f(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: torch.randn(2, 2), torch.randn(2, 2), ), - exir.CaptureConfig(pt2_mode=True), + exir.CaptureConfig(), ) # should look like: # graph(): @@ -917,7 +900,7 @@ def f(x: torch.Tensor) -> torch.Tensor: gm = exir.capture( f, (torch.randn(5),), - exir.CaptureConfig(pt2_mode=True), + exir.CaptureConfig(), ).to_edge(exir.EdgeCompileConfig(_check_ir_validity=False)) new_gm = gm.transform(RemoveAssertAsyncPass()) num_asserts = [ @@ -938,7 +921,7 @@ def forward(self, x): return torch.arange(start=0, end=2) + x _ = ( - exir.capture(M(), (torch.randn(2),), exir.CaptureConfig(pt2_mode=True)) + exir.capture(M(), (torch.randn(2),), exir.CaptureConfig()) .to_edge() .to_executorch() ) @@ -953,7 +936,7 @@ def forward(self, x): return self.a[:2] + x gm = ( - exir.capture(M(), (torch.randn(2),), exir.CaptureConfig(pt2_mode=True)) + exir.capture(M(), (torch.randn(2),), exir.CaptureConfig()) .to_edge() .exported_program.graph_module ) diff --git a/exir/tests/test_quant_fusion_pass.py b/exir/tests/test_quant_fusion_pass.py index 8f4a0b724e3..0d0cc0877db 100644 --- a/exir/tests/test_quant_fusion_pass.py +++ b/exir/tests/test_quant_fusion_pass.py @@ -56,9 +56,7 @@ def forward(self, x, y): ) m = _convert_to_reference_decomposed_fx(m) config = EdgeCompileConfig(_check_ir_validity=False) - m = exir.capture(m, example_inputs, CaptureConfig(pt2_mode=True)).to_edge( - config=config - ) + m = exir.capture(m, example_inputs, CaptureConfig()).to_edge(config=config) # QuantFusionPass should be part of to_executorch() config, separating it out so that we can check the graph. m = m.transform(QuantFusionPass()) # check that we are using functional variant of q/dq/add @@ -97,9 +95,7 @@ def forward(self, x, y): m(*example_inputs) m = _convert_to_reference_decomposed_fx(m) config = EdgeCompileConfig(_check_ir_validity=False) - m = exir.capture(m, example_inputs, CaptureConfig(pt2_mode=True)).to_edge( - config=config - ) + m = exir.capture(m, example_inputs, CaptureConfig()).to_edge(config=config) # QuantFusionPass should be part of to_executorch() config, separating it out so that we can check the graph. 
         m = m.transform(QuantFusionPass())
         # check that we are using functional variant of q/dq/add/reshape
@@ -154,9 +150,7 @@ def forward(self, x, y):
         )
         m = _convert_to_reference_decomposed_fx(m)
         config = EdgeCompileConfig(_check_ir_validity=False)
-        m = exir.capture(m, example_inputs, CaptureConfig(pt2_mode=True)).to_edge(
-            config=config
-        )
+        m = exir.capture(m, example_inputs, CaptureConfig()).to_edge(config=config)
         # QuantFusionPass should be part of to_executorch() config, separating it out so that we can check the graph.
         m = m.transform(QuantFusionPass())
         # check that we are using functional variant of q/dq/add/slice
@@ -203,9 +197,7 @@ def forward(self, x, y):
         m(*example_inputs)
         m = _convert_to_reference_decomposed_fx(m)
         config = EdgeCompileConfig(_check_ir_validity=False)
-        m = exir.capture(m, example_inputs, CaptureConfig(pt2_mode=True)).to_edge(
-            config=config
-        )
+        m = exir.capture(m, example_inputs, CaptureConfig()).to_edge(config=config)
         # QuantFusionPass should be part of to_executorch() config, separating it out so that we can check the graph.
         m = m.transform(QuantFusionPass())
         # check that we are using functional variant of q/dq/cat
diff --git a/exir/tests/test_quant_lowering_custom_backend_pass.py b/exir/tests/test_quant_lowering_custom_backend_pass.py
index bfcedd16746..fec77513a67 100644
--- a/exir/tests/test_quant_lowering_custom_backend_pass.py
+++ b/exir/tests/test_quant_lowering_custom_backend_pass.py
@@ -224,7 +224,7 @@ def get_graph_module(self) -> torch.fx.GraphModule:
             exir.capture(
                 self.pattern,
                 self.inputs,
-                exir.CaptureConfig(pt2_mode=True),
+                exir.CaptureConfig(),
             )
             .to_edge(
                 exir.EdgeCompileConfig(
@@ -493,7 +493,7 @@ class TestQuantLoweringCustomBackendPass(unittest.TestCase):
     def setUp(self) -> None:
         super().setUp()

-    @torch.inference_mode()  # TODO Use pt2_mode=True for capturing.
+    @torch.inference_mode()  # TODO Use the pt2 flow for capturing.
     def test(self) -> None:
         mod = TestModel(
             constant_tensor=torch.ones(
@@ -541,7 +541,7 @@ def test(self) -> None:

         # Step 2: EXIR capturing + duplicating dequant nodes
         captured_program = exir.capture(
-            converted_mod, example_inputs, exir.CaptureConfig(pt2_mode=True)
+            converted_mod, example_inputs, exir.CaptureConfig()
         ).to_edge(
             exir.EdgeCompileConfig(
                 passes=[DuplicateDequantNodePass()],
@@ -615,7 +615,7 @@ def test(self) -> None:
         # - Retracing to verify that it is still runnable before custom passes
         # - Target-aware pass where it fuses quantized ConvRelu and MaxPool to a DSP
         # fused_mod = exir.capture(
-        #    delegated_mod, example_inputs, exir.CaptureConfig(pt2_mode=True)
+        #    delegated_mod, example_inputs, exir.CaptureConfig()
         # ).to_edge(
         #     EdgeCompileConfig(
         #         passes=[ReplaceQuantizedOperatorsWithQualcommDSP()],
@@ -657,7 +657,7 @@ def test_quantized_linear_dynamic(self) -> None:
         )

         # Step 2: EXIR capturing
-        capture_config = CaptureConfig(pt2_mode=True, enable_aot=True, _unlift=True)
+        capture_config = CaptureConfig(enable_aot=True, _unlift=True)
         captured_mod = (
             exir.capture(converted_mod, example_inputs, config=capture_config)
             .to_edge(
@@ -798,7 +798,7 @@ def test_quantized_linear_dynamic_symmetric_act_per_channel_weight(self) -> None
         print("converted:", converted_mod)

         # Step 2: EXIR capturing
-        capture_config = CaptureConfig(pt2_mode=True, enable_aot=True, _unlift=True)
+        capture_config = CaptureConfig(enable_aot=True, _unlift=True)
         captured_mod = exir.capture(
             converted_mod, example_inputs, config=capture_config
         ).to_edge(
@@ -866,7 +866,7 @@ def test_quantized_linear_dynamic_symmetric_act_per_tensor_weight(self) -> None:
         print("converted:", converted_mod)

         # Step 2: EXIR capturing
-        capture_config = CaptureConfig(pt2_mode=True, enable_aot=True, _unlift=True)
+        capture_config = CaptureConfig(enable_aot=True, _unlift=True)
         captured_mod = exir.capture(
             converted_mod, example_inputs, config=capture_config
         ).to_edge(
diff --git a/exir/tests/test_serde.py b/exir/tests/test_serde.py
index 3440f6955b1..2a4d78936de 100644
--- a/exir/tests/test_serde.py
+++ b/exir/tests/test_serde.py
@@ -46,7 +46,7 @@ def check_ep(

     # pyre-ignore
     def check_serde(self, m, inputs) -> None:
-        aten = exir.capture(m, inputs, exir.CaptureConfig(pt2_mode=True))
+        aten = exir.capture(m, inputs, exir.CaptureConfig())
         aten_new = deserialize(*serialize(aten.exported_program))
         self.check_ep(aten.exported_program, aten_new, inputs)
@@ -114,7 +114,7 @@ def forward(self, x):
         sin_module = SinModule()
         model_inputs = (torch.ones(1),)
         edgeir_m = exir.capture(
-            sin_module, model_inputs, exir.CaptureConfig(pt2_mode=True)
+            sin_module, model_inputs, exir.CaptureConfig()
         ).to_edge()
         max_value = model_inputs[0].shape[0]
         compile_specs = [CompileSpec("max_value", bytes([max_value]))]
@@ -135,9 +133,7 @@ def forward(self, x):

         composite_model(*model_inputs)

-        aten = exir.capture(
-            composite_model, model_inputs, exir.CaptureConfig(pt2_mode=True)
-        )
+        aten = exir.capture(composite_model, model_inputs, exir.CaptureConfig())
         aten_new = deserialize(*serialize(aten.exported_program))
         self.check_ep(aten.exported_program, aten_new, model_inputs)
@@ -157,7 +155,7 @@ def forward(self, a, x, b):

         m = Model()
         inputs = (torch.randn(2, 2), torch.randn(2, 2), torch.randn(2, 2))
-        ep = exir.capture(m, inputs, exir.CaptureConfig(pt2_mode=True)).to_edge()
+        ep = exir.capture(m, inputs, exir.CaptureConfig()).to_edge()
         edge = to_backend(ep.exported_program, AddMulPartitionerDemo)
         edge_new = deserialize(*serialize(edge))
         self.check_ep(edge, edge_new, inputs)
@@ -174,6 +172,6 @@ def forward(self, x):

         m = Model()
         inputs = (torch.tensor([1, 1]),)
-        edge = exir.capture(m, inputs, exir.CaptureConfig(pt2_mode=True)).to_edge()
+        edge = exir.capture(m, inputs, exir.CaptureConfig()).to_edge()
         edge_new = deserialize(*serialize(edge.exported_program))
         self.check_ep(edge, edge_new, inputs)
diff --git a/exir/tests/test_tracer.py b/exir/tests/test_tracer.py
index d0a0fae1f61..b7bd4d197c2 100644
--- a/exir/tests/test_tracer.py
+++ b/exir/tests/test_tracer.py
@@ -35,7 +35,7 @@ def setUpClass(cls) -> None:
     def test_simple(self) -> None:
         f = models.BasicSinMax()
         f = (
-            exir.capture(f, f.get_random_inputs(), exir.CaptureConfig(pt2_mode=True))
+            exir.capture(f, f.get_random_inputs(), exir.CaptureConfig())
             .to_edge()
             .exported_program.graph_module
         )
@@ -54,7 +54,7 @@ def f(pred: bool, x: torch.Tensor) -> torch.Tensor:
         pred = True
         x = torch.randn(100)
         f_true = (
-            exir.capture(f, (pred, x), exir.CaptureConfig(pt2_mode=True))
+            exir.capture(f, (pred, x), exir.CaptureConfig())
             .to_edge()
             .exported_program.graph_module
         )
@@ -65,7 +65,7 @@ def f(pred: bool, x: torch.Tensor) -> torch.Tensor:

         pred = False
         f_false = (
-            exir.capture(f, (pred, x), exir.CaptureConfig(pt2_mode=True))
+            exir.capture(f, (pred, x), exir.CaptureConfig())
             .to_edge()
             .exported_program.graph_module
         )
@@ -76,7 +76,7 @@ def f(pred: bool, x: torch.Tensor) -> torch.Tensor:
     def test_copy(self) -> None:
         f = models.BasicSinMax()
         f = (
-            exir.capture(f, f.get_random_inputs(), exir.CaptureConfig(pt2_mode=True))
+            exir.capture(f, f.get_random_inputs(), exir.CaptureConfig())
             .to_edge()
             .exported_program.graph_module
         )
@@ -90,7 +90,7 @@ def f(x: torch.Tensor) -> torch.Tensor:
             return x + x

         traced_f = (
-            exir.capture(f, (torch.rand(2, 2),), exir.CaptureConfig(pt2_mode=True))
+            exir.capture(f, (torch.rand(2, 2),), exir.CaptureConfig())
             .to_edge()
             .exported_program.graph_module
         )
@@ -110,9 +110,7 @@ def f(x: torch.Tensor) -> torch.Tensor:
             SpecViolationError,
             r"operator .* is not functional",
         ):
-            exir.capture(
-                f, (torch.zeros(5),), exir.CaptureConfig(pt2_mode=True)
-            ).to_edge()
+            exir.capture(f, (torch.zeros(5),), exir.CaptureConfig()).to_edge()

     def test_tensor_spec_for_const_tensors(self) -> None:
         class Module(torch.nn.Module):
@@ -128,9 +126,7 @@ def get_random_inputs(self) -> Tuple[torch.Tensor, ...]:
         model = Module()
         graph_module = (
-            exir.capture(
-                model, model.get_random_inputs(), exir.CaptureConfig(pt2_mode=True)
-            )
+            exir.capture(model, model.get_random_inputs(), exir.CaptureConfig())
             # torch._ops.aten.t.default
             .to_edge(
                 exir.EdgeCompileConfig(_check_ir_validity=False)
             )
@@ -153,7 +149,7 @@ def f(x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:

         cnt = 0
         module = (
-            exir.capture(f, (torch.zeros(1, 2, 3),), exir.CaptureConfig(pt2_mode=True))
+            exir.capture(f, (torch.zeros(1, 2, 3),), exir.CaptureConfig())
             .to_edge()
             .exported_program.graph_module
         )
@@ -175,7 +171,7 @@ def f(x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
             exir.capture(
                 f,
                 inputs,
-                exir.CaptureConfig(pt2_mode=True),
+                exir.CaptureConfig(),
             )
             .to_edge()
             .exported_program.graph_module
@@ -218,7 +214,6 @@ def forward(self, x):
             m,
             (example_input,),
             exir.CaptureConfig(
-                pt2_mode=True,
                 enable_functionalization=False,
                 enable_dynamic_shape=True,
             ),
@@ -239,7 +234,6 @@ def forward(x: torch.Tensor) -> torch.Tensor:
             forward,
             (torch.ones(3, 2, dtype=torch.int64),),
             exir.CaptureConfig(
-                pt2_mode=True,
                 enable_functionalization=False,
                 enable_dynamic_shape=True,
             ),
@@ -270,7 +264,7 @@ def forward(

         with using_dynamo(True):
             inp = ((torch.ones(6), (torch.ones(6), torch.ones(6))),)
-            gm = exir.capture(Module(), inp, exir.CaptureConfig(pt2_mode=True))
+            gm = exir.capture(Module(), inp, exir.CaptureConfig())
             self.assertTrue(torch.allclose(Module()(*inp), gm(*inp)))

     # TODO (tmanlaibaatar) remove this test
@@ -296,7 +290,7 @@ def f(x: torch.Tensor, y: List[torch.Tensor]) -> Dict[str, torch.Tensor]:
         # pyre-fixme[23]: Unable to unpack `(...) -> Tuple[GraphModule,
         #  Set[torch._guards.Guard]]` into 2 values.
         gm, _ = torch._dynamo.export(f, *inp, aten_graph=True, tracing_mode="symbolic")
-        prog = exir.capture(f, inp, config=exir.CaptureConfig(pt2_mode=True)).to_edge()
+        prog = exir.capture(f, inp, config=exir.CaptureConfig()).to_edge()

         self.assertEqual(prog(*inp), f(*inp))
@@ -337,7 +331,6 @@ def forward(self, x, y, z):
             model,
             example_inputs,
             exir.CaptureConfig(
-                pt2_mode=True,
                 enable_aot=True,
             ),
         )
@@ -355,7 +348,6 @@ def forward(self, x, y, z):
             new_model,
             example_inputs,
             exir.CaptureConfig(
-                pt2_mode=True,
                 enable_aot=True,
             ),
         )
@@ -380,7 +372,7 @@ def foo(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:

         dynamo_config = ExirDynamoConfig(assume_static_by_default=True)
         capture_config = exir.CaptureConfig(
-            pt2_mode=True, enable_dynamic_shape=True, _dynamo_config=dynamo_config
+            enable_dynamic_shape=True, _dynamo_config=dynamo_config
         )
         captured = exir.capture(
             foo, (torch.ones(6, 2), torch.ones(6, 3)), capture_config
@@ -405,7 +397,7 @@ def __init__(self):
             def forward(self, x):
                 return x.cos() + self.buffer.sum()

-        capture_config = exir.CaptureConfig(pt2_mode=True, enable_aot=True)
+        capture_config = exir.CaptureConfig(enable_aot=True)
         captured_gm = exir.capture(
             FooWithBuffer(), (torch.ones(6, 2),), capture_config
         ).exported_program.graph_module
@@ -441,7 +433,7 @@ def forward(self, x):
         ep = exir.capture(
             Foo(),
             (torch.ones(6, 4),),
-            exir.CaptureConfig(enable_aot=True, pt2_mode=True, _unlift=True),
+            exir.CaptureConfig(enable_aot=True, _unlift=True),
         )
         self.assertTrue(torch.allclose(ep(torch.ones(6, 4)), Foo()(torch.ones(6, 4))))
@@ -459,7 +451,7 @@ def forward(self, x):
         ep = exir.capture(
             FooContainerInputOutput(),
             (inp,),
-            CaptureConfig(pt2_mode=True, enable_aot=True, _unlift=True),
+            CaptureConfig(enable_aot=True, _unlift=True),
         )
         self.assertTrue(torch.allclose(ep(inp), FooContainerInputOutput()(inp)))
@@ -476,7 +468,7 @@ def forward(self, x, y):
         ep = exir.capture(
             FooContainerInputOutputV2(),
             inp,
-            CaptureConfig(pt2_mode=True, enable_aot=True, _unlift=True),
+            CaptureConfig(enable_aot=True, _unlift=True),
         )
         self.assertTrue(torch.allclose(ep(*inp), FooContainerInputOutputV2()(*inp)))
@@ -507,7 +499,7 @@ def false_fn(x):
         ep = exir.capture(
             Foo(),
             (inp,),
-            CaptureConfig(pt2_mode=True, enable_aot=True, _unlift=True),
+            CaptureConfig(enable_aot=True, _unlift=True),
         )
         self.assertTrue(torch.allclose(ep(torch.ones(6, 4)), Foo()(torch.ones(6, 4))))
@@ -544,7 +536,7 @@ def body(x, pred):
         ep = exir.capture(
             Module(),
             (torch.tensor(True), inp),
-            CaptureConfig(pt2_mode=True, enable_aot=True, _unlift=True),
+            CaptureConfig(enable_aot=True, _unlift=True),
         )

         inp_test = torch.randn(3, 2, 1)
diff --git a/exir/tests/test_verification.py b/exir/tests/test_verification.py
index d13433eddb5..432e868c6f7 100644
--- a/exir/tests/test_verification.py
+++ b/exir/tests/test_verification.py
@@ -27,7 +27,7 @@ def f(x: torch.Tensor) -> torch.Tensor:

         # Generate program
         program = (
-            exir.capture(f, (torch.randn(2),), exir.CaptureConfig(pt2_mode=True))
+            exir.capture(f, (torch.randn(2),), exir.CaptureConfig())
             .to_edge(exir.EdgeCompileConfig(passes=[ConstPropPass()]))
             .to_executorch()
             .program
@@ -78,7 +78,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         model1 = Op1()
         inputs = (torch.ones(2, 2),)
         program = (
-            exir.capture(model1, inputs, exir.CaptureConfig(pt2_mode=True))
+            exir.capture(model1, inputs, exir.CaptureConfig())
             .to_edge()
             .to_executorch(ExecutorchBackendConfig(to_out_var_pass=ToOutVarPass(True)))
             .program
@@ -95,7 +95,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         model2 = Op2()
         inputs = (torch.ones(2, 2),)
         program = (
-            exir.capture(model2, inputs, exir.CaptureConfig(pt2_mode=True))
+            exir.capture(model2, inputs, exir.CaptureConfig())
             .to_edge()
             .to_executorch()
             .program
@@ -130,7 +130,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         model2 = Op2()
         inputs = torch.ones(2, 2)
         exec_prog = (
-            exir.capture(model2, (inputs,), exir.CaptureConfig(pt2_mode=True))
+            exir.capture(model2, (inputs,), exir.CaptureConfig())
             .to_edge()
             .to_executorch()
         )
@@ -159,7 +159,7 @@ def forward(self, x):
             exir.capture(
                 m,
                 (torch.randn(1, 3, 100, 100).to(dtype=torch.int),),
-                exir.CaptureConfig(pt2_mode=True),
+                exir.CaptureConfig(),
             )
             .to_edge()
             .exported_program.graph_module
@@ -182,7 +182,7 @@ def forward(self, x, weight, bias):
             exir.capture(
                 m,
                 (torch.rand(16, 8, 32, 32), torch.rand(8), torch.rand(8)),
-                exir.CaptureConfig(pt2_mode=True),
+                exir.CaptureConfig(),
             )
             .to_edge()
             .exported_program.graph_module
@@ -204,7 +204,7 @@ def forward(self, x):
             exir.capture(
                 m,
                 ([],),
-                exir.CaptureConfig(pt2_mode=True),
+                exir.CaptureConfig(),
             )
             .to_edge()
             .exported_program.graph_module
@@ -227,7 +227,7 @@ def forward(self, x):
         egm = exir.capture(
             m,
             (torch.randn(1, 3, 100, 100).to(dtype=torch.int),),
-            exir.CaptureConfig(pt2_mode=True),
+            exir.CaptureConfig(),
         ).exported_program.graph_module
         verifier = EXIREdgeDialectVerifier()
         with self.assertRaises(SpecViolationError):
@@ -246,7 +246,7 @@ def forward(self, x):
             exir.capture(
                 m,
                 (torch.randn(1, 3, 100, 100).to(dtype=torch.int),),
-                exir.CaptureConfig(pt2_mode=True),
+                exir.CaptureConfig(),
             )
             .to_edge(EdgeCompileConfig())
             .exported_program.graph_module
@@ -263,7 +263,7 @@ def test_edge_sad_with_edge_ops(self) -> None:
             exir.capture(
                 m,
                 (torch.randn(1, 3, 100, 100).to(dtype=torch.bfloat16),),
-                exir.CaptureConfig(pt2_mode=True),
+                exir.CaptureConfig(),
             )
             .to_edge()
             .exported_program.graph_module
diff --git a/exir/verification/verifier.py b/exir/verification/verifier.py
index 3f92b1c5cff..c1eb53843c9 100644
--- a/exir/verification/verifier.py
+++ b/exir/verification/verifier.py
@@ -40,7 +40,7 @@ def valid_builtin_funcs(self):
         return builtin_funcs

     # TODO(angelayi): Delete this function when we migrate all tests to
-    # pt2_mode=True because right now old tracer does not add ["val"] metadata
+    # the pt2 flow, because right now the old tracer does not add ["val"] metadata
     def check_valid(self, gm: GraphModule) -> None:  # noqa: C901
         for node in gm.graph.nodes:
diff --git a/extension/pybindings/test/test.py b/extension/pybindings/test/test.py
index b16ae963964..16f158c6efa 100644
--- a/extension/pybindings/test/test.py
+++ b/extension/pybindings/test/test.py
@@ -73,9 +73,7 @@ def create_program(
    # These cleanup passes are required to convert the `add` op to its out
    # variant, along with some other transformations.
    exec_prog = (
-        exir.capture_multiple(
-            eager_module, input_map, config=CaptureConfig(pt2_mode=True)
-        )
+        exir.capture_multiple(eager_module, input_map, config=CaptureConfig())
         .to_edge()
         .to_executorch()
     )
diff --git a/sdk/edir/tests/exported_op_graph_test.py b/sdk/edir/tests/exported_op_graph_test.py
index 5d31ab2450e..33c940491c1 100644
--- a/sdk/edir/tests/exported_op_graph_test.py
+++ b/sdk/edir/tests/exported_op_graph_test.py
@@ -341,7 +341,7 @@ def get_random_inputs(self) -> Tuple[Tensor, Tensor]:
     edge_ir_m = exir.capture(
         delegated_m,
         delegated_m.get_random_inputs(),
-        exir.CaptureConfig(pt2_mode=True),
+        exir.CaptureConfig(),
     ).to_edge()
     lowered_module = LoweredBackendModule(
         edge_program=edge_ir_m,
@@ -550,7 +550,7 @@ def generate_op_graph(m: Any, inputs: Any) -> ExportedETOperatorGraph:
     Given a module and its inputs, returns the Operator Graph
     """
     et_program = (
-        exir.capture(m, inputs, exir.CaptureConfig(pt2_mode=True))
+        exir.capture(m, inputs, exir.CaptureConfig())
         .to_edge(
             exir.EdgeCompileConfig(
                 _check_ir_validity=False,
@@ -624,7 +624,7 @@ def gen_graphs_from_model(
     et_aten = exir.capture(
         model,
         model.get_random_inputs(),
-        exir.CaptureConfig(pt2_mode=True),
+        exir.CaptureConfig(),
     )
     et_aten_copy = copy.deepcopy(et_aten)
     et_edge = et_aten.to_edge(
diff --git a/sdk/etrecord/tests/etrecord_test.py b/sdk/etrecord/tests/etrecord_test.py
index 792c1975146..83aebbef6db 100644
--- a/sdk/etrecord/tests/etrecord_test.py
+++ b/sdk/etrecord/tests/etrecord_test.py
@@ -19,9 +19,7 @@ class TestETRecord(unittest.TestCase):
     def get_test_model(self):
         f = models.BasicSinMax()
-        captured_output = exir.capture(
-            f, f.get_random_inputs(), exir.CaptureConfig(pt2_mode=True)
-        )
+        captured_output = exir.capture(f, f.get_random_inputs(), exir.CaptureConfig())
         captured_output_copy = copy.deepcopy(captured_output)
         edge_output = captured_output.to_edge(
             # TODO(gasoon): Remove _use_edge_ops=False once serde is fully migrated to Edge ops
diff --git a/test/end2end/test_end2end.py b/test/end2end/test_end2end.py
index d278d0a2f16..7d843c48156 100644
--- a/test/end2end/test_end2end.py
+++ b/test/end2end/test_end2end.py
@@ -653,7 +653,6 @@ class E2ETest(unittest.TestCase):
     test_mem_planning_toy_model = maketest(
         ToyModelForMemPlanning,
         capture_config=exir.CaptureConfig(
-            pt2_mode=True,
             enable_dynamic_shape=True,
         ),
     )
@@ -671,7 +670,6 @@ class E2ETest(unittest.TestCase):
         ModuleContainers,
         do_tree_flatten=True,
         capture_config=exir.CaptureConfig(
-            pt2_mode=True,
             enable_dynamic_shape=True,
         ),
     )
@@ -722,7 +720,6 @@ class DynamicModelE2ETest(unittest.TestCase):
         # with upperbound shape and may not match the actual shape.
         run_graph_module=False,
         capture_config=exir.CaptureConfig(
-            pt2_mode=True,
             enable_dynamic_shape=True,
             # enable_functionalization=False,  # TODO enable functionalization
         ),
     )
@@ -736,7 +733,6 @@ class DynamicModelE2ETest(unittest.TestCase):
         run_graph_module=False,
         allow_non_contiguous_tensor=True,
         capture_config=exir.CaptureConfig(
-            pt2_mode=True,
             enable_dynamic_shape=True,
         ),
     )
@@ -756,7 +752,6 @@ class DynamicModelE2ETest(unittest.TestCase):
     test_ft_cond_basic = maketest(
         FTCondBasic,
         capture_config=exir.CaptureConfig(
-            pt2_mode=True,
             enable_dynamic_shape=True,
             enable_functionalization=False,  # TODO enable functionalization
         ),
@@ -766,7 +761,6 @@ class DynamicModelE2ETest(unittest.TestCase):
         maketest(
             FTMapBasic,
             capture_config=exir.CaptureConfig(
-                pt2_mode=True,
                 enable_dynamic_shape=True,
                 enable_functionalization=False,  # TODO enable functionalization
             ),
@@ -776,7 +770,6 @@ class DynamicModelE2ETest(unittest.TestCase):
     test_ft_cond_dynshape = maketest(
         FTCondDynShape,
         capture_config=exir.CaptureConfig(
-            pt2_mode=True,
             enable_dynamic_shape=True,
             enable_functionalization=False,  # TODO enable functionalization
         ),
@@ -786,7 +779,6 @@ class DynamicModelE2ETest(unittest.TestCase):
         maketest(
             FTMapDynShape,
             capture_config=exir.CaptureConfig(
-                pt2_mode=True,
                 enable_dynamic_shape=True,
                 enable_functionalization=False,  # TODO enable functionalization
             ),
@@ -796,7 +788,6 @@ class DynamicModelE2ETest(unittest.TestCase):
     test_batch_norm = maketest(
         BatchNormModel,
         capture_config=exir.CaptureConfig(
-            pt2_mode=True,
             enable_dynamic_shape=True,
         ),
         verify_graph=BatchNormModel.verify_graph,
@@ -843,7 +834,6 @@ class BundledProgramE2ETest(unittest.TestCase):
         run_graph_module=False,
         bundled_io=True,
         capture_config=exir.CaptureConfig(
-            pt2_mode=True,
             enable_dynamic_shape=True,
         ),
     )
diff --git a/test/models/export_delegated_program.py b/test/models/export_delegated_program.py
index eefec647180..622bd9b0d08 100644
--- a/test/models/export_delegated_program.py
+++ b/test/models/export_delegated_program.py
@@ -82,7 +82,7 @@ def export_module_to_program(
     inputs = ()
     if hasattr(eager_module, "get_random_inputs"):
         inputs = eager_module.get_random_inputs()
-    capture_config = exir.CaptureConfig(pt2_mode=True)
+    capture_config = exir.CaptureConfig()
    edge: exir.ExirExportedProgram = exir.capture(
        getattr(eager_module, method),
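
For context, a minimal sketch of the call-site rewrite applied throughout this patch. The module, inputs, and import line below are illustrative assumptions rather than code from this repo; `exir.capture` and `exir.CaptureConfig` are used exactly as in the hunks above, and the commit title calls `pt2_mode=True` redundant, which suggests it is already the default:

    import torch
    from executorch import exir  # import path assumed

    class AddOne(torch.nn.Module):  # hypothetical toy module
        def forward(self, x):
            return x + 1

    example_inputs = (torch.randn(2, 2),)

    # Before this patch:
    #   prog = exir.capture(AddOne(), example_inputs, exir.CaptureConfig(pt2_mode=True))
    # After this patch (equivalent, assuming pt2_mode defaults to True):
    prog = exir.capture(AddOne(), example_inputs, exir.CaptureConfig()).to_edge()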