From 68baa3f37fcebc18c596c06a5d1f1244838bf966 Mon Sep 17 00:00:00 2001
From: Mengwei Liu
Date: Wed, 9 Apr 2025 15:30:03 -0700
Subject: [PATCH] Add a namespace for ATen mode (#9894)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Summary:
Pull Request resolved: https://github.com/pytorch/executorch/pull/9894

## Context

As titled. This is an effort to solve a major pain point for ATen mode users: duplicate symbols and duplicate operator registration.

A typical duplicate symbol error looks like:

```
ld.lld: error: duplicate symbol: executorch::runtime::Method::get_num_external_constants()
>>> defined at __stripped__/method.cpp.pic.stripped.o:(executorch::runtime::Method::get_num_external_constants()) in archive buck-out/v2/gen/fbcode/712c6d0a4cb497c7/executorch/runtime/executor/__program_no_prim_ops_aten__/libprogram_no_prim_ops_aten.stripped.pic.a
>>> defined at __stripped__/method.cpp.pic.stripped.o:(.text._ZN10executorch7runtime6Method26get_num_external_constantsEv+0x0) in archive buck-out/v2/gen/fbcode/712c6d0a4cb497c7/executorch/runtime/executor/__program_no_prim_ops__/libprogram_no_prim_ops.stripped.pic.a
```

[User post](https://fb.workplace.com/groups/pytorch.edge.users/permalink/1727735561430063/)

This happens when a user depends on both `program_no_prim_ops` and `program_no_prim_ops_aten`: both libraries define symbols such as `executorch::runtime::Method`, yet they transitively depend on different definitions of `Tensor` and other types (see `exec_aten.h`).

The other common issue is re-registering operators:

```
buck2 test //arvr/libraries/wristband/tsn/transformers:TorchstreamTransformer -- --print-passing-details
File changed: fbsource//xplat/executorch/build/fb/clients.bzl
File changed: fbsource//xplat/executorch
File changed: fbcode//executorch/build/fb/clients.bzl
16 additional file change events
⚠ Listing failed: fbsource//arvr/libraries/wristband/tsn/transformers:TorchstreamTransformerTestFbcode
Failed to list tests. Expected exit code 0 but received: ExitStatus(unix_wait_status(134))
STDOUT:
STDERR:E 00:00:00.000543 executorch:operator_registry.cpp:86] Re-registering aten::sym_size.int, from /data/sandcastle/boxes/fbsource/buck-out/v2/gen/fbsource/cfdc20bd56300721/arvr/libraries/wristband/tsn/transformers/__TorchstreamTransformerTestFbcode__/./__TorchstreamTransformerTestFbcode__shared_libs_symlink_tre$
E 00:00:00.000572 executorch:operator_registry.cpp:87] key: (null), is_fallback: true
F 00:00:00.000576 executorch:operator_registry.cpp:111] In function register_kernels(), assert failed (false): Kernel registration failed with error 18, see error log for details.
```

[User post](https://fb.workplace.com/groups/pytorch.edge.users/permalink/1691696305033989/)
[User post 2](https://fb.workplace.com/groups/pytorch.edge.users/permalink/1510414646495490/)

This is worse than a duplicate symbol because it is a runtime error. It happens when a user depends on a kernel library built with ATen tensors and a kernel library built with ET tensors at the same time, for example when a C++ binary depends on both `//executorch/kernels/prim_ops:prim_ops_registry` and `//executorch/kernels/prim_ops:prim_ops_registry_aten`.

## My proposal

Add a new namespace to the symbols in ATen mode:

`executorch::runtime::Method` --> `executorch::runtime::aten::Method`

This way, a C++ binary is allowed to depend on both an ET library with ATen mode enabled and an ET library with ATen mode disabled.
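To make the mechanism concrete, below is a minimal C++ sketch of how the namespace selection works. The preprocessor block reproduces the `ET_RUNTIME_NAMESPACE` macro this diff adds to `runtime/core/exec_aten/exec_aten.h`; the `MethodT` alias and the `output_count` helper are illustrative assumptions, not part of the change.

```cpp
// Macro added in runtime/core/exec_aten/exec_aten.h: in ATen mode
// (USE_ATEN_LIB) runtime symbols resolve under executorch::runtime::aten,
// otherwise they stay under executorch::runtime.
#ifndef ET_RUNTIME_NAMESPACE
#if defined(USE_ATEN_LIB)
#define ET_RUNTIME_NAMESPACE runtime::aten
#else
#define ET_RUNTIME_NAMESPACE runtime
#endif
#endif

#include <executorch/runtime/executor/method.h>

// Hypothetical caller: written once against the macro, this compiles in both
// modes, and each mode's Method lives in a distinct namespace, so the two
// builds can be linked into the same binary without colliding.
using MethodT = ::executorch::ET_RUNTIME_NAMESPACE::Method;

size_t output_count(MethodT& method) {
  return method.outputs_size();  // same Method API in either mode
}
```

Because the macro defaults to plain `runtime` when `USE_ATEN_LIB` is not set, portable-mode call sites keep resolving to the same symbols as before.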
This is not BC breaking for OSS users, since ATen mode was never exposed. Reviewed By: iseeyuan Differential Revision: D72440313 --- .../RegisterCodegenUnboxedKernels.cpp | 4 +- devtools/bundled_program/bundled_program.cpp | 7 +- devtools/bundled_program/bundled_program.h | 10 +- .../portable/custom_ops/custom_ops_2_out.cpp | 2 +- .../flat_tensor/flat_tensor_data_map.cpp | 2 +- extension/flat_tensor/flat_tensor_data_map.h | 6 +- extension/flat_tensor/targets.bzl | 44 ++--- extension/module/module.cpp | 17 +- extension/module/module.h | 22 ++- extension/module/targets.bzl | 2 +- extension/pybindings/pybindings.cpp | 16 +- extension/runner_util/inputs.cpp | 6 +- extension/runner_util/inputs.h | 9 +- extension/runner_util/inputs_aten.cpp | 4 +- extension/tensor/tensor_ptr.cpp | 2 +- kernels/aten/cpu/op__empty_dim_order.cpp | 2 +- kernels/aten/cpu/op__to_dim_order_copy.cpp | 2 +- .../portable/cpu/op__to_dim_order_copy.cpp | 2 +- kernels/portable/test/op_gelu_test.cpp | 2 +- kernels/prim_ops/register_prim_ops.cpp | 7 +- kernels/prim_ops/targets.bzl | 2 +- kernels/quantized/cpu/op_quantize.cpp | 3 +- kernels/quantized/test/op_add_test.cpp | 2 +- .../quantized/test/op_embedding2b_test.cpp | 2 +- .../quantized/test/op_embedding4b_test.cpp | 2 +- kernels/quantized/test/op_embedding_test.cpp | 2 +- .../quantized/test/op_mixed_linear_test.cpp | 2 +- kernels/quantized/test/op_mixed_mm_test.cpp | 2 +- kernels/test/TestUtil.h | 2 +- .../test/custom_kernel_example/op_relu.cpp | 2 +- kernels/test/op_atan2_test.cpp | 2 +- kernels/test/op_cdist_forward_test.cpp | 3 +- kernels/test/op_clamp_test.cpp | 2 +- kernels/test/op_diagonal_copy_test.cpp | 2 +- kernels/test/op_flip_test.cpp | 2 +- kernels/test/op_ge_test.cpp | 2 +- kernels/test/op_gt_test.cpp | 2 +- kernels/test/op_le_test.cpp | 2 +- kernels/test/op_lt_test.cpp | 2 +- kernels/test/op_maximum_test.cpp | 2 +- kernels/test/op_native_batch_norm_test.cpp | 2 +- kernels/test/op_native_group_norm_test.cpp | 2 +- kernels/test/op_ne_test.cpp | 2 +- kernels/test/op_pdist_forward_test.cpp | 2 +- kernels/test/op_prod_test.cpp | 4 +- kernels/test/op_reflection_pad1d_test.cpp | 2 +- kernels/test/op_reflection_pad2d_test.cpp | 2 +- kernels/test/op_reflection_pad3d_test.cpp | 2 +- kernels/test/op_replication_pad1d_test.cpp | 2 +- kernels/test/op_replication_pad2d_test.cpp | 2 +- kernels/test/op_replication_pad3d_test.cpp | 2 +- kernels/test/op_roll_test.cpp | 2 +- kernels/test/op_topk_test.cpp | 3 +- runtime/backend/backend_execution_context.h | 6 +- runtime/backend/backend_init_context.h | 8 +- runtime/backend/interface.cpp | 4 +- runtime/backend/interface.h | 19 +- runtime/core/event_tracer_hooks.h | 37 ++-- runtime/core/exec_aten/exec_aten.h | 15 ++ .../testing_util/test/tensor_factory_test.cpp | 2 +- runtime/core/exec_aten/util/tensor_util.h | 164 +++++++++--------- .../core/exec_aten/util/tensor_util_aten.cpp | 5 +- .../exec_aten/util/test/tensor_util_test.cpp | 44 ++--- runtime/core/named_data_map.h | 9 +- runtime/core/targets.bzl | 63 +++---- runtime/core/tensor_layout.cpp | 4 +- runtime/core/tensor_layout.h | 4 +- runtime/executor/method.cpp | 5 +- runtime/executor/method.h | 6 +- runtime/executor/method_meta.cpp | 5 +- runtime/executor/method_meta.h | 8 +- runtime/executor/platform_memory_allocator.h | 4 +- runtime/executor/program.cpp | 5 +- runtime/executor/program.h | 7 +- runtime/executor/pte_data_map.cpp | 4 +- runtime/executor/pte_data_map.h | 4 +- runtime/executor/targets.bzl | 53 +++--- runtime/executor/tensor_parser.h | 13 +- 
runtime/executor/tensor_parser_aten.cpp | 3 + runtime/executor/tensor_parser_exec_aten.cpp | 5 +- runtime/executor/tensor_parser_portable.cpp | 10 +- runtime/executor/test/executor_test.cpp | 12 +- runtime/executor/test/targets.bzl | 2 +- .../test/test_backend_compiler_lib.cpp | 12 +- .../test_backend_with_delegate_mapping.cpp | 12 +- runtime/kernel/kernel_runtime_context.h | 10 +- runtime/kernel/operator_registry.cpp | 4 +- runtime/kernel/operator_registry.h | 29 ++-- runtime/kernel/targets.bzl | 30 ++-- .../test/kernel_runtime_context_test.cpp | 2 +- runtime/kernel/test/targets.bzl | 13 ++ .../test/test_generated_lib_and_aten.cpp | 45 +++++ shim_et/xplat/executorch/codegen/codegen.bzl | 2 +- 93 files changed, 517 insertions(+), 411 deletions(-) create mode 100644 runtime/kernel/test/test_generated_lib_and_aten.cpp diff --git a/codegen/templates/RegisterCodegenUnboxedKernels.cpp b/codegen/templates/RegisterCodegenUnboxedKernels.cpp index 3076cde1a99..180baf9b2a9 100644 --- a/codegen/templates/RegisterCodegenUnboxedKernels.cpp +++ b/codegen/templates/RegisterCodegenUnboxedKernels.cpp @@ -22,8 +22,8 @@ // JIT op registry instead of c10 dispatcher. JIT op registry only takes boxed // kernels, so we are calling unboxing functions in UnboxingFunctions.h to cast // arguments into C++ types (instead of IValue) and delegate to unboxed kernels. -using KernelSpan = - ::executorch::runtime::Span; +using KernelSpan = ::executorch::runtime::Span< + const ::executorch::ET_RUNTIME_NAMESPACE::Kernel>; namespace torch { namespace executor { namespace function { diff --git a/devtools/bundled_program/bundled_program.cpp b/devtools/bundled_program/bundled_program.cpp index f12262f7dd0..4e5beb84615 100644 --- a/devtools/bundled_program/bundled_program.cpp +++ b/devtools/bundled_program/bundled_program.cpp @@ -27,9 +27,9 @@ using executorch::aten::ArrayRef; using executorch::aten::Half; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using ::executorch::ET_RUNTIME_NAMESPACE::Method; using ::executorch::runtime::Error; using ::executorch::runtime::EValue; -using ::executorch::runtime::Method; using ::executorch::runtime::Result; namespace executorch { @@ -332,8 +332,9 @@ ET_NODISCARD Error load_bundled_input( static_cast(status)); } - ::executorch::runtime::internal::event_tracer_set_bundled_input_index( - method.get_event_tracer(), testset_idx); + ::executorch::ET_RUNTIME_NAMESPACE::internal:: + event_tracer_set_bundled_input_index( + method.get_event_tracer(), testset_idx); return Error::Ok; } diff --git a/devtools/bundled_program/bundled_program.h b/devtools/bundled_program/bundled_program.h index 884ca6f21bc..cedb2448eca 100644 --- a/devtools/bundled_program/bundled_program.h +++ b/devtools/bundled_program/bundled_program.h @@ -18,7 +18,7 @@ namespace bundled_program { * An opaque pointer to a serialized bundled program. */ using SerializedBundledProgram = const void; - +using ::executorch::ET_RUNTIME_NAMESPACE::Method; /** * Load testset_idx-th bundled input of method_idx-th Method test in * bundled_program_ptr to given Method. @@ -31,7 +31,7 @@ using SerializedBundledProgram = const void; * execution. */ ET_NODISCARD ::executorch::runtime::Error load_bundled_input( - ::executorch::runtime::Method& method, + Method& method, SerializedBundledProgram* bundled_program_ptr, size_t testset_idx); @@ -49,7 +49,7 @@ ET_NODISCARD ::executorch::runtime::Error load_bundled_input( * execution. 
*/ ET_NODISCARD ::executorch::runtime::Error verify_method_outputs( - ::executorch::runtime::Method& method, + Method& method, SerializedBundledProgram* bundled_program_ptr, size_t testset_idx, double rtol = 1e-5, @@ -106,7 +106,7 @@ using serialized_bundled_program = ::executorch::bundled_program::SerializedBundledProgram; ET_NODISCARD inline ::executorch::runtime::Error LoadBundledInput( - ::executorch::runtime::Method& method, + Method& method, serialized_bundled_program* bundled_program_ptr, size_t testset_idx) { return ::executorch::bundled_program::load_bundled_input( @@ -115,7 +115,7 @@ ET_NODISCARD inline ::executorch::runtime::Error LoadBundledInput( ET_NODISCARD inline ::executorch::runtime::Error VerifyResultWithBundledExpectedOutput( - ::executorch::runtime::Method& method, + Method& method, serialized_bundled_program* bundled_program_ptr, size_t testset_idx, double rtol = 1e-5, diff --git a/examples/portable/custom_ops/custom_ops_2_out.cpp b/examples/portable/custom_ops/custom_ops_2_out.cpp index 138a8eeed89..2fb50e521c1 100644 --- a/examples/portable/custom_ops/custom_ops_2_out.cpp +++ b/examples/portable/custom_ops/custom_ops_2_out.cpp @@ -13,7 +13,7 @@ namespace native { using executorch::aten::ScalarType; using executorch::aten::Tensor; -using executorch::runtime::KernelRuntimeContext; +using executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext; namespace { void check_preconditions(const Tensor& in, Tensor& out) { diff --git a/extension/flat_tensor/flat_tensor_data_map.cpp b/extension/flat_tensor/flat_tensor_data_map.cpp index bf54ae014b5..8aa0af13928 100644 --- a/extension/flat_tensor/flat_tensor_data_map.cpp +++ b/extension/flat_tensor/flat_tensor_data_map.cpp @@ -25,8 +25,8 @@ using executorch::runtime::Result; using executorch::runtime::Span; using executorch::aten::ScalarType; +using executorch::ET_RUNTIME_NAMESPACE::TensorLayout; using executorch::runtime::DataLoader; -using executorch::runtime::TensorLayout; namespace executorch { namespace extension { diff --git a/extension/flat_tensor/flat_tensor_data_map.h b/extension/flat_tensor/flat_tensor_data_map.h index 972a5fa9c55..0e7aee8ffc8 100644 --- a/extension/flat_tensor/flat_tensor_data_map.h +++ b/extension/flat_tensor/flat_tensor_data_map.h @@ -32,7 +32,8 @@ namespace extension { /** * A NamedDataMap implementation for FlatTensor-serialized data. */ -class FlatTensorDataMap final : public executorch::runtime::NamedDataMap { +class FlatTensorDataMap final + : public executorch::ET_RUNTIME_NAMESPACE::NamedDataMap { public: /** * Creates a new DataMap that wraps FlatTensor data. @@ -51,7 +52,8 @@ class FlatTensorDataMap final : public executorch::runtime::NamedDataMap { * @return Error::NotFound if the key is not present. 
*/ ET_NODISCARD - executorch::runtime::Result + executorch::runtime::Result< + const executorch::ET_RUNTIME_NAMESPACE::TensorLayout> get_metadata(const char* key) const override; /** diff --git a/extension/flat_tensor/targets.bzl b/extension/flat_tensor/targets.bzl index 0d49995aa6e..4ac515b7bf0 100644 --- a/extension/flat_tensor/targets.bzl +++ b/extension/flat_tensor/targets.bzl @@ -1,24 +1,26 @@ load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") def define_common_targets(): - runtime.cxx_library( - name = "flat_tensor_data_map", - srcs = [ - "flat_tensor_data_map.cpp", - ], - exported_headers = ["flat_tensor_data_map.h"], - deps = [ - "//executorch/runtime/core:core", - "//executorch/runtime/core:evalue", - "//executorch/runtime/core:named_data_map", - "//executorch/runtime/core/exec_aten:lib", - "//executorch/runtime/core/exec_aten/util:tensor_util", - ], - exported_deps = [ - "//executorch/extension/flat_tensor/serialize:flat_tensor_header", - "//executorch/extension/flat_tensor/serialize:generated_headers", - ], - visibility = [ - "//executorch/...", - ], - ) + for aten_mode in [True, False]: + aten_suffix = "_aten" if aten_mode else "" + runtime.cxx_library( + name = "flat_tensor_data_map" + aten_suffix, + srcs = [ + "flat_tensor_data_map.cpp", + ], + exported_headers = ["flat_tensor_data_map.h"], + deps = [ + "//executorch/runtime/core:core", + "//executorch/runtime/core:evalue", + "//executorch/runtime/core:named_data_map" + aten_suffix, + "//executorch/runtime/core/exec_aten:lib" + aten_suffix, + "//executorch/runtime/core/exec_aten/util:tensor_util", + ], + exported_deps = [ + "//executorch/extension/flat_tensor/serialize:flat_tensor_header", + "//executorch/extension/flat_tensor/serialize:generated_headers", + ], + visibility = [ + "//executorch/...", + ], + ) diff --git a/extension/module/module.cpp b/extension/module/module.cpp index 400a2c45049..6c534b8d560 100644 --- a/extension/module/module.cpp +++ b/extension/module/module.cpp @@ -37,6 +37,9 @@ namespace executorch { namespace extension { +using ET_RUNTIME_NAMESPACE::MethodMeta; +using ET_RUNTIME_NAMESPACE::Program; + namespace { runtime::Result> load_file( const std::string& file_path, @@ -113,7 +116,7 @@ Module::Module( } Module::Module( - std::shared_ptr program, + std::shared_ptr program, std::unique_ptr memory_allocator, std::unique_ptr temp_allocator, std::unique_ptr event_tracer, @@ -131,7 +134,7 @@ Module::Module( runtime::runtime_init(); } -runtime::Error Module::load(const runtime::Program::Verification verification) { +runtime::Error Module::load(const Program::Verification verification) { if (!is_loaded()) { // Load the program if (!data_loader_) { @@ -156,10 +159,10 @@ runtime::Error Module::load(const runtime::Program::Verification verification) { } // else: either the map itself was provided or we have no data map, either // way no work to do. 
- auto program = ET_UNWRAP_UNIQUE( - runtime::Program::load(data_loader_.get(), verification)); - program_ = std::shared_ptr( - program.release(), [](runtime::Program* pointer) { delete pointer; }); + auto program = + ET_UNWRAP_UNIQUE(Program::load(data_loader_.get(), verification)); + program_ = std::shared_ptr( + program.release(), [](Program* pointer) { delete pointer; }); } return runtime::Error::Ok; } @@ -224,7 +227,7 @@ runtime::Error Module::load_method( return runtime::Error::Ok; } -runtime::Result Module::method_meta( +runtime::Result Module::method_meta( const std::string& method_name) { ET_CHECK_OK_OR_RETURN_ERROR(load_method(method_name)); return methods_.at(method_name).method->method_meta(); diff --git a/extension/module/module.h b/extension/module/module.h index 45d2cc1d14b..73c7328ee0a 100644 --- a/extension/module/module.h +++ b/extension/module/module.h @@ -19,6 +19,11 @@ namespace executorch { namespace extension { +using ET_RUNTIME_NAMESPACE::Method; +using ET_RUNTIME_NAMESPACE::MethodMeta; +using ET_RUNTIME_NAMESPACE::NamedDataMap; +using ET_RUNTIME_NAMESPACE::Program; + /** * A facade class for loading programs and executing methods within them. */ @@ -95,7 +100,7 @@ class Module { * @param[in] data_map_loader A DataLoader used for loading external weights. */ explicit Module( - std::shared_ptr program, + std::shared_ptr program, std::unique_ptr memory_allocator = nullptr, std::unique_ptr temp_allocator = nullptr, std::unique_ptr event_tracer = nullptr, @@ -116,8 +121,8 @@ class Module { */ ET_NODISCARD runtime::Error load( - const runtime::Program::Verification verification = - runtime::Program::Verification::Minimal); + const Program::Verification verification = + Program::Verification::Minimal); /** * Checks if the program is loaded. @@ -134,7 +139,7 @@ class Module { * * @returns Shared pointer to the program or nullptr if it's not yet loaded. */ - inline std::shared_ptr program() const { + inline std::shared_ptr program() const { return program_; } @@ -224,8 +229,7 @@ class Module { * @returns A method metadata, or an error if the program or method failed to * load. 
*/ - runtime::Result method_meta( - const std::string& method_name); + runtime::Result method_meta(const std::string& method_name); /** * Execute a specific method with the given input values and retrieve the @@ -473,20 +477,20 @@ class Module { std::vector> planned_spans; std::unique_ptr planned_memory; std::unique_ptr memory_manager; - std::unique_ptr method; + std::unique_ptr method; std::vector inputs; }; std::string file_path_; std::string data_map_path_; LoadMode load_mode_{LoadMode::MmapUseMlock}; - std::shared_ptr program_; + std::shared_ptr program_; std::unique_ptr data_loader_; std::unique_ptr memory_allocator_; std::unique_ptr temp_allocator_; std::unique_ptr event_tracer_; std::unique_ptr data_map_loader_; - std::unique_ptr data_map_; + std::unique_ptr data_map_; protected: std::unordered_map methods_; diff --git a/extension/module/targets.bzl b/extension/module/targets.bzl index 09a610a1fca..d8019ce9c4e 100644 --- a/extension/module/targets.bzl +++ b/extension/module/targets.bzl @@ -25,7 +25,7 @@ def define_common_targets(): "//executorch/extension/memory_allocator:malloc_memory_allocator", "//executorch/extension/data_loader:file_data_loader", "//executorch/extension/data_loader:mmap_data_loader", - "//executorch/extension/flat_tensor:flat_tensor_data_map", + "//executorch/extension/flat_tensor:flat_tensor_data_map" + aten_suffix, ], exported_deps = [ "//executorch/runtime/executor:program" + aten_suffix, diff --git a/extension/pybindings/pybindings.cpp b/extension/pybindings/pybindings.cpp index a998e591f30..766288d7e47 100644 --- a/extension/pybindings/pybindings.cpp +++ b/extension/pybindings/pybindings.cpp @@ -85,26 +85,26 @@ void et_pal_emit_log_message( namespace py = pybind11; using executorch::bundled_program::verify_method_outputs; +using ::executorch::ET_RUNTIME_NAMESPACE::BackendInterface; +using ::executorch::ET_RUNTIME_NAMESPACE::get_backend_class; +using ::executorch::ET_RUNTIME_NAMESPACE::get_backend_name; +using ::executorch::ET_RUNTIME_NAMESPACE::get_num_registered_backends; +using ::executorch::ET_RUNTIME_NAMESPACE::get_registered_kernels; +using ::executorch::ET_RUNTIME_NAMESPACE::Kernel; +using ::executorch::ET_RUNTIME_NAMESPACE::Method; +using ::executorch::ET_RUNTIME_NAMESPACE::Program; using ::executorch::extension::BufferDataLoader; using ::executorch::extension::MallocMemoryAllocator; using ::executorch::extension::MmapDataLoader; using ::executorch::runtime::ArrayRef; -using ::executorch::runtime::BackendInterface; using ::executorch::runtime::DataLoader; using ::executorch::runtime::Error; using ::executorch::runtime::EValue; using ::executorch::runtime::EventTracerDebugLogLevel; -using ::executorch::runtime::get_backend_class; -using ::executorch::runtime::get_backend_name; -using ::executorch::runtime::get_num_registered_backends; -using ::executorch::runtime::get_registered_kernels; using ::executorch::runtime::HierarchicalAllocator; -using ::executorch::runtime::Kernel; using ::executorch::runtime::MemoryAllocator; using ::executorch::runtime::MemoryManager; -using ::executorch::runtime::Method; using ::executorch::runtime::prof_result_t; -using ::executorch::runtime::Program; using ::executorch::runtime::Result; using ::executorch::runtime::Span; using ::executorch::runtime::Tag; diff --git a/extension/runner_util/inputs.cpp b/extension/runner_util/inputs.cpp index 11cd176b5d1..842ba25532f 100644 --- a/extension/runner_util/inputs.cpp +++ b/extension/runner_util/inputs.cpp @@ -12,12 +12,12 @@ #include #include +using 
executorch::ET_RUNTIME_NAMESPACE::Method; +using executorch::ET_RUNTIME_NAMESPACE::MethodMeta; +using executorch::ET_RUNTIME_NAMESPACE::TensorInfo; using executorch::runtime::Error; -using executorch::runtime::Method; -using executorch::runtime::MethodMeta; using executorch::runtime::Result; using executorch::runtime::Tag; -using executorch::runtime::TensorInfo; namespace executorch { namespace extension { diff --git a/extension/runner_util/inputs.h b/extension/runner_util/inputs.h index 73722c0d7bf..214b76d67e3 100644 --- a/extension/runner_util/inputs.h +++ b/extension/runner_util/inputs.h @@ -15,6 +15,9 @@ namespace executorch { namespace extension { +using ::executorch::ET_RUNTIME_NAMESPACE::Method; +using ::executorch::ET_RUNTIME_NAMESPACE::TensorInfo; + /** * RAII helper that frees a set of buffers when destroyed. Movable. */ @@ -80,7 +83,7 @@ struct PrepareInputTensorsOptions { * @returns An error on failure. */ executorch::runtime::Result prepare_input_tensors( - executorch::runtime::Method& method, + Method& method, PrepareInputTensorsOptions options = {}); namespace internal { @@ -89,8 +92,8 @@ namespace internal { * fills it with ones, and sets the input at `input_index`. */ executorch::runtime::Error fill_and_set_input( - executorch::runtime::Method& method, - executorch::runtime::TensorInfo& tensor_meta, + Method& method, + TensorInfo& tensor_meta, size_t input_index, void* data_ptr); } // namespace internal diff --git a/extension/runner_util/inputs_aten.cpp b/extension/runner_util/inputs_aten.cpp index 83d12dac42d..b89562a2f69 100644 --- a/extension/runner_util/inputs_aten.cpp +++ b/extension/runner_util/inputs_aten.cpp @@ -15,8 +15,8 @@ #include using executorch::runtime::Error; -using executorch::runtime::Method; -using executorch::runtime::TensorInfo; +using executorch::runtime::aten::Method; +using executorch::runtime::aten::TensorInfo; namespace executorch { namespace extension { diff --git a/extension/tensor/tensor_ptr.cpp b/extension/tensor/tensor_ptr.cpp index c1742fc599b..8a35e83a526 100644 --- a/extension/tensor/tensor_ptr.cpp +++ b/extension/tensor/tensor_ptr.cpp @@ -188,7 +188,7 @@ TensorPtr clone_tensor_ptr(const executorch::aten::Tensor& tensor) { runtime::Error resize_tensor_ptr( TensorPtr& tensor, const std::vector& sizes) { - return runtime::resize_tensor( + return ET_RUNTIME_NAMESPACE::resize_tensor( *tensor, executorch::aten::ArrayRef( sizes.data(), sizes.size())); diff --git a/kernels/aten/cpu/op__empty_dim_order.cpp b/kernels/aten/cpu/op__empty_dim_order.cpp index e75963a9c4e..654b29c778d 100644 --- a/kernels/aten/cpu/op__empty_dim_order.cpp +++ b/kernels/aten/cpu/op__empty_dim_order.cpp @@ -102,7 +102,7 @@ Tensor& _empty_dim_order_out( IntArrayRef size, OptionalIntArrayRef dim_order, Tensor& out) { - executorch::runtime::KernelRuntimeContext ctx{}; + KernelRuntimeContext ctx{}; return _empty_dim_order_out(ctx, size, dim_order, out); } diff --git a/kernels/aten/cpu/op__to_dim_order_copy.cpp b/kernels/aten/cpu/op__to_dim_order_copy.cpp index 10793d24db5..a8216c9a8e9 100644 --- a/kernels/aten/cpu/op__to_dim_order_copy.cpp +++ b/kernels/aten/cpu/op__to_dim_order_copy.cpp @@ -116,7 +116,7 @@ Tensor& _to_dim_order_copy_out( bool non_blocking, OptionalArrayRef dim_order, Tensor& out) { - executorch::runtime::KernelRuntimeContext ctx{}; + KernelRuntimeContext ctx{}; return _to_dim_order_copy_out(ctx, self, non_blocking, dim_order, out); } diff --git a/kernels/portable/cpu/op__to_dim_order_copy.cpp b/kernels/portable/cpu/op__to_dim_order_copy.cpp index 
40ce86e8fdc..70fc3507f05 100644 --- a/kernels/portable/cpu/op__to_dim_order_copy.cpp +++ b/kernels/portable/cpu/op__to_dim_order_copy.cpp @@ -125,7 +125,7 @@ Tensor& _to_dim_order_copy_out( bool non_blocking, OptionalArrayRef dim_order, Tensor& out) { - executorch::runtime::KernelRuntimeContext context{}; + executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext context{}; return _to_dim_order_copy_out(context, self, non_blocking, dim_order, out); } diff --git a/kernels/portable/test/op_gelu_test.cpp b/kernels/portable/test/op_gelu_test.cpp index 19e757b4bd0..2e5cad55c35 100644 --- a/kernels/portable/test/op_gelu_test.cpp +++ b/kernels/portable/test/op_gelu_test.cpp @@ -25,7 +25,7 @@ using torch::executor::testing::TensorFactory; // executorch/kernels/test/op_gelu_test.cpp instead. Tensor& op_gelu_out(const Tensor& self, string_view approximate, Tensor& out) { - executorch::runtime::KernelRuntimeContext context{}; + executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext context{}; return torch::executor::native::gelu_out(context, self, approximate, out); } diff --git a/kernels/prim_ops/register_prim_ops.cpp b/kernels/prim_ops/register_prim_ops.cpp index 1d197b63584..62aead8978f 100644 --- a/kernels/prim_ops/register_prim_ops.cpp +++ b/kernels/prim_ops/register_prim_ops.cpp @@ -381,14 +381,13 @@ static Kernel prim_ops[] = { }; -executorch::runtime::Span kernel_span( - prim_ops, - prim_ops + sizeof(prim_ops) / sizeof(Kernel)); +executorch::runtime::Span + kernel_span(prim_ops, prim_ops + sizeof(prim_ops) / sizeof(Kernel)); // Return value not used. Keep the static variable assignment to register // operators in static initialization time. auto success_with_kernel_reg = - executorch::runtime::register_kernels(kernel_span); + executorch::ET_RUNTIME_NAMESPACE::register_kernels(kernel_span); } // namespace } // namespace function diff --git a/kernels/prim_ops/targets.bzl b/kernels/prim_ops/targets.bzl index c1af21a7e73..d2cff10a194 100644 --- a/kernels/prim_ops/targets.bzl +++ b/kernels/prim_ops/targets.bzl @@ -56,7 +56,7 @@ def define_common_targets(): ":et_copy_index" + aten_suffix, ":et_view" + aten_suffix, "//executorch/runtime/core:evalue" + aten_suffix, - "//executorch/runtime/kernel:operator_registry", + "//executorch/runtime/kernel:operator_registry" + aten_suffix, "//executorch/runtime/kernel:kernel_includes" + aten_suffix, ], ) diff --git a/kernels/quantized/cpu/op_quantize.cpp b/kernels/quantized/cpu/op_quantize.cpp index 5079109683f..632bddd58c4 100644 --- a/kernels/quantized/cpu/op_quantize.cpp +++ b/kernels/quantized/cpu/op_quantize.cpp @@ -22,6 +22,7 @@ namespace native { using Tensor = executorch::aten::Tensor; using Scalar = executorch::aten::Scalar; using ScalarType = executorch::aten::ScalarType; +using KernelRuntimeContext = torch::executor::KernelRuntimeContext; namespace { @@ -214,7 +215,7 @@ Tensor& quantize_per_tensor_tensor_args_out( int64_t quant_max, ScalarType dtype, Tensor& out) { - auto context = executorch::runtime::KernelRuntimeContext(); + auto context = KernelRuntimeContext(); auto& res = quantize_per_tensor_tensor_args_out( context, input, scale, zero_point, quant_min, quant_max, dtype, out); ET_CHECK(context.failure_state() == Error::Ok); diff --git a/kernels/quantized/test/op_add_test.cpp b/kernels/quantized/test/op_add_test.cpp index 17dd1cfb3fc..3f258827973 100644 --- a/kernels/quantized/test/op_add_test.cpp +++ b/kernels/quantized/test/op_add_test.cpp @@ -24,7 +24,7 @@ using executorch::aten::optional; using executorch::aten::Scalar; using 
executorch::aten::ScalarType; using executorch::aten::Tensor; -using executorch::runtime::KernelRuntimeContext; +using executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext; using torch::executor::native::add_out; using torch::executor::native::dequantize_per_tensor_out; using torch::executor::native::quantize_per_tensor_out; diff --git a/kernels/quantized/test/op_embedding2b_test.cpp b/kernels/quantized/test/op_embedding2b_test.cpp index bf48fa4227b..a350b77ec0d 100644 --- a/kernels/quantized/test/op_embedding2b_test.cpp +++ b/kernels/quantized/test/op_embedding2b_test.cpp @@ -21,7 +21,7 @@ using executorch::aten::ArrayRef; using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; -using executorch::runtime::KernelRuntimeContext; +using executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext; using torch::executor::native::quantized_embedding_2bit_out; using torch::executor::testing::TensorFactory; diff --git a/kernels/quantized/test/op_embedding4b_test.cpp b/kernels/quantized/test/op_embedding4b_test.cpp index 9f205be80e3..6ab10376b88 100644 --- a/kernels/quantized/test/op_embedding4b_test.cpp +++ b/kernels/quantized/test/op_embedding4b_test.cpp @@ -21,7 +21,7 @@ using executorch::aten::ArrayRef; using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; -using executorch::runtime::KernelRuntimeContext; +using executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext; using torch::executor::native::quantized_embedding_4bit_out; using torch::executor::testing::TensorFactory; diff --git a/kernels/quantized/test/op_embedding_test.cpp b/kernels/quantized/test/op_embedding_test.cpp index 252aca41314..6c949bd6e69 100644 --- a/kernels/quantized/test/op_embedding_test.cpp +++ b/kernels/quantized/test/op_embedding_test.cpp @@ -24,7 +24,7 @@ using executorch::aten::optional; using executorch::aten::Scalar; using executorch::aten::ScalarType; using executorch::aten::Tensor; -using executorch::runtime::KernelRuntimeContext; +using executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext; using torch::executor::native::dequantize_per_tensor_out; using torch::executor::native::embedding_out; using torch::executor::native::quantize_per_tensor_out; diff --git a/kernels/quantized/test/op_mixed_linear_test.cpp b/kernels/quantized/test/op_mixed_linear_test.cpp index 6b86b199f60..833fc766ffd 100644 --- a/kernels/quantized/test/op_mixed_linear_test.cpp +++ b/kernels/quantized/test/op_mixed_linear_test.cpp @@ -20,7 +20,7 @@ using namespace ::testing; using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; -using executorch::runtime::KernelRuntimeContext; +using executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext; using torch::executor::native::quantized_mixed_linear_out; using torch::executor::testing::TensorFactory; diff --git a/kernels/quantized/test/op_mixed_mm_test.cpp b/kernels/quantized/test/op_mixed_mm_test.cpp index e20ac96d610..4d81089fa91 100644 --- a/kernels/quantized/test/op_mixed_mm_test.cpp +++ b/kernels/quantized/test/op_mixed_mm_test.cpp @@ -20,7 +20,7 @@ using namespace ::testing; using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; -using executorch::runtime::KernelRuntimeContext; +using executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext; using torch::executor::native::quantized_mixed_mm_out; using torch::executor::testing::TensorFactory; diff --git a/kernels/test/TestUtil.h b/kernels/test/TestUtil.h index 
aa220f5bfd5..7ec20c11bef 100644 --- a/kernels/test/TestUtil.h +++ b/kernels/test/TestUtil.h @@ -116,6 +116,6 @@ class OperatorTest : public ::testing::Test { } protected: - executorch::runtime::KernelRuntimeContext context_; + ::torch::executor::KernelRuntimeContext context_; bool expect_failure_; }; diff --git a/kernels/test/custom_kernel_example/op_relu.cpp b/kernels/test/custom_kernel_example/op_relu.cpp index 2cc3eefe0a8..074ebe6b900 100644 --- a/kernels/test/custom_kernel_example/op_relu.cpp +++ b/kernels/test/custom_kernel_example/op_relu.cpp @@ -17,8 +17,8 @@ namespace native { using executorch::aten::ScalarType; using executorch::aten::Tensor; +using executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext; using executorch::runtime::Error; -using executorch::runtime::KernelRuntimeContext; using executorch::runtime::resize_tensor; using executorch::runtime::tensors_have_same_shape_and_dtype; diff --git a/kernels/test/op_atan2_test.cpp b/kernels/test/op_atan2_test.cpp index e69ea0e90c8..436826e2b6d 100644 --- a/kernels/test/op_atan2_test.cpp +++ b/kernels/test/op_atan2_test.cpp @@ -23,7 +23,7 @@ using torch::executor::testing::SupportedFeatures; using torch::executor::testing::TensorFactory; Tensor& op_atan2_out(const Tensor& self, const Tensor& other, Tensor& out) { - executorch::runtime::KernelRuntimeContext context{}; + executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext context{}; return torch::executor::aten::atan2_outf(context, self, other, out); } diff --git a/kernels/test/op_cdist_forward_test.cpp b/kernels/test/op_cdist_forward_test.cpp index 32465ca439b..9ddab4c3c49 100644 --- a/kernels/test/op_cdist_forward_test.cpp +++ b/kernels/test/op_cdist_forward_test.cpp @@ -21,6 +21,7 @@ using executorch::aten::ArrayRef; using executorch::aten::optional; using executorch::aten::ScalarType; using executorch::aten::Tensor; +using executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext; using torch::executor::testing::TensorFactory; Tensor& op_cdist_forward_out( @@ -29,7 +30,7 @@ Tensor& op_cdist_forward_out( double p, optional compute_mode, Tensor& out) { - executorch::runtime::KernelRuntimeContext context{}; + KernelRuntimeContext context{}; return torch::executor::aten::_cdist_forward_outf( context, x1, x2, p, compute_mode, out); } diff --git a/kernels/test/op_clamp_test.cpp b/kernels/test/op_clamp_test.cpp index a1003e892e0..8a021c70303 100644 --- a/kernels/test/op_clamp_test.cpp +++ b/kernels/test/op_clamp_test.cpp @@ -260,7 +260,7 @@ class OpClampTensorOutTest : public OperatorTest { const optional& min, const optional& max, Tensor& out) { - executorch::runtime::KernelRuntimeContext context{}; + executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext context{}; return torch::executor::aten::clamp_outf(context, self, min, max, out); } }; diff --git a/kernels/test/op_diagonal_copy_test.cpp b/kernels/test/op_diagonal_copy_test.cpp index cc0bd02e1a5..35d19bf661e 100644 --- a/kernels/test/op_diagonal_copy_test.cpp +++ b/kernels/test/op_diagonal_copy_test.cpp @@ -27,7 +27,7 @@ Tensor& op_diagonal_copy_out( int64_t dim1, int64_t dim2, Tensor& out) { - executorch::runtime::KernelRuntimeContext context{}; + executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext context{}; return torch::executor::aten::diagonal_copy_outf( context, input, offset, dim1, dim2, out); } diff --git a/kernels/test/op_flip_test.cpp b/kernels/test/op_flip_test.cpp index f240dfd4ad3..be06e397be2 100644 --- a/kernels/test/op_flip_test.cpp +++ b/kernels/test/op_flip_test.cpp @@ -22,7 +22,7 @@ using 
executorch::aten::Tensor; using torch::executor::testing::TensorFactory; Tensor& op_flip_out(const Tensor& input, IntArrayRef dims, Tensor& out) { - executorch::runtime::KernelRuntimeContext context{}; + executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext context{}; return torch::executor::aten::flip_outf(context, input, dims, out); } diff --git a/kernels/test/op_ge_test.cpp b/kernels/test/op_ge_test.cpp index 4b21644a5c5..a79502b266e 100644 --- a/kernels/test/op_ge_test.cpp +++ b/kernels/test/op_ge_test.cpp @@ -18,7 +18,7 @@ using namespace ::testing; using executorch::aten::Scalar; using executorch::aten::ScalarType; using executorch::aten::Tensor; -using executorch::runtime::KernelRuntimeContext; +using executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext; using torch::executor::testing::TensorFactory; class OpGeTensorOutTest : public OperatorTest { diff --git a/kernels/test/op_gt_test.cpp b/kernels/test/op_gt_test.cpp index 29a2fb0e8b8..96c0e95f950 100644 --- a/kernels/test/op_gt_test.cpp +++ b/kernels/test/op_gt_test.cpp @@ -18,7 +18,7 @@ using namespace ::testing; using executorch::aten::Scalar; using executorch::aten::ScalarType; using executorch::aten::Tensor; -using executorch::runtime::KernelRuntimeContext; +using executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext; using torch::executor::testing::TensorFactory; class OpGtScalarOutTest : public OperatorTest { diff --git a/kernels/test/op_le_test.cpp b/kernels/test/op_le_test.cpp index 49ef5235d0f..bcd40d24d89 100644 --- a/kernels/test/op_le_test.cpp +++ b/kernels/test/op_le_test.cpp @@ -18,7 +18,7 @@ using namespace ::testing; using executorch::aten::Scalar; using executorch::aten::ScalarType; using executorch::aten::Tensor; -using executorch::runtime::KernelRuntimeContext; +using executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext; using torch::executor::testing::TensorFactory; class OpLeScalarOutTest : public OperatorTest { diff --git a/kernels/test/op_lt_test.cpp b/kernels/test/op_lt_test.cpp index 51ccb310e4a..eee12c50521 100644 --- a/kernels/test/op_lt_test.cpp +++ b/kernels/test/op_lt_test.cpp @@ -18,7 +18,7 @@ using namespace ::testing; using executorch::aten::Scalar; using executorch::aten::ScalarType; using executorch::aten::Tensor; -using executorch::runtime::KernelRuntimeContext; +using executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext; using torch::executor::testing::TensorFactory; class OpLtScalarOutTest : public OperatorTest { diff --git a/kernels/test/op_maximum_test.cpp b/kernels/test/op_maximum_test.cpp index 9c701e208eb..faa18fa56cd 100644 --- a/kernels/test/op_maximum_test.cpp +++ b/kernels/test/op_maximum_test.cpp @@ -21,7 +21,7 @@ using executorch::aten::Tensor; using torch::executor::testing::TensorFactory; Tensor& op_maximum_out(const Tensor& self, const Tensor& other, Tensor& out) { - executorch::runtime::KernelRuntimeContext context{}; + executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext context{}; return torch::executor::aten::maximum_outf(context, self, other, out); } diff --git a/kernels/test/op_native_batch_norm_test.cpp b/kernels/test/op_native_batch_norm_test.cpp index 67e46b27508..bf05a87312d 100644 --- a/kernels/test/op_native_batch_norm_test.cpp +++ b/kernels/test/op_native_batch_norm_test.cpp @@ -173,7 +173,7 @@ class OpNativeBatchNormLegitOutTest : public OperatorTest { executorch::aten::Tensor& out0, executorch::aten::Tensor& out1, executorch::aten::Tensor& out2) { - executorch::runtime::KernelRuntimeContext context{}; + 
executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext context{}; return torch::executor::aten::_native_batch_norm_legit_outf( context, input, diff --git a/kernels/test/op_native_group_norm_test.cpp b/kernels/test/op_native_group_norm_test.cpp index ea742e97231..7452350ad29 100644 --- a/kernels/test/op_native_group_norm_test.cpp +++ b/kernels/test/op_native_group_norm_test.cpp @@ -32,7 +32,7 @@ ::std::tuple op_native_group_norm_out( Tensor& out0, Tensor& out1, Tensor& out2) { - executorch::runtime::KernelRuntimeContext context{}; + executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext context{}; return torch::executor::aten::native_group_norm_outf( context, input, weight, bias, N, C, HxW, group, eps, out0, out1, out2); } diff --git a/kernels/test/op_ne_test.cpp b/kernels/test/op_ne_test.cpp index 6cb0217ec0f..fe4e6c3621c 100644 --- a/kernels/test/op_ne_test.cpp +++ b/kernels/test/op_ne_test.cpp @@ -18,7 +18,7 @@ using namespace ::testing; using executorch::aten::Scalar; using executorch::aten::ScalarType; using executorch::aten::Tensor; -using executorch::runtime::KernelRuntimeContext; +using executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext; using torch::executor::testing::TensorFactory; class OpNeTest : public OperatorTest { diff --git a/kernels/test/op_pdist_forward_test.cpp b/kernels/test/op_pdist_forward_test.cpp index e6c0d472517..2b28591f7fc 100644 --- a/kernels/test/op_pdist_forward_test.cpp +++ b/kernels/test/op_pdist_forward_test.cpp @@ -23,7 +23,7 @@ using executorch::aten::Tensor; using torch::executor::testing::TensorFactory; Tensor& op_pdist_forward_out(const Tensor& input, double p, Tensor& out) { - executorch::runtime::KernelRuntimeContext context{}; + executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext context{}; return torch::executor::aten::_pdist_forward_outf(context, input, p, out); } diff --git a/kernels/test/op_prod_test.cpp b/kernels/test/op_prod_test.cpp index f9cf53ded57..11a7e3fae4f 100644 --- a/kernels/test/op_prod_test.cpp +++ b/kernels/test/op_prod_test.cpp @@ -23,7 +23,7 @@ using torch::executor::testing::TensorFactory; Tensor& op_prod_out(const Tensor& self, optional dtype, Tensor& out) { - executorch::runtime::KernelRuntimeContext context{}; + executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext context{}; return torch::executor::aten::prod_outf(context, self, dtype, out); } @@ -33,7 +33,7 @@ Tensor& op_prod_int_out( bool keepdim, optional dtype, Tensor& out) { - executorch::runtime::KernelRuntimeContext context{}; + executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext context{}; return torch::executor::aten::prod_outf( context, self, dim, keepdim, dtype, out); } diff --git a/kernels/test/op_reflection_pad1d_test.cpp b/kernels/test/op_reflection_pad1d_test.cpp index 5f3b2a1c273..aebf057326a 100644 --- a/kernels/test/op_reflection_pad1d_test.cpp +++ b/kernels/test/op_reflection_pad1d_test.cpp @@ -25,7 +25,7 @@ Tensor& op_reflection_pad1d_out( const Tensor& input, ArrayRef padding, Tensor& out) { - executorch::runtime::KernelRuntimeContext context{}; + executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext context{}; return torch::executor::aten::reflection_pad1d_outf( context, input, padding, out); } diff --git a/kernels/test/op_reflection_pad2d_test.cpp b/kernels/test/op_reflection_pad2d_test.cpp index 8696b5dff7b..01e0619b9f1 100644 --- a/kernels/test/op_reflection_pad2d_test.cpp +++ b/kernels/test/op_reflection_pad2d_test.cpp @@ -25,7 +25,7 @@ Tensor& op_reflection_pad2d_out( const Tensor& input, ArrayRef padding, Tensor& out) { - 
executorch::runtime::KernelRuntimeContext context{}; + executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext context{}; return torch::executor::aten::reflection_pad2d_outf( context, input, padding, out); } diff --git a/kernels/test/op_reflection_pad3d_test.cpp b/kernels/test/op_reflection_pad3d_test.cpp index 7d5cc84c6bc..55ed906a958 100644 --- a/kernels/test/op_reflection_pad3d_test.cpp +++ b/kernels/test/op_reflection_pad3d_test.cpp @@ -25,7 +25,7 @@ Tensor& op_reflection_pad3d_out( const Tensor& input, ArrayRef padding, Tensor& out) { - executorch::runtime::KernelRuntimeContext context{}; + executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext context{}; return torch::executor::aten::reflection_pad3d_outf( context, input, padding, out); } diff --git a/kernels/test/op_replication_pad1d_test.cpp b/kernels/test/op_replication_pad1d_test.cpp index 9a6d3b2285e..f8a3fc0a48b 100644 --- a/kernels/test/op_replication_pad1d_test.cpp +++ b/kernels/test/op_replication_pad1d_test.cpp @@ -25,7 +25,7 @@ Tensor& op_replication_pad1d_out( const Tensor& input, ArrayRef padding, Tensor& out) { - executorch::runtime::KernelRuntimeContext context{}; + executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext context{}; return torch::executor::aten::replication_pad1d_outf( context, input, padding, out); } diff --git a/kernels/test/op_replication_pad2d_test.cpp b/kernels/test/op_replication_pad2d_test.cpp index 00bc76ac093..7f62f5c9b6e 100644 --- a/kernels/test/op_replication_pad2d_test.cpp +++ b/kernels/test/op_replication_pad2d_test.cpp @@ -25,7 +25,7 @@ Tensor& op_replication_pad2d_out( const Tensor& input, ArrayRef padding, Tensor& out) { - executorch::runtime::KernelRuntimeContext context{}; + executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext context{}; return torch::executor::aten::replication_pad2d_outf( context, input, padding, out); } diff --git a/kernels/test/op_replication_pad3d_test.cpp b/kernels/test/op_replication_pad3d_test.cpp index 010870298d9..5b931fee3f9 100644 --- a/kernels/test/op_replication_pad3d_test.cpp +++ b/kernels/test/op_replication_pad3d_test.cpp @@ -25,7 +25,7 @@ Tensor& op_replication_pad3d_out( const Tensor& input, ArrayRef padding, Tensor& out) { - executorch::runtime::KernelRuntimeContext context{}; + executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext context{}; return torch::executor::aten::replication_pad3d_outf( context, input, padding, out); } diff --git a/kernels/test/op_roll_test.cpp b/kernels/test/op_roll_test.cpp index fc5baaad4a7..4407e395db6 100644 --- a/kernels/test/op_roll_test.cpp +++ b/kernels/test/op_roll_test.cpp @@ -26,7 +26,7 @@ Tensor& op_roll_out( ArrayRef shifts, ArrayRef dims, Tensor& out) { - executorch::runtime::KernelRuntimeContext context{}; + executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext context{}; return torch::executor::aten::roll_outf(context, input, shifts, dims, out); } diff --git a/kernels/test/op_topk_test.cpp b/kernels/test/op_topk_test.cpp index 46098a81b68..bdd185daaae 100644 --- a/kernels/test/op_topk_test.cpp +++ b/kernels/test/op_topk_test.cpp @@ -106,7 +106,8 @@ std::tuple op_topk_values( Tensor& values, Tensor& indices) { TempMemoryAllocator allocator = TempMemoryAllocator(); - executorch::runtime::KernelRuntimeContext context(nullptr, &allocator); + executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext context( + nullptr, &allocator); return torch::executor::aten::topk_outf( context, input, k, dim, largest, sorted, values, indices); } diff --git a/runtime/backend/backend_execution_context.h 
b/runtime/backend/backend_execution_context.h index d2790b158ef..7ee41d8e5b1 100644 --- a/runtime/backend/backend_execution_context.h +++ b/runtime/backend/backend_execution_context.h @@ -12,7 +12,7 @@ #include namespace executorch { -namespace runtime { +namespace ET_RUNTIME_NAMESPACE { /** * BackendExecutionContext will be used to inject run time context. @@ -68,13 +68,13 @@ class BackendExecutionContext final { const char* method_name_ = nullptr; }; -} // namespace runtime +} // namespace ET_RUNTIME_NAMESPACE } // namespace executorch namespace torch { namespace executor { // TODO(T197294990): Remove these deprecated aliases once all users have moved // to the new `::executorch` namespaces. -using ::executorch::runtime::BackendExecutionContext; +using ::executorch::ET_RUNTIME_NAMESPACE::BackendExecutionContext; } // namespace executor } // namespace torch diff --git a/runtime/backend/backend_init_context.h b/runtime/backend/backend_init_context.h index de1661c3af0..71c5182f401 100644 --- a/runtime/backend/backend_init_context.h +++ b/runtime/backend/backend_init_context.h @@ -7,12 +7,12 @@ */ #pragma once +#include #include #include namespace executorch { -namespace runtime { - +namespace ET_RUNTIME_NAMESPACE { /** * BackendInitContext will be used to inject runtime info for to initialize * delegate. @@ -70,13 +70,13 @@ class BackendInitContext final { const NamedDataMap* named_data_map_ = nullptr; }; -} // namespace runtime +} // namespace ET_RUNTIME_NAMESPACE } // namespace executorch namespace torch { namespace executor { // TODO(T197294990): Remove these deprecated aliases once all users have moved // to the new `::executorch` namespaces. -using ::executorch::runtime::BackendInitContext; +using ::executorch::ET_RUNTIME_NAMESPACE::BackendInitContext; } // namespace executor } // namespace torch diff --git a/runtime/backend/interface.cpp b/runtime/backend/interface.cpp index 4fb1eadfa87..ffeb133fbf2 100644 --- a/runtime/backend/interface.cpp +++ b/runtime/backend/interface.cpp @@ -9,7 +9,7 @@ #include namespace executorch { -namespace runtime { +namespace ET_RUNTIME_NAMESPACE { // Pure-virtual dtors still need an implementation. BackendInterface::~BackendInterface() {} @@ -66,5 +66,5 @@ Result get_backend_name(size_t index) { return registered_backends[index].name; } -} // namespace runtime +} // namespace ET_RUNTIME_NAMESPACE } // namespace executorch diff --git a/runtime/backend/interface.h b/runtime/backend/interface.h index 0a3c069a201..95705d48f92 100644 --- a/runtime/backend/interface.h +++ b/runtime/backend/interface.h @@ -22,7 +22,7 @@ #include namespace executorch { -namespace runtime { +namespace ET_RUNTIME_NAMESPACE { struct SizedBuffer { void* buffer; @@ -150,19 +150,20 @@ size_t get_num_registered_backends(); */ Result get_backend_name(size_t index); -} // namespace runtime +} // namespace ET_RUNTIME_NAMESPACE } // namespace executorch namespace torch { namespace executor { // TODO(T197294990): Remove these deprecated aliases once all users have moved // to the new `::executorch` namespaces. 
-using ::executorch::runtime::Backend; -using ::executorch::runtime::CompileSpec; -using ::executorch::runtime::DelegateHandle; -using ::executorch::runtime::get_backend_class; -using ::executorch::runtime::register_backend; -using ::executorch::runtime::SizedBuffer; -using PyTorchBackendInterface = ::executorch::runtime::BackendInterface; +using ::executorch::ET_RUNTIME_NAMESPACE::Backend; +using ::executorch::ET_RUNTIME_NAMESPACE::CompileSpec; +using ::executorch::ET_RUNTIME_NAMESPACE::DelegateHandle; +using ::executorch::ET_RUNTIME_NAMESPACE::get_backend_class; +using ::executorch::ET_RUNTIME_NAMESPACE::register_backend; +using ::executorch::ET_RUNTIME_NAMESPACE::SizedBuffer; +using PyTorchBackendInterface = + ::executorch::ET_RUNTIME_NAMESPACE::BackendInterface; } // namespace executor } // namespace torch diff --git a/runtime/core/event_tracer_hooks.h b/runtime/core/event_tracer_hooks.h index 40754160c41..cd74b447ca8 100644 --- a/runtime/core/event_tracer_hooks.h +++ b/runtime/core/event_tracer_hooks.h @@ -30,7 +30,7 @@ */ namespace executorch { -namespace runtime { +namespace ET_RUNTIME_NAMESPACE { namespace internal { /** @@ -305,7 +305,7 @@ inline void event_tracer_set_bundled_input_index( } } // namespace internal -} // namespace runtime +} // namespace ET_RUNTIME_NAMESPACE } // namespace executorch namespace torch { @@ -313,18 +313,27 @@ namespace executor { namespace internal { // TODO(T197294990): Remove these deprecated aliases once all users have moved // to the new `::executorch` namespaces. -using ::executorch::runtime::internal::event_tracer_begin_profiling_event; -using ::executorch::runtime::internal::event_tracer_create_event_block; -using ::executorch::runtime::internal::event_tracer_end_profiling_event; -using ::executorch::runtime::internal::event_tracer_log_evalue; -using ::executorch::runtime::internal::event_tracer_log_evalue_output; -using ::executorch::runtime::internal::event_tracer_set_bundled_input_index; -using ::executorch::runtime::internal::event_tracer_track_allocation; -using ::executorch::runtime::internal::event_tracer_track_allocator; -using ::executorch::runtime::internal::EventTracerProfileInstructionScope; -using ::executorch::runtime::internal::EventTracerProfileMethodScope; -using ::executorch::runtime::internal::EventTracerProfileOpScope; -using ::executorch::runtime::internal::EventTracerProfileScope; +using ::executorch::ET_RUNTIME_NAMESPACE::internal:: + event_tracer_begin_profiling_event; +using ::executorch::ET_RUNTIME_NAMESPACE::internal:: + event_tracer_create_event_block; +using ::executorch::ET_RUNTIME_NAMESPACE::internal:: + event_tracer_end_profiling_event; +using ::executorch::ET_RUNTIME_NAMESPACE::internal::event_tracer_log_evalue; +using ::executorch::ET_RUNTIME_NAMESPACE::internal:: + event_tracer_log_evalue_output; +using ::executorch::ET_RUNTIME_NAMESPACE::internal:: + event_tracer_set_bundled_input_index; +using ::executorch::ET_RUNTIME_NAMESPACE::internal:: + event_tracer_track_allocation; +using ::executorch::ET_RUNTIME_NAMESPACE::internal:: + event_tracer_track_allocator; +using ::executorch::ET_RUNTIME_NAMESPACE::internal:: + EventTracerProfileInstructionScope; +using ::executorch::ET_RUNTIME_NAMESPACE::internal:: + EventTracerProfileMethodScope; +using ::executorch::ET_RUNTIME_NAMESPACE::internal::EventTracerProfileOpScope; +using ::executorch::ET_RUNTIME_NAMESPACE::internal::EventTracerProfileScope; } // namespace internal } // namespace executor diff --git a/runtime/core/exec_aten/exec_aten.h 
b/runtime/core/exec_aten/exec_aten.h index 704bb868abd..10075ab5920 100644 --- a/runtime/core/exec_aten/exec_aten.h +++ b/runtime/core/exec_aten/exec_aten.h @@ -47,6 +47,21 @@ #endif +/** + * This hack is for separating out ATen mode vs non-ATen mode. In ATen mode, + * we use the ATen types directly. In non-ATen mode, we use the portable types. + * To avoid duplicate symbols and/or duplicate operator registration, when a + * user depends on both the ATen mode and non-ATen mode versions of the + * ExecuTorch library. + */ +#ifndef ET_RUNTIME_NAMESPACE +#if defined(USE_ATEN_LIB) +#define ET_RUNTIME_NAMESPACE runtime::aten +#else +#define ET_RUNTIME_NAMESPACE runtime +#endif +#endif + namespace executorch { namespace aten { diff --git a/runtime/core/exec_aten/testing_util/test/tensor_factory_test.cpp b/runtime/core/exec_aten/testing_util/test/tensor_factory_test.cpp index ed8cc00f4ef..feb00f79b8f 100644 --- a/runtime/core/exec_aten/testing_util/test/tensor_factory_test.cpp +++ b/runtime/core/exec_aten/testing_util/test/tensor_factory_test.cpp @@ -26,8 +26,8 @@ using executorch::aten::SizesType; using executorch::aten::StridesType; using executorch::aten::Tensor; using executorch::aten::TensorList; +using executorch::ET_RUNTIME_NAMESPACE::resize_tensor; using executorch::runtime::Error; -using executorch::runtime::resize_tensor; using executorch::runtime::TensorShapeDynamism; using executorch::runtime::testing::TensorFactory; using executorch::runtime::testing::TensorListFactory; diff --git a/runtime/core/exec_aten/util/tensor_util.h b/runtime/core/exec_aten/util/tensor_util.h index 4e5a0cebb07..b0b79882361 100644 --- a/runtime/core/exec_aten/util/tensor_util.h +++ b/runtime/core/exec_aten/util/tensor_util.h @@ -396,8 +396,7 @@ #scalar_tensor " could not be extracted: wrong type or out of range"); namespace executorch { -namespace runtime { - +namespace ET_RUNTIME_NAMESPACE { // // Utility functions for checking tensor attributes // @@ -446,10 +445,10 @@ inline bool tensor_can_cast_to( executorch::aten::Tensor a, executorch::aten::ScalarType dtype) { ET_CHECK_OR_RETURN_FALSE( - torch::executor::canCast(a.scalar_type(), dtype), + ::torch::executor::canCast(a.scalar_type(), dtype), "Tensor of dtype %s cannot cast to dtype %s", - torch::executor::toString(a.scalar_type()), - torch::executor::toString(dtype)); + ::torch::executor::toString(a.scalar_type()), + ::torch::executor::toString(dtype)); return true; } @@ -458,7 +457,7 @@ inline bool tensor_is_bool_type(executorch::aten::Tensor t) { ET_CHECK_OR_RETURN_FALSE( t.scalar_type() == executorch::aten::ScalarType::Bool, "Expected to find bool type, but tensor has type %s", - torch::executor::toString(t.scalar_type())); + ::torch::executor::toString(t.scalar_type())); return true; } @@ -469,8 +468,8 @@ inline bool tensor_is_type( ET_CHECK_OR_RETURN_FALSE( t.scalar_type() == dtype, "Expected to find %s type, but tensor has type %s", - torch::executor::toString(dtype), - torch::executor::toString(t.scalar_type())); + ::torch::executor::toString(dtype), + ::torch::executor::toString(t.scalar_type())); return true; } @@ -482,9 +481,9 @@ inline bool tensor_is_type( ET_LOG_MSG_AND_RETURN_IF_FALSE( t.scalar_type() == dtype || t.scalar_type() == dtype2, "Expected to find %s or %s type, but tensor has type %s", - torch::executor::toString(dtype), - torch::executor::toString(dtype2), - torch::executor::toString(t.scalar_type())); + ::torch::executor::toString(dtype), + ::torch::executor::toString(dtype2), + ::torch::executor::toString(t.scalar_type())); 
return true; } @@ -498,10 +497,10 @@ inline bool tensor_is_type( t.scalar_type() == dtype || t.scalar_type() == dtype2 || t.scalar_type() == dtype3, "Expected to find %s, %s, or %s type, but tensor has type %s", - torch::executor::toString(dtype), - torch::executor::toString(dtype2), - torch::executor::toString(dtype3), - torch::executor::toString(t.scalar_type())); + ::torch::executor::toString(dtype), + ::torch::executor::toString(dtype2), + ::torch::executor::toString(dtype3), + ::torch::executor::toString(t.scalar_type())); return true; } @@ -510,36 +509,36 @@ inline bool tensor_is_integral_type( executorch::aten::Tensor t, bool includeBool = false) { ET_CHECK_OR_RETURN_FALSE( - torch::executor::isIntegralType(t.scalar_type(), includeBool), + ::torch::executor::isIntegralType(t.scalar_type(), includeBool), "Expected to find a integral type, but tensor has type %s", - torch::executor::toString(t.scalar_type())); + ::torch::executor::toString(t.scalar_type())); return true; } inline bool tensor_is_floating_type(executorch::aten::Tensor t) { ET_CHECK_OR_RETURN_FALSE( - torch::executor::isFloatingType(t.scalar_type()), + ::torch::executor::isFloatingType(t.scalar_type()), "Expected to find a floating type, but tensor has type %s", - torch::executor::toString(t.scalar_type())); + ::torch::executor::toString(t.scalar_type())); return true; } inline bool tensor_is_real_type(executorch::aten::Tensor t) { ET_CHECK_OR_RETURN_FALSE( - torch::executor::isRealType(t.scalar_type()), + ::torch::executor::isRealType(t.scalar_type()), "Expected to find a real type, but tensor has type %s", - torch::executor::toString(t.scalar_type())); + ::torch::executor::toString(t.scalar_type())); return true; } inline bool tensor_is_realh_type(executorch::aten::Tensor t) { ET_CHECK_OR_RETURN_FALSE( - torch::executor::isRealHType(t.scalar_type()), + ::torch::executor::isRealHType(t.scalar_type()), "Expected to find a real type, but tensor has type %s", - torch::executor::toString(t.scalar_type())); + ::torch::executor::toString(t.scalar_type())); return true; } @@ -548,16 +547,16 @@ inline bool tensor_is_realhbf16_type(executorch::aten::Tensor t) { ET_CHECK_OR_RETURN_FALSE( executorch::runtime::isRealHBF16Type(t.scalar_type()), "Expected to find a real type, but tensor has type %s", - torch::executor::toString(t.scalar_type())); + ::torch::executor::toString(t.scalar_type())); return true; } inline bool tensor_is_realhb_type(executorch::aten::Tensor t) { ET_CHECK_OR_RETURN_FALSE( - torch::executor::isRealHBType(t.scalar_type()), + ::torch::executor::isRealHBType(t.scalar_type()), "Expected to find a real type, but tensor has type %s", - torch::executor::toString(t.scalar_type())); + ::torch::executor::toString(t.scalar_type())); return true; } @@ -566,25 +565,25 @@ inline bool tensor_is_realhbbf16_type(executorch::aten::Tensor t) { ET_CHECK_OR_RETURN_FALSE( executorch::runtime::isRealHBBF16Type(t.scalar_type()), "Expected to find a real type, but tensor has type %s", - torch::executor::toString(t.scalar_type())); + ::torch::executor::toString(t.scalar_type())); return true; } inline bool tensor_is_complex_type(executorch::aten::Tensor t) { ET_CHECK_OR_RETURN_FALSE( - torch::executor::isComplexType(t.scalar_type()), + ::torch::executor::isComplexType(t.scalar_type()), "Expected to find a complex type, but tensor has type %s", - torch::executor::toString(t.scalar_type())); + ::torch::executor::toString(t.scalar_type())); return true; } inline bool tensor_is_bits_type(executorch::aten::Tensor t) { 
ET_CHECK_OR_RETURN_FALSE( - torch::executor::isBitsType(t.scalar_type()), + ::torch::executor::isBitsType(t.scalar_type()), "Expected to find a bits type, but tensor has type %s", - torch::executor::toString(t.scalar_type())); + ::torch::executor::toString(t.scalar_type())); return true; } @@ -595,8 +594,8 @@ inline bool tensors_have_same_dtype( ET_CHECK_OR_RETURN_FALSE( a.scalar_type() == b.scalar_type(), ET_TENSOR_CHECK_PREFIX__ ": dtype={%s, %s}", - torch::executor::toString(a.scalar_type()), - torch::executor::toString(b.scalar_type())); + ::torch::executor::toString(a.scalar_type()), + ::torch::executor::toString(b.scalar_type())); return true; } @@ -607,9 +606,9 @@ inline bool tensors_have_same_dtype( ET_CHECK_OR_RETURN_FALSE( a.scalar_type() == b.scalar_type() && b.scalar_type() == c.scalar_type(), ET_TENSOR_CHECK_PREFIX__ ": dtype={%s, %s, %s}", - torch::executor::toString(a.scalar_type()), - torch::executor::toString(b.scalar_type()), - torch::executor::toString(c.scalar_type())); + ::torch::executor::toString(a.scalar_type()), + ::torch::executor::toString(b.scalar_type()), + ::torch::executor::toString(c.scalar_type())); return true; } @@ -1349,60 +1348,61 @@ inline size_t calculate_linear_index( return index; } -} // namespace runtime +} // namespace ET_RUNTIME_NAMESPACE } // namespace executorch namespace torch { namespace executor { // TODO(T197294990): Remove these deprecated aliases once all users have moved // to the new `::executorch` namespaces. -using ::executorch::runtime::calculate_linear_index; -using ::executorch::runtime::coordinateToIndex; -using ::executorch::runtime::dim_is_valid; -using ::executorch::runtime::extract_scalar_tensor; -using ::executorch::runtime::get_dim_order; -using ::executorch::runtime::getLeadingDims; -using ::executorch::runtime::getTrailingDims; -using ::executorch::runtime::indexToCoordinate; +using ::executorch::ET_RUNTIME_NAMESPACE::calculate_linear_index; +using ::executorch::ET_RUNTIME_NAMESPACE::coordinateToIndex; +using ::executorch::ET_RUNTIME_NAMESPACE::dim_is_valid; +using ::executorch::ET_RUNTIME_NAMESPACE::extract_scalar_tensor; +using ::executorch::ET_RUNTIME_NAMESPACE::get_dim_order; +using ::executorch::ET_RUNTIME_NAMESPACE::getLeadingDims; +using ::executorch::ET_RUNTIME_NAMESPACE::getTrailingDims; +using ::executorch::ET_RUNTIME_NAMESPACE::indexToCoordinate; +using ::executorch::ET_RUNTIME_NAMESPACE::nonempty_size; +using ::executorch::ET_RUNTIME_NAMESPACE::nonzero_dim; +using ::executorch::ET_RUNTIME_NAMESPACE::resize; +using ::executorch::ET_RUNTIME_NAMESPACE::resize_tensor; +using ::executorch::ET_RUNTIME_NAMESPACE::tensor_can_cast_to; +using ::executorch::ET_RUNTIME_NAMESPACE::tensor_dim_has_index; +using ::executorch::ET_RUNTIME_NAMESPACE::tensor_has_dim; +using ::executorch::ET_RUNTIME_NAMESPACE::tensor_has_expected_size; +using ::executorch::ET_RUNTIME_NAMESPACE::tensor_has_non_empty_dim; +using ::executorch::ET_RUNTIME_NAMESPACE::tensor_has_rank_greater_or_equal_to; +using ::executorch::ET_RUNTIME_NAMESPACE::tensor_has_rank_smaller_or_equal_to; +using ::executorch::ET_RUNTIME_NAMESPACE::tensor_has_valid_dim_order; +using ::executorch::ET_RUNTIME_NAMESPACE::tensor_is_bits_type; +using ::executorch::ET_RUNTIME_NAMESPACE::tensor_is_bool_type; +using ::executorch::ET_RUNTIME_NAMESPACE::tensor_is_complex_type; +using ::executorch::ET_RUNTIME_NAMESPACE::tensor_is_contiguous; +using ::executorch::ET_RUNTIME_NAMESPACE::tensor_is_default_dim_order; +using ::executorch::ET_RUNTIME_NAMESPACE:: + 
tensor_is_default_or_channels_last_dim_order; +using ::executorch::ET_RUNTIME_NAMESPACE::tensor_is_floating_type; +using ::executorch::ET_RUNTIME_NAMESPACE::tensor_is_integral_type; +using ::executorch::ET_RUNTIME_NAMESPACE::tensor_is_rank; +using ::executorch::ET_RUNTIME_NAMESPACE::tensor_is_real_type; +using ::executorch::ET_RUNTIME_NAMESPACE::tensor_is_realh_type; +using ::executorch::ET_RUNTIME_NAMESPACE::tensor_is_realhb_type; +using ::executorch::ET_RUNTIME_NAMESPACE::tensor_is_scalar; +using ::executorch::ET_RUNTIME_NAMESPACE::tensors_have_same_dim_order; +using ::executorch::ET_RUNTIME_NAMESPACE::tensors_have_same_dtype; +using ::executorch::ET_RUNTIME_NAMESPACE::tensors_have_same_rank; +using ::executorch::ET_RUNTIME_NAMESPACE::tensors_have_same_shape; +using ::executorch::ET_RUNTIME_NAMESPACE::tensors_have_same_shape_and_dtype; +using ::executorch::ET_RUNTIME_NAMESPACE::tensors_have_same_size_at_dims; +using ::executorch::ET_RUNTIME_NAMESPACE::tensors_have_same_strides; using ::executorch::runtime::kTensorDimensionLimit; -using ::executorch::runtime::nonempty_size; -using ::executorch::runtime::nonzero_dim; -using ::executorch::runtime::resize; -using ::executorch::runtime::resize_tensor; -using ::executorch::runtime::tensor_can_cast_to; -using ::executorch::runtime::tensor_dim_has_index; -using ::executorch::runtime::tensor_has_dim; -using ::executorch::runtime::tensor_has_expected_size; -using ::executorch::runtime::tensor_has_non_empty_dim; -using ::executorch::runtime::tensor_has_rank_greater_or_equal_to; -using ::executorch::runtime::tensor_has_rank_smaller_or_equal_to; -using ::executorch::runtime::tensor_has_valid_dim_order; -using ::executorch::runtime::tensor_is_bits_type; -using ::executorch::runtime::tensor_is_bool_type; -using ::executorch::runtime::tensor_is_complex_type; -using ::executorch::runtime::tensor_is_contiguous; -using ::executorch::runtime::tensor_is_default_dim_order; -using ::executorch::runtime::tensor_is_default_or_channels_last_dim_order; -using ::executorch::runtime::tensor_is_floating_type; -using ::executorch::runtime::tensor_is_integral_type; -using ::executorch::runtime::tensor_is_rank; -using ::executorch::runtime::tensor_is_real_type; -using ::executorch::runtime::tensor_is_realh_type; -using ::executorch::runtime::tensor_is_realhb_type; -using ::executorch::runtime::tensor_is_scalar; -using ::executorch::runtime::tensors_have_same_dim_order; -using ::executorch::runtime::tensors_have_same_dtype; -using ::executorch::runtime::tensors_have_same_rank; -using ::executorch::runtime::tensors_have_same_shape; -using ::executorch::runtime::tensors_have_same_shape_and_dtype; -using ::executorch::runtime::tensors_have_same_size_at_dims; -using ::executorch::runtime::tensors_have_same_strides; namespace internal { -using ::executorch::runtime::internal::copy_tensor_data; -using ::executorch::runtime::internal::reset_data_ptr; -using ::executorch::runtime::internal::resize_tensor_impl; -using ::executorch::runtime::internal::set_tensor_data; -using ::executorch::runtime::internal::share_tensor_data; +using ::executorch::ET_RUNTIME_NAMESPACE::internal::copy_tensor_data; +using ::executorch::ET_RUNTIME_NAMESPACE::internal::reset_data_ptr; +using ::executorch::ET_RUNTIME_NAMESPACE::internal::resize_tensor_impl; +using ::executorch::ET_RUNTIME_NAMESPACE::internal::set_tensor_data; +using ::executorch::ET_RUNTIME_NAMESPACE::internal::share_tensor_data; } // namespace internal } // namespace executor } // namespace torch diff --git 
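The `torch::executor` using-declarations rewritten above keep the deprecated spellings working regardless of which flavor a translation unit is built against. A self-contained toy of that re-export pattern; the stub function and the hard-coded portable macro value are stand-ins, not ExecuTorch API.

```
#include <cstdio>

// Stand-in for the real macro; a portable build is assumed here.
#define ET_RUNTIME_NAMESPACE runtime

namespace executorch {
namespace ET_RUNTIME_NAMESPACE {
inline int resize_tensor_stub() { return 42; } // hypothetical helper
} // namespace ET_RUNTIME_NAMESPACE
} // namespace executorch

namespace torch {
namespace executor {
// Deprecated alias layer: re-export whichever flavor this TU was built with.
using ::executorch::ET_RUNTIME_NAMESPACE::resize_tensor_stub;
} // namespace executor
} // namespace torch

int main() {
  // Legacy call sites keep compiling with the old spelling.
  std::printf("%d\n", torch::executor::resize_tensor_stub());
  return 0;
}
```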
a/runtime/core/exec_aten/util/tensor_util_aten.cpp b/runtime/core/exec_aten/util/tensor_util_aten.cpp index 4df273d4dbb..ddfd0560a69 100644 --- a/runtime/core/exec_aten/util/tensor_util_aten.cpp +++ b/runtime/core/exec_aten/util/tensor_util_aten.cpp @@ -12,7 +12,7 @@ #include namespace executorch { -namespace runtime { +namespace ET_RUNTIME_NAMESPACE { /** * Implementation for ATen tensor util, should only be included in * `_aten` target and only be used in ATen mode. Explicitly taking @@ -214,6 +214,5 @@ Error resize_tensor_impl( } } // namespace internal - -} // namespace runtime +} // namespace ET_RUNTIME_NAMESPACE } // namespace executorch diff --git a/runtime/core/exec_aten/util/test/tensor_util_test.cpp b/runtime/core/exec_aten/util/test/tensor_util_test.cpp index 7d30b0bbdbe..cdc391adf20 100644 --- a/runtime/core/exec_aten/util/test/tensor_util_test.cpp +++ b/runtime/core/exec_aten/util/test/tensor_util_test.cpp @@ -17,7 +17,7 @@ using namespace ::testing; using executorch::aten::ScalarType; using executorch::aten::Tensor; -using executorch::runtime::extract_scalar_tensor; +using executorch::ET_RUNTIME_NAMESPACE::extract_scalar_tensor; using executorch::runtime::testing::TensorFactory; class TensorUtilTest : public ::testing::Test { @@ -148,13 +148,13 @@ TEST_F(TensorUtilTest, GetLeadingDimsSmokeTest) { Tensor t = tf_int_.ones({2, 3, 4}); // getLeadingDims(t, 1) => t.size(0) - EXPECT_EQ(executorch::runtime::getLeadingDims(t, 1), 2); + EXPECT_EQ(executorch::ET_RUNTIME_NAMESPACE::getLeadingDims(t, 1), 2); // getLeadingDims(t, 2) => t.size(0) * t.size(1) - EXPECT_EQ(executorch::runtime::getLeadingDims(t, 2), 6); + EXPECT_EQ(executorch::ET_RUNTIME_NAMESPACE::getLeadingDims(t, 2), 6); // getLeadingDims(t, 3) => t.size(0) * t.size(1) * t.size(2) - EXPECT_EQ(executorch::runtime::getLeadingDims(t, 3), 24); + EXPECT_EQ(executorch::ET_RUNTIME_NAMESPACE::getLeadingDims(t, 3), 24); } TEST_F(TensorUtilTest, GetLeadingDimsInputOutOfBoundDies) { @@ -162,9 +162,9 @@ TEST_F(TensorUtilTest, GetLeadingDimsInputOutOfBoundDies) { Tensor t = tf_int_.ones({2, 3, 4}); // dim needs to be in the range [0, t.dim()] - ET_EXPECT_DEATH(executorch::runtime::getLeadingDims(t, -2), ""); - ET_EXPECT_DEATH(executorch::runtime::getLeadingDims(t, -1), ""); - ET_EXPECT_DEATH(executorch::runtime::getLeadingDims(t, 4), ""); + ET_EXPECT_DEATH(executorch::ET_RUNTIME_NAMESPACE::getLeadingDims(t, -2), ""); + ET_EXPECT_DEATH(executorch::ET_RUNTIME_NAMESPACE::getLeadingDims(t, -1), ""); + ET_EXPECT_DEATH(executorch::ET_RUNTIME_NAMESPACE::getLeadingDims(t, 4), ""); } TEST_F(TensorUtilTest, GetTrailingDimsSmokeTest) { @@ -172,13 +172,13 @@ TEST_F(TensorUtilTest, GetTrailingDimsSmokeTest) { Tensor t = tf_int_.ones({2, 3, 4}); // getTrailingDims(t, 1) => t.size(2) - EXPECT_EQ(executorch::runtime::getTrailingDims(t, 1), 4); + EXPECT_EQ(executorch::ET_RUNTIME_NAMESPACE::getTrailingDims(t, 1), 4); // getTrailingDims(t, 0) => t.size(1) * t.size(2) - EXPECT_EQ(executorch::runtime::getTrailingDims(t, 0), 12); + EXPECT_EQ(executorch::ET_RUNTIME_NAMESPACE::getTrailingDims(t, 0), 12); // getTrailingDims(t, -1) => t.size(0) * t.size(1) * t.size(2) - EXPECT_EQ(executorch::runtime::getTrailingDims(t, -1), 24); + EXPECT_EQ(executorch::ET_RUNTIME_NAMESPACE::getTrailingDims(t, -1), 24); } TEST_F(TensorUtilTest, GetTrailingDimsInputOutOfBoundDies) { @@ -186,9 +186,9 @@ TEST_F(TensorUtilTest, GetTrailingDimsInputOutOfBoundDies) { Tensor t = tf_int_.ones({2, 3, 4}); // dim needs to be in the range [-1, t.dim() - 1) - 
ET_EXPECT_DEATH(executorch::runtime::getTrailingDims(t, -2), ""); - ET_EXPECT_DEATH(executorch::runtime::getTrailingDims(t, 3), ""); - ET_EXPECT_DEATH(executorch::runtime::getTrailingDims(t, 4), ""); + ET_EXPECT_DEATH(executorch::ET_RUNTIME_NAMESPACE::getTrailingDims(t, -2), ""); + ET_EXPECT_DEATH(executorch::ET_RUNTIME_NAMESPACE::getTrailingDims(t, 3), ""); + ET_EXPECT_DEATH(executorch::ET_RUNTIME_NAMESPACE::getTrailingDims(t, 4), ""); } TEST_F(TensorUtilTest, ContiguousCheckSupported) { @@ -421,7 +421,7 @@ TEST_F(TensorUtilTest, BoolTensorNotScalarFails) { // TEST_F(TensorUtilTest, TensorIsRankTest) { - using executorch::runtime::tensor_is_rank; + using executorch::ET_RUNTIME_NAMESPACE::tensor_is_rank; Tensor a = tf_float_.ones({2, 3, 5}); EXPECT_TRUE(tensor_is_rank(a, 3)); @@ -430,7 +430,7 @@ TEST_F(TensorUtilTest, TensorIsRankTest) { } TEST_F(TensorUtilTest, TensorHasDimTest) { - using executorch::runtime::tensor_has_dim; + using executorch::ET_RUNTIME_NAMESPACE::tensor_has_dim; Tensor a = tf_float_.ones({2, 3, 5}); EXPECT_TRUE(tensor_has_dim(a, 2)); @@ -445,7 +445,7 @@ TEST_F(TensorUtilTest, TensorHasDimTest) { } TEST_F(TensorUtilTest, TensorsHaveSameDtypeTest) { - using executorch::runtime::tensors_have_same_dtype; + using executorch::ET_RUNTIME_NAMESPACE::tensors_have_same_dtype; Tensor a = tf_float_.ones({2, 3}); Tensor b = tf_float_.ones({2, 3}); Tensor c = tf_float_.ones({3, 3}); @@ -458,7 +458,7 @@ TEST_F(TensorUtilTest, TensorsHaveSameDtypeTest) { } TEST_F(TensorUtilTest, TensorsHaveSameSizeAtDimTest) { - using executorch::runtime::tensors_have_same_size_at_dims; + using executorch::ET_RUNTIME_NAMESPACE::tensors_have_same_size_at_dims; Tensor a = tf_float_.ones({2, 3, 4, 5}); Tensor b = tf_float_.ones({5, 4, 3, 2}); @@ -470,7 +470,7 @@ TEST_F(TensorUtilTest, TensorsHaveSameSizeAtDimTest) { } TEST_F(TensorUtilTest, TensorsHaveSameShapeTest) { - using executorch::runtime::tensors_have_same_shape; + using executorch::ET_RUNTIME_NAMESPACE::tensors_have_same_shape; Tensor a = tf_float_.ones({2, 3}); Tensor b = tf_int_.ones({2, 3}); Tensor c = tf_byte_.ones({2, 3}); @@ -493,7 +493,7 @@ TEST_F(TensorUtilTest, TensorsHaveSameShapeTest) { } TEST_F(TensorUtilTest, TensorsHaveSameShapeAndDtypeTest) { - using executorch::runtime::tensors_have_same_shape_and_dtype; + using executorch::ET_RUNTIME_NAMESPACE::tensors_have_same_shape_and_dtype; Tensor a = tf_float_.ones({2, 3}); Tensor b = tf_float_.ones({2, 3}); Tensor c = tf_float_.ones({2, 3}); @@ -515,7 +515,7 @@ TEST_F(TensorUtilTest, TensorsHaveSameShapeAndDtypeTest) { } TEST_F(TensorUtilTest, TensorsHaveSameStridesTest) { - using executorch::runtime::tensors_have_same_strides; + using executorch::ET_RUNTIME_NAMESPACE::tensors_have_same_strides; Tensor a = tf_float_.full_channels_last({4, 5, 2, 3}, 1); Tensor b = tf_float_.full_channels_last({4, 5, 2, 3}, 2); Tensor c = tf_float_.full_channels_last({4, 5, 2, 3}, 3); @@ -530,7 +530,7 @@ TEST_F(TensorUtilTest, TensorsHaveSameStridesTest) { } TEST_F(TensorUtilTest, TensorIsContiguous) { - using executorch::runtime::tensor_is_contiguous; + using executorch::ET_RUNTIME_NAMESPACE::tensor_is_contiguous; // Note that the strides.size() == 0 case is not tested, since Tensor a = tf_float_.full_channels_last({4, 5, 2, 3}, 1); Tensor b = tf_float_.ones({4, 5, 2, 3}); @@ -547,7 +547,7 @@ TEST_F(TensorUtilTest, ResizeZeroDimTensor) { Tensor a = tf_float_.ones({}); EXPECT_EQ( - executorch::runtime::resize_tensor(a, {}), + executorch::ET_RUNTIME_NAMESPACE::resize_tensor(a, {}), 
executorch::runtime::Error::Ok); EXPECT_EQ(a.dim(), 0); } diff --git a/runtime/core/named_data_map.h b/runtime/core/named_data_map.h index e79c7035989..14179d22795 100644 --- a/runtime/core/named_data_map.h +++ b/runtime/core/named_data_map.h @@ -22,8 +22,7 @@ #include namespace executorch { -namespace runtime { - +namespace ET_RUNTIME_NAMESPACE { /** * Interface to access and retrieve data via name. * See executorch/extension/flat_tensor/ for an example. @@ -37,8 +36,8 @@ class ET_EXPERIMENTAL NamedDataMap { * @param key The name of the tensor. * @return Result containing TensorLayout with tensor metadata. */ - ET_NODISCARD virtual Result - get_metadata(const char* key) const = 0; + ET_NODISCARD virtual Result get_metadata( + const char* key) const = 0; /** * Get data by key. * @@ -78,7 +77,7 @@ class ET_EXPERIMENTAL NamedDataMap { ET_NODISCARD virtual Result get_key(size_t index) const = 0; }; -} // namespace runtime +} // namespace ET_RUNTIME_NAMESPACE } // namespace executorch #ifdef __GNUC__ diff --git a/runtime/core/targets.bzl b/runtime/core/targets.bzl index 3195e727d96..d3e02b1afb5 100644 --- a/runtime/core/targets.bzl +++ b/runtime/core/targets.bzl @@ -95,9 +95,9 @@ def define_common_targets(): "@EXECUTORCH_CLIENTS", ], exported_deps = [ - "//executorch/runtime/core:core", - "//executorch/runtime/core/exec_aten:lib" + aten_suffix, + ":core", ":tag", + "//executorch/runtime/core/exec_aten:lib" + aten_suffix, ], ) @@ -119,6 +119,37 @@ def define_common_targets(): ], ) + runtime.cxx_library( + name = "named_data_map" + aten_suffix, + exported_headers = [ + "named_data_map.h", + ], + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + exported_deps = [ + ":tensor_layout" + aten_suffix, + "//executorch/runtime/core/exec_aten:lib" + aten_suffix, + ], + ) + + + runtime.cxx_library( + name = "tensor_layout" + aten_suffix, + srcs = ["tensor_layout.cpp"], + exported_headers = ["tensor_layout.h"], + deps = [ + "//executorch/runtime/core/portable_type/c10/c10:c10", + ], + exported_deps = [ + ":core", + "//executorch/runtime/core/exec_aten:lib" + aten_suffix, + "//executorch/runtime/core/exec_aten/util:scalar_type_util" + aten_suffix, + ], + visibility = ["//executorch/..."], + ) + runtime.cxx_library( name = "tag", srcs = ["tag.cpp"], @@ -133,31 +164,3 @@ def define_common_targets(): "//executorch/...", ], ) - - runtime.cxx_library( - name = "named_data_map", - exported_headers = [ - "named_data_map.h", - ], - visibility = [ - "//executorch/...", - "@EXECUTORCH_CLIENTS", - ], - exported_deps = [ - ":tensor_layout", - ], - ) - - runtime.cxx_library( - name = "tensor_layout", - srcs = ["tensor_layout.cpp"], - exported_headers = ["tensor_layout.h"], - deps = [ - "//executorch/runtime/core/portable_type/c10/c10:c10", - ], - exported_deps = [ - ":core", - "//executorch/runtime/core/exec_aten:lib", - ], - visibility = ["//executorch/..."], - ) diff --git a/runtime/core/tensor_layout.cpp b/runtime/core/tensor_layout.cpp index 2b862e6dc14..d33f79f27c4 100644 --- a/runtime/core/tensor_layout.cpp +++ b/runtime/core/tensor_layout.cpp @@ -13,7 +13,7 @@ #include namespace executorch { -namespace runtime { +namespace ET_RUNTIME_NAMESPACE { namespace { Result calculate_nbytes( @@ -51,5 +51,5 @@ Result TensorLayout::create( } return TensorLayout(sizes, dim_order, scalar_type, nbytes.get()); } -} // namespace runtime +} // namespace ET_RUNTIME_NAMESPACE } // namespace executorch diff --git a/runtime/core/tensor_layout.h b/runtime/core/tensor_layout.h index c2c3833f528..42131e6506e 100644 --- 
a/runtime/core/tensor_layout.h +++ b/runtime/core/tensor_layout.h @@ -14,7 +14,7 @@ #include namespace executorch { -namespace runtime { +namespace ET_RUNTIME_NAMESPACE { /** * Describes the layout of a tensor. @@ -89,5 +89,5 @@ class ET_EXPERIMENTAL TensorLayout final { const size_t nbytes_; }; -} // namespace runtime +} // namespace ET_RUNTIME_NAMESPACE } // namespace executorch diff --git a/runtime/executor/method.cpp b/runtime/executor/method.cpp index 41d44522a22..63a3166bfdd 100644 --- a/runtime/executor/method.cpp +++ b/runtime/executor/method.cpp @@ -32,9 +32,8 @@ #include namespace executorch { -namespace runtime { +namespace ET_RUNTIME_NAMESPACE { -using deserialization::NamedData; using internal::PlatformMemoryAllocator; /** @@ -1640,5 +1639,5 @@ Method::~Method() { } // All other fields are trivially destructible. } -} // namespace runtime +} // namespace ET_RUNTIME_NAMESPACE } // namespace executorch diff --git a/runtime/executor/method.h b/runtime/executor/method.h index 0ca2df440ad..c730af5778b 100644 --- a/runtime/executor/method.h +++ b/runtime/executor/method.h @@ -32,7 +32,7 @@ struct EValue; } // namespace executorch_flatbuffer namespace executorch { -namespace runtime { +namespace ET_RUNTIME_NAMESPACE { // Forward declare NamedData. This is a public header and must not include // internal data types. @@ -394,14 +394,14 @@ class Method final { void log_outputs(); }; -} // namespace runtime +} // namespace ET_RUNTIME_NAMESPACE } // namespace executorch namespace torch { namespace executor { // TODO(T197294990): Remove these deprecated aliases once all users have moved // to the new `::executorch` namespaces. -using ::executorch::runtime::Method; +using ::executorch::ET_RUNTIME_NAMESPACE::Method; } // namespace executor } // namespace torch diff --git a/runtime/executor/method_meta.cpp b/runtime/executor/method_meta.cpp index 8f84fea940f..687b9c30471 100644 --- a/runtime/executor/method_meta.cpp +++ b/runtime/executor/method_meta.cpp @@ -16,7 +16,7 @@ #include namespace executorch { -namespace runtime { +namespace ET_RUNTIME_NAMESPACE { namespace { Result get_tag( @@ -279,6 +279,5 @@ size_t MethodMeta::num_instructions() const { } return num_instructions; } - -} // namespace runtime +} // namespace ET_RUNTIME_NAMESPACE } // namespace executorch diff --git a/runtime/executor/method_meta.h b/runtime/executor/method_meta.h index d9bb64d68a7..a74cb090523 100644 --- a/runtime/executor/method_meta.h +++ b/runtime/executor/method_meta.h @@ -20,7 +20,7 @@ struct ExecutionPlan; } // namespace executorch_flatbuffer namespace executorch { -namespace runtime { +namespace ET_RUNTIME_NAMESPACE { /** * Metadata about a specific tensor of an ExecuTorch Program. @@ -240,14 +240,14 @@ class MethodMeta final { const executorch_flatbuffer::ExecutionPlan* s_plan_; }; -} // namespace runtime +} // namespace ET_RUNTIME_NAMESPACE } // namespace executorch namespace torch { namespace executor { // TODO(T197294990): Remove these deprecated aliases once all users have moved // to the new `::executorch` namespaces. 
-using ::executorch::runtime::MethodMeta; -using ::executorch::runtime::TensorInfo; +using ::executorch::ET_RUNTIME_NAMESPACE::MethodMeta; +using ::executorch::ET_RUNTIME_NAMESPACE::TensorInfo; } // namespace executor } // namespace torch diff --git a/runtime/executor/platform_memory_allocator.h b/runtime/executor/platform_memory_allocator.h index 09195a460ac..7ab58bf0f3d 100644 --- a/runtime/executor/platform_memory_allocator.h +++ b/runtime/executor/platform_memory_allocator.h @@ -17,7 +17,7 @@ #include namespace executorch { -namespace runtime { +namespace ET_RUNTIME_NAMESPACE { namespace internal { /** @@ -107,5 +107,5 @@ class PlatformMemoryAllocator final : public MemoryAllocator { }; } // namespace internal -} // namespace runtime +} // namespace ET_RUNTIME_NAMESPACE } // namespace executorch diff --git a/runtime/executor/program.cpp b/runtime/executor/program.cpp index 14e0b83d8aa..238c806b1d6 100644 --- a/runtime/executor/program.cpp +++ b/runtime/executor/program.cpp @@ -28,8 +28,7 @@ #endif namespace executorch { -namespace runtime { - +namespace ET_RUNTIME_NAMESPACE { namespace { /** @@ -535,5 +534,5 @@ Error Program::load_mutable_subsegment_into( segment_base_offset_ + segment->offset() + offset, size, info, buffer); } -} // namespace runtime +} // namespace ET_RUNTIME_NAMESPACE } // namespace executorch diff --git a/runtime/executor/program.h b/runtime/executor/program.h index 0932e51619f..9670fd7c79f 100644 --- a/runtime/executor/program.h +++ b/runtime/executor/program.h @@ -36,8 +36,7 @@ struct Program; } // namespace executorch_flatbuffer namespace executorch { -namespace runtime { - +namespace ET_RUNTIME_NAMESPACE { namespace testing { // Provides test access to private Program methods. class ProgramTestFriend; @@ -313,14 +312,14 @@ class Program final { std::optional pte_data_map_; }; -} // namespace runtime +} // namespace ET_RUNTIME_NAMESPACE } // namespace executorch namespace torch { namespace executor { // TODO(T197294990): Remove these deprecated aliases once all users have moved // to the new `::executorch` namespaces. 
-using ::executorch::runtime::Program; +using ::executorch::ET_RUNTIME_NAMESPACE::Program; } // namespace executor } // namespace torch diff --git a/runtime/executor/pte_data_map.cpp b/runtime/executor/pte_data_map.cpp index 5829395028a..fd064cb8256 100644 --- a/runtime/executor/pte_data_map.cpp +++ b/runtime/executor/pte_data_map.cpp @@ -10,7 +10,7 @@ #include namespace executorch { -namespace runtime { +namespace ET_RUNTIME_NAMESPACE { namespace internal { /* static */ executorch::runtime::Result PteDataMap::create( @@ -83,5 +83,5 @@ ET_NODISCARD executorch::runtime::Result PteDataMap::get_key( } } // namespace internal -} // namespace runtime +} // namespace ET_RUNTIME_NAMESPACE } // namespace executorch diff --git a/runtime/executor/pte_data_map.h b/runtime/executor/pte_data_map.h index 01c15555786..b26c0ac42f9 100644 --- a/runtime/executor/pte_data_map.h +++ b/runtime/executor/pte_data_map.h @@ -46,7 +46,7 @@ using FlatbufferDataSegment = flatbuffers:: #endif namespace executorch { -namespace runtime { +namespace ET_RUNTIME_NAMESPACE { namespace internal { /** @@ -147,5 +147,5 @@ class PteDataMap final : public NamedDataMap { }; } // namespace internal -} // namespace runtime +} // namespace ET_RUNTIME_NAMESPACE } // namespace executorch diff --git a/runtime/executor/targets.bzl b/runtime/executor/targets.bzl index cfb6c607359..649b2c13cc1 100644 --- a/runtime/executor/targets.bzl +++ b/runtime/executor/targets.bzl @@ -42,30 +42,33 @@ def define_common_targets(): ], ) - runtime.cxx_library( - name = "pte_data_map", - srcs = [ - "pte_data_map.cpp", - ], - exported_headers = [ - "pte_data_map.h", - ], - visibility = [ - "//executorch/runtime/executor/...", - "@EXECUTORCH_CLIENTS", - ], - exported_deps = [ - "//executorch/runtime/core:core", - "//executorch/runtime/core:named_data_map", - ], - deps = [ - "//executorch/schema:program", - ], - exported_preprocessor_flags = [] if runtime.is_oss else ["-DEXECUTORCH_INTERNAL_FLATBUFFERS=1"], - ) for aten_mode in get_aten_mode_options(): aten_suffix = "_aten" if aten_mode else "" + + runtime.cxx_library( + name = "pte_data_map" + aten_suffix, + srcs = [ + "pte_data_map.cpp", + ], + exported_headers = [ + "pte_data_map.h", + ], + visibility = [ + "//executorch/runtime/executor/...", + "@EXECUTORCH_CLIENTS", + ], + exported_deps = [ + "//executorch/runtime/core:core", + "//executorch/runtime/core:named_data_map" + aten_suffix, + "//executorch/runtime/core/exec_aten/util:scalar_type_util" + aten_suffix, + ], + deps = [ + "//executorch/schema:program", + ], + exported_preprocessor_flags = [] if runtime.is_oss else ["-DEXECUTORCH_INTERNAL_FLATBUFFERS=1"], + ) + runtime.cxx_library( name = "program" + aten_suffix, exported_deps = [ @@ -103,17 +106,17 @@ def define_common_targets(): preprocessor_flags = _program_preprocessor_flags(), exported_deps = [ ":memory_manager", - ":pte_data_map", - "//executorch/runtime/backend:interface", + ":pte_data_map" + aten_suffix, + "//executorch/runtime/backend:interface" + aten_suffix, "//executorch/runtime/core:core", - "//executorch/runtime/core:named_data_map", + "//executorch/runtime/core:named_data_map" + aten_suffix, "//executorch/runtime/core:evalue" + aten_suffix, "//executorch/runtime/core:event_tracer" + aten_suffix, "//executorch/runtime/core/exec_aten:lib" + aten_suffix, "//executorch/runtime/core/exec_aten/util:scalar_type_util" + aten_suffix, "//executorch/runtime/core/exec_aten/util:tensor_util" + aten_suffix, "//executorch/runtime/kernel:kernel_runtime_context" + aten_suffix, - 
"//executorch/runtime/kernel:operator_registry", + "//executorch/runtime/kernel:operator_registry" + aten_suffix, "//executorch/runtime/platform:platform", "//executorch/schema:extended_header", ], diff --git a/runtime/executor/tensor_parser.h b/runtime/executor/tensor_parser.h index 1fae84cfb05..e2b5ff8d6ea 100644 --- a/runtime/executor/tensor_parser.h +++ b/runtime/executor/tensor_parser.h @@ -21,7 +21,7 @@ #include namespace executorch { -namespace runtime { +namespace ET_RUNTIME_NAMESPACE { namespace deserialization { /// Data structure to hold key and data buffer for external data used @@ -142,7 +142,7 @@ ET_NODISCARD Result getTensorDataPtr( Span external_constants = {}); } // namespace deserialization -} // namespace runtime +} // namespace ET_RUNTIME_NAMESPACE } // namespace executorch namespace torch { @@ -150,10 +150,11 @@ namespace executor { namespace deserialization { // TODO(T197294990): Remove these deprecated aliases once all users have moved // to the new `::executorch` namespaces. -using ::executorch::runtime::deserialization::getTensorDataPtr; -using ::executorch::runtime::deserialization::parseListOptionalType; -using ::executorch::runtime::deserialization::parseTensor; -using ::executorch::runtime::deserialization::parseTensorList; +using ::executorch::ET_RUNTIME_NAMESPACE::deserialization::getTensorDataPtr; +using ::executorch::ET_RUNTIME_NAMESPACE::deserialization:: + parseListOptionalType; +using ::executorch::ET_RUNTIME_NAMESPACE::deserialization::parseTensor; +using ::executorch::ET_RUNTIME_NAMESPACE::deserialization::parseTensorList; } // namespace deserialization } // namespace executor } // namespace torch diff --git a/runtime/executor/tensor_parser_aten.cpp b/runtime/executor/tensor_parser_aten.cpp index d1a2f712853..2d454d15be5 100644 --- a/runtime/executor/tensor_parser_aten.cpp +++ b/runtime/executor/tensor_parser_aten.cpp @@ -19,7 +19,9 @@ #include // @donotremove @manual=//caffe2/aten:ATen-core namespace executorch { +// This file is only used in ATen mode, so we use the runtime_aten namespace. namespace runtime { +namespace aten { namespace deserialization { namespace { @@ -126,5 +128,6 @@ Result parseTensor( } } // namespace deserialization +} // namespace aten } // namespace runtime } // namespace executorch diff --git a/runtime/executor/tensor_parser_exec_aten.cpp b/runtime/executor/tensor_parser_exec_aten.cpp index 14ba5e0d42c..aa27bbf929d 100644 --- a/runtime/executor/tensor_parser_exec_aten.cpp +++ b/runtime/executor/tensor_parser_exec_aten.cpp @@ -16,11 +16,10 @@ #include namespace executorch { -namespace runtime { +namespace ET_RUNTIME_NAMESPACE { namespace deserialization { using executorch::aten::ScalarType; -using executorch::runtime::TensorLayout; // Provides access to private Program methods. 
class TensorParser final { public: @@ -256,5 +255,5 @@ ET_NODISCARD Result getTensorDataPtr( } } // namespace deserialization -} // namespace runtime +} // namespace ET_RUNTIME_NAMESPACE } // namespace executorch diff --git a/runtime/executor/tensor_parser_portable.cpp b/runtime/executor/tensor_parser_portable.cpp index 787af8b506b..e1f09d557ac 100644 --- a/runtime/executor/tensor_parser_portable.cpp +++ b/runtime/executor/tensor_parser_portable.cpp @@ -18,13 +18,13 @@ #include namespace executorch { -namespace runtime { +namespace ET_RUNTIME_NAMESPACE { namespace deserialization { using executorch::runtime::Span; -using torch::executor::ScalarType; -using torch::executor::Tensor; -using torch::executor::TensorImpl; +using ::torch::executor::ScalarType; +using ::torch::executor::Tensor; +using ::torch::executor::TensorImpl; Result parseTensor( const Program* program, @@ -176,5 +176,5 @@ Result parseTensor( } } // namespace deserialization -} // namespace runtime +} // namespace ET_RUNTIME_NAMESPACE } // namespace executorch diff --git a/runtime/executor/test/executor_test.cpp b/runtime/executor/test/executor_test.cpp index 328b23a8df3..e2a44429941 100644 --- a/runtime/executor/test/executor_test.cpp +++ b/runtime/executor/test/executor_test.cpp @@ -22,14 +22,14 @@ using executorch::aten::Scalar; using executorch::aten::ScalarType; using executorch::aten::SizesType; using executorch::aten::Tensor; +using executorch::ET_RUNTIME_NAMESPACE::get_op_function_from_registry; +using executorch::ET_RUNTIME_NAMESPACE::Kernel; +using executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext; +using executorch::ET_RUNTIME_NAMESPACE::OpFunction; +using executorch::ET_RUNTIME_NAMESPACE::register_kernel; +using executorch::ET_RUNTIME_NAMESPACE::registry_has_op_function; using executorch::runtime::Error; using executorch::runtime::EValue; -using executorch::runtime::get_op_function_from_registry; -using executorch::runtime::Kernel; -using executorch::runtime::KernelRuntimeContext; -using executorch::runtime::OpFunction; -using executorch::runtime::register_kernel; -using executorch::runtime::registry_has_op_function; using executorch::runtime::Result; using executorch::runtime::testing::TensorFactory; diff --git a/runtime/executor/test/targets.bzl b/runtime/executor/test/targets.bzl index dd5262b5ac6..940601779cb 100644 --- a/runtime/executor/test/targets.bzl +++ b/runtime/executor/test/targets.bzl @@ -70,7 +70,7 @@ def define_common_targets(is_fbcode = False): "//executorch/runtime/core/exec_aten:lib" + aten_suffix, "//executorch/runtime/core:evalue" + aten_suffix, "//executorch/runtime/kernel:kernel_runtime_context" + aten_suffix, - "//executorch/runtime/kernel:operator_registry", + "//executorch/runtime/kernel:operator_registry" + aten_suffix, "//executorch/runtime/platform:platform", ], ) diff --git a/runtime/executor/test/test_backend_compiler_lib.cpp b/runtime/executor/test/test_backend_compiler_lib.cpp index 9eea6384d6f..ce631eb4f57 100644 --- a/runtime/executor/test/test_backend_compiler_lib.cpp +++ b/runtime/executor/test/test_backend_compiler_lib.cpp @@ -13,13 +13,13 @@ #include #include /* strtol */ +using executorch::ET_RUNTIME_NAMESPACE::Backend; +using executorch::ET_RUNTIME_NAMESPACE::BackendExecutionContext; +using executorch::ET_RUNTIME_NAMESPACE::BackendInitContext; +using executorch::ET_RUNTIME_NAMESPACE::BackendInterface; +using executorch::ET_RUNTIME_NAMESPACE::CompileSpec; +using executorch::ET_RUNTIME_NAMESPACE::DelegateHandle; using executorch::runtime::ArrayRef; -using 
executorch::runtime::Backend; -using executorch::runtime::BackendExecutionContext; -using executorch::runtime::BackendInitContext; -using executorch::runtime::BackendInterface; -using executorch::runtime::CompileSpec; -using executorch::runtime::DelegateHandle; using executorch::runtime::Error; using executorch::runtime::EValue; using executorch::runtime::FreeableBuffer; diff --git a/runtime/executor/test/test_backend_with_delegate_mapping.cpp b/runtime/executor/test/test_backend_with_delegate_mapping.cpp index e6d84aca189..a0b79b09c6d 100644 --- a/runtime/executor/test/test_backend_with_delegate_mapping.cpp +++ b/runtime/executor/test/test_backend_with_delegate_mapping.cpp @@ -14,13 +14,13 @@ #include /* strtol */ #include +using executorch::ET_RUNTIME_NAMESPACE::Backend; +using executorch::ET_RUNTIME_NAMESPACE::BackendExecutionContext; +using executorch::ET_RUNTIME_NAMESPACE::BackendInitContext; +using executorch::ET_RUNTIME_NAMESPACE::BackendInterface; +using executorch::ET_RUNTIME_NAMESPACE::CompileSpec; +using executorch::ET_RUNTIME_NAMESPACE::DelegateHandle; using executorch::runtime::ArrayRef; -using executorch::runtime::Backend; -using executorch::runtime::BackendExecutionContext; -using executorch::runtime::BackendInitContext; -using executorch::runtime::BackendInterface; -using executorch::runtime::CompileSpec; -using executorch::runtime::DelegateHandle; using executorch::runtime::Error; using executorch::runtime::EValue; using executorch::runtime::FreeableBuffer; diff --git a/runtime/kernel/kernel_runtime_context.h b/runtime/kernel/kernel_runtime_context.h index ad269f5dd4b..6facecc7632 100644 --- a/runtime/kernel/kernel_runtime_context.h +++ b/runtime/kernel/kernel_runtime_context.h @@ -15,7 +15,7 @@ #include namespace executorch { -namespace runtime { +namespace ET_RUNTIME_NAMESPACE { /** * Runtime state and functionality for kernel implementations. @@ -107,7 +107,7 @@ class KernelRuntimeContext { Error failure_state_ = Error::Ok; }; -} // namespace runtime +} // namespace ET_RUNTIME_NAMESPACE } // namespace executorch // TODO(T197294990): Remove these deprecated aliases once all users have moved @@ -115,15 +115,15 @@ class KernelRuntimeContext { namespace torch { namespace executor { /// DEPRECATED: Use ::executorch::runtime::KernelRuntimeContext instead. -using ::executorch::runtime::KernelRuntimeContext; +using ::executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext; /// DEPRECATED: Use ::executorch::runtime::KernelRuntimeContext instead. -using RuntimeContext = ::executorch::runtime::KernelRuntimeContext; +using RuntimeContext = ::executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext; } // namespace executor } // namespace torch namespace executorch { namespace aten { /// DEPRECATED: Use ::executorch::runtime::KernelRuntimeContext instead. -using RuntimeContext = ::executorch::runtime::KernelRuntimeContext; +using RuntimeContext = ::executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext; } // namespace aten } // namespace executorch // DEPRECATED: The executorch::aten:: namespace is deprecated. 
Use diff --git a/runtime/kernel/operator_registry.cpp b/runtime/kernel/operator_registry.cpp index 85705e5b3fd..d7e7b298c10 100644 --- a/runtime/kernel/operator_registry.cpp +++ b/runtime/kernel/operator_registry.cpp @@ -15,7 +15,7 @@ #include namespace executorch { -namespace runtime { +namespace ET_RUNTIME_NAMESPACE { namespace { @@ -258,5 +258,5 @@ Span get_registered_kernels() { return {registered_kernels, num_registered_kernels}; } -} // namespace runtime +} // namespace ET_RUNTIME_NAMESPACE } // namespace executorch diff --git a/runtime/kernel/operator_registry.h b/runtime/kernel/operator_registry.h index a3cdcd66cee..f7a62208dd8 100644 --- a/runtime/kernel/operator_registry.h +++ b/runtime/kernel/operator_registry.h @@ -40,7 +40,7 @@ } namespace executorch { -namespace runtime { +namespace ET_RUNTIME_NAMESPACE { class KernelRuntimeContext; // Forward declaration using OpFunction = void (*)(KernelRuntimeContext&, EValue**); @@ -258,38 +258,41 @@ ET_NODISCARD inline Error register_kernel(const Kernel& kernel) { return register_kernels({&kernel, 1}); }; -} // namespace runtime +} // namespace ET_RUNTIME_NAMESPACE } // namespace executorch namespace torch { namespace executor { // TODO(T197294990): Remove these deprecated aliases once all users have moved // to the new `::executorch` namespaces. -using ::executorch::runtime::Kernel; -using ::executorch::runtime::KernelKey; -using ::executorch::runtime::KernelRuntimeContext; -using ::executorch::runtime::OpFunction; -using ::executorch::runtime::TensorMeta; -using KernelRuntimeContext = ::executorch::runtime::KernelRuntimeContext; +using ::executorch::ET_RUNTIME_NAMESPACE::Kernel; +using ::executorch::ET_RUNTIME_NAMESPACE::KernelKey; +using ::executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext; +using ::executorch::ET_RUNTIME_NAMESPACE::OpFunction; +using ::executorch::ET_RUNTIME_NAMESPACE::TensorMeta; +using KernelRuntimeContext = + ::executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext; inline ::executorch::runtime::Error register_kernels(ArrayRef kernels) { - return ::executorch::runtime::register_kernels( + return ::executorch::ET_RUNTIME_NAMESPACE::register_kernels( {kernels.data(), kernels.size()}); } inline OpFunction getOpsFn( const char* name, ArrayRef meta_list = {}) { - auto result = ::executorch::runtime::get_op_function_from_registry( - name, {meta_list.data(), meta_list.size()}); + auto result = + ::executorch::ET_RUNTIME_NAMESPACE::get_op_function_from_registry( + name, {meta_list.data(), meta_list.size()}); ET_CHECK(result.ok()); // get_op_function_from_registry() logs details. return *result; } inline bool hasOpsFn(const char* name, ArrayRef meta_list = {}) { - return ::executorch::runtime::registry_has_op_function( + return ::executorch::ET_RUNTIME_NAMESPACE::registry_has_op_function( name, {meta_list.data(), meta_list.size()}); } inline ArrayRef get_kernels() { - Span kernels = ::executorch::runtime::get_registered_kernels(); + Span kernels = + ::executorch::ET_RUNTIME_NAMESPACE::get_registered_kernels(); return ArrayRef(kernels.data(), kernels.size()); } } // namespace executor diff --git a/runtime/kernel/targets.bzl b/runtime/kernel/targets.bzl index b6aa9d7a95e..8a945f19881 100644 --- a/runtime/kernel/targets.bzl +++ b/runtime/kernel/targets.bzl @@ -21,21 +21,6 @@ def define_common_targets(): TARGETS and BUCK files that call this function. 
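With the operator_registry changes above, the portable and ATen builds expose their registries under different namespaces. A toy model, not the ExecuTorch registry API, of why that lets one binary link both flavors without re-registration clashes: each flavor registers into its own table.

```
#include <cstdio>
#include <string>
#include <vector>

namespace executorch {
namespace runtime {
// Toy portable-mode registry.
inline std::vector<std::string>& registry() {
  static std::vector<std::string> r;
  return r;
}
} // namespace runtime
} // namespace executorch

namespace executorch {
namespace runtime {
namespace aten {
// Toy ATen-mode registry: a distinct symbol, so no ODR or linker clash.
inline std::vector<std::string>& registry() {
  static std::vector<std::string> r;
  return r;
}
} // namespace aten
} // namespace runtime
} // namespace executorch

int main() {
  // The same op name can be registered once per flavor without tripping a
  // duplicate-registration check, because the tables are separate.
  executorch::runtime::registry().push_back("aten::add.out");
  executorch::runtime::aten::registry().push_back("aten::add.out");
  std::printf("portable=%zu aten=%zu\n",
              executorch::runtime::registry().size(),
              executorch::runtime::aten::registry().size());
  return 0;
}
```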
""" - runtime.cxx_library( - name = "operator_registry", - srcs = ["operator_registry.cpp"], - exported_headers = ["operator_registry.h"], - visibility = [ - "//executorch/...", - "@EXECUTORCH_CLIENTS", - ], - exported_deps = [ - "//executorch/runtime/core:core", - "//executorch/runtime/core:evalue", - ], - preprocessor_flags = _operator_registry_preprocessor_flags(), - ) - runtime.cxx_library( name = "operator_registry_MAX_NUM_KERNELS_TEST_ONLY", srcs = ["operator_registry.cpp"], @@ -68,6 +53,21 @@ def define_common_targets(): for aten_mode in get_aten_mode_options(): aten_suffix = "_aten" if aten_mode else "" + runtime.cxx_library( + name = "operator_registry" + aten_suffix, + srcs = ["operator_registry.cpp"], + exported_headers = ["operator_registry.h"], + visibility = [ + "//executorch/...", + "@EXECUTORCH_CLIENTS", + ], + exported_deps = [ + "//executorch/runtime/core:core", + "//executorch/runtime/core:evalue" + aten_suffix, + ], + preprocessor_flags = _operator_registry_preprocessor_flags(), + ) + runtime.cxx_library( name = "kernel_runtime_context" + aten_suffix, exported_headers = [ diff --git a/runtime/kernel/test/kernel_runtime_context_test.cpp b/runtime/kernel/test/kernel_runtime_context_test.cpp index 50bd860fb9c..2c3b536b0d4 100644 --- a/runtime/kernel/test/kernel_runtime_context_test.cpp +++ b/runtime/kernel/test/kernel_runtime_context_test.cpp @@ -13,8 +13,8 @@ #include using namespace ::testing; +using executorch::ET_RUNTIME_NAMESPACE::KernelRuntimeContext; using executorch::runtime::Error; -using executorch::runtime::KernelRuntimeContext; using executorch::runtime::MemoryAllocator; using executorch::runtime::Result; diff --git a/runtime/kernel/test/targets.bzl b/runtime/kernel/test/targets.bzl index bd66fc05b6f..4b3ed0f3139 100644 --- a/runtime/kernel/test/targets.bzl +++ b/runtime/kernel/test/targets.bzl @@ -101,3 +101,16 @@ def define_common_targets(): ":specialized_kernel_generated_lib", ], ) + + if aten_mode: + # Make sure we can depend on both generated_lib and generated_lib_aten + # in the same binary. + runtime.cxx_test( + name = "test_generated_lib_and_aten", + srcs = ["test_generated_lib_and_aten.cpp"], + deps = [ + "//executorch/kernels/portable:generated_lib", + "//executorch/kernels/portable:generated_lib_aten", + "//executorch/runtime/kernel:operator_registry_aten", + ], + ) diff --git a/runtime/kernel/test/test_generated_lib_and_aten.cpp b/runtime/kernel/test/test_generated_lib_and_aten.cpp new file mode 100644 index 00000000000..f9bfebc4a80 --- /dev/null +++ b/runtime/kernel/test/test_generated_lib_and_aten.cpp @@ -0,0 +1,45 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +using namespace ::testing; +using executorch::aten::ScalarType; +using executorch::runtime::Error; +using executorch::runtime::EValue; + +class GeneratedLibAndAtenTest : public ::testing::Test { + public: + void SetUp() override { + executorch::runtime::runtime_init(); + } +}; + +TEST_F(GeneratedLibAndAtenTest, GetKernelsFromATenRegistry) { + // Check if the kernel exists in the ATen registry + bool has_kernel = + executorch::runtime::aten::registry_has_op_function("aten::add.out"); + EXPECT_TRUE(has_kernel) + << "Kernel 'aten::add.out' not found in ATen registry"; + + // Get the kernel from the ATen registry + auto result = + executorch::runtime::aten::get_op_function_from_registry("aten::add.out"); + EXPECT_EQ(result.error(), Error::Ok) + << "Failed to get kernel from ATen registry"; + EXPECT_NE(*result, nullptr) << "Kernel function from ATen registry is null"; +} diff --git a/shim_et/xplat/executorch/codegen/codegen.bzl b/shim_et/xplat/executorch/codegen/codegen.bzl index a6d6d59e0c2..e1cebaa1140 100644 --- a/shim_et/xplat/executorch/codegen/codegen.bzl +++ b/shim_et/xplat/executorch/codegen/codegen.bzl @@ -688,7 +688,7 @@ def executorch_generated_lib( "ovr_config//os:windows": [], }) + compiler_flags, deps = [ - "//executorch/runtime/kernel:operator_registry", + "//executorch/runtime/kernel:operator_registry" + aten_suffix, "//executorch/kernels/prim_ops:prim_ops_registry" + aten_suffix, "//executorch/runtime/core:evalue" + aten_suffix, "//executorch/codegen:macros",
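Finally, a sketch of how downstream code might stay agnostic to which flavor it links against by aliasing the namespace once. Only the macro mirrors the real exec_aten.h; the Method struct is a stand-in.

```
#ifndef ET_RUNTIME_NAMESPACE
#ifdef USE_ATEN_LIB
#define ET_RUNTIME_NAMESPACE runtime::aten
#else
#define ET_RUNTIME_NAMESPACE runtime
#endif
#endif

namespace executorch {
namespace ET_RUNTIME_NAMESPACE {
// Hypothetical stand-in for a runtime type such as Method.
struct Method {
  int num_outputs() const { return 1; }
};
} // namespace ET_RUNTIME_NAMESPACE
} // namespace executorch

// One alias, written once, valid in both builds.
namespace et = ::executorch::ET_RUNTIME_NAMESPACE;

int main() {
  et::Method m;
  return m.num_outputs() == 1 ? 0 : 1;
}
```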