Skip to content

Move tensor_shape_to_c_string back to runtime/core #8296

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 19 commits into from
Feb 7, 2025
22 changes: 17 additions & 5 deletions kernels/portable/cpu/util/broadcast_util.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,9 @@
*/

#include <executorch/kernels/portable/cpu/util/repeat_util.h>
#include <executorch/kernels/portable/cpu/util/tensor_util.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
#include <executorch/runtime/core/exec_aten/util/tensor_shape_to_c_string.h>
#include <string.h>

namespace torch {
Expand Down Expand Up @@ -213,10 +213,22 @@ ET_NODISCARD Error get_broadcast_target_size(
Tensor::SizesType* out_sizes,
const size_t out_sizes_len,
size_t* out_dim) {
ET_CHECK_OR_RETURN_ERROR(
tensors_are_broadcastable_between(a_size, b_size),
InvalidArgument,
"Two input tensors should be broadcastable.\n");
if ET_UNLIKELY (!tensors_are_broadcastable_between(a_size, b_size)) {
#ifdef ET_LOG_ENABLED
const auto a_shape_str = tensor_shape_to_c_string(
executorch::runtime::Span<const Tensor::SizesType>(
a_size.data(), a_size.size()));
const auto b_shape_str = tensor_shape_to_c_string(
executorch::runtime::Span<const Tensor::SizesType>(
b_size.data(), b_size.size()));
#endif
ET_LOG(
Error,
"Two input tensors should be broadcastable but got shapes %s and %s.",
a_shape_str.data(),
b_shape_str.data());
return executorch::runtime::Error::InvalidArgument;
}

auto a_dim = a_size.size();
auto b_dim = b_size.size();
Expand Down
13 changes: 1 addition & 12 deletions kernels/portable/cpu/util/targets.bzl
Original file line number Diff line number Diff line change
Expand Up @@ -73,8 +73,8 @@ def define_common_targets():
compiler_flags = ["-Wno-missing-prototypes"],
deps = [
":repeat_util",
":tensor_util",
"//executorch/runtime/kernel:kernel_includes",
"//executorch/runtime/core/exec_aten/util:tensor_shape_to_c_string",
"//executorch/runtime/core/exec_aten/util:tensor_util",
],
visibility = ["//executorch/kernels/portable/cpu/...", "//executorch/kernels/optimized/cpu/...", "@EXECUTORCH_CLIENTS"],
Expand Down Expand Up @@ -268,17 +268,6 @@ def define_common_targets():
visibility = ["//executorch/kernels/portable/cpu/..."],
)

runtime.cxx_library(
name = "tensor_util",
srcs = ["tensor_util.cpp"],
exported_headers = ["tensor_util.h"],
deps = [
"//executorch/runtime/core/exec_aten:lib",
"//executorch/runtime/kernel:kernel_includes",
],
visibility = ["//executorch/kernels/portable/cpu/..."],
)

runtime.cxx_library(
name = "upsample_util",
srcs = ["upsample_util.cpp"],
Expand Down
2 changes: 1 addition & 1 deletion kernels/portable/cpu/util/test/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ set(EXECUTORCH_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../../../../..)

include(${EXECUTORCH_ROOT}/build/Test.cmake)

set(_test_srcs broadcast_test.cpp reduce_test.cpp tensor_util_test.cpp)
set(_test_srcs broadcast_test.cpp reduce_test.cpp)

et_cxx_test(
kernels_portable_cpu_util_test SOURCES ${_test_srcs} EXTRA_LIBS
Expand Down
9 changes: 9 additions & 0 deletions kernels/portable/cpu/util/test/broadcast_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -129,6 +129,15 @@ TEST(BroadcastUtilTest, GetBroadcastTargetSize) {
EXPECT_TRUE(
ArrayRef<Tensor::SizesType>(expected_output_size, expected_output_dim)
.equals(ArrayRef<Tensor::SizesType>({5, 2, 2})));

Tensor c = tf.zeros({4, 5});
err = get_broadcast_target_size(
a,
c,
expected_output_size,
torch::executor::kTensorDimensionLimit,
&expected_output_dim);
EXPECT_EQ(err, torch::executor::Error::InvalidArgument);
}

size_t linearize_indexes(size_t* indexes, size_t indexes_len, const Tensor& t) {
Expand Down
8 changes: 0 additions & 8 deletions kernels/portable/cpu/util/test/targets.bzl
Original file line number Diff line number Diff line change
Expand Up @@ -21,11 +21,3 @@ def define_common_targets():
"//executorch/kernels/portable/cpu/util:reduce_util",
],
)

runtime.cxx_test(
name = "tensor_util_test",
srcs = ["tensor_util_test.cpp"],
deps = [
"//executorch/kernels/portable/cpu/util:tensor_util",
],
)
29 changes: 0 additions & 29 deletions runtime/core/exec_aten/util/dim_order_util.h
Original file line number Diff line number Diff line change
Expand Up @@ -260,35 +260,6 @@ ET_NODISCARD inline Error stride_to_dim_order(
return Error::Ok;
}

/**
 * Print a string representation of an ArrayRef of tensor sizes into a
 * user-provided string buffer. If the user buffer is too small, the string
 * will be truncated. The output is of the format (1,2,3,4).
 *
 * Note that we cannot use ArrayRef here due to a circular dependency (see
 * above comments).
 *
 * @param output Destination buffer; NUL-terminated on return when
 *     output_size > 0.
 * @param output_size Size of `output` in bytes, including room for the
 *     terminating NUL.
 * @param sizes Pointer to `rank` dimension sizes.
 * @param rank Number of dimensions to print.
 */
template <class SizesType>
inline void sizes_to_string(
    char* output,
    size_t output_size,
    SizesType* sizes,
    size_t rank) {
  auto remaining_size = output_size;
  // size_t index avoids a signed/unsigned comparison against `rank`.
  for (size_t i = 0; remaining_size > 0 && i < rank; i++) {
    snprintf(
        output,
        remaining_size,
        "%s%lld",
        i == 0 ? "(" : ",",
        // Cast to a signed type wide enough for any SizesType so the value
        // matches the %lld specifier. (Passing a size_t to %zd, as before,
        // is a mismatched conversion specification — undefined behavior —
        // and also printed negative dimension values incorrectly on some
        // platforms.)
        static_cast<long long>(sizes[i]));
    // snprintf always NUL-terminates when remaining_size > 0, so strlen is
    // safe and len < remaining_size; remaining_size cannot underflow.
    auto len = strlen(output);
    output += len;
    remaining_size -= len;
  }
  snprintf(output, remaining_size, ")");
}

} // namespace runtime
} // namespace executorch

Expand Down
23 changes: 23 additions & 0 deletions runtime/core/exec_aten/util/targets.bzl
Original file line number Diff line number Diff line change
Expand Up @@ -72,3 +72,26 @@ def define_common_targets():
# specify library directory path.
force_static = True,
)

runtime.cxx_library(
name = "tensor_shape_to_c_string" + aten_suffix,
srcs = ["tensor_shape_to_c_string.cpp"],
exported_deps = [
"//executorch/runtime/core:core",
"//executorch/runtime/core/exec_aten/util:tensor_dimension_limit",
],
exported_headers = ["tensor_shape_to_c_string.h"],
visibility = [
"//executorch/...",
"@EXECUTORCH_CLIENTS",
],
)

runtime.cxx_library(
name = "tensor_dimension_limit",
exported_headers = ["tensor_dimension_limit.h"],
visibility = [
"//executorch/...",
"@EXECUTORCH_CLIENTS",
],
)
21 changes: 21 additions & 0 deletions runtime/core/exec_aten/util/tensor_dimension_limit.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/

#pragma once

namespace executorch::runtime {
/**
 * Upper bound on the number of dimensions a tensor shape may have when
 * using fixed-size scratch buffers for size computations.
 *
 * Operators that support both broadcasting and dynamic shapes may compute
 * an expected output size that differs from the existing size of every
 * input and output, so they need extra space to store the computed sizes.
 * Dynamic allocation is troublesome in ExecuTorch, so we hard-code a
 * relatively small static limit; in practice users do not create tensors
 * with higher dimensionality.
 */
constexpr size_t kTensorDimensionLimit = 16;
} // namespace executorch::runtime
Original file line number Diff line number Diff line change
Expand Up @@ -6,17 +6,19 @@
* LICENSE file in the root directory of this source tree.
*/

#include <executorch/kernels/portable/cpu/util/tensor_util.h>
#include <executorch/runtime/core/exec_aten/util/tensor_util.h>
#include <executorch/runtime/core/exec_aten/util/tensor_shape_to_c_string.h>

#include <executorch/runtime/platform/assert.h>

#include <cinttypes>
#include <cstdio> // For snprintf.
#include <cstring>

namespace executorch::runtime {
/**
* Shared implementation for tensor_util.h, may only contain code that
* works whether or not ATen mode is active.
*/
std::array<char, kTensorShapeStringSizeLimit> tensor_shape_to_c_string(
executorch::runtime::Span<const executorch::aten::SizesType> shape) {
namespace {
template <typename SizesType>
std::array<char, kTensorShapeStringSizeLimit> tensor_shape_to_c_string_impl(
executorch::runtime::Span<SizesType> shape) {
std::array<char, kTensorShapeStringSizeLimit> out;
char* p = out.data();
if ET_UNLIKELY (shape.size() > kTensorDimensionLimit) {
Expand Down Expand Up @@ -48,5 +50,16 @@ std::array<char, kTensorShapeStringSizeLimit> tensor_shape_to_c_string(
*(p - 1) = '\0';
return out;
}
} // namespace

std::array<char, kTensorShapeStringSizeLimit> tensor_shape_to_c_string(
executorch::runtime::Span<const std::int32_t> shape) {
return tensor_shape_to_c_string_impl(shape);
}

std::array<char, kTensorShapeStringSizeLimit> tensor_shape_to_c_string(
executorch::runtime::Span<const std::int64_t> shape) {
return tensor_shape_to_c_string_impl(shape);
}

} // namespace executorch::runtime
Original file line number Diff line number Diff line change
Expand Up @@ -10,10 +10,10 @@

#include <array>
#include <cstddef>
#include <cstdint>
#include <limits>

#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/util/tensor_util.h>
#include <executorch/runtime/core/exec_aten/util/tensor_dimension_limit.h>
#include <executorch/runtime/core/span.h>

namespace executorch::runtime {
Expand All @@ -34,18 +34,35 @@ constexpr size_t kTensorShapeStringSizeLimit = 1 + /* opening parenthesis */

namespace internal {
constexpr size_t kMaximumPrintableTensorShapeElement =
std::is_same_v<executorch::aten::SizesType, int32_t>
? std::numeric_limits<int32_t>::max()
: std::numeric_limits<uint32_t>::max();
std::numeric_limits<int32_t>::max();
} // namespace internal

/**
* Convert a shape to a NUL-terminated C string with limited size. If
* elements of the shape are larger than
* kMaximumPrintableTensorShapeElement, those elements will be
* rendered as ERR instead.
*
* NOTE: There are two overloads of this function to support both ATen
* tensors and ExecuTorch Tensors, which have different SizesType,
* while also avoiding a dependency on exec_aten.h from this header
* because that would cause a circular dependency.
*/
std::array<char, kTensorShapeStringSizeLimit> tensor_shape_to_c_string(
executorch::runtime::Span<const std::int32_t> shape);

/**
* Convert a shape to a NUL-terminated C string with limited size. If
* elements of the shape are larger than
* kMaximumPrintableTensorShapeElement, those elements will be
* rendered as ERR instead.
*
* NOTE: There are two overloads of this function to support both ATen
* tensors and ExecuTorch Tensors, which have different SizesType,
* while also avoiding a dependency on exec_aten.h from this header
* because that would cause a circular dependency.
*/
std::array<char, kTensorShapeStringSizeLimit> tensor_shape_to_c_string(
executorch::runtime::Span<const executorch::aten::SizesType> shape);
executorch::runtime::Span<const std::int64_t> shape);

} // namespace executorch::runtime
11 changes: 1 addition & 10 deletions runtime/core/exec_aten/util/tensor_util.h
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/util/dim_order_util.h>
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
#include <executorch/runtime/core/exec_aten/util/tensor_dimension_limit.h>
#include <executorch/runtime/core/span.h>
#include <executorch/runtime/platform/assert.h>
#include <executorch/runtime/platform/compiler.h>
Expand Down Expand Up @@ -893,16 +894,6 @@ inline bool tensor_is_scalar(executorch::aten::Tensor t) {
return t.dim() == 0 && t.numel() == 1;
}

/**
* The expected output size may not be the existing size of any inputs and
* outputs if the operator supports both broadcast and dynamic shape.
* Therefore such operators needs extra space to store the calculated expected
* output size. such dynamic allocation is troublesome in executorch so we can
* just hard code a static value of a relatively small value because users
* don't create high dimensional tensors.
*/
constexpr size_t kTensorDimensionLimit = 16;

/// Returns the product of dim[0:dim), not including dim.
inline size_t getLeadingDims(
const executorch::aten::Tensor& tensor,
Expand Down
6 changes: 4 additions & 2 deletions runtime/core/exec_aten/util/test/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,10 @@ set(EXECUTORCH_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../../../../..)

include(${EXECUTORCH_ROOT}/build/Test.cmake)

set(_test_srcs scalar_type_util_test.cpp
operator_impl_example_test.cpp dim_order_util_test.cpp
set(_test_srcs
dim_order_util_test.cpp operator_impl_example_test.cpp
scalar_type_util_test.cpp tensor_shape_to_c_string_test.cpp
tensor_util_test.cpp
)

et_cxx_test(runtime_core_exec_aten_util_test SOURCES ${_test_srcs} EXTRA_LIBS)
8 changes: 8 additions & 0 deletions runtime/core/exec_aten/util/test/targets.bzl
Original file line number Diff line number Diff line change
Expand Up @@ -46,3 +46,11 @@ def define_common_targets():
"//executorch/runtime/core/exec_aten/util:tensor_util" + aten_suffix,
],
)

runtime.cxx_test(
name = "tensor_shape_to_c_string_test",
srcs = ["tensor_shape_to_c_string_test.cpp"],
deps = [
"//executorch/runtime/core/exec_aten/util:tensor_shape_to_c_string",
],
)
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,8 @@
*/
#include <gtest/gtest.h>

#include <executorch/kernels/portable/cpu/util/tensor_util.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/util/tensor_shape_to_c_string.h>
#include <executorch/runtime/platform/runtime.h>
#include <array>

Expand All @@ -17,7 +17,7 @@ using executorch::runtime::Span;
using executorch::runtime::tensor_shape_to_c_string;
using executorch::runtime::internal::kMaximumPrintableTensorShapeElement;

TEST(TensorUtilTest, TensorShapeToCStringBasic) {
TEST(TensorShapeToCStringTest, Basic) {
std::array<executorch::aten::SizesType, 3> sizes = {123, 456, 789};
auto str = tensor_shape_to_c_string(
Span<const executorch::aten::SizesType>(sizes.data(), sizes.size()));
Expand All @@ -29,7 +29,7 @@ TEST(TensorUtilTest, TensorShapeToCStringBasic) {
EXPECT_STREQ(str.data(), "(1234567890)");
}

TEST(TensorUtilTest, TensorShapeToCStringNegativeItems) {
TEST(TensorShapeToCStringTest, NegativeItems) {
std::array<executorch::aten::SizesType, 4> sizes = {-1, -3, -2, 4};
auto str = tensor_shape_to_c_string(
Span<const executorch::aten::SizesType>(sizes.data(), sizes.size()));
Expand All @@ -44,7 +44,7 @@ TEST(TensorUtilTest, TensorShapeToCStringNegativeItems) {
EXPECT_EQ(str.data(), "(" + std::to_string(one_size[0]) + ")");
}
}
TEST(TensorUtilTest, TensorShapeToCStringMaximumElement) {
TEST(TensorShapeToCStringTest, MaximumElement) {
std::array<executorch::aten::SizesType, 3> sizes = {
123, std::numeric_limits<executorch::aten::SizesType>::max(), 789};
auto str = tensor_shape_to_c_string(
Expand All @@ -60,7 +60,7 @@ TEST(TensorUtilTest, TensorShapeToCStringMaximumElement) {
EXPECT_EQ(str.data(), expected_str);
}

TEST(TensorUtilTest, TensorShapeToCStringMaximumLength) {
TEST(TensorShapeToCStringTest, MaximumLength) {
std::array<executorch::aten::SizesType, kTensorDimensionLimit> sizes;
std::fill(sizes.begin(), sizes.end(), kMaximumPrintableTensorShapeElement);

Expand All @@ -78,7 +78,7 @@ TEST(TensorUtilTest, TensorShapeToCStringMaximumLength) {
EXPECT_EQ(expected_str, str.data());
}

TEST(TensorUtilTest, TensorShapeToCStringExceedsDimensionLimit) {
TEST(TensorShapeToCStringTest, ExceedsDimensionLimit) {
std::array<executorch::aten::SizesType, kTensorDimensionLimit + 1> sizes;
std::fill(sizes.begin(), sizes.end(), kMaximumPrintableTensorShapeElement);

Expand Down
Loading
Loading