Skip to content

Commit 11cf2c8

Browse files
authored
Add ET_PRI_* macros for Tensor methods that differ with ATen (#9768)
#9751 added a bunch of logging to error messages. We discovered after commit that it caused -Wformat warnings (turned into errors via -Werror) in ATen mode, because a bunch of Tensor methods return different types between ExecuTorch and ATen. This PR adds formatting macros to be used with these methods and uses them.
1 parent cc231cf commit 11cf2c8

14 files changed

+71
-26
lines changed

kernels/portable/cpu/op_convolution_backward.cpp

+3-2
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,7 @@ bool check_convolution_backward_args(
4242
transposed == false, "Transposed Convolution Backward not supported yet");
4343
ET_CHECK_OR_RETURN_FALSE(
4444
weight.dim() == 4,
45-
"Only 2D Convolution Backward supported for now; weight.dim() = %zd",
45+
"Only 2D Convolution Backward supported for now; weight.dim() = %" ET_PRI_TENSOR_DIM,
4646
weight.dim());
4747

4848
ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(weight, input));
@@ -93,7 +93,8 @@ bool check_convolution_backward_args(
9393

9494
ET_CHECK_OR_RETURN_FALSE(
9595
grad_output.dim() == input.dim(),
96-
"grad_output should have same number of dimensions as input; grad_output.dim() = %zd, input.dim() = %zd",
96+
"grad_output should have same number of dimensions as input; grad_output.dim() = %" ET_PRI_TENSOR_DIM
97+
", input.dim() = %" ET_PRI_TENSOR_DIM,
9798
grad_output.dim(),
9899
input.dim());
99100

kernels/portable/cpu/op_linear_scratch_example.cpp

+6-3
Original file line numberDiff line numberDiff line change
@@ -42,19 +42,22 @@ bool check_linear_scratch_example_args(
4242
Tensor& scratch) {
4343
ET_CHECK_OR_RETURN_FALSE(
4444
input.size(1) == weight.size(1),
45-
"Unexpected weight size 1; input.size(1) = %zd, weight.size(1) = %zd",
45+
"Unexpected weight size 1; input.size(1) = %" ET_PRI_TENSOR_SIZE
46+
", weight.size(1) = %" ET_PRI_TENSOR_SIZE,
4647
input.size(1),
4748
weight.size(1));
4849

4950
ET_CHECK_OR_RETURN_FALSE(
5051
scratch.size(0) == input.size(0),
51-
"Unexpected scratch size 0; scratch.size(0) = %zd, input.size(0) = %zd",
52+
"Unexpected scratch size 0; scratch.size(0) = %" ET_PRI_TENSOR_SIZE
53+
", input.size(0) = %" ET_PRI_TENSOR_SIZE,
5254
scratch.size(0),
5355
input.size(0));
5456

5557
ET_CHECK_OR_RETURN_FALSE(
5658
scratch.size(1) == weight.size(0),
57-
"Unexpected scratch size 1; scratch.size(1) = %zd, weight.size(0) = %zd",
59+
"Unexpected scratch size 1; scratch.size(1) = %" ET_PRI_TENSOR_SIZE
60+
", weight.size(0) = %" ET_PRI_TENSOR_SIZE,
5861
scratch.size(1),
5962
weight.size(0));
6063

kernels/portable/cpu/op_max_pool2d_with_indices_backward.cpp

+2-1
Original file line numberDiff line numberDiff line change
@@ -62,7 +62,8 @@ bool check_max_pool2d_backward_args(
6262

6363
ET_CHECK_OR_RETURN_FALSE(
6464
grad_output.dim() == input.dim(),
65-
"grad_output should have same number of dimensions as input; grad_output.dim() = %zd, input.dim() = %zd",
65+
"grad_output should have same number of dimensions as input; grad_output.dim() = %" ET_PRI_TENSOR_DIM
66+
", input.dim() = %" ET_PRI_TENSOR_DIM,
6667
grad_output.dim(),
6768
input.dim());
6869

kernels/portable/cpu/op_repeat_interleave.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ bool check_repeat_interleave_args(
2626
static_cast<int>(repeats.scalar_type()));
2727
ET_CHECK_OR_RETURN_FALSE(
2828
repeats.dim() == 1,
29-
"repeats must be 1-D; repeats.dim() = %zd",
29+
"repeats must be 1-D; repeats.dim() = %" ET_PRI_TENSOR_DIM,
3030
repeats.dim());
3131
ET_CHECK_OR_RETURN_FALSE(
3232
output_size_value == repeats_sum,

kernels/portable/cpu/op_topk.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ bool check_topk_args(
3232
ET_CHECK_OR_RETURN_FALSE(
3333
k >= 0 && k <= nonempty_size(in, dim),
3434
"selected index k out of range; k = %" PRId64 ", dim = %" PRId64
35-
", in.dim() = %zd, nonempty_size(in, dim) = %zd",
35+
", in.dim() = %" ET_PRI_TENSOR_DIM ", nonempty_size(in, dim) = %zd",
3636
k,
3737
dim,
3838
in.dim(),

kernels/portable/cpu/util/activation_ops_util.cpp

+2-1
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,8 @@ bool check_glu_args(const Tensor& in, int64_t dim, Tensor& out) {
4343
ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_rank(in, out));
4444
ET_CHECK_OR_RETURN_FALSE(
4545
out.size(non_negative_dim) == dim_size / 2,
46-
"output tensor must have half the size of the input tensor along the specified dimension; out.size(%zu) = %zd, dim_size = %zd",
46+
"output tensor must have half the size of the input tensor along the specified dimension; out.size(%zu) = %" ET_PRI_TENSOR_SIZE
47+
", dim_size = %zd",
4748
non_negative_dim,
4849
out.size(non_negative_dim),
4950
dim_size);

kernels/portable/cpu/util/advanced_index_util.cpp

+3-2
Original file line numberDiff line numberDiff line change
@@ -296,14 +296,15 @@ bool get_index_out_target_size(
296296

297297
ET_CHECK_OR_RETURN_FALSE(
298298
static_cast<ssize_t>(num_null_indices + num_indexed_dims) <= in.dim(),
299-
"Indexing too many dimensions; num_null_indices = %zu, num_indexed_dims = %zu, in.dim() = %zd",
299+
"Indexing too many dimensions; num_null_indices = %zu, num_indexed_dims = %zu, in.dim() = %" ET_PRI_TENSOR_DIM,
300300
num_null_indices,
301301
num_indexed_dims,
302302
in.dim());
303303

304304
ET_CHECK_OR_RETURN_FALSE(
305305
in.dim() + broadcast_ndim - num_indexed_dims <= kTensorDimensionLimit,
306-
"Out tensor would exceed number of allowed dimensions; in.dim() = %zd, broadcast_ndim = %zu, num_indexed_dims = %zu, kTensorDimensionLimit = %zu",
306+
"Out tensor would exceed number of allowed dimensions; in.dim() = %" ET_PRI_TENSOR_DIM
307+
", broadcast_ndim = %zu, num_indexed_dims = %zu, kTensorDimensionLimit = %zu",
307308
in.dim(),
308309
broadcast_ndim,
309310
num_indexed_dims,

kernels/portable/cpu/util/index_util.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,7 @@ bool check_index_select_args(
8484
if (index.dim() > 0 && in.dim() == 0) {
8585
ET_CHECK_OR_RETURN_FALSE(
8686
index.numel() == 1,
87-
"index_select: Index to scalar must have exactly 1 value; index.numel() = %zd",
87+
"index_select: Index to scalar must have exactly 1 value; index.numel() = %" ET_PRI_TENSOR_NUMEL,
8888
index.numel());
8989
}
9090

@@ -155,7 +155,7 @@ bool check_scatter_add_args(
155155
ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(self, src));
156156
ET_CHECK_OR_RETURN_FALSE(
157157
index.scalar_type() == ScalarType::Long,
158-
"Expected dypte int64 for index; index.scalar_type() = %d",
158+
"Expected dtype int64 for index; index.scalar_type() = %d",
159159
static_cast<int>(index.scalar_type()));
160160
ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(self, dim));
161161

kernels/portable/cpu/util/kernel_ops_util.cpp

+5-4
Original file line numberDiff line numberDiff line change
@@ -279,7 +279,7 @@ bool check_avg_pool2d_args(
279279
ET_CHECK_OR_RETURN_FALSE(
280280
(in.dim() == 3 && in.size(0) > 0 && in.size(1) > 0 && in.size(2) > 0) ||
281281
(in.dim() == 4 && in.size(1) > 0 && in.size(2) > 0 && in.size(3) > 0),
282-
"Expected 3D or 4D (batch mode) tensor with optional 0 dim batch size for input; in.dim() = %zd",
282+
"Expected 3D or 4D (batch mode) tensor with optional 0 dim batch size for input; in.dim() = %" ET_PRI_TENSOR_DIM,
283283
in.dim());
284284

285285
ET_LOG_AND_RETURN_IF_FALSE(
@@ -351,7 +351,8 @@ bool check_convolution_args(
351351
ET_CHECK_OR_RETURN_FALSE(
352352
bias.value().size(0) == transposed ? groups * weight.size(1)
353353
: weight.size(0),
354-
"bias length must equal number of output channels, but got %zd; expected %" PRId64,
354+
"bias length must equal number of output channels, but got %" ET_PRI_TENSOR_SIZE
355+
"; expected %" PRId64,
355356
bias.value().size(0),
356357
transposed ? groups * weight.size(1) : weight.size(0));
357358
}
@@ -491,7 +492,7 @@ bool check_max_pool2d_with_indices_args(
491492
ET_CHECK_OR_RETURN_FALSE(
492493
(in.dim() == 3 && in.size(0) > 0 && in.size(1) > 0 && in.size(2) > 0) ||
493494
(in.dim() == 4 && in.size(1) > 0 && in.size(2) > 0 && in.size(3) > 0),
494-
"Expected 3D or 4D (batch mode) tensor with optional 0 dim batch size for input; in.dim() = %zd",
495+
"Expected 3D or 4D (batch mode) tensor with optional 0 dim batch size for input; in.dim() = %" ET_PRI_TENSOR_DIM,
495496
in.dim());
496497

497498
ET_LOG_AND_RETURN_IF_FALSE(
@@ -559,7 +560,7 @@ bool check_constant_pad_args(
559560

560561
ET_CHECK_OR_RETURN_FALSE(
561562
static_cast<ssize_t>(pad.size() / 2) <= in.dim(),
562-
"Padding array contains too many elements; pad.size()/2 = %zu, in.dim() = %zd",
563+
"Padding array contains too many elements; pad.size()/2 = %zu, in.dim() = %" ET_PRI_TENSOR_DIM,
563564
pad.size() / 2,
564565
in.dim());
565566

kernels/portable/cpu/util/normalization_ops_util.cpp

+8-4
Original file line numberDiff line numberDiff line change
@@ -88,14 +88,16 @@ bool check_layer_norm_args(
8888
ndim);
8989
ET_CHECK_OR_RETURN_FALSE(
9090
in.dim() >= static_cast<ssize_t>(ndim),
91-
"Expected input tensor to have rank >= the length of normalized_shape; in.dim() = %zd, ndim = %zu",
91+
"Expected input tensor to have rank >= the length of normalized_shape; in.dim() = %" ET_PRI_TENSOR_DIM
92+
", ndim = %zu",
9293
in.dim(),
9394
ndim);
9495
size_t shift = in.dim() - ndim;
9596
for (const auto d : c10::irange(ndim)) {
9697
ET_CHECK_OR_RETURN_FALSE(
9798
in.size(d + shift) == normalized_shape[d],
98-
"Expected normalized_shape to match the sizes of input's rightmost dimensions; in.size(%zu) = %zd, normalized_shape[%zu] = %" PRId64,
99+
"Expected normalized_shape to match the sizes of input's rightmost dimensions; in.size(%zu) = %" ET_PRI_TENSOR_SIZE
100+
", normalized_shape[%zu] = %" PRId64,
99101
d + shift,
100102
in.size(d + shift),
101103
d,
@@ -166,15 +168,17 @@ bool check_group_norm_args(
166168
ET_CHECK_OR_RETURN_FALSE(
167169
!weight.has_value() ||
168170
(weight.value().dim() == 1 && weight.value().size(0) == C),
169-
"Expected weight to be a vector of size equal to the number of channels in input; weight.has_value() = %d, weight.dim() = %zd, weight.size(0) = %zd, C = %" PRId64,
171+
"Expected weight to be a vector of size equal to the number of channels in input; weight.has_value() = %d, weight.dim() = %" ET_PRI_TENSOR_DIM
172+
", weight.size(0) = %" ET_PRI_TENSOR_SIZE ", C = %" PRId64,
170173
weight.has_value(),
171174
weight.has_value() ? weight.value().dim() : -1,
172175
weight.has_value() ? weight.value().size(0) : -1,
173176
C);
174177
ET_CHECK_OR_RETURN_FALSE(
175178
!bias.has_value() ||
176179
(bias.value().dim() == 1 && bias.value().size(0) == C),
177-
"Expected bias to be a vector of size equal to the number of channels in input; bias.has_value() = %d, bias.dim() = %zd, bias.size(0) = %zd, C = %" PRId64,
180+
"Expected bias to be a vector of size equal to the number of channels in input; bias.has_value() = %d, bias.dim() = %" ET_PRI_TENSOR_DIM
181+
", bias.size(0) = %" ET_PRI_TENSOR_SIZE ", C = %" PRId64,
178182
bias.has_value(),
179183
bias.has_value() ? bias.value().dim() : -1,
180184
bias.has_value() ? bias.value().size(0) : -1,

kernels/portable/cpu/util/repeat_util.cpp

+3-2
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ bool check_repeat_args(
2828
// Ensure the self tensors list is non-empty.
2929
ET_CHECK_OR_RETURN_FALSE(
3030
static_cast<ssize_t>(repeats.size()) >= self.dim(),
31-
"Number of dimensions of repeat dims can not be smaller than number of dimensions of tensor; repeats.size() = %zu, self.dim() = %zd",
31+
"Number of dimensions of repeat dims can not be smaller than number of dimensions of tensor; repeats.size() = %zu, self.dim() = %" ET_PRI_TENSOR_DIM,
3232
repeats.size(),
3333
self.dim());
3434

@@ -43,7 +43,8 @@ bool check_repeat_args(
4343
/// Check if out.size() is legal.
4444
ET_CHECK_OR_RETURN_FALSE(
4545
static_cast<size_t>(out.dim()) == repeats.size(),
46-
"The dimension of out shall equal size of repeats, but now is %zd and %zu",
46+
"The dimension of out shall equal size of repeats, but now is %" ET_PRI_TENSOR_DIM
47+
" and %zu",
4748
out.dim(),
4849
repeats.size());
4950

kernels/portable/cpu/util/slice_util.cpp

+3-3
Original file line numberDiff line numberDiff line change
@@ -20,19 +20,19 @@ bool check_narrow_copy_args(
2020
const Tensor& in,
2121
int64_t dim,
2222
int64_t start,
23-
int64_t lenth,
23+
int64_t length,
2424
Tensor& out) {
2525
ET_LOG_AND_RETURN_IF_FALSE(in.dim() > 0);
2626
ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out));
2727
ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim));
2828
ET_CHECK_OR_RETURN_FALSE(
29-
lenth >= 0, "lenth must be non-negative; lenth = %" PRId64, lenth);
29+
length >= 0, "length must be non-negative; length = %" PRId64, length);
3030
ET_LOG_AND_RETURN_IF_FALSE(start >= -in.size(dim));
3131
ET_LOG_AND_RETURN_IF_FALSE(start <= in.size(dim));
3232
if (start < 0) {
3333
start += in.size(dim);
3434
}
35-
ET_LOG_AND_RETURN_IF_FALSE(start + lenth <= in.size(dim));
35+
ET_LOG_AND_RETURN_IF_FALSE(start + length <= in.size(dim));
3636
return true;
3737
}
3838

runtime/core/exec_aten/exec_aten.h

+12
Original file line numberDiff line numberDiff line change
@@ -95,6 +95,18 @@ inline ssize_t compute_numel(const SizesType* sizes, ssize_t dim) {
9595
c10::multiply_integers(c10::ArrayRef<SizesType>(sizes, dim)));
9696
}
9797

98+
#undef ET_PRI_TENSOR_SIZE
99+
#define ET_PRI_TENSOR_SIZE PRId64
100+
101+
#undef ET_PRI_TENSOR_DIM
102+
#define ET_PRI_TENSOR_DIM PRId64
103+
104+
#undef ET_PRI_TENSOR_NUMEL
105+
#define ET_PRI_TENSOR_NUMEL PRId64
106+
107+
#undef ET_PRI_SIZES_AND_STRIDES
108+
#define ET_PRI_SIZES_AND_STRIDES PRId64
109+
98110
#else // Use executor types
99111

100112
using Tensor = torch::executor::Tensor;

runtime/core/portable_type/tensor_impl.h

+20
Original file line numberDiff line numberDiff line change
@@ -270,6 +270,26 @@ ssize_t compute_numel(
270270
const ::executorch::runtime::etensor::TensorImpl::SizesType* sizes,
271271
ssize_t dim);
272272

273+
/// Appropriate format specifier for the result of calling
274+
/// size(). Must be used instead of using zd directly to support ATen
275+
/// mode.
276+
#define ET_PRI_TENSOR_SIZE "zd"
277+
278+
/// Appropriate format specifier for the result of calling
279+
/// dim(). Must be used instead of using zd directly to support ATen
280+
/// mode.
281+
#define ET_PRI_TENSOR_DIM "zd"
282+
283+
/// Appropriate format specifier for the result of calling
284+
/// numel(). Must be used instead of using zd directly to support ATen
285+
/// mode.
286+
#define ET_PRI_TENSOR_NUMEL "zd"
287+
288+
// Appropriate format specifier for elements of sizes() and
289+
// strides(). Must be used instead of using d directly to support ATen
290+
// mode.
291+
#define ET_PRI_SIZES_AND_STRIDES "d"
292+
273293
} // namespace etensor
274294
} // namespace runtime
275295
} // namespace executorch

0 commit comments

Comments
 (0)