diff --git a/kernels/portable/cpu/op_convolution_backward.cpp b/kernels/portable/cpu/op_convolution_backward.cpp
index cd635cda8f9..897efc9292c 100644
--- a/kernels/portable/cpu/op_convolution_backward.cpp
+++ b/kernels/portable/cpu/op_convolution_backward.cpp
@@ -41,7 +41,9 @@ bool check_convolution_backward_args(
   ET_CHECK_OR_RETURN_FALSE(
       transposed == false, "Transposed Convolution Backward not supported yet");
   ET_CHECK_OR_RETURN_FALSE(
-      weight.dim() == 4, "Only 2D Convolution Backward supported for now");
+      weight.dim() == 4,
+      "Only 2D Convolution Backward supported for now; weight.dim() = %zd",
+      weight.dim());
 
   ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(weight, input));
   ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(grad_output, input));
@@ -91,7 +93,9 @@ bool check_convolution_backward_args(
 
   ET_CHECK_OR_RETURN_FALSE(
       grad_output.dim() == input.dim(),
-      "grad_output should have same number of dimensions as input");
+      "grad_output should have same number of dimensions as input; grad_output.dim() = %zd, input.dim() = %zd",
+      grad_output.dim(),
+      input.dim());
 
   ET_LOG_AND_RETURN_IF_FALSE(
       tensor_has_expected_size(grad_output, {output_sizes, output_ndim}));
diff --git a/kernels/portable/cpu/op_linear_scratch_example.cpp b/kernels/portable/cpu/op_linear_scratch_example.cpp
index 096fea8bc4c..ecadb837379 100644
--- a/kernels/portable/cpu/op_linear_scratch_example.cpp
+++ b/kernels/portable/cpu/op_linear_scratch_example.cpp
@@ -41,13 +41,22 @@ bool check_linear_scratch_example_args(
     Tensor& out,
     Tensor& scratch) {
   ET_CHECK_OR_RETURN_FALSE(
-      input.size(1) == weight.size(1), "Unexpected weight size 1");
+      input.size(1) == weight.size(1),
+      "Unexpected weight size 1; input.size(1) = %zd, weight.size(1) = %zd",
+      input.size(1),
+      weight.size(1));
 
   ET_CHECK_OR_RETURN_FALSE(
-      scratch.size(0) == input.size(0), "Unexpected scratch size 0");
+      scratch.size(0) == input.size(0),
+      "Unexpected scratch size 0; scratch.size(0) = %zd, input.size(0) = %zd",
+      scratch.size(0),
+      input.size(0));
 
   ET_CHECK_OR_RETURN_FALSE(
-      scratch.size(1) == weight.size(0), "Unexpected scratch size 1");
+      scratch.size(1) == weight.size(0),
+      "Unexpected scratch size 1; scratch.size(1) = %zd, weight.size(0) = %zd",
+      scratch.size(1),
+      weight.size(0));
 
   return true;
 }
diff --git a/kernels/portable/cpu/op_max_pool2d_with_indices_backward.cpp b/kernels/portable/cpu/op_max_pool2d_with_indices_backward.cpp
index 5edce5a2c67..135bfa3ccee 100644
--- a/kernels/portable/cpu/op_max_pool2d_with_indices_backward.cpp
+++ b/kernels/portable/cpu/op_max_pool2d_with_indices_backward.cpp
@@ -62,7 +62,9 @@ bool check_max_pool2d_backward_args(
 
   ET_CHECK_OR_RETURN_FALSE(
       grad_output.dim() == input.dim(),
-      "grad_output should have same number of dimensions as input");
+      "grad_output should have same number of dimensions as input; grad_output.dim() = %zd, input.dim() = %zd",
+      grad_output.dim(),
+      input.dim());
 
   ET_LOG_AND_RETURN_IF_FALSE(
       tensor_has_expected_size(grad_output, {output_sizes, output_ndim}));
diff --git a/kernels/portable/cpu/op_repeat_interleave.cpp b/kernels/portable/cpu/op_repeat_interleave.cpp
index 4ee77695f86..39b2531691f 100644
--- a/kernels/portable/cpu/op_repeat_interleave.cpp
+++ b/kernels/portable/cpu/op_repeat_interleave.cpp
@@ -22,24 +22,37 @@ bool check_repeat_interleave_args(
   ET_CHECK_OR_RETURN_FALSE(
       repeats.scalar_type() == ScalarType::Int ||
           repeats.scalar_type() == ScalarType::Long,
-      "repeats must be int or long");
-  ET_CHECK_OR_RETURN_FALSE(repeats.dim() == 1, "repeats must be 1D");
+      "repeats must be int or long; repeats.scalar_type() = %d",
+      static_cast<int>(repeats.scalar_type()));
+  ET_CHECK_OR_RETURN_FALSE(
+      repeats.dim() == 1,
+      "repeats must be 1-D; repeats.dim() = %zd",
+      repeats.dim());
   ET_CHECK_OR_RETURN_FALSE(
       output_size_value == repeats_sum,
-      "output_size, if provided, must be equal to repeats.sum()");
+      "output_size, if provided, must be equal to repeats.sum(); output_size_value = %" PRId64
+      ", repeats_sum = %" PRId64,
+      output_size_value,
+      repeats_sum);
 
   ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(repeats, out));
 
   if (repeats.scalar_type() == ScalarType::Long) {
     const int64_t* const repeats_data = repeats.const_data_ptr<int64_t>();
     for (const auto i : c10::irange(repeats.numel())) {
       ET_CHECK_OR_RETURN_FALSE(
-          repeats_data[i] >= 0, "repeats cannot be negative");
+          repeats_data[i] >= 0,
+          "repeats cannot be negative; repeats_data[%" PRId64 "] = %" PRId64,
+          static_cast<int64_t>(i),
+          repeats_data[i]);
     }
   } else {
     const int32_t* const repeats_data = repeats.const_data_ptr<int32_t>();
     for (const auto i : c10::irange(repeats.numel())) {
       ET_CHECK_OR_RETURN_FALSE(
-          repeats_data[i] >= 0, "repeats cannot be negative");
+          repeats_data[i] >= 0,
+          "repeats cannot be negative; repeats_data[%" PRId64 "] = %d",
+          static_cast<int64_t>(i),
+          repeats_data[i]);
     }
   }
diff --git a/kernels/portable/cpu/op_topk.cpp b/kernels/portable/cpu/op_topk.cpp
index c56545b9235..6c721b37952 100644
--- a/kernels/portable/cpu/op_topk.cpp
+++ b/kernels/portable/cpu/op_topk.cpp
@@ -30,7 +30,13 @@ bool check_topk_args(
     dim += nonzero_dim(in);
   }
   ET_CHECK_OR_RETURN_FALSE(
-      k >= 0 && k <= nonempty_size(in, dim), "selected index k out of range");
+      k >= 0 && k <= nonempty_size(in, dim),
+      "selected index k out of range; k = %" PRId64 ", dim = %" PRId64
+      ", in.dim() = %zd, nonempty_size(in, dim) = %zd",
+      k,
+      dim,
+      in.dim(),
+      nonempty_size(in, dim));
 
   return true;
 }
diff --git a/kernels/portable/cpu/util/activation_ops_util.cpp b/kernels/portable/cpu/util/activation_ops_util.cpp
index abde15f8740..4d2938dbf07 100644
--- a/kernels/portable/cpu/util/activation_ops_util.cpp
+++ b/kernels/portable/cpu/util/activation_ops_util.cpp
@@ -43,7 +43,10 @@ bool check_glu_args(const Tensor& in, int64_t dim, Tensor& out) {
   ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_rank(in, out));
   ET_CHECK_OR_RETURN_FALSE(
       out.size(non_negative_dim) == dim_size / 2,
-      "output tensor must have half the size of the input tensor along the specified dimension.");
+      "output tensor must have half the size of the input tensor along the specified dimension; out.size(%zu) = %zd, dim_size = %zd",
+      non_negative_dim,
+      out.size(non_negative_dim),
+      dim_size);
 
   for (const auto i : c10::irange(in.dim())) {
     if (static_cast<size_t>(i) != non_negative_dim) {
diff --git a/kernels/portable/cpu/util/advanced_index_util.cpp b/kernels/portable/cpu/util/advanced_index_util.cpp
index 304ba3a3f96..36ecd2e504f 100644
--- a/kernels/portable/cpu/util/advanced_index_util.cpp
+++ b/kernels/portable/cpu/util/advanced_index_util.cpp
@@ -28,7 +28,8 @@ bool check_indices_dtypes(TensorOptList indices) {
       ET_CHECK_OR_RETURN_FALSE(
           ix_type == ScalarType::Long || ix_type == ScalarType::Int ||
               ix_type == ScalarType::Byte || ix_type == ScalarType::Bool,
-          "Index tensors should be Long, Int, Byte or Bool");
+          "Index tensors should be Long, Int, Byte or Bool; got %d",
+          static_cast<int>(ix_type));
     }
   }
   return true;
@@ -295,11 +296,18 @@ bool get_index_out_target_size(
 
   ET_CHECK_OR_RETURN_FALSE(
       static_cast<ssize_t>(num_null_indices + num_indexed_dims) <= in.dim(),
-      "Indexing too many dimensions");
+      "Indexing too many dimensions; num_null_indices = %zu, num_indexed_dims = %zu, in.dim() = %zd",
+      num_null_indices,
+      num_indexed_dims,
+      in.dim());
 
   ET_CHECK_OR_RETURN_FALSE(
       in.dim() + broadcast_ndim - num_indexed_dims <= kTensorDimensionLimit,
-      "Out tensor would exceed number of allowed dimensions");
+      "Out tensor would exceed number of allowed dimensions; in.dim() = %zd, broadcast_ndim = %zu, num_indexed_dims = %zu, kTensorDimensionLimit = %zu",
+      in.dim(),
+      broadcast_ndim,
+      num_indexed_dims,
+      kTensorDimensionLimit);
 
   (*out_ndim) = in.dim() + broadcast_ndim - num_indexed_dims;
 
diff --git a/kernels/portable/cpu/util/copy_ops_util.cpp b/kernels/portable/cpu/util/copy_ops_util.cpp
index 229fba2dad0..93725d92dab 100644
--- a/kernels/portable/cpu/util/copy_ops_util.cpp
+++ b/kernels/portable/cpu/util/copy_ops_util.cpp
@@ -46,7 +46,10 @@ bool check_as_strided_copy_args(
     Tensor& out) {
   ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out));
   ET_CHECK_OR_RETURN_FALSE(
-      size.size() == stride.size(), "mismatch in length of strides and shape");
+      size.size() == stride.size(),
+      "mismatch in length of strides and shape; size.size() = %zu, stride.size() = %zu",
+      size.size(),
+      stride.size());
   for (const auto& val : stride) {
     ET_CHECK_OR_RETURN_FALSE(
         val >= 0,
@@ -242,7 +245,9 @@ bool check_permute_copy_args(const Tensor& in, IntArrayRef dims, Tensor& out) {
 
     // Check that the dimension hasn't been seen previously.
     ET_CHECK_OR_RETURN_FALSE(
-        dim_exist[dim] == false, "duplicate dims are not allowed.");
+        dim_exist[dim] == false,
+        "duplicate dims are not allowed; dim = %zu",
+        dim);
     dim_exist[dim] = true;
   }
 
@@ -424,19 +429,27 @@ bool check_split_with_sizes_copy_args(
 
   ET_CHECK_OR_RETURN_FALSE(
       split_sizes.size() == out.size(),
-      "Number of split sizes must match the number of output tensors");
+      "Number of split sizes must match the number of output tensors; split_sizes.size() = %zu, out.size() = %zu",
+      split_sizes.size(),
+      out.size());
 
   int64_t sum = 0;
   for (const auto i : c10::irange(split_sizes.size())) {
     ET_CHECK_OR_RETURN_FALSE(
-        split_sizes[i] >= 0, "All split sizes must be non negative.");
+        split_sizes[i] >= 0,
+        "All split sizes must be non negative; split_sizes[%zu] = %" PRId64,
+        i,
+        split_sizes[i]);
     sum += split_sizes[i];
   }
 
   const ssize_t dim_size = in.size(dim);
   ET_CHECK_OR_RETURN_FALSE(
       sum == dim_size,
-      "Sum of split sizes does not match input size at given dim");
+      "Sum of split sizes does not match input size at given dim; sum = %" PRId64
+      ", dim_size = %zd",
+      sum,
+      dim_size);
 
   return true;
 }
diff --git a/kernels/portable/cpu/util/distance_util.cpp b/kernels/portable/cpu/util/distance_util.cpp
index 21a111d2c47..e7f146e2e9d 100644
--- a/kernels/portable/cpu/util/distance_util.cpp
+++ b/kernels/portable/cpu/util/distance_util.cpp
@@ -14,7 +14,8 @@ namespace executor {
 bool check_pdist_args(const Tensor& in, double p, const Tensor& out) {
   ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out));
   ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(in, 2));
-  ET_CHECK_OR_RETURN_FALSE(p >= 0, "pdist only supports non-negative p values");
+  ET_CHECK_OR_RETURN_FALSE(
+      p >= 0, "pdist only supports non-negative p values; p = %.6f", p);
   return true;
 }
 
@@ -39,7 +40,8 @@ bool check_cdist_args(
   ET_LOG_AND_RETURN_IF_FALSE(tensor_has_rank_greater_or_equal_to(x2, 2));
   ET_LOG_AND_RETURN_IF_FALSE(
       tensors_have_same_size_at_dims(x1, x1.dim() - 1, x2, x2.dim() - 1));
-  ET_CHECK_OR_RETURN_FALSE(p >= 0, "cdist only supports non-negative p values");
+  ET_CHECK_OR_RETURN_FALSE(
+      p >= 0, "cdist only supports non-negative p values; p = %.6f", p);
   if (compute_mode.has_value()) {
     int64_t mode = compute_mode.value();
     ET_CHECK_OR_RETURN_FALSE(
diff --git a/kernels/portable/cpu/util/index_util.cpp b/kernels/portable/cpu/util/index_util.cpp
index bcf15c4bb4c..bfdd708912e 100644
--- a/kernels/portable/cpu/util/index_util.cpp
+++ b/kernels/portable/cpu/util/index_util.cpp
@@ -23,12 +23,15 @@ bool check_gather_args(
   ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim));
   ET_CHECK_OR_RETURN_FALSE(
       index.scalar_type() == ScalarType::Long,
-      "Expected dypte int64 for index");
+      "Expected dtype int64 for index; index.scalar_type() = %d",
+      static_cast<int>(index.scalar_type()));
   if (index.numel() != 0) {
     ET_CHECK_OR_RETURN_FALSE(
         nonzero_dim(in) == nonzero_dim(index),
         "self and index should have the same dimensionality when index is not empty "
-        "except for the case when one has dimension 0 and the other has dimension 1");
+        "except for the case when one has dimension 0 and the other has dimension 1; nonzero_dim(in) = %zd, nonzero_dim(index) = %zd",
+        nonzero_dim(in),
+        nonzero_dim(index));
   }
 
   // Normalize dim to non-negative value
@@ -67,7 +70,8 @@ bool check_index_select_args(
   dim = dim < 0 ? dim + nonzero_dim(in) : dim;
   ET_CHECK_OR_RETURN_FALSE(
       nonempty_size(in, dim) > 0,
-      "index_select: Indexing axis dim should be positive");
+      "index_select: Indexing axis dim should be positive; nonempty_size(in, dim) = %zd",
+      nonempty_size(in, dim));
 
   ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out));
   ET_CHECK_OR_RETURN_FALSE(
@@ -80,7 +84,8 @@ bool check_index_select_args(
   if (index.dim() > 0 && in.dim() == 0) {
     ET_CHECK_OR_RETURN_FALSE(
         index.numel() == 1,
-        "index_select: Index to scalar must have exactly 1 value");
+        "index_select: Index to scalar must have exactly 1 value; index.numel() = %zd",
+        index.numel());
   }
 
   if (index.scalar_type() == ScalarType::Long) {
@@ -150,7 +155,8 @@ bool check_scatter_add_args(
   ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(self, src));
   ET_CHECK_OR_RETURN_FALSE(
       index.scalar_type() == ScalarType::Long,
-      "Expected dypte int64 for index");
+      "Expected dtype int64 for index; index.scalar_type() = %d",
+      static_cast<int>(index.scalar_type()));
   ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(self, dim));
 
   if (index.numel() == 0) {
@@ -160,7 +166,10 @@ bool check_scatter_add_args(
   ET_CHECK_OR_RETURN_FALSE(
       nonzero_dim(self) == nonzero_dim(src) &&
           nonzero_dim(self) == nonzero_dim(index),
-      "self, index and src should have same number of dimensions.");
+      "self, index and src should have same number of dimensions; nonzero_dim(self) = %zd, nonzero_dim(src) = %zd, nonzero_dim(index) = %zd",
+      nonzero_dim(self),
+      nonzero_dim(src),
+      nonzero_dim(index));
 
   // Normalize dim to non-negative value
   if (dim < 0) {
diff --git a/kernels/portable/cpu/util/kernel_ops_util.cpp b/kernels/portable/cpu/util/kernel_ops_util.cpp
index 00b088a5cec..d9a12f1e7da 100644
--- a/kernels/portable/cpu/util/kernel_ops_util.cpp
+++ b/kernels/portable/cpu/util/kernel_ops_util.cpp
@@ -254,7 +254,10 @@ bool check_arange_args(double start, double end, double step, Tensor& out) {
 
   ET_CHECK_OR_RETURN_FALSE(
       (step > 0 && (end >= start)) || (step < 0 && (end <= start)),
-      "upper bound and larger bound inconsistent with step sign");
+      "upper bound and larger bound inconsistent with step sign; step = %.6f, start = %.6f, end = %.6f",
+      step,
+      start,
+      end);
 
   return true;
 }
@@ -276,7 +279,8 @@ bool check_avg_pool2d_args(
   ET_CHECK_OR_RETURN_FALSE(
       (in.dim() == 3 && in.size(0) > 0 && in.size(1) > 0 && in.size(2) > 0) ||
           (in.dim() == 4 && in.size(1) > 0 && in.size(2) > 0 && in.size(3) > 0),
-      "Expected 3D or 4D (batch mode) tensor with optional 0 dim batch size for input");
+      "Expected 3D or 4D (batch mode) tensor with optional 0 dim batch size for input; in.dim() = %zd",
+      in.dim());
 
   ET_LOG_AND_RETURN_IF_FALSE(
       kernel_size_is_valid(kernel_size, /*kernel_ndim=*/2));
@@ -347,8 +351,9 @@ bool check_convolution_args(
     ET_CHECK_OR_RETURN_FALSE(
         bias.value().size(0) ==
             transposed ? groups * weight.size(1) : weight.size(0),
-        "bias length must equal number of output channels, but got %zd",
-        bias.value().size(0));
+        "bias length must equal number of output channels, but got %zd; expected %" PRId64,
+        bias.value().size(0),
+        transposed ? groups * weight.size(1) : weight.size(0));
   }
 
   int64_t kernel_size[2];
@@ -398,7 +403,9 @@ bool check_convolution_args(
   } else {
     ET_CHECK_OR_RETURN_FALSE(
         in.size(1) == weight.size(0),
-        "input channels must match weight.size(0) in transposed convolution");
+        "input channels must match weight.size(0) in transposed convolution; in.size(1) = %zd, weight.size(0) = %zd",
+        in.size(1),
+        weight.size(0));
   }
 
   return true;
@@ -484,7 +491,8 @@ bool check_max_pool2d_with_indices_args(
   ET_CHECK_OR_RETURN_FALSE(
       (in.dim() == 3 && in.size(0) > 0 && in.size(1) > 0 && in.size(2) > 0) ||
          (in.dim() == 4 && in.size(1) > 0 && in.size(2) > 0 && in.size(3) > 0),
-      "Expected 3D or 4D (batch mode) tensor with optional 0 dim batch size for input");
+      "Expected 3D or 4D (batch mode) tensor with optional 0 dim batch size for input; in.dim() = %zd",
+      in.dim());
 
   ET_LOG_AND_RETURN_IF_FALSE(
       kernel_size_is_valid(kernel_size, /*kernel_ndim=*/2));
@@ -545,11 +553,15 @@ bool check_constant_pad_args(
   ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_rank(in, out));
 
   ET_CHECK_OR_RETURN_FALSE(
-      pad.size() % 2 == 0, "Padding array must be a multiple of 2");
+      pad.size() % 2 == 0,
+      "Padding array must be a multiple of 2; pad.size() = %zu",
+      pad.size());
 
   ET_CHECK_OR_RETURN_FALSE(
       static_cast<ssize_t>(pad.size() / 2) <= in.dim(),
-      "Padding array contains too many elements");
+      "Padding array contains too many elements; pad.size()/2 = %zu, in.dim() = %zd",
+      pad.size() / 2,
+      in.dim());
 
   return true;
 }
diff --git a/kernels/portable/cpu/util/normalization_ops_util.cpp b/kernels/portable/cpu/util/normalization_ops_util.cpp
index db18cf0c053..ce37d29ef34 100644
--- a/kernels/portable/cpu/util/normalization_ops_util.cpp
+++ b/kernels/portable/cpu/util/normalization_ops_util.cpp
@@ -84,15 +84,22 @@ bool check_layer_norm_args(
   size_t ndim = normalized_shape.size();
   ET_CHECK_OR_RETURN_FALSE(
       ndim >= 1,
-      "Expected normalized_shape to be at least 1-dimensional, i.e., containing at least one element.");
+      "Expected normalized_shape to be at least 1-dimensional, i.e., containing at least one element; ndim = %zu",
+      ndim);
   ET_CHECK_OR_RETURN_FALSE(
       in.dim() >= static_cast<ssize_t>(ndim),
-      "Expected input tensor to have rank >= the length of normalized_shape.");
+      "Expected input tensor to have rank >= the length of normalized_shape; in.dim() = %zd, ndim = %zu",
+      in.dim(),
+      ndim);
   size_t shift = in.dim() - ndim;
   for (const auto d : c10::irange(ndim)) {
     ET_CHECK_OR_RETURN_FALSE(
         in.size(d + shift) == normalized_shape[d],
-        "Expected normalized_shape to match the sizes of input's rightmost dimensions.");
+        "Expected normalized_shape to match the sizes of input's rightmost dimensions; in.size(%zu) = %zd, normalized_shape[%zu] = %" PRId64,
+        d + shift,
+        in.size(d + shift),
+        d,
+        normalized_shape[d]);
   }
   executorch::aten::SizesType shape[ndim];
   for (const auto i : c10::irange(ndim)) {
@@ -146,18 +153,32 @@ bool check_group_norm_args(
   ET_LOG_AND_RETURN_IF_FALSE(in.size(1) == C);
   ET_LOG_AND_RETURN_IF_FALSE(in.numel() == N * C * HxW);
   ET_CHECK_OR_RETURN_FALSE(
-      group > 0, "Expected number of groups to be greater than 0");
+      group > 0,
+      "Expected number of groups to be greater than 0; group = %" PRId64,
+      group);
   ET_CHECK_OR_RETURN_FALSE(
       C % group == 0,
-      "Expected number of channels in input to be divisible by number of groups");
+      "Expected number of channels in input to be divisible by number of groups; C = %" PRId64
+      ", group = %" PRId64 ", C %% group = %" PRId64,
+      C,
+      group,
+      C % group);
   ET_CHECK_OR_RETURN_FALSE(
       !weight.has_value() ||
           (weight.value().dim() == 1 && weight.value().size(0) == C),
-      "Expected weight to be a vector of size equal to the number of channels in input");
+      "Expected weight to be a vector of size equal to the number of channels in input; weight.has_value() = %d, weight.dim() = %zd, weight.size(0) = %zd, C = %" PRId64,
+      weight.has_value(),
+      weight.has_value() ? weight.value().dim() : -1,
+      weight.has_value() ? weight.value().size(0) : -1,
+      C);
   ET_CHECK_OR_RETURN_FALSE(
       !bias.has_value() ||
           (bias.value().dim() == 1 && bias.value().size(0) == C),
-      "Expected bias to be a vector of size equal to the number of channels in input");
+      "Expected bias to be a vector of size equal to the number of channels in input; bias.has_value() = %d, bias.dim() = %zd, bias.size(0) = %zd, C = %" PRId64,
+      bias.has_value(),
+      bias.has_value() ? bias.value().dim() : -1,
+      bias.has_value() ? bias.value().size(0) : -1,
+      C);
 
   if (weight.has_value()) {
     ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, weight.value()));
diff --git a/kernels/portable/cpu/util/repeat_util.cpp b/kernels/portable/cpu/util/repeat_util.cpp
index 63ca8d019cf..3577c1457a2 100644
--- a/kernels/portable/cpu/util/repeat_util.cpp
+++ b/kernels/portable/cpu/util/repeat_util.cpp
@@ -28,7 +28,9 @@ bool check_repeat_args(
   // Ensure the self tensors list is non-empty.
   ET_CHECK_OR_RETURN_FALSE(
       static_cast<ssize_t>(repeats.size()) >= self.dim(),
-      "Number of dimensions of repeat dims can not be smaller than number of dimensions of tensor");
+      "Number of dimensions of repeat dims can not be smaller than number of dimensions of tensor; repeats.size() = %zu, self.dim() = %zd",
+      repeats.size(),
+      self.dim());
 
   // Repeat arrayref shall not contain negative element.
   bool all_non_negative = true;
@@ -41,7 +43,7 @@ bool check_repeat_args(
 
   /// Check if out.size() is legal.
   ET_CHECK_OR_RETURN_FALSE(
       static_cast<size_t>(out.dim()) == repeats.size(),
-      "The dimension of out shall equal size of repeats, but now is %zd and %zd",
+      "The dimension of out shall equal size of repeats, but now is %zd and %zu",
       out.dim(),
       repeats.size());
@@ -50,7 +52,7 @@ bool check_repeat_args(
   // dimension of out tensor shall have more than or equal to self tensor
   ET_CHECK_OR_RETURN_FALSE(
       static_cast<size_t>(out.dim()) <= kTensorDimensionLimit,
-      "The dimension of input and output should not be larger than %zd",
+      "The dimension of input and output should not be larger than %zu",
       kTensorDimensionLimit);
 
   ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(out, self));
diff --git a/kernels/portable/cpu/util/slice_util.cpp b/kernels/portable/cpu/util/slice_util.cpp
index 5761dee0ba7..a5bdb1a1e7b 100644
--- a/kernels/portable/cpu/util/slice_util.cpp
+++ b/kernels/portable/cpu/util/slice_util.cpp
@@ -25,7 +25,8 @@ bool check_narrow_copy_args(
   ET_LOG_AND_RETURN_IF_FALSE(in.dim() > 0);
   ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out));
   ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim));
-  ET_CHECK_OR_RETURN_FALSE(lenth >= 0, "lenth must be non-negative");
+  ET_CHECK_OR_RETURN_FALSE(
+      lenth >= 0, "lenth must be non-negative; lenth = %" PRId64, lenth);
   ET_LOG_AND_RETURN_IF_FALSE(start >= -in.size(dim));
   ET_LOG_AND_RETURN_IF_FALSE(start <= in.size(dim));
   if (start < 0) {
@@ -57,7 +58,8 @@ bool check_slice_copy_args(
   ET_LOG_AND_RETURN_IF_FALSE(in.dim() > 0);
   ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out));
   ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim));
-  ET_CHECK_OR_RETURN_FALSE(step > 0, "slice step must be greater than zero");
+  ET_CHECK_OR_RETURN_FALSE(
+      step > 0, "slice step must be greater than zero; step = %" PRId64, step);
   return true;
 }
 
@@ -89,7 +91,8 @@ bool check_slice_scatter_args(
   ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_rank(input, src));
 
   // Check step. Step must be greater than zero
-  ET_CHECK_OR_RETURN_FALSE(step > 0, "slice step must be greater than zero");
+  ET_CHECK_OR_RETURN_FALSE(
+      step > 0, "slice step must be greater than zero; step = %" PRId64, step);
 
   // The size of src tensor should follow these rules:
   // - src.size(i) shall equal to input.size(i) if i != dim,