Tiny fixes to make gcc pedantic build happy #8933

Merged
merged 6 commits on Mar 25, 2025
Changes from all commits
2 changes: 1 addition & 1 deletion kernels/portable/cpu/op_constant_pad_nd.cpp
@@ -189,7 +189,7 @@ Tensor& constant_pad_nd_out(
   CTYPE value_v;
   ET_SWITCH_SCALAR_OBJ_TYPES(
       value_type, ctx, "constant_pad_nd.out", CTYPE_VALUE, [&]() {
-        CTYPE_VALUE val;
+        CTYPE_VALUE val = 0;
         utils::extract_scalar(value, &val);
         value_v = static_cast<CTYPE>(val);
       });
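Most of the hunks in this PR (this file, op_fill.cpp, op_hardtanh.cpp, op_leaky_relu.cpp, op_masked_fill.cpp, op_scalar_tensor.cpp) are the same one-line fix: a scalar is extracted inside a lambda handed to an ET_SWITCH_* dispatch macro, and the result is read after the lambda returns. Under a pedantic GCC build (presumably something like -Wall -Wextra -Werror; the exact flags are not shown in this PR) the compiler cannot prove the lambda ran on every path, so the later read trips -Wmaybe-uninitialized. A minimal sketch of the pattern, with a stand-in dispatch function instead of the real macros:

#include <cstdio>

// Stand-in for ET_SWITCH_SCALAR_OBJ_TYPES: runs the lambda only when a
// runtime condition holds, which is all the compiler can see.
template <typename F>
void dispatch_if(bool type_matches, F&& body) {
  if (type_matches) {
    body();
  }
}

int main(int argc, char**) {
  double val;  // g++ -O2 -Wall: "'val' may be used uninitialized"
  dispatch_if(argc > 1, [&]() { val = 3.0; });
  std::printf("%f\n", val);

  double val_fixed = 0;  // the fix applied in this PR: a benign default
  dispatch_if(argc > 1, [&]() { val_fixed = 3.0; });
  std::printf("%f\n", val_fixed);
  return 0;
}

Whether the warning actually fires can depend on optimization level and inlining, which is consistent with it surfacing only in a pedantic CI configuration.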
2 changes: 1 addition & 1 deletion kernels/portable/cpu/op_fill.cpp
@@ -45,7 +45,7 @@ Tensor& fill_scalar_out(
   ET_SWITCH_REALHBBF16_TYPES(a_type, ctx, "fill.Scalar_out", CTYPE_A, [&] {
     CTYPE_A b_casted;
     ET_SWITCH_SCALAR_OBJ_TYPES(b_type, ctx, "fill.Scalar_out", CTYPE_B, [&] {
-      CTYPE_B b_val;
+      CTYPE_B b_val = 0;
       utils::extract_scalar(b, &b_val);
       b_casted = static_cast<CTYPE_A>(b_val);
     });
4 changes: 2 additions & 2 deletions kernels/portable/cpu/op_hardtanh.cpp
@@ -49,14 +49,14 @@ Tensor& hardtanh_out(
   ET_SWITCH_REALHBF16_TYPES(in_type, ctx, "hardtanh.out", CTYPE, [&]() {
     CTYPE min_casted;
     ET_SWITCH_SCALAR_OBJ_TYPES(min_type, ctx, "hardtanh.out", CTYPE_MIN, [&]() {
-      CTYPE_MIN min_val;
+      CTYPE_MIN min_val = 0;
       utils::extract_scalar(min, &min_val);
       min_casted = static_cast<CTYPE>(min_val);
     });

     CTYPE max_casted;
     ET_SWITCH_SCALAR_OBJ_TYPES(max_type, ctx, "hardtanh.out", CTYPE_MAX, [&]() {
-      CTYPE_MAX max_val;
+      CTYPE_MAX max_val = 0;
       utils::extract_scalar(max, &max_val);
       max_casted = static_cast<CTYPE>(max_val);
     });
4 changes: 2 additions & 2 deletions kernels/portable/cpu/op_leaky_relu.cpp
@@ -45,10 +45,10 @@ Tensor& leaky_relu_out(
   ET_KERNEL_CHECK(ctx, in_type == out_type, InvalidArgument, out);

   ET_SWITCH_FLOATHBF16_TYPES(in_type, ctx, "leaky_relu.out", CTYPE, [&]() {
-    CTYPE negative_slope_casted;
+    CTYPE negative_slope_casted = 0;
     ET_SWITCH_SCALAR_OBJ_TYPES(
         sc_type, ctx, "leaky_relu.out", CTYPE_MIN, [&]() {
-          CTYPE_MIN negative_slope_val;
+          CTYPE_MIN negative_slope_val = 0;
           utils::extract_scalar(negative_slope, &negative_slope_val);
           negative_slope_casted = static_cast<CTYPE>(negative_slope_val);
         });
2 changes: 1 addition & 1 deletion kernels/portable/cpu/op_masked_fill.cpp
@@ -46,7 +46,7 @@ Tensor& masked_fill_scalar_out(
       in_type, ctx, "masked_fill.Scalar_out", CTYPE, [&]() {
         ET_SWITCH_REAL_TYPES_AND(
             Bool, val_type, ctx, "masked_fill.Scalar_out", CTYPE_VAL, [&]() {
-              CTYPE_VAL value_v;
+              CTYPE_VAL value_v = 0;
               utils::extract_scalar(value, &value_v);
               CTYPE val = static_cast<CTYPE>(value_v);

2 changes: 1 addition & 1 deletion kernels/portable/cpu/op_scalar_tensor.cpp
@@ -28,7 +28,7 @@ scalar_tensor_out(KernelRuntimeContext& ctx, const Scalar& s, Tensor& out) {
   ET_SWITCH_REAL_TYPES_AND3(
       Half, Bool, BFloat16, out_type, ctx, name, CTYPE, [&]() {
         ET_SWITCH_SCALAR_OBJ_TYPES(s_type, ctx, name, CTYPE_S, [&]() {
-          CTYPE_S val_s;
+          CTYPE_S val_s = 0;
           utils::extract_scalar(s, &val_s);
           out.mutable_data_ptr<CTYPE>()[0] = convert<CTYPE, CTYPE_S>(val_s);
         });
2 changes: 1 addition & 1 deletion kernels/portable/cpu/util/repeat_util.cpp
@@ -68,7 +68,7 @@ bool check_repeat_args(
   }
   for (size_t i = 0; i < repeats.size(); i++) {
     ET_CHECK_OR_RETURN_FALSE(
-        reformat_self_size[i] * repeats[i] ==
+        reformat_self_size[i] * static_cast<size_t>(repeats[i]) ==
            static_cast<uint64_t>(out.size(i)),
        "Expect out size at dimension %zu is %" PRId64 ", but now is %zd",
        i,
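This hunk addresses a different warning class: reformat_self_size[i] is unsigned (size_t) while repeats[i] is a signed int64_t, and mixing the two in arithmetic that feeds an equality check draws a signed/unsigned warning under pedantic flags (-Wsign-compare or -Wsign-conversion, depending on the build; the exact flag is not shown here). The cast makes the conversion explicit. A minimal sketch with hypothetical names:

#include <cstddef>
#include <cstdint>

// check_repeat_size is a hypothetical reduction of the check in
// repeat_util.cpp; repeat is assumed already validated as non-negative,
// so casting it to size_t is safe.
bool check_repeat_size(size_t self_size, int64_t repeat, uint64_t out_size) {
  // Warns under -Wsign-conversion: the usual arithmetic conversions turn
  // 'repeat' unsigned implicitly before the multiply.
  // return self_size * repeat == out_size;

  // Fixed: the conversion is now explicit and intentional.
  return self_size * static_cast<size_t>(repeat) == out_size;
}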
6 changes: 5 additions & 1 deletion runtime/executor/method_meta.cpp
@@ -233,7 +233,11 @@ bool MethodMeta::uses_backend(const char* backend_name) const {
   const auto delegates = s_plan_->delegates();
   for (size_t i = 0; i < delegates->size(); i++) {
     auto delegate = delegates->Get(i);
-    if (std::strcmp(delegate->id()->c_str(), backend_name) == 0) {
+    auto backend_name_len = std::strlen(backend_name);
+    auto delegate_id_len = delegate->id()->size();
+    if (backend_name_len == delegate_id_len &&
+        std::strncmp(delegate->id()->c_str(), backend_name, backend_name_len) ==
+            0) {
Comment on lines +236 to +240
Contributor
you know what, I'm really confused about what error we're fixing here. how is this better than strcmp?

       return true;
     }
   }
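The reviewer's question stands unanswered in this thread. One plausible reading, not confirmed in the PR, is that the delegate id is a flatbuffers string carrying an explicit length, so comparing lengths first gives a cheap early-out and the bounded strncmp never scans past data known to exist, rather than relying on both arguments being well-formed NUL-terminated strings. A sketch of the pattern with a hypothetical helper, using std::string as a stand-in for the flatbuffers string:

#include <cstring>
#include <string>

// ids_equal is a hypothetical reduction of the check above.
bool ids_equal(const std::string& delegate_id, const char* backend_name) {
  const size_t backend_name_len = std::strlen(backend_name);
  // Length check first: cheap rejection of mismatches, and it gives
  // strncmp a bound both strings are known to satisfy.
  return backend_name_len == delegate_id.size() &&
      std::strncmp(delegate_id.c_str(), backend_name, backend_name_len) == 0;
}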
2 changes: 1 addition & 1 deletion tools/cmake/Utils.cmake
@@ -70,7 +70,7 @@ function(executorch_print_configuration_summary)
   message(STATUS "  EXECUTORCH_BUILD_EXTENSION_FLAT_TENSOR : "
                  "${EXECUTORCH_BUILD_EXTENSION_FLAT_TENSOR}"
   )
-  message(STATUS "  EXECUTORCH_BUILD_EXTENSION_LLM : "
+  message(STATUS "  EXECUTORCH_BUILD_EXTENSION_LLM         : "
                  "${EXECUTORCH_BUILD_EXTENSION_LLM}"
   )
   message(STATUS "  EXECUTORCH_BUILD_EXTENSION_MODULE : "