diff --git a/kernels/portable/cpu/op_constant_pad_nd.cpp b/kernels/portable/cpu/op_constant_pad_nd.cpp
index 71dc7ff658f..e1e37e5b4d2 100644
--- a/kernels/portable/cpu/op_constant_pad_nd.cpp
+++ b/kernels/portable/cpu/op_constant_pad_nd.cpp
@@ -189,7 +189,7 @@ Tensor& constant_pad_nd_out(
   CTYPE value_v;
   ET_SWITCH_SCALAR_OBJ_TYPES(
       value_type, ctx, "constant_pad_nd.out", CTYPE_VALUE, [&]() {
-        CTYPE_VALUE val;
+        CTYPE_VALUE val = 0;
         utils::extract_scalar(value, &val);
         value_v = static_cast<CTYPE>(val);
       });
diff --git a/kernels/portable/cpu/op_fill.cpp b/kernels/portable/cpu/op_fill.cpp
index 55187ba1b19..3ed8557c29e 100644
--- a/kernels/portable/cpu/op_fill.cpp
+++ b/kernels/portable/cpu/op_fill.cpp
@@ -45,7 +45,7 @@ Tensor& fill_scalar_out(
   ET_SWITCH_REALHBBF16_TYPES(a_type, ctx, "fill.Scalar_out", CTYPE_A, [&] {
     CTYPE_A b_casted;
     ET_SWITCH_SCALAR_OBJ_TYPES(b_type, ctx, "fill.Scalar_out", CTYPE_B, [&] {
-      CTYPE_B b_val;
+      CTYPE_B b_val = 0;
       utils::extract_scalar(b, &b_val);
       b_casted = static_cast<CTYPE_A>(b_val);
     });
diff --git a/kernels/portable/cpu/op_hardtanh.cpp b/kernels/portable/cpu/op_hardtanh.cpp
index 09855a175d8..97355b0f17c 100644
--- a/kernels/portable/cpu/op_hardtanh.cpp
+++ b/kernels/portable/cpu/op_hardtanh.cpp
@@ -49,14 +49,14 @@ Tensor& hardtanh_out(
   ET_SWITCH_REALHBF16_TYPES(in_type, ctx, "hardtanh.out", CTYPE, [&]() {
     CTYPE min_casted;
     ET_SWITCH_SCALAR_OBJ_TYPES(min_type, ctx, "hardtanh.out", CTYPE_MIN, [&]() {
-      CTYPE_MIN min_val;
+      CTYPE_MIN min_val = 0;
       utils::extract_scalar(min, &min_val);
       min_casted = static_cast<CTYPE>(min_val);
     });
 
     CTYPE max_casted;
     ET_SWITCH_SCALAR_OBJ_TYPES(max_type, ctx, "hardtanh.out", CTYPE_MAX, [&]() {
-      CTYPE_MAX max_val;
+      CTYPE_MAX max_val = 0;
       utils::extract_scalar(max, &max_val);
       max_casted = static_cast<CTYPE>(max_val);
     });
diff --git a/kernels/portable/cpu/op_leaky_relu.cpp b/kernels/portable/cpu/op_leaky_relu.cpp
index 71f01ef0640..a04365814c7 100644
--- a/kernels/portable/cpu/op_leaky_relu.cpp
+++ b/kernels/portable/cpu/op_leaky_relu.cpp
@@ -45,10 +45,10 @@ Tensor& leaky_relu_out(
   ET_KERNEL_CHECK(ctx, in_type == out_type, InvalidArgument, out);
 
   ET_SWITCH_FLOATHBF16_TYPES(in_type, ctx, "leaky_relu.out", CTYPE, [&]() {
-    CTYPE negative_slope_casted;
+    CTYPE negative_slope_casted = 0;
     ET_SWITCH_SCALAR_OBJ_TYPES(
         sc_type, ctx, "leaky_relu.out", CTYPE_MIN, [&]() {
-          CTYPE_MIN negative_slope_val;
+          CTYPE_MIN negative_slope_val = 0;
           utils::extract_scalar(negative_slope, &negative_slope_val);
           negative_slope_casted = static_cast<CTYPE>(negative_slope_val);
         });
diff --git a/kernels/portable/cpu/op_masked_fill.cpp b/kernels/portable/cpu/op_masked_fill.cpp
index 033c9320801..85006106070 100644
--- a/kernels/portable/cpu/op_masked_fill.cpp
+++ b/kernels/portable/cpu/op_masked_fill.cpp
@@ -46,7 +46,7 @@ Tensor& masked_fill_scalar_out(
       in_type, ctx, "masked_fill.Scalar_out", CTYPE, [&]() {
         ET_SWITCH_REAL_TYPES_AND(
             Bool, val_type, ctx, "masked_fill.Scalar_out", CTYPE_VAL, [&]() {
-              CTYPE_VAL value_v;
+              CTYPE_VAL value_v = 0;
               utils::extract_scalar(value, &value_v);
 
               CTYPE val = static_cast<CTYPE>(value_v);
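
Note on the `= 0` initializers in the kernel hunks above (and in op_scalar_tensor below): `utils::extract_scalar` reports failure through its return value and, as I read it, leaves the output argument untouched on failure, so the old uninitialized locals could be read with an indeterminate value — the pattern `-Wmaybe-uninitialized` flags. A minimal standalone sketch of the failure mode; `try_extract` is a hypothetical stand-in, not the ExecuTorch API:

```cpp
#include <cstdio>

// Hypothetical stand-in for utils::extract_scalar: returns false on
// failure and, crucially, does not write to *out in that case.
template <typename T>
bool try_extract(bool ok, T* out) {
  if (!ok) {
    return false;  // *out is left untouched, as on a failed extract.
  }
  *out = static_cast<T>(42);
  return true;
}

int main() {
  double unsafe;  // Pre-patch shape: indeterminate if extraction fails.
  (void)try_extract(false, &unsafe);
  // Reading `unsafe` here would be undefined behavior.

  double safe = 0;  // Post-patch shape: a defined fallback value.
  (void)try_extract(false, &safe);
  std::printf("safe = %f\n", safe);  // Well-defined: prints 0.
  return 0;
}
```
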
diff --git a/kernels/portable/cpu/op_scalar_tensor.cpp b/kernels/portable/cpu/op_scalar_tensor.cpp
index f7a4acfdcee..5be65a2e060 100644
--- a/kernels/portable/cpu/op_scalar_tensor.cpp
+++ b/kernels/portable/cpu/op_scalar_tensor.cpp
@@ -28,7 +28,7 @@ scalar_tensor_out(KernelRuntimeContext& ctx, const Scalar& s, Tensor& out) {
   ET_SWITCH_REAL_TYPES_AND3(
       Half, Bool, BFloat16, out_type, ctx, name, CTYPE, [&]() {
         ET_SWITCH_SCALAR_OBJ_TYPES(s_type, ctx, name, CTYPE_S, [&]() {
-          CTYPE_S val_s;
+          CTYPE_S val_s = 0;
           utils::extract_scalar(s, &val_s);
           out.mutable_data_ptr<CTYPE>()[0] = convert<CTYPE, CTYPE_S>(val_s);
         });
diff --git a/kernels/portable/cpu/util/repeat_util.cpp b/kernels/portable/cpu/util/repeat_util.cpp
index be7231cb621..63ca8d019cf 100644
--- a/kernels/portable/cpu/util/repeat_util.cpp
+++ b/kernels/portable/cpu/util/repeat_util.cpp
@@ -68,7 +68,7 @@ bool check_repeat_args(
   }
   for (size_t i = 0; i < repeats.size(); i++) {
     ET_CHECK_OR_RETURN_FALSE(
-        reformat_self_size[i] * repeats[i] ==
+        reformat_self_size[i] * static_cast<size_t>(repeats[i]) ==
            static_cast<size_t>(out.size(i)),
        "Expect out size at dimension %zu is %" PRId64 ", but now is %zd",
        i,
diff --git a/runtime/executor/method_meta.cpp b/runtime/executor/method_meta.cpp
index eb019f64e71..8f84fea940f 100644
--- a/runtime/executor/method_meta.cpp
+++ b/runtime/executor/method_meta.cpp
@@ -233,7 +233,11 @@ bool MethodMeta::uses_backend(const char* backend_name) const {
   const auto delegates = s_plan_->delegates();
   for (size_t i = 0; i < delegates->size(); i++) {
     auto delegate = delegates->Get(i);
-    if (std::strcmp(delegate->id()->c_str(), backend_name) == 0) {
+    auto backend_name_len = std::strlen(backend_name);
+    auto delegate_id_len = delegate->id()->size();
+    if (backend_name_len == delegate_id_len &&
+        std::strncmp(delegate->id()->c_str(), backend_name, backend_name_len) ==
+            0) {
       return true;
     }
   }
diff --git a/tools/cmake/Utils.cmake b/tools/cmake/Utils.cmake
index 4408994562e..b66a4eb9cf5 100644
--- a/tools/cmake/Utils.cmake
+++ b/tools/cmake/Utils.cmake
@@ -70,7 +70,7 @@ function(executorch_print_configuration_summary)
   message(STATUS "  EXECUTORCH_BUILD_EXTENSION_FLAT_TENSOR        : "
                  "${EXECUTORCH_BUILD_EXTENSION_FLAT_TENSOR}"
   )
-  message(STATUS "  EXECUTORCH_BUILD_EXTENSION_LLM : "
+  message(STATUS "  EXECUTORCH_BUILD_EXTENSION_LLM                : "
                  "${EXECUTORCH_BUILD_EXTENSION_LLM}"
   )
   message(STATUS "  EXECUTORCH_BUILD_EXTENSION_MODULE             : "
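
Note on the method_meta.cpp hunk: replacing `strcmp` with a length check plus `strncmp` short-circuits on length mismatches and bounds every read to the stored length, so the comparison stays safe even if the delegate id were not NUL-terminated (e.g., in a corrupted program file). A self-contained sketch of the same comparison pattern; `equals_backend_name` and the data below are illustrative, not the ExecuTorch API:

```cpp
#include <cstdio>
#include <cstring>

// Length-checked string equality, mirroring the uses_backend() change:
// compare lengths first, then at most backend_name_len bytes. strncmp
// never touches candidate[candidate_len] or anything beyond it.
static bool equals_backend_name(
    const char* candidate, std::size_t candidate_len, const char* backend_name) {
  const std::size_t backend_name_len = std::strlen(backend_name);
  return backend_name_len == candidate_len &&
      std::strncmp(candidate, backend_name, backend_name_len) == 0;
}

int main() {
  // Illustrative id without a guaranteed trailing NUL, standing in for
  // a string owned by the flatbuffer-backed program.
  const char id[] = {'X', 'n', 'n', 'p', 'a', 'c', 'k',
                     'B', 'a', 'c', 'k', 'e', 'n', 'd'};
  std::printf("%d\n", equals_backend_name(id, sizeof(id), "XnnpackBackend"));  // 1
  std::printf("%d\n", equals_backend_name(id, sizeof(id), "Xnnpack"));         // 0
  return 0;
}
```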