From 5f23603f7eb206cbd05782cbe1c1ff2f84fb87cc Mon Sep 17 00:00:00 2001
From: lind
Date: Wed, 27 Nov 2024 09:55:12 -0800
Subject: [PATCH] run lintrunner

---
 .../cadence/fusion_g3/operators/op_add.cpp    | 12 +++----
 .../cadence/fusion_g3/operators/op_mul.cpp    | 10 +++---
 .../cadence/hifi/operators/op_maximum.cpp     |  1 -
 backends/cadence/hifi/operators/op_pow.cpp    |  1 -
 .../hifi/operators/quantized_linear_out.cpp   | 31 +++++++++----------
 5 files changed, 26 insertions(+), 29 deletions(-)

diff --git a/backends/cadence/fusion_g3/operators/op_add.cpp b/backends/cadence/fusion_g3/operators/op_add.cpp
index 551c6652f1d..9537cbacb70 100644
--- a/backends/cadence/fusion_g3/operators/op_add.cpp
+++ b/backends/cadence/fusion_g3/operators/op_add.cpp
@@ -95,15 +95,15 @@ Tensor& add_out(
   }
 
   for (int i = 0; i < max_dim; i++) {
-    out_shape[i] = 1; 
+    out_shape[i] = 1;
     inp1_shape[i] = 1;
     inp2_shape[i] = 1;
   }
-  
-  int offset_out = max_dim - out.dim(); 
+
+  int offset_out = max_dim - out.dim();
   int offset_inp1 = max_dim - a.dim();
   int offset_inp2 = max_dim - b.dim();
-  
+
   for (int i = 0; i < out.dim(); i++) {
     out_shape[i + offset_out] = out.size(i);
   }
@@ -111,10 +111,10 @@
     inp1_shape[i + offset_inp1] = a.size(i);
   }
   for (int i = 0; i < b.dim(); i++) {
-    inp2_shape[i + offset_inp2] = b.size(i); 
+    inp2_shape[i + offset_inp2] = b.size(i);
   }
 
-  if ((compute_type == ScalarType::Int) && (optimized)){
+  if ((compute_type == ScalarType::Int) && (optimized)) {
     const int* const inp1_data = a.const_data_ptr<int>();
     const int* const inp2_data = b.const_data_ptr<int>();
     int* const out_data = out.mutable_data_ptr<int>();
diff --git a/backends/cadence/fusion_g3/operators/op_mul.cpp b/backends/cadence/fusion_g3/operators/op_mul.cpp
index 82e84bdbe15..31cd50314e1 100644
--- a/backends/cadence/fusion_g3/operators/op_mul.cpp
+++ b/backends/cadence/fusion_g3/operators/op_mul.cpp
@@ -87,15 +87,15 @@ Tensor& mul_out(
   }
 
   for (int i = 0; i < max_dim; i++) {
-    out_shape[i] = 1; 
+    out_shape[i] = 1;
     inp1_shape[i] = 1;
     inp2_shape[i] = 1;
   }
-  
-  int offset_out = max_dim - out.dim(); 
+
+  int offset_out = max_dim - out.dim();
   int offset_inp1 = max_dim - a.dim();
   int offset_inp2 = max_dim - b.dim();
-  
+
   for (int i = 0; i < out.dim(); i++) {
     out_shape[i + offset_out] = out.size(i);
   }
@@ -103,7 +103,7 @@
     inp1_shape[i + offset_inp1] = a.size(i);
   }
   for (int i = 0; i < b.dim(); i++) {
-    inp2_shape[i + offset_inp2] = b.size(i); 
+    inp2_shape[i + offset_inp2] = b.size(i);
   }
 
   if ((compute_type == ScalarType::Int) && (optimized)) {
diff --git a/backends/cadence/hifi/operators/op_maximum.cpp b/backends/cadence/hifi/operators/op_maximum.cpp
index f9a3658891b..f85d3470e93 100644
--- a/backends/cadence/hifi/operators/op_maximum.cpp
+++ b/backends/cadence/hifi/operators/op_maximum.cpp
@@ -23,7 +23,6 @@ using torch::executor::apply_binary_elementwise_fn;
 using torch::executor::Error;
 using torch::executor::resize_to_broadcast_target_size;
 
-
 namespace cadence {
 namespace impl {
 namespace HiFi {
diff --git a/backends/cadence/hifi/operators/op_pow.cpp b/backends/cadence/hifi/operators/op_pow.cpp
index 9669e961230..1399c24a346 100644
--- a/backends/cadence/hifi/operators/op_pow.cpp
+++ b/backends/cadence/hifi/operators/op_pow.cpp
@@ -351,4 +351,3 @@ Tensor& pow_Scalar_out(
 } // namespace HiFi
 } // namespace impl
 } // namespace cadence
-
diff --git a/backends/cadence/hifi/operators/quantized_linear_out.cpp b/backends/cadence/hifi/operators/quantized_linear_out.cpp
index accc6101329..b8e1d117fb9 100644
--- a/backends/cadence/hifi/operators/quantized_linear_out.cpp
+++ b/backends/cadence/hifi/operators/quantized_linear_out.cpp
@@ -26,8 +26,7 @@
 using ::executorch::aten::Tensor;
 using ::executorch::runtime::getLeadingDims;
 using ::executorch::runtime::KernelRuntimeContext;
 
-
-  // The nnlib kernel to compute quantized linear via matmul.
+// The nnlib kernel to compute quantized linear via matmul.
 void _quantized_linear_asym8u(
     const Tensor& in,
@@ -48,22 +47,22 @@ void _quantized_linear_asym8u(
   const int32_t* __restrict__ bias_data = bias.const_data_ptr<int32_t>();
   uint8_t* __restrict__ out_data = out.mutable_data_ptr<uint8_t>();
   int32_t ret = xa_nn_matmul_asym8uxasym8u_asym8u(
-      out_data, 
-      weight_data, 
-      in_data, 
-      bias_data, 
-      out_dim, 
-      in_dim, 
-      in_dim, 
-      leading_dims, 
-      in_dim, 
-      out_dim, 
-      1, 
+      out_data,
+      weight_data,
+      in_data,
+      bias_data,
+      out_dim,
+      in_dim,
+      in_dim,
+      leading_dims,
+      in_dim,
+      out_dim,
+      1,
       -weight_zero_point.const_data_ptr<int32_t>()[0], // mat1_zero_bias
       -in_zero_point, // mat2_zero_bias
-      out_multiplier.const_data_ptr<int32_t>()[0], 
-      out_shift.const_data_ptr<int32_t>()[0], 
-      out_zero_point);
+      out_multiplier.const_data_ptr<int32_t>()[0],
+      out_shift.const_data_ptr<int32_t>()[0],
+      out_zero_point);
   ET_DCHECK_MSG(ret == 0, "HiFi quantized::linear failed");
 }
 