
Commit 5f23603

Author: lind (committed)
run lintrunner
1 parent d136206 commit 5f23603


5 files changed: +26 -29 lines


backends/cadence/fusion_g3/operators/op_add.cpp

Lines changed: 6 additions & 6 deletions
@@ -95,26 +95,26 @@ Tensor& add_out(
   }
 
   for (int i = 0; i < max_dim; i++) {
-    out_shape[i] = 1;
+    out_shape[i] = 1;
     inp1_shape[i] = 1;
     inp2_shape[i] = 1;
   }
-
-  int offset_out = max_dim - out.dim();
+
+  int offset_out = max_dim - out.dim();
   int offset_inp1 = max_dim - a.dim();
   int offset_inp2 = max_dim - b.dim();
-
+
   for (int i = 0; i < out.dim(); i++) {
     out_shape[i + offset_out] = out.size(i);
   }
   for (int i = 0; i < a.dim(); i++) {
     inp1_shape[i + offset_inp1] = a.size(i);
   }
   for (int i = 0; i < b.dim(); i++) {
-    inp2_shape[i + offset_inp2] = b.size(i);
+    inp2_shape[i + offset_inp2] = b.size(i);
   }
 
-  if ((compute_type == ScalarType::Int) && (optimized)){
+  if ((compute_type == ScalarType::Int) && (optimized)) {
     const int* const inp1_data = a.const_data_ptr<int>();
     const int* const inp2_data = b.const_data_ptr<int>();
     int* const out_data = out.mutable_data_ptr<int>();
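
For context, the loops in this hunk implement the usual right-aligned shape padding for broadcasting: every shape buffer is first filled with 1s up to max_dim, then each tensor's real sizes are copied in at an offset from the right. A minimal standalone sketch of the same pattern, using plain arrays and a hypothetical 2-D [3, 4] input in place of ExecuTorch Tensors:

// Illustrative sketch only; max_dim, a_dim, and a_sizes are hypothetical
// stand-ins for the Tensor dimensions used in op_add.cpp.
#include <cstdio>

int main() {
  const int max_dim = 4;
  const int a_dim = 2;
  const int a_sizes[a_dim] = {3, 4};

  int inp1_shape[max_dim];
  for (int i = 0; i < max_dim; i++) {
    inp1_shape[i] = 1; // pad every dimension with 1
  }

  const int offset_inp1 = max_dim - a_dim; // right-align the real sizes
  for (int i = 0; i < a_dim; i++) {
    inp1_shape[i + offset_inp1] = a_sizes[i];
  }

  // Prints "1 1 3 4": the [3, 4] shape promoted to 4-D, ready to broadcast
  // against another shape padded the same way.
  for (int i = 0; i < max_dim; i++) {
    printf("%d ", inp1_shape[i]);
  }
  printf("\n");
  return 0;
}

Right-alignment matches PyTorch/NumPy broadcasting semantics, where missing leading dimensions are treated as size 1.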

backends/cadence/fusion_g3/operators/op_mul.cpp

Lines changed: 5 additions & 5 deletions
@@ -87,23 +87,23 @@ Tensor& mul_out(
   }
 
   for (int i = 0; i < max_dim; i++) {
-    out_shape[i] = 1;
+    out_shape[i] = 1;
     inp1_shape[i] = 1;
     inp2_shape[i] = 1;
   }
-
-  int offset_out = max_dim - out.dim();
+
+  int offset_out = max_dim - out.dim();
   int offset_inp1 = max_dim - a.dim();
   int offset_inp2 = max_dim - b.dim();
-
+
   for (int i = 0; i < out.dim(); i++) {
     out_shape[i + offset_out] = out.size(i);
   }
   for (int i = 0; i < a.dim(); i++) {
     inp1_shape[i + offset_inp1] = a.size(i);
   }
   for (int i = 0; i < b.dim(); i++) {
-    inp2_shape[i + offset_inp2] = b.size(i);
+    inp2_shape[i + offset_inp2] = b.size(i);
   }
 
   if ((compute_type == ScalarType::Int) && (optimized)) {

backends/cadence/hifi/operators/op_maximum.cpp

Lines changed: 0 additions & 1 deletion
@@ -23,7 +23,6 @@ using torch::executor::apply_binary_elementwise_fn;
 using torch::executor::Error;
 using torch::executor::resize_to_broadcast_target_size;
 
-
 namespace cadence {
 namespace impl {
 namespace HiFi {

backends/cadence/hifi/operators/op_pow.cpp

Lines changed: 0 additions & 1 deletion
@@ -351,4 +351,3 @@ Tensor& pow_Scalar_out(
 } // namespace HiFi
 } // namespace impl
 } // namespace cadence
-

backends/cadence/hifi/operators/quantized_linear_out.cpp

Lines changed: 15 additions & 16 deletions
@@ -26,8 +26,7 @@ using ::executorch::aten::Tensor;
 using ::executorch::runtime::getLeadingDims;
 using ::executorch::runtime::KernelRuntimeContext;
 
-
-// The nnlib kernel to compute quantized linear via matmul.
+// The nnlib kernel to compute quantized linear via matmul.
 
 void _quantized_linear_asym8u(
     const Tensor& in,
@@ -48,22 +47,22 @@ void _quantized_linear_asym8u(
   const int32_t* __restrict__ bias_data = bias.const_data_ptr<int32_t>();
   uint8_t* __restrict__ out_data = out.mutable_data_ptr<uint8_t>();
   int32_t ret = xa_nn_matmul_asym8uxasym8u_asym8u(
-      out_data,
-      weight_data,
-      in_data,
-      bias_data,
-      out_dim,
-      in_dim,
-      in_dim,
-      leading_dims,
-      in_dim,
-      out_dim,
-      1,
+      out_data,
+      weight_data,
+      in_data,
+      bias_data,
+      out_dim,
+      in_dim,
+      in_dim,
+      leading_dims,
+      in_dim,
+      out_dim,
+      1,
       -weight_zero_point.const_data_ptr<int32_t>()[0], // mat1_zero_bias
       -in_zero_point, // mat2_zero_bias
-      out_multiplier.const_data_ptr<int32_t>()[0],
-      out_shift.const_data_ptr<int32_t>()[0],
-      out_zero_point);
+      out_multiplier.const_data_ptr<int32_t>()[0],
+      out_shift.const_data_ptr<int32_t>()[0],
+      out_zero_point);
   ET_DCHECK_MSG(ret == 0, "HiFi quantized::linear failed");
 }
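
For readers unfamiliar with the kernel being called here: the comment and argument list above describe an asymmetric uint8 quantized linear layer computed as an integer matmul followed by requantization. Below is a rough reference sketch of that math, not the nnlib kernel itself; the names are illustrative, and a simple float out_scale stands in for the fixed-point out_multiplier/out_shift requantization path used by the real kernel.

// Reference-only sketch; not xa_nn_matmul_asym8uxasym8u_asym8u.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

std::vector<uint8_t> quantized_linear_ref(
    const std::vector<uint8_t>& in,     // [rows, in_dim], row-major
    const std::vector<uint8_t>& weight, // [out_dim, in_dim], row-major
    const std::vector<int32_t>& bias,   // [out_dim]
    int rows,
    int in_dim,
    int out_dim,
    int32_t in_zero_point,
    int32_t weight_zero_point,
    float out_scale, // hypothetical stand-in for out_multiplier/out_shift
    int32_t out_zero_point) {
  std::vector<uint8_t> out(rows * out_dim);
  for (int r = 0; r < rows; r++) {
    for (int o = 0; o < out_dim; o++) {
      int32_t acc = bias[o];
      for (int k = 0; k < in_dim; k++) {
        // Accumulate on zero-point-corrected values, which is what the
        // negated zero-bias arguments in the call above achieve.
        acc += (int32_t(in[r * in_dim + k]) - in_zero_point) *
            (int32_t(weight[o * in_dim + k]) - weight_zero_point);
      }
      // Requantize to uint8: scale, add the output zero point, clamp.
      const int32_t q =
          int32_t(std::lround(acc * out_scale)) + out_zero_point;
      out[r * out_dim + o] = uint8_t(std::clamp<int32_t>(q, 0, 255));
    }
  }
  return out;
}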
