5 files changed: +26 −29 lines.

Every hunk in this diff is a formatting-only cleanup of the Cadence HiFi operator sources: trailing whitespace and stray blank lines are removed, brace spacing is fixed, and call arguments are re-indented. No behavior changes.

@@ -95,26 +95,26 @@ Tensor& add_out(
   }
 
   for (int i = 0; i < max_dim; i++) {
-    out_shape[i] = 1;
+    out_shape[i] = 1;
     inp1_shape[i] = 1;
     inp2_shape[i] = 1;
   }
-
-  int offset_out = max_dim - out.dim();
+
+  int offset_out = max_dim - out.dim();
   int offset_inp1 = max_dim - a.dim();
   int offset_inp2 = max_dim - b.dim();
-
+
   for (int i = 0; i < out.dim(); i++) {
     out_shape[i + offset_out] = out.size(i);
   }
   for (int i = 0; i < a.dim(); i++) {
     inp1_shape[i + offset_inp1] = a.size(i);
   }
   for (int i = 0; i < b.dim(); i++) {
-    inp2_shape[i + offset_inp2] = b.size(i);
+    inp2_shape[i + offset_inp2] = b.size(i);
   }
 
-  if ((compute_type == ScalarType::Int) && (optimized)){
+  if ((compute_type == ScalarType::Int) && (optimized)) {
     const int* const inp1_data = a.const_data_ptr<int>();
     const int* const inp2_data = b.const_data_ptr<int>();
     int* const out_data = out.mutable_data_ptr<int>();
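For context on the touched lines: add_out (and mul_out below) normalizes its operand shapes for broadcasting by filling every shape with 1s up to the common rank max_dim and then copying each tensor's real dimensions in right-aligned. A minimal standalone sketch of that idea, using a hypothetical pad_to_rank helper rather than the kernel's fixed-size arrays:

#include <cstdio>
#include <vector>

// Pad `shape` with leading 1s to rank `max_dim`, right-aligning its real
// dimensions, e.g. {3, 4} at max_dim = 3 becomes {1, 3, 4}.
std::vector<int> pad_to_rank(const std::vector<int>& shape, int max_dim) {
  std::vector<int> padded(max_dim, 1);                   // all dims start as 1
  int offset = max_dim - static_cast<int>(shape.size()); // same role as offset_out
  for (int i = 0; i < static_cast<int>(shape.size()); i++) {
    padded[i + offset] = shape[i];                       // right-align real dims
  }
  return padded;
}

int main() {
  for (int d : pad_to_rank({3, 4}, 3)) {
    std::printf("%d ", d); // prints: 1 3 4
  }
  std::printf("\n");
  return 0;
}

With this normalization in place, two dimensions are broadcast-compatible exactly when they are equal or one of them is 1.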
mul_out gets the identical cleanup:

@@ -87,23 +87,23 @@ Tensor& mul_out(
   }
 
   for (int i = 0; i < max_dim; i++) {
-    out_shape[i] = 1;
+    out_shape[i] = 1;
     inp1_shape[i] = 1;
     inp2_shape[i] = 1;
   }
-
-  int offset_out = max_dim - out.dim();
+
+  int offset_out = max_dim - out.dim();
   int offset_inp1 = max_dim - a.dim();
   int offset_inp2 = max_dim - b.dim();
-
+
   for (int i = 0; i < out.dim(); i++) {
     out_shape[i + offset_out] = out.size(i);
   }
   for (int i = 0; i < a.dim(); i++) {
     inp1_shape[i + offset_inp1] = a.size(i);
   }
   for (int i = 0; i < b.dim(); i++) {
-    inp2_shape[i + offset_inp2] = b.size(i);
+    inp2_shape[i + offset_inp2] = b.size(i);
   }
 
   if ((compute_type == ScalarType::Int) && (optimized)) {
One of two consecutive blank lines is dropped before the namespace block:

@@ -23,7 +23,6 @@ using torch::executor::apply_binary_elementwise_fn;
 using torch::executor::Error;
 using torch::executor::resize_to_broadcast_target_size;
 
-
 namespace cadence {
 namespace impl {
 namespace HiFi {
351
 } // namespace HiFi
 } // namespace impl
 } // namespace cadence
-
In the quantized linear kernel, a duplicated blank line is dropped and the comment's indentation is fixed:

@@ -26,8 +26,7 @@ using ::executorch::aten::Tensor;
 using ::executorch::runtime::getLeadingDims;
 using ::executorch::runtime::KernelRuntimeContext;
 
-
- // The nnlib kernel to compute quantized linear via matmul.
+// The nnlib kernel to compute quantized linear via matmul.
 
 void _quantized_linear_asym8u(
     const Tensor& in,
The matmul call's argument list is re-indented in the second hunk:

@@ -48,22 +47,22 @@ void _quantized_linear_asym8u(
   const int32_t* __restrict__ bias_data = bias.const_data_ptr<int32_t>();
   uint8_t* __restrict__ out_data = out.mutable_data_ptr<uint8_t>();
   int32_t ret = xa_nn_matmul_asym8uxasym8u_asym8u(
-      out_data,
-      weight_data,
-      in_data,
-      bias_data,
-      out_dim,
-      in_dim,
-      in_dim,
-      leading_dims,
-      in_dim,
-      out_dim,
-      1,
+      out_data,
+      weight_data,
+      in_data,
+      bias_data,
+      out_dim,
+      in_dim,
+      in_dim,
+      leading_dims,
+      in_dim,
+      out_dim,
+      1,
       -weight_zero_point.const_data_ptr<int32_t>()[0], // mat1_zero_bias
       -in_zero_point, // mat2_zero_bias
-      out_multiplier.const_data_ptr<int32_t>()[0],
-      out_shift.const_data_ptr<int32_t>()[0],
-      out_zero_point);
+      out_multiplier.const_data_ptr<int32_t>()[0],
+      out_shift.const_data_ptr<int32_t>()[0],
+      out_zero_point);
   ET_DCHECK_MSG(ret == 0, "HiFi quantized::linear failed");
 }
 
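For readers unfamiliar with the call being re-indented: an asymmetric quantized matmul accumulates products of zero-point-corrected uint8 operands into 32-bit integers, then requantizes each accumulator into the output's uint8 domain using an output multiplier/shift and zero point. Below is a reference-only sketch of that arithmetic in plain C++; quantized_dot is a hypothetical name, the float out_scale stands in for the integer out_multiplier/out_shift pair, and the exact fixed-point rounding inside xa_nn_matmul_asym8uxasym8u_asym8u may differ:

#include <algorithm>
#include <cmath>
#include <cstdint>

// Sketch of one output element of an asymmetric quantized matmul;
// not the nnlib kernel itself.
uint8_t quantized_dot(
    const uint8_t* weight_row, // one row of the weight matrix
    const uint8_t* in_vec,     // one input vector
    int32_t len,               // shared inner dimension (in_dim)
    int32_t bias,              // 32-bit bias for this output channel
    int32_t weight_zero_bias,  // e.g. -weight_zero_point
    int32_t in_zero_bias,      // e.g. -in_zero_point
    float out_scale,           // stand-in for out_multiplier/out_shift
    int32_t out_zero_point) {
  int32_t acc = bias;
  for (int32_t i = 0; i < len; i++) {
    // The zero biases shift each uint8 code back to its real-valued origin.
    acc += (weight_row[i] + weight_zero_bias) * (in_vec[i] + in_zero_bias);
  }
  // Requantize the 32-bit accumulator into the uint8 output domain.
  int32_t q =
      static_cast<int32_t>(std::lround(acc * out_scale)) + out_zero_point;
  return static_cast<uint8_t>(std::clamp(q, 0, 255));
}

The mat1_zero_bias and mat2_zero_bias comments in the diff correspond to the negated zero points passed here as weight_zero_bias and in_zero_bias.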