@@ -136,6 +136,48 @@ pub unsafe fn _mm512_mask_cmpgt_epu64_mask(m: __mmask8, a: __m512i, b: __m512i)
    _mm512_cmpgt_epu64_mask(a, b) & m
}
+/// Compare packed unsigned 64-bit integers in a and b for less-than-or-equal, and store the results in a mask vector.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=727,1063,4909,1062,1062&text=_mm512_cmple_epu64)
+#[inline]
+#[target_feature(enable = "avx512f")]
+#[cfg_attr(test, assert_instr(vpcmp))]
+pub unsafe fn _mm512_cmple_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 {
+    // a <= b is the complement of a > b.
+    !_mm512_cmpgt_epu64_mask(a, b)
+}
+
+/// Compare packed unsigned 64-bit integers in a and b for less-than-or-equal, and store the results in a mask vector k
+/// using zeromask m (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=727,1063,4909,1062,1062,1063&text=_mm512_mask_cmple_epu64)
+#[inline]
+#[target_feature(enable = "avx512f")]
+#[cfg_attr(test, assert_instr(vpcmp))]
+pub unsafe fn _mm512_mask_cmple_epu64_mask(m: __mmask8, a: __m512i, b: __m512i) -> __mmask8 {
+    !_mm512_cmpgt_epu64_mask(a, b) & m
+}
+
+/// Compare packed unsigned 64-bit integers in a and b for greater-than-or-equal, and store the results in a mask vector.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=727,1063,4909,1062,1062&text=_mm512_cmpge_epu64)
+#[inline]
+#[target_feature(enable = "avx512f")]
+#[cfg_attr(test, assert_instr(vpcmp))]
+pub unsafe fn _mm512_cmpge_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 {
+    // a >= b is the complement of a < b.
+    !_mm512_cmplt_epu64_mask(a, b)
+}
+
+/// Compare packed unsigned 64-bit integers in a and b for greater-than-or-equal, and store the results in a mask vector k
+/// using zeromask m (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=727,1063,4909,1062,1062,1063&text=_mm512_mask_cmpge_epu64)
+#[inline]
+#[target_feature(enable = "avx512f")]
+#[cfg_attr(test, assert_instr(vpcmp))]
+pub unsafe fn _mm512_mask_cmpge_epu64_mask(m: __mmask8, a: __m512i, b: __m512i) -> __mmask8 {
+    !_mm512_cmplt_epu64_mask(a, b) & m
+}
+
/// Compare packed unsigned 64-bit integers in a and b for equality, and store the results in a mask vector.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=727,1063,4909,1062,1062&text=_mm512_cmpeq_epu64)
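The four additions above all derive their result from an existing strict comparison: `a <= b` is the complement of `a > b`, and `a >= b` is the complement of `a < b`, with the masked variants AND-ing the zeromask afterwards. A minimal usage sketch, not part of this commit, assuming a toolchain where the AVX-512F intrinsics are exposed through `std::arch` (the `main`/`demo` names are illustrative; `_mm512_set1_epi64` is the existing broadcast intrinsic):

```rust
use std::arch::x86_64::*;

fn main() {
    // Only take the AVX-512F path on hardware that supports it.
    if is_x86_feature_detected!("avx512f") {
        unsafe { demo() }
    }
}

#[target_feature(enable = "avx512f")]
unsafe fn demo() {
    let a = _mm512_set1_epi64(1); // broadcast 1 to all 8 lanes
    let b = _mm512_set1_epi64(2); // broadcast 2 to all 8 lanes
    // 1 <= 2 holds in every lane, so all 8 mask bits are set.
    assert_eq!(_mm512_cmple_epu64_mask(a, b), 0b1111_1111);
    // The zeromask variant clears result bits whose bit in `m` is unset.
    assert_eq!(_mm512_mask_cmple_epu64_mask(0b0000_1111, a, b), 0b0000_1111);
}
```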
@@ -199,6 +241,48 @@ pub unsafe fn _mm512_mask_cmpgt_epi64_mask(m: __mmask8, a: __m512i, b: __m512i)
    _mm512_cmpgt_epi64_mask(a, b) & m
}
+/// Compare packed signed 64-bit integers in a and b for less-than-or-equal, and store the results in a mask vector.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=727,1063,4909,1062,1062&text=_mm512_cmple_epi64)
+#[inline]
+#[target_feature(enable = "avx512f")]
+#[cfg_attr(test, assert_instr(vpcmp))]
+pub unsafe fn _mm512_cmple_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 {
+    // a <= b is the complement of a > b.
+    !_mm512_cmpgt_epi64_mask(a, b)
+}
+
+/// Compare packed signed 64-bit integers in a and b for less-than-or-equal, and store the results in a mask vector k
+/// using zeromask m (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=727,1063,4909,1062,1062,1063&text=_mm512_mask_cmple_epi64)
+#[inline]
+#[target_feature(enable = "avx512f")]
+#[cfg_attr(test, assert_instr(vpcmp))]
+pub unsafe fn _mm512_mask_cmple_epi64_mask(m: __mmask8, a: __m512i, b: __m512i) -> __mmask8 {
+    !_mm512_cmpgt_epi64_mask(a, b) & m
+}
+
+/// Compare packed signed 64-bit integers in a and b for greater-than-or-equal, and store the results in a mask vector.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=727,1063,4909,1062,1062&text=_mm512_cmpge_epi64)
+#[inline]
+#[target_feature(enable = "avx512f")]
+#[cfg_attr(test, assert_instr(vpcmp))]
+pub unsafe fn _mm512_cmpge_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 {
+    // a >= b is the complement of a < b.
+    !_mm512_cmplt_epi64_mask(a, b)
+}
+
+/// Compare packed signed 64-bit integers in a and b for greater-than-or-equal, and store the results in a mask vector k
+/// using zeromask m (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=727,1063,4909,1062,1062,1063&text=_mm512_mask_cmpge_epi64)
+#[inline]
+#[target_feature(enable = "avx512f")]
+#[cfg_attr(test, assert_instr(vpcmp))]
+pub unsafe fn _mm512_mask_cmpge_epi64_mask(m: __mmask8, a: __m512i, b: __m512i) -> __mmask8 {
+    !_mm512_cmplt_epi64_mask(a, b) & m
+}
+
/// Compare packed signed 64-bit integers in a and b for equality, and store the results in a mask vector.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=727,1063,4909,1062,1062&text=_mm512_cmpeq_epi64)
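The `epi64` variants differ from the `epu64` ones only in how lanes are ordered: as signed versus unsigned integers, so the same bit pattern can compare in opposite directions. A short sketch of that difference (same assumptions as the sketch above; `signed_vs_unsigned` is an illustrative name):

```rust
use std::arch::x86_64::*;

// All-ones bits are -1 as i64 but u64::MAX as u64, so the two
// interpretations order the lanes oppositely against zero.
#[target_feature(enable = "avx512f")]
unsafe fn signed_vs_unsigned() {
    let a = _mm512_set1_epi64(-1);
    let b = _mm512_set1_epi64(0);
    assert_eq!(_mm512_cmple_epi64_mask(a, b), 0xff); // signed: -1 <= 0 in every lane
    assert_eq!(_mm512_cmple_epu64_mask(a, b), 0x00); // unsigned: u64::MAX > 0 in every lane
}
```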