@@ -1,11 +1,11 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v,+experimental-zfbfmin,+experimental-zvfbfmin -target-abi=ilp32d \
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
 ; RUN:   -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v,+experimental-zfbfmin,+experimental-zvfbfmin -target-abi=lp64d \
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
 ; RUN:   -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v,+m,+experimental-zfbfmin,+experimental-zvfbfmin -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v,+m -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
 ; RUN:   -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v,+m,+experimental-zfbfmin,+experimental-zvfbfmin -target-abi=lp64d -riscv-v-vector-bits-min=128 \
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v,+m -target-abi=lp64d -riscv-v-vector-bits-min=128 \
 ; RUN:   -verify-machineinstrs < %s | FileCheck %s
 
 define <2 x half> @select_v2f16(i1 zeroext %c, <2 x half> %a, <2 x half> %b) {
@@ -119,347 +119,3 @@ define <16 x half> @selectcc_v16f16(half %a, half %b, <16 x half> %c, <16 x half
   %v = select i1 %cmp, <16 x half> %c, <16 x half> %d
   ret <16 x half> %v
 }
-
-define <2 x float> @select_v2f32(i1 zeroext %c, <2 x float> %a, <2 x float> %b) {
-; CHECK-LABEL: select_v2f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vmsne.vi v0, v10, 0
-; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT: ret
-  %v = select i1 %c, <2 x float> %a, <2 x float> %b
-  ret <2 x float> %v
-}
-
-define <2 x float> @selectcc_v2f32(float %a, float %b, <2 x float> %c, <2 x float> %d) {
-; CHECK-LABEL: selectcc_v2f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: feq.s a0, fa0, fa1
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vmsne.vi v0, v10, 0
-; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT: ret
-  %cmp = fcmp oeq float %a, %b
-  %v = select i1 %cmp, <2 x float> %c, <2 x float> %d
-  ret <2 x float> %v
-}
-
-define <4 x float> @select_v4f32(i1 zeroext %c, <4 x float> %a, <4 x float> %b) {
-; CHECK-LABEL: select_v4f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vmsne.vi v0, v10, 0
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT: ret
-  %v = select i1 %c, <4 x float> %a, <4 x float> %b
-  ret <4 x float> %v
-}
-
-define <4 x float> @selectcc_v4f32(float %a, float %b, <4 x float> %c, <4 x float> %d) {
-; CHECK-LABEL: selectcc_v4f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: feq.s a0, fa0, fa1
-; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vmsne.vi v0, v10, 0
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT: ret
-  %cmp = fcmp oeq float %a, %b
-  %v = select i1 %cmp, <4 x float> %c, <4 x float> %d
-  ret <4 x float> %v
-}
-
-define <8 x float> @select_v8f32(i1 zeroext %c, <8 x float> %a, <8 x float> %b) {
-; CHECK-LABEL: select_v8f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vmsne.vi v0, v12, 0
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
-; CHECK-NEXT: ret
-  %v = select i1 %c, <8 x float> %a, <8 x float> %b
-  ret <8 x float> %v
-}
-
-define <8 x float> @selectcc_v8f32(float %a, float %b, <8 x float> %c, <8 x float> %d) {
-; CHECK-LABEL: selectcc_v8f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: feq.s a0, fa0, fa1
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vmsne.vi v0, v12, 0
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
-; CHECK-NEXT: ret
-  %cmp = fcmp oeq float %a, %b
-  %v = select i1 %cmp, <8 x float> %c, <8 x float> %d
-  ret <8 x float> %v
-}
-
-define <16 x float> @select_v16f32(i1 zeroext %c, <16 x float> %a, <16 x float> %b) {
-; CHECK-LABEL: select_v16f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vmsne.vi v0, v16, 0
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
-; CHECK-NEXT: ret
-  %v = select i1 %c, <16 x float> %a, <16 x float> %b
-  ret <16 x float> %v
-}
-
-define <16 x float> @selectcc_v16f32(float %a, float %b, <16 x float> %c, <16 x float> %d) {
-; CHECK-LABEL: selectcc_v16f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: feq.s a0, fa0, fa1
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vmsne.vi v0, v16, 0
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
-; CHECK-NEXT: ret
-  %cmp = fcmp oeq float %a, %b
-  %v = select i1 %cmp, <16 x float> %c, <16 x float> %d
-  ret <16 x float> %v
-}
-
-define <2 x double> @select_v2f64(i1 zeroext %c, <2 x double> %a, <2 x double> %b) {
-; CHECK-LABEL: select_v2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vmsne.vi v0, v10, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT: ret
-  %v = select i1 %c, <2 x double> %a, <2 x double> %b
-  ret <2 x double> %v
-}
-
-define <2 x double> @selectcc_v2f64(double %a, double %b, <2 x double> %c, <2 x double> %d) {
-; CHECK-LABEL: selectcc_v2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: feq.d a0, fa0, fa1
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vmsne.vi v0, v10, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT: ret
-  %cmp = fcmp oeq double %a, %b
-  %v = select i1 %cmp, <2 x double> %c, <2 x double> %d
-  ret <2 x double> %v
-}
-
-define <4 x double> @select_v4f64(i1 zeroext %c, <4 x double> %a, <4 x double> %b) {
-; CHECK-LABEL: select_v4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vmsne.vi v0, v12, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
-; CHECK-NEXT: ret
-  %v = select i1 %c, <4 x double> %a, <4 x double> %b
-  ret <4 x double> %v
-}
-
-define <4 x double> @selectcc_v4f64(double %a, double %b, <4 x double> %c, <4 x double> %d) {
-; CHECK-LABEL: selectcc_v4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: feq.d a0, fa0, fa1
-; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vmsne.vi v0, v12, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
-; CHECK-NEXT: ret
-  %cmp = fcmp oeq double %a, %b
-  %v = select i1 %cmp, <4 x double> %c, <4 x double> %d
-  ret <4 x double> %v
-}
-
-define <8 x double> @select_v8f64(i1 zeroext %c, <8 x double> %a, <8 x double> %b) {
-; CHECK-LABEL: select_v8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vmsne.vi v0, v16, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
-; CHECK-NEXT: ret
-  %v = select i1 %c, <8 x double> %a, <8 x double> %b
-  ret <8 x double> %v
-}
-
-define <8 x double> @selectcc_v8f64(double %a, double %b, <8 x double> %c, <8 x double> %d) {
-; CHECK-LABEL: selectcc_v8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: feq.d a0, fa0, fa1
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vmsne.vi v0, v16, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
-; CHECK-NEXT: ret
-  %cmp = fcmp oeq double %a, %b
-  %v = select i1 %cmp, <8 x double> %c, <8 x double> %d
-  ret <8 x double> %v
-}
-
-define <16 x double> @select_v16f64(i1 zeroext %c, <16 x double> %a, <16 x double> %b) {
-; CHECK-LABEL: select_v16f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT: vmv.v.x v24, a0
-; CHECK-NEXT: vmsne.vi v0, v24, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
-; CHECK-NEXT: ret
-  %v = select i1 %c, <16 x double> %a, <16 x double> %b
-  ret <16 x double> %v
-}
-
-define <16 x double> @selectcc_v16f64(double %a, double %b, <16 x double> %c, <16 x double> %d) {
-; CHECK-LABEL: selectcc_v16f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: feq.d a0, fa0, fa1
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT: vmv.v.x v24, a0
-; CHECK-NEXT: vmsne.vi v0, v24, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
-; CHECK-NEXT: ret
-  %cmp = fcmp oeq double %a, %b
-  %v = select i1 %cmp, <16 x double> %c, <16 x double> %d
-  ret <16 x double> %v
-}
-
-define <2 x bfloat> @select_v2bf16(i1 zeroext %c, <2 x bfloat> %a, <2 x bfloat> %b) {
-; CHECK-LABEL: select_v2bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vmsne.vi v0, v10, 0
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT: ret
-  %v = select i1 %c, <2 x bfloat> %a, <2 x bfloat> %b
-  ret <2 x bfloat> %v
-}
-
-define <2 x bfloat> @selectcc_v2bf16(bfloat %a, bfloat %b, <2 x bfloat> %c, <2 x bfloat> %d) {
-; CHECK-LABEL: selectcc_v2bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fcvt.s.bf16 fa5, fa1
-; CHECK-NEXT: fcvt.s.bf16 fa4, fa0
-; CHECK-NEXT: feq.s a0, fa4, fa5
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vmsne.vi v0, v10, 0
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT: ret
-  %cmp = fcmp oeq bfloat %a, %b
-  %v = select i1 %cmp, <2 x bfloat> %c, <2 x bfloat> %d
-  ret <2 x bfloat> %v
-}
-
-define <4 x bfloat> @select_v4bf16(i1 zeroext %c, <4 x bfloat> %a, <4 x bfloat> %b) {
-; CHECK-LABEL: select_v4bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vmsne.vi v0, v10, 0
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT: ret
-  %v = select i1 %c, <4 x bfloat> %a, <4 x bfloat> %b
-  ret <4 x bfloat> %v
-}
-
-define <4 x bfloat> @selectcc_v4bf16(bfloat %a, bfloat %b, <4 x bfloat> %c, <4 x bfloat> %d) {
-; CHECK-LABEL: selectcc_v4bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fcvt.s.bf16 fa5, fa1
-; CHECK-NEXT: fcvt.s.bf16 fa4, fa0
-; CHECK-NEXT: feq.s a0, fa4, fa5
-; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vmsne.vi v0, v10, 0
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT: ret
-  %cmp = fcmp oeq bfloat %a, %b
-  %v = select i1 %cmp, <4 x bfloat> %c, <4 x bfloat> %d
-  ret <4 x bfloat> %v
-}
-
-define <8 x bfloat> @select_v8bf16(i1 zeroext %c, <8 x bfloat> %a, <8 x bfloat> %b) {
-; CHECK-LABEL: select_v8bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vmsne.vi v0, v10, 0
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT: ret
-  %v = select i1 %c, <8 x bfloat> %a, <8 x bfloat> %b
-  ret <8 x bfloat> %v
-}
-
-define <8 x bfloat> @selectcc_v8bf16(bfloat %a, bfloat %b, <8 x bfloat> %c, <8 x bfloat> %d) {
-; CHECK-LABEL: selectcc_v8bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fcvt.s.bf16 fa5, fa1
-; CHECK-NEXT: fcvt.s.bf16 fa4, fa0
-; CHECK-NEXT: feq.s a0, fa4, fa5
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vmsne.vi v0, v10, 0
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT: ret
-  %cmp = fcmp oeq bfloat %a, %b
-  %v = select i1 %cmp, <8 x bfloat> %c, <8 x bfloat> %d
-  ret <8 x bfloat> %v
-}
-
-define <16 x bfloat> @select_v16bf16(i1 zeroext %c, <16 x bfloat> %a, <16 x bfloat> %b) {
-; CHECK-LABEL: select_v16bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vmsne.vi v0, v12, 0
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
-; CHECK-NEXT: ret
-  %v = select i1 %c, <16 x bfloat> %a, <16 x bfloat> %b
-  ret <16 x bfloat> %v
-}
-
-define <16 x bfloat> @selectcc_v16bf16(bfloat %a, bfloat %b, <16 x bfloat> %c, <16 x bfloat> %d) {
-; CHECK-LABEL: selectcc_v16bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fcvt.s.bf16 fa5, fa1
-; CHECK-NEXT: fcvt.s.bf16 fa4, fa0
-; CHECK-NEXT: feq.s a0, fa4, fa5
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vmsne.vi v0, v12, 0
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
-; CHECK-NEXT: ret
-  %cmp = fcmp oeq bfloat %a, %b
-  %v = select i1 %cmp, <16 x bfloat> %c, <16 x bfloat> %d
-  ret <16 x bfloat> %v
-}