From ace996b3f312b768e201b16cdc5f6d80d8e51fe9 Mon Sep 17 00:00:00 2001 From: Lawrence Benson Date: Thu, 15 Aug 2024 09:09:02 +0200 Subject: [PATCH 1/9] Add basic vector_compress pattern match --- llvm/lib/Target/X86/X86ISelLowering.cpp | 4 ++++ llvm/lib/Target/X86/X86InstrAVX512.td | 6 ++++++ 2 files changed, 10 insertions(+) diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 2759252693f9f..0c046d53a092b 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -2321,6 +2321,10 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM, } } + if (Subtarget.hasAVX512()) { + setOperationAction(ISD::VECTOR_COMPRESS, MVT::v4i32, Legal); + } + if (!Subtarget.useSoftFloat() && (Subtarget.hasAVXNECONVERT() || Subtarget.hasBF16())) { addRegisterClass(MVT::v8bf16, Subtarget.hasAVX512() ? &X86::VR128XRegClass diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td index e616a8a37c648..487fa12ac17fa 100644 --- a/llvm/lib/Target/X86/X86InstrAVX512.td +++ b/llvm/lib/Target/X86/X86InstrAVX512.td @@ -10543,6 +10543,12 @@ multiclass compress_by_vec_width_lowering { def : Pat<(X86compress (_.VT _.RC:$src), _.ImmAllZerosV, _.KRCWM:$mask), (!cast(Name#_.ZSuffix#rrkz) _.KRCWM:$mask, _.RC:$src)>; + def : Pat<(_.VT (vector_compress _.RC:$src, _.KRCWM:$mask, undef)), + (!cast(Name#_.ZSuffix#rrkz) + _.KRCWM:$mask, _.RC:$src)>; + def : Pat<(_.VT (vector_compress _.RC:$src, _.KRCWM:$mask, _.RC:$passthru)), + (!cast(Name#_.ZSuffix#rrk) + _.RC:$src, _.KRCWM:$mask, _.RC:$passthru)>; } multiclass compress_by_elt_width opc, string OpcodeStr, From 589e89646266abb59e730bff281a706f804971c3 Mon Sep 17 00:00:00 2001 From: Lawrence Benson Date: Tue, 20 Aug 2024 10:05:12 +0200 Subject: [PATCH 2/9] Add VBMI2 handling --- .../CodeGen/SelectionDAG/TargetLowering.cpp | 9 +- llvm/lib/Target/X86/X86ISelLowering.cpp | 10 +- llvm/lib/Target/X86/X86InstrAVX512.td | 2 +- llvm/test/CodeGen/X86/vector-compress-avx2.ll | 281 +++++++++++ .../CodeGen/X86/vector-compress-avx512.ll | 455 ++++++++++++++++++ 5 files changed, 752 insertions(+), 5 deletions(-) create mode 100644 llvm/test/CodeGen/X86/vector-compress-avx2.ll create mode 100644 llvm/test/CodeGen/X86/vector-compress-avx512.ll diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp index c4f4261a708fd..cebbc80974d6f 100644 --- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -11582,11 +11582,12 @@ SDValue TargetLowering::expandVECTOR_COMPRESS(SDNode *Node, // ... if it is not a splat vector, we need to get the passthru value at // position = popcount(mask) and re-load it from the stack before it is // overwritten in the loop below. + EVT PopcountVT = ScalarVT.changeTypeToInteger(); SDValue Popcount = DAG.getNode( ISD::TRUNCATE, DL, MaskVT.changeVectorElementType(MVT::i1), Mask); Popcount = DAG.getNode(ISD::ZERO_EXTEND, DL, - MaskVT.changeVectorElementType(ScalarVT), Popcount); - Popcount = DAG.getNode(ISD::VECREDUCE_ADD, DL, ScalarVT, Popcount); + MaskVT.changeVectorElementType(PopcountVT), Popcount); + Popcount = DAG.getNode(ISD::VECREDUCE_ADD, DL, PopcountVT, Popcount); SDValue LastElmtPtr = getVectorElementPointer(DAG, StackPtr, VecVT, Popcount); LastWriteVal = DAG.getLoad( @@ -11625,8 +11626,10 @@ SDValue TargetLowering::expandVECTOR_COMPRESS(SDNode *Node, // Re-write the last ValI if all lanes were selected. 
Otherwise, // overwrite the last write it with the passthru value. + SDNodeFlags Flags{}; + Flags.setUnpredictable(true); LastWriteVal = - DAG.getSelect(DL, ScalarVT, AllLanesSelected, ValI, LastWriteVal); + DAG.getSelect(DL, ScalarVT, AllLanesSelected, ValI, LastWriteVal, Flags); Chain = DAG.getStore( Chain, DL, LastWriteVal, OutPtr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction())); diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 0c046d53a092b..fa7c7b2789b7d 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -2322,7 +2322,15 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM, } if (Subtarget.hasAVX512()) { - setOperationAction(ISD::VECTOR_COMPRESS, MVT::v4i32, Legal); + for (MVT VT : {MVT::v4i32, MVT::v4f32, MVT::v2i64, MVT::v2f64, MVT::v8i32, + MVT::v8f32, MVT::v4i64, MVT::v4f64, MVT::v16i32, MVT::v16f32, + MVT::v8i64, MVT::v8f64}) + setOperationAction(ISD::VECTOR_COMPRESS, VT, Legal); + + if (Subtarget.hasVBMI2()) + for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v32i8, MVT::v16i16, + MVT::v64i8, MVT::v32i16}) + setOperationAction(ISD::VECTOR_COMPRESS, VT, Legal); } if (!Subtarget.useSoftFloat() && diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td index 487fa12ac17fa..d9f9432a10114 100644 --- a/llvm/lib/Target/X86/X86InstrAVX512.td +++ b/llvm/lib/Target/X86/X86InstrAVX512.td @@ -10548,7 +10548,7 @@ multiclass compress_by_vec_width_lowering { _.KRCWM:$mask, _.RC:$src)>; def : Pat<(_.VT (vector_compress _.RC:$src, _.KRCWM:$mask, _.RC:$passthru)), (!cast(Name#_.ZSuffix#rrk) - _.RC:$src, _.KRCWM:$mask, _.RC:$passthru)>; + _.RC:$passthru, _.KRCWM:$mask, _.RC:$src)>; } multiclass compress_by_elt_width opc, string OpcodeStr, diff --git a/llvm/test/CodeGen/X86/vector-compress-avx2.ll b/llvm/test/CodeGen/X86/vector-compress-avx2.ll new file mode 100644 index 0000000000000..59db0fb795386 --- /dev/null +++ b/llvm/test/CodeGen/X86/vector-compress-avx2.ll @@ -0,0 +1,281 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=x86_64 -mattr=avx2 < %s | FileCheck %s + +; The main logic for vpcompress is tested in the -avx512.ll version of this file. +; This file only checks the fallback expand path. 
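+;
+; As a rough sketch of what the expand path computes (this mirrors the generic
+; TargetLowering::expandVECTOR_COMPRESS lowering; the concrete values are made
+; up for illustration): selected elements are stored to a stack temporary at
+; an offset given by the running popcount of the mask, trailing slots keep the
+; passthru values, and the result is reloaded as a single vector, e.g.:
+;   compress(<3, 5, 7, 9>, <i1 1, i1 0, i1 1, i1 0>, <-1, -1, -1, -1>)
+;     ==> <3, 7, -1, -1>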
+ +define <4 x i32> @test_compress_v4i32(<4 x i32> %vec, <4 x i1> %mask, <4 x i32> %passthru) { +; CHECK-LABEL: test_compress_v4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vpslld $31, %xmm1, %xmm1 +; CHECK-NEXT: vpsrad $31, %xmm1, %xmm1 +; CHECK-NEXT: vmovaps %xmm2, -{{[0-9]+}}(%rsp) +; CHECK-NEXT: vpextrd $1, %xmm1, %eax +; CHECK-NEXT: vmovd %xmm1, %esi +; CHECK-NEXT: andl $1, %esi +; CHECK-NEXT: movl %esi, %edi +; CHECK-NEXT: subl %eax, %edi +; CHECK-NEXT: vpextrd $2, %xmm1, %edx +; CHECK-NEXT: subl %edx, %edi +; CHECK-NEXT: vpextrd $3, %xmm1, %ecx +; CHECK-NEXT: subl %ecx, %edi +; CHECK-NEXT: andl $3, %edi +; CHECK-NEXT: andl $1, %eax +; CHECK-NEXT: addq %rsi, %rax +; CHECK-NEXT: andl $1, %edx +; CHECK-NEXT: addq %rax, %rdx +; CHECK-NEXT: andl $1, %ecx +; CHECK-NEXT: addq %rdx, %rcx +; CHECK-NEXT: vextractps $3, %xmm0, %r8d +; CHECK-NEXT: cmpq $4, %rcx +; CHECK-NEXT: cmovbl -24(%rsp,%rdi,4), %r8d +; CHECK-NEXT: vmovss %xmm0, -{{[0-9]+}}(%rsp) +; CHECK-NEXT: vextractps $1, %xmm0, -24(%rsp,%rsi,4) +; CHECK-NEXT: vextractps $2, %xmm0, -24(%rsp,%rax,4) +; CHECK-NEXT: andl $3, %edx +; CHECK-NEXT: vextractps $3, %xmm0, -24(%rsp,%rdx,4) +; CHECK-NEXT: cmpq $3, %rcx +; CHECK-NEXT: movl $3, %eax +; CHECK-NEXT: cmovbq %rcx, %rax +; CHECK-NEXT: movl %r8d, -24(%rsp,%rax,4) +; CHECK-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0 +; CHECK-NEXT: retq + %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> %mask, <4 x i32> %passthru) + ret <4 x i32> %out +} + +define <4 x float> @test_compress_v4f32(<4 x float> %vec, <4 x i1> %mask, <4 x float> %passthru) { +; CHECK-LABEL: test_compress_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vpslld $31, %xmm1, %xmm1 +; CHECK-NEXT: vpsrad $31, %xmm1, %xmm1 +; CHECK-NEXT: vmovaps %xmm2, -{{[0-9]+}}(%rsp) +; CHECK-NEXT: vpextrd $1, %xmm1, %edx +; CHECK-NEXT: vmovd %xmm1, %esi +; CHECK-NEXT: andl $1, %esi +; CHECK-NEXT: movl %esi, %edi +; CHECK-NEXT: subl %edx, %edi +; CHECK-NEXT: vpextrd $2, %xmm1, %ecx +; CHECK-NEXT: subl %ecx, %edi +; CHECK-NEXT: vpextrd $3, %xmm1, %eax +; CHECK-NEXT: subl %eax, %edi +; CHECK-NEXT: andl $3, %edi +; CHECK-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-NEXT: vmovss %xmm0, -{{[0-9]+}}(%rsp) +; CHECK-NEXT: vextractps $1, %xmm0, -24(%rsp,%rsi,4) +; CHECK-NEXT: andl $1, %edx +; CHECK-NEXT: addq %rsi, %rdx +; CHECK-NEXT: vextractps $2, %xmm0, -24(%rsp,%rdx,4) +; CHECK-NEXT: andl $1, %ecx +; CHECK-NEXT: addq %rdx, %rcx +; CHECK-NEXT: andl $1, %eax +; CHECK-NEXT: addq %rcx, %rax +; CHECK-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; CHECK-NEXT: andl $3, %ecx +; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3] +; CHECK-NEXT: vmovss %xmm0, -24(%rsp,%rcx,4) +; CHECK-NEXT: cmpq $3, %rax +; CHECK-NEXT: movl $3, %ecx +; CHECK-NEXT: cmovbq %rax, %rcx +; CHECK-NEXT: ja .LBB1_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: .LBB1_2: +; CHECK-NEXT: vmovss %xmm0, -24(%rsp,%rcx,4) +; CHECK-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0 +; CHECK-NEXT: retq + %out = call <4 x float> @llvm.experimental.vector.compress(<4 x float> %vec, <4 x i1> %mask, <4 x float> %passthru) + ret <4 x float> %out +} + +define <2 x i64> @test_compress_v2i64(<2 x i64> %vec, <2 x i1> %mask, <2 x i64> %passthru) { +; CHECK-LABEL: test_compress_v2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vpsllq $63, %xmm1, %xmm1 +; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; CHECK-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm1 +; CHECK-NEXT: vmovaps %xmm2, -{{[0-9]+}}(%rsp) +; CHECK-NEXT: vpextrq $1, %xmm1, %rax +; CHECK-NEXT: vmovq %xmm1, %rcx +; 
CHECK-NEXT: movl %ecx, %edx +; CHECK-NEXT: subl %eax, %edx +; CHECK-NEXT: andl $1, %edx +; CHECK-NEXT: andl $1, %eax +; CHECK-NEXT: andl $1, %ecx +; CHECK-NEXT: addq %rcx, %rax +; CHECK-NEXT: vpextrq $1, %xmm0, %rsi +; CHECK-NEXT: cmpq $2, %rax +; CHECK-NEXT: cmovbq -24(%rsp,%rdx,8), %rsi +; CHECK-NEXT: vmovq %xmm0, -{{[0-9]+}}(%rsp) +; CHECK-NEXT: movl %ecx, %ecx +; CHECK-NEXT: vpextrq $1, %xmm0, -24(%rsp,%rcx,8) +; CHECK-NEXT: cmpq $1, %rax +; CHECK-NEXT: movl $1, %ecx +; CHECK-NEXT: cmovbq %rax, %rcx +; CHECK-NEXT: movq %rsi, -24(%rsp,%rcx,8) +; CHECK-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0 +; CHECK-NEXT: retq + %out = call <2 x i64> @llvm.experimental.vector.compress(<2 x i64> %vec, <2 x i1> %mask, <2 x i64> %passthru) + ret <2 x i64> %out +} + +define <2 x double> @test_compress_v2f64(<2 x double> %vec, <2 x i1> %mask, <2 x double> %passthru) { +; CHECK-LABEL: test_compress_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vpsllq $63, %xmm1, %xmm1 +; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; CHECK-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm1 +; CHECK-NEXT: vmovaps %xmm2, -{{[0-9]+}}(%rsp) +; CHECK-NEXT: vpextrq $1, %xmm1, %rax +; CHECK-NEXT: vmovq %xmm1, %rcx +; CHECK-NEXT: movl %ecx, %edx +; CHECK-NEXT: subl %eax, %edx +; CHECK-NEXT: andl $1, %edx +; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; CHECK-NEXT: vmovlpd %xmm0, -{{[0-9]+}}(%rsp) +; CHECK-NEXT: andl $1, %ecx +; CHECK-NEXT: movl %ecx, %edx +; CHECK-NEXT: vmovhpd %xmm0, -24(%rsp,%rdx,8) +; CHECK-NEXT: andl $1, %eax +; CHECK-NEXT: addq %rcx, %rax +; CHECK-NEXT: cmpq $2, %rax +; CHECK-NEXT: jb .LBB3_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: vshufpd {{.*#+}} xmm1 = xmm0[1,0] +; CHECK-NEXT: .LBB3_2: +; CHECK-NEXT: cmpq $1, %rax +; CHECK-NEXT: movl $1, %ecx +; CHECK-NEXT: cmovbq %rax, %rcx +; CHECK-NEXT: vmovsd %xmm1, -24(%rsp,%rcx,8) +; CHECK-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0 +; CHECK-NEXT: retq + %out = call <2 x double> @llvm.experimental.vector.compress(<2 x double> %vec, <2 x i1> %mask, <2 x double> %passthru) + ret <2 x double> %out +} + +define <8 x i32> @test_compress_v8i32(<8 x i32> %vec, <8 x i1> %mask, <8 x i32> %passthru) { +; CHECK-LABEL: test_compress_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: pushq %rbp +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset %rbp, -16 +; CHECK-NEXT: movq %rsp, %rbp +; CHECK-NEXT: .cfi_def_cfa_register %rbp +; CHECK-NEXT: pushq %rbx +; CHECK-NEXT: andq $-32, %rsp +; CHECK-NEXT: subq $64, %rsp +; CHECK-NEXT: .cfi_offset %rbx, -24 +; CHECK-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; CHECK-NEXT: vpslld $31, %ymm1, %ymm1 +; CHECK-NEXT: vpsrad $31, %ymm1, %ymm3 +; CHECK-NEXT: vmovaps %ymm2, (%rsp) +; CHECK-NEXT: vextracti128 $1, %ymm3, %xmm1 +; CHECK-NEXT: vpackssdw %xmm1, %xmm3, %xmm2 +; CHECK-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero +; CHECK-NEXT: vpslld $31, %ymm2, %ymm2 +; CHECK-NEXT: vpsrld $31, %ymm2, %ymm2 +; CHECK-NEXT: vextracti128 $1, %ymm2, %xmm4 +; CHECK-NEXT: vpaddd %xmm4, %xmm2, %xmm2 +; CHECK-NEXT: vpextrd $1, %xmm2, %eax +; CHECK-NEXT: vmovd %xmm2, %ecx +; CHECK-NEXT: addl %eax, %ecx +; CHECK-NEXT: vpextrd $2, %xmm2, %edx +; CHECK-NEXT: vpextrd $3, %xmm2, %eax +; CHECK-NEXT: addl %edx, %eax +; CHECK-NEXT: addl %ecx, %eax +; CHECK-NEXT: andl $7, %eax +; CHECK-NEXT: vpextrd $1, %xmm3, %ecx +; CHECK-NEXT: andl $1, %ecx +; CHECK-NEXT: vmovd %xmm3, %edx +; CHECK-NEXT: andl $1, %edx +; CHECK-NEXT: 
addq %rdx, %rcx +; CHECK-NEXT: vpextrd $2, %xmm3, %esi +; CHECK-NEXT: andl $1, %esi +; CHECK-NEXT: addq %rcx, %rsi +; CHECK-NEXT: vpextrd $3, %xmm3, %edi +; CHECK-NEXT: andl $1, %edi +; CHECK-NEXT: addq %rsi, %rdi +; CHECK-NEXT: vmovd %xmm1, %r8d +; CHECK-NEXT: andl $1, %r8d +; CHECK-NEXT: addq %rdi, %r8 +; CHECK-NEXT: vpextrd $1, %xmm1, %r9d +; CHECK-NEXT: andl $1, %r9d +; CHECK-NEXT: addq %r8, %r9 +; CHECK-NEXT: vpextrd $2, %xmm1, %r10d +; CHECK-NEXT: andl $1, %r10d +; CHECK-NEXT: addq %r9, %r10 +; CHECK-NEXT: vpextrd $3, %xmm1, %r11d +; CHECK-NEXT: andl $1, %r11d +; CHECK-NEXT: addq %r10, %r11 +; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vextractps $3, %xmm1, %ebx +; CHECK-NEXT: cmpq $8, %r11 +; CHECK-NEXT: cmovbl (%rsp,%rax,4), %ebx +; CHECK-NEXT: vmovss %xmm0, (%rsp) +; CHECK-NEXT: vextractps $1, %xmm0, (%rsp,%rdx,4) +; CHECK-NEXT: vextractps $2, %xmm0, (%rsp,%rcx,4) +; CHECK-NEXT: vextractps $3, %xmm0, (%rsp,%rsi,4) +; CHECK-NEXT: andl $7, %edi +; CHECK-NEXT: vmovss %xmm1, (%rsp,%rdi,4) +; CHECK-NEXT: andl $7, %r8d +; CHECK-NEXT: vextractps $1, %xmm1, (%rsp,%r8,4) +; CHECK-NEXT: andl $7, %r9d +; CHECK-NEXT: vextractps $2, %xmm1, (%rsp,%r9,4) +; CHECK-NEXT: andl $7, %r10d +; CHECK-NEXT: vextractps $3, %xmm1, (%rsp,%r10,4) +; CHECK-NEXT: cmpq $7, %r11 +; CHECK-NEXT: movl $7, %eax +; CHECK-NEXT: cmovbq %r11, %rax +; CHECK-NEXT: movl %eax, %eax +; CHECK-NEXT: movl %ebx, (%rsp,%rax,4) +; CHECK-NEXT: vmovaps (%rsp), %ymm0 +; CHECK-NEXT: leaq -8(%rbp), %rsp +; CHECK-NEXT: popq %rbx +; CHECK-NEXT: popq %rbp +; CHECK-NEXT: .cfi_def_cfa %rsp, 8 +; CHECK-NEXT: retq + %out = call <8 x i32> @llvm.experimental.vector.compress(<8 x i32> %vec, <8 x i1> %mask, <8 x i32> %passthru) + ret <8 x i32> %out +} + +define <4 x i32> @test_compress_all_const() { +; CHECK-LABEL: test_compress_all_const: +; CHECK: # %bb.0: +; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = [5,9,0,0] +; CHECK-NEXT: retq + %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> , + <4 x i1> , + <4 x i32> undef) + ret <4 x i32> %out +} + +define <4 x i32> @test_compress_const_mask(<4 x i32> %vec) { +; CHECK-LABEL: test_compress_const_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3,2,3] +; CHECK-NEXT: retq + %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> , <4 x i32> undef) + ret <4 x i32> %out +} + +define <4 x i32> @test_compress_const_mask_passthrough(<4 x i32> %vec, <4 x i32> %passthru) { +; CHECK-LABEL: test_compress_const_mask_passthrough: +; CHECK: # %bb.0: +; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],xmm1[2,3] +; CHECK-NEXT: retq + %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> , <4 x i32> %passthru) + ret <4 x i32> %out +} + +define <4 x i32> @test_compress_const_mask_const_passthrough(<4 x i32> %vec) { +; CHECK-LABEL: test_compress_const_mask_const_passthrough: +; CHECK: # %bb.0: +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,3] +; CHECK-NEXT: movl $7, %eax +; CHECK-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0 +; CHECK-NEXT: movl $8, %eax +; CHECK-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0 +; CHECK-NEXT: retq + %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> , <4 x i32> ) + ret <4 x i32> %out +} diff --git a/llvm/test/CodeGen/X86/vector-compress-avx512.ll b/llvm/test/CodeGen/X86/vector-compress-avx512.ll new file mode 100644 index 0000000000000..670d5bc12aabd --- /dev/null +++ b/llvm/test/CodeGen/X86/vector-compress-avx512.ll @@ -0,0 +1,455 @@ +; NOTE: Assertions have been autogenerated by 
utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -O3 -mtriple=x86_64 -mattr=+avx512f,+avx512vl,+avx512vbmi2 < %s | FileCheck %s + +define <4 x i32> @test_compress_v4i32(<4 x i32> %vec, <4 x i1> %mask, <4 x i32> %passthru) { +; CHECK-LABEL: test_compress_v4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vpslld $31, %xmm1, %xmm1 +; CHECK-NEXT: vptestmd %xmm1, %xmm1, %k1 +; CHECK-NEXT: vpcompressd %xmm0, %xmm2 {%k1} +; CHECK-NEXT: vmovdqa %xmm2, %xmm0 +; CHECK-NEXT: retq + %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> %mask, <4 x i32> %passthru) + ret <4 x i32> %out +} + +define <4 x float> @test_compress_v4f32(<4 x float> %vec, <4 x i1> %mask, <4 x float> %passthru) { +; CHECK-LABEL: test_compress_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vpslld $31, %xmm1, %xmm1 +; CHECK-NEXT: vptestmd %xmm1, %xmm1, %k1 +; CHECK-NEXT: vcompressps %xmm0, %xmm2 {%k1} +; CHECK-NEXT: vmovdqa %xmm2, %xmm0 +; CHECK-NEXT: retq + %out = call <4 x float> @llvm.experimental.vector.compress(<4 x float> %vec, <4 x i1> %mask, <4 x float> %passthru) + ret <4 x float> %out +} + +define <2 x i64> @test_compress_v2i64(<2 x i64> %vec, <2 x i1> %mask, <2 x i64> %passthru) { +; CHECK-LABEL: test_compress_v2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vpsllq $63, %xmm1, %xmm1 +; CHECK-NEXT: vptestmq %xmm1, %xmm1, %k1 +; CHECK-NEXT: vpcompressq %xmm0, %xmm2 {%k1} +; CHECK-NEXT: vmovdqa %xmm2, %xmm0 +; CHECK-NEXT: retq + %out = call <2 x i64> @llvm.experimental.vector.compress(<2 x i64> %vec, <2 x i1> %mask, <2 x i64> %passthru) + ret <2 x i64> %out +} + +define <2 x double> @test_compress_v2f64(<2 x double> %vec, <2 x i1> %mask, <2 x double> %passthru) { +; CHECK-LABEL: test_compress_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vpsllq $63, %xmm1, %xmm1 +; CHECK-NEXT: vptestmq %xmm1, %xmm1, %k1 +; CHECK-NEXT: vcompresspd %xmm0, %xmm2 {%k1} +; CHECK-NEXT: vmovdqa %xmm2, %xmm0 +; CHECK-NEXT: retq + %out = call <2 x double> @llvm.experimental.vector.compress(<2 x double> %vec, <2 x i1> %mask, <2 x double> %passthru) + ret <2 x double> %out +} + +define <8 x i32> @test_compress_v8i32(<8 x i32> %vec, <8 x i1> %mask, <8 x i32> %passthru) { +; CHECK-LABEL: test_compress_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vpsllw $15, %xmm1, %xmm1 +; CHECK-NEXT: vpmovw2m %xmm1, %k1 +; CHECK-NEXT: vpcompressd %ymm0, %ymm2 {%k1} +; CHECK-NEXT: vmovdqa %ymm2, %ymm0 +; CHECK-NEXT: retq + %out = call <8 x i32> @llvm.experimental.vector.compress(<8 x i32> %vec, <8 x i1> %mask, <8 x i32> %passthru) + ret <8 x i32> %out +} + +define <8 x float> @test_compress_v8f32(<8 x float> %vec, <8 x i1> %mask, <8 x float> %passthru) { +; CHECK-LABEL: test_compress_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vpsllw $15, %xmm1, %xmm1 +; CHECK-NEXT: vpmovw2m %xmm1, %k1 +; CHECK-NEXT: vcompressps %ymm0, %ymm2 {%k1} +; CHECK-NEXT: vmovdqa %ymm2, %ymm0 +; CHECK-NEXT: retq + %out = call <8 x float> @llvm.experimental.vector.compress(<8 x float> %vec, <8 x i1> %mask, <8 x float> %passthru) + ret <8 x float> %out +} + +define <4 x i64> @test_compress_v4i64(<4 x i64> %vec, <4 x i1> %mask, <4 x i64> %passthru) { +; CHECK-LABEL: test_compress_v4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vpslld $31, %xmm1, %xmm1 +; CHECK-NEXT: vptestmd %xmm1, %xmm1, %k1 +; CHECK-NEXT: vpcompressq %ymm0, %ymm2 {%k1} +; CHECK-NEXT: vmovdqa %ymm2, %ymm0 +; CHECK-NEXT: retq + %out = call <4 x i64> @llvm.experimental.vector.compress(<4 x i64> %vec, <4 x i1> %mask, <4 x i64> %passthru) + ret <4 x i64> %out +} + +define <4 x double> @test_compress_v4f64(<4 x double> %vec, <4 x 
i1> %mask, <4 x double> %passthru) { +; CHECK-LABEL: test_compress_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vpslld $31, %xmm1, %xmm1 +; CHECK-NEXT: vptestmd %xmm1, %xmm1, %k1 +; CHECK-NEXT: vcompresspd %ymm0, %ymm2 {%k1} +; CHECK-NEXT: vmovdqa %ymm2, %ymm0 +; CHECK-NEXT: retq + %out = call <4 x double> @llvm.experimental.vector.compress(<4 x double> %vec, <4 x i1> %mask, <4 x double> %passthru) + ret <4 x double> %out +} + +define <16 x i32> @test_compress_v16i32(<16 x i32> %vec, <16 x i1> %mask, <16 x i32> %passthru) { +; CHECK-LABEL: test_compress_v16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vpsllw $7, %xmm1, %xmm1 +; CHECK-NEXT: vpmovb2m %xmm1, %k1 +; CHECK-NEXT: vpcompressd %zmm0, %zmm2 {%k1} +; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0 +; CHECK-NEXT: retq + %out = call <16 x i32> @llvm.experimental.vector.compress(<16 x i32> %vec, <16 x i1> %mask, <16 x i32> %passthru) + ret <16 x i32> %out +} + +define <16 x float> @test_compress_v16f32(<16 x float> %vec, <16 x i1> %mask, <16 x float> %passthru) { +; CHECK-LABEL: test_compress_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vpsllw $7, %xmm1, %xmm1 +; CHECK-NEXT: vpmovb2m %xmm1, %k1 +; CHECK-NEXT: vcompressps %zmm0, %zmm2 {%k1} +; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0 +; CHECK-NEXT: retq + %out = call <16 x float> @llvm.experimental.vector.compress(<16 x float> %vec, <16 x i1> %mask, <16 x float> %passthru) + ret <16 x float> %out +} + +define <8 x i64> @test_compress_v8i64(<8 x i64> %vec, <8 x i1> %mask, <8 x i64> %passthru) { +; CHECK-LABEL: test_compress_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vpsllw $15, %xmm1, %xmm1 +; CHECK-NEXT: vpmovw2m %xmm1, %k1 +; CHECK-NEXT: vpcompressq %zmm0, %zmm2 {%k1} +; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0 +; CHECK-NEXT: retq + %out = call <8 x i64> @llvm.experimental.vector.compress(<8 x i64> %vec, <8 x i1> %mask, <8 x i64> %passthru) + ret <8 x i64> %out +} + +define <8 x double> @test_compress_v8f64(<8 x double> %vec, <8 x i1> %mask, <8 x double> %passthru) { +; CHECK-LABEL: test_compress_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vpsllw $15, %xmm1, %xmm1 +; CHECK-NEXT: vpmovw2m %xmm1, %k1 +; CHECK-NEXT: vcompresspd %zmm0, %zmm2 {%k1} +; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0 +; CHECK-NEXT: retq + %out = call <8 x double> @llvm.experimental.vector.compress(<8 x double> %vec, <8 x i1> %mask, <8 x double> %passthru) + ret <8 x double> %out +} + +define <16 x i8> @test_compress_v16i8(<16 x i8> %vec, <16 x i1> %mask, <16 x i8> %passthru) { +; CHECK-LABEL: test_compress_v16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vpsllw $7, %xmm1, %xmm1 +; CHECK-NEXT: vpmovb2m %xmm1, %k1 +; CHECK-NEXT: vpcompressb %xmm0, %xmm2 {%k1} +; CHECK-NEXT: vmovdqa %xmm2, %xmm0 +; CHECK-NEXT: retq + %out = call <16 x i8> @llvm.experimental.vector.compress(<16 x i8> %vec, <16 x i1> %mask, <16 x i8> %passthru) + ret <16 x i8> %out +} + +define <8 x i16> @test_compress_v8i16(<8 x i16> %vec, <8 x i1> %mask, <8 x i16> %passthru) { +; CHECK-LABEL: test_compress_v8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vpsllw $15, %xmm1, %xmm1 +; CHECK-NEXT: vpmovw2m %xmm1, %k1 +; CHECK-NEXT: vpcompressw %xmm0, %xmm2 {%k1} +; CHECK-NEXT: vmovdqa %xmm2, %xmm0 +; CHECK-NEXT: retq + %out = call <8 x i16> @llvm.experimental.vector.compress(<8 x i16> %vec, <8 x i1> %mask, <8 x i16> %passthru) + ret <8 x i16> %out +} + +define <32 x i8> @test_compress_v32i8(<32 x i8> %vec, <32 x i1> %mask, <32 x i8> %passthru) { +; CHECK-LABEL: test_compress_v32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vpsllw $7, %ymm1, %ymm1 +; CHECK-NEXT: vpmovb2m %ymm1, %k1 +; CHECK-NEXT: vpcompressb %ymm0, %ymm2 {%k1} +; 
CHECK-NEXT: vmovdqa %ymm2, %ymm0 +; CHECK-NEXT: retq + %out = call <32 x i8> @llvm.experimental.vector.compress(<32 x i8> %vec, <32 x i1> %mask, <32 x i8> %passthru) + ret <32 x i8> %out +} + +define <16 x i16> @test_compress_v16i16(<16 x i16> %vec, <16 x i1> %mask, <16 x i16> %passthru) { +; CHECK-LABEL: test_compress_v16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vpsllw $7, %xmm1, %xmm1 +; CHECK-NEXT: vpmovb2m %xmm1, %k1 +; CHECK-NEXT: vpcompressw %ymm0, %ymm2 {%k1} +; CHECK-NEXT: vmovdqa %ymm2, %ymm0 +; CHECK-NEXT: retq + %out = call <16 x i16> @llvm.experimental.vector.compress(<16 x i16> %vec, <16 x i1> %mask, <16 x i16> %passthru) + ret <16 x i16> %out +} + +define <64 x i8> @test_compress_v64i8(<64 x i8> %vec, <64 x i1> %mask, <64 x i8> %passthru) { +; CHECK-LABEL: test_compress_v64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vpsllw $7, %zmm1, %zmm1 +; CHECK-NEXT: vpmovb2m %zmm1, %k1 +; CHECK-NEXT: vpcompressb %zmm0, %zmm2 {%k1} +; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0 +; CHECK-NEXT: retq + %out = call <64 x i8> @llvm.experimental.vector.compress(<64 x i8> %vec, <64 x i1> %mask, <64 x i8> %passthru) + ret <64 x i8> %out +} + +define <32 x i16> @test_compress_v32i16(<32 x i16> %vec, <32 x i1> %mask, <32 x i16> %passthru) { +; CHECK-LABEL: test_compress_v32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vpsllw $7, %ymm1, %ymm1 +; CHECK-NEXT: vpmovb2m %ymm1, %k1 +; CHECK-NEXT: vpcompressw %zmm0, %zmm2 {%k1} +; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0 +; CHECK-NEXT: retq + %out = call <32 x i16> @llvm.experimental.vector.compress(<32 x i16> %vec, <32 x i1> %mask, <32 x i16> %passthru) + ret <32 x i16> %out +} + +define <64 x i32> @test_compress_large(<64 x i1> %mask, <64 x i32> %vec, <64 x i32> %passthru) { +; CHECK-LABEL: test_compress_large: +; CHECK: # %bb.0: +; CHECK-NEXT: pushq %rbp +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset %rbp, -16 +; CHECK-NEXT: movq %rsp, %rbp +; CHECK-NEXT: .cfi_def_cfa_register %rbp +; CHECK-NEXT: andq $-64, %rsp +; CHECK-NEXT: subq $576, %rsp # imm = 0x240 +; CHECK-NEXT: vpsllw $7, %zmm0, %zmm0 +; CHECK-NEXT: vpmovb2m %zmm0, %k1 +; CHECK-NEXT: kshiftrq $32, %k1, %k4 +; CHECK-NEXT: kshiftrd $16, %k4, %k3 +; CHECK-NEXT: kshiftrd $16, %k1, %k2 +; CHECK-NEXT: vpcompressd %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: vmovdqa64 %zmm0, (%rsp) +; CHECK-NEXT: kshiftrw $8, %k1, %k0 +; CHECK-NEXT: kxorw %k0, %k1, %k0 +; CHECK-NEXT: kshiftrw $4, %k0, %k5 +; CHECK-NEXT: kxorw %k5, %k0, %k0 +; CHECK-NEXT: kshiftrw $2, %k0, %k5 +; CHECK-NEXT: kxorw %k5, %k0, %k0 +; CHECK-NEXT: kshiftrw $1, %k0, %k5 +; CHECK-NEXT: kxorw %k5, %k0, %k0 +; CHECK-NEXT: kmovd %k0, %eax +; CHECK-NEXT: andl $31, %eax +; CHECK-NEXT: vpcompressd %zmm2, %zmm0 {%k2} {z} +; CHECK-NEXT: vmovdqa64 %zmm0, (%rsp,%rax,4) +; CHECK-NEXT: vpcompressd %zmm3, %zmm0 {%k4} {z} +; CHECK-NEXT: vmovdqa64 %zmm0, {{[0-9]+}}(%rsp) +; CHECK-NEXT: kshiftrw $8, %k4, %k0 +; CHECK-NEXT: kxorw %k0, %k4, %k0 +; CHECK-NEXT: kshiftrw $4, %k0, %k4 +; CHECK-NEXT: kxorw %k4, %k0, %k0 +; CHECK-NEXT: kshiftrw $2, %k0, %k4 +; CHECK-NEXT: kxorw %k4, %k0, %k0 +; CHECK-NEXT: kshiftrw $1, %k0, %k4 +; CHECK-NEXT: kxorw %k4, %k0, %k0 +; CHECK-NEXT: kmovd %k0, %eax +; CHECK-NEXT: andl $31, %eax +; CHECK-NEXT: vpcompressd %zmm4, %zmm0 {%k3} {z} +; CHECK-NEXT: vmovdqa64 %zmm0, 128(%rsp,%rax,4) +; CHECK-NEXT: vmovaps (%rsp), %zmm0 +; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm1 +; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) +; CHECK-NEXT: kxorw %k2, %k1, %k0 +; CHECK-NEXT: kshiftrw $8, %k0, %k1 +; CHECK-NEXT: kxorw %k1, %k0, %k0 +; CHECK-NEXT: kshiftrw $4, 
%k0, %k1 +; CHECK-NEXT: kxorw %k1, %k0, %k0 +; CHECK-NEXT: kshiftrw $2, %k0, %k1 +; CHECK-NEXT: kxorw %k1, %k0, %k0 +; CHECK-NEXT: kshiftrw $1, %k0, %k1 +; CHECK-NEXT: kxorw %k1, %k0, %k0 +; CHECK-NEXT: kmovd %k0, %eax +; CHECK-NEXT: andl $63, %eax +; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm0 +; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm2 +; CHECK-NEXT: vmovaps %zmm0, 256(%rsp,%rax,4) +; CHECK-NEXT: vmovaps %zmm1, {{[0-9]+}}(%rsp) +; CHECK-NEXT: vmovaps %zmm2, 320(%rsp,%rax,4) +; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm0 +; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm1 +; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm2 +; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm3 +; CHECK-NEXT: movq %rbp, %rsp +; CHECK-NEXT: popq %rbp +; CHECK-NEXT: .cfi_def_cfa %rsp, 8 +; CHECK-NEXT: retq + %out = call <64 x i32> @llvm.experimental.vector.compress(<64 x i32> %vec, <64 x i1> %mask, <64 x i32> undef) + ret <64 x i32> %out +} + +define <4 x i32> @test_compress_all_const() { +; CHECK-LABEL: test_compress_all_const: +; CHECK: # %bb.0: +; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = [5,9,0,0] +; CHECK-NEXT: retq + %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> , + <4 x i1> , + <4 x i32> undef) + ret <4 x i32> %out +} + +define <4 x i32> @test_compress_const_mask(<4 x i32> %vec) { +; CHECK-LABEL: test_compress_const_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3,2,3] +; CHECK-NEXT: retq + %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> , <4 x i32> undef) + ret <4 x i32> %out +} + +define <4 x i32> @test_compress_const_mask_passthrough(<4 x i32> %vec, <4 x i32> %passthru) { +; CHECK-LABEL: test_compress_const_mask_passthrough: +; CHECK: # %bb.0: +; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],xmm1[2,3] +; CHECK-NEXT: retq + %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> , <4 x i32> %passthru) + ret <4 x i32> %out +} + +define <4 x i32> @test_compress_const_mask_const_passthrough(<4 x i32> %vec) { +; CHECK-LABEL: test_compress_const_mask_const_passthrough: +; CHECK: # %bb.0: +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,3] +; CHECK-NEXT: movl $7, %eax +; CHECK-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0 +; CHECK-NEXT: movl $8, %eax +; CHECK-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0 +; CHECK-NEXT: retq + %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> , <4 x i32> ) + ret <4 x i32> %out +} + +; We pass a placeholder value for the const_mask* tests to check that they are converted to a no-op by simply copying +; the second vector input register to the return register or doing nothing. 
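+; Roughly, the folds expected below are (written as pseudo-IR sketches; this
+; does not pin down which DAG combine performs them):
+;   compress(%vec, splat(i1 1), %passthru) --> %vec
+;   compress(%vec, splat(i1 0), %passthru) --> %passthru
+;   compress(%vec, splat(i1 0), undef)     --> undef (no code emitted)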
+define <4 x i32> @test_compress_const_splat1_mask(<4 x i32> %ignore, <4 x i32> %vec) { +; CHECK-LABEL: test_compress_const_splat1_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> splat (i1 -1), <4 x i32> undef) + ret <4 x i32> %out +} +define <4 x i32> @test_compress_const_splat0_mask(<4 x i32> %ignore, <4 x i32> %vec) { +; CHECK-LABEL: test_compress_const_splat0_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: retq + %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> splat (i1 0), <4 x i32> undef) + ret <4 x i32> %out +} +define <4 x i32> @test_compress_undef_mask(<4 x i32> %ignore, <4 x i32> %vec) { +; CHECK-LABEL: test_compress_undef_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: retq + %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> undef, <4 x i32> undef) + ret <4 x i32> %out +} +define <4 x i32> @test_compress_const_splat0_mask_with_passthru(<4 x i32> %ignore, <4 x i32> %vec, <4 x i32> %passthru) { +; CHECK-LABEL: test_compress_const_splat0_mask_with_passthru: +; CHECK: # %bb.0: +; CHECK-NEXT: vmovaps %xmm2, %xmm0 +; CHECK-NEXT: retq + %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> splat (i1 0), <4 x i32> %passthru) + ret <4 x i32> %out +} +define <4 x i32> @test_compress_const_splat0_mask_without_passthru(<4 x i32> %ignore, <4 x i32> %vec) { +; CHECK-LABEL: test_compress_const_splat0_mask_without_passthru: +; CHECK: # %bb.0: +; CHECK-NEXT: retq + %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> splat (i1 0), <4 x i32> undef) + ret <4 x i32> %out +} + +define <4 x i8> @test_compress_small(<4 x i8> %vec, <4 x i1> %mask) { +; CHECK-LABEL: test_compress_small: +; CHECK: # %bb.0: +; CHECK-NEXT: vpslld $31, %xmm1, %xmm1 +; CHECK-NEXT: vptestmd %xmm1, %xmm1, %k1 +; CHECK-NEXT: vpcompressb %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %out = call <4 x i8> @llvm.experimental.vector.compress(<4 x i8> %vec, <4 x i1> %mask, <4 x i8> undef) + ret <4 x i8> %out +} + +define <4 x i4> @test_compress_illegal_element_type(<4 x i4> %vec, <4 x i1> %mask) { +; CHECK-LABEL: test_compress_illegal_element_type: +; CHECK: # %bb.0: +; CHECK-NEXT: vpslld $31, %xmm1, %xmm1 +; CHECK-NEXT: vptestmd %xmm1, %xmm1, %k1 +; CHECK-NEXT: vpcompressd %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %out = call <4 x i4> @llvm.experimental.vector.compress(<4 x i4> %vec, <4 x i1> %mask, <4 x i4> undef) + ret <4 x i4> %out +} + +define <3 x i32> @test_compress_narrow(<3 x i32> %vec, <3 x i1> %mask) { +; CHECK-LABEL: test_compress_narrow: +; CHECK: # %bb.0: +; CHECK-NEXT: andl $1, %edi +; CHECK-NEXT: kmovw %edi, %k0 +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: kshiftlw $15, %k1, %k1 +; CHECK-NEXT: kshiftrw $14, %k1, %k1 +; CHECK-NEXT: korw %k1, %k0, %k0 +; CHECK-NEXT: movw $-5, %ax +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: kandw %k1, %k0, %k0 +; CHECK-NEXT: kmovd %edx, %k1 +; CHECK-NEXT: kshiftlw $15, %k1, %k1 +; CHECK-NEXT: kshiftrw $13, %k1, %k1 +; CHECK-NEXT: korw %k1, %k0, %k0 +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: kandw %k1, %k0, %k1 +; CHECK-NEXT: vpcompressd %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %out = call <3 x i32> @llvm.experimental.vector.compress(<3 x i32> %vec, <3 x i1> %mask, <3 x i32> undef) + ret <3 x i32> %out +} + +define <3 x i3> @test_compress_narrow_illegal_element_type(<3 x i3> %vec, <3 x i1> %mask) { +; CHECK-LABEL: 
test_compress_narrow_illegal_element_type: +; CHECK: # %bb.0: +; CHECK-NEXT: andl $1, %ecx +; CHECK-NEXT: kmovw %ecx, %k0 +; CHECK-NEXT: kmovd %r8d, %k1 +; CHECK-NEXT: kshiftlw $15, %k1, %k1 +; CHECK-NEXT: kshiftrw $14, %k1, %k1 +; CHECK-NEXT: korw %k1, %k0, %k0 +; CHECK-NEXT: movw $-5, %ax +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: kandw %k1, %k0, %k0 +; CHECK-NEXT: kmovd %r9d, %k1 +; CHECK-NEXT: kshiftlw $15, %k1, %k1 +; CHECK-NEXT: kshiftrw $13, %k1, %k1 +; CHECK-NEXT: korw %k1, %k0, %k0 +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: kandw %k1, %k0, %k1 +; CHECK-NEXT: vmovd %edi, %xmm0 +; CHECK-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0 +; CHECK-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0 +; CHECK-NEXT: vpcompressd %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vmovd %xmm0, %eax +; CHECK-NEXT: vpextrb $4, %xmm0, %edx +; CHECK-NEXT: vpextrb $8, %xmm0, %ecx +; CHECK-NEXT: # kill: def $al killed $al killed $eax +; CHECK-NEXT: # kill: def $dl killed $dl killed $edx +; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx +; CHECK-NEXT: retq + %out = call <3 x i3> @llvm.experimental.vector.compress(<3 x i3> %vec, <3 x i1> %mask, <3 x i3> undef) + ret <3 x i3> %out +} From 087e5111f2c3d902e0ecbc8a9a8ec84678e23098 Mon Sep 17 00:00:00 2001 From: Lawrence Benson Date: Tue, 20 Aug 2024 13:38:24 +0200 Subject: [PATCH 3/9] Fix formatting --- llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp index cebbc80974d6f..8252c0338252c 100644 --- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -11585,8 +11585,9 @@ SDValue TargetLowering::expandVECTOR_COMPRESS(SDNode *Node, EVT PopcountVT = ScalarVT.changeTypeToInteger(); SDValue Popcount = DAG.getNode( ISD::TRUNCATE, DL, MaskVT.changeVectorElementType(MVT::i1), Mask); - Popcount = DAG.getNode(ISD::ZERO_EXTEND, DL, - MaskVT.changeVectorElementType(PopcountVT), Popcount); + Popcount = + DAG.getNode(ISD::ZERO_EXTEND, DL, + MaskVT.changeVectorElementType(PopcountVT), Popcount); Popcount = DAG.getNode(ISD::VECREDUCE_ADD, DL, PopcountVT, Popcount); SDValue LastElmtPtr = getVectorElementPointer(DAG, StackPtr, VecVT, Popcount); @@ -11628,8 +11629,8 @@ SDValue TargetLowering::expandVECTOR_COMPRESS(SDNode *Node, // overwrite the last write it with the passthru value. SDNodeFlags Flags{}; Flags.setUnpredictable(true); - LastWriteVal = - DAG.getSelect(DL, ScalarVT, AllLanesSelected, ValI, LastWriteVal, Flags); + LastWriteVal = DAG.getSelect(DL, ScalarVT, AllLanesSelected, ValI, + LastWriteVal, Flags); Chain = DAG.getStore( Chain, DL, LastWriteVal, OutPtr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction())); From ef0beacce31f2b5e550ac10ec74653393dd5d568 Mon Sep 17 00:00:00 2001 From: Lawrence Benson Date: Tue, 20 Aug 2024 14:15:25 +0200 Subject: [PATCH 4/9] Add VLX check --- llvm/lib/Target/X86/X86ISelLowering.cpp | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index fa7c7b2789b7d..fcdf12c034d52 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -2321,15 +2321,26 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM, } } + // vpcompress depends on various AVX512 extensions. 
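+  // Roughly: AVX512F alone covers the 512-bit i32/i64/f32/f64 vectors,
+  // AVX512VL adds their 128-bit and 256-bit forms, and AVX512VBMI2 adds the
+  // i8/i16 element types (again requiring VL for the sub-512-bit vectors).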
if (Subtarget.hasAVX512()) { - for (MVT VT : {MVT::v4i32, MVT::v4f32, MVT::v2i64, MVT::v2f64, MVT::v8i32, - MVT::v8f32, MVT::v4i64, MVT::v4f64, MVT::v16i32, MVT::v16f32, - MVT::v8i64, MVT::v8f64}) + // Legal in AVX512F + for (MVT VT : {MVT::v16i32, MVT::v16f32, MVT::v8i64, MVT::v8f64}) setOperationAction(ISD::VECTOR_COMPRESS, VT, Legal); + // Legal in AVX512F + AVX512VL + if (Subtarget.hasVLX()) + for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v4i32, MVT::v4f32, MVT::v4i64, + MVT::v4f64, MVT::v2i64, MVT::v2f64}) + setOperationAction(ISD::VECTOR_COMPRESS, VT, Legal); + + // Legal in AVX512F + AVX512VBMI2 if (Subtarget.hasVBMI2()) - for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v32i8, MVT::v16i16, - MVT::v64i8, MVT::v32i16}) + for (MVT VT : {MVT::v32i16, MVT::v64i8}) + setOperationAction(ISD::VECTOR_COMPRESS, VT, Legal); + + // Legal in AVX512F + AVX512VL + AVX512VBMI2 + if (Subtarget.hasVBMI2() && Subtarget.hasVLX()) + for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v32i8, MVT::v16i16}) setOperationAction(ISD::VECTOR_COMPRESS, VT, Legal); } From e9b8021ced831c9f46c513876c14a47b7e95e575 Mon Sep 17 00:00:00 2001 From: Lawrence Benson Date: Wed, 21 Aug 2024 11:15:22 +0200 Subject: [PATCH 5/9] Merge test files --- llvm/test/CodeGen/X86/vector-compress-avx2.ll | 281 ------- .../CodeGen/X86/vector-compress-avx512.ll | 455 ------------ llvm/test/CodeGen/X86/vector-compress.ll | 689 ++++++++++++++++++ 3 files changed, 689 insertions(+), 736 deletions(-) delete mode 100644 llvm/test/CodeGen/X86/vector-compress-avx2.ll delete mode 100644 llvm/test/CodeGen/X86/vector-compress-avx512.ll create mode 100644 llvm/test/CodeGen/X86/vector-compress.ll diff --git a/llvm/test/CodeGen/X86/vector-compress-avx2.ll b/llvm/test/CodeGen/X86/vector-compress-avx2.ll deleted file mode 100644 index 59db0fb795386..0000000000000 --- a/llvm/test/CodeGen/X86/vector-compress-avx2.ll +++ /dev/null @@ -1,281 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -mtriple=x86_64 -mattr=avx2 < %s | FileCheck %s - -; The main logic for vpcompress is tested in the -avx512.ll version of this file. -; This file only checks the fallback expand path. 
- -define <4 x i32> @test_compress_v4i32(<4 x i32> %vec, <4 x i1> %mask, <4 x i32> %passthru) { -; CHECK-LABEL: test_compress_v4i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vpslld $31, %xmm1, %xmm1 -; CHECK-NEXT: vpsrad $31, %xmm1, %xmm1 -; CHECK-NEXT: vmovaps %xmm2, -{{[0-9]+}}(%rsp) -; CHECK-NEXT: vpextrd $1, %xmm1, %eax -; CHECK-NEXT: vmovd %xmm1, %esi -; CHECK-NEXT: andl $1, %esi -; CHECK-NEXT: movl %esi, %edi -; CHECK-NEXT: subl %eax, %edi -; CHECK-NEXT: vpextrd $2, %xmm1, %edx -; CHECK-NEXT: subl %edx, %edi -; CHECK-NEXT: vpextrd $3, %xmm1, %ecx -; CHECK-NEXT: subl %ecx, %edi -; CHECK-NEXT: andl $3, %edi -; CHECK-NEXT: andl $1, %eax -; CHECK-NEXT: addq %rsi, %rax -; CHECK-NEXT: andl $1, %edx -; CHECK-NEXT: addq %rax, %rdx -; CHECK-NEXT: andl $1, %ecx -; CHECK-NEXT: addq %rdx, %rcx -; CHECK-NEXT: vextractps $3, %xmm0, %r8d -; CHECK-NEXT: cmpq $4, %rcx -; CHECK-NEXT: cmovbl -24(%rsp,%rdi,4), %r8d -; CHECK-NEXT: vmovss %xmm0, -{{[0-9]+}}(%rsp) -; CHECK-NEXT: vextractps $1, %xmm0, -24(%rsp,%rsi,4) -; CHECK-NEXT: vextractps $2, %xmm0, -24(%rsp,%rax,4) -; CHECK-NEXT: andl $3, %edx -; CHECK-NEXT: vextractps $3, %xmm0, -24(%rsp,%rdx,4) -; CHECK-NEXT: cmpq $3, %rcx -; CHECK-NEXT: movl $3, %eax -; CHECK-NEXT: cmovbq %rcx, %rax -; CHECK-NEXT: movl %r8d, -24(%rsp,%rax,4) -; CHECK-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0 -; CHECK-NEXT: retq - %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> %mask, <4 x i32> %passthru) - ret <4 x i32> %out -} - -define <4 x float> @test_compress_v4f32(<4 x float> %vec, <4 x i1> %mask, <4 x float> %passthru) { -; CHECK-LABEL: test_compress_v4f32: -; CHECK: # %bb.0: -; CHECK-NEXT: vpslld $31, %xmm1, %xmm1 -; CHECK-NEXT: vpsrad $31, %xmm1, %xmm1 -; CHECK-NEXT: vmovaps %xmm2, -{{[0-9]+}}(%rsp) -; CHECK-NEXT: vpextrd $1, %xmm1, %edx -; CHECK-NEXT: vmovd %xmm1, %esi -; CHECK-NEXT: andl $1, %esi -; CHECK-NEXT: movl %esi, %edi -; CHECK-NEXT: subl %edx, %edi -; CHECK-NEXT: vpextrd $2, %xmm1, %ecx -; CHECK-NEXT: subl %ecx, %edi -; CHECK-NEXT: vpextrd $3, %xmm1, %eax -; CHECK-NEXT: subl %eax, %edi -; CHECK-NEXT: andl $3, %edi -; CHECK-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero -; CHECK-NEXT: vmovss %xmm0, -{{[0-9]+}}(%rsp) -; CHECK-NEXT: vextractps $1, %xmm0, -24(%rsp,%rsi,4) -; CHECK-NEXT: andl $1, %edx -; CHECK-NEXT: addq %rsi, %rdx -; CHECK-NEXT: vextractps $2, %xmm0, -24(%rsp,%rdx,4) -; CHECK-NEXT: andl $1, %ecx -; CHECK-NEXT: addq %rdx, %rcx -; CHECK-NEXT: andl $1, %eax -; CHECK-NEXT: addq %rcx, %rax -; CHECK-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx -; CHECK-NEXT: andl $3, %ecx -; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3] -; CHECK-NEXT: vmovss %xmm0, -24(%rsp,%rcx,4) -; CHECK-NEXT: cmpq $3, %rax -; CHECK-NEXT: movl $3, %ecx -; CHECK-NEXT: cmovbq %rax, %rcx -; CHECK-NEXT: ja .LBB1_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: vmovaps %xmm1, %xmm0 -; CHECK-NEXT: .LBB1_2: -; CHECK-NEXT: vmovss %xmm0, -24(%rsp,%rcx,4) -; CHECK-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0 -; CHECK-NEXT: retq - %out = call <4 x float> @llvm.experimental.vector.compress(<4 x float> %vec, <4 x i1> %mask, <4 x float> %passthru) - ret <4 x float> %out -} - -define <2 x i64> @test_compress_v2i64(<2 x i64> %vec, <2 x i1> %mask, <2 x i64> %passthru) { -; CHECK-LABEL: test_compress_v2i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vpsllq $63, %xmm1, %xmm1 -; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; CHECK-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm1 -; CHECK-NEXT: vmovaps %xmm2, -{{[0-9]+}}(%rsp) -; CHECK-NEXT: vpextrq $1, %xmm1, %rax -; CHECK-NEXT: vmovq %xmm1, %rcx -; 
CHECK-NEXT: movl %ecx, %edx -; CHECK-NEXT: subl %eax, %edx -; CHECK-NEXT: andl $1, %edx -; CHECK-NEXT: andl $1, %eax -; CHECK-NEXT: andl $1, %ecx -; CHECK-NEXT: addq %rcx, %rax -; CHECK-NEXT: vpextrq $1, %xmm0, %rsi -; CHECK-NEXT: cmpq $2, %rax -; CHECK-NEXT: cmovbq -24(%rsp,%rdx,8), %rsi -; CHECK-NEXT: vmovq %xmm0, -{{[0-9]+}}(%rsp) -; CHECK-NEXT: movl %ecx, %ecx -; CHECK-NEXT: vpextrq $1, %xmm0, -24(%rsp,%rcx,8) -; CHECK-NEXT: cmpq $1, %rax -; CHECK-NEXT: movl $1, %ecx -; CHECK-NEXT: cmovbq %rax, %rcx -; CHECK-NEXT: movq %rsi, -24(%rsp,%rcx,8) -; CHECK-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0 -; CHECK-NEXT: retq - %out = call <2 x i64> @llvm.experimental.vector.compress(<2 x i64> %vec, <2 x i1> %mask, <2 x i64> %passthru) - ret <2 x i64> %out -} - -define <2 x double> @test_compress_v2f64(<2 x double> %vec, <2 x i1> %mask, <2 x double> %passthru) { -; CHECK-LABEL: test_compress_v2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vpsllq $63, %xmm1, %xmm1 -; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; CHECK-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm1 -; CHECK-NEXT: vmovaps %xmm2, -{{[0-9]+}}(%rsp) -; CHECK-NEXT: vpextrq $1, %xmm1, %rax -; CHECK-NEXT: vmovq %xmm1, %rcx -; CHECK-NEXT: movl %ecx, %edx -; CHECK-NEXT: subl %eax, %edx -; CHECK-NEXT: andl $1, %edx -; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero -; CHECK-NEXT: vmovlpd %xmm0, -{{[0-9]+}}(%rsp) -; CHECK-NEXT: andl $1, %ecx -; CHECK-NEXT: movl %ecx, %edx -; CHECK-NEXT: vmovhpd %xmm0, -24(%rsp,%rdx,8) -; CHECK-NEXT: andl $1, %eax -; CHECK-NEXT: addq %rcx, %rax -; CHECK-NEXT: cmpq $2, %rax -; CHECK-NEXT: jb .LBB3_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: vshufpd {{.*#+}} xmm1 = xmm0[1,0] -; CHECK-NEXT: .LBB3_2: -; CHECK-NEXT: cmpq $1, %rax -; CHECK-NEXT: movl $1, %ecx -; CHECK-NEXT: cmovbq %rax, %rcx -; CHECK-NEXT: vmovsd %xmm1, -24(%rsp,%rcx,8) -; CHECK-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0 -; CHECK-NEXT: retq - %out = call <2 x double> @llvm.experimental.vector.compress(<2 x double> %vec, <2 x i1> %mask, <2 x double> %passthru) - ret <2 x double> %out -} - -define <8 x i32> @test_compress_v8i32(<8 x i32> %vec, <8 x i1> %mask, <8 x i32> %passthru) { -; CHECK-LABEL: test_compress_v8i32: -; CHECK: # %bb.0: -; CHECK-NEXT: pushq %rbp -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: .cfi_offset %rbp, -16 -; CHECK-NEXT: movq %rsp, %rbp -; CHECK-NEXT: .cfi_def_cfa_register %rbp -; CHECK-NEXT: pushq %rbx -; CHECK-NEXT: andq $-32, %rsp -; CHECK-NEXT: subq $64, %rsp -; CHECK-NEXT: .cfi_offset %rbx, -24 -; CHECK-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero -; CHECK-NEXT: vpslld $31, %ymm1, %ymm1 -; CHECK-NEXT: vpsrad $31, %ymm1, %ymm3 -; CHECK-NEXT: vmovaps %ymm2, (%rsp) -; CHECK-NEXT: vextracti128 $1, %ymm3, %xmm1 -; CHECK-NEXT: vpackssdw %xmm1, %xmm3, %xmm2 -; CHECK-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero -; CHECK-NEXT: vpslld $31, %ymm2, %ymm2 -; CHECK-NEXT: vpsrld $31, %ymm2, %ymm2 -; CHECK-NEXT: vextracti128 $1, %ymm2, %xmm4 -; CHECK-NEXT: vpaddd %xmm4, %xmm2, %xmm2 -; CHECK-NEXT: vpextrd $1, %xmm2, %eax -; CHECK-NEXT: vmovd %xmm2, %ecx -; CHECK-NEXT: addl %eax, %ecx -; CHECK-NEXT: vpextrd $2, %xmm2, %edx -; CHECK-NEXT: vpextrd $3, %xmm2, %eax -; CHECK-NEXT: addl %edx, %eax -; CHECK-NEXT: addl %ecx, %eax -; CHECK-NEXT: andl $7, %eax -; CHECK-NEXT: vpextrd $1, %xmm3, %ecx -; CHECK-NEXT: andl $1, %ecx -; CHECK-NEXT: vmovd %xmm3, %edx -; CHECK-NEXT: andl $1, %edx -; CHECK-NEXT: 
addq %rdx, %rcx -; CHECK-NEXT: vpextrd $2, %xmm3, %esi -; CHECK-NEXT: andl $1, %esi -; CHECK-NEXT: addq %rcx, %rsi -; CHECK-NEXT: vpextrd $3, %xmm3, %edi -; CHECK-NEXT: andl $1, %edi -; CHECK-NEXT: addq %rsi, %rdi -; CHECK-NEXT: vmovd %xmm1, %r8d -; CHECK-NEXT: andl $1, %r8d -; CHECK-NEXT: addq %rdi, %r8 -; CHECK-NEXT: vpextrd $1, %xmm1, %r9d -; CHECK-NEXT: andl $1, %r9d -; CHECK-NEXT: addq %r8, %r9 -; CHECK-NEXT: vpextrd $2, %xmm1, %r10d -; CHECK-NEXT: andl $1, %r10d -; CHECK-NEXT: addq %r9, %r10 -; CHECK-NEXT: vpextrd $3, %xmm1, %r11d -; CHECK-NEXT: andl $1, %r11d -; CHECK-NEXT: addq %r10, %r11 -; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1 -; CHECK-NEXT: vextractps $3, %xmm1, %ebx -; CHECK-NEXT: cmpq $8, %r11 -; CHECK-NEXT: cmovbl (%rsp,%rax,4), %ebx -; CHECK-NEXT: vmovss %xmm0, (%rsp) -; CHECK-NEXT: vextractps $1, %xmm0, (%rsp,%rdx,4) -; CHECK-NEXT: vextractps $2, %xmm0, (%rsp,%rcx,4) -; CHECK-NEXT: vextractps $3, %xmm0, (%rsp,%rsi,4) -; CHECK-NEXT: andl $7, %edi -; CHECK-NEXT: vmovss %xmm1, (%rsp,%rdi,4) -; CHECK-NEXT: andl $7, %r8d -; CHECK-NEXT: vextractps $1, %xmm1, (%rsp,%r8,4) -; CHECK-NEXT: andl $7, %r9d -; CHECK-NEXT: vextractps $2, %xmm1, (%rsp,%r9,4) -; CHECK-NEXT: andl $7, %r10d -; CHECK-NEXT: vextractps $3, %xmm1, (%rsp,%r10,4) -; CHECK-NEXT: cmpq $7, %r11 -; CHECK-NEXT: movl $7, %eax -; CHECK-NEXT: cmovbq %r11, %rax -; CHECK-NEXT: movl %eax, %eax -; CHECK-NEXT: movl %ebx, (%rsp,%rax,4) -; CHECK-NEXT: vmovaps (%rsp), %ymm0 -; CHECK-NEXT: leaq -8(%rbp), %rsp -; CHECK-NEXT: popq %rbx -; CHECK-NEXT: popq %rbp -; CHECK-NEXT: .cfi_def_cfa %rsp, 8 -; CHECK-NEXT: retq - %out = call <8 x i32> @llvm.experimental.vector.compress(<8 x i32> %vec, <8 x i1> %mask, <8 x i32> %passthru) - ret <8 x i32> %out -} - -define <4 x i32> @test_compress_all_const() { -; CHECK-LABEL: test_compress_all_const: -; CHECK: # %bb.0: -; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = [5,9,0,0] -; CHECK-NEXT: retq - %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> , - <4 x i1> , - <4 x i32> undef) - ret <4 x i32> %out -} - -define <4 x i32> @test_compress_const_mask(<4 x i32> %vec) { -; CHECK-LABEL: test_compress_const_mask: -; CHECK: # %bb.0: -; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3,2,3] -; CHECK-NEXT: retq - %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> , <4 x i32> undef) - ret <4 x i32> %out -} - -define <4 x i32> @test_compress_const_mask_passthrough(<4 x i32> %vec, <4 x i32> %passthru) { -; CHECK-LABEL: test_compress_const_mask_passthrough: -; CHECK: # %bb.0: -; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],xmm1[2,3] -; CHECK-NEXT: retq - %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> , <4 x i32> %passthru) - ret <4 x i32> %out -} - -define <4 x i32> @test_compress_const_mask_const_passthrough(<4 x i32> %vec) { -; CHECK-LABEL: test_compress_const_mask_const_passthrough: -; CHECK: # %bb.0: -; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,3] -; CHECK-NEXT: movl $7, %eax -; CHECK-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0 -; CHECK-NEXT: movl $8, %eax -; CHECK-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0 -; CHECK-NEXT: retq - %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> , <4 x i32> ) - ret <4 x i32> %out -} diff --git a/llvm/test/CodeGen/X86/vector-compress-avx512.ll b/llvm/test/CodeGen/X86/vector-compress-avx512.ll deleted file mode 100644 index 670d5bc12aabd..0000000000000 --- a/llvm/test/CodeGen/X86/vector-compress-avx512.ll +++ /dev/null @@ -1,455 +0,0 @@ -; NOTE: Assertions have been autogenerated 
by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -O3 -mtriple=x86_64 -mattr=+avx512f,+avx512vl,+avx512vbmi2 < %s | FileCheck %s - -define <4 x i32> @test_compress_v4i32(<4 x i32> %vec, <4 x i1> %mask, <4 x i32> %passthru) { -; CHECK-LABEL: test_compress_v4i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vpslld $31, %xmm1, %xmm1 -; CHECK-NEXT: vptestmd %xmm1, %xmm1, %k1 -; CHECK-NEXT: vpcompressd %xmm0, %xmm2 {%k1} -; CHECK-NEXT: vmovdqa %xmm2, %xmm0 -; CHECK-NEXT: retq - %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> %mask, <4 x i32> %passthru) - ret <4 x i32> %out -} - -define <4 x float> @test_compress_v4f32(<4 x float> %vec, <4 x i1> %mask, <4 x float> %passthru) { -; CHECK-LABEL: test_compress_v4f32: -; CHECK: # %bb.0: -; CHECK-NEXT: vpslld $31, %xmm1, %xmm1 -; CHECK-NEXT: vptestmd %xmm1, %xmm1, %k1 -; CHECK-NEXT: vcompressps %xmm0, %xmm2 {%k1} -; CHECK-NEXT: vmovdqa %xmm2, %xmm0 -; CHECK-NEXT: retq - %out = call <4 x float> @llvm.experimental.vector.compress(<4 x float> %vec, <4 x i1> %mask, <4 x float> %passthru) - ret <4 x float> %out -} - -define <2 x i64> @test_compress_v2i64(<2 x i64> %vec, <2 x i1> %mask, <2 x i64> %passthru) { -; CHECK-LABEL: test_compress_v2i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vpsllq $63, %xmm1, %xmm1 -; CHECK-NEXT: vptestmq %xmm1, %xmm1, %k1 -; CHECK-NEXT: vpcompressq %xmm0, %xmm2 {%k1} -; CHECK-NEXT: vmovdqa %xmm2, %xmm0 -; CHECK-NEXT: retq - %out = call <2 x i64> @llvm.experimental.vector.compress(<2 x i64> %vec, <2 x i1> %mask, <2 x i64> %passthru) - ret <2 x i64> %out -} - -define <2 x double> @test_compress_v2f64(<2 x double> %vec, <2 x i1> %mask, <2 x double> %passthru) { -; CHECK-LABEL: test_compress_v2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vpsllq $63, %xmm1, %xmm1 -; CHECK-NEXT: vptestmq %xmm1, %xmm1, %k1 -; CHECK-NEXT: vcompresspd %xmm0, %xmm2 {%k1} -; CHECK-NEXT: vmovdqa %xmm2, %xmm0 -; CHECK-NEXT: retq - %out = call <2 x double> @llvm.experimental.vector.compress(<2 x double> %vec, <2 x i1> %mask, <2 x double> %passthru) - ret <2 x double> %out -} - -define <8 x i32> @test_compress_v8i32(<8 x i32> %vec, <8 x i1> %mask, <8 x i32> %passthru) { -; CHECK-LABEL: test_compress_v8i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vpsllw $15, %xmm1, %xmm1 -; CHECK-NEXT: vpmovw2m %xmm1, %k1 -; CHECK-NEXT: vpcompressd %ymm0, %ymm2 {%k1} -; CHECK-NEXT: vmovdqa %ymm2, %ymm0 -; CHECK-NEXT: retq - %out = call <8 x i32> @llvm.experimental.vector.compress(<8 x i32> %vec, <8 x i1> %mask, <8 x i32> %passthru) - ret <8 x i32> %out -} - -define <8 x float> @test_compress_v8f32(<8 x float> %vec, <8 x i1> %mask, <8 x float> %passthru) { -; CHECK-LABEL: test_compress_v8f32: -; CHECK: # %bb.0: -; CHECK-NEXT: vpsllw $15, %xmm1, %xmm1 -; CHECK-NEXT: vpmovw2m %xmm1, %k1 -; CHECK-NEXT: vcompressps %ymm0, %ymm2 {%k1} -; CHECK-NEXT: vmovdqa %ymm2, %ymm0 -; CHECK-NEXT: retq - %out = call <8 x float> @llvm.experimental.vector.compress(<8 x float> %vec, <8 x i1> %mask, <8 x float> %passthru) - ret <8 x float> %out -} - -define <4 x i64> @test_compress_v4i64(<4 x i64> %vec, <4 x i1> %mask, <4 x i64> %passthru) { -; CHECK-LABEL: test_compress_v4i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vpslld $31, %xmm1, %xmm1 -; CHECK-NEXT: vptestmd %xmm1, %xmm1, %k1 -; CHECK-NEXT: vpcompressq %ymm0, %ymm2 {%k1} -; CHECK-NEXT: vmovdqa %ymm2, %ymm0 -; CHECK-NEXT: retq - %out = call <4 x i64> @llvm.experimental.vector.compress(<4 x i64> %vec, <4 x i1> %mask, <4 x i64> %passthru) - ret <4 x i64> %out -} - -define <4 x double> @test_compress_v4f64(<4 x double> %vec, <4 x 
i1> %mask, <4 x double> %passthru) { -; CHECK-LABEL: test_compress_v4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vpslld $31, %xmm1, %xmm1 -; CHECK-NEXT: vptestmd %xmm1, %xmm1, %k1 -; CHECK-NEXT: vcompresspd %ymm0, %ymm2 {%k1} -; CHECK-NEXT: vmovdqa %ymm2, %ymm0 -; CHECK-NEXT: retq - %out = call <4 x double> @llvm.experimental.vector.compress(<4 x double> %vec, <4 x i1> %mask, <4 x double> %passthru) - ret <4 x double> %out -} - -define <16 x i32> @test_compress_v16i32(<16 x i32> %vec, <16 x i1> %mask, <16 x i32> %passthru) { -; CHECK-LABEL: test_compress_v16i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vpsllw $7, %xmm1, %xmm1 -; CHECK-NEXT: vpmovb2m %xmm1, %k1 -; CHECK-NEXT: vpcompressd %zmm0, %zmm2 {%k1} -; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0 -; CHECK-NEXT: retq - %out = call <16 x i32> @llvm.experimental.vector.compress(<16 x i32> %vec, <16 x i1> %mask, <16 x i32> %passthru) - ret <16 x i32> %out -} - -define <16 x float> @test_compress_v16f32(<16 x float> %vec, <16 x i1> %mask, <16 x float> %passthru) { -; CHECK-LABEL: test_compress_v16f32: -; CHECK: # %bb.0: -; CHECK-NEXT: vpsllw $7, %xmm1, %xmm1 -; CHECK-NEXT: vpmovb2m %xmm1, %k1 -; CHECK-NEXT: vcompressps %zmm0, %zmm2 {%k1} -; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0 -; CHECK-NEXT: retq - %out = call <16 x float> @llvm.experimental.vector.compress(<16 x float> %vec, <16 x i1> %mask, <16 x float> %passthru) - ret <16 x float> %out -} - -define <8 x i64> @test_compress_v8i64(<8 x i64> %vec, <8 x i1> %mask, <8 x i64> %passthru) { -; CHECK-LABEL: test_compress_v8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vpsllw $15, %xmm1, %xmm1 -; CHECK-NEXT: vpmovw2m %xmm1, %k1 -; CHECK-NEXT: vpcompressq %zmm0, %zmm2 {%k1} -; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0 -; CHECK-NEXT: retq - %out = call <8 x i64> @llvm.experimental.vector.compress(<8 x i64> %vec, <8 x i1> %mask, <8 x i64> %passthru) - ret <8 x i64> %out -} - -define <8 x double> @test_compress_v8f64(<8 x double> %vec, <8 x i1> %mask, <8 x double> %passthru) { -; CHECK-LABEL: test_compress_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vpsllw $15, %xmm1, %xmm1 -; CHECK-NEXT: vpmovw2m %xmm1, %k1 -; CHECK-NEXT: vcompresspd %zmm0, %zmm2 {%k1} -; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0 -; CHECK-NEXT: retq - %out = call <8 x double> @llvm.experimental.vector.compress(<8 x double> %vec, <8 x i1> %mask, <8 x double> %passthru) - ret <8 x double> %out -} - -define <16 x i8> @test_compress_v16i8(<16 x i8> %vec, <16 x i1> %mask, <16 x i8> %passthru) { -; CHECK-LABEL: test_compress_v16i8: -; CHECK: # %bb.0: -; CHECK-NEXT: vpsllw $7, %xmm1, %xmm1 -; CHECK-NEXT: vpmovb2m %xmm1, %k1 -; CHECK-NEXT: vpcompressb %xmm0, %xmm2 {%k1} -; CHECK-NEXT: vmovdqa %xmm2, %xmm0 -; CHECK-NEXT: retq - %out = call <16 x i8> @llvm.experimental.vector.compress(<16 x i8> %vec, <16 x i1> %mask, <16 x i8> %passthru) - ret <16 x i8> %out -} - -define <8 x i16> @test_compress_v8i16(<8 x i16> %vec, <8 x i1> %mask, <8 x i16> %passthru) { -; CHECK-LABEL: test_compress_v8i16: -; CHECK: # %bb.0: -; CHECK-NEXT: vpsllw $15, %xmm1, %xmm1 -; CHECK-NEXT: vpmovw2m %xmm1, %k1 -; CHECK-NEXT: vpcompressw %xmm0, %xmm2 {%k1} -; CHECK-NEXT: vmovdqa %xmm2, %xmm0 -; CHECK-NEXT: retq - %out = call <8 x i16> @llvm.experimental.vector.compress(<8 x i16> %vec, <8 x i1> %mask, <8 x i16> %passthru) - ret <8 x i16> %out -} - -define <32 x i8> @test_compress_v32i8(<32 x i8> %vec, <32 x i1> %mask, <32 x i8> %passthru) { -; CHECK-LABEL: test_compress_v32i8: -; CHECK: # %bb.0: -; CHECK-NEXT: vpsllw $7, %ymm1, %ymm1 -; CHECK-NEXT: vpmovb2m %ymm1, %k1 -; CHECK-NEXT: vpcompressb %ymm0, %ymm2 {%k1} -; 
CHECK-NEXT: vmovdqa %ymm2, %ymm0 -; CHECK-NEXT: retq - %out = call <32 x i8> @llvm.experimental.vector.compress(<32 x i8> %vec, <32 x i1> %mask, <32 x i8> %passthru) - ret <32 x i8> %out -} - -define <16 x i16> @test_compress_v16i16(<16 x i16> %vec, <16 x i1> %mask, <16 x i16> %passthru) { -; CHECK-LABEL: test_compress_v16i16: -; CHECK: # %bb.0: -; CHECK-NEXT: vpsllw $7, %xmm1, %xmm1 -; CHECK-NEXT: vpmovb2m %xmm1, %k1 -; CHECK-NEXT: vpcompressw %ymm0, %ymm2 {%k1} -; CHECK-NEXT: vmovdqa %ymm2, %ymm0 -; CHECK-NEXT: retq - %out = call <16 x i16> @llvm.experimental.vector.compress(<16 x i16> %vec, <16 x i1> %mask, <16 x i16> %passthru) - ret <16 x i16> %out -} - -define <64 x i8> @test_compress_v64i8(<64 x i8> %vec, <64 x i1> %mask, <64 x i8> %passthru) { -; CHECK-LABEL: test_compress_v64i8: -; CHECK: # %bb.0: -; CHECK-NEXT: vpsllw $7, %zmm1, %zmm1 -; CHECK-NEXT: vpmovb2m %zmm1, %k1 -; CHECK-NEXT: vpcompressb %zmm0, %zmm2 {%k1} -; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0 -; CHECK-NEXT: retq - %out = call <64 x i8> @llvm.experimental.vector.compress(<64 x i8> %vec, <64 x i1> %mask, <64 x i8> %passthru) - ret <64 x i8> %out -} - -define <32 x i16> @test_compress_v32i16(<32 x i16> %vec, <32 x i1> %mask, <32 x i16> %passthru) { -; CHECK-LABEL: test_compress_v32i16: -; CHECK: # %bb.0: -; CHECK-NEXT: vpsllw $7, %ymm1, %ymm1 -; CHECK-NEXT: vpmovb2m %ymm1, %k1 -; CHECK-NEXT: vpcompressw %zmm0, %zmm2 {%k1} -; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0 -; CHECK-NEXT: retq - %out = call <32 x i16> @llvm.experimental.vector.compress(<32 x i16> %vec, <32 x i1> %mask, <32 x i16> %passthru) - ret <32 x i16> %out -} - -define <64 x i32> @test_compress_large(<64 x i1> %mask, <64 x i32> %vec, <64 x i32> %passthru) { -; CHECK-LABEL: test_compress_large: -; CHECK: # %bb.0: -; CHECK-NEXT: pushq %rbp -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: .cfi_offset %rbp, -16 -; CHECK-NEXT: movq %rsp, %rbp -; CHECK-NEXT: .cfi_def_cfa_register %rbp -; CHECK-NEXT: andq $-64, %rsp -; CHECK-NEXT: subq $576, %rsp # imm = 0x240 -; CHECK-NEXT: vpsllw $7, %zmm0, %zmm0 -; CHECK-NEXT: vpmovb2m %zmm0, %k1 -; CHECK-NEXT: kshiftrq $32, %k1, %k4 -; CHECK-NEXT: kshiftrd $16, %k4, %k3 -; CHECK-NEXT: kshiftrd $16, %k1, %k2 -; CHECK-NEXT: vpcompressd %zmm1, %zmm0 {%k1} {z} -; CHECK-NEXT: vmovdqa64 %zmm0, (%rsp) -; CHECK-NEXT: kshiftrw $8, %k1, %k0 -; CHECK-NEXT: kxorw %k0, %k1, %k0 -; CHECK-NEXT: kshiftrw $4, %k0, %k5 -; CHECK-NEXT: kxorw %k5, %k0, %k0 -; CHECK-NEXT: kshiftrw $2, %k0, %k5 -; CHECK-NEXT: kxorw %k5, %k0, %k0 -; CHECK-NEXT: kshiftrw $1, %k0, %k5 -; CHECK-NEXT: kxorw %k5, %k0, %k0 -; CHECK-NEXT: kmovd %k0, %eax -; CHECK-NEXT: andl $31, %eax -; CHECK-NEXT: vpcompressd %zmm2, %zmm0 {%k2} {z} -; CHECK-NEXT: vmovdqa64 %zmm0, (%rsp,%rax,4) -; CHECK-NEXT: vpcompressd %zmm3, %zmm0 {%k4} {z} -; CHECK-NEXT: vmovdqa64 %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: kshiftrw $8, %k4, %k0 -; CHECK-NEXT: kxorw %k0, %k4, %k0 -; CHECK-NEXT: kshiftrw $4, %k0, %k4 -; CHECK-NEXT: kxorw %k4, %k0, %k0 -; CHECK-NEXT: kshiftrw $2, %k0, %k4 -; CHECK-NEXT: kxorw %k4, %k0, %k0 -; CHECK-NEXT: kshiftrw $1, %k0, %k4 -; CHECK-NEXT: kxorw %k4, %k0, %k0 -; CHECK-NEXT: kmovd %k0, %eax -; CHECK-NEXT: andl $31, %eax -; CHECK-NEXT: vpcompressd %zmm4, %zmm0 {%k3} {z} -; CHECK-NEXT: vmovdqa64 %zmm0, 128(%rsp,%rax,4) -; CHECK-NEXT: vmovaps (%rsp), %zmm0 -; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm1 -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: kxorw %k2, %k1, %k0 -; CHECK-NEXT: kshiftrw $8, %k0, %k1 -; CHECK-NEXT: kxorw %k1, %k0, %k0 -; CHECK-NEXT: kshiftrw $4, 
%k0, %k1 -; CHECK-NEXT: kxorw %k1, %k0, %k0 -; CHECK-NEXT: kshiftrw $2, %k0, %k1 -; CHECK-NEXT: kxorw %k1, %k0, %k0 -; CHECK-NEXT: kshiftrw $1, %k0, %k1 -; CHECK-NEXT: kxorw %k1, %k0, %k0 -; CHECK-NEXT: kmovd %k0, %eax -; CHECK-NEXT: andl $63, %eax -; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm0 -; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm2 -; CHECK-NEXT: vmovaps %zmm0, 256(%rsp,%rax,4) -; CHECK-NEXT: vmovaps %zmm1, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm2, 320(%rsp,%rax,4) -; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm0 -; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm1 -; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm2 -; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm3 -; CHECK-NEXT: movq %rbp, %rsp -; CHECK-NEXT: popq %rbp -; CHECK-NEXT: .cfi_def_cfa %rsp, 8 -; CHECK-NEXT: retq - %out = call <64 x i32> @llvm.experimental.vector.compress(<64 x i32> %vec, <64 x i1> %mask, <64 x i32> undef) - ret <64 x i32> %out -} - -define <4 x i32> @test_compress_all_const() { -; CHECK-LABEL: test_compress_all_const: -; CHECK: # %bb.0: -; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = [5,9,0,0] -; CHECK-NEXT: retq - %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> , - <4 x i1> , - <4 x i32> undef) - ret <4 x i32> %out -} - -define <4 x i32> @test_compress_const_mask(<4 x i32> %vec) { -; CHECK-LABEL: test_compress_const_mask: -; CHECK: # %bb.0: -; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3,2,3] -; CHECK-NEXT: retq - %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> , <4 x i32> undef) - ret <4 x i32> %out -} - -define <4 x i32> @test_compress_const_mask_passthrough(<4 x i32> %vec, <4 x i32> %passthru) { -; CHECK-LABEL: test_compress_const_mask_passthrough: -; CHECK: # %bb.0: -; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],xmm1[2,3] -; CHECK-NEXT: retq - %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> , <4 x i32> %passthru) - ret <4 x i32> %out -} - -define <4 x i32> @test_compress_const_mask_const_passthrough(<4 x i32> %vec) { -; CHECK-LABEL: test_compress_const_mask_const_passthrough: -; CHECK: # %bb.0: -; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,3] -; CHECK-NEXT: movl $7, %eax -; CHECK-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0 -; CHECK-NEXT: movl $8, %eax -; CHECK-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0 -; CHECK-NEXT: retq - %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> , <4 x i32> ) - ret <4 x i32> %out -} - -; We pass a placeholder value for the const_mask* tests to check that they are converted to a no-op by simply copying -; the second vector input register to the return register or doing nothing. 
-define <4 x i32> @test_compress_const_splat1_mask(<4 x i32> %ignore, <4 x i32> %vec) { -; CHECK-LABEL: test_compress_const_splat1_mask: -; CHECK: # %bb.0: -; CHECK-NEXT: vmovaps %xmm1, %xmm0 -; CHECK-NEXT: retq - %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> splat (i1 -1), <4 x i32> undef) - ret <4 x i32> %out -} -define <4 x i32> @test_compress_const_splat0_mask(<4 x i32> %ignore, <4 x i32> %vec) { -; CHECK-LABEL: test_compress_const_splat0_mask: -; CHECK: # %bb.0: -; CHECK-NEXT: retq - %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> splat (i1 0), <4 x i32> undef) - ret <4 x i32> %out -} -define <4 x i32> @test_compress_undef_mask(<4 x i32> %ignore, <4 x i32> %vec) { -; CHECK-LABEL: test_compress_undef_mask: -; CHECK: # %bb.0: -; CHECK-NEXT: retq - %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> undef, <4 x i32> undef) - ret <4 x i32> %out -} -define <4 x i32> @test_compress_const_splat0_mask_with_passthru(<4 x i32> %ignore, <4 x i32> %vec, <4 x i32> %passthru) { -; CHECK-LABEL: test_compress_const_splat0_mask_with_passthru: -; CHECK: # %bb.0: -; CHECK-NEXT: vmovaps %xmm2, %xmm0 -; CHECK-NEXT: retq - %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> splat (i1 0), <4 x i32> %passthru) - ret <4 x i32> %out -} -define <4 x i32> @test_compress_const_splat0_mask_without_passthru(<4 x i32> %ignore, <4 x i32> %vec) { -; CHECK-LABEL: test_compress_const_splat0_mask_without_passthru: -; CHECK: # %bb.0: -; CHECK-NEXT: retq - %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> splat (i1 0), <4 x i32> undef) - ret <4 x i32> %out -} - -define <4 x i8> @test_compress_small(<4 x i8> %vec, <4 x i1> %mask) { -; CHECK-LABEL: test_compress_small: -; CHECK: # %bb.0: -; CHECK-NEXT: vpslld $31, %xmm1, %xmm1 -; CHECK-NEXT: vptestmd %xmm1, %xmm1, %k1 -; CHECK-NEXT: vpcompressb %xmm0, %xmm0 {%k1} {z} -; CHECK-NEXT: retq - %out = call <4 x i8> @llvm.experimental.vector.compress(<4 x i8> %vec, <4 x i1> %mask, <4 x i8> undef) - ret <4 x i8> %out -} - -define <4 x i4> @test_compress_illegal_element_type(<4 x i4> %vec, <4 x i1> %mask) { -; CHECK-LABEL: test_compress_illegal_element_type: -; CHECK: # %bb.0: -; CHECK-NEXT: vpslld $31, %xmm1, %xmm1 -; CHECK-NEXT: vptestmd %xmm1, %xmm1, %k1 -; CHECK-NEXT: vpcompressd %xmm0, %xmm0 {%k1} {z} -; CHECK-NEXT: retq - %out = call <4 x i4> @llvm.experimental.vector.compress(<4 x i4> %vec, <4 x i1> %mask, <4 x i4> undef) - ret <4 x i4> %out -} - -define <3 x i32> @test_compress_narrow(<3 x i32> %vec, <3 x i1> %mask) { -; CHECK-LABEL: test_compress_narrow: -; CHECK: # %bb.0: -; CHECK-NEXT: andl $1, %edi -; CHECK-NEXT: kmovw %edi, %k0 -; CHECK-NEXT: kmovd %esi, %k1 -; CHECK-NEXT: kshiftlw $15, %k1, %k1 -; CHECK-NEXT: kshiftrw $14, %k1, %k1 -; CHECK-NEXT: korw %k1, %k0, %k0 -; CHECK-NEXT: movw $-5, %ax -; CHECK-NEXT: kmovd %eax, %k1 -; CHECK-NEXT: kandw %k1, %k0, %k0 -; CHECK-NEXT: kmovd %edx, %k1 -; CHECK-NEXT: kshiftlw $15, %k1, %k1 -; CHECK-NEXT: kshiftrw $13, %k1, %k1 -; CHECK-NEXT: korw %k1, %k0, %k0 -; CHECK-NEXT: movb $7, %al -; CHECK-NEXT: kmovd %eax, %k1 -; CHECK-NEXT: kandw %k1, %k0, %k1 -; CHECK-NEXT: vpcompressd %xmm0, %xmm0 {%k1} {z} -; CHECK-NEXT: retq - %out = call <3 x i32> @llvm.experimental.vector.compress(<3 x i32> %vec, <3 x i1> %mask, <3 x i32> undef) - ret <3 x i32> %out -} - -define <3 x i3> @test_compress_narrow_illegal_element_type(<3 x i3> %vec, <3 x i1> %mask) { -; CHECK-LABEL: 
test_compress_narrow_illegal_element_type: -; CHECK: # %bb.0: -; CHECK-NEXT: andl $1, %ecx -; CHECK-NEXT: kmovw %ecx, %k0 -; CHECK-NEXT: kmovd %r8d, %k1 -; CHECK-NEXT: kshiftlw $15, %k1, %k1 -; CHECK-NEXT: kshiftrw $14, %k1, %k1 -; CHECK-NEXT: korw %k1, %k0, %k0 -; CHECK-NEXT: movw $-5, %ax -; CHECK-NEXT: kmovd %eax, %k1 -; CHECK-NEXT: kandw %k1, %k0, %k0 -; CHECK-NEXT: kmovd %r9d, %k1 -; CHECK-NEXT: kshiftlw $15, %k1, %k1 -; CHECK-NEXT: kshiftrw $13, %k1, %k1 -; CHECK-NEXT: korw %k1, %k0, %k0 -; CHECK-NEXT: movb $7, %al -; CHECK-NEXT: kmovd %eax, %k1 -; CHECK-NEXT: kandw %k1, %k0, %k1 -; CHECK-NEXT: vmovd %edi, %xmm0 -; CHECK-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0 -; CHECK-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0 -; CHECK-NEXT: vpcompressd %xmm0, %xmm0 {%k1} {z} -; CHECK-NEXT: vmovd %xmm0, %eax -; CHECK-NEXT: vpextrb $4, %xmm0, %edx -; CHECK-NEXT: vpextrb $8, %xmm0, %ecx -; CHECK-NEXT: # kill: def $al killed $al killed $eax -; CHECK-NEXT: # kill: def $dl killed $dl killed $edx -; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx -; CHECK-NEXT: retq - %out = call <3 x i3> @llvm.experimental.vector.compress(<3 x i3> %vec, <3 x i1> %mask, <3 x i3> undef) - ret <3 x i3> %out -} diff --git a/llvm/test/CodeGen/X86/vector-compress.ll b/llvm/test/CodeGen/X86/vector-compress.ll new file mode 100644 index 0000000000000..17e195c9f0ec0 --- /dev/null +++ b/llvm/test/CodeGen/X86/vector-compress.ll @@ -0,0 +1,689 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -O1 -mtriple=x86_64 -mattr=+avx2 < %s | FileCheck %s --check-prefixes=CHECK,AVX2 +; RUN: llc -O1 -mtriple=x86_64 -mattr=+avx512f,+avx512vl,+avx512vbmi2 < %s | FileCheck %s --check-prefixes=CHECK,AVX512 + +define <4 x i32> @test_compress_v4i32(<4 x i32> %vec, <4 x i1> %mask, <4 x i32> %passthru) { +; AVX2-LABEL: test_compress_v4i32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX2-NEXT: vpsrad $31, %xmm1, %xmm1 +; AVX2-NEXT: vmovaps %xmm2, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrd $1, %xmm1, %eax +; AVX2-NEXT: vmovd %xmm1, %esi +; AVX2-NEXT: andl $1, %esi +; AVX2-NEXT: movl %esi, %edi +; AVX2-NEXT: subl %eax, %edi +; AVX2-NEXT: vpextrd $2, %xmm1, %edx +; AVX2-NEXT: subl %edx, %edi +; AVX2-NEXT: vpextrd $3, %xmm1, %ecx +; AVX2-NEXT: subl %ecx, %edi +; AVX2-NEXT: andl $3, %edi +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rsi, %rax +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rax, %rdx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: vextractps $3, %xmm0, %r8d +; AVX2-NEXT: cmpq $4, %rcx +; AVX2-NEXT: cmovbl -24(%rsp,%rdi,4), %r8d +; AVX2-NEXT: vmovss %xmm0, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vextractps $1, %xmm0, -24(%rsp,%rsi,4) +; AVX2-NEXT: vextractps $2, %xmm0, -24(%rsp,%rax,4) +; AVX2-NEXT: andl $3, %edx +; AVX2-NEXT: vextractps $3, %xmm0, -24(%rsp,%rdx,4) +; AVX2-NEXT: cmpq $3, %rcx +; AVX2-NEXT: movl $3, %eax +; AVX2-NEXT: cmovbq %rcx, %rax +; AVX2-NEXT: movl %r8d, -24(%rsp,%rax,4) +; AVX2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0 +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_compress_v4i32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX512-NEXT: vptestmd %xmm1, %xmm1, %k1 +; AVX512-NEXT: vpcompressd %xmm0, %xmm2 {%k1} +; AVX512-NEXT: vmovdqa %xmm2, %xmm0 +; AVX512-NEXT: retq + %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> %mask, <4 x i32> %passthru) + ret <4 x i32> %out +} + +define <4 x float> @test_compress_v4f32(<4 x float> %vec, <4 x i1> %mask, <4 x float> %passthru) { +; AVX2-LABEL: 
test_compress_v4f32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX2-NEXT: vpsrad $31, %xmm1, %xmm1 +; AVX2-NEXT: vmovaps %xmm2, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrd $1, %xmm1, %edx +; AVX2-NEXT: vmovd %xmm1, %esi +; AVX2-NEXT: andl $1, %esi +; AVX2-NEXT: movl %esi, %edi +; AVX2-NEXT: subl %edx, %edi +; AVX2-NEXT: vpextrd $2, %xmm1, %ecx +; AVX2-NEXT: subl %ecx, %edi +; AVX2-NEXT: vpextrd $3, %xmm1, %eax +; AVX2-NEXT: subl %eax, %edi +; AVX2-NEXT: andl $3, %edi +; AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX2-NEXT: vmovss %xmm0, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vextractps $1, %xmm0, -24(%rsp,%rsi,4) +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rsi, %rdx +; AVX2-NEXT: vextractps $2, %xmm0, -24(%rsp,%rdx,4) +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $3, %ecx +; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3] +; AVX2-NEXT: vmovss %xmm0, -24(%rsp,%rcx,4) +; AVX2-NEXT: cmpq $3, %rax +; AVX2-NEXT: movl $3, %ecx +; AVX2-NEXT: cmovbq %rax, %rcx +; AVX2-NEXT: ja .LBB1_2 +; AVX2-NEXT: # %bb.1: +; AVX2-NEXT: vmovaps %xmm1, %xmm0 +; AVX2-NEXT: .LBB1_2: +; AVX2-NEXT: vmovss %xmm0, -24(%rsp,%rcx,4) +; AVX2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0 +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_compress_v4f32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX512-NEXT: vptestmd %xmm1, %xmm1, %k1 +; AVX512-NEXT: vcompressps %xmm0, %xmm2 {%k1} +; AVX512-NEXT: vmovdqa %xmm2, %xmm0 +; AVX512-NEXT: retq + %out = call <4 x float> @llvm.experimental.vector.compress(<4 x float> %vec, <4 x i1> %mask, <4 x float> %passthru) + ret <4 x float> %out +} + +define <2 x i64> @test_compress_v2i64(<2 x i64> %vec, <2 x i1> %mask, <2 x i64> %passthru) { +; AVX2-LABEL: test_compress_v2i64: +; AVX2: # %bb.0: +; AVX2-NEXT: vpsllq $63, %xmm1, %xmm1 +; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; AVX2-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm1 +; AVX2-NEXT: vmovaps %xmm2, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrq $1, %xmm1, %rax +; AVX2-NEXT: vmovq %xmm1, %rcx +; AVX2-NEXT: movl %ecx, %edx +; AVX2-NEXT: subl %eax, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: vpextrq $1, %xmm0, %rsi +; AVX2-NEXT: cmpq $2, %rax +; AVX2-NEXT: cmovbq -24(%rsp,%rdx,8), %rsi +; AVX2-NEXT: vmovq %xmm0, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: movl %ecx, %ecx +; AVX2-NEXT: vpextrq $1, %xmm0, -24(%rsp,%rcx,8) +; AVX2-NEXT: cmpq $1, %rax +; AVX2-NEXT: movl $1, %ecx +; AVX2-NEXT: cmovbq %rax, %rcx +; AVX2-NEXT: movq %rsi, -24(%rsp,%rcx,8) +; AVX2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0 +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_compress_v2i64: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllq $63, %xmm1, %xmm1 +; AVX512-NEXT: vptestmq %xmm1, %xmm1, %k1 +; AVX512-NEXT: vpcompressq %xmm0, %xmm2 {%k1} +; AVX512-NEXT: vmovdqa %xmm2, %xmm0 +; AVX512-NEXT: retq + %out = call <2 x i64> @llvm.experimental.vector.compress(<2 x i64> %vec, <2 x i1> %mask, <2 x i64> %passthru) + ret <2 x i64> %out +} + +define <2 x double> @test_compress_v2f64(<2 x double> %vec, <2 x i1> %mask, <2 x double> %passthru) { +; AVX2-LABEL: test_compress_v2f64: +; AVX2: # %bb.0: +; AVX2-NEXT: vpsllq $63, %xmm1, %xmm1 +; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; AVX2-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm1 +; AVX2-NEXT: vmovaps %xmm2, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vpextrq $1, %xmm1, %rax +; AVX2-NEXT: vmovq %xmm1, %rcx +; AVX2-NEXT: 
movl %ecx, %edx +; AVX2-NEXT: subl %eax, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; AVX2-NEXT: vmovlpd %xmm0, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: movl %ecx, %edx +; AVX2-NEXT: vmovhpd %xmm0, -24(%rsp,%rdx,8) +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: cmpq $2, %rax +; AVX2-NEXT: jb .LBB3_2 +; AVX2-NEXT: # %bb.1: +; AVX2-NEXT: vshufpd {{.*#+}} xmm1 = xmm0[1,0] +; AVX2-NEXT: .LBB3_2: +; AVX2-NEXT: cmpq $1, %rax +; AVX2-NEXT: movl $1, %ecx +; AVX2-NEXT: cmovbq %rax, %rcx +; AVX2-NEXT: vmovsd %xmm1, -24(%rsp,%rcx,8) +; AVX2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0 +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_compress_v2f64: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllq $63, %xmm1, %xmm1 +; AVX512-NEXT: vptestmq %xmm1, %xmm1, %k1 +; AVX512-NEXT: vcompresspd %xmm0, %xmm2 {%k1} +; AVX512-NEXT: vmovdqa %xmm2, %xmm0 +; AVX512-NEXT: retq + %out = call <2 x double> @llvm.experimental.vector.compress(<2 x double> %vec, <2 x i1> %mask, <2 x double> %passthru) + ret <2 x double> %out +} + +define <8 x i32> @test_compress_v8i32(<8 x i32> %vec, <8 x i1> %mask, <8 x i32> %passthru) { +; AVX2-LABEL: test_compress_v8i32: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: .cfi_def_cfa_offset 16 +; AVX2-NEXT: .cfi_offset %rbp, -16 +; AVX2-NEXT: movq %rsp, %rbp +; AVX2-NEXT: .cfi_def_cfa_register %rbp +; AVX2-NEXT: pushq %rbx +; AVX2-NEXT: andq $-32, %rsp +; AVX2-NEXT: subq $64, %rsp +; AVX2-NEXT: .cfi_offset %rbx, -24 +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX2-NEXT: vpslld $31, %ymm1, %ymm1 +; AVX2-NEXT: vpsrad $31, %ymm1, %ymm3 +; AVX2-NEXT: vmovaps %ymm2, (%rsp) +; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm1 +; AVX2-NEXT: vpackssdw %xmm1, %xmm3, %xmm2 +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero +; AVX2-NEXT: vpslld $31, %ymm2, %ymm2 +; AVX2-NEXT: vpsrld $31, %ymm2, %ymm2 +; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm4 +; AVX2-NEXT: vpaddd %xmm4, %xmm2, %xmm2 +; AVX2-NEXT: vpextrd $1, %xmm2, %eax +; AVX2-NEXT: vmovd %xmm2, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: vpextrd $2, %xmm2, %edx +; AVX2-NEXT: vpextrd $3, %xmm2, %eax +; AVX2-NEXT: addl %edx, %eax +; AVX2-NEXT: addl %ecx, %eax +; AVX2-NEXT: andl $7, %eax +; AVX2-NEXT: vpextrd $1, %xmm3, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: vmovd %xmm3, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rdx, %rcx +; AVX2-NEXT: vpextrd $2, %xmm3, %esi +; AVX2-NEXT: andl $1, %esi +; AVX2-NEXT: addq %rcx, %rsi +; AVX2-NEXT: vpextrd $3, %xmm3, %edi +; AVX2-NEXT: andl $1, %edi +; AVX2-NEXT: addq %rsi, %rdi +; AVX2-NEXT: vmovd %xmm1, %r8d +; AVX2-NEXT: andl $1, %r8d +; AVX2-NEXT: addq %rdi, %r8 +; AVX2-NEXT: vpextrd $1, %xmm1, %r9d +; AVX2-NEXT: andl $1, %r9d +; AVX2-NEXT: addq %r8, %r9 +; AVX2-NEXT: vpextrd $2, %xmm1, %r10d +; AVX2-NEXT: andl $1, %r10d +; AVX2-NEXT: addq %r9, %r10 +; AVX2-NEXT: vpextrd $3, %xmm1, %r11d +; AVX2-NEXT: andl $1, %r11d +; AVX2-NEXT: addq %r10, %r11 +; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vextractps $3, %xmm1, %ebx +; AVX2-NEXT: cmpq $8, %r11 +; AVX2-NEXT: cmovbl (%rsp,%rax,4), %ebx +; AVX2-NEXT: vmovss %xmm0, (%rsp) +; AVX2-NEXT: vextractps $1, %xmm0, (%rsp,%rdx,4) +; AVX2-NEXT: vextractps $2, %xmm0, (%rsp,%rcx,4) +; AVX2-NEXT: vextractps $3, %xmm0, (%rsp,%rsi,4) +; AVX2-NEXT: andl $7, %edi +; AVX2-NEXT: vmovss %xmm1, 
(%rsp,%rdi,4) +; AVX2-NEXT: andl $7, %r8d +; AVX2-NEXT: vextractps $1, %xmm1, (%rsp,%r8,4) +; AVX2-NEXT: andl $7, %r9d +; AVX2-NEXT: vextractps $2, %xmm1, (%rsp,%r9,4) +; AVX2-NEXT: andl $7, %r10d +; AVX2-NEXT: vextractps $3, %xmm1, (%rsp,%r10,4) +; AVX2-NEXT: cmpq $7, %r11 +; AVX2-NEXT: movl $7, %eax +; AVX2-NEXT: cmovbq %r11, %rax +; AVX2-NEXT: movl %eax, %eax +; AVX2-NEXT: movl %ebx, (%rsp,%rax,4) +; AVX2-NEXT: vmovaps (%rsp), %ymm0 +; AVX2-NEXT: leaq -8(%rbp), %rsp +; AVX2-NEXT: popq %rbx +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: .cfi_def_cfa %rsp, 8 +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_compress_v8i32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX512-NEXT: vpmovw2m %xmm1, %k1 +; AVX512-NEXT: vpcompressd %ymm0, %ymm2 {%k1} +; AVX512-NEXT: vmovdqa %ymm2, %ymm0 +; AVX512-NEXT: retq + %out = call <8 x i32> @llvm.experimental.vector.compress(<8 x i32> %vec, <8 x i1> %mask, <8 x i32> %passthru) + ret <8 x i32> %out +} + +define <8 x float> @test_compress_v8f32(<8 x float> %vec, <8 x i1> %mask, <8 x float> %passthru) { +; AVX512-LABEL: test_compress_v8f32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX512-NEXT: vpmovw2m %xmm1, %k1 +; AVX512-NEXT: vcompressps %ymm0, %ymm2 {%k1} +; AVX512-NEXT: vmovdqa %ymm2, %ymm0 +; AVX512-NEXT: retq + %out = call <8 x float> @llvm.experimental.vector.compress(<8 x float> %vec, <8 x i1> %mask, <8 x float> %passthru) + ret <8 x float> %out +} + +define <4 x i64> @test_compress_v4i64(<4 x i64> %vec, <4 x i1> %mask, <4 x i64> %passthru) { +; AVX512-LABEL: test_compress_v4i64: +; AVX512: # %bb.0: +; AVX512-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX512-NEXT: vptestmd %xmm1, %xmm1, %k1 +; AVX512-NEXT: vpcompressq %ymm0, %ymm2 {%k1} +; AVX512-NEXT: vmovdqa %ymm2, %ymm0 +; AVX512-NEXT: retq + %out = call <4 x i64> @llvm.experimental.vector.compress(<4 x i64> %vec, <4 x i1> %mask, <4 x i64> %passthru) + ret <4 x i64> %out +} + +define <4 x double> @test_compress_v4f64(<4 x double> %vec, <4 x i1> %mask, <4 x double> %passthru) { +; AVX512-LABEL: test_compress_v4f64: +; AVX512: # %bb.0: +; AVX512-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX512-NEXT: vptestmd %xmm1, %xmm1, %k1 +; AVX512-NEXT: vcompresspd %ymm0, %ymm2 {%k1} +; AVX512-NEXT: vmovdqa %ymm2, %ymm0 +; AVX512-NEXT: retq + %out = call <4 x double> @llvm.experimental.vector.compress(<4 x double> %vec, <4 x i1> %mask, <4 x double> %passthru) + ret <4 x double> %out +} + +define <16 x i32> @test_compress_v16i32(<16 x i32> %vec, <16 x i1> %mask, <16 x i32> %passthru) { +; AVX512-LABEL: test_compress_v16i32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $7, %xmm1, %xmm1 +; AVX512-NEXT: vpmovb2m %xmm1, %k1 +; AVX512-NEXT: vpcompressd %zmm0, %zmm2 {%k1} +; AVX512-NEXT: vmovdqa64 %zmm2, %zmm0 +; AVX512-NEXT: retq + %out = call <16 x i32> @llvm.experimental.vector.compress(<16 x i32> %vec, <16 x i1> %mask, <16 x i32> %passthru) + ret <16 x i32> %out +} + +define <16 x float> @test_compress_v16f32(<16 x float> %vec, <16 x i1> %mask, <16 x float> %passthru) { +; AVX512-LABEL: test_compress_v16f32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $7, %xmm1, %xmm1 +; AVX512-NEXT: vpmovb2m %xmm1, %k1 +; AVX512-NEXT: vcompressps %zmm0, %zmm2 {%k1} +; AVX512-NEXT: vmovdqa64 %zmm2, %zmm0 +; AVX512-NEXT: retq + %out = call <16 x float> @llvm.experimental.vector.compress(<16 x float> %vec, <16 x i1> %mask, <16 x float> %passthru) + ret <16 x float> %out +} + +define <8 x i64> @test_compress_v8i64(<8 x i64> %vec, <8 x i1> %mask, <8 x i64> %passthru) { +; AVX512-LABEL: test_compress_v8i64: +; AVX512: # 
%bb.0: +; AVX512-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX512-NEXT: vpmovw2m %xmm1, %k1 +; AVX512-NEXT: vpcompressq %zmm0, %zmm2 {%k1} +; AVX512-NEXT: vmovdqa64 %zmm2, %zmm0 +; AVX512-NEXT: retq + %out = call <8 x i64> @llvm.experimental.vector.compress(<8 x i64> %vec, <8 x i1> %mask, <8 x i64> %passthru) + ret <8 x i64> %out +} + +define <8 x double> @test_compress_v8f64(<8 x double> %vec, <8 x i1> %mask, <8 x double> %passthru) { +; AVX512-LABEL: test_compress_v8f64: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX512-NEXT: vpmovw2m %xmm1, %k1 +; AVX512-NEXT: vcompresspd %zmm0, %zmm2 {%k1} +; AVX512-NEXT: vmovdqa64 %zmm2, %zmm0 +; AVX512-NEXT: retq + %out = call <8 x double> @llvm.experimental.vector.compress(<8 x double> %vec, <8 x i1> %mask, <8 x double> %passthru) + ret <8 x double> %out +} + +define <16 x i8> @test_compress_v16i8(<16 x i8> %vec, <16 x i1> %mask, <16 x i8> %passthru) { +; AVX512-LABEL: test_compress_v16i8: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $7, %xmm1, %xmm1 +; AVX512-NEXT: vpmovb2m %xmm1, %k1 +; AVX512-NEXT: vpcompressb %xmm0, %xmm2 {%k1} +; AVX512-NEXT: vmovdqa %xmm2, %xmm0 +; AVX512-NEXT: retq + %out = call <16 x i8> @llvm.experimental.vector.compress(<16 x i8> %vec, <16 x i1> %mask, <16 x i8> %passthru) + ret <16 x i8> %out +} + +define <8 x i16> @test_compress_v8i16(<8 x i16> %vec, <8 x i1> %mask, <8 x i16> %passthru) { +; AVX512-LABEL: test_compress_v8i16: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX512-NEXT: vpmovw2m %xmm1, %k1 +; AVX512-NEXT: vpcompressw %xmm0, %xmm2 {%k1} +; AVX512-NEXT: vmovdqa %xmm2, %xmm0 +; AVX512-NEXT: retq + %out = call <8 x i16> @llvm.experimental.vector.compress(<8 x i16> %vec, <8 x i1> %mask, <8 x i16> %passthru) + ret <8 x i16> %out +} + +define <32 x i8> @test_compress_v32i8(<32 x i8> %vec, <32 x i1> %mask, <32 x i8> %passthru) { +; AVX512-LABEL: test_compress_v32i8: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $7, %ymm1, %ymm1 +; AVX512-NEXT: vpmovb2m %ymm1, %k1 +; AVX512-NEXT: vpcompressb %ymm0, %ymm2 {%k1} +; AVX512-NEXT: vmovdqa %ymm2, %ymm0 +; AVX512-NEXT: retq + %out = call <32 x i8> @llvm.experimental.vector.compress(<32 x i8> %vec, <32 x i1> %mask, <32 x i8> %passthru) + ret <32 x i8> %out +} + +define <16 x i16> @test_compress_v16i16(<16 x i16> %vec, <16 x i1> %mask, <16 x i16> %passthru) { +; AVX512-LABEL: test_compress_v16i16: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $7, %xmm1, %xmm1 +; AVX512-NEXT: vpmovb2m %xmm1, %k1 +; AVX512-NEXT: vpcompressw %ymm0, %ymm2 {%k1} +; AVX512-NEXT: vmovdqa %ymm2, %ymm0 +; AVX512-NEXT: retq + %out = call <16 x i16> @llvm.experimental.vector.compress(<16 x i16> %vec, <16 x i1> %mask, <16 x i16> %passthru) + ret <16 x i16> %out +} + +define <64 x i8> @test_compress_v64i8(<64 x i8> %vec, <64 x i1> %mask, <64 x i8> %passthru) { +; AVX512-LABEL: test_compress_v64i8: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $7, %zmm1, %zmm1 +; AVX512-NEXT: vpmovb2m %zmm1, %k1 +; AVX512-NEXT: vpcompressb %zmm0, %zmm2 {%k1} +; AVX512-NEXT: vmovdqa64 %zmm2, %zmm0 +; AVX512-NEXT: retq + %out = call <64 x i8> @llvm.experimental.vector.compress(<64 x i8> %vec, <64 x i1> %mask, <64 x i8> %passthru) + ret <64 x i8> %out +} + +define <32 x i16> @test_compress_v32i16(<32 x i16> %vec, <32 x i1> %mask, <32 x i16> %passthru) { +; AVX512-LABEL: test_compress_v32i16: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $7, %ymm1, %ymm1 +; AVX512-NEXT: vpmovb2m %ymm1, %k1 +; AVX512-NEXT: vpcompressw %zmm0, %zmm2 {%k1} +; AVX512-NEXT: vmovdqa64 %zmm2, %zmm0 +; AVX512-NEXT: retq + %out = call 
<32 x i16> @llvm.experimental.vector.compress(<32 x i16> %vec, <32 x i1> %mask, <32 x i16> %passthru) + ret <32 x i16> %out +} + +define <64 x i32> @test_compress_large(<64 x i1> %mask, <64 x i32> %vec, <64 x i32> %passthru) { +; AVX512-LABEL: test_compress_large: +; AVX512: # %bb.0: +; AVX512-NEXT: pushq %rbp +; AVX512-NEXT: .cfi_def_cfa_offset 16 +; AVX512-NEXT: .cfi_offset %rbp, -16 +; AVX512-NEXT: movq %rsp, %rbp +; AVX512-NEXT: .cfi_def_cfa_register %rbp +; AVX512-NEXT: andq $-64, %rsp +; AVX512-NEXT: subq $576, %rsp # imm = 0x240 +; AVX512-NEXT: vpsllw $7, %zmm0, %zmm0 +; AVX512-NEXT: vpmovb2m %zmm0, %k1 +; AVX512-NEXT: kshiftrq $32, %k1, %k4 +; AVX512-NEXT: kshiftrd $16, %k4, %k3 +; AVX512-NEXT: kshiftrd $16, %k1, %k2 +; AVX512-NEXT: vpcompressd %zmm1, %zmm0 {%k1} {z} +; AVX512-NEXT: vmovdqa64 %zmm0, (%rsp) +; AVX512-NEXT: kshiftrw $8, %k1, %k0 +; AVX512-NEXT: kxorw %k0, %k1, %k0 +; AVX512-NEXT: kshiftrw $4, %k0, %k5 +; AVX512-NEXT: kxorw %k5, %k0, %k0 +; AVX512-NEXT: kshiftrw $2, %k0, %k5 +; AVX512-NEXT: kxorw %k5, %k0, %k0 +; AVX512-NEXT: kshiftrw $1, %k0, %k5 +; AVX512-NEXT: kxorw %k5, %k0, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: andl $31, %eax +; AVX512-NEXT: vpcompressd %zmm2, %zmm0 {%k2} {z} +; AVX512-NEXT: vmovdqa64 %zmm0, (%rsp,%rax,4) +; AVX512-NEXT: vpcompressd %zmm3, %zmm0 {%k4} {z} +; AVX512-NEXT: vmovdqa64 %zmm0, {{[0-9]+}}(%rsp) +; AVX512-NEXT: kshiftrw $8, %k4, %k0 +; AVX512-NEXT: kxorw %k0, %k4, %k0 +; AVX512-NEXT: kshiftrw $4, %k0, %k4 +; AVX512-NEXT: kxorw %k4, %k0, %k0 +; AVX512-NEXT: kshiftrw $2, %k0, %k4 +; AVX512-NEXT: kxorw %k4, %k0, %k0 +; AVX512-NEXT: kshiftrw $1, %k0, %k4 +; AVX512-NEXT: kxorw %k4, %k0, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: andl $31, %eax +; AVX512-NEXT: vpcompressd %zmm4, %zmm0 {%k3} {z} +; AVX512-NEXT: vmovdqa64 %zmm0, 128(%rsp,%rax,4) +; AVX512-NEXT: vmovaps (%rsp), %zmm0 +; AVX512-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm1 +; AVX512-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) +; AVX512-NEXT: kxorw %k2, %k1, %k0 +; AVX512-NEXT: kshiftrw $8, %k0, %k1 +; AVX512-NEXT: kxorw %k1, %k0, %k0 +; AVX512-NEXT: kshiftrw $4, %k0, %k1 +; AVX512-NEXT: kxorw %k1, %k0, %k0 +; AVX512-NEXT: kshiftrw $2, %k0, %k1 +; AVX512-NEXT: kxorw %k1, %k0, %k0 +; AVX512-NEXT: kshiftrw $1, %k0, %k1 +; AVX512-NEXT: kxorw %k1, %k0, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: andl $63, %eax +; AVX512-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm0 +; AVX512-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm2 +; AVX512-NEXT: vmovaps %zmm0, 256(%rsp,%rax,4) +; AVX512-NEXT: vmovaps %zmm1, {{[0-9]+}}(%rsp) +; AVX512-NEXT: vmovaps %zmm2, 320(%rsp,%rax,4) +; AVX512-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm0 +; AVX512-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm1 +; AVX512-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm2 +; AVX512-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm3 +; AVX512-NEXT: movq %rbp, %rsp +; AVX512-NEXT: popq %rbp +; AVX512-NEXT: .cfi_def_cfa %rsp, 8 +; AVX512-NEXT: retq + %out = call <64 x i32> @llvm.experimental.vector.compress(<64 x i32> %vec, <64 x i1> %mask, <64 x i32> undef) + ret <64 x i32> %out +} + +define <4 x i32> @test_compress_all_const() { +; CHECK-LABEL: test_compress_all_const: +; CHECK: # %bb.0: +; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = [5,9,0,0] +; CHECK-NEXT: retq + %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> , + <4 x i1> , + <4 x i32> undef) + ret <4 x i32> %out +} + +define <4 x i32> @test_compress_const_mask(<4 x i32> %vec) { +; CHECK-LABEL: test_compress_const_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3,2,3] +; CHECK-NEXT: 
retq + %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> , <4 x i32> undef) + ret <4 x i32> %out +} + +define <4 x i32> @test_compress_const_mask_passthrough(<4 x i32> %vec, <4 x i32> %passthru) { +; CHECK-LABEL: test_compress_const_mask_passthrough: +; CHECK: # %bb.0: +; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],xmm1[2,3] +; CHECK-NEXT: retq + %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> , <4 x i32> %passthru) + ret <4 x i32> %out +} + +define <4 x i32> @test_compress_const_mask_const_passthrough(<4 x i32> %vec) { +; CHECK-LABEL: test_compress_const_mask_const_passthrough: +; CHECK: # %bb.0: +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,3] +; CHECK-NEXT: movl $7, %eax +; CHECK-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0 +; CHECK-NEXT: movl $8, %eax +; CHECK-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0 +; CHECK-NEXT: retq + %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> , <4 x i32> ) + ret <4 x i32> %out +} + +; We pass a placeholder value for the const_mask* tests to check that they are converted to a no-op by simply copying +; the second vector input register to the return register or doing nothing. +define <4 x i32> @test_compress_const_splat1_mask(<4 x i32> %ignore, <4 x i32> %vec) { +; CHECK-LABEL: test_compress_const_splat1_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> splat (i1 -1), <4 x i32> undef) + ret <4 x i32> %out +} +define <4 x i32> @test_compress_const_splat0_mask(<4 x i32> %ignore, <4 x i32> %vec) { +; CHECK-LABEL: test_compress_const_splat0_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: retq + %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> splat (i1 0), <4 x i32> undef) + ret <4 x i32> %out +} +define <4 x i32> @test_compress_undef_mask(<4 x i32> %ignore, <4 x i32> %vec) { +; CHECK-LABEL: test_compress_undef_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: retq + %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> undef, <4 x i32> undef) + ret <4 x i32> %out +} +define <4 x i32> @test_compress_const_splat0_mask_with_passthru(<4 x i32> %ignore, <4 x i32> %vec, <4 x i32> %passthru) { +; CHECK-LABEL: test_compress_const_splat0_mask_with_passthru: +; CHECK: # %bb.0: +; CHECK-NEXT: vmovaps %xmm2, %xmm0 +; CHECK-NEXT: retq + %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> splat (i1 0), <4 x i32> %passthru) + ret <4 x i32> %out +} +define <4 x i32> @test_compress_const_splat0_mask_without_passthru(<4 x i32> %ignore, <4 x i32> %vec) { +; CHECK-LABEL: test_compress_const_splat0_mask_without_passthru: +; CHECK: # %bb.0: +; CHECK-NEXT: retq + %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> splat (i1 0), <4 x i32> undef) + ret <4 x i32> %out +} + +define <4 x i8> @test_compress_small(<4 x i8> %vec, <4 x i1> %mask) { +; AVX512-LABEL: test_compress_small: +; AVX512: # %bb.0: +; AVX512-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX512-NEXT: vptestmd %xmm1, %xmm1, %k1 +; AVX512-NEXT: vpcompressb %xmm0, %xmm0 {%k1} {z} +; AVX512-NEXT: retq + %out = call <4 x i8> @llvm.experimental.vector.compress(<4 x i8> %vec, <4 x i1> %mask, <4 x i8> undef) + ret <4 x i8> %out +} + +define <4 x i4> @test_compress_illegal_element_type(<4 x i4> %vec, <4 x i1> %mask) { +; AVX2-LABEL: test_compress_illegal_element_type: +; AVX2: # %bb.0: +; AVX2-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX2-NEXT: 
vpsrad $31, %xmm1, %xmm1 +; AVX2-NEXT: vmovss %xmm0, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovd %xmm1, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vextractps $1, %xmm0, -24(%rsp,%rax,4) +; AVX2-NEXT: vpextrd $1, %xmm1, %ecx +; AVX2-NEXT: subl %ecx, %eax +; AVX2-NEXT: leal (,%rax,4), %ecx +; AVX2-NEXT: vextractps $2, %xmm0, -24(%rsp,%rcx) +; AVX2-NEXT: vpextrd $2, %xmm1, %ecx +; AVX2-NEXT: subl %ecx, %eax +; AVX2-NEXT: andl $3, %eax +; AVX2-NEXT: vextractps $3, %xmm0, -24(%rsp,%rax,4) +; AVX2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0 +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_compress_illegal_element_type: +; AVX512: # %bb.0: +; AVX512-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX512-NEXT: vptestmd %xmm1, %xmm1, %k1 +; AVX512-NEXT: vpcompressd %xmm0, %xmm0 {%k1} {z} +; AVX512-NEXT: retq + %out = call <4 x i4> @llvm.experimental.vector.compress(<4 x i4> %vec, <4 x i1> %mask, <4 x i4> undef) + ret <4 x i4> %out +} + +define <3 x i32> @test_compress_narrow(<3 x i32> %vec, <3 x i1> %mask) { +; AVX512-LABEL: test_compress_narrow: +; AVX512: # %bb.0: +; AVX512-NEXT: andl $1, %edi +; AVX512-NEXT: kmovw %edi, %k0 +; AVX512-NEXT: kmovd %esi, %k1 +; AVX512-NEXT: kshiftlw $15, %k1, %k1 +; AVX512-NEXT: kshiftrw $14, %k1, %k1 +; AVX512-NEXT: korw %k1, %k0, %k0 +; AVX512-NEXT: movw $-5, %ax +; AVX512-NEXT: kmovd %eax, %k1 +; AVX512-NEXT: kandw %k1, %k0, %k0 +; AVX512-NEXT: kmovd %edx, %k1 +; AVX512-NEXT: kshiftlw $15, %k1, %k1 +; AVX512-NEXT: kshiftrw $13, %k1, %k1 +; AVX512-NEXT: korw %k1, %k0, %k0 +; AVX512-NEXT: movb $7, %al +; AVX512-NEXT: kmovd %eax, %k1 +; AVX512-NEXT: kandw %k1, %k0, %k1 +; AVX512-NEXT: vpcompressd %xmm0, %xmm0 {%k1} {z} +; AVX512-NEXT: retq + %out = call <3 x i32> @llvm.experimental.vector.compress(<3 x i32> %vec, <3 x i1> %mask, <3 x i32> undef) + ret <3 x i32> %out +} + +define <3 x i3> @test_compress_narrow_illegal_element_type(<3 x i3> %vec, <3 x i1> %mask) { +; AVX512-LABEL: test_compress_narrow_illegal_element_type: +; AVX512: # %bb.0: +; AVX512-NEXT: andl $1, %ecx +; AVX512-NEXT: kmovw %ecx, %k0 +; AVX512-NEXT: kmovd %r8d, %k1 +; AVX512-NEXT: kshiftlw $15, %k1, %k1 +; AVX512-NEXT: kshiftrw $14, %k1, %k1 +; AVX512-NEXT: korw %k1, %k0, %k0 +; AVX512-NEXT: movw $-5, %ax +; AVX512-NEXT: kmovd %eax, %k1 +; AVX512-NEXT: kandw %k1, %k0, %k0 +; AVX512-NEXT: kmovd %r9d, %k1 +; AVX512-NEXT: kshiftlw $15, %k1, %k1 +; AVX512-NEXT: kshiftrw $13, %k1, %k1 +; AVX512-NEXT: korw %k1, %k0, %k0 +; AVX512-NEXT: movb $7, %al +; AVX512-NEXT: kmovd %eax, %k1 +; AVX512-NEXT: kandw %k1, %k0, %k1 +; AVX512-NEXT: vmovd %edi, %xmm0 +; AVX512-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0 +; AVX512-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0 +; AVX512-NEXT: vpcompressd %xmm0, %xmm0 {%k1} {z} +; AVX512-NEXT: vmovd %xmm0, %eax +; AVX512-NEXT: vpextrb $4, %xmm0, %edx +; AVX512-NEXT: vpextrb $8, %xmm0, %ecx +; AVX512-NEXT: # kill: def $al killed $al killed $eax +; AVX512-NEXT: # kill: def $dl killed $dl killed $edx +; AVX512-NEXT: # kill: def $cl killed $cl killed $ecx +; AVX512-NEXT: retq + %out = call <3 x i3> @llvm.experimental.vector.compress(<3 x i3> %vec, <3 x i1> %mask, <3 x i3> undef) + ret <3 x i3> %out +} From b1cb814879f832e3732b0c08d200b705aefecd2b Mon Sep 17 00:00:00 2001 From: Lawrence Benson Date: Thu, 29 Aug 2024 12:42:22 +0200 Subject: [PATCH 6/9] Address PR comments --- llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h | 1 + .../SelectionDAG/LegalizeVectorTypes.cpp | 17 +++++++ llvm/lib/Target/X86/X86ISelLowering.cpp | 44 +++++++++---------- llvm/test/CodeGen/X86/vector-compress.ll | 5 ++- 4 files changed, 42 
insertions(+), 25 deletions(-) diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h index 3a49a8ff10860..d58d7d84067b8 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h @@ -953,6 +953,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer { SDValue SplitVecOp_VP_REDUCE(SDNode *N, unsigned OpNo); SDValue SplitVecOp_UnaryOp(SDNode *N); SDValue SplitVecOp_TruncateHelper(SDNode *N); + SDValue SplitVecOp_VECTOR_COMPRESS(SDNode *N, unsigned OpNo); SDValue SplitVecOp_BITCAST(SDNode *N); SDValue SplitVecOp_INSERT_SUBVECTOR(SDNode *N, unsigned OpNo); diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp index ddb7c8c54bbfe..7598f86ea5ee7 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp @@ -3226,6 +3226,9 @@ bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) { case ISD::VSELECT: Res = SplitVecOp_VSELECT(N, OpNo); break; + case ISD::VECTOR_COMPRESS: + Res = SplitVecOp_VECTOR_COMPRESS(N, OpNo); + break; case ISD::STRICT_SINT_TO_FP: case ISD::STRICT_UINT_TO_FP: case ISD::SINT_TO_FP: @@ -3372,6 +3375,20 @@ SDValue DAGTypeLegalizer::SplitVecOp_VSELECT(SDNode *N, unsigned OpNo) { return DAG.getNode(ISD::CONCAT_VECTORS, DL, Src0VT, LoSelect, HiSelect); } +SDValue DAGTypeLegalizer::SplitVecOp_VECTOR_COMPRESS(SDNode *N, unsigned OpNo) { + // The only possibility for an illegal operand is the mask, since result type + // legalization would have handled this node already otherwise. + assert(OpNo == 1 && "Illegal operand must be mask"); + + // To split the mask, we need to split the result type too, so we can just + // reuse that logic here. + SDValue Lo, Hi; + SplitVecRes_VECTOR_COMPRESS(N, Lo, Hi); + + EVT VecVT = N->getValueType(0); + return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VecVT, Lo, Hi); +} + SDValue DAGTypeLegalizer::SplitVecOp_VECREDUCE(SDNode *N, unsigned OpNo) { EVT ResVT = N->getValueType(0); SDValue Lo, Hi; diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index fcdf12c034d52..c33fcab45a8ff 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -2125,6 +2125,27 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM, for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) setOperationAction(ISD::CTPOP, VT, Legal); } + + // Legal vpcompress depends on various AVX512 extensions. 
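+    // In short, mirroring the feature checks below: plain AVX512F provides
+    // the 512-bit forms for 32-/64-bit elements, adding AVX512VL extends
+    // them to 128-/256-bit vectors, and AVX512VBMI2 supplies the 8-/16-bit
+    // element forms (again with AVX512VL for the narrower vectors).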
+ // Legal in AVX512F + for (MVT VT : {MVT::v16i32, MVT::v16f32, MVT::v8i64, MVT::v8f64}) + setOperationAction(ISD::VECTOR_COMPRESS, VT, Legal); + + // Legal in AVX512F + AVX512VL + if (Subtarget.hasVLX()) + for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v4i32, MVT::v4f32, MVT::v4i64, + MVT::v4f64, MVT::v2i64, MVT::v2f64}) + setOperationAction(ISD::VECTOR_COMPRESS, VT, Legal); + + // Legal in AVX512F + AVX512VBMI2 + if (Subtarget.hasVBMI2()) + for (MVT VT : {MVT::v32i16, MVT::v64i8}) + setOperationAction(ISD::VECTOR_COMPRESS, VT, Legal); + + // Legal in AVX512F + AVX512VL + AVX512VBMI2 + if (Subtarget.hasVBMI2() && Subtarget.hasVLX()) + for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v32i8, MVT::v16i16}) + setOperationAction(ISD::VECTOR_COMPRESS, VT, Legal); } // This block control legalization of v32i1/v64i1 which are available with @@ -2321,29 +2342,6 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM, } } - // vpcompress depends on various AVX512 extensions. - if (Subtarget.hasAVX512()) { - // Legal in AVX512F - for (MVT VT : {MVT::v16i32, MVT::v16f32, MVT::v8i64, MVT::v8f64}) - setOperationAction(ISD::VECTOR_COMPRESS, VT, Legal); - - // Legal in AVX512F + AVX512VL - if (Subtarget.hasVLX()) - for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v4i32, MVT::v4f32, MVT::v4i64, - MVT::v4f64, MVT::v2i64, MVT::v2f64}) - setOperationAction(ISD::VECTOR_COMPRESS, VT, Legal); - - // Legal in AVX512F + AVX512VBMI2 - if (Subtarget.hasVBMI2()) - for (MVT VT : {MVT::v32i16, MVT::v64i8}) - setOperationAction(ISD::VECTOR_COMPRESS, VT, Legal); - - // Legal in AVX512F + AVX512VL + AVX512VBMI2 - if (Subtarget.hasVBMI2() && Subtarget.hasVLX()) - for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v32i8, MVT::v16i16}) - setOperationAction(ISD::VECTOR_COMPRESS, VT, Legal); - } - if (!Subtarget.useSoftFloat() && (Subtarget.hasAVXNECONVERT() || Subtarget.hasBF16())) { addRegisterClass(MVT::v8bf16, Subtarget.hasAVX512() ? 
&X86::VR128XRegClass diff --git a/llvm/test/CodeGen/X86/vector-compress.ll b/llvm/test/CodeGen/X86/vector-compress.ll index 17e195c9f0ec0..4b084db9607ea 100644 --- a/llvm/test/CodeGen/X86/vector-compress.ll +++ b/llvm/test/CodeGen/X86/vector-compress.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -O1 -mtriple=x86_64 -mattr=+avx2 < %s | FileCheck %s --check-prefixes=CHECK,AVX2 -; RUN: llc -O1 -mtriple=x86_64 -mattr=+avx512f,+avx512vl,+avx512vbmi2 < %s | FileCheck %s --check-prefixes=CHECK,AVX512 +; RUN: llc -mtriple=x86_64 -mattr=+avx2 < %s | FileCheck %s --check-prefixes=CHECK,AVX2 +; RUN: llc -mtriple=x86_64 -mattr=+avx512f < %s | FileCheck %s --check-prefixes=CHECK,AVX512F +; RUN: llc -mtriple=x86_64 -mattr=+avx512f,+avx512vl,+avx512vbmi2 < %s | FileCheck %s --check-prefixes=CHECK,AVX512VL define <4 x i32> @test_compress_v4i32(<4 x i32> %vec, <4 x i1> %mask, <4 x i32> %passthru) { ; AVX2-LABEL: test_compress_v4i32: From e9124519d27a4350fae1985ec62ac6bfc202af79 Mon Sep 17 00:00:00 2001 From: Lawrence Benson Date: Thu, 29 Aug 2024 14:17:15 +0200 Subject: [PATCH 7/9] Handle passthru in SplitVecRes --- llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp index 7598f86ea5ee7..027aa4cbfcbe3 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp @@ -2436,16 +2436,17 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_COMPRESS(SDNode *N, SDValue &Lo, } SDValue Passthru = N->getOperand(2); - if (!HasCustomLowering || !Passthru.isUndef()) { + if (!HasCustomLowering) { SDValue Compressed = TLI.expandVECTOR_COMPRESS(N, DAG); std::tie(Lo, Hi) = DAG.SplitVector(Compressed, DL, LoVT, HiVT); return; } // Try to VECTOR_COMPRESS smaller vectors and combine via a stack store+load. 
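+  // Keep the unsplit mask around as well: it is reused after the reload
+  // below to blend the recombined result with the passthru value.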
+ SDValue Mask = N->getOperand(1); SDValue LoMask, HiMask; std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0); - std::tie(LoMask, HiMask) = SplitMask(N->getOperand(1)); + std::tie(LoMask, HiMask) = SplitMask(Mask); SDValue UndefPassthru = DAG.getUNDEF(LoVT); Lo = DAG.getNode(ISD::VECTOR_COMPRESS, DL, LoVT, Lo, LoMask, UndefPassthru); @@ -2469,6 +2470,10 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_COMPRESS(SDNode *N, SDValue &Lo, MachinePointerInfo::getUnknownStack(MF)); SDValue Compressed = DAG.getLoad(VecVT, DL, Chain, StackPtr, PtrInfo); + if (!Passthru.isUndef()) { + Compressed = + DAG.getNode(ISD::VSELECT, DL, VecVT, Mask, Compressed, Passthru); + } std::tie(Lo, Hi) = DAG.SplitVector(Compressed, DL); } From d773c8216d6c6b69ed430dfe0de008a0f80661ec Mon Sep 17 00:00:00 2001 From: Lawrence Benson Date: Thu, 29 Aug 2024 14:30:34 +0200 Subject: [PATCH 8/9] Add custom lowering for AVX512F vectors --- llvm/lib/Target/X86/X86ISelLowering.cpp | 74 ++ llvm/test/CodeGen/X86/vector-compress.ll | 1035 ++++++++++++++++------ 2 files changed, 853 insertions(+), 256 deletions(-) diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index c33fcab45a8ff..7ea6a769782f5 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -2126,6 +2126,14 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM, setOperationAction(ISD::CTPOP, VT, Legal); } + // We can try to convert vectors to different sizes to leverage legal + // `vpcompress` cases. So we mark these supported vector sizes as Custom and + // then specialize to Legal below. + for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v4i32, MVT::v4f32, MVT::v4i64, + MVT::v4f64, MVT::v2i64, MVT::v2f64, MVT::v16i8, MVT::v8i16, + MVT::v16i16, MVT::v8i8}) + setOperationAction(ISD::VECTOR_COMPRESS, VT, Custom); + // Legal vpcompress depends on various AVX512 extensions. // Legal in AVX512F for (MVT VT : {MVT::v16i32, MVT::v16f32, MVT::v8i64, MVT::v8f64}) @@ -17776,6 +17784,71 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, const X86Subtarget &Subtarget, llvm_unreachable("Unimplemented!"); } +// As legal vpcompress instructions depend on various AVX512 extensions, try to +// convert illegal vector sizes to legal ones to avoid expansion. +static SDValue lowerVECTOR_COMPRESS(SDValue Op, const X86Subtarget &Subtarget, + SelectionDAG &DAG) { + assert(Subtarget.hasAVX512() && + "Need AVX512 for custom VECTOR_COMPRESS lowering."); + + SDLoc DL(Op); + SDValue Vec = Op.getOperand(0); + SDValue Mask = Op.getOperand(1); + SDValue Passthru = Op.getOperand(2); + + EVT VecVT = Vec.getValueType(); + EVT ElementVT = VecVT.getVectorElementType(); + unsigned NumElements = VecVT.getVectorNumElements(); + unsigned NumVecBits = VecVT.getFixedSizeInBits(); + unsigned NumElementBits = ElementVT.getFixedSizeInBits(); + + // 128- and 256-bit vectors with <= 16 elements can be converted to and + // compressed as 512-bit vectors in AVX512F. 
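+  // The strategy: move the operands into a 512-bit vector (INSERT_SUBVECTOR
+  // for 32-/64-bit elements, ANY_EXTEND for 8-/16-bit elements), compress
+  // that, and shrink the result back down to the original type.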
+  if (NumVecBits != 128 && NumVecBits != 256)
+    return SDValue();
+
+  if (NumElementBits == 32 || NumElementBits == 64) {
+    unsigned NumLargeElements = 512 / NumElementBits;
+    EVT LargeVecVT =
+        MVT::getVectorVT(ElementVT.getSimpleVT(), NumLargeElements);
+    EVT LargeMaskVT = MVT::getVectorVT(MVT::i1, NumLargeElements);
+
+    SDValue InsertPos = DAG.getConstant(0, DL, MVT::i64);
+    Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, LargeVecVT,
+                      DAG.getUNDEF(LargeVecVT), Vec, InsertPos);
+    Mask = DAG.getNode(
+        ISD::INSERT_SUBVECTOR, DL, LargeMaskVT,
+        DAG.getSplatVector(LargeMaskVT, DL, DAG.getConstant(0, DL, MVT::i1)),
+        Mask, InsertPos);
+    Passthru = Passthru.isUndef()
+                   ? DAG.getUNDEF(LargeVecVT)
+                   : DAG.getNode(ISD::INSERT_SUBVECTOR, DL, LargeVecVT,
+                                 DAG.getUNDEF(LargeVecVT), Passthru, InsertPos);
+
+    SDValue Compressed =
+        DAG.getNode(ISD::VECTOR_COMPRESS, DL, LargeVecVT, Vec, Mask, Passthru);
+    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VecVT, Compressed,
+                       InsertPos);
+  }
+
+  if (VecVT == MVT::v8i16 || VecVT == MVT::v8i8 || VecVT == MVT::v16i8 ||
+      VecVT == MVT::v16i16) {
+    MVT LargeElementVT = MVT::getIntegerVT(512 / NumElements);
+    EVT LargeVecVT = MVT::getVectorVT(LargeElementVT, NumElements);
+
+    Vec = DAG.getNode(ISD::ANY_EXTEND, DL, LargeVecVT, Vec);
+    Passthru = Passthru.isUndef()
+                   ? DAG.getUNDEF(LargeVecVT)
+                   : DAG.getNode(ISD::ANY_EXTEND, DL, LargeVecVT, Passthru);
+
+    SDValue Compressed =
+        DAG.getNode(ISD::VECTOR_COMPRESS, DL, LargeVecVT, Vec, Mask, Passthru);
+    return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Compressed);
+  }
+
+  return SDValue();
+}
+
 /// Try to lower a VSELECT instruction to a vector shuffle.
 static SDValue
 lowerVSELECTtoVectorShuffle(SDValue Op, const X86Subtarget &Subtarget,
@@ -32395,6 +32468,7 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
   case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
   case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, Subtarget, DAG);
   case ISD::VECTOR_SHUFFLE: return lowerVECTOR_SHUFFLE(Op, Subtarget, DAG);
+  case ISD::VECTOR_COMPRESS: return lowerVECTOR_COMPRESS(Op, Subtarget, DAG);
   case ISD::VSELECT: return LowerVSELECT(Op, DAG);
   case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
   case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
diff --git a/llvm/test/CodeGen/X86/vector-compress.ll b/llvm/test/CodeGen/X86/vector-compress.ll
index 4b084db9607ea..2b963ab896cc9 100644
--- a/llvm/test/CodeGen/X86/vector-compress.ll
+++ b/llvm/test/CodeGen/X86/vector-compress.ll
@@ -40,13 +40,26 @@ define <4 x i32> @test_compress_v4i32(<4 x i32> %vec, <4 x i1> %mask, <4 x i32>
 ; AVX2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
 ; AVX2-NEXT: retq
 ;
-; AVX512-LABEL: test_compress_v4i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpslld $31, %xmm1, %xmm1
-; AVX512-NEXT: vptestmd %xmm1, %xmm1, %k1
-; AVX512-NEXT: vpcompressd %xmm0, %xmm2 {%k1}
-; AVX512-NEXT: vmovdqa %xmm2, %xmm0
-; AVX512-NEXT: retq
+; AVX512F-LABEL: test_compress_v4i32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0
+; AVX512F-NEXT: kshiftlw $12, %k0, %k0
+; AVX512F-NEXT: kshiftrw $12, %k0, %k1
+; AVX512F-NEXT: vpcompressd %zmm0, %zmm2 {%k1}
+; AVX512F-NEXT: vmovdqa %xmm2, %xmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: test_compress_v4i32:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpslld $31, %xmm1, %xmm1
+; 
AVX512VL-NEXT: vptestmd %xmm1, %xmm1, %k1 +; AVX512VL-NEXT: vpcompressd %xmm0, %xmm2 {%k1} +; AVX512VL-NEXT: vmovdqa %xmm2, %xmm0 +; AVX512VL-NEXT: retq %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> %mask, <4 x i32> %passthru) ret <4 x i32> %out } @@ -92,13 +105,26 @@ define <4 x float> @test_compress_v4f32(<4 x float> %vec, <4 x i1> %mask, <4 x f ; AVX2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0 ; AVX2-NEXT: retq ; -; AVX512-LABEL: test_compress_v4f32: -; AVX512: # %bb.0: -; AVX512-NEXT: vpslld $31, %xmm1, %xmm1 -; AVX512-NEXT: vptestmd %xmm1, %xmm1, %k1 -; AVX512-NEXT: vcompressps %xmm0, %xmm2 {%k1} -; AVX512-NEXT: vmovdqa %xmm2, %xmm0 -; AVX512-NEXT: retq +; AVX512F-LABEL: test_compress_v4f32: +; AVX512F: # %bb.0: +; AVX512F-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 +; AVX512F-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0 +; AVX512F-NEXT: kshiftlw $12, %k0, %k0 +; AVX512F-NEXT: kshiftrw $12, %k0, %k1 +; AVX512F-NEXT: vcompressps %zmm0, %zmm2 {%k1} +; AVX512F-NEXT: vmovdqa %xmm2, %xmm0 +; AVX512F-NEXT: vzeroupper +; AVX512F-NEXT: retq +; +; AVX512VL-LABEL: test_compress_v4f32: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX512VL-NEXT: vptestmd %xmm1, %xmm1, %k1 +; AVX512VL-NEXT: vcompressps %xmm0, %xmm2 {%k1} +; AVX512VL-NEXT: vmovdqa %xmm2, %xmm0 +; AVX512VL-NEXT: retq %out = call <4 x float> @llvm.experimental.vector.compress(<4 x float> %vec, <4 x i1> %mask, <4 x float> %passthru) ret <4 x float> %out } @@ -131,13 +157,26 @@ define <2 x i64> @test_compress_v2i64(<2 x i64> %vec, <2 x i1> %mask, <2 x i64> ; AVX2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0 ; AVX2-NEXT: retq ; -; AVX512-LABEL: test_compress_v2i64: -; AVX512: # %bb.0: -; AVX512-NEXT: vpsllq $63, %xmm1, %xmm1 -; AVX512-NEXT: vptestmq %xmm1, %xmm1, %k1 -; AVX512-NEXT: vpcompressq %xmm0, %xmm2 {%k1} -; AVX512-NEXT: vmovdqa %xmm2, %xmm0 -; AVX512-NEXT: retq +; AVX512F-LABEL: test_compress_v2i64: +; AVX512F: # %bb.0: +; AVX512F-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 +; AVX512F-NEXT: vpsllq $63, %xmm1, %xmm1 +; AVX512F-NEXT: vptestmq %zmm1, %zmm1, %k0 +; AVX512F-NEXT: kshiftlw $14, %k0, %k0 +; AVX512F-NEXT: kshiftrw $14, %k0, %k1 +; AVX512F-NEXT: vpcompressq %zmm0, %zmm2 {%k1} +; AVX512F-NEXT: vmovdqa %xmm2, %xmm0 +; AVX512F-NEXT: vzeroupper +; AVX512F-NEXT: retq +; +; AVX512VL-LABEL: test_compress_v2i64: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpsllq $63, %xmm1, %xmm1 +; AVX512VL-NEXT: vptestmq %xmm1, %xmm1, %k1 +; AVX512VL-NEXT: vpcompressq %xmm0, %xmm2 {%k1} +; AVX512VL-NEXT: vmovdqa %xmm2, %xmm0 +; AVX512VL-NEXT: retq %out = call <2 x i64> @llvm.experimental.vector.compress(<2 x i64> %vec, <2 x i1> %mask, <2 x i64> %passthru) ret <2 x i64> %out } @@ -173,13 +212,26 @@ define <2 x double> @test_compress_v2f64(<2 x double> %vec, <2 x i1> %mask, <2 x ; AVX2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0 ; AVX2-NEXT: retq ; -; AVX512-LABEL: test_compress_v2f64: -; AVX512: # %bb.0: -; AVX512-NEXT: vpsllq $63, %xmm1, %xmm1 -; AVX512-NEXT: vptestmq %xmm1, %xmm1, %k1 -; AVX512-NEXT: vcompresspd %xmm0, %xmm2 {%k1} -; AVX512-NEXT: vmovdqa %xmm2, %xmm0 -; AVX512-NEXT: retq +; AVX512F-LABEL: test_compress_v2f64: +; AVX512F: # %bb.0: +; AVX512F-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2 +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 +; AVX512F-NEXT: vpsllq $63, %xmm1, %xmm1 +; AVX512F-NEXT: vptestmq %zmm1, %zmm1, %k0 +; 
AVX512F-NEXT: kshiftlw $14, %k0, %k0 +; AVX512F-NEXT: kshiftrw $14, %k0, %k1 +; AVX512F-NEXT: vcompresspd %zmm0, %zmm2 {%k1} +; AVX512F-NEXT: vmovdqa %xmm2, %xmm0 +; AVX512F-NEXT: vzeroupper +; AVX512F-NEXT: retq +; +; AVX512VL-LABEL: test_compress_v2f64: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpsllq $63, %xmm1, %xmm1 +; AVX512VL-NEXT: vptestmq %xmm1, %xmm1, %k1 +; AVX512VL-NEXT: vcompresspd %xmm0, %xmm2 {%k1} +; AVX512VL-NEXT: vmovdqa %xmm2, %xmm0 +; AVX512VL-NEXT: retq %out = call <2 x double> @llvm.experimental.vector.compress(<2 x double> %vec, <2 x i1> %mask, <2 x double> %passthru) ret <2 x double> %out } @@ -266,243 +318,586 @@ define <8 x i32> @test_compress_v8i32(<8 x i32> %vec, <8 x i1> %mask, <8 x i32> ; AVX2-NEXT: .cfi_def_cfa %rsp, 8 ; AVX2-NEXT: retq ; -; AVX512-LABEL: test_compress_v8i32: -; AVX512: # %bb.0: -; AVX512-NEXT: vpsllw $15, %xmm1, %xmm1 -; AVX512-NEXT: vpmovw2m %xmm1, %k1 -; AVX512-NEXT: vpcompressd %ymm0, %ymm2 {%k1} -; AVX512-NEXT: vmovdqa %ymm2, %ymm0 -; AVX512-NEXT: retq +; AVX512F-LABEL: test_compress_v8i32: +; AVX512F: # %bb.0: +; AVX512F-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2 +; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 +; AVX512F-NEXT: vpmovsxwq %xmm1, %zmm1 +; AVX512F-NEXT: vpsllq $63, %zmm1, %zmm1 +; AVX512F-NEXT: vptestmq %zmm1, %zmm1, %k1 +; AVX512F-NEXT: vpcompressd %zmm0, %zmm2 {%k1} +; AVX512F-NEXT: vmovdqa %ymm2, %ymm0 +; AVX512F-NEXT: retq +; +; AVX512VL-LABEL: test_compress_v8i32: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX512VL-NEXT: vpmovw2m %xmm1, %k1 +; AVX512VL-NEXT: vpcompressd %ymm0, %ymm2 {%k1} +; AVX512VL-NEXT: vmovdqa %ymm2, %ymm0 +; AVX512VL-NEXT: retq %out = call <8 x i32> @llvm.experimental.vector.compress(<8 x i32> %vec, <8 x i1> %mask, <8 x i32> %passthru) ret <8 x i32> %out } define <8 x float> @test_compress_v8f32(<8 x float> %vec, <8 x i1> %mask, <8 x float> %passthru) { -; AVX512-LABEL: test_compress_v8f32: -; AVX512: # %bb.0: -; AVX512-NEXT: vpsllw $15, %xmm1, %xmm1 -; AVX512-NEXT: vpmovw2m %xmm1, %k1 -; AVX512-NEXT: vcompressps %ymm0, %ymm2 {%k1} -; AVX512-NEXT: vmovdqa %ymm2, %ymm0 -; AVX512-NEXT: retq +; AVX2-LABEL: test_compress_v8f32: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: .cfi_def_cfa_offset 16 +; AVX2-NEXT: .cfi_offset %rbp, -16 +; AVX2-NEXT: movq %rsp, %rbp +; AVX2-NEXT: .cfi_def_cfa_register %rbp +; AVX2-NEXT: andq $-32, %rsp +; AVX2-NEXT: subq $64, %rsp +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX2-NEXT: vpslld $31, %ymm1, %ymm1 +; AVX2-NEXT: vpsrad $31, %ymm1, %ymm3 +; AVX2-NEXT: vmovaps %ymm2, (%rsp) +; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm1 +; AVX2-NEXT: vpackssdw %xmm1, %xmm3, %xmm2 +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero +; AVX2-NEXT: vpslld $31, %ymm2, %ymm2 +; AVX2-NEXT: vpsrld $31, %ymm2, %ymm2 +; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm4 +; AVX2-NEXT: vpaddd %xmm4, %xmm2, %xmm2 +; AVX2-NEXT: vpextrd $1, %xmm2, %eax +; AVX2-NEXT: vmovd %xmm2, %ecx +; AVX2-NEXT: addl %eax, %ecx +; AVX2-NEXT: vpextrd $2, %xmm2, %eax +; AVX2-NEXT: vpextrd $3, %xmm2, %edx +; AVX2-NEXT: addl %eax, %edx +; AVX2-NEXT: addl %ecx, %edx +; AVX2-NEXT: andl $7, %edx +; AVX2-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero +; AVX2-NEXT: vmovss %xmm0, (%rsp) +; AVX2-NEXT: vmovd %xmm3, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vextractps $1, %xmm0, 
(%rsp,%rax,4) +; AVX2-NEXT: vpextrd $1, %xmm3, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: vextractps $2, %xmm0, (%rsp,%rcx,4) +; AVX2-NEXT: vpextrd $2, %xmm3, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: vextractps $3, %xmm0, (%rsp,%rax,4) +; AVX2-NEXT: vpextrd $3, %xmm3, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: vmovd %xmm1, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $7, %ecx +; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0 +; AVX2-NEXT: vmovss %xmm0, (%rsp,%rcx,4) +; AVX2-NEXT: vpextrd $1, %xmm1, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax +; AVX2-NEXT: andl $7, %eax +; AVX2-NEXT: vextractps $1, %xmm0, (%rsp,%rax,4) +; AVX2-NEXT: vpextrd $2, %xmm1, %edx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: addq %rcx, %rdx +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $7, %ecx +; AVX2-NEXT: vextractps $2, %xmm0, (%rsp,%rcx,4) +; AVX2-NEXT: vpextrd $3, %xmm1, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rdx, %rax +; AVX2-NEXT: # kill: def $edx killed $edx killed $rdx def $rdx +; AVX2-NEXT: andl $7, %edx +; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3] +; AVX2-NEXT: vmovss %xmm0, (%rsp,%rdx,4) +; AVX2-NEXT: cmpq $8, %rax +; AVX2-NEXT: jae .LBB5_2 +; AVX2-NEXT: # %bb.1: +; AVX2-NEXT: vmovaps %xmm2, %xmm0 +; AVX2-NEXT: .LBB5_2: +; AVX2-NEXT: cmpq $7, %rax +; AVX2-NEXT: movl $7, %ecx +; AVX2-NEXT: cmovbq %rax, %rcx +; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: vmovss %xmm0, (%rsp,%rax,4) +; AVX2-NEXT: vmovaps (%rsp), %ymm0 +; AVX2-NEXT: movq %rbp, %rsp +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: .cfi_def_cfa %rsp, 8 +; AVX2-NEXT: retq +; +; AVX512F-LABEL: test_compress_v8f32: +; AVX512F: # %bb.0: +; AVX512F-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2 +; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 +; AVX512F-NEXT: vpmovsxwq %xmm1, %zmm1 +; AVX512F-NEXT: vpsllq $63, %zmm1, %zmm1 +; AVX512F-NEXT: vptestmq %zmm1, %zmm1, %k1 +; AVX512F-NEXT: vcompressps %zmm0, %zmm2 {%k1} +; AVX512F-NEXT: vmovdqa %ymm2, %ymm0 +; AVX512F-NEXT: retq +; +; AVX512VL-LABEL: test_compress_v8f32: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX512VL-NEXT: vpmovw2m %xmm1, %k1 +; AVX512VL-NEXT: vcompressps %ymm0, %ymm2 {%k1} +; AVX512VL-NEXT: vmovdqa %ymm2, %ymm0 +; AVX512VL-NEXT: retq %out = call <8 x float> @llvm.experimental.vector.compress(<8 x float> %vec, <8 x i1> %mask, <8 x float> %passthru) ret <8 x float> %out } define <4 x i64> @test_compress_v4i64(<4 x i64> %vec, <4 x i1> %mask, <4 x i64> %passthru) { -; AVX512-LABEL: test_compress_v4i64: -; AVX512: # %bb.0: -; AVX512-NEXT: vpslld $31, %xmm1, %xmm1 -; AVX512-NEXT: vptestmd %xmm1, %xmm1, %k1 -; AVX512-NEXT: vpcompressq %ymm0, %ymm2 {%k1} -; AVX512-NEXT: vmovdqa %ymm2, %ymm0 -; AVX512-NEXT: retq +; AVX2-LABEL: test_compress_v4i64: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: .cfi_def_cfa_offset 16 +; AVX2-NEXT: .cfi_offset %rbp, -16 +; AVX2-NEXT: movq %rsp, %rbp +; AVX2-NEXT: .cfi_def_cfa_register %rbp +; AVX2-NEXT: andq $-32, %rsp +; AVX2-NEXT: subq $64, %rsp +; AVX2-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX2-NEXT: vpsrad $31, %xmm1, %xmm1 +; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1 +; AVX2-NEXT: vmovaps %ymm2, (%rsp) +; AVX2-NEXT: vpsrlq $63, %ymm1, %ymm2 +; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3 +; AVX2-NEXT: vpaddq 
%xmm3, %xmm2, %xmm2 +; AVX2-NEXT: vpextrq $1, %xmm2, %rcx +; AVX2-NEXT: vmovq %xmm2, %rax +; AVX2-NEXT: addl %ecx, %eax +; AVX2-NEXT: andl $3, %eax +; AVX2-NEXT: vpextrq $1, %xmm1, %rcx +; AVX2-NEXT: vmovq %xmm1, %rdx +; AVX2-NEXT: andl $1, %edx +; AVX2-NEXT: movl %edx, %esi +; AVX2-NEXT: subq %rcx, %rdx +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1 +; AVX2-NEXT: vmovq %xmm1, %rcx +; AVX2-NEXT: movl %edx, %edi +; AVX2-NEXT: subq %rcx, %rdx +; AVX2-NEXT: vpextrq $1, %xmm1, %rcx +; AVX2-NEXT: movq %rdx, %r8 +; AVX2-NEXT: subq %rcx, %r8 +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpextrq $1, %xmm1, %rcx +; AVX2-NEXT: cmpq $4, %r8 +; AVX2-NEXT: cmovbq (%rsp,%rax,8), %rcx +; AVX2-NEXT: vmovq %xmm0, (%rsp) +; AVX2-NEXT: vpextrq $1, %xmm0, (%rsp,%rsi,8) +; AVX2-NEXT: vmovq %xmm1, (%rsp,%rdi,8) +; AVX2-NEXT: andl $3, %edx +; AVX2-NEXT: vpextrq $1, %xmm1, (%rsp,%rdx,8) +; AVX2-NEXT: cmpq $3, %r8 +; AVX2-NEXT: movl $3, %eax +; AVX2-NEXT: cmovbq %r8, %rax +; AVX2-NEXT: movl %eax, %eax +; AVX2-NEXT: movq %rcx, (%rsp,%rax,8) +; AVX2-NEXT: vmovaps (%rsp), %ymm0 +; AVX2-NEXT: movq %rbp, %rsp +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: .cfi_def_cfa %rsp, 8 +; AVX2-NEXT: retq +; +; AVX512F-LABEL: test_compress_v4i64: +; AVX512F: # %bb.0: +; AVX512F-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2 +; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 +; AVX512F-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0 +; AVX512F-NEXT: kshiftlw $12, %k0, %k0 +; AVX512F-NEXT: kshiftrw $12, %k0, %k1 +; AVX512F-NEXT: vpcompressq %zmm0, %zmm2 {%k1} +; AVX512F-NEXT: vmovdqa %ymm2, %ymm0 +; AVX512F-NEXT: retq +; +; AVX512VL-LABEL: test_compress_v4i64: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX512VL-NEXT: vptestmd %xmm1, %xmm1, %k1 +; AVX512VL-NEXT: vpcompressq %ymm0, %ymm2 {%k1} +; AVX512VL-NEXT: vmovdqa %ymm2, %ymm0 +; AVX512VL-NEXT: retq %out = call <4 x i64> @llvm.experimental.vector.compress(<4 x i64> %vec, <4 x i1> %mask, <4 x i64> %passthru) ret <4 x i64> %out } define <4 x double> @test_compress_v4f64(<4 x double> %vec, <4 x i1> %mask, <4 x double> %passthru) { -; AVX512-LABEL: test_compress_v4f64: -; AVX512: # %bb.0: -; AVX512-NEXT: vpslld $31, %xmm1, %xmm1 -; AVX512-NEXT: vptestmd %xmm1, %xmm1, %k1 -; AVX512-NEXT: vcompresspd %ymm0, %ymm2 {%k1} -; AVX512-NEXT: vmovdqa %ymm2, %ymm0 -; AVX512-NEXT: retq +; AVX512F-LABEL: test_compress_v4f64: +; AVX512F: # %bb.0: +; AVX512F-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2 +; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 +; AVX512F-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0 +; AVX512F-NEXT: kshiftlw $12, %k0, %k0 +; AVX512F-NEXT: kshiftrw $12, %k0, %k1 +; AVX512F-NEXT: vcompresspd %zmm0, %zmm2 {%k1} +; AVX512F-NEXT: vmovdqa %ymm2, %ymm0 +; AVX512F-NEXT: retq +; +; AVX512VL-LABEL: test_compress_v4f64: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX512VL-NEXT: vptestmd %xmm1, %xmm1, %k1 +; AVX512VL-NEXT: vcompresspd %ymm0, %ymm2 {%k1} +; AVX512VL-NEXT: vmovdqa %ymm2, %ymm0 +; AVX512VL-NEXT: retq %out = call <4 x double> @llvm.experimental.vector.compress(<4 x double> %vec, <4 x i1> %mask, <4 x double> %passthru) ret <4 x double> %out } define <16 x i32> @test_compress_v16i32(<16 x i32> %vec, <16 x i1> %mask, <16 x i32> %passthru) { -; AVX512-LABEL: test_compress_v16i32: -; AVX512: # %bb.0: -; AVX512-NEXT: vpsllw $7, %xmm1, %xmm1 -; AVX512-NEXT: vpmovb2m %xmm1, %k1 -; AVX512-NEXT: vpcompressd %zmm0, %zmm2 {%k1} -; AVX512-NEXT: vmovdqa64 %zmm2, 
%zmm0 -; AVX512-NEXT: retq +; AVX512F-LABEL: test_compress_v16i32: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vpmovsxbd %xmm1, %zmm1 +; AVX512F-NEXT: vpslld $31, %zmm1, %zmm1 +; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k1 +; AVX512F-NEXT: vpcompressd %zmm0, %zmm2 {%k1} +; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0 +; AVX512F-NEXT: retq +; +; AVX512VL-LABEL: test_compress_v16i32: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpsllw $7, %xmm1, %xmm1 +; AVX512VL-NEXT: vpmovb2m %xmm1, %k1 +; AVX512VL-NEXT: vpcompressd %zmm0, %zmm2 {%k1} +; AVX512VL-NEXT: vmovdqa64 %zmm2, %zmm0 +; AVX512VL-NEXT: retq %out = call <16 x i32> @llvm.experimental.vector.compress(<16 x i32> %vec, <16 x i1> %mask, <16 x i32> %passthru) ret <16 x i32> %out } define <16 x float> @test_compress_v16f32(<16 x float> %vec, <16 x i1> %mask, <16 x float> %passthru) { -; AVX512-LABEL: test_compress_v16f32: -; AVX512: # %bb.0: -; AVX512-NEXT: vpsllw $7, %xmm1, %xmm1 -; AVX512-NEXT: vpmovb2m %xmm1, %k1 -; AVX512-NEXT: vcompressps %zmm0, %zmm2 {%k1} -; AVX512-NEXT: vmovdqa64 %zmm2, %zmm0 -; AVX512-NEXT: retq +; AVX512F-LABEL: test_compress_v16f32: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vpmovsxbd %xmm1, %zmm1 +; AVX512F-NEXT: vpslld $31, %zmm1, %zmm1 +; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k1 +; AVX512F-NEXT: vcompressps %zmm0, %zmm2 {%k1} +; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0 +; AVX512F-NEXT: retq +; +; AVX512VL-LABEL: test_compress_v16f32: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpsllw $7, %xmm1, %xmm1 +; AVX512VL-NEXT: vpmovb2m %xmm1, %k1 +; AVX512VL-NEXT: vcompressps %zmm0, %zmm2 {%k1} +; AVX512VL-NEXT: vmovdqa64 %zmm2, %zmm0 +; AVX512VL-NEXT: retq %out = call <16 x float> @llvm.experimental.vector.compress(<16 x float> %vec, <16 x i1> %mask, <16 x float> %passthru) ret <16 x float> %out } define <8 x i64> @test_compress_v8i64(<8 x i64> %vec, <8 x i1> %mask, <8 x i64> %passthru) { -; AVX512-LABEL: test_compress_v8i64: -; AVX512: # %bb.0: -; AVX512-NEXT: vpsllw $15, %xmm1, %xmm1 -; AVX512-NEXT: vpmovw2m %xmm1, %k1 -; AVX512-NEXT: vpcompressq %zmm0, %zmm2 {%k1} -; AVX512-NEXT: vmovdqa64 %zmm2, %zmm0 -; AVX512-NEXT: retq +; AVX512F-LABEL: test_compress_v8i64: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vpmovsxwq %xmm1, %zmm1 +; AVX512F-NEXT: vpsllq $63, %zmm1, %zmm1 +; AVX512F-NEXT: vptestmq %zmm1, %zmm1, %k1 +; AVX512F-NEXT: vpcompressq %zmm0, %zmm2 {%k1} +; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0 +; AVX512F-NEXT: retq +; +; AVX512VL-LABEL: test_compress_v8i64: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX512VL-NEXT: vpmovw2m %xmm1, %k1 +; AVX512VL-NEXT: vpcompressq %zmm0, %zmm2 {%k1} +; AVX512VL-NEXT: vmovdqa64 %zmm2, %zmm0 +; AVX512VL-NEXT: retq %out = call <8 x i64> @llvm.experimental.vector.compress(<8 x i64> %vec, <8 x i1> %mask, <8 x i64> %passthru) ret <8 x i64> %out } define <8 x double> @test_compress_v8f64(<8 x double> %vec, <8 x i1> %mask, <8 x double> %passthru) { -; AVX512-LABEL: test_compress_v8f64: -; AVX512: # %bb.0: -; AVX512-NEXT: vpsllw $15, %xmm1, %xmm1 -; AVX512-NEXT: vpmovw2m %xmm1, %k1 -; AVX512-NEXT: vcompresspd %zmm0, %zmm2 {%k1} -; AVX512-NEXT: vmovdqa64 %zmm2, %zmm0 -; AVX512-NEXT: retq +; AVX512F-LABEL: test_compress_v8f64: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vpmovsxwq %xmm1, %zmm1 +; AVX512F-NEXT: vpsllq $63, %zmm1, %zmm1 +; AVX512F-NEXT: vptestmq %zmm1, %zmm1, %k1 +; AVX512F-NEXT: vcompresspd %zmm0, %zmm2 {%k1} +; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0 +; AVX512F-NEXT: retq +; +; AVX512VL-LABEL: test_compress_v8f64: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX512VL-NEXT: 
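+; NOTE (illustrative): plain AVX512F has no 128/256-bit compress forms, so the
+; xmm/ymm cases above are widened to 512 bits (the "kill" annotations plus the
+; kshiftlw/kshiftrw mask-zeroing pairs), compressed on zmm, and the low part
+; extracted; with AVX512VL the xmm/ymm forms are used directly.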
vpmovw2m %xmm1, %k1 +; AVX512VL-NEXT: vcompresspd %zmm0, %zmm2 {%k1} +; AVX512VL-NEXT: vmovdqa64 %zmm2, %zmm0 +; AVX512VL-NEXT: retq %out = call <8 x double> @llvm.experimental.vector.compress(<8 x double> %vec, <8 x i1> %mask, <8 x double> %passthru) ret <8 x double> %out } define <16 x i8> @test_compress_v16i8(<16 x i8> %vec, <16 x i1> %mask, <16 x i8> %passthru) { -; AVX512-LABEL: test_compress_v16i8: -; AVX512: # %bb.0: -; AVX512-NEXT: vpsllw $7, %xmm1, %xmm1 -; AVX512-NEXT: vpmovb2m %xmm1, %k1 -; AVX512-NEXT: vpcompressb %xmm0, %xmm2 {%k1} -; AVX512-NEXT: vmovdqa %xmm2, %xmm0 -; AVX512-NEXT: retq +; AVX512F-LABEL: test_compress_v16i8: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vpmovsxbd %xmm1, %zmm1 +; AVX512F-NEXT: vpslld $31, %zmm1, %zmm1 +; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k1 +; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero +; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero +; AVX512F-NEXT: vpcompressd %zmm0, %zmm1 {%k1} +; AVX512F-NEXT: vpmovdb %zmm1, %xmm0 +; AVX512F-NEXT: vzeroupper +; AVX512F-NEXT: retq +; +; AVX512VL-LABEL: test_compress_v16i8: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpsllw $7, %xmm1, %xmm1 +; AVX512VL-NEXT: vpmovb2m %xmm1, %k1 +; AVX512VL-NEXT: vpcompressb %xmm0, %xmm2 {%k1} +; AVX512VL-NEXT: vmovdqa %xmm2, %xmm0 +; AVX512VL-NEXT: retq %out = call <16 x i8> @llvm.experimental.vector.compress(<16 x i8> %vec, <16 x i1> %mask, <16 x i8> %passthru) ret <16 x i8> %out } define <8 x i16> @test_compress_v8i16(<8 x i16> %vec, <8 x i1> %mask, <8 x i16> %passthru) { -; AVX512-LABEL: test_compress_v8i16: -; AVX512: # %bb.0: -; AVX512-NEXT: vpsllw $15, %xmm1, %xmm1 -; AVX512-NEXT: vpmovw2m %xmm1, %k1 -; AVX512-NEXT: vpcompressw %xmm0, %xmm2 {%k1} -; AVX512-NEXT: vmovdqa %xmm2, %xmm0 -; AVX512-NEXT: retq +; AVX512F-LABEL: test_compress_v8i16: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vpmovsxwq %xmm1, %zmm1 +; AVX512F-NEXT: vpsllq $63, %zmm1, %zmm1 +; AVX512F-NEXT: vptestmq %zmm1, %zmm1, %k1 +; AVX512F-NEXT: vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero +; AVX512F-NEXT: vpmovzxwq {{.*#+}} zmm1 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero +; AVX512F-NEXT: vpcompressq %zmm0, %zmm1 {%k1} +; AVX512F-NEXT: vpmovqw %zmm1, %xmm0 +; AVX512F-NEXT: vzeroupper +; AVX512F-NEXT: retq +; +; AVX512VL-LABEL: test_compress_v8i16: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX512VL-NEXT: vpmovw2m %xmm1, %k1 +; AVX512VL-NEXT: vpcompressw %xmm0, %xmm2 {%k1} +; AVX512VL-NEXT: vmovdqa %xmm2, %xmm0 +; AVX512VL-NEXT: retq %out = call <8 x i16> 
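+; Illustration of the intrinsic's semantics, with hypothetical values:
+;   compress(vec = <1,2,3,4>, mask = <1,0,1,1>, passthru = <9,9,9,9>)
+;     == <1,3,4,9>
+; selected lanes are packed to the front, and every lane at index >=
+; popcount(mask) keeps the corresponding passthru lane.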
@llvm.experimental.vector.compress(<8 x i16> %vec, <8 x i1> %mask, <8 x i16> %passthru) ret <8 x i16> %out } define <32 x i8> @test_compress_v32i8(<32 x i8> %vec, <32 x i1> %mask, <32 x i8> %passthru) { -; AVX512-LABEL: test_compress_v32i8: -; AVX512: # %bb.0: -; AVX512-NEXT: vpsllw $7, %ymm1, %ymm1 -; AVX512-NEXT: vpmovb2m %ymm1, %k1 -; AVX512-NEXT: vpcompressb %ymm0, %ymm2 {%k1} -; AVX512-NEXT: vmovdqa %ymm2, %ymm0 -; AVX512-NEXT: retq +; AVX512F-LABEL: test_compress_v32i8: +; AVX512F: # %bb.0: +; AVX512F-NEXT: pushq %rbp +; AVX512F-NEXT: .cfi_def_cfa_offset 16 +; AVX512F-NEXT: .cfi_offset %rbp, -16 +; AVX512F-NEXT: movq %rsp, %rbp +; AVX512F-NEXT: .cfi_def_cfa_register %rbp +; AVX512F-NEXT: andq $-32, %rsp +; AVX512F-NEXT: subq $64, %rsp +; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm3 +; AVX512F-NEXT: vpmovsxbd %xmm3, %zmm3 +; AVX512F-NEXT: vpslld $31, %zmm3, %zmm3 +; AVX512F-NEXT: vptestmd %zmm3, %zmm3, %k1 +; AVX512F-NEXT: vpmovsxbd %xmm1, %zmm3 +; AVX512F-NEXT: vpslld $31, %zmm3, %zmm3 +; AVX512F-NEXT: vptestmd %zmm3, %zmm3, %k2 +; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm3 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero +; AVX512F-NEXT: vpcompressd %zmm3, %zmm3 {%k2} {z} +; AVX512F-NEXT: vpmovdb %zmm3, (%rsp) +; AVX512F-NEXT: kshiftrw $8, %k2, %k0 +; AVX512F-NEXT: kxorw %k0, %k2, %k0 +; AVX512F-NEXT: kshiftrw $4, %k0, %k2 +; AVX512F-NEXT: kxorw %k2, %k0, %k0 +; AVX512F-NEXT: kshiftrw $2, %k0, %k2 +; AVX512F-NEXT: kxorw %k2, %k0, %k0 +; AVX512F-NEXT: kshiftrw $1, %k0, %k2 +; AVX512F-NEXT: kxorw %k2, %k0, %k0 +; AVX512F-NEXT: kmovw %k0, %eax +; AVX512F-NEXT: andl $31, %eax +; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm0 +; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero +; AVX512F-NEXT: vpcompressd %zmm0, %zmm0 {%k1} {z} +; AVX512F-NEXT: vpmovdb %zmm0, (%rsp,%rax) +; AVX512F-NEXT: vpsllw $7, %ymm1, %ymm0 +; AVX512F-NEXT: vpblendvb %ymm0, (%rsp), %ymm2, %ymm0 +; AVX512F-NEXT: movq %rbp, %rsp +; AVX512F-NEXT: popq %rbp +; AVX512F-NEXT: .cfi_def_cfa %rsp, 8 +; AVX512F-NEXT: retq +; +; AVX512VL-LABEL: test_compress_v32i8: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpsllw $7, %ymm1, %ymm1 +; AVX512VL-NEXT: vpmovb2m %ymm1, %k1 +; AVX512VL-NEXT: vpcompressb %ymm0, %ymm2 {%k1} +; AVX512VL-NEXT: vmovdqa %ymm2, %ymm0 +; AVX512VL-NEXT: retq %out = call <32 x i8> @llvm.experimental.vector.compress(<32 x i8> %vec, <32 x i1> %mask, <32 x i8> %passthru) ret <32 x i8> %out } define <16 x i16> @test_compress_v16i16(<16 x i16> %vec, <16 x i1> %mask, <16 x i16> %passthru) { -; AVX512-LABEL: test_compress_v16i16: -; AVX512: # %bb.0: -; AVX512-NEXT: vpsllw $7, %xmm1, %xmm1 -; AVX512-NEXT: vpmovb2m %xmm1, %k1 -; AVX512-NEXT: vpcompressw %ymm0, %ymm2 {%k1} -; AVX512-NEXT: vmovdqa %ymm2, %ymm0 -; AVX512-NEXT: retq +; AVX512F-LABEL: test_compress_v16i16: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vpmovsxbd %xmm1, %zmm1 +; AVX512F-NEXT: vpslld 
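+; NOTE (illustrative): byte/word compress (vpcompressb/vpcompressw) requires
+; VBMI2, which the AVX512VL run configuration evidently enables; the plain
+; AVX512F path instead widens i8/i16 lanes to i32/i64, uses
+; vpcompressd/vpcompressq, and narrows the result back
+; (vpmovdb/vpmovqw/vpmovdw), as these checks show.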
$31, %zmm1, %zmm1 +; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k1 +; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero +; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero +; AVX512F-NEXT: vpcompressd %zmm0, %zmm1 {%k1} +; AVX512F-NEXT: vpmovdw %zmm1, %ymm0 +; AVX512F-NEXT: retq +; +; AVX512VL-LABEL: test_compress_v16i16: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpsllw $7, %xmm1, %xmm1 +; AVX512VL-NEXT: vpmovb2m %xmm1, %k1 +; AVX512VL-NEXT: vpcompressw %ymm0, %ymm2 {%k1} +; AVX512VL-NEXT: vmovdqa %ymm2, %ymm0 +; AVX512VL-NEXT: retq %out = call <16 x i16> @llvm.experimental.vector.compress(<16 x i16> %vec, <16 x i1> %mask, <16 x i16> %passthru) ret <16 x i16> %out } define <64 x i8> @test_compress_v64i8(<64 x i8> %vec, <64 x i1> %mask, <64 x i8> %passthru) { -; AVX512-LABEL: test_compress_v64i8: -; AVX512: # %bb.0: -; AVX512-NEXT: vpsllw $7, %zmm1, %zmm1 -; AVX512-NEXT: vpmovb2m %zmm1, %k1 -; AVX512-NEXT: vpcompressb %zmm0, %zmm2 {%k1} -; AVX512-NEXT: vmovdqa64 %zmm2, %zmm0 -; AVX512-NEXT: retq +; AVX512VL-LABEL: test_compress_v64i8: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpsllw $7, %zmm1, %zmm1 +; AVX512VL-NEXT: vpmovb2m %zmm1, %k1 +; AVX512VL-NEXT: vpcompressb %zmm0, %zmm2 {%k1} +; AVX512VL-NEXT: vmovdqa64 %zmm2, %zmm0 +; AVX512VL-NEXT: retq %out = call <64 x i8> @llvm.experimental.vector.compress(<64 x i8> %vec, <64 x i1> %mask, <64 x i8> %passthru) ret <64 x i8> %out } define <32 x i16> @test_compress_v32i16(<32 x i16> %vec, <32 x i1> %mask, <32 x i16> %passthru) { -; AVX512-LABEL: test_compress_v32i16: -; AVX512: # %bb.0: -; AVX512-NEXT: vpsllw $7, %ymm1, %ymm1 -; AVX512-NEXT: vpmovb2m %ymm1, %k1 -; AVX512-NEXT: vpcompressw %zmm0, %zmm2 {%k1} -; AVX512-NEXT: vmovdqa64 %zmm2, %zmm0 -; AVX512-NEXT: retq +; AVX512F-LABEL: test_compress_v32i16: +; AVX512F: # %bb.0: +; AVX512F-NEXT: pushq %rbp +; AVX512F-NEXT: .cfi_def_cfa_offset 16 +; AVX512F-NEXT: .cfi_offset %rbp, -16 +; AVX512F-NEXT: movq %rsp, %rbp +; AVX512F-NEXT: .cfi_def_cfa_register %rbp +; AVX512F-NEXT: andq $-64, %rsp +; AVX512F-NEXT: subq $128, %rsp +; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero +; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm5 +; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm4 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero +; AVX512F-NEXT: vpmovsxbd %xmm5, %zmm5 +; AVX512F-NEXT: vpslld $31, %zmm5, %zmm5 +; AVX512F-NEXT: vptestmd %zmm5, %zmm5, %k1 +; AVX512F-NEXT: vpmovsxbd %xmm1, %zmm1 +; AVX512F-NEXT: vpslld $31, %zmm1, %zmm1 +; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k2 +; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero +; AVX512F-NEXT: vpcompressd 
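+; NOTE (rough reading of the sequence below, hedged): 32-lane i8/i16 inputs
+; exceed one zmm worth of widened lanes, so each 16-lane half is widened,
+; compressed with a zeroing vpcompressd, and spilled to the stack; the second
+; half is stored at an offset derived from the first half's mask, and the
+; passthru is blended over the untouched tail before reassembly.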
%zmm1, %zmm1 {%k2} {z} +; AVX512F-NEXT: vpmovdw %zmm1, (%rsp) +; AVX512F-NEXT: kshiftrw $8, %k2, %k0 +; AVX512F-NEXT: kxorw %k0, %k2, %k0 +; AVX512F-NEXT: kshiftrw $4, %k0, %k2 +; AVX512F-NEXT: kxorw %k2, %k0, %k0 +; AVX512F-NEXT: kshiftrw $2, %k0, %k2 +; AVX512F-NEXT: kxorw %k2, %k0, %k0 +; AVX512F-NEXT: kshiftrw $1, %k0, %k2 +; AVX512F-NEXT: kxorw %k2, %k0, %k0 +; AVX512F-NEXT: kmovw %k0, %eax +; AVX512F-NEXT: andl $31, %eax +; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm0 +; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero +; AVX512F-NEXT: vpcompressd %zmm0, %zmm0 {%k1} {z} +; AVX512F-NEXT: vpmovdw %zmm0, (%rsp,%rax,2) +; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm0 +; AVX512F-NEXT: vpsllw $15, %ymm4, %ymm1 +; AVX512F-NEXT: vpsraw $15, %ymm1, %ymm1 +; AVX512F-NEXT: vpblendvb %ymm1, {{[0-9]+}}(%rsp), %ymm0, %ymm0 +; AVX512F-NEXT: vpsllw $15, %ymm3, %ymm1 +; AVX512F-NEXT: vpsraw $15, %ymm1, %ymm1 +; AVX512F-NEXT: vpblendvb %ymm1, (%rsp), %ymm2, %ymm1 +; AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0 +; AVX512F-NEXT: movq %rbp, %rsp +; AVX512F-NEXT: popq %rbp +; AVX512F-NEXT: .cfi_def_cfa %rsp, 8 +; AVX512F-NEXT: retq +; +; AVX512VL-LABEL: test_compress_v32i16: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpsllw $7, %ymm1, %ymm1 +; AVX512VL-NEXT: vpmovb2m %ymm1, %k1 +; AVX512VL-NEXT: vpcompressw %zmm0, %zmm2 {%k1} +; AVX512VL-NEXT: vmovdqa64 %zmm2, %zmm0 +; AVX512VL-NEXT: retq %out = call <32 x i16> @llvm.experimental.vector.compress(<32 x i16> %vec, <32 x i1> %mask, <32 x i16> %passthru) ret <32 x i16> %out } define <64 x i32> @test_compress_large(<64 x i1> %mask, <64 x i32> %vec, <64 x i32> %passthru) { -; AVX512-LABEL: test_compress_large: -; AVX512: # %bb.0: -; AVX512-NEXT: pushq %rbp -; AVX512-NEXT: .cfi_def_cfa_offset 16 -; AVX512-NEXT: .cfi_offset %rbp, -16 -; AVX512-NEXT: movq %rsp, %rbp -; AVX512-NEXT: .cfi_def_cfa_register %rbp -; AVX512-NEXT: andq $-64, %rsp -; AVX512-NEXT: subq $576, %rsp # imm = 0x240 -; AVX512-NEXT: vpsllw $7, %zmm0, %zmm0 -; AVX512-NEXT: vpmovb2m %zmm0, %k1 -; AVX512-NEXT: kshiftrq $32, %k1, %k4 -; AVX512-NEXT: kshiftrd $16, %k4, %k3 -; AVX512-NEXT: kshiftrd $16, %k1, %k2 -; AVX512-NEXT: vpcompressd %zmm1, %zmm0 {%k1} {z} -; AVX512-NEXT: vmovdqa64 %zmm0, (%rsp) -; AVX512-NEXT: kshiftrw $8, %k1, %k0 -; AVX512-NEXT: kxorw %k0, %k1, %k0 -; AVX512-NEXT: kshiftrw $4, %k0, %k5 -; AVX512-NEXT: kxorw %k5, %k0, %k0 -; AVX512-NEXT: kshiftrw $2, %k0, %k5 -; AVX512-NEXT: kxorw %k5, %k0, %k0 -; AVX512-NEXT: kshiftrw $1, %k0, %k5 -; AVX512-NEXT: kxorw %k5, %k0, %k0 -; AVX512-NEXT: kmovd %k0, %eax -; AVX512-NEXT: andl $31, %eax -; AVX512-NEXT: vpcompressd %zmm2, %zmm0 {%k2} {z} -; AVX512-NEXT: vmovdqa64 %zmm0, (%rsp,%rax,4) -; AVX512-NEXT: vpcompressd %zmm3, %zmm0 {%k4} {z} -; AVX512-NEXT: vmovdqa64 %zmm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: kshiftrw $8, %k4, %k0 -; AVX512-NEXT: kxorw %k0, %k4, %k0 -; AVX512-NEXT: kshiftrw $4, %k0, %k4 -; AVX512-NEXT: kxorw %k4, %k0, %k0 -; AVX512-NEXT: kshiftrw $2, %k0, %k4 -; AVX512-NEXT: kxorw %k4, %k0, %k0 -; AVX512-NEXT: kshiftrw $1, %k0, %k4 -; AVX512-NEXT: kxorw %k4, %k0, %k0 -; AVX512-NEXT: kmovd %k0, %eax -; AVX512-NEXT: andl $31, %eax -; AVX512-NEXT: vpcompressd %zmm4, %zmm0 {%k3} {z} -; AVX512-NEXT: vmovdqa64 %zmm0, 128(%rsp,%rax,4) -; AVX512-NEXT: vmovaps (%rsp), %zmm0 -; AVX512-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm1 -; AVX512-NEXT: 
vmovaps %zmm0, {{[0-9]+}}(%rsp) -; AVX512-NEXT: kxorw %k2, %k1, %k0 -; AVX512-NEXT: kshiftrw $8, %k0, %k1 -; AVX512-NEXT: kxorw %k1, %k0, %k0 -; AVX512-NEXT: kshiftrw $4, %k0, %k1 -; AVX512-NEXT: kxorw %k1, %k0, %k0 -; AVX512-NEXT: kshiftrw $2, %k0, %k1 -; AVX512-NEXT: kxorw %k1, %k0, %k0 -; AVX512-NEXT: kshiftrw $1, %k0, %k1 -; AVX512-NEXT: kxorw %k1, %k0, %k0 -; AVX512-NEXT: kmovd %k0, %eax -; AVX512-NEXT: andl $63, %eax -; AVX512-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm0 -; AVX512-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm2 -; AVX512-NEXT: vmovaps %zmm0, 256(%rsp,%rax,4) -; AVX512-NEXT: vmovaps %zmm1, {{[0-9]+}}(%rsp) -; AVX512-NEXT: vmovaps %zmm2, 320(%rsp,%rax,4) -; AVX512-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm0 -; AVX512-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm1 -; AVX512-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm2 -; AVX512-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm3 -; AVX512-NEXT: movq %rbp, %rsp -; AVX512-NEXT: popq %rbp -; AVX512-NEXT: .cfi_def_cfa %rsp, 8 -; AVX512-NEXT: retq +; AVX512VL-LABEL: test_compress_large: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: pushq %rbp +; AVX512VL-NEXT: .cfi_def_cfa_offset 16 +; AVX512VL-NEXT: .cfi_offset %rbp, -16 +; AVX512VL-NEXT: movq %rsp, %rbp +; AVX512VL-NEXT: .cfi_def_cfa_register %rbp +; AVX512VL-NEXT: andq $-64, %rsp +; AVX512VL-NEXT: subq $576, %rsp # imm = 0x240 +; AVX512VL-NEXT: vpsllw $7, %zmm0, %zmm0 +; AVX512VL-NEXT: vpmovb2m %zmm0, %k1 +; AVX512VL-NEXT: kshiftrq $32, %k1, %k4 +; AVX512VL-NEXT: kshiftrd $16, %k4, %k3 +; AVX512VL-NEXT: kshiftrd $16, %k1, %k2 +; AVX512VL-NEXT: vpcompressd %zmm1, %zmm0 {%k1} {z} +; AVX512VL-NEXT: vmovdqa64 %zmm0, (%rsp) +; AVX512VL-NEXT: kshiftrw $8, %k1, %k0 +; AVX512VL-NEXT: kxorw %k0, %k1, %k0 +; AVX512VL-NEXT: kshiftrw $4, %k0, %k5 +; AVX512VL-NEXT: kxorw %k5, %k0, %k0 +; AVX512VL-NEXT: kshiftrw $2, %k0, %k5 +; AVX512VL-NEXT: kxorw %k5, %k0, %k0 +; AVX512VL-NEXT: kshiftrw $1, %k0, %k5 +; AVX512VL-NEXT: kxorw %k5, %k0, %k0 +; AVX512VL-NEXT: kmovd %k0, %eax +; AVX512VL-NEXT: andl $31, %eax +; AVX512VL-NEXT: vpcompressd %zmm2, %zmm0 {%k2} {z} +; AVX512VL-NEXT: vmovdqa64 %zmm0, (%rsp,%rax,4) +; AVX512VL-NEXT: vpcompressd %zmm3, %zmm0 {%k4} {z} +; AVX512VL-NEXT: vmovdqa64 %zmm0, {{[0-9]+}}(%rsp) +; AVX512VL-NEXT: kshiftrw $8, %k4, %k0 +; AVX512VL-NEXT: kxorw %k0, %k4, %k0 +; AVX512VL-NEXT: kshiftrw $4, %k0, %k4 +; AVX512VL-NEXT: kxorw %k4, %k0, %k0 +; AVX512VL-NEXT: kshiftrw $2, %k0, %k4 +; AVX512VL-NEXT: kxorw %k4, %k0, %k0 +; AVX512VL-NEXT: kshiftrw $1, %k0, %k4 +; AVX512VL-NEXT: kxorw %k4, %k0, %k0 +; AVX512VL-NEXT: kmovd %k0, %eax +; AVX512VL-NEXT: andl $31, %eax +; AVX512VL-NEXT: vpcompressd %zmm4, %zmm0 {%k3} {z} +; AVX512VL-NEXT: vmovdqa64 %zmm0, 128(%rsp,%rax,4) +; AVX512VL-NEXT: vmovaps (%rsp), %zmm0 +; AVX512VL-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm1 +; AVX512VL-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) +; AVX512VL-NEXT: kxorw %k2, %k1, %k0 +; AVX512VL-NEXT: kshiftrw $8, %k0, %k1 +; AVX512VL-NEXT: kxorw %k1, %k0, %k0 +; AVX512VL-NEXT: kshiftrw $4, %k0, %k1 +; AVX512VL-NEXT: kxorw %k1, %k0, %k0 +; AVX512VL-NEXT: kshiftrw $2, %k0, %k1 +; AVX512VL-NEXT: kxorw %k1, %k0, %k0 +; AVX512VL-NEXT: kshiftrw $1, %k0, %k1 +; AVX512VL-NEXT: kxorw %k1, %k0, %k0 +; AVX512VL-NEXT: kmovd %k0, %eax +; AVX512VL-NEXT: andl $63, %eax +; AVX512VL-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm0 +; AVX512VL-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm2 +; AVX512VL-NEXT: vmovaps %zmm0, 256(%rsp,%rax,4) +; AVX512VL-NEXT: vmovaps %zmm1, {{[0-9]+}}(%rsp) +; AVX512VL-NEXT: vmovaps %zmm2, 320(%rsp,%rax,4) +; AVX512VL-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm0 +; AVX512VL-NEXT: 
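+; NOTE (illustrative): the <64 x i32> case is split into 16-lane pieces; each
+; piece is compressed with a zeroing vpcompressd and stored to the stack at an
+; offset derived from the mask bits already consumed, and the four result
+; registers are reloaded from the reassembled buffer.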
vmovaps {{[0-9]+}}(%rsp), %zmm1 +; AVX512VL-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm2 +; AVX512VL-NEXT: vmovaps {{[0-9]+}}(%rsp), %zmm3 +; AVX512VL-NEXT: movq %rbp, %rsp +; AVX512VL-NEXT: popq %rbp +; AVX512VL-NEXT: .cfi_def_cfa %rsp, 8 +; AVX512VL-NEXT: retq %out = call <64 x i32> @llvm.experimental.vector.compress(<64 x i32> %vec, <64 x i1> %mask, <64 x i32> undef) ret <64 x i32> %out } @@ -590,12 +985,24 @@ define <4 x i32> @test_compress_const_splat0_mask_without_passthru(<4 x i32> %ig } define <4 x i8> @test_compress_small(<4 x i8> %vec, <4 x i1> %mask) { -; AVX512-LABEL: test_compress_small: -; AVX512: # %bb.0: -; AVX512-NEXT: vpslld $31, %xmm1, %xmm1 -; AVX512-NEXT: vptestmd %xmm1, %xmm1, %k1 -; AVX512-NEXT: vpcompressb %xmm0, %xmm0 {%k1} {z} -; AVX512-NEXT: retq +; AVX512F-LABEL: test_compress_small: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0 +; AVX512F-NEXT: kshiftlw $12, %k0, %k0 +; AVX512F-NEXT: kshiftrw $12, %k0, %k1 +; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero +; AVX512F-NEXT: vpcompressd %zmm0, %zmm0 {%k1} {z} +; AVX512F-NEXT: vpmovdb %zmm0, %xmm0 +; AVX512F-NEXT: vzeroupper +; AVX512F-NEXT: retq +; +; AVX512VL-LABEL: test_compress_small: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX512VL-NEXT: vptestmd %xmm1, %xmm1, %k1 +; AVX512VL-NEXT: vpcompressb %xmm0, %xmm0 {%k1} {z} +; AVX512VL-NEXT: retq %out = call <4 x i8> @llvm.experimental.vector.compress(<4 x i8> %vec, <4 x i1> %mask, <4 x i8> undef) ret <4 x i8> %out } @@ -620,71 +1027,187 @@ define <4 x i4> @test_compress_illegal_element_type(<4 x i4> %vec, <4 x i1> %mas ; AVX2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0 ; AVX2-NEXT: retq ; -; AVX512-LABEL: test_compress_illegal_element_type: -; AVX512: # %bb.0: -; AVX512-NEXT: vpslld $31, %xmm1, %xmm1 -; AVX512-NEXT: vptestmd %xmm1, %xmm1, %k1 -; AVX512-NEXT: vpcompressd %xmm0, %xmm0 {%k1} {z} -; AVX512-NEXT: retq +; AVX512F-LABEL: test_compress_illegal_element_type: +; AVX512F: # %bb.0: +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 +; AVX512F-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0 +; AVX512F-NEXT: kshiftlw $12, %k0, %k0 +; AVX512F-NEXT: kshiftrw $12, %k0, %k1 +; AVX512F-NEXT: vpcompressd %zmm0, %zmm0 {%k1} {z} +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 +; AVX512F-NEXT: vzeroupper +; AVX512F-NEXT: retq +; +; AVX512VL-LABEL: test_compress_illegal_element_type: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX512VL-NEXT: vptestmd %xmm1, %xmm1, %k1 +; AVX512VL-NEXT: vpcompressd %xmm0, %xmm0 {%k1} {z} +; AVX512VL-NEXT: retq %out = call <4 x i4> @llvm.experimental.vector.compress(<4 x i4> %vec, <4 x i1> %mask, <4 x i4> undef) ret <4 x i4> %out } define <3 x i32> @test_compress_narrow(<3 x i32> %vec, <3 x i1> %mask) { -; AVX512-LABEL: test_compress_narrow: -; AVX512: # %bb.0: -; AVX512-NEXT: andl $1, %edi -; AVX512-NEXT: kmovw %edi, %k0 -; AVX512-NEXT: kmovd %esi, %k1 -; AVX512-NEXT: kshiftlw $15, %k1, %k1 -; AVX512-NEXT: kshiftrw $14, %k1, %k1 -; AVX512-NEXT: korw %k1, %k0, %k0 -; AVX512-NEXT: movw $-5, %ax -; AVX512-NEXT: kmovd %eax, %k1 -; 
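+; NOTE (illustrative): the kshiftlw $12 / kshiftrw $12 pairs above simply
+; clear the upper twelve bits of the 16-bit mask register (k1 = k0 & 0xF),
+; so the widened 512-bit compress only consumes the four real mask lanes.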
AVX512-NEXT: kandw %k1, %k0, %k0 -; AVX512-NEXT: kmovd %edx, %k1 -; AVX512-NEXT: kshiftlw $15, %k1, %k1 -; AVX512-NEXT: kshiftrw $13, %k1, %k1 -; AVX512-NEXT: korw %k1, %k0, %k0 -; AVX512-NEXT: movb $7, %al -; AVX512-NEXT: kmovd %eax, %k1 -; AVX512-NEXT: kandw %k1, %k0, %k1 -; AVX512-NEXT: vpcompressd %xmm0, %xmm0 {%k1} {z} -; AVX512-NEXT: retq +; AVX2-LABEL: test_compress_narrow: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovd %edi, %xmm1 +; AVX2-NEXT: vpinsrd $1, %esi, %xmm1, %xmm1 +; AVX2-NEXT: vpinsrd $2, %edx, %xmm1, %xmm1 +; AVX2-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX2-NEXT: vpsrad $31, %xmm1, %xmm1 +; AVX2-NEXT: vmovss %xmm0, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovd %xmm1, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: vextractps $1, %xmm0, -24(%rsp,%rax,4) +; AVX2-NEXT: vpextrd $1, %xmm1, %ecx +; AVX2-NEXT: subl %ecx, %eax +; AVX2-NEXT: leal (,%rax,4), %ecx +; AVX2-NEXT: vextractps $2, %xmm0, -24(%rsp,%rcx) +; AVX2-NEXT: vpextrd $2, %xmm1, %ecx +; AVX2-NEXT: subl %ecx, %eax +; AVX2-NEXT: andl $3, %eax +; AVX2-NEXT: vextractps $3, %xmm0, -24(%rsp,%rax,4) +; AVX2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0 +; AVX2-NEXT: retq +; +; AVX512F-LABEL: test_compress_narrow: +; AVX512F: # %bb.0: +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 +; AVX512F-NEXT: andl $1, %edi +; AVX512F-NEXT: kmovw %edi, %k0 +; AVX512F-NEXT: kmovw %esi, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $14, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: movw $-5, %ax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kandw %k1, %k0, %k0 +; AVX512F-NEXT: kmovw %edx, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $13, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: movb $7, %al +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kandw %k1, %k0, %k0 +; AVX512F-NEXT: kshiftlw $12, %k0, %k0 +; AVX512F-NEXT: kshiftrw $12, %k0, %k1 +; AVX512F-NEXT: vpcompressd %zmm0, %zmm0 {%k1} {z} +; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 +; AVX512F-NEXT: vzeroupper +; AVX512F-NEXT: retq +; +; AVX512VL-LABEL: test_compress_narrow: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: andl $1, %edi +; AVX512VL-NEXT: kmovw %edi, %k0 +; AVX512VL-NEXT: kmovd %esi, %k1 +; AVX512VL-NEXT: kshiftlw $15, %k1, %k1 +; AVX512VL-NEXT: kshiftrw $14, %k1, %k1 +; AVX512VL-NEXT: korw %k1, %k0, %k0 +; AVX512VL-NEXT: movw $-5, %ax +; AVX512VL-NEXT: kmovd %eax, %k1 +; AVX512VL-NEXT: kandw %k1, %k0, %k0 +; AVX512VL-NEXT: kmovd %edx, %k1 +; AVX512VL-NEXT: kshiftlw $15, %k1, %k1 +; AVX512VL-NEXT: kshiftrw $13, %k1, %k1 +; AVX512VL-NEXT: korw %k1, %k0, %k0 +; AVX512VL-NEXT: movb $7, %al +; AVX512VL-NEXT: kmovd %eax, %k1 +; AVX512VL-NEXT: kandw %k1, %k0, %k1 +; AVX512VL-NEXT: vpcompressd %xmm0, %xmm0 {%k1} {z} +; AVX512VL-NEXT: retq %out = call <3 x i32> @llvm.experimental.vector.compress(<3 x i32> %vec, <3 x i1> %mask, <3 x i32> undef) ret <3 x i32> %out } define <3 x i3> @test_compress_narrow_illegal_element_type(<3 x i3> %vec, <3 x i1> %mask) { -; AVX512-LABEL: test_compress_narrow_illegal_element_type: -; AVX512: # %bb.0: -; AVX512-NEXT: andl $1, %ecx -; AVX512-NEXT: kmovw %ecx, %k0 -; AVX512-NEXT: kmovd %r8d, %k1 -; AVX512-NEXT: kshiftlw $15, %k1, %k1 -; AVX512-NEXT: kshiftrw $14, %k1, %k1 -; AVX512-NEXT: korw %k1, %k0, %k0 -; AVX512-NEXT: movw $-5, %ax -; AVX512-NEXT: kmovd %eax, %k1 -; AVX512-NEXT: kandw %k1, %k0, %k0 -; AVX512-NEXT: kmovd %r9d, %k1 -; AVX512-NEXT: kshiftlw $15, %k1, %k1 -; AVX512-NEXT: kshiftrw $13, %k1, %k1 -; AVX512-NEXT: korw %k1, %k0, %k0 -; AVX512-NEXT: movb $7, 
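+; NOTE (illustrative): the kmovw/kshiftlw/korw sequence in these checks just
+; assembles the scalar i1 arguments into one mask register, conceptually
+;   k = (m0 & 1) | ((m1 & 1) << 1) | ((m2 & 1) << 2)
+; with the movw $-5 / movb $7 constants masking off the unused bit positions.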
%al -; AVX512-NEXT: kmovd %eax, %k1 -; AVX512-NEXT: kandw %k1, %k0, %k1 -; AVX512-NEXT: vmovd %edi, %xmm0 -; AVX512-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0 -; AVX512-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0 -; AVX512-NEXT: vpcompressd %xmm0, %xmm0 {%k1} {z} -; AVX512-NEXT: vmovd %xmm0, %eax -; AVX512-NEXT: vpextrb $4, %xmm0, %edx -; AVX512-NEXT: vpextrb $8, %xmm0, %ecx -; AVX512-NEXT: # kill: def $al killed $al killed $eax -; AVX512-NEXT: # kill: def $dl killed $dl killed $edx -; AVX512-NEXT: # kill: def $cl killed $cl killed $ecx -; AVX512-NEXT: retq +; AVX2-LABEL: test_compress_narrow_illegal_element_type: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovd %ecx, %xmm0 +; AVX2-NEXT: vpinsrd $1, %r8d, %xmm0, %xmm0 +; AVX2-NEXT: vpslld $31, %xmm0, %xmm0 +; AVX2-NEXT: vpsrad $31, %xmm0, %xmm0 +; AVX2-NEXT: movl %edi, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: vmovd %xmm0, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: movl %esi, -24(%rsp,%rax,4) +; AVX2-NEXT: vpextrd $1, %xmm0, %ecx +; AVX2-NEXT: subl %ecx, %eax +; AVX2-NEXT: shll $2, %eax +; AVX2-NEXT: movl %edx, -24(%rsp,%rax) +; AVX2-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm0 +; AVX2-NEXT: vmovd %xmm0, %eax +; AVX2-NEXT: vpextrb $4, %xmm0, %edx +; AVX2-NEXT: vpextrb $8, %xmm0, %ecx +; AVX2-NEXT: # kill: def $al killed $al killed $eax +; AVX2-NEXT: # kill: def $dl killed $dl killed $edx +; AVX2-NEXT: # kill: def $cl killed $cl killed $ecx +; AVX2-NEXT: retq +; +; AVX512F-LABEL: test_compress_narrow_illegal_element_type: +; AVX512F: # %bb.0: +; AVX512F-NEXT: andl $1, %ecx +; AVX512F-NEXT: kmovw %ecx, %k0 +; AVX512F-NEXT: kmovw %r8d, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $14, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: movw $-5, %ax +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kandw %k1, %k0, %k0 +; AVX512F-NEXT: kmovw %r9d, %k1 +; AVX512F-NEXT: kshiftlw $15, %k1, %k1 +; AVX512F-NEXT: kshiftrw $13, %k1, %k1 +; AVX512F-NEXT: korw %k1, %k0, %k0 +; AVX512F-NEXT: movb $7, %al +; AVX512F-NEXT: kmovw %eax, %k1 +; AVX512F-NEXT: kandw %k1, %k0, %k0 +; AVX512F-NEXT: vmovd %edi, %xmm0 +; AVX512F-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0 +; AVX512F-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0 +; AVX512F-NEXT: kshiftlw $12, %k0, %k0 +; AVX512F-NEXT: kshiftrw $12, %k0, %k1 +; AVX512F-NEXT: vpcompressd %zmm0, %zmm0 {%k1} {z} +; AVX512F-NEXT: vmovd %xmm0, %eax +; AVX512F-NEXT: vpextrb $4, %xmm0, %edx +; AVX512F-NEXT: vpextrb $8, %xmm0, %ecx +; AVX512F-NEXT: # kill: def $al killed $al killed $eax +; AVX512F-NEXT: # kill: def $dl killed $dl killed $edx +; AVX512F-NEXT: # kill: def $cl killed $cl killed $ecx +; AVX512F-NEXT: vzeroupper +; AVX512F-NEXT: retq +; +; AVX512VL-LABEL: test_compress_narrow_illegal_element_type: +; AVX512VL: # %bb.0: +; AVX512VL-NEXT: andl $1, %ecx +; AVX512VL-NEXT: kmovw %ecx, %k0 +; AVX512VL-NEXT: kmovd %r8d, %k1 +; AVX512VL-NEXT: kshiftlw $15, %k1, %k1 +; AVX512VL-NEXT: kshiftrw $14, %k1, %k1 +; AVX512VL-NEXT: korw %k1, %k0, %k0 +; AVX512VL-NEXT: movw $-5, %ax +; AVX512VL-NEXT: kmovd %eax, %k1 +; AVX512VL-NEXT: kandw %k1, %k0, %k0 +; AVX512VL-NEXT: kmovd %r9d, %k1 +; AVX512VL-NEXT: kshiftlw $15, %k1, %k1 +; AVX512VL-NEXT: kshiftrw $13, %k1, %k1 +; AVX512VL-NEXT: korw %k1, %k0, %k0 +; AVX512VL-NEXT: movb $7, %al +; AVX512VL-NEXT: kmovd %eax, %k1 +; AVX512VL-NEXT: kandw %k1, %k0, %k1 +; AVX512VL-NEXT: vmovd %edi, %xmm0 +; AVX512VL-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0 +; AVX512VL-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0 +; AVX512VL-NEXT: vpcompressd %xmm0, %xmm0 {%k1} {z} +; AVX512VL-NEXT: vmovd %xmm0, %eax +; AVX512VL-NEXT: 
vpextrb $4, %xmm0, %edx +; AVX512VL-NEXT: vpextrb $8, %xmm0, %ecx +; AVX512VL-NEXT: # kill: def $al killed $al killed $eax +; AVX512VL-NEXT: # kill: def $dl killed $dl killed $edx +; AVX512VL-NEXT: # kill: def $cl killed $cl killed $ecx +; AVX512VL-NEXT: retq %out = call <3 x i3> @llvm.experimental.vector.compress(<3 x i3> %vec, <3 x i1> %mask, <3 x i3> undef) ret <3 x i3> %out } From 3e5a754cdfd8ae8fd277193e1620b83a3fcc8433 Mon Sep 17 00:00:00 2001 From: Lawrence Benson Date: Sat, 31 Aug 2024 13:31:24 +0200 Subject: [PATCH 9/9] Use widenSubVector --- llvm/lib/Target/X86/X86ISelLowering.cpp | 27 +++++++++++-------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 7ea6a769782f5..4dfb2f183c993 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -17809,26 +17809,23 @@ static SDValue lowerVECTOR_COMPRESS(SDValue Op, const X86Subtarget &Subtarget, if (NumElementBits == 32 || NumElementBits == 64) { unsigned NumLargeElements = 512 / NumElementBits; - EVT LargeVecVT = + MVT LargeVecVT = MVT::getVectorVT(ElementVT.getSimpleVT(), NumLargeElements); - EVT LargeMaskVT = MVT::getVectorVT(MVT::i1, NumLargeElements); - - SDValue InsertPos = DAG.getConstant(0, DL, MVT::i64); - Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, LargeVecVT, - DAG.getUNDEF(LargeVecVT), Vec, InsertPos); - Mask = DAG.getNode( - ISD::INSERT_SUBVECTOR, DL, LargeMaskVT, - DAG.getSplatVector(LargeMaskVT, DL, DAG.getConstant(0, DL, MVT::i1)), - Mask, InsertPos); - Passthru = Passthru.isUndef() - ? DAG.getUNDEF(LargeVecVT) - : DAG.getNode(ISD::INSERT_SUBVECTOR, DL, LargeVecVT, - DAG.getUNDEF(LargeVecVT), Passthru, InsertPos); + MVT LargeMaskVT = MVT::getVectorVT(MVT::i1, NumLargeElements); + + Vec = widenSubVector(LargeVecVT, Vec, /*ZeroNewElements=*/false, Subtarget, + DAG, DL); + Mask = widenSubVector(LargeMaskVT, Mask, /*ZeroNewElements=*/true, + Subtarget, DAG, DL); + Passthru = Passthru.isUndef() ? DAG.getUNDEF(LargeVecVT) + : widenSubVector(LargeVecVT, Passthru, + /*ZeroNewElements=*/false, + Subtarget, DAG, DL); SDValue Compressed = DAG.getNode(ISD::VECTOR_COMPRESS, DL, LargeVecVT, Vec, Mask, Passthru); return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VecVT, Compressed, - InsertPos); + DAG.getConstant(0, DL, MVT::i64)); } if (VecVT == MVT::v8i16 || VecVT == MVT::v8i8 || VecVT == MVT::v16i8 ||