
Commit f28e522

[Clang] Change two placeholders from undef to poison [NFC] (#119141)
- Use `poison` instead of `undef` as a phi operand for an unreachable path (the predecessor will never branch to the BB that uses the phi's value).
- Call `@llvm.vector.insert` with a `poison` subvector when performing a `bitcast` from a fixed vector to a scalable vector.
1 parent: 3654f1b

13 files changed (+55, -55 lines)

clang/lib/CodeGen/CGCall.cpp

Lines changed: 1 addition & 1 deletion
@@ -4379,7 +4379,7 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
     llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
                                                     "icr.to-use");
     phiToUse->addIncoming(valueToUse, copyBB);
-    phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
+    phiToUse->addIncoming(llvm::PoisonValue::get(valueToUse->getType()),
                           originBB);
     valueToUse = phiToUse;
   }
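For context on this first change: the phi merges the real value from the copy path with a placeholder on the origin edge, and that placeholder is never observed because the phi's value is only consumed on the copy path. That makes `poison` the natural filler, since it carries no guarantees and permits more optimization than `undef`. A minimal hand-written LLVM IR sketch of this shape (block and value names are illustrative, not Clang's actual output):

define i32 @phi_placeholder_sketch(i1 %did.copy, i32 %copied) {
entry:
  br i1 %did.copy, label %copy.bb, label %origin.bb

copy.bb:
  br label %cont

origin.bb:
  br label %cont

cont:
  ; The origin.bb incoming value is never observed: %use is only reached
  ; when %did.copy is true, so poison is a safe placeholder on that edge.
  %merged = phi i32 [ %copied, %copy.bb ], [ poison, %origin.bb ]
  br i1 %did.copy, label %use, label %done

use:
  ret i32 %merged

done:
  ret i32 0
}

Since no executed path can read the placeholder, swapping `undef` for `poison` does not change program behavior, which is why the commit is tagged [NFC].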

clang/lib/CodeGen/CGExprScalar.cpp

Lines changed: 2 additions & 2 deletions
@@ -2370,10 +2370,10 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
           ScalableDstTy->getElementCount().getKnownMinValue() / 8);
     }
     if (FixedSrcTy->getElementType() == ScalableDstTy->getElementType()) {
-      llvm::Value *UndefVec = llvm::UndefValue::get(ScalableDstTy);
+      llvm::Value *PoisonVec = llvm::PoisonValue::get(ScalableDstTy);
       llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
       llvm::Value *Result = Builder.CreateInsertVector(
-          ScalableDstTy, UndefVec, Src, Zero, "cast.scalable");
+          ScalableDstTy, PoisonVec, Src, Zero, "cast.scalable");
       if (Result->getType() != DstTy)
         Result = Builder.CreateBitCast(Result, DstTy);
       return Result;
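This second change affects the IR pattern Clang emits when converting a fixed-length vector to a scalable one, which all the test updates below reflect: the fixed value is inserted at index 0 of a placeholder scalable vector whose remaining lanes are unspecified, so `poison` is the right placeholder for them. A standalone IR sketch of the resulting pattern (the vector types and function name here are illustrative choices, not taken from the tests):

define <vscale x 4 x i32> @cast_fixed_to_scalable(<4 x i32> %src) {
entry:
  ; Insert the fixed-width vector at element 0 of a poison scalable vector;
  ; lanes past the source vector stay unspecified, so poison is correct.
  %cast.scalable = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> %src, i64 0)
  ret <vscale x 4 x i32> %cast.scalable
}

declare <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32>, <4 x i32>, i64)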

clang/test/CodeGen/AArch64/sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c

Lines changed: 2 additions & 2 deletions
@@ -53,7 +53,7 @@ typedef int8_t vec_int8 __attribute__((vector_size(N / 8)));
 // CHECK128-LABEL: define{{.*}} <16 x i8> @f2(<16 x i8> noundef %x)
 // CHECK128-NEXT: entry:
 // CHECK128-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
-// CHECK128-NEXT: [[CASTSCALABLESVE:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[X:%.*]], i64 0)
+// CHECK128-NEXT: [[CASTSCALABLESVE:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> poison, <16 x i8> [[X:%.*]], i64 0)
 // CHECK128-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asrd.nxv16i8(<vscale x 16 x i1> [[TMP0]], <vscale x 16 x i8> [[CASTSCALABLESVE]], i32 1)
 // CHECK128-NEXT: [[CASTFIXEDSVE:%.*]] = tail call <16 x i8> @llvm.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8> [[TMP1]], i64 0)
 // CHECK128-NEXT: ret <16 x i8> [[CASTFIXEDSVE]]
@@ -63,7 +63,7 @@ typedef int8_t vec_int8 __attribute__((vector_size(N / 8)));
 // CHECK-NEXT: entry:
 // CHECK-NEXT: [[X:%.*]] = load <[[#div(VBITS,8)]] x i8>, ptr [[TMP0:%.*]], align 16, [[TBAA6:!tbaa !.*]]
 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v[[#div(VBITS,8)]]i8(<vscale x 16 x i8> undef, <[[#div(VBITS,8)]] x i8> [[X]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v[[#div(VBITS,8)]]i8(<vscale x 16 x i8> poison, <[[#div(VBITS,8)]] x i8> [[X]], i64 0)
 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asrd.nxv16i8(<vscale x 16 x i1> [[TMP1]], <vscale x 16 x i8> [[CASTSCALABLESVE]], i32 1)
 // CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = tail call <[[#div(VBITS,8)]] x i8> @llvm.vector.extract.v[[#div(VBITS,8)]]i8.nxv16i8(<vscale x 16 x i8> [[TMP2]], i64 0)
 // CHECK-NEXT: store <[[#div(VBITS,8)]] x i8> [[CASTFIXEDSVE]], ptr [[AGG_RESULT:%.*]], align 16, [[TBAA6]]

clang/test/CodeGen/RISCV/attr-rvv-vector-bits-bitcast.c

Lines changed: 9 additions & 9 deletions
@@ -71,21 +71,21 @@ DEFINE_STRUCT(bool64)
 // CHECK-64-NEXT: entry:
 // CHECK-64-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 8
 // CHECK-64-NEXT: [[TMP0:%.*]] = load <1 x i64>, ptr [[Y]], align 8, !tbaa [[TBAA6:![0-9]+]]
-// CHECK-64-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v1i64(<vscale x 1 x i64> undef, <1 x i64> [[TMP0]], i64 0)
+// CHECK-64-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v1i64(<vscale x 1 x i64> poison, <1 x i64> [[TMP0]], i64 0)
 // CHECK-64-NEXT: ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 // CHECK-128-LABEL: @read_int64m1(
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 16
 // CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr [[Y]], align 8, !tbaa [[TBAA6:![0-9]+]]
-// CHECK-128-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v2i64(<vscale x 1 x i64> undef, <2 x i64> [[TMP0]], i64 0)
+// CHECK-128-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v2i64(<vscale x 1 x i64> poison, <2 x i64> [[TMP0]], i64 0)
 // CHECK-128-NEXT: ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 // CHECK-256-LABEL: @read_int64m1(
 // CHECK-256-NEXT: entry:
 // CHECK-256-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 32
 // CHECK-256-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr [[Y]], align 8, !tbaa [[TBAA6:![0-9]+]]
-// CHECK-256-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[TMP0]], i64 0)
+// CHECK-256-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> poison, <4 x i64> [[TMP0]], i64 0)
 // CHECK-256-NEXT: ret <vscale x 1 x i64> [[CAST_SCALABLE]]
 //
 vint64m1_t read_int64m1(struct struct_int64m1 *s) {
@@ -125,21 +125,21 @@ void write_int64m1(struct struct_int64m1 *s, vint64m1_t x) {
 // CHECK-64-NEXT: entry:
 // CHECK-64-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 8
 // CHECK-64-NEXT: [[TMP0:%.*]] = load <1 x double>, ptr [[Y]], align 8, !tbaa [[TBAA6]]
-// CHECK-64-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v1f64(<vscale x 1 x double> undef, <1 x double> [[TMP0]], i64 0)
+// CHECK-64-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v1f64(<vscale x 1 x double> poison, <1 x double> [[TMP0]], i64 0)
 // CHECK-64-NEXT: ret <vscale x 1 x double> [[CAST_SCALABLE]]
 //
 // CHECK-128-LABEL: @read_float64m1(
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 16
 // CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x double>, ptr [[Y]], align 8, !tbaa [[TBAA6]]
-// CHECK-128-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v2f64(<vscale x 1 x double> undef, <2 x double> [[TMP0]], i64 0)
+// CHECK-128-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v2f64(<vscale x 1 x double> poison, <2 x double> [[TMP0]], i64 0)
 // CHECK-128-NEXT: ret <vscale x 1 x double> [[CAST_SCALABLE]]
 //
 // CHECK-256-LABEL: @read_float64m1(
 // CHECK-256-NEXT: entry:
 // CHECK-256-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 32
 // CHECK-256-NEXT: [[TMP0:%.*]] = load <4 x double>, ptr [[Y]], align 8, !tbaa [[TBAA6]]
-// CHECK-256-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v4f64(<vscale x 1 x double> undef, <4 x double> [[TMP0]], i64 0)
+// CHECK-256-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 1 x double> @llvm.vector.insert.nxv1f64.v4f64(<vscale x 1 x double> poison, <4 x double> [[TMP0]], i64 0)
 // CHECK-256-NEXT: ret <vscale x 1 x double> [[CAST_SCALABLE]]
 //
 vfloat64m1_t read_float64m1(struct struct_float64m1 *s) {
@@ -179,23 +179,23 @@ void write_float64m1(struct struct_float64m1 *s, vfloat64m1_t x) {
 // CHECK-64-NEXT: entry:
 // CHECK-64-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 8
 // CHECK-64-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr [[Y]], align 8, !tbaa [[TBAA6]]
-// CHECK-64-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v8i8(<vscale x 8 x i8> undef, <8 x i8> [[TMP0]], i64 0)
+// CHECK-64-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v8i8(<vscale x 8 x i8> poison, <8 x i8> [[TMP0]], i64 0)
 // CHECK-64-NEXT: [[TMP1:%.*]] = bitcast <vscale x 8 x i8> [[CAST_SCALABLE]] to <vscale x 64 x i1>
 // CHECK-64-NEXT: ret <vscale x 64 x i1> [[TMP1]]
 //
 // CHECK-128-LABEL: @read_bool1(
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 16
 // CHECK-128-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Y]], align 8, !tbaa [[TBAA6]]
-// CHECK-128-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v16i8(<vscale x 8 x i8> undef, <16 x i8> [[TMP0]], i64 0)
+// CHECK-128-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v16i8(<vscale x 8 x i8> poison, <16 x i8> [[TMP0]], i64 0)
 // CHECK-128-NEXT: [[TMP1:%.*]] = bitcast <vscale x 8 x i8> [[CAST_SCALABLE]] to <vscale x 64 x i1>
 // CHECK-128-NEXT: ret <vscale x 64 x i1> [[TMP1]]
 //
 // CHECK-256-LABEL: @read_bool1(
 // CHECK-256-NEXT: entry:
 // CHECK-256-NEXT: [[Y:%.*]] = getelementptr inbounds nuw i8, ptr [[S:%.*]], i64 32
 // CHECK-256-NEXT: [[TMP0:%.*]] = load <32 x i8>, ptr [[Y]], align 8, !tbaa [[TBAA6]]
-// CHECK-256-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[TMP0]], i64 0)
+// CHECK-256-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> poison, <32 x i8> [[TMP0]], i64 0)
 // CHECK-256-NEXT: [[TMP1:%.*]] = bitcast <vscale x 8 x i8> [[CAST_SCALABLE]] to <vscale x 64 x i1>
 // CHECK-256-NEXT: ret <vscale x 64 x i1> [[TMP1]]
 //

clang/test/CodeGen/RISCV/attr-rvv-vector-bits-cast.c

Lines changed: 1 addition & 1 deletion
@@ -120,7 +120,7 @@ vbool32_t to_vbool32_t(fixed_bool32_t type) {
 // CHECK-LABEL: @to_vint32m1_t__from_gnu_int32m1_t(
 // CHECK-NEXT: entry:
 // CHECK-NEXT: [[TYPE:%.*]] = load <8 x i32>, ptr [[TMP0:%.*]], align 32, !tbaa [[TBAA8]]
-// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[TYPE]], i64 0)
+// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> poison, <8 x i32> [[TYPE]], i64 0)
 // CHECK-NEXT: ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 vint32m1_t to_vint32m1_t__from_gnu_int32m1_t(gnu_int32m1_t type) {

clang/test/CodeGen/RISCV/attr-rvv-vector-bits-codegen.c

Lines changed: 7 additions & 7 deletions
@@ -57,14 +57,14 @@ fixed_bool32_t global_bool32;
 // CHECK-NEXT: store <vscale x 64 x i8> [[VEC:%.*]], ptr [[VEC_ADDR]], align 1
 // CHECK-NEXT: [[TMP0:%.*]] = load <vscale x 64 x i1>, ptr [[M_ADDR]], align 1
 // CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @global_bool1, align 8
-// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[TMP1]], i64 0)
+// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> poison, <32 x i8> [[TMP1]], i64 0)
 // CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 8 x i8> [[CAST_SCALABLE]] to <vscale x 64 x i1>
 // CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1.i64(<vscale x 64 x i1> [[TMP0]], <vscale x 64 x i1> [[TMP2]], i64 256)
 // CHECK-NEXT: store <vscale x 64 x i1> [[TMP3]], ptr [[MASK]], align 1
 // CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 64 x i1>, ptr [[MASK]], align 1
 // CHECK-NEXT: [[TMP5:%.*]] = load <vscale x 64 x i8>, ptr [[VEC_ADDR]], align 1
 // CHECK-NEXT: [[TMP6:%.*]] = load <256 x i8>, ptr @global_vec_int8m8, align 8
-// CHECK-NEXT: [[CAST_SCALABLE1:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.v256i8(<vscale x 64 x i8> undef, <256 x i8> [[TMP6]], i64 0)
+// CHECK-NEXT: [[CAST_SCALABLE1:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.v256i8(<vscale x 64 x i8> poison, <256 x i8> [[TMP6]], i64 0)
 // CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[TMP5]], <vscale x 64 x i8> [[CAST_SCALABLE1]], <vscale x 64 x i1> [[TMP4]], i64 256, i64 3)
 // CHECK-NEXT: [[CAST_FIXED:%.*]] = call <256 x i8> @llvm.vector.extract.v256i8.nxv64i8(<vscale x 64 x i8> [[TMP7]], i64 0)
 // CHECK-NEXT: store <256 x i8> [[CAST_FIXED]], ptr [[RETVAL]], align 8
@@ -87,14 +87,14 @@ fixed_int8m8_t test_bool1(vbool1_t m, vint8m8_t vec) {
 // CHECK-NEXT: store <vscale x 16 x i16> [[VEC:%.*]], ptr [[VEC_ADDR]], align 2
 // CHECK-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i1>, ptr [[M_ADDR]], align 1
 // CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr @global_bool4, align 8
-// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[TMP1]], i64 0)
+// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> poison, <8 x i8> [[TMP1]], i64 0)
 // CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 2 x i8> [[CAST_SCALABLE]] to <vscale x 16 x i1>
 // CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmand.nxv16i1.i64(<vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1> [[TMP2]], i64 64)
 // CHECK-NEXT: store <vscale x 16 x i1> [[TMP3]], ptr [[MASK]], align 1
 // CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 16 x i1>, ptr [[MASK]], align 1
 // CHECK-NEXT: [[TMP5:%.*]] = load <vscale x 16 x i16>, ptr [[VEC_ADDR]], align 2
 // CHECK-NEXT: [[TMP6:%.*]] = load <64 x i16>, ptr @global_vec_int16m4, align 8
-// CHECK-NEXT: [[CAST_SCALABLE1:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.v64i16(<vscale x 16 x i16> undef, <64 x i16> [[TMP6]], i64 0)
+// CHECK-NEXT: [[CAST_SCALABLE1:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.v64i16(<vscale x 16 x i16> poison, <64 x i16> [[TMP6]], i64 0)
 // CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[TMP5]], <vscale x 16 x i16> [[CAST_SCALABLE1]], <vscale x 16 x i1> [[TMP4]], i64 64, i64 3)
 // CHECK-NEXT: [[CAST_FIXED:%.*]] = call <64 x i16> @llvm.vector.extract.v64i16.nxv16i16(<vscale x 16 x i16> [[TMP7]], i64 0)
 // CHECK-NEXT: store <64 x i16> [[CAST_FIXED]], ptr [[RETVAL]], align 8
@@ -125,7 +125,7 @@ fixed_int16m4_t test_bool4(vbool4_t m, vint16m4_t vec) {
 // CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 2 x i1>, ptr [[MASK]], align 1
 // CHECK-NEXT: [[TMP5:%.*]] = load <vscale x 2 x i32>, ptr [[VEC_ADDR]], align 4
 // CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr @global_vec, align 8
-// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[TMP6]], i64 0)
+// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> poison, <8 x i32> [[TMP6]], i64 0)
 // CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32> [[CAST_SCALABLE]], <vscale x 2 x i1> [[TMP4]], i64 8, i64 3)
 // CHECK-NEXT: [[CAST_FIXED:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[TMP7]], i64 0)
 // CHECK-NEXT: store <8 x i32> [[CAST_FIXED]], ptr [[RETVAL]], align 8
@@ -247,7 +247,7 @@ fixed_bool32_t address_of_array_idx_bool32() {
 // CHECK-NEXT: [[VEC_ADDR:%.*]] = alloca <vscale x 2 x i32>, align 4
 // CHECK-NEXT: store <vscale x 2 x i32> [[VEC:%.*]], ptr [[VEC_ADDR]], align 4
 // CHECK-NEXT: [[TMP0:%.*]] = load <8 x i32>, ptr @global_vec, align 8
-// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[TMP0]], i64 0)
+// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> poison, <8 x i32> [[TMP0]], i64 0)
 // CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 2 x i32>, ptr [[VEC_ADDR]], align 4
 // CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[CAST_SCALABLE]], <vscale x 2 x i32> [[TMP1]], i64 8)
 // CHECK-NEXT: [[CAST_FIXED:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[TMP2]], i64 0)
@@ -303,7 +303,7 @@ fixed_int32m2_t array_arg_m2(fixed_int32m2_t arr[]) {
 // CHECK-NEXT: [[VEC_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 4
 // CHECK-NEXT: store <vscale x 4 x i32> [[VEC:%.*]], ptr [[VEC_ADDR]], align 4
 // CHECK-NEXT: [[TMP0:%.*]] = load <16 x i32>, ptr @global_vec_m2, align 8
-// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP0]], i64 0)
+// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> poison, <16 x i32> [[TMP0]], i64 0)
 // CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 4 x i32>, ptr [[VEC_ADDR]], align 4
 // CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[CAST_SCALABLE]], <vscale x 4 x i32> [[TMP1]], i64 16)
 // CHECK-NEXT: [[CAST_FIXED:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP2]], i64 0)
