@@ -41,7 +41,7 @@ define void @floor() {
   call <16 x double> @llvm.floor.v16f64(<16 x double> undef)
   call <vscale x 1 x double> @llvm.floor.nvx1f64(<vscale x 1 x double> undef)
   call <vscale x 2 x double> @llvm.floor.nvx2f64(<vscale x 2 x double> undef)
-  call <vscale x 4 x double> @llvm.floor.nvx5f64(<vscale x 4 x double> undef)
+  call <vscale x 4 x double> @llvm.floor.nvx4f64(<vscale x 4 x double> undef)
   call <vscale x 8 x double> @llvm.floor.nvx8f64(<vscale x 8 x double> undef)
   ret void
 }
@@ -86,7 +86,7 @@ define void @ceil() {
   call <16 x double> @llvm.ceil.v16f64(<16 x double> undef)
   call <vscale x 1 x double> @llvm.ceil.nvx1f64(<vscale x 1 x double> undef)
   call <vscale x 2 x double> @llvm.ceil.nvx2f64(<vscale x 2 x double> undef)
-  call <vscale x 4 x double> @llvm.ceil.nvx5f64(<vscale x 4 x double> undef)
+  call <vscale x 4 x double> @llvm.ceil.nvx4f64(<vscale x 4 x double> undef)
   call <vscale x 8 x double> @llvm.ceil.nvx8f64(<vscale x 8 x double> undef)
   ret void
 }
@@ -131,7 +131,7 @@ define void @trunc() {
   call <16 x double> @llvm.trunc.v16f64(<16 x double> undef)
   call <vscale x 1 x double> @llvm.trunc.nvx1f64(<vscale x 1 x double> undef)
   call <vscale x 2 x double> @llvm.trunc.nvx2f64(<vscale x 2 x double> undef)
-  call <vscale x 4 x double> @llvm.trunc.nvx5f64(<vscale x 4 x double> undef)
+  call <vscale x 4 x double> @llvm.trunc.nvx4f64(<vscale x 4 x double> undef)
   call <vscale x 8 x double> @llvm.trunc.nvx8f64(<vscale x 8 x double> undef)
   ret void
 }
@@ -176,7 +176,7 @@ define void @rint() {
   call <16 x double> @llvm.rint.v16f64(<16 x double> undef)
   call <vscale x 1 x double> @llvm.rint.nvx1f64(<vscale x 1 x double> undef)
   call <vscale x 2 x double> @llvm.rint.nvx2f64(<vscale x 2 x double> undef)
-  call <vscale x 4 x double> @llvm.rint.nvx5f64(<vscale x 4 x double> undef)
+  call <vscale x 4 x double> @llvm.rint.nvx4f64(<vscale x 4 x double> undef)
   call <vscale x 8 x double> @llvm.rint.nvx8f64(<vscale x 8 x double> undef)
   ret void
 }
@@ -221,7 +221,7 @@ define void @nearbyint() {
   call <16 x double> @llvm.nearbyint.v16f64(<16 x double> undef)
   call <vscale x 1 x double> @llvm.nearbyint.nvx1f64(<vscale x 1 x double> undef)
   call <vscale x 2 x double> @llvm.nearbyint.nvx2f64(<vscale x 2 x double> undef)
-  call <vscale x 4 x double> @llvm.nearbyint.nvx5f64(<vscale x 4 x double> undef)
+  call <vscale x 4 x double> @llvm.nearbyint.nvx4f64(<vscale x 4 x double> undef)
   call <vscale x 8 x double> @llvm.nearbyint.nvx8f64(<vscale x 8 x double> undef)
   ret void
 }
@@ -266,7 +266,7 @@ define void @round() {
   call <16 x double> @llvm.round.v16f64(<16 x double> undef)
   call <vscale x 1 x double> @llvm.round.nvx1f64(<vscale x 1 x double> undef)
   call <vscale x 2 x double> @llvm.round.nvx2f64(<vscale x 2 x double> undef)
-  call <vscale x 4 x double> @llvm.round.nvx5f64(<vscale x 4 x double> undef)
+  call <vscale x 4 x double> @llvm.round.nvx4f64(<vscale x 4 x double> undef)
   call <vscale x 8 x double> @llvm.round.nvx8f64(<vscale x 8 x double> undef)
   ret void
 }
@@ -311,7 +311,7 @@ define void @roundeven() {
   call <16 x double> @llvm.roundeven.v16f64(<16 x double> undef)
   call <vscale x 1 x double> @llvm.roundeven.nvx1f64(<vscale x 1 x double> undef)
   call <vscale x 2 x double> @llvm.roundeven.nvx2f64(<vscale x 2 x double> undef)
-  call <vscale x 4 x double> @llvm.roundeven.nvx5f64(<vscale x 4 x double> undef)
+  call <vscale x 4 x double> @llvm.roundeven.nvx4f64(<vscale x 4 x double> undef)
   call <vscale x 8 x double> @llvm.roundeven.nvx8f64(<vscale x 8 x double> undef)
   ret void
 }
@@ -352,7 +352,7 @@ define void @vp_ceil() {
   call <16 x double> @llvm.vp.ceil.v16f64(<16 x double> undef, <16 x i1> undef, i32 undef)
   call <vscale x 1 x double> @llvm.vp.ceil.nvx1f64(<vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
   call <vscale x 2 x double> @llvm.vp.ceil.nvx2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
-  call <vscale x 4 x double> @llvm.vp.ceil.nvx5f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
+  call <vscale x 4 x double> @llvm.vp.ceil.nvx4f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
   call <vscale x 8 x double> @llvm.vp.ceil.nvx8f64(<vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
   ret void
 }
@@ -393,7 +393,7 @@ define void @vp_floor() {
   call <16 x double> @llvm.vp.floor.v16f64(<16 x double> undef, <16 x i1> undef, i32 undef)
   call <vscale x 1 x double> @llvm.vp.floor.nvx1f64(<vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
   call <vscale x 2 x double> @llvm.vp.floor.nvx2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
-  call <vscale x 4 x double> @llvm.vp.floor.nvx5f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
+  call <vscale x 4 x double> @llvm.vp.floor.nvx4f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
   call <vscale x 8 x double> @llvm.vp.floor.nvx8f64(<vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
   ret void
 }
@@ -434,7 +434,7 @@ define void @vp_round() {
   call <16 x double> @llvm.vp.round.v16f64(<16 x double> undef, <16 x i1> undef, i32 undef)
   call <vscale x 1 x double> @llvm.vp.round.nvx1f64(<vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
   call <vscale x 2 x double> @llvm.vp.round.nvx2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
-  call <vscale x 4 x double> @llvm.vp.round.nvx5f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
+  call <vscale x 4 x double> @llvm.vp.round.nvx4f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
   call <vscale x 8 x double> @llvm.vp.round.nvx8f64(<vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
   ret void
 }
@@ -475,7 +475,7 @@ define void @vp_roundeven() {
   call <16 x double> @llvm.vp.roundeven.v16f64(<16 x double> undef, <16 x i1> undef, i32 undef)
   call <vscale x 1 x double> @llvm.vp.roundeven.nvx1f64(<vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
   call <vscale x 2 x double> @llvm.vp.roundeven.nvx2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
-  call <vscale x 4 x double> @llvm.vp.roundeven.nvx5f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
+  call <vscale x 4 x double> @llvm.vp.roundeven.nvx4f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
   call <vscale x 8 x double> @llvm.vp.roundeven.nvx8f64(<vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
   ret void
 }
@@ -516,7 +516,7 @@ define void @vp_roundtozero() {
   call <16 x double> @llvm.vp.roundtozero.v16f64(<16 x double> undef, <16 x i1> undef, i32 undef)
   call <vscale x 1 x double> @llvm.vp.roundtozero.nvx1f64(<vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
   call <vscale x 2 x double> @llvm.vp.roundtozero.nvx2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
-  call <vscale x 4 x double> @llvm.vp.roundtozero.nvx5f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
+  call <vscale x 4 x double> @llvm.vp.roundtozero.nvx4f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
   call <vscale x 8 x double> @llvm.vp.roundtozero.nvx8f64(<vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
   ret void
 }
@@ -557,7 +557,7 @@ define void @vp_rint() {
   call <16 x double> @llvm.vp.rint.v16f64(<16 x double> undef, <16 x i1> undef, i32 undef)
   call <vscale x 1 x double> @llvm.vp.rint.nvx1f64(<vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
   call <vscale x 2 x double> @llvm.vp.rint.nvx2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
-  call <vscale x 4 x double> @llvm.vp.rint.nvx5f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
+  call <vscale x 4 x double> @llvm.vp.rint.nvx4f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
   call <vscale x 8 x double> @llvm.vp.rint.nvx8f64(<vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
   ret void
 }
@@ -598,7 +598,7 @@ define void @vp_nearbyint() {
   call <16 x double> @llvm.vp.nearbyint.v16f64(<16 x double> undef, <16 x i1> undef, i32 undef)
   call <vscale x 1 x double> @llvm.vp.nearbyint.nvx1f64(<vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
   call <vscale x 2 x double> @llvm.vp.nearbyint.nvx2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
-  call <vscale x 4 x double> @llvm.vp.nearbyint.nvx5f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
+  call <vscale x 4 x double> @llvm.vp.nearbyint.nvx4f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
   call <vscale x 8 x double> @llvm.vp.nearbyint.nvx8f64(<vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
   ret void
 }
@@ -620,7 +620,7 @@ declare <8 x double> @llvm.floor.v8f64(<8 x double>)
 declare <16 x double> @llvm.floor.v16f64(<16 x double>)
 declare <vscale x 1 x double> @llvm.floor.nvx1f64(<vscale x 1 x double>)
 declare <vscale x 2 x double> @llvm.floor.nvx2f64(<vscale x 2 x double>)
-declare <vscale x 4 x double> @llvm.floor.nvx5f64(<vscale x 4 x double>)
+declare <vscale x 4 x double> @llvm.floor.nvx4f64(<vscale x 4 x double>)
 declare <vscale x 8 x double> @llvm.floor.nvx8f64(<vscale x 8 x double>)

 declare float @llvm.ceil.f32(float)
@@ -640,7 +640,7 @@ declare <8 x double> @llvm.ceil.v8f64(<8 x double>)
 declare <16 x double> @llvm.ceil.v16f64(<16 x double>)
 declare <vscale x 1 x double> @llvm.ceil.nvx1f64(<vscale x 1 x double>)
 declare <vscale x 2 x double> @llvm.ceil.nvx2f64(<vscale x 2 x double>)
-declare <vscale x 4 x double> @llvm.ceil.nvx5f64(<vscale x 4 x double>)
+declare <vscale x 4 x double> @llvm.ceil.nvx4f64(<vscale x 4 x double>)
 declare <vscale x 8 x double> @llvm.ceil.nvx8f64(<vscale x 8 x double>)

 declare float @llvm.trunc.f32(float)
@@ -660,7 +660,7 @@ declare <8 x double> @llvm.trunc.v8f64(<8 x double>)
 declare <16 x double> @llvm.trunc.v16f64(<16 x double>)
 declare <vscale x 1 x double> @llvm.trunc.nvx1f64(<vscale x 1 x double>)
 declare <vscale x 2 x double> @llvm.trunc.nvx2f64(<vscale x 2 x double>)
-declare <vscale x 4 x double> @llvm.trunc.nvx5f64(<vscale x 4 x double>)
+declare <vscale x 4 x double> @llvm.trunc.nvx4f64(<vscale x 4 x double>)
 declare <vscale x 8 x double> @llvm.trunc.nvx8f64(<vscale x 8 x double>)

 declare float @llvm.rint.f32(float)
@@ -680,7 +680,7 @@ declare <8 x double> @llvm.rint.v8f64(<8 x double>)
 declare <16 x double> @llvm.rint.v16f64(<16 x double>)
 declare <vscale x 1 x double> @llvm.rint.nvx1f64(<vscale x 1 x double>)
 declare <vscale x 2 x double> @llvm.rint.nvx2f64(<vscale x 2 x double>)
-declare <vscale x 4 x double> @llvm.rint.nvx5f64(<vscale x 4 x double>)
+declare <vscale x 4 x double> @llvm.rint.nvx4f64(<vscale x 4 x double>)
 declare <vscale x 8 x double> @llvm.rint.nvx8f64(<vscale x 8 x double>)

 declare float @llvm.nearbyint.f32(float)
@@ -700,7 +700,7 @@ declare <8 x double> @llvm.nearbyint.v8f64(<8 x double>)
 declare <16 x double> @llvm.nearbyint.v16f64(<16 x double>)
 declare <vscale x 1 x double> @llvm.nearbyint.nvx1f64(<vscale x 1 x double>)
 declare <vscale x 2 x double> @llvm.nearbyint.nvx2f64(<vscale x 2 x double>)
-declare <vscale x 4 x double> @llvm.nearbyint.nvx5f64(<vscale x 4 x double>)
+declare <vscale x 4 x double> @llvm.nearbyint.nvx4f64(<vscale x 4 x double>)
 declare <vscale x 8 x double> @llvm.nearbyint.nvx8f64(<vscale x 8 x double>)

 declare float @llvm.round.f32(float)
@@ -720,7 +720,7 @@ declare <8 x double> @llvm.round.v8f64(<8 x double>)
 declare <16 x double> @llvm.round.v16f64(<16 x double>)
 declare <vscale x 1 x double> @llvm.round.nvx1f64(<vscale x 1 x double>)
 declare <vscale x 2 x double> @llvm.round.nvx2f64(<vscale x 2 x double>)
-declare <vscale x 4 x double> @llvm.round.nvx5f64(<vscale x 4 x double>)
+declare <vscale x 4 x double> @llvm.round.nvx4f64(<vscale x 4 x double>)
 declare <vscale x 8 x double> @llvm.round.nvx8f64(<vscale x 8 x double>)

 declare float @llvm.roundeven.f32(float)
@@ -740,7 +740,7 @@ declare <8 x double> @llvm.roundeven.v8f64(<8 x double>)
 declare <16 x double> @llvm.roundeven.v16f64(<16 x double>)
 declare <vscale x 1 x double> @llvm.roundeven.nvx1f64(<vscale x 1 x double>)
 declare <vscale x 2 x double> @llvm.roundeven.nvx2f64(<vscale x 2 x double>)
-declare <vscale x 4 x double> @llvm.roundeven.nvx5f64(<vscale x 4 x double>)
+declare <vscale x 4 x double> @llvm.roundeven.nvx4f64(<vscale x 4 x double>)
 declare <vscale x 8 x double> @llvm.roundeven.nvx8f64(<vscale x 8 x double>)

 declare <2 x float> @llvm.vp.ceil.v2f32(<2 x float>, <2 x i1>, i32)
@@ -759,7 +759,7 @@ declare <8 x double> @llvm.vp.ceil.v8f64(<8 x double>, <8 x i1>, i32)
 declare <16 x double> @llvm.vp.ceil.v16f64(<16 x double>, <16 x i1>, i32)
 declare <vscale x 1 x double> @llvm.vp.ceil.nvx1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
 declare <vscale x 2 x double> @llvm.vp.ceil.nvx2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x double> @llvm.vp.ceil.nvx5f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x double> @llvm.vp.ceil.nvx4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
 declare <vscale x 8 x double> @llvm.vp.ceil.nvx8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)

 declare <2 x float> @llvm.vp.floor.v2f32(<2 x float>, <2 x i1>, i32)
@@ -778,7 +778,7 @@ declare <8 x double> @llvm.vp.floor.v8f64(<8 x double>, <8 x i1>, i32)
 declare <16 x double> @llvm.vp.floor.v16f64(<16 x double>, <16 x i1>, i32)
 declare <vscale x 1 x double> @llvm.vp.floor.nvx1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
 declare <vscale x 2 x double> @llvm.vp.floor.nvx2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x double> @llvm.vp.floor.nvx5f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x double> @llvm.vp.floor.nvx4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
 declare <vscale x 8 x double> @llvm.vp.floor.nvx8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)

 declare <2 x float> @llvm.vp.round.v2f32(<2 x float>, <2 x i1>, i32)
@@ -797,7 +797,7 @@ declare <8 x double> @llvm.vp.round.v8f64(<8 x double>, <8 x i1>, i32)
 declare <16 x double> @llvm.vp.round.v16f64(<16 x double>, <16 x i1>, i32)
 declare <vscale x 1 x double> @llvm.vp.round.nvx1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
 declare <vscale x 2 x double> @llvm.vp.round.nvx2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x double> @llvm.vp.round.nvx5f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x double> @llvm.vp.round.nvx4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
 declare <vscale x 8 x double> @llvm.vp.round.nvx8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)

 declare <2 x float> @llvm.vp.roundeven.v2f32(<2 x float>, <2 x i1>, i32)
@@ -816,7 +816,7 @@ declare <8 x double> @llvm.vp.roundeven.v8f64(<8 x double>, <8 x i1>, i32)
 declare <16 x double> @llvm.vp.roundeven.v16f64(<16 x double>, <16 x i1>, i32)
 declare <vscale x 1 x double> @llvm.vp.roundeven.nvx1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
 declare <vscale x 2 x double> @llvm.vp.roundeven.nvx2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x double> @llvm.vp.roundeven.nvx5f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x double> @llvm.vp.roundeven.nvx4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
 declare <vscale x 8 x double> @llvm.vp.roundeven.nvx8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)

 declare <2 x float> @llvm.vp.roundtozero.v2f32(<2 x float>, <2 x i1>, i32)
@@ -835,7 +835,7 @@ declare <8 x double> @llvm.vp.roundtozero.v8f64(<8 x double>, <8 x i1>, i32)
 declare <16 x double> @llvm.vp.roundtozero.v16f64(<16 x double>, <16 x i1>, i32)
 declare <vscale x 1 x double> @llvm.vp.roundtozero.nvx1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
 declare <vscale x 2 x double> @llvm.vp.roundtozero.nvx2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x double> @llvm.vp.roundtozero.nvx5f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x double> @llvm.vp.roundtozero.nvx4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
 declare <vscale x 8 x double> @llvm.vp.roundtozero.nvx8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)

 declare <2 x float> @llvm.vp.rint.v2f32(<2 x float>, <2 x i1>, i32)
@@ -854,7 +854,7 @@ declare <8 x double> @llvm.vp.rint.v8f64(<8 x double>, <8 x i1>, i32)
 declare <16 x double> @llvm.vp.rint.v16f64(<16 x double>, <16 x i1>, i32)
 declare <vscale x 1 x double> @llvm.vp.rint.nvx1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
 declare <vscale x 2 x double> @llvm.vp.rint.nvx2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x double> @llvm.vp.rint.nvx5f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x double> @llvm.vp.rint.nvx4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
 declare <vscale x 8 x double> @llvm.vp.rint.nvx8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)

 declare <2 x float> @llvm.vp.nearbyint.v2f32(<2 x float>, <2 x i1>, i32)
@@ -873,5 +873,5 @@ declare <8 x double> @llvm.vp.nearbyint.v8f64(<8 x double>, <8 x i1>, i32)
 declare <16 x double> @llvm.vp.nearbyint.v16f64(<16 x double>, <16 x i1>, i32)
 declare <vscale x 1 x double> @llvm.vp.nearbyint.nvx1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
 declare <vscale x 2 x double> @llvm.vp.nearbyint.nvx2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x double> @llvm.vp.nearbyint.nvx5f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x double> @llvm.vp.nearbyint.nvx4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
 declare <vscale x 8 x double> @llvm.vp.nearbyint.nvx8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)