@@ -19,17 +19,30 @@ define void @induction_i7(ptr %dst) #0 {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i7> [ [[INDUCTION]], %vector.ph ], [ [[VEC_IND_NEXT:%.*]], %vector.body ]
- ; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 0
- ; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 2 x i7> [[VEC_IND]], zeroinitializer
- ; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[DST:%.*]], i64 [[TMP10]]
- ; CHECK-NEXT: [[EXT:%.+]] = zext <vscale x 2 x i7> [[TMP11]] to <vscale x 2 x i64>
- ; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i64, ptr [[TMP12]], i32 0
- ; CHECK-NEXT: store <vscale x 2 x i64> [[EXT]], ptr [[TMP13]], align 8
- ; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
- ; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 2
- ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP16]]
- ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i7> [[VEC_IND]],
- ;
+ ; CHECK-NEXT: [[STEP_ADD:%.*]] = add <vscale x 2 x i7> [[VEC_IND]], [[DOTSPLAT:%.*]]
+ ; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], 0
+ ; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
+ ; CHECK-NEXT: [[TMP15:%.*]] = mul i64 [[TMP14]], 2
+ ; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP15]], 0
+ ; CHECK-NEXT: [[TMP17:%.*]] = mul i64 [[TMP16]], 1
+ ; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[INDEX]], [[TMP17]]
+ ; CHECK-NEXT: [[TMP19:%.*]] = add <vscale x 2 x i7> [[VEC_IND]], zeroinitializer
+ ; CHECK-NEXT: [[TMP20:%.*]] = add <vscale x 2 x i7> [[STEP_ADD]], zeroinitializer
+ ; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i64, ptr [[DST:%.*]], i64 [[TMP13]]
+ ; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i64, ptr [[DST:%.*]], i64 [[TMP18]]
+ ; CHECK-NEXT: [[TMP23:%.*]] = zext <vscale x 2 x i7> [[TMP19]] to <vscale x 2 x i64>
+ ; CHECK-NEXT: [[TMP24:%.*]] = zext <vscale x 2 x i7> [[TMP20]] to <vscale x 2 x i64>
+ ; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i64, ptr [[TMP21]], i32 0
+ ; CHECK-NEXT: store <vscale x 2 x i64> [[TMP23]], ptr [[TMP25]], align 8
+ ; CHECK-NEXT: [[TMP26:%.*]] = call i64 @llvm.vscale.i64()
+ ; CHECK-NEXT: [[TMP27:%.*]] = mul i64 [[TMP26]], 2
+ ; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i64, ptr [[TMP21]], i64 [[TMP27]]
+ ; CHECK-NEXT: store <vscale x 2 x i64> [[TMP24]], ptr [[TMP28]], align 8
+ ; CHECK-NEXT: [[TMP29:%.*]] = call i64 @llvm.vscale.i64()
+ ; CHECK-NEXT: [[TMP30:%.*]] = mul i64 [[TMP29]], 4
+ ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP30]]
+ ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i7> [[STEP_ADD]], [[DOTSPLAT]]
+
entry:
  br label %for.body
@@ -61,20 +74,30 @@ define void @induction_i3_zext(ptr %dst) #0 {
; CHECK: [[TMP5:%.*]] = trunc <vscale x 2 x i8> [[TMP4]] to <vscale x 2 x i3>
; CHECK-NEXT: [[TMP6:%.*]] = add <vscale x 2 x i3> [[TMP5]], zeroinitializer
; CHECK-NEXT: [[TMP7:%.*]] = mul <vscale x 2 x i3> [[TMP6]], shufflevector (<vscale x 2 x i3> insertelement (<vscale x 2 x i3> poison, i3 1, i64 0), <vscale x 2 x i3> poison, <vscale x 2 x i32> zeroinitializer)
- ; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i3> zeroinitializer, [[TMP7]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i3> [ [[INDUCTION]], %vector.ph ], [ [[VEC_IND_NEXT:%.*]], %vector.body ]
- ; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 0
- ; CHECK-NEXT: [[TMP10:%.*]] = zext <vscale x 2 x i3> [[VEC_IND]] to <vscale x 2 x i64>
- ; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[DST:%.*]], i64 [[TMP9]]
- ; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i64, ptr [[TMP12]], i32 0
- ; CHECK-NEXT: store <vscale x 2 x i64> [[TMP10]], ptr [[TMP13]], align 8
- ; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
- ; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 2
- ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP16]]
- ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i3> [[VEC_IND]],
- ;
+ ; CHECK-NEXT: [[STEP_ADD:%.*]] = add <vscale x 2 x i3> [[VEC_IND]], [[DOTSPLAT]]
+ ; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], 0
+ ; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
+ ; CHECK-NEXT: [[TMP15:%.*]] = mul i64 [[TMP14]], 2
+ ; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP15]], 0
+ ; CHECK-NEXT: [[TMP17:%.*]] = mul i64 [[TMP16]], 1
+ ; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[INDEX]], [[TMP17]]
+ ; CHECK-NEXT: [[TMP19:%.*]] = zext <vscale x 2 x i3> [[VEC_IND]] to <vscale x 2 x i64>
+ ; CHECK-NEXT: [[TMP20:%.*]] = zext <vscale x 2 x i3> [[STEP_ADD]] to <vscale x 2 x i64>
+ ; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[TMP13]]
+ ; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[TMP18]]
+ ; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i64, ptr [[TMP21]], i32 0
+ ; CHECK-NEXT: store <vscale x 2 x i64> [[TMP19]], ptr [[TMP23]], align 8
+ ; CHECK-NEXT: [[TMP24:%.*]] = call i64 @llvm.vscale.i64()
+ ; CHECK-NEXT: [[TMP25:%.*]] = mul i64 [[TMP24]], 2
+ ; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i64, ptr [[TMP21]], i64 [[TMP25]]
+ ; CHECK-NEXT: store <vscale x 2 x i64> [[TMP20]], ptr [[TMP26]], align 8
+ ; CHECK-NEXT: [[TMP27:%.*]] = call i64 @llvm.vscale.i64()
+ ; CHECK-NEXT: [[TMP28:%.*]] = mul i64 [[TMP27]], 4
+ ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP28]]
+ ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i3> [[STEP_ADD]], [[DOTSPLAT]]
entry:
  br label %for.body