@@ -134,22 +134,22 @@ long double truncl(long double);
// RV32-NEXT: [[TMP44:%.*]] = call fp128 @llvm.floor.f128(fp128 [[TMP43]])
// RV32-NEXT: [[TMP45:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP46:%.*]] = load float, ptr [[FARG_ADDR]], align 4
- // RV32-NEXT: [[TMP47:%.*]] = call float @llvm.maxnum.f32(float [[TMP45]], float [[TMP46]])
+ // RV32-NEXT: [[TMP47:%.*]] = call nsz float @llvm.maxnum.f32(float [[TMP45]], float [[TMP46]])
// RV32-NEXT: [[TMP48:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP49:%.*]] = load double, ptr [[DARG_ADDR]], align 8
- // RV32-NEXT: [[TMP50:%.*]] = call double @llvm.maxnum.f64(double [[TMP48]], double [[TMP49]])
+ // RV32-NEXT: [[TMP50:%.*]] = call nsz double @llvm.maxnum.f64(double [[TMP48]], double [[TMP49]])
// RV32-NEXT: [[TMP51:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP52:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
- // RV32-NEXT: [[TMP53:%.*]] = call fp128 @llvm.maxnum.f128(fp128 [[TMP51]], fp128 [[TMP52]])
+ // RV32-NEXT: [[TMP53:%.*]] = call nsz fp128 @llvm.maxnum.f128(fp128 [[TMP51]], fp128 [[TMP52]])
// RV32-NEXT: [[TMP54:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP55:%.*]] = load float, ptr [[FARG_ADDR]], align 4
- // RV32-NEXT: [[TMP56:%.*]] = call float @llvm.minnum.f32(float [[TMP54]], float [[TMP55]])
+ // RV32-NEXT: [[TMP56:%.*]] = call nsz float @llvm.minnum.f32(float [[TMP54]], float [[TMP55]])
// RV32-NEXT: [[TMP57:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP58:%.*]] = load double, ptr [[DARG_ADDR]], align 8
- // RV32-NEXT: [[TMP59:%.*]] = call double @llvm.minnum.f64(double [[TMP57]], double [[TMP58]])
+ // RV32-NEXT: [[TMP59:%.*]] = call nsz double @llvm.minnum.f64(double [[TMP57]], double [[TMP58]])
// RV32-NEXT: [[TMP60:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP61:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
- // RV32-NEXT: [[TMP62:%.*]] = call fp128 @llvm.minnum.f128(fp128 [[TMP60]], fp128 [[TMP61]])
+ // RV32-NEXT: [[TMP62:%.*]] = call nsz fp128 @llvm.minnum.f128(fp128 [[TMP60]], fp128 [[TMP61]])
// RV32-NEXT: [[TMP63:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP64:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[FMOD:%.*]] = frem float [[TMP63]], [[TMP64]]
@@ -310,22 +310,22 @@ long double truncl(long double);
// RV64-NEXT: [[TMP44:%.*]] = call fp128 @llvm.floor.f128(fp128 [[TMP43]])
// RV64-NEXT: [[TMP45:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP46:%.*]] = load float, ptr [[FARG_ADDR]], align 4
- // RV64-NEXT: [[TMP47:%.*]] = call float @llvm.maxnum.f32(float [[TMP45]], float [[TMP46]])
+ // RV64-NEXT: [[TMP47:%.*]] = call nsz float @llvm.maxnum.f32(float [[TMP45]], float [[TMP46]])
// RV64-NEXT: [[TMP48:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP49:%.*]] = load double, ptr [[DARG_ADDR]], align 8
- // RV64-NEXT: [[TMP50:%.*]] = call double @llvm.maxnum.f64(double [[TMP48]], double [[TMP49]])
+ // RV64-NEXT: [[TMP50:%.*]] = call nsz double @llvm.maxnum.f64(double [[TMP48]], double [[TMP49]])
// RV64-NEXT: [[TMP51:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP52:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
- // RV64-NEXT: [[TMP53:%.*]] = call fp128 @llvm.maxnum.f128(fp128 [[TMP51]], fp128 [[TMP52]])
+ // RV64-NEXT: [[TMP53:%.*]] = call nsz fp128 @llvm.maxnum.f128(fp128 [[TMP51]], fp128 [[TMP52]])
// RV64-NEXT: [[TMP54:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP55:%.*]] = load float, ptr [[FARG_ADDR]], align 4
- // RV64-NEXT: [[TMP56:%.*]] = call float @llvm.minnum.f32(float [[TMP54]], float [[TMP55]])
+ // RV64-NEXT: [[TMP56:%.*]] = call nsz float @llvm.minnum.f32(float [[TMP54]], float [[TMP55]])
// RV64-NEXT: [[TMP57:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP58:%.*]] = load double, ptr [[DARG_ADDR]], align 8
- // RV64-NEXT: [[TMP59:%.*]] = call double @llvm.minnum.f64(double [[TMP57]], double [[TMP58]])
+ // RV64-NEXT: [[TMP59:%.*]] = call nsz double @llvm.minnum.f64(double [[TMP57]], double [[TMP58]])
// RV64-NEXT: [[TMP60:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP61:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
- // RV64-NEXT: [[TMP62:%.*]] = call fp128 @llvm.minnum.f128(fp128 [[TMP60]], fp128 [[TMP61]])
+ // RV64-NEXT: [[TMP62:%.*]] = call nsz fp128 @llvm.minnum.f128(fp128 [[TMP60]], fp128 [[TMP61]])
// RV64-NEXT: [[TMP63:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP64:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[FMOD:%.*]] = frem float [[TMP63]], [[TMP64]]