diff --git a/flang/lib/Optimizer/CodeGen/CodeGen.cpp b/flang/lib/Optimizer/CodeGen/CodeGen.cpp index 6a6a10f632648..9d92086f8d709 100644 --- a/flang/lib/Optimizer/CodeGen/CodeGen.cpp +++ b/flang/lib/Optimizer/CodeGen/CodeGen.cpp @@ -1150,7 +1150,7 @@ computeElementDistance(mlir::Location loc, mlir::Type ptrTy, mlir::Type idxTy, // *)0 + 1)' trick for all types. The generated instructions are optimized // into constant by the first pass of InstCombine, so it should not be a // performance issue. - auto nullPtr = rewriter.create(loc, ptrTy); + auto nullPtr = rewriter.create(loc, ptrTy); auto gep = rewriter.create( loc, ptrTy, nullPtr, llvm::ArrayRef{1}); return rewriter.create(loc, idxTy, gep); @@ -1431,7 +1431,7 @@ struct EmboxCommonConversion : public FIROpConversion { name, Fortran::semantics::typeInfoBuiltinModule)) fir::emitFatalError( loc, "runtime derived type info descriptor was not generated"); - return rewriter.create( + return rewriter.create( loc, ::getVoidPtrType(mod.getContext())); } @@ -1474,7 +1474,7 @@ struct EmboxCommonConversion : public FIROpConversion { } else { // Unlimited polymorphic type descriptor with no record type. Set // type descriptor address to a clean state. - typeDesc = rewriter.create( + typeDesc = rewriter.create( loc, ::getVoidPtrType(mod.getContext())); } } else { @@ -3407,7 +3407,7 @@ struct ZeroOpConversion : public FIROpConversion { mlir::ConversionPatternRewriter &rewriter) const override { mlir::Type ty = convertType(zero.getType()); if (ty.isa()) { - rewriter.replaceOpWithNewOp(zero, ty); + rewriter.replaceOpWithNewOp(zero, ty); } else if (ty.isa()) { rewriter.replaceOpWithNewOp( zero, ty, mlir::IntegerAttr::get(ty, 0)); @@ -3470,7 +3470,7 @@ struct IsPresentOpConversion : public FIROpConversion { }; /// Create value signaling an absent optional argument in a call, e.g. 
-/// `fir.absent !fir.ref` --> `llvm.mlir.null : !llvm.ptr` +/// `fir.absent !fir.ref` --> `llvm.mlir.zero : !llvm.ptr` struct AbsentOpConversion : public FIROpConversion { using FIROpConversion::FIROpConversion; @@ -3485,11 +3485,11 @@ struct AbsentOpConversion : public FIROpConversion { assert(!structTy.isOpaque() && !structTy.getBody().empty()); auto undefStruct = rewriter.create(loc, ty); auto nullField = - rewriter.create(loc, structTy.getBody()[0]); + rewriter.create(loc, structTy.getBody()[0]); rewriter.replaceOpWithNewOp( absent, undefStruct, nullField, 0); } else { - rewriter.replaceOpWithNewOp(absent, ty); + rewriter.replaceOpWithNewOp(absent, ty); } return mlir::success(); } diff --git a/flang/test/Fir/convert-to-llvm.fir b/flang/test/Fir/convert-to-llvm.fir index 52716afe3198d..e39f13ac98268 100644 --- a/flang/test/Fir/convert-to-llvm.fir +++ b/flang/test/Fir/convert-to-llvm.fir @@ -145,7 +145,7 @@ func.func @zero_test_ptr() { return } -// CHECK: %{{.*}} = llvm.mlir.null : !llvm.ptr +// CHECK: %{{.*}} = llvm.mlir.zero : !llvm.ptr // CHECK-NOT: fir.zero_bits // ----- @@ -201,7 +201,7 @@ func.func @test_alloc_and_freemem_one() { } // CHECK-LABEL: llvm.func @test_alloc_and_freemem_one() { -// CHECK-NEXT: %[[NULL:.*]] = llvm.mlir.null : !llvm.ptr +// CHECK-NEXT: %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr // CHECK-NEXT: %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1] // CHECK-NEXT: %[[N:.*]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr to i64 // CHECK-NEXT: llvm.call @malloc(%[[N]]) @@ -220,7 +220,7 @@ func.func @test_alloc_and_freemem_several() { } // CHECK-LABEL: llvm.func @test_alloc_and_freemem_several() { -// CHECK: [[NULL:%.*]] = llvm.mlir.null : !llvm.ptr> +// CHECK: [[NULL:%.*]] = llvm.mlir.zero : !llvm.ptr> // CHECK: [[PTR:%.*]] = llvm.getelementptr [[NULL]][{{.*}}] : (!llvm.ptr>) -> !llvm.ptr> // CHECK: [[N:%.*]] = llvm.ptrtoint [[PTR]] : !llvm.ptr> to i64 // CHECK: [[MALLOC:%.*]] = llvm.call @malloc([[N]]) @@ -238,7 +238,7 @@ func.func 
@test_with_shape(%ncols: index, %nrows: index) { // CHECK-LABEL: llvm.func @test_with_shape // CHECK-SAME: %[[NCOLS:.*]]: i64, %[[NROWS:.*]]: i64 -// CHECK: %[[NULL:.*]] = llvm.mlir.null : !llvm.ptr +// CHECK: %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1] // CHECK: %[[FOUR:.*]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr to i64 // CHECK: %[[DIM1_SIZE:.*]] = llvm.mul %[[FOUR]], %[[NCOLS]] : i64 @@ -258,7 +258,7 @@ func.func @test_string_with_shape(%len: index, %nelems: index) { // CHECK-LABEL: llvm.func @test_string_with_shape // CHECK-SAME: %[[LEN:.*]]: i64, %[[NELEMS:.*]]: i64) -// CHECK: %[[NULL:.*]] = llvm.mlir.null : !llvm.ptr +// CHECK: %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1] // CHECK: %[[ONE:.*]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr to i64 // CHECK: %[[LEN_SIZE:.*]] = llvm.mul %[[ONE]], %[[LEN]] : i64 @@ -750,7 +750,7 @@ func.func @convert_from_i1(%arg0 : i1) { // CHECK-LABEL: convert_from_i1( // CHECK-SAME: %[[ARG0:.*]]: i1 -// CHECK: %{{.*}} = llvm.zext %[[ARG0]] : i1 to i32 +// CHECK: %{{.*}} = llvm.zext %[[ARG0]] : i1 to i32 // ----- @@ -1403,7 +1403,7 @@ func.func @test_absent_i64() -> () { } // CHECK-LABEL: @test_absent_i64 -// CHECK-NEXT: %{{.*}} = llvm.mlir.null : !llvm.ptr +// CHECK-NEXT: %{{.*}} = llvm.mlir.zero : !llvm.ptr // CHECK-NEXT: llvm.return // CHECK-NEXT: } @@ -1412,7 +1412,7 @@ func.func @test_absent_box() -> () { return } // CHECK-LABEL: @test_absent_box -// CHECK-NEXT: %{{.*}} = llvm.mlir.null : !llvm.ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>> +// CHECK-NEXT: %{{.*}} = llvm.mlir.zero : !llvm.ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>> // CHECK-NEXT: llvm.return // CHECK-NEXT: } @@ -1442,7 +1442,7 @@ func.func @absent() -> i1 { // CHECK-LABEL: @absent // CHECK-SAME: () -> i1 -// CHECK-NEXT: %[[ptr:.*]] = llvm.mlir.null : !llvm.ptr +// CHECK-NEXT: %[[ptr:.*]] = llvm.mlir.zero : !llvm.ptr // CHECK-NEXT: 
%[[ret_val:.*]] = llvm.call @is_present(%[[ptr]]) : (!llvm.ptr) -> i1 // CHECK-NEXT: llvm.return %[[ret_val]] : i1 @@ -1525,7 +1525,7 @@ func.func @box_tdesc(%arg0: !fir.box>) { // CHECK-LABEL: llvm.func @box_tdesc( // CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr, array<1 x i{{.*}}>)>>) { -// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][0, 7] : (!llvm.ptr>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr, array<1 x i{{.*}}>)>>) -> !llvm.ptr> +// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][0, 7] : (!llvm.ptr>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr, array<1 x i{{.*}}>)>>) -> !llvm.ptr> // CHECK: %[[LOAD:.*]] = llvm.load %[[GEP]] : !llvm.ptr> // ----- @@ -1547,7 +1547,7 @@ func.func @embox0(%arg0: !fir.ref>) { // CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : i32) : i32 // CHECK: %[[ALLOCA:.*]] = llvm.alloca %[[C1]] x !llvm.struct<(ptr>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})> {alignment = 8 : i64} : (i32) -> !llvm.ptr>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>> // CHECK: %[[TYPE_CODE:.*]] = llvm.mlir.constant(9 : i32) : i32 -// CHECK: %[[NULL:.*]] = llvm.mlir.null : !llvm.ptr +// CHECK: %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1] // CHECK: %[[I64_ELEM_SIZE:.*]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr to i64 // CHECK: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})> @@ -1771,7 +1771,7 @@ func.func @xembox0(%arg0: !fir.ref>) { // CHECK: %[[ALLOCA:.*]] = llvm.alloca %[[ALLOCA_SIZE]] x !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)> {alignment = 8 : i64} : (i32) -> !llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>> // CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i64) : i64 // CHECK: %[[TYPE:.*]] = llvm.mlir.constant(9 : i32) : i32 -// CHECK: %[[NULL:.*]] = 
llvm.mlir.null : !llvm.ptr +// CHECK: %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1] // CHECK: %[[ELEM_LEN_I64:.*]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr to i64 // CHECK: %[[BOX0:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)> @@ -1820,7 +1820,7 @@ func.func @xembox1(%arg0: !fir.ref>>) { // CHECK-LABEL: llvm.func @xembox1(%{{.*}}: !llvm.ptr>) { // CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i64) : i64 -// CHECK: %[[NULL:.*]] = llvm.mlir.null : !llvm.ptr> +// CHECK: %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr> // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1] // CHECK: %[[ELEM_LEN_I64:.*]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr> to i64 // CHECK: %{{.*}} = llvm.insertvalue %[[ELEM_LEN_I64]], %{{.*}}[1] : !llvm.struct<(ptr>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)> @@ -1870,7 +1870,7 @@ func.func private @_QPxb(!fir.box>) // CHECK: %[[ARR_SIZE:.*]] = llvm.mul %[[ARR_SIZE_TMP1]], %[[N2]] : i64 // CHECK: %[[ARR:.*]] = llvm.alloca %[[ARR_SIZE]] x f64 {bindc_name = "arr", in_type = !fir.array, operandSegmentSizes = array, uniq_name = "_QFsbEarr"} : (i64) -> !llvm.ptr // CHECK: %[[TYPE_CODE:.*]] = llvm.mlir.constant(28 : i32) : i32 -// CHECK: %[[NULL:.*]] = llvm.mlir.null : !llvm.ptr +// CHECK: %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1] // CHECK: %[[ELEM_LEN_I64:.*]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr to i64 // CHECK: %[[BOX0:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)> @@ -1949,7 +1949,7 @@ func.func private @_QPtest_dt_callee(%arg0: !fir.box>) // CHECK: %[[ALLOCA_SIZE_X:.*]] = llvm.mlir.constant(1 : i64) : i64 // CHECK: %[[X:.*]] = llvm.alloca %[[ALLOCA_SIZE_X]] x !llvm.array<20 x struct<"_QFtest_dt_sliceTt", (i32, i32)>> {bindc_name = "x", in_type = 
!fir.array<20x!fir.type<_QFtest_dt_sliceTt{i:i32,j:i32}>>, operandSegmentSizes = array, uniq_name = "_QFtest_dt_sliceEx"} : (i64) -> !llvm.ptr>> // CHECK: %[[TYPE_CODE:.*]] = llvm.mlir.constant(9 : i32) : i32 -// CHECK: %[[NULL:.*]] = llvm.mlir.null : !llvm.ptr +// CHECK: %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1] // CHECK: %[[ELEM_LEN_I64:.*]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr to i64 // CHECK: %[[BOX0:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)> @@ -1969,7 +1969,7 @@ func.func private @_QPtest_dt_callee(%arg0: !fir.box>) // CHECK: %[[BOX6:.*]] = llvm.insertvalue %[[F18ADDENDUM_I8]], %[[BOX5]][6] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)> // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i64) : i64 // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i64) : i64 -// CHECK: %[[ELE_TYPE:.*]] = llvm.mlir.null : !llvm.ptr> +// CHECK: %[[ELE_TYPE:.*]] = llvm.mlir.zero : !llvm.ptr> // CHECK: %[[GEP_DTYPE_SIZE:.*]] = llvm.getelementptr %[[ELE_TYPE]][1] : (!llvm.ptr>) -> !llvm.ptr> // CHECK: %[[PTRTOINT_DTYPE_SIZE:.*]] = llvm.ptrtoint %[[GEP_DTYPE_SIZE]] : !llvm.ptr> to i64 // CHECK: %[[ADJUSTED_OFFSET:.*]] = llvm.sub %[[C1]], %[[ONE]] : i64 @@ -2261,7 +2261,7 @@ func.func @test_rebox_1(%arg0: !fir.box>) { //CHECK: %[[SIX:.*]] = llvm.mlir.constant(6 : index) : i64 //CHECK: %[[EIGHTY:.*]] = llvm.mlir.constant(80 : index) : i64 //CHECK: %[[FLOAT_TYPE:.*]] = llvm.mlir.constant(27 : i32) : i32 -//CHECK: %[[NULL:.*]] = llvm.mlir.null : !llvm.ptr +//CHECK: %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr //CHECK: %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1] //CHECK: %[[ELEM_SIZE_I64:.*]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr to i64 //CHECK: %[[RBOX:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> @@ -2334,7 +2334,7 @@ func.func @foo(%arg0: !fir.box} 
//CHECK: %[[COMPONENT_OFFSET_1:.*]] = llvm.mlir.constant(1 : i64) : i64 //CHECK: %[[ELEM_COUNT:.*]] = llvm.mlir.constant(7 : i64) : i64 //CHECK: %[[TYPE_CHAR:.*]] = llvm.mlir.constant(40 : i32) : i32 -//CHECK: %[[NULL:.*]] = llvm.mlir.null : !llvm.ptr +//CHECK: %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr //CHECK: %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1] //CHECK: %[[CHAR_SIZE:.*]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr to i64 //CHECK: %[[ELEM_SIZE:.*]] = llvm.mul %[[CHAR_SIZE]], %[[ELEM_COUNT]] diff --git a/flang/test/Fir/embox-char.fir b/flang/test/Fir/embox-char.fir index 8a7ef396abab0..2e8a826e0b8d5 100644 --- a/flang/test/Fir/embox-char.fir +++ b/flang/test/Fir/embox-char.fir @@ -40,7 +40,7 @@ // CHECK: %[[VAL_30_ST0:.*]] = llvm.load %[[VAL_29]] : !llvm.ptr // CHECK: %[[VAL_31_LEN:.*]] = llvm.sdiv %[[VAL_16_BYTESIZE]], %[[VAL_13_WIDTH]] : i64 // CHECK: %[[VAL_32:.*]] = llvm.mlir.constant(44 : i32) : i32 -// CHECK: %[[VAL_33:.*]] = llvm.mlir.null : !llvm.ptr +// CHECK: %[[VAL_33:.*]] = llvm.mlir.zero : !llvm.ptr // CHECK: %[[VAL_34:.*]] = llvm.getelementptr %[[VAL_33]][1] : (!llvm.ptr) -> !llvm.ptr // CHECK: %[[VAL_35:.*]] = llvm.ptrtoint %[[VAL_34]] : !llvm.ptr to i64 // CHECK: %[[VAL_36_BYTESIZE:.*]] = llvm.mul %[[VAL_35]], %[[VAL_31_LEN]] : i64 @@ -137,7 +137,7 @@ func.func @test_char4(%arg0: !fir.ref, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>>, i64) -> !llvm.ptr // CHECK: %[[VAL_30_ST0:.*]] = llvm.load %[[VAL_29]] : !llvm.ptr // CHECK: %[[VAL_32:.*]] = llvm.mlir.constant(40 : i32) : i32 -// CHECK: %[[VAL_33:.*]] = llvm.mlir.null : !llvm.ptr +// CHECK: %[[VAL_33:.*]] = llvm.mlir.zero : !llvm.ptr // CHECK: %[[VAL_34:.*]] = llvm.getelementptr %[[VAL_33]][1] : (!llvm.ptr) -> !llvm.ptr // CHECK: %[[VAL_35:.*]] = llvm.ptrtoint %[[VAL_34]] : !llvm.ptr to i64 // CHECK: %[[VAL_36_BYTESIZE:.*]] = llvm.mul %[[VAL_35]], %[[VAL_16_BYTESIZE]] : i64 diff --git a/flang/test/Fir/embox-substring.fir b/flang/test/Fir/embox-substring.fir index 
fd0b5923df0ed..4e2395bc7f345 100644 --- a/flang/test/Fir/embox-substring.fir +++ b/flang/test/Fir/embox-substring.fir @@ -30,7 +30,7 @@ func.func private @dump(!fir.box>>) // CHECK-SAME: %[[VAL_1:.*]]: i64) { // CHECK: %[[VAL_5:.*]] = llvm.mlir.constant(1 : index) : i64 // CHECK: llvm.getelementptr -// CHECK: %[[VAL_28:.*]] = llvm.mlir.null : !llvm.ptr +// CHECK: %[[VAL_28:.*]] = llvm.mlir.zero : !llvm.ptr // CHECK: %[[VAL_29:.*]] = llvm.getelementptr %[[VAL_28]][1] : (!llvm.ptr) -> !llvm.ptr // CHECK: %[[VAL_30:.*]] = llvm.ptrtoint %[[VAL_29]] : !llvm.ptr to i64 // CHECK: %[[VAL_31:.*]] = llvm.mul %[[VAL_30]], %[[VAL_1]] : i64 diff --git a/flang/test/Fir/tbaa.fir b/flang/test/Fir/tbaa.fir index 66bd41bad18e7..f8a52b2e98db0 100644 --- a/flang/test/Fir/tbaa.fir +++ b/flang/test/Fir/tbaa.fir @@ -205,7 +205,7 @@ module { // CHECK-LABEL: llvm.mlir.global internal @_QFEx() {addr_space = 0 : i32} : !llvm.struct<(ptr>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)> { // CHECK: %[[VAL_0:.*]] = llvm.mlir.constant(0 : index) : i64 -// CHECK: %[[VAL_1:.*]] = llvm.mlir.null : !llvm.ptr> +// CHECK: %[[VAL_1:.*]] = llvm.mlir.zero : !llvm.ptr> // CHECK: %[[VAL_2:.*]] = llvm.mlir.constant(0 : i64) : i64 // CHECK: %[[VAL_3:.*]] = llvm.mlir.constant(-1 : i32) : i32 // CHECK: %[[VAL_4:.*]] = llvm.mlir.undef : !llvm.struct<(ptr>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)> @@ -223,7 +223,7 @@ module { // CHECK: %[[VAL_16:.*]] = llvm.mlir.constant(1 : i32) : i32 // CHECK: %[[VAL_17:.*]] = llvm.trunc %[[VAL_16]] : i32 to i8 // CHECK: %[[VAL_18:.*]] = llvm.insertvalue %[[VAL_17]], %[[VAL_15]][6] : !llvm.struct<(ptr>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)> -// CHECK: %[[VAL_19:.*]] = llvm.mlir.null : !llvm.ptr +// CHECK: %[[VAL_19:.*]] = llvm.mlir.zero : !llvm.ptr // CHECK: %[[VAL_20:.*]] = llvm.bitcast %[[VAL_19]] : !llvm.ptr to !llvm.ptr // CHECK: %[[VAL_21:.*]] = llvm.insertvalue %[[VAL_20]], 
%[[VAL_18]][8] : !llvm.struct<(ptr>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)> // CHECK: %[[VAL_22:.*]] = llvm.mlir.constant(0 : i64) : i64 diff --git a/mlir/docs/Dialects/LLVM.md b/mlir/docs/Dialects/LLVM.md index fa5ce630ad43b..796c3a7b76844 100644 --- a/mlir/docs/Dialects/LLVM.md +++ b/mlir/docs/Dialects/LLVM.md @@ -103,10 +103,10 @@ Some value kinds in LLVM IR, such as constants and undefs, are uniqued in context and used directly in relevant operations. MLIR does not support such values for thread-safety and concept parsimony reasons. Instead, regular values are produced by dedicated operations that have the corresponding semantics: -[`llvm.mlir.constant`](#llvmmlirconstant-mlirllvmconstantop), -[`llvm.mlir.undef`](#llvmmlirundef-mlirllvmundefop), -[`llvm.mlir.poison`](#llvmmlirpoison-mlirllvmpoisonop), -[`llvm.mlir.null`](#llvmmlirnull-mlirllvmnullop). Note how these operations are +[`llvm.mlir.constant`](#llvmmlirconstant-llvmconstantop), +[`llvm.mlir.undef`](#llvmmlirundef-llvmundefop), +[`llvm.mlir.poison`](#llvmmlirpoison-llvmpoisonop), +[`llvm.mlir.zero`](#llvmmlirzero-llvmzeroop). Note how these operations are prefixed with `mlir.` to indicate that they don't belong to LLVM IR but are only necessary to model it in MLIR. The values produced by these operations are usable just like any other value. @@ -118,11 +118,12 @@ Examples: // by a float. %0 = llvm.mlir.undef : !llvm.struct<(i32, f32)> -// Null pointer to i8. -%1 = llvm.mlir.null : !llvm.ptr +// Null pointer. +%1 = llvm.mlir.zero : !llvm.ptr -// Null pointer to a function with signature void(). -%2 = llvm.mlir.null : !llvm.ptr> +// Create a zero initialized value of structure type with a 32-bit integer +// followed by a float. +%2 = llvm.mlir.zero : !llvm.struct<(i32, f32)> // Constant 42 as i32. 
%3 = llvm.mlir.constant(42 : i32) : i32 diff --git a/mlir/include/mlir/Conversion/LLVMCommon/Pattern.h b/mlir/include/mlir/Conversion/LLVMCommon/Pattern.h index 92f4025ffffff..aea6c38a441d3 100644 --- a/mlir/include/mlir/Conversion/LLVMCommon/Pattern.h +++ b/mlir/include/mlir/Conversion/LLVMCommon/Pattern.h @@ -89,7 +89,7 @@ class ConvertToLLVMPattern : public ConversionPattern { /// `strides[1]` = llvm.mlir.constant(1 : index) : i64 /// `strides[0]` = `sizes[0]` /// %size = llvm.mul `sizes[0]`, `sizes[1]` : i64 - /// %nullptr = llvm.mlir.null : !llvm.ptr + /// %nullptr = llvm.mlir.zero : !llvm.ptr /// %gep = llvm.getelementptr %nullptr[%size] /// : (!llvm.ptr, i64) -> !llvm.ptr /// `sizeBytes` = llvm.ptrtoint %gep : !llvm.ptr to i64 diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td index c0216d1971e58..e4486eb36e51a 100644 --- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td +++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td @@ -1438,33 +1438,6 @@ def LLVM_LLVMFuncOp : LLVM_Op<"func", [ let hasRegionVerifier = 1; } -def LLVM_NullOp - : LLVM_Op<"mlir.null", [Pure]>, - LLVM_Builder<"$res = llvm::ConstantPointerNull::get(" - " cast($_resultType));"> { - let summary = "Defines a value containing a null pointer to LLVM type."; - let description = [{ - Unlike LLVM IR, MLIR does not have first-class null pointers. They must be - explicitly created as SSA values using `llvm.mlir.null`. This operation has - no operands or attributes, and returns a null value of a wrapped LLVM IR - pointer type. - - Examples: - - ```mlir - // Null pointer to i8. - %0 = llvm.mlir.null : !llvm.ptr - - // Null pointer to a function with signature void(). 
- %1 = llvm.mlir.null : !llvm.ptr> - ``` - }]; - - let results = (outs LLVM_AnyPointer:$res); - let builders = [LLVM_OneResultOpBuilder]; - let assemblyFormat = "attr-dict `:` qualified(type($res))"; -} - def LLVM_NoneTokenOp : LLVM_Op<"mlir.none", [Pure]> { let summary = "Defines a value containing an empty token to LLVM type."; diff --git a/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp b/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp index 1c3fbe8057c5b..d9ea60a6749d9 100644 --- a/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp +++ b/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp @@ -421,7 +421,7 @@ class CoroIdOpConversion : public AsyncOpConversionPattern { // Constants for initializing coroutine frame. auto constZero = rewriter.create(loc, rewriter.getI32Type(), 0); - auto nullPtr = rewriter.create(loc, ptrType); + auto nullPtr = rewriter.create(loc, ptrType); // Get coroutine id: @llvm.coro.id. rewriter.replaceOpWithNewOp( @@ -677,7 +677,7 @@ class RuntimeCreateOpLowering : public ConvertOpToLLVMPattern { // %Size = getelementptr %T* null, int 1 // %SizeI = ptrtoint %T* %Size to i64 - auto nullPtr = rewriter.create(loc, storagePtrType); + auto nullPtr = rewriter.create(loc, storagePtrType); auto gep = rewriter.create(loc, storagePtrType, storedType, nullPtr, ArrayRef{1}); diff --git a/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp b/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp index eddf3e9a47d0b..b6d9ec6e013b4 100644 --- a/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp +++ b/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp @@ -1149,7 +1149,7 @@ LogicalResult ConvertLaunchFuncOpToGpuRuntimeCallPattern::matchAndRewrite( : adaptor.getAsyncDependencies().front(); // Create array of pointers to kernel arguments. 
auto kernelParams = generateParamsArray(launchOp, adaptor, rewriter); - auto nullpointer = rewriter.create(loc, llvmPointerPointerType); + auto nullpointer = rewriter.create(loc, llvmPointerPointerType); Value dynamicSharedMemorySize = launchOp.getDynamicSharedMemorySize() ? launchOp.getDynamicSharedMemorySize() : zero; @@ -1211,7 +1211,7 @@ LogicalResult ConvertMemcpyOpToGpuRuntimeCallPattern::matchAndRewrite( Value numElements = getNumElements(rewriter, loc, memRefType, srcDesc); Type elementPtrType = getElementPtrType(memRefType); - Value nullPtr = rewriter.create(loc, elementPtrType); + Value nullPtr = rewriter.create(loc, elementPtrType); Value gepPtr = rewriter.create( loc, elementPtrType, typeConverter->convertType(memRefType.getElementType()), nullPtr, diff --git a/mlir/lib/Conversion/LLVMCommon/Pattern.cpp b/mlir/lib/Conversion/LLVMCommon/Pattern.cpp index e5519df9b0185..40d4c97975a6a 100644 --- a/mlir/lib/Conversion/LLVMCommon/Pattern.cpp +++ b/mlir/lib/Conversion/LLVMCommon/Pattern.cpp @@ -162,7 +162,7 @@ void ConvertToLLVMPattern::getMemRefDescriptorSizes( // Buffer size in bytes. Type elementType = typeConverter->convertType(memRefType.getElementType()); Type elementPtrType = getTypeConverter()->getPointerType(elementType); - Value nullPtr = rewriter.create(loc, elementPtrType); + Value nullPtr = rewriter.create(loc, elementPtrType); Value gepPtr = rewriter.create( loc, elementPtrType, elementType, nullPtr, runningStride); size = rewriter.create(loc, getIndexType(), gepPtr); @@ -180,7 +180,7 @@ Value ConvertToLLVMPattern::getSizeInBytes( // which is a common pattern of getting the size of a type in bytes. 
Type llvmType = typeConverter->convertType(type); auto convertedPtrType = getTypeConverter()->getPointerType(llvmType); - auto nullPtr = rewriter.create(loc, convertedPtrType); + auto nullPtr = rewriter.create(loc, convertedPtrType); auto gep = rewriter.create(loc, convertedPtrType, llvmType, nullPtr, ArrayRef{1}); return rewriter.create(loc, getIndexType(), gep); diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp index 1cf91bde28183..2e0b2582eee96 100644 --- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp +++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp @@ -195,12 +195,12 @@ OpFoldResult ICmpOp::fold(FoldAdaptor adaptor) { getPredicate() == ICmpPredicate::eq); // cmpi(eq/ne, alloca, null) -> false/true - if (getLhs().getDefiningOp() && getRhs().getDefiningOp()) + if (getLhs().getDefiningOp() && getRhs().getDefiningOp()) return getBoolAttribute(getType(), getContext(), getPredicate() == ICmpPredicate::ne); // cmpi(eq/ne, null, alloca) -> cmpi(eq/ne, alloca, null) - if (getLhs().getDefiningOp() && getRhs().getDefiningOp()) { + if (getLhs().getDefiningOp() && getRhs().getDefiningOp()) { Value lhs = getLhs(); Value rhs = getRhs(); getLhsMutable().assign(rhs); @@ -1466,8 +1466,8 @@ LogicalResult LandingpadOp::verify() { << "global addresses expected as operand to " "bitcast used in clauses for landingpad"; } - // NullOp and AddressOfOp allowed - if (value.getDefiningOp()) + // ZeroOp and AddressOfOp allowed + if (value.getDefiningOp()) continue; if (value.getDefiningOp()) continue; diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp index d75601e369a0d..894382cd7f37b 100644 --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp @@ -299,7 +299,7 @@ class NewCallParams final { assert(isInitialized() && "Must initialize before 
genNewCall"); StringRef name = "newSparseTensor"; params[kParamAction] = constantAction(builder, loc, action); - params[kParamPtr] = ptr ? ptr : builder.create(loc, pTp); + params[kParamPtr] = ptr ? ptr : builder.create(loc, pTp); return createFuncCall(builder, loc, name, pTp, params, EmitCInterface::On) .getResult(0); } diff --git a/mlir/lib/Target/LLVMIR/ModuleImport.cpp b/mlir/lib/Target/LLVMIR/ModuleImport.cpp index c6c30880d4f2c..3d93332c4c567 100644 --- a/mlir/lib/Target/LLVMIR/ModuleImport.cpp +++ b/mlir/lib/Target/LLVMIR/ModuleImport.cpp @@ -987,7 +987,7 @@ FailureOr ModuleImport::convertConstant(llvm::Constant *constant) { // Convert null pointer constants. if (auto *nullPtr = dyn_cast(constant)) { Type type = convertType(nullPtr->getType()); - return builder.create(loc, type).getResult(); + return builder.create(loc, type).getResult(); } // Convert none token constants. diff --git a/mlir/test/Conversion/AsyncToLLVM/convert-coro-to-llvm.mlir b/mlir/test/Conversion/AsyncToLLVM/convert-coro-to-llvm.mlir index fb15a2da2836b..8a611cf96f5b5 100644 --- a/mlir/test/Conversion/AsyncToLLVM/convert-coro-to-llvm.mlir +++ b/mlir/test/Conversion/AsyncToLLVM/convert-coro-to-llvm.mlir @@ -3,7 +3,7 @@ // CHECK-LABEL: @coro_id func.func @coro_id() { // CHECK: %0 = llvm.mlir.constant(0 : i32) : i32 - // CHECK: %1 = llvm.mlir.null : !llvm.ptr + // CHECK: %1 = llvm.mlir.zero : !llvm.ptr // CHECK: %2 = llvm.intr.coro.id %0, %1, %1, %1 : (i32, !llvm.ptr, !llvm.ptr, !llvm.ptr) -> !llvm.token %0 = async.coro.id return diff --git a/mlir/test/Conversion/AsyncToLLVM/convert-runtime-to-llvm.mlir b/mlir/test/Conversion/AsyncToLLVM/convert-runtime-to-llvm.mlir index 7ff5a2c4a490d..3672be91bbc07 100644 --- a/mlir/test/Conversion/AsyncToLLVM/convert-runtime-to-llvm.mlir +++ b/mlir/test/Conversion/AsyncToLLVM/convert-runtime-to-llvm.mlir @@ -9,7 +9,7 @@ func.func @create_token() { // CHECK-LABEL: @create_value func.func @create_value() { - // CHECK: %[[NULL:.*]] = llvm.mlir.null : 
!llvm.ptr + // CHECK: %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr // CHECK: %[[OFFSET:.*]] = llvm.getelementptr %[[NULL]][1] // CHECK: %[[SIZE:.*]] = llvm.ptrtoint %[[OFFSET]] // CHECK: %[[VALUE:.*]] = call @mlirAsyncRuntimeCreateValue(%[[SIZE]]) diff --git a/mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir b/mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir index 2cdc4e8dbb1ad..b7bcbc1262d82 100644 --- a/mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir +++ b/mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir @@ -52,7 +52,7 @@ module attributes {gpu.container_module} { // CHECK: llvm.getelementptr %[[MEMREF]][0, 4] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct[[STRUCT_BODY:<.*>]] // CHECK: llvm.getelementptr %[[MEMREF]][0, 5] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct[[STRUCT_BODY:<.*>]] - // CHECK: [[EXTRA_PARAMS:%.*]] = llvm.mlir.null : !llvm.ptr + // CHECK: [[EXTRA_PARAMS:%.*]] = llvm.mlir.zero : !llvm.ptr // CHECK: llvm.call @mgpuLaunchKernel([[FUNC]], [[C8]], [[C8]], [[C8]], // CHECK-SAME: [[C8]], [[C8]], [[C8]], [[C256]], [[STREAM]], diff --git a/mlir/test/Conversion/GPUToVulkan/invoke-vulkan.mlir b/mlir/test/Conversion/GPUToVulkan/invoke-vulkan.mlir index c77bf238c8ca2..f6da6d0a3be34 100644 --- a/mlir/test/Conversion/GPUToVulkan/invoke-vulkan.mlir +++ b/mlir/test/Conversion/GPUToVulkan/invoke-vulkan.mlir @@ -21,7 +21,7 @@ module attributes {gpu.container_module} { llvm.func @malloc(i64) -> !llvm.ptr llvm.func @foo() { %0 = llvm.mlir.constant(12 : index) : i64 - %1 = llvm.mlir.null : !llvm.ptr + %1 = llvm.mlir.zero : !llvm.ptr %2 = llvm.mlir.constant(1 : index) : i64 %3 = llvm.getelementptr %1[%2] : (!llvm.ptr, i64) -> !llvm.ptr, f32 %4 = llvm.ptrtoint %3 : !llvm.ptr to i64 diff --git a/mlir/test/Conversion/GPUToVulkan/typed-pointers.mlir b/mlir/test/Conversion/GPUToVulkan/typed-pointers.mlir index 67bd640e5d44c..2884b33750b32 100644 --- 
a/mlir/test/Conversion/GPUToVulkan/typed-pointers.mlir +++ b/mlir/test/Conversion/GPUToVulkan/typed-pointers.mlir @@ -21,7 +21,7 @@ module attributes {gpu.container_module} { llvm.func @malloc(i64) -> !llvm.ptr llvm.func @foo() { %0 = llvm.mlir.constant(12 : index) : i64 - %1 = llvm.mlir.null : !llvm.ptr + %1 = llvm.mlir.zero : !llvm.ptr %2 = llvm.mlir.constant(1 : index) : i64 %3 = llvm.getelementptr %1[%2] : (!llvm.ptr, i64) -> !llvm.ptr %4 = llvm.ptrtoint %3 : !llvm.ptr to i64 diff --git a/mlir/test/Conversion/MemRefToLLVM/convert-dynamic-memref-ops.mlir b/mlir/test/Conversion/MemRefToLLVM/convert-dynamic-memref-ops.mlir index 447875506b1ed..ea6a235857e63 100644 --- a/mlir/test/Conversion/MemRefToLLVM/convert-dynamic-memref-ops.mlir +++ b/mlir/test/Conversion/MemRefToLLVM/convert-dynamic-memref-ops.mlir @@ -11,7 +11,7 @@ func.func @mixed_alloc(%arg0: index, %arg1: index) -> memref { // CHECK-NEXT: %[[one:.*]] = llvm.mlir.constant(1 : index) : i64 // CHECK-NEXT: %[[st0:.*]] = llvm.mul %[[N]], %[[c42]] : i64 // CHECK-NEXT: %[[sz:.*]] = llvm.mul %[[st0]], %[[M]] : i64 -// CHECK-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr +// CHECK-NEXT: %[[null:.*]] = llvm.mlir.zero : !llvm.ptr // CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 // CHECK-NEXT: %[[sz_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to i64 // CHECK-NEXT: llvm.call @malloc(%[[sz_bytes]]) : (i64) -> !llvm.ptr @@ -60,7 +60,7 @@ func.func @dynamic_alloc(%arg0: index, %arg1: index) -> memref { // CHECK-DAG: %[[N:.*]] = builtin.unrealized_conversion_cast %[[Narg]] // CHECK-NEXT: %[[one:.*]] = llvm.mlir.constant(1 : index) : i64 // CHECK-NEXT: %[[sz:.*]] = llvm.mul %[[N]], %[[M]] : i64 -// CHECK-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr +// CHECK-NEXT: %[[null:.*]] = llvm.mlir.zero : !llvm.ptr // CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 // CHECK-NEXT: %[[sz_bytes:.*]] = llvm.ptrtoint 
%[[gep]] : !llvm.ptr to i64 // CHECK-NEXT: llvm.call @malloc(%[[sz_bytes]]) : (i64) -> !llvm.ptr @@ -128,7 +128,7 @@ func.func @stdlib_aligned_alloc(%N : index) -> memref<32x18xf32> { // ALIGNED-ALLOC-NEXT: %[[sz2:.*]] = llvm.mlir.constant(18 : index) : i64 // ALIGNED-ALLOC-NEXT: %[[one:.*]] = llvm.mlir.constant(1 : index) : i64 // ALIGNED-ALLOC-NEXT: %[[num_elems:.*]] = llvm.mlir.constant(576 : index) : i64 -// ALIGNED-ALLOC-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr +// ALIGNED-ALLOC-NEXT: %[[null:.*]] = llvm.mlir.zero : !llvm.ptr // ALIGNED-ALLOC-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 // ALIGNED-ALLOC-NEXT: %[[bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to i64 // ALIGNED-ALLOC-NEXT: %[[alignment:.*]] = llvm.mlir.constant(32 : index) : i64 @@ -570,7 +570,7 @@ func.func @memref_reshape(%input : memref<2x3xf32>, %shape : memref) { // ALIGNED-ALLOC-LABEL: @memref_of_memref func.func @memref_of_memref() { // Sizeof computation is as usual. - // ALIGNED-ALLOC: %[[NULL:.*]] = llvm.mlir.null + // ALIGNED-ALLOC: %[[NULL:.*]] = llvm.mlir.zero // ALIGNED-ALLOC: %[[PTR:.*]] = llvm.getelementptr // ALIGNED-ALLOC: %[[SIZEOF:.*]] = llvm.ptrtoint @@ -592,7 +592,7 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry> } { // ALIGNED-ALLOC-LABEL: @memref_of_memref_32 func.func @memref_of_memref_32() { // Sizeof computation is as usual. - // ALIGNED-ALLOC: %[[NULL:.*]] = llvm.mlir.null + // ALIGNED-ALLOC: %[[NULL:.*]] = llvm.mlir.zero // ALIGNED-ALLOC: %[[PTR:.*]] = llvm.getelementptr // ALIGNED-ALLOC: %[[SIZEOF:.*]] = llvm.ptrtoint @@ -615,7 +615,7 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry> } { // ALIGNED-ALLOC-LABEL: @memref_of_memref_of_memref func.func @memref_of_memref_of_memref() { // Sizeof computation is as usual, also check the type. 
- // ALIGNED-ALLOC: %[[NULL:.*]] = llvm.mlir.null : !llvm.ptr + // ALIGNED-ALLOC: %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr // ALIGNED-ALLOC: %[[PTR:.*]] = llvm.getelementptr // ALIGNED-ALLOC: %[[SIZEOF:.*]] = llvm.ptrtoint @@ -631,7 +631,7 @@ func.func @memref_of_memref_of_memref() { // ALIGNED-ALLOC-LABEL: @ranked_unranked func.func @ranked_unranked() { - // ALIGNED-ALLOC: llvm.mlir.null + // ALIGNED-ALLOC: llvm.mlir.zero // ALIGNED-ALLOC-SAME: !llvm.ptr // ALIGNED-ALLOC: llvm.getelementptr // ALIGNED-ALLOC: llvm.ptrtoint diff --git a/mlir/test/Conversion/MemRefToLLVM/convert-static-memref-ops.mlir b/mlir/test/Conversion/MemRefToLLVM/convert-static-memref-ops.mlir index 241a868f0ee05..35a6358d8f58b 100644 --- a/mlir/test/Conversion/MemRefToLLVM/convert-static-memref-ops.mlir +++ b/mlir/test/Conversion/MemRefToLLVM/convert-static-memref-ops.mlir @@ -3,7 +3,7 @@ // CHECK-LABEL: func @zero_d_alloc() func.func @zero_d_alloc() -> memref { // CHECK: %[[one:.*]] = llvm.mlir.constant(1 : index) : i64 -// CHECK: %[[null:.*]] = llvm.mlir.null : !llvm.ptr +// CHECK: %[[null:.*]] = llvm.mlir.zero : !llvm.ptr // CHECK: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[one]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 // CHECK: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to i64 // CHECK: %[[ptr:.*]] = llvm.call @malloc(%[[size_bytes]]) : (i64) -> !llvm.ptr @@ -36,7 +36,7 @@ func.func @zero_d_dealloc(%arg0: memref) { func.func @aligned_1d_alloc() -> memref<42xf32> { // CHECK: %[[sz1:.*]] = llvm.mlir.constant(42 : index) : i64 // CHECK: %[[st1:.*]] = llvm.mlir.constant(1 : index) : i64 -// CHECK: %[[null:.*]] = llvm.mlir.null : !llvm.ptr +// CHECK: %[[null:.*]] = llvm.mlir.zero : !llvm.ptr // CHECK: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz1]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 // CHECK: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to i64 // CHECK: %[[alignment:.*]] = llvm.mlir.constant(8 : index) : i64 @@ -63,7 +63,7 @@ func.func @aligned_1d_alloc() -> 
memref<42xf32> { // CHECK-LABEL: func @static_alloc() func.func @static_alloc() -> memref<32x18xf32> { // CHECK: %[[num_elems:.*]] = llvm.mlir.constant(576 : index) : i64 -// CHECK: %[[null:.*]] = llvm.mlir.null : !llvm.ptr +// CHECK: %[[null:.*]] = llvm.mlir.zero : !llvm.ptr // CHECK: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32 // CHECK: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to i64 // CHECK: llvm.call @malloc(%[[size_bytes]]) : (i64) -> !llvm.ptr @@ -207,7 +207,7 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry> } { %0 = memref.alloc(%c1) : memref> // CHECK: %[[CST_S:.*]] = arith.constant 1 : index // CHECK: %[[CST:.*]] = builtin.unrealized_conversion_cast - // CHECK: llvm.mlir.null + // CHECK: llvm.mlir.zero // CHECK: llvm.getelementptr %{{.*}}[[CST]] // CHECK: llvm.ptrtoint %{{.*}} : !llvm.ptr to i32 // CHECK: llvm.ptrtoint %{{.*}} : !llvm.ptr to i32 diff --git a/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir b/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir index 9e44029ad93bd..ae487ef669474 100644 --- a/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir +++ b/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir @@ -464,7 +464,7 @@ func.func @memref_copy_ranked() { // CHECK: [[ONE:%.*]] = llvm.mlir.constant(1 : index) : i64 // CHECK: [[EXTRACT0:%.*]] = llvm.extractvalue {{%.*}}[3, 0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> // CHECK: [[MUL:%.*]] = llvm.mul [[ONE]], [[EXTRACT0]] : i64 - // CHECK: [[NULL:%.*]] = llvm.mlir.null : !llvm.ptr + // CHECK: [[NULL:%.*]] = llvm.mlir.zero : !llvm.ptr // CHECK: [[GEP:%.*]] = llvm.getelementptr [[NULL]][1] : (!llvm.ptr) -> !llvm.ptr, f32 // CHECK: [[PTRTOINT:%.*]] = llvm.ptrtoint [[GEP]] : !llvm.ptr to i64 // CHECK: [[SIZE:%.*]] = llvm.mul [[MUL]], [[PTRTOINT]] : i64 @@ -495,7 +495,7 @@ func.func @memref_copy_contiguous(%in: memref<16x4xi32>, %offset: index) { // CHECK: [[MUL1:%.*]] = llvm.mul 
{{.*}}, [[EXTRACT0]] : i64 // CHECK: [[EXTRACT1:%.*]] = llvm.extractvalue %[[DESC]][3, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: [[MUL2:%.*]] = llvm.mul [[MUL1]], [[EXTRACT1]] : i64 - // CHECK: [[NULL:%.*]] = llvm.mlir.null : !llvm.ptr + // CHECK: [[NULL:%.*]] = llvm.mlir.zero : !llvm.ptr // CHECK: [[GEP:%.*]] = llvm.getelementptr [[NULL]][1] : (!llvm.ptr) -> !llvm.ptr, i32 // CHECK: [[PTRTOINT:%.*]] = llvm.ptrtoint [[GEP]] : !llvm.ptr to i64 // CHECK: [[SIZE:%.*]] = llvm.mul [[MUL2]], [[PTRTOINT]] : i64 @@ -610,7 +610,7 @@ func.func @extract_strided_metadata( // ----- // CHECK-LABEL: func @load_non_temporal( -func.func @load_non_temporal(%arg0 : memref<32xf32, affine_map<(d0) -> (d0)>>) { +func.func @load_non_temporal(%arg0 : memref<32xf32, affine_map<(d0) -> (d0)>>) { %1 = arith.constant 7 : index // CHECK: llvm.load %{{.*}} {nontemporal} : !llvm.ptr -> f32 %2 = memref.load %arg0[%1] {nontemporal = true} : memref<32xf32, affine_map<(d0) -> (d0)>> diff --git a/mlir/test/Conversion/MemRefToLLVM/typed-pointers.mlir b/mlir/test/Conversion/MemRefToLLVM/typed-pointers.mlir index 893c359b7b071..19d053ee7813b 100644 --- a/mlir/test/Conversion/MemRefToLLVM/typed-pointers.mlir +++ b/mlir/test/Conversion/MemRefToLLVM/typed-pointers.mlir @@ -100,7 +100,7 @@ func.func @mixed_alloc(%arg0: index, %arg1: index) -> memref { // CHECK-NEXT: %[[one:.*]] = llvm.mlir.constant(1 : index) : i64 // CHECK-NEXT: %[[st0:.*]] = llvm.mul %[[N]], %[[c42]] : i64 // CHECK-NEXT: %[[sz:.*]] = llvm.mul %[[st0]], %[[M]] : i64 -// CHECK-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr +// CHECK-NEXT: %[[null:.*]] = llvm.mlir.zero : !llvm.ptr // CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz]]] : (!llvm.ptr, i64) -> !llvm.ptr // CHECK-NEXT: %[[sz_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to i64 // CHECK-NEXT: llvm.call @malloc(%[[sz_bytes]]) : (i64) -> !llvm.ptr @@ -140,7 +140,7 @@ func.func @dynamic_alloc(%arg0: index, %arg1: index) -> memref { // 
CHECK-DAG: %[[N:.*]] = builtin.unrealized_conversion_cast %[[Narg]] // CHECK-NEXT: %[[one:.*]] = llvm.mlir.constant(1 : index) : i64 // CHECK-NEXT: %[[sz:.*]] = llvm.mul %[[N]], %[[M]] : i64 -// CHECK-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr +// CHECK-NEXT: %[[null:.*]] = llvm.mlir.zero : !llvm.ptr // CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz]]] : (!llvm.ptr, i64) -> !llvm.ptr // CHECK-NEXT: %[[sz_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to i64 // CHECK-NEXT: llvm.call @malloc(%[[sz_bytes]]) : (i64) -> !llvm.ptr diff --git a/mlir/test/Dialect/GPU/ops.mlir b/mlir/test/Dialect/GPU/ops.mlir index 0d2f52e8adbfc..c638e0b21ab6f 100644 --- a/mlir/test/Dialect/GPU/ops.mlir +++ b/mlir/test/Dialect/GPU/ops.mlir @@ -147,7 +147,7 @@ module attributes {gpu.container_module} { %cstI64 = arith.constant 8 : i64 %c0 = arith.constant 0 : i32 %t0 = gpu.wait async - %lowStream = llvm.mlir.null : !llvm.ptr + %lowStream = llvm.mlir.zero : !llvm.ptr // CHECK: gpu.launch_func @kernels::@kernel_1 blocks in (%{{.*}}, %{{.*}}, %{{.*}}) threads in (%{{.*}}, %{{.*}}, %{{.*}}) args(%{{.*}} : f32, %{{.*}} : memref) gpu.launch_func @kernels::@kernel_1 blocks in (%cst, %cst, %cst) threads in (%cst, %cst, %cst) args(%0 : f32, %1 : memref) diff --git a/mlir/test/Dialect/LLVMIR/callgraph.mlir b/mlir/test/Dialect/LLVMIR/callgraph.mlir index ca1044b8288c4..5be0bc6252624 100644 --- a/mlir/test/Dialect/LLVMIR/callgraph.mlir +++ b/mlir/test/Dialect/LLVMIR/callgraph.mlir @@ -67,8 +67,8 @@ module attributes {"test.name" = "Invoke call"} { %0 = llvm.mlir.constant(0 : i32) : i32 %1 = llvm.mlir.constant(3 : i32) : i32 %2 = llvm.mlir.constant("\01") : !llvm.array<1 x i8> - %3 = llvm.mlir.null : !llvm.ptr - %4 = llvm.mlir.null : !llvm.ptr + %3 = llvm.mlir.zero : !llvm.ptr + %4 = llvm.mlir.zero : !llvm.ptr %5 = llvm.mlir.addressof @_ZTIi : !llvm.ptr %6 = llvm.mlir.constant(1 : i32) : i32 %7 = llvm.alloca %6 x i8 : (i32) -> !llvm.ptr diff --git 
a/mlir/test/Dialect/LLVMIR/canonicalize.mlir b/mlir/test/Dialect/LLVMIR/canonicalize.mlir index ed7efabb44b1a..5e26fa37b681d 100644 --- a/mlir/test/Dialect/LLVMIR/canonicalize.mlir +++ b/mlir/test/Dialect/LLVMIR/canonicalize.mlir @@ -19,7 +19,7 @@ llvm.func @fold_icmp_ne(%arg0 : vector<2xi32>) -> vector<2xi1> { // CHECK-LABEL: @fold_icmp_alloca llvm.func @fold_icmp_alloca() -> i1 { // CHECK: %[[C0:.*]] = llvm.mlir.constant(true) : i1 - %c0 = llvm.mlir.null : !llvm.ptr + %c0 = llvm.mlir.zero : !llvm.ptr %c1 = arith.constant 1 : i64 %0 = llvm.alloca %c1 x i32 : (i64) -> !llvm.ptr %1 = llvm.icmp "ne" %c0, %0 : !llvm.ptr diff --git a/mlir/test/Dialect/LLVMIR/invalid.mlir b/mlir/test/Dialect/LLVMIR/invalid.mlir index fc959009ff20c..3db07f45a5bab 100644 --- a/mlir/test/Dialect/LLVMIR/invalid.mlir +++ b/mlir/test/Dialect/LLVMIR/invalid.mlir @@ -545,9 +545,9 @@ func.func @invalid_vector_type_5(%a : vector<4xf32>, %idx : i32) -> vector<4xf32 // ----- -func.func @null_non_llvm_type() { - // expected-error@+1 {{'llvm.mlir.null' op result #0 must be LLVM pointer type, but got 'i32'}} - llvm.mlir.null : i32 +func.func @zero_non_llvm_type() { + // expected-error@+1 {{'llvm.mlir.zero' op result #0 must be LLVM dialect-compatible type, but got 'tensor<4xi32>'}} + llvm.mlir.zero : tensor<4xi32> } // ----- diff --git a/mlir/test/Dialect/LLVMIR/mem2reg.mlir b/mlir/test/Dialect/LLVMIR/mem2reg.mlir index fc696c5073c30..30ba459d07a49 100644 --- a/mlir/test/Dialect/LLVMIR/mem2reg.mlir +++ b/mlir/test/Dialect/LLVMIR/mem2reg.mlir @@ -16,7 +16,7 @@ llvm.func @default_value() -> i32 { llvm.func @store_of_ptr() { %0 = llvm.mlir.constant(1 : i32) : i32 %1 = llvm.mlir.constant(4 : i32) : i32 - %2 = llvm.mlir.null : !llvm.ptr + %2 = llvm.mlir.zero : !llvm.ptr // CHECK: %[[ALLOCA:.*]] = llvm.alloca %3 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i32) -> !llvm.ptr // CHECK: llvm.store %{{.*}}, %[[ALLOCA]] @@ -55,7 +55,7 @@ llvm.func @unreachable_in_loop() -> i32 { llvm.store %1, %3 {alignment = 
4 : i64} : i32, !llvm.ptr // CHECK: llvm.br ^[[LOOP:.*]] llvm.br ^bb1 - + // CHECK: ^[[LOOP]]: ^bb1: // 2 preds: ^bb0, ^bb3 // CHECK-NEXT: llvm.br ^[[ENDOFLOOP:.*]] @@ -66,7 +66,7 @@ llvm.func @unreachable_in_loop() -> i32 { ^bb2: // no predecessors // CHECK-NEXT: llvm.br ^[[ENDOFLOOP]] llvm.br ^bb3 - + // CHECK: ^[[ENDOFLOOP]]: ^bb3: // 2 preds: ^bb1, ^bb2 // CHECK-NEXT: llvm.br ^[[LOOP]] @@ -421,8 +421,8 @@ llvm.func @ignore_discardable_tree() { %1 = llvm.mlir.constant(0 : i16) : i16 %2 = llvm.mlir.constant(0 : i8) : i8 %3 = llvm.mlir.undef : !llvm.struct<(i8, i16)> - %4 = llvm.insertvalue %2, %3[0] : !llvm.struct<(i8, i16)> - %5 = llvm.insertvalue %1, %4[1] : !llvm.struct<(i8, i16)> + %4 = llvm.insertvalue %2, %3[0] : !llvm.struct<(i8, i16)> + %5 = llvm.insertvalue %1, %4[1] : !llvm.struct<(i8, i16)> %6 = llvm.alloca %0 x !llvm.struct<(i8, i16)> {alignment = 8 : i64} : (i32) -> !llvm.ptr %7 = llvm.getelementptr %6[0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i8, i16)> llvm.intr.lifetime.start 2, %7 : !llvm.ptr diff --git a/mlir/test/Dialect/LLVMIR/roundtrip-typed-pointers.mlir b/mlir/test/Dialect/LLVMIR/roundtrip-typed-pointers.mlir index 7cc5a6deee541..b1d72b690595c 100644 --- a/mlir/test/Dialect/LLVMIR/roundtrip-typed-pointers.mlir +++ b/mlir/test/Dialect/LLVMIR/roundtrip-typed-pointers.mlir @@ -39,10 +39,10 @@ func.func @alloca(%size : i64) { // CHECK-LABEL: @null func.func @null() { - // CHECK: llvm.mlir.null : !llvm.ptr - %0 = llvm.mlir.null : !llvm.ptr - // CHECK: llvm.mlir.null : !llvm.ptr>)>>, i64)>> - %1 = llvm.mlir.null : !llvm.ptr>)>>, i64)>> + // CHECK: llvm.mlir.zero : !llvm.ptr + %0 = llvm.mlir.zero : !llvm.ptr + // CHECK: llvm.mlir.zero : !llvm.ptr>)>>, i64)>> + %1 = llvm.mlir.zero : !llvm.ptr>)>>, i64)>> llvm.return } diff --git a/mlir/test/Dialect/LLVMIR/roundtrip.mlir b/mlir/test/Dialect/LLVMIR/roundtrip.mlir index 558ed3058fe75..654048d873234 100644 --- a/mlir/test/Dialect/LLVMIR/roundtrip.mlir +++ b/mlir/test/Dialect/LLVMIR/roundtrip.mlir 
@@ -329,8 +329,8 @@ func.func @alloca(%size : i64) { // CHECK-LABEL: @null func.func @null() { - // CHECK: llvm.mlir.null : !llvm.ptr - %0 = llvm.mlir.null : !llvm.ptr + // CHECK: llvm.mlir.zero : !llvm.ptr + %0 = llvm.mlir.zero : !llvm.ptr llvm.return } @@ -386,7 +386,7 @@ llvm.func @invokeLandingpad() -> i32 attributes { personality = @__gxx_personali // CHECK: %[[V0:.*]] = llvm.mlir.constant(0 : i32) : i32 // CHECK: %{{.*}} = llvm.mlir.constant(3 : i32) : i32 // CHECK: %[[V1:.*]] = llvm.mlir.constant("\01") : !llvm.array<1 x i8> -// CHECK: %[[V2:.*]] = llvm.mlir.null : !llvm.ptr +// CHECK: %[[V2:.*]] = llvm.mlir.zero : !llvm.ptr // CHECK: %[[V3:.*]] = llvm.mlir.addressof @_ZTIi : !llvm.ptr // CHECK: %[[V4:.*]] = llvm.mlir.constant(1 : i32) : i32 // CHECK: %[[V5:.*]] = llvm.alloca %[[V4]] x i8 : (i32) -> !llvm.ptr @@ -394,7 +394,7 @@ llvm.func @invokeLandingpad() -> i32 attributes { personality = @__gxx_personali %0 = llvm.mlir.constant(0 : i32) : i32 %1 = llvm.mlir.constant(3 : i32) : i32 %2 = llvm.mlir.constant("\01") : !llvm.array<1 x i8> - %3 = llvm.mlir.null : !llvm.ptr + %3 = llvm.mlir.zero : !llvm.ptr %4 = llvm.mlir.addressof @_ZTIi : !llvm.ptr %5 = llvm.mlir.constant(1 : i32) : i32 %6 = llvm.alloca %5 x i8 : (i32) -> !llvm.ptr diff --git a/mlir/test/Dialect/SparseTensor/conversion.mlir b/mlir/test/Dialect/SparseTensor/conversion.mlir index 9cc5cc01544cc..9d337b929fa42 100644 --- a/mlir/test/Dialect/SparseTensor/conversion.mlir +++ b/mlir/test/Dialect/SparseTensor/conversion.mlir @@ -146,7 +146,7 @@ func.func @sparse_new3d(%arg0: !llvm.ptr) -> tensor to memref // CHECK-DAG: memref.store %[[I]], %[[DimSizes0]][%[[C0]]] : memref<2xindex> // CHECK-DAG: memref.store %[[J]], %[[DimSizes0]][%[[C1]]] : memref<2xindex> -// CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr +// CHECK: %[[NP:.*]] = llvm.mlir.zero : !llvm.ptr // CHECK: %[[T:.*]] = call @newSparseTensor(%[[DimSizes]], %[[LvlSizes]], %[[LvlTypes]], %[[Iota]], %[[Iota]], %{{.*}}, %{{.*}}, %{{.*}}, 
%[[Empty]], %[[NP]]) // CHECK: return %[[T]] : !llvm.ptr func.func @sparse_init(%arg0: index, %arg1: index) -> tensor { diff --git a/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir b/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir index 9fb1946d56263..2e361e940f8e1 100644 --- a/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir +++ b/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir @@ -34,7 +34,7 @@ // CHECK-DAG: %[[DimSizesP:.*]] = memref.cast %[[DimSizes]] : memref<1xindex> to memref // CHECK-DAG: %[[LvlSizesP:.*]] = memref.cast %[[LvlSizes]] : memref<1xindex> to memref // CHECK-DAG: %[[IotaP:.*]] = memref.cast %[[Iota]] : memref<1xindex> to memref -// CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr +// CHECK: %[[NP:.*]] = llvm.mlir.zero : !llvm.ptr // CHECK: %[[C:.*]] = call @newSparseTensor(%[[DimSizesP]], %[[LvlSizesP]], %[[LvlTypesP]], %[[IotaP]], %[[IotaP]], %{{.*}}, %{{.*}}, %{{.*}}, %[[EmptyCOO]], %[[NP]]) // CHECK: %[[M:.*]] = memref.alloca() : memref<1xindex> // CHECK: %[[T:.*]] = memref.cast %[[M]] : memref<1xindex> to memref @@ -92,7 +92,7 @@ func.func @sparse_convert_complex(%arg0: tensor<100xcomplex>) -> tensor<100 // CHECK-DAG: %[[DimSizesP:.*]] = memref.cast %[[DimSizes]] : memref<2xindex> to memref // CHECK-DAG: %[[LvlSizesP:.*]] = memref.cast %[[LvlSizes]] : memref<2xindex> to memref // CHECK-DAG: %[[IotaP:.*]] = memref.cast %[[Iota]] : memref<2xindex> to memref -// CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr +// CHECK: %[[NP:.*]] = llvm.mlir.zero : !llvm.ptr // CHECK: %[[C:.*]] = call @newSparseTensor(%[[DimSizesP]], %[[LvlSizesP]], %[[LvlTypesP]], %[[IotaP]], %[[IotaP]], %{{.*}}, %{{.*}}, %{{.*}}, %[[EmptyCOO]], %[[NP]]) // CHECK: %[[M:.*]] = memref.alloca() : memref<2xindex> // CHECK: %[[T:.*]] = memref.cast %[[M]] : memref<2xindex> to memref @@ -146,7 +146,7 @@ func.func @sparse_convert_2d(%arg0: tensor<2x4xf64>) -> tensor<2x4xf64, #CSR> { // CHECK-DAG: %[[DimSizesP:.*]] = memref.cast %[[DimSizes]] : 
memref<2xindex> to memref // CHECK-DAG: %[[LvlSizesP:.*]] = memref.cast %[[LvlSizes]] : memref<2xindex> to memref // CHECK-DAG: %[[IotaP:.*]] = memref.cast %[[Iota]] : memref<2xindex> to memref -// CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr +// CHECK: %[[NP:.*]] = llvm.mlir.zero : !llvm.ptr // CHECK: %[[C:.*]] = call @newSparseTensor(%[[DimSizesP]], %[[LvlSizesP]], %[[LvlTypesP]], %[[IotaP]], %[[IotaP]], %{{.*}}, %{{.*}}, %{{.*}}, %[[EmptyCOO]], %[[NP]]) // CHECK: %[[M:.*]] = memref.alloca() : memref<2xindex> // CHECK: %[[N:.*]] = memref.cast %[[M]] : memref<2xindex> to memref @@ -220,7 +220,7 @@ func.func @sparse_constant_csc() -> tensor<8x7xf32, #CSC>{ // CHECK-DAG: %[[LvlSizesP:.*]] = memref.cast %[[LvlSizes]] : memref<3xindex> to memref // CHECK-DAG: %[[Lvl2DimP:.*]] = memref.cast %[[Lvl2Dim]] : memref<3xindex> to memref // CHECK-DAG: %[[Dim2LvlP:.*]] = memref.cast %[[Dim2Lvl]] : memref<3xindex> to memref -// CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr +// CHECK: %[[NP:.*]] = llvm.mlir.zero : !llvm.ptr // CHECK: %[[C:.*]] = call @newSparseTensor(%[[DimSizesP]], %[[LvlSizesP]], %[[LvlTypesP]], %[[Lvl2DimP]], %[[Dim2LvlP]], %{{.*}}, %{{.*}}, %{{.*}}, %[[EmptyCOO]], %[[NP]]) // CHECK: %[[M:.*]] = memref.alloca() : memref<3xindex> // CHECK: %[[N:.*]] = memref.cast %[[M]] : memref<3xindex> to memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_concat.mlir b/mlir/test/Dialect/SparseTensor/sparse_concat.mlir index 5f412e59dba9f..a243157559443 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_concat.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_concat.mlir @@ -99,7 +99,7 @@ func.func @concat_mix_dense(%arg0: tensor<2x4xf64>, %arg1: tensor<3x4xf64, #Spar // CHECK-DAG: %[[IotaP_0:.*]] = memref.cast %[[Iota_0]] : memref<2xindex> to memref // CHECK-DAG: memref.store %[[TMP_c0]], %[[Iota_0]][%[[TMP_c0]]] : memref<2xindex> // CHECK-DAG: memref.store %[[TMP_c1]], %[[Iota_0]][%[[TMP_c1]]] : memref<2xindex> -// CHECK-DAG: %[[NullPtr:.*]] = llvm.mlir.null : 
!llvm.ptr +// CHECK-DAG: %[[NullPtr:.*]] = llvm.mlir.zero : !llvm.ptr // CHECK: %[[TMP_7:.*]] = call @newSparseTensor(%[[DimSizesP_0]], %[[LvlSizesP_0]], %[[LvlTypesP_0]], %[[IotaP_0]], %[[IotaP_0]], %[[TMP_c0_i32]], %[[TMP_c0_i32]], %[[TMP_c1_i32]], %[[TMP_c4_i32]], %[[NullPtr]]) // CHECK: %[[TMP_9:.*]] = memref.alloca() : memref<2xindex> // CHECK: %[[TMP_10:.*]] = memref.cast %[[TMP_9]] : memref<2xindex> to memref @@ -189,7 +189,7 @@ func.func @concat_mix_sparse(%arg0: tensor<2x4xf64>, %arg1: tensor<3x4xf64, #Spa // CHECK-DAG: %[[Dim2LvlP_0:.*]] = memref.cast %[[Dim2Lvl_0]] : memref<2xindex> to memref // CHECK-DAG: memref.store %[[TMP_c1]], %[[Dim2Lvl_0]][%[[TMP_c0]]] : memref<2xindex> // CHECK-DAG: memref.store %[[TMP_c0]], %[[Dim2Lvl_0]][%[[TMP_c1]]] : memref<2xindex> -// CHECK-DAG: %[[NullPtr:.*]] = llvm.mlir.null : !llvm.ptr +// CHECK-DAG: %[[NullPtr:.*]] = llvm.mlir.zero : !llvm.ptr // CHECK: %[[TMP_7:.*]] = call @newSparseTensor(%[[DimSizesP_0]], %[[LvlSizesP_0]], %[[LvlTypesP_0]], %[[Lvl2DimP_0]], %[[Dim2LvlP_0]], %[[TMP_c0_i32]], %[[TMP_c0_i32]], %[[TMP_c1_i32]], %[[TMP_c4_i32]], %[[NullPtr]]) // CHECK: %[[TMP_9:.*]] = memref.alloca() : memref<2xindex> // CHECK: %[[TMP_10:.*]] = memref.cast %[[TMP_9]] : memref<2xindex> to memref @@ -404,7 +404,7 @@ func.func @concat_mix_dense_perm_dim1_dyn(%arg0: tensor<3x2xf64>, %arg1: tensor< // CHECK-DAG: %[[Dim2LvlP_0:.*]] = memref.cast %[[Dim2Lvl_0]] : memref<2xindex> to memref // CHECK-DAG: memref.store %[[TMP_c1]], %[[Dim2Lvl_0]][%[[TMP_c0]]] : memref<2xindex> // CHECK-DAG: memref.store %[[TMP_c0]], %[[Dim2Lvl_0]][%[[TMP_c1]]] : memref<2xindex> -// CHECK-DAG: %[[NullPtr:.*]] = llvm.mlir.null : !llvm.ptr +// CHECK-DAG: %[[NullPtr:.*]] = llvm.mlir.zero : !llvm.ptr // CHECK: %[[TMP_7:.*]] = call @newSparseTensor(%[[DimSizesP_0]], %[[LvlSizesP_0]], %[[LvlTypesP_0]], %[[Lvl2DimP_0]], %[[Dim2LvlP_0]], %[[TMP_c0_i32]], %[[TMP_c0_i32]], %[[TMP_c1_i32]], %[[TMP_c0_i32]], %[[NullPtr]]) // CHECK: %[[Values_r:.*]] = call 
@sparseValuesF64(%[[TMP_7]]) : (!llvm.ptr) -> memref // CHECK: %[[Values:.*]] = memref.reshape %[[Values_r]] diff --git a/mlir/test/Dialect/SparseTensor/sparse_fill_zero.mlir b/mlir/test/Dialect/SparseTensor/sparse_fill_zero.mlir index 7a4989304b5be..e1fe5d85e72ec 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_fill_zero.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_fill_zero.mlir @@ -31,7 +31,7 @@ // CHECK-DAG: %[[IotaP:.*]] = memref.cast %[[Iota]] : memref<2xindex> to memref // CHECK-DAG: memref.store %[[I0]], %[[Iota]]{{\[}}%[[I0]]] : memref<2xindex> // CHECK-DAG: memref.store %[[I1]], %[[Iota]]{{\[}}%[[I1]]] : memref<2xindex> -// CHECK-DAG: %[[NullPtr:.*]] = llvm.mlir.null : !llvm.ptr +// CHECK-DAG: %[[NullPtr:.*]] = llvm.mlir.zero : !llvm.ptr // CHECK: %[[VAL_19:.*]] = call @newSparseTensor(%[[DimSizesP]], %[[LvlSizesP]], %[[LvlTypesP]], %[[IotaP]], %[[IotaP]], %[[C0]], %[[C0]], %[[C1]], %[[C0]], %[[NullPtr]]) : (memref, memref, memref, memref, memref, i32, i32, i32, i32, !llvm.ptr) -> !llvm.ptr // CHECK: %[[VAL_20:.*]] = memref.alloc() : memref<300xf64> // CHECK: %[[VAL_21:.*]] = memref.cast %[[VAL_20]] : memref<300xf64> to memref diff --git a/mlir/test/Target/LLVMIR/Import/constant.ll b/mlir/test/Target/LLVMIR/Import/constant.ll index 85cde7b1bcfef..620c1d3772d05 100644 --- a/mlir/test/Target/LLVMIR/Import/constant.ll +++ b/mlir/test/Target/LLVMIR/Import/constant.ll @@ -49,7 +49,7 @@ define void @undef_constant(i32 %arg0) { ; CHECK-LABEL: @null_constant define ptr @null_constant() { - ; CHECK: %[[NULL:[0-9]+]] = llvm.mlir.null : !llvm.ptr + ; CHECK: %[[NULL:[0-9]+]] = llvm.mlir.zero : !llvm.ptr ; CHECK: llvm.return %[[NULL]] : !llvm.ptr ret ptr null } @@ -180,7 +180,7 @@ define i32 @function_address_after_def() { ; CHECK-DAG: %[[CHAIN1:.+]] = llvm.insertvalue %[[C2]], %[[CHAIN0]][1] ; CHECK-DAG: %[[CHAIN2:.+]] = llvm.insertvalue %[[C3]], %[[CHAIN1]][2] ; CHECK-DAG: %[[CHAIN3:.+]] = llvm.insertvalue %[[C4]], %[[CHAIN2]][3] -; CHECK-DAG: %[[NULL:.+]] = 
llvm.mlir.null : !llvm.ptr +; CHECK-DAG: %[[NULL:.+]] = llvm.mlir.zero : !llvm.ptr ; CHECK-DAG: %[[ROOT:.+]] = llvm.mlir.undef : !llvm.struct<"nested_agg_type", (struct<"simple_agg_type", (i32, i8, i16, i32)>, ptr)> ; CHECK-DAG: %[[CHAIN4:.+]] = llvm.insertvalue %[[CHAIN3]], %[[ROOT]][0] ; CHECK-DAG: %[[CHAIN5:.+]] = llvm.insertvalue %[[NULL]], %[[CHAIN4]][1] @@ -188,7 +188,7 @@ define i32 @function_address_after_def() { %nested_agg_type = type {%simple_agg_type, ptr} @nested_agg = global %nested_agg_type { %simple_agg_type{i32 1, i8 2, i16 3, i32 4}, ptr null } -; CHECK-DAG: %[[NULL:.+]] = llvm.mlir.null : !llvm.ptr +; CHECK-DAG: %[[NULL:.+]] = llvm.mlir.zero : !llvm.ptr ; CHECK-DAG: %[[ROOT:.+]] = llvm.mlir.undef : !llvm.vec<2 x ptr> ; CHECK-DAG: %[[P0:.+]] = llvm.mlir.constant(0 : i32) : i32 ; CHECK-DAG: %[[CHAIN0:.+]] = llvm.insertelement %[[NULL]], %[[ROOT]][%[[P0]] : i32] : !llvm.vec<2 x ptr> diff --git a/mlir/test/Target/LLVMIR/Import/exception.ll b/mlir/test/Target/LLVMIR/Import/exception.ll index 944e5de6badd9..1bdfa6f3646e9 100644 --- a/mlir/test/Target/LLVMIR/Import/exception.ll +++ b/mlir/test/Target/LLVMIR/Import/exception.ll @@ -72,7 +72,7 @@ entry: ; CHECK: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : i32 ; CHECK: %[[c1:.*]] = llvm.mlir.constant(1 : i32) : i32 ; CHECK: %[[c2:.*]] = llvm.mlir.constant(2 : i32) : i32 - ; CHECK: %[[c20:.*]] = llvm.mlir.constant(20 : i32) : i32 + ; CHECK: %[[c20:.*]] = llvm.mlir.constant(20 : i32) : i32 ; CHECK: llvm.cond_br %[[cond]], ^[[bb1:.*]], ^[[bb2:.*]] br i1 %cond, label %call, label %nocall ; CHECK: ^[[bb1]]: @@ -111,7 +111,7 @@ declare void @f2({ptr, i32}) ; CHECK-LABEL: @landingpad_dominance define void @landingpad_dominance() personality ptr @__gxx_personality_v0 { entry: - ; CHECK: %[[null:.*]] = llvm.mlir.null : !llvm.ptr + ; CHECK: %[[null:.*]] = llvm.mlir.zero : !llvm.ptr ; CHECK: %[[c1:.*]] = llvm.mlir.constant(0 : i32) : i32 ; CHECK: %[[undef:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, i32)> ; CHECK: 
%[[tmpstruct:.*]] = llvm.insertvalue %[[null]], %[[undef]][0] : !llvm.struct<(ptr, i32)> diff --git a/mlir/test/Target/LLVMIR/Import/instructions.ll b/mlir/test/Target/LLVMIR/Import/instructions.ll index 3f5ade4f15735..b72e72c8392d9 100644 --- a/mlir/test/Target/LLVMIR/Import/instructions.ll +++ b/mlir/test/Target/LLVMIR/Import/instructions.ll @@ -182,7 +182,7 @@ define void @integer_extension_and_truncation(i32 %arg1) { ; CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]] ; CHECK-SAME: %[[ARG2:[a-zA-Z0-9]+]] define ptr @pointer_casts(ptr %arg1, i64 %arg2) { - ; CHECK: %[[NULL:[0-9]+]] = llvm.mlir.null : !llvm.ptr + ; CHECK: %[[NULL:[0-9]+]] = llvm.mlir.zero : !llvm.ptr ; CHECK: llvm.ptrtoint %[[ARG1]] : !llvm.ptr to i64 ; CHECK: llvm.inttoptr %[[ARG2]] : i64 to !llvm.ptr ; CHECK: llvm.bitcast %[[ARG1]] : !llvm.ptr to !llvm.ptr diff --git a/mlir/test/Target/LLVMIR/Import/zeroinitializer.ll b/mlir/test/Target/LLVMIR/Import/zeroinitializer.ll index 41933dcbe7cb1..da3eae3e9e337 100644 --- a/mlir/test/Target/LLVMIR/Import/zeroinitializer.ll +++ b/mlir/test/Target/LLVMIR/Import/zeroinitializer.ll @@ -4,7 +4,7 @@ ; CHECK: llvm.mlir.global external @D() ; CHECK-SAME: !llvm.struct<"Domain", (ptr, ptr)> -; CHECK: %[[E0:.+]] = llvm.mlir.null : !llvm.ptr +; CHECK: %[[E0:.+]] = llvm.mlir.zero : !llvm.ptr ; CHECK: %[[ROOT:.+]] = llvm.mlir.undef : !llvm.struct<"Domain", (ptr, ptr)> ; CHECK: %[[CHAIN:.+]] = llvm.insertvalue %[[E0]], %[[ROOT]][0] ; CHECK: %[[RES:.+]] = llvm.insertvalue %[[E0]], %[[CHAIN]][1] diff --git a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir index 6bbd761b6e613..d23991b65523f 100644 --- a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir +++ b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir @@ -640,14 +640,14 @@ llvm.func @ushl_sat_test(%arg0: i32, %arg1: i32, %arg2: vector<8xi32>, %arg3: ve // CHECK-LABEL: @coro_id llvm.func @coro_id(%arg0: i32, %arg1: !llvm.ptr) { // CHECK: call token @llvm.coro.id - %null = llvm.mlir.null : 
!llvm.ptr + %null = llvm.mlir.zero : !llvm.ptr llvm.intr.coro.id %arg0, %arg1, %arg1, %null : (i32, !llvm.ptr, !llvm.ptr, !llvm.ptr) -> !llvm.token llvm.return } // CHECK-LABEL: @coro_begin llvm.func @coro_begin(%arg0: i32, %arg1: !llvm.ptr) { - %null = llvm.mlir.null : !llvm.ptr + %null = llvm.mlir.zero : !llvm.ptr %token = llvm.intr.coro.id %arg0, %arg1, %arg1, %null : (i32, !llvm.ptr, !llvm.ptr, !llvm.ptr) -> !llvm.token // CHECK: call ptr @llvm.coro.begin llvm.intr.coro.begin %token, %arg1 : (!llvm.token, !llvm.ptr) -> !llvm.ptr @@ -681,7 +681,7 @@ llvm.func @coro_save(%arg0: !llvm.ptr) { // CHECK-LABEL: @coro_suspend llvm.func @coro_suspend(%arg0: i32, %arg1 : i1, %arg2 : !llvm.ptr) { - %null = llvm.mlir.null : !llvm.ptr + %null = llvm.mlir.zero : !llvm.ptr %token = llvm.intr.coro.id %arg0, %arg2, %arg2, %null : (i32, !llvm.ptr, !llvm.ptr, !llvm.ptr) -> !llvm.token // CHECK: call i8 @llvm.coro.suspend %0 = llvm.intr.coro.suspend %token, %arg1 : i8 @@ -698,7 +698,7 @@ llvm.func @coro_end(%arg0: !llvm.ptr, %arg1 : i1) { // CHECK-LABEL: @coro_free llvm.func @coro_free(%arg0: i32, %arg1 : !llvm.ptr) { - %null = llvm.mlir.null : !llvm.ptr + %null = llvm.mlir.zero : !llvm.ptr %token = llvm.intr.coro.id %arg0, %arg1, %arg1, %null : (i32, !llvm.ptr, !llvm.ptr, !llvm.ptr) -> !llvm.token // CHECK: call ptr @llvm.coro.free %0 = llvm.intr.coro.free %token, %arg1 : (!llvm.token, !llvm.ptr) -> !llvm.ptr diff --git a/mlir/test/Target/LLVMIR/llvmir.mlir b/mlir/test/Target/LLVMIR/llvmir.mlir index d08f5b2776ee1..8317fae7dd710 100644 --- a/mlir/test/Target/LLVMIR/llvmir.mlir +++ b/mlir/test/Target/LLVMIR/llvmir.mlir @@ -1438,7 +1438,7 @@ llvm.func @integer_extension_and_truncation(%a : i32) { // Check that the auxiliary `null` operation is converted into a `null` value. 
// CHECK-LABEL: @null llvm.func @null() -> !llvm.ptr { - %0 = llvm.mlir.null : !llvm.ptr + %0 = llvm.mlir.zero : !llvm.ptr // CHECK: ret ptr null llvm.return %0 : !llvm.ptr } @@ -1536,7 +1536,7 @@ llvm.func @invokeLandingpad() -> i32 attributes { personality = @__gxx_personali %1 = llvm.mlir.constant(dense<0> : vector<1xi8>) : !llvm.array<1 x i8> %2 = llvm.mlir.addressof @_ZTIi : !llvm.ptr> %3 = llvm.bitcast %2 : !llvm.ptr> to !llvm.ptr - %4 = llvm.mlir.null : !llvm.ptr> + %4 = llvm.mlir.zero : !llvm.ptr> %5 = llvm.mlir.constant(1 : i32) : i32 %6 = llvm.alloca %5 x i8 : (i32) -> !llvm.ptr // CHECK: invoke void @foo(ptr %[[a1]]) diff --git a/mlir/test/Target/LLVMIR/omptarget-declare-target-llvm-host.mlir b/mlir/test/Target/LLVMIR/omptarget-declare-target-llvm-host.mlir index 709af26f1d3df..82aff6f131ef1 100644 --- a/mlir/test/Target/LLVMIR/omptarget-declare-target-llvm-host.mlir +++ b/mlir/test/Target/LLVMIR/omptarget-declare-target-llvm-host.mlir @@ -1,31 +1,31 @@ // RUN: mlir-translate -mlir-to-llvmir %s | FileCheck %s -// CHECK-DAG: %struct.__tgt_offload_entry = type { ptr, ptr, i64, i32, i32 } +// CHECK-DAG: %struct.__tgt_offload_entry = type { ptr, ptr, i64, i32, i32 } // CHECK-DAG: !omp_offload.info = !{!{{.*}}} module attributes {llvm.target_triple = "x86_64-unknown-linux-gnu", omp.is_target_device = false} { // CHECK-DAG: @_QMtest_0Earray_1d = global [3 x i32] [i32 1, i32 2, i32 3] - // CHECK-DAG: @_QMtest_0Earray_1d_decl_tgt_ref_ptr = weak global ptr @_QMtest_0Earray_1d + // CHECK-DAG: @_QMtest_0Earray_1d_decl_tgt_ref_ptr = weak global ptr @_QMtest_0Earray_1d // CHECK-DAG: @.omp_offloading.entry_name{{.*}} = internal unnamed_addr constant [36 x i8] c"_QMtest_0Earray_1d_decl_tgt_ref_ptr\00" // CHECK-DAG: @.omp_offloading.entry._QMtest_0Earray_1d_decl_tgt_ref_ptr = weak constant %struct.__tgt_offload_entry { ptr @_QMtest_0Earray_1d_decl_tgt_ref_ptr, ptr @.omp_offloading.entry_name{{.*}}, i64 8, i32 1, i32 0 }, section "omp_offloading_entries", align 1 // 
CHECK-DAG: !{{.*}} = !{i32 {{.*}}, !"_QMtest_0Earray_1d_decl_tgt_ref_ptr", i32 {{.*}}, i32 {{.*}}} llvm.mlir.global external @_QMtest_0Earray_1d(dense<[1, 2, 3]> : tensor<3xi32>) {addr_space = 0 : i32, omp.declare_target = #omp.declaretarget} : !llvm.array<3 x i32> // CHECK-DAG: @_QMtest_0Earray_2d = global [2 x [2 x i32]] {{.*}} - // CHECK-DAG: @_QMtest_0Earray_2d_decl_tgt_ref_ptr = weak global ptr @_QMtest_0Earray_2d + // CHECK-DAG: @_QMtest_0Earray_2d_decl_tgt_ref_ptr = weak global ptr @_QMtest_0Earray_2d // CHECK-DAG: @.omp_offloading.entry_name{{.*}} = internal unnamed_addr constant [36 x i8] c"_QMtest_0Earray_2d_decl_tgt_ref_ptr\00" // CHECK-DAG: @.omp_offloading.entry._QMtest_0Earray_2d_decl_tgt_ref_ptr = weak constant %struct.__tgt_offload_entry { ptr @_QMtest_0Earray_2d_decl_tgt_ref_ptr, ptr @.omp_offloading.entry_name{{.*}}, i64 8, i32 1, i32 0 }, section "omp_offloading_entries", align 1 // CHECK-DAG: !{{.*}} = !{i32 {{.*}}, !"_QMtest_0Earray_2d_decl_tgt_ref_ptr", i32 {{.*}}, i32 {{.*}}} llvm.mlir.global external @_QMtest_0Earray_2d() {addr_space = 0 : i32, omp.declare_target = #omp.declaretarget} : !llvm.array<2 x array<2 x i32>> { %0 = llvm.mlir.undef : !llvm.array<2 x array<2 x i32>> %1 = llvm.mlir.constant(1 : i32) : i32 - %2 = llvm.insertvalue %1, %0[0, 0] : !llvm.array<2 x array<2 x i32>> + %2 = llvm.insertvalue %1, %0[0, 0] : !llvm.array<2 x array<2 x i32>> %3 = llvm.mlir.constant(2 : i32) : i32 - %4 = llvm.insertvalue %3, %2[0, 1] : !llvm.array<2 x array<2 x i32>> + %4 = llvm.insertvalue %3, %2[0, 1] : !llvm.array<2 x array<2 x i32>> %5 = llvm.mlir.constant(3 : i32) : i32 - %6 = llvm.insertvalue %5, %4[1, 0] : !llvm.array<2 x array<2 x i32>> + %6 = llvm.insertvalue %5, %4[1, 0] : !llvm.array<2 x array<2 x i32>> %7 = llvm.mlir.constant(4 : i32) : i32 - %8 = llvm.insertvalue %7, %6[1, 1] : !llvm.array<2 x array<2 x i32>> + %8 = llvm.insertvalue %7, %6[1, 1] : !llvm.array<2 x array<2 x i32>> %9 = llvm.mlir.constant(2 : index) : i64 %10 = 
llvm.mlir.constant(2 : index) : i64 llvm.return %8 : !llvm.array<2 x array<2 x i32>> @@ -70,7 +70,7 @@ module attributes {llvm.target_triple = "x86_64-unknown-linux-gnu", omp.is_targe } // CHECK-DAG: @_QMtest_0Edata_int = global i32 1 - // CHECK-DAG: @_QMtest_0Edata_int_decl_tgt_ref_ptr = weak global ptr @_QMtest_0Edata_int + // CHECK-DAG: @_QMtest_0Edata_int_decl_tgt_ref_ptr = weak global ptr @_QMtest_0Edata_int // CHECK-DAG: @.omp_offloading.entry_name{{.*}} = internal unnamed_addr constant [36 x i8] c"_QMtest_0Edata_int_decl_tgt_ref_ptr\00" // CHECK-DAG: @.omp_offloading.entry._QMtest_0Edata_int_decl_tgt_ref_ptr = weak constant %struct.__tgt_offload_entry { ptr @_QMtest_0Edata_int_decl_tgt_ref_ptr, ptr @.omp_offloading.entry_name{{.*}}, i64 8, i32 1, i32 0 }, section "omp_offloading_entries", align 1 // CHECK-DAG: !{{.*}} = !{i32 {{.*}}, !"_QMtest_0Edata_int_decl_tgt_ref_ptr", i32 {{.*}}, i32 {{.*}}} @@ -103,28 +103,28 @@ module attributes {llvm.target_triple = "x86_64-unknown-linux-gnu", omp.is_targe // CHECK-DAG: @.omp_offloading.entry._QMtest_0Ept1_decl_tgt_ref_ptr = weak constant %struct.__tgt_offload_entry { ptr @_QMtest_0Ept1_decl_tgt_ref_ptr, ptr @.omp_offloading.entry_name{{.*}}, i64 8, i32 1, i32 0 }, section "omp_offloading_entries", align 1 // CHECK-DAG: !{{.*}} = !{i32 {{.*}}, !"_QMtest_0Ept1_decl_tgt_ref_ptr", i32 {{.*}}, i32 {{.*}}} llvm.mlir.global external @_QMtest_0Ept1() {addr_space = 0 : i32, omp.declare_target = #omp.declaretarget} : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> { - %0 = llvm.mlir.null : !llvm.ptr + %0 = llvm.mlir.zero : !llvm.ptr %1 = llvm.mlir.constant(9 : i32) : i32 - %2 = llvm.mlir.null : !llvm.ptr + %2 = llvm.mlir.zero : !llvm.ptr %3 = llvm.getelementptr %2[1] : (!llvm.ptr) -> !llvm.ptr %4 = llvm.ptrtoint %3 : !llvm.ptr to i64 %5 = llvm.mlir.undef : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> - %6 = llvm.insertvalue %4, %5[1] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> + %6 = llvm.insertvalue %4, %5[1] : 
!llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> %7 = llvm.mlir.constant(20180515 : i32) : i32 - %8 = llvm.insertvalue %7, %6[2] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> + %8 = llvm.insertvalue %7, %6[2] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> %9 = llvm.mlir.constant(0 : i32) : i32 %10 = llvm.trunc %9 : i32 to i8 - %11 = llvm.insertvalue %10, %8[3] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> + %11 = llvm.insertvalue %10, %8[3] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> %12 = llvm.trunc %1 : i32 to i8 - %13 = llvm.insertvalue %12, %11[4] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> + %13 = llvm.insertvalue %12, %11[4] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> %14 = llvm.mlir.constant(1 : i32) : i32 %15 = llvm.trunc %14 : i32 to i8 - %16 = llvm.insertvalue %15, %13[5] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> + %16 = llvm.insertvalue %15, %13[5] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> %17 = llvm.mlir.constant(0 : i32) : i32 %18 = llvm.trunc %17 : i32 to i8 - %19 = llvm.insertvalue %18, %16[6] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> + %19 = llvm.insertvalue %18, %16[6] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> %20 = llvm.bitcast %0 : !llvm.ptr to !llvm.ptr - %21 = llvm.insertvalue %20, %19[0] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> + %21 = llvm.insertvalue %20, %19[0] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> llvm.return %21 : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> } diff --git a/mlir/test/Transforms/test-convert-call-op.mlir b/mlir/test/Transforms/test-convert-call-op.mlir index f8da5f91d3c8f..dd7924edc88b8 100644 --- a/mlir/test/Transforms/test-convert-call-op.mlir +++ b/mlir/test/Transforms/test-convert-call-op.mlir @@ -9,6 +9,6 @@ func.func @caller() -> i32 { %out = call @callee(%arg) : (!test.test_type) -> i32 return %out : i32 } -// CHECK-NEXT: [[ARG:%.*]] = llvm.mlir.null : !llvm.ptr +// CHECK-NEXT: [[ARG:%.*]] = llvm.mlir.zero : !llvm.ptr // CHECK-NEXT: [[OUT:%.*]] = llvm.call 
@callee([[ARG]]) // CHECK-SAME: : (!llvm.ptr) -> i32 diff --git a/mlir/test/lib/Conversion/FuncToLLVM/TestConvertCallOp.cpp b/mlir/test/lib/Conversion/FuncToLLVM/TestConvertCallOp.cpp index c741a656d13f7..9b3a1534a598e 100644 --- a/mlir/test/lib/Conversion/FuncToLLVM/TestConvertCallOp.cpp +++ b/mlir/test/lib/Conversion/FuncToLLVM/TestConvertCallOp.cpp @@ -27,7 +27,7 @@ class TestTypeProducerOpConverter LogicalResult matchAndRewrite(test::TestTypeProducerOp op, OpAdaptor adaptor, ConversionPatternRewriter &rewriter) const override { - rewriter.replaceOpWithNewOp(op, getVoidPtrType()); + rewriter.replaceOpWithNewOp(op, getVoidPtrType()); return success(); } };