[mlir][sparse] More allocate -> empty tensor migration #66720

Merged: 1 commit, Sep 19, 2023
@@ -830,6 +830,7 @@ class SparseTensorNewConverter : public OpConversionPattern<NewOp> {
};

/// Sparse conversion rule for the alloc operator.
+/// TODO(springerm): remove when bufferization.alloc_tensor is gone
class SparseTensorAllocConverter
: public OpConversionPattern<bufferization::AllocTensorOp> {
public:
@@ -864,6 +865,37 @@ class SparseTensorAllocConverter
}
};
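
For context, the deprecated pattern above still matches IR of the form below (a minimal sketch; the #SpVec encoding is assumed to be declared as in the test files further down):

  %a = bufferization.alloc_tensor() : tensor<32xf64, #SpVec>

These are exactly the uses this migration rewrites to tensor.empty.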

+/// Sparse conversion rule for the empty tensor.
+class SparseTensorEmptyConverter
+    : public OpConversionPattern<tensor::EmptyOp> {
+public:
+  using OpConversionPattern::OpConversionPattern;
+  LogicalResult
+  matchAndRewrite(tensor::EmptyOp op, OpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const override {
+    Location loc = op.getLoc();
+    const auto stt = getSparseTensorType(op);
+    if (!stt.hasEncoding())
+      return failure();
+    // Gather all dimension sizes as SSA values (dynamic sizes from the
+    // operands, static sizes from the result type).
+    const Dimension dimRank = stt.getDimRank();
+    SmallVector<Value> dimSizes;
+    dimSizes.reserve(dimRank);
+    auto shape = op.getType().getShape();
+    unsigned operandCtr = 0;
+    for (Dimension d = 0; d < dimRank; ++d) {
+      dimSizes.push_back(stt.isDynamicDim(d)
+                             ? adaptor.getOperands()[operandCtr++]
+                             : constantIndex(rewriter, loc, shape[d]));
+    }
+    // Generate the call to construct the empty tensor, using the
+    // dimension sizes gathered above.
+    rewriter.replaceOp(op, NewCallParams(rewriter, loc)
+                               .genBuffers(stt, dimSizes)
+                               .genNewCall(Action::kEmpty));
+    return success();
+  }
+};
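
As a minimal sketch of what the new pattern converts (assuming a #CSR encoding and a hypothetical dynamic size %d), consider:

  %d = arith.constant 100 : index
  %t = tensor.empty(%d) : tensor<?x32xf64, #CSR>

The converter reads the dynamic size of dimension 0 from the adaptor operand %d, materializes the static size 32 as a constant index, and replaces the op with a runtime call (Action::kEmpty) that constructs the empty sparse tensor.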

/// Sparse conversion rule for the convert operator.
class SparseTensorConvertConverter : public OpConversionPattern<ConvertOp> {
public:
@@ -1503,19 +1535,19 @@ mlir::SparseTensorTypeToPtrConverter::SparseTensorTypeToPtrConverter() {
void mlir::populateSparseTensorConversionPatterns(
TypeConverter &typeConverter, RewritePatternSet &patterns,
const SparseTensorConversionOptions &options) {
-  patterns.add<SparseReturnConverter, SparseTensorToDimSizeConverter,
-               SparseCastConverter, SparseTensorNewConverter,
-               SparseReshapeConverter<tensor::ExpandShapeOp>,
-               SparseReshapeConverter<tensor::CollapseShapeOp>,
-               SparseTensorConcatConverter, SparseTensorAllocConverter,
-               SparseTensorDeallocConverter, SparseTensorToPositionsConverter,
-               SparseTensorToCoordinatesConverter,
-               SparseTensorToValuesConverter, SparseNumberOfEntriesConverter,
-               SparseTensorLoadConverter, SparseTensorInsertConverter,
-               SparseTensorExpandConverter, SparseTensorCompressConverter,
-               SparseTensorOutConverter, SparseTensorPackConverter>(
-      typeConverter, patterns.getContext());
-
+  patterns
+      .add<SparseReturnConverter, SparseTensorToDimSizeConverter,
+           SparseCastConverter, SparseTensorNewConverter,
+           SparseReshapeConverter<tensor::ExpandShapeOp>,
+           SparseReshapeConverter<tensor::CollapseShapeOp>,
+           SparseTensorConcatConverter, SparseTensorAllocConverter,
+           SparseTensorEmptyConverter, SparseTensorDeallocConverter,
+           SparseTensorToPositionsConverter, SparseTensorToCoordinatesConverter,
+           SparseTensorToValuesConverter, SparseNumberOfEntriesConverter,
+           SparseTensorLoadConverter, SparseTensorInsertConverter,
+           SparseTensorExpandConverter, SparseTensorCompressConverter,
+           SparseTensorOutConverter, SparseTensorPackConverter>(
+          typeConverter, patterns.getContext());
patterns.add<SparseTensorConvertConverter>(typeConverter,
patterns.getContext(), options);
}
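
These patterns are exercised by the runtime conversion pass; a RUN line in the style of the test files below would be (a sketch, assuming the pass is registered as sparse-tensor-conversion):

  // RUN: mlir-opt %s -sparse-tensor-conversion | FileCheck %s

The tests that follow instead go through -sparsification, which now accepts tensor.empty as the sparse output operand.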
4 changes: 2 additions & 2 deletions mlir/test/Dialect/SparseTensor/constant_index_map.mlir
@@ -13,7 +13,7 @@
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 77 : index
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
-// CHECK-DAG: %[[VAL_5:.*]] = bufferization.alloc_tensor() : tensor<77xi1, #{{.*}}>
+// CHECK-DAG: %[[VAL_5:.*]] = tensor.empty() : tensor<77xi1, #{{.*}}>
// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<1x77xi1>
// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : memref<1x77xi1>
// CHECK: %[[VAL_8:.*]] = scf.for %[[VAL_9:.*]] = %[[VAL_3]] to %[[VAL_2]] step %[[VAL_4]] iter_args(%[[VAL_10:.*]] = %[[VAL_5]]) -> (tensor<77xi1, #{{.*}}>) {
@@ -27,7 +27,7 @@
// CHECK: return %[[VAL_15]] : tensor<77xi1, #{{.*}}>
// CHECK: }
func.func @main(%arg0: tensor<1x77xi1>, %arg1: tensor<1x77xi1>) -> tensor<77xi1, #SpVec> {
-  %0 = bufferization.alloc_tensor() : tensor<77xi1, #SpVec>
+  %0 = tensor.empty() : tensor<77xi1, #SpVec>
%1 = linalg.generic {
indexing_maps = [#map1, #map1, #map2],
iterator_types = ["parallel"]}
63 changes: 31 additions & 32 deletions mlir/test/Dialect/SparseTensor/sparse_affine.mlir
@@ -1,4 +1,3 @@
-// NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
// RUN: mlir-opt %s -sparsification | FileCheck %s

#SpVec = #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }>
@@ -17,9 +16,9 @@
}

// CHECK-LABEL: func @mul_inv_dense1d(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>,
-// CHECK-SAME: %[[VAL_1:.*]]: tensor<4xf32>,
-// CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf32>) -> tensor<32xf32> {
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>,
+// CHECK-SAME: %[[VAL_1:.*]]: tensor<4xf32>,
+// CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf32>) -> tensor<32xf32> {
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 3 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
@@ -57,13 +56,13 @@ func.func @mul_inv_dense1d(%arga: tensor<32xf32, #SpVec>,
}

// CHECK-LABEL: func.func @mul_inv_sparse1d(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>,
-// CHECK-SAME: %[[VAL_1:.*]]: tensor<4xf32, #sparse_tensor.encoding<{{{.*}}}>>)
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>,
+// CHECK-SAME: %[[VAL_1:.*]]: tensor<4xf32, #sparse_tensor.encoding<{{{.*}}}>>)
// CHECK: %[[VAL_2:.*]] = arith.constant 0 : index
// CHECK: %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_4:.*]] = arith.constant 3 : index
// CHECK: %[[VAL_5:.*]] = arith.constant 0.000000e+00 : f32
-// CHECK: %[[VAL_6:.*]] = bufferization.alloc_tensor() : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK: %[[VAL_6:.*]] = tensor.empty() : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf32>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<4xf32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
@@ -95,7 +94,7 @@ func.func @mul_inv_dense1d(%arga: tensor<32xf32, #SpVec>,
// CHECK: return %[[VAL_32]] : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>
func.func @mul_inv_sparse1d(%arga: tensor<32xf32, #SpVec>,
%argb: tensor<4xf32, #SpVec>) -> tensor<32xf32, #SpVec> {
-  %argx = bufferization.alloc_tensor() : tensor<32xf32, #SpVec>
+  %argx = tensor.empty() : tensor<32xf32, #SpVec>
%0 = linalg.generic #trait1
ins(%arga, %argb: tensor<32xf32, #SpVec>, tensor<4xf32, #SpVec>)
outs(%argx: tensor<32xf32, #SpVec>) {
@@ -109,13 +108,13 @@ func.func @mul_inv_sparse1d(%arga: tensor<32xf32, #SpVec>,


// CHECK-LABEL: func.func @mul_inv_enc_dense1d(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>,
-// CHECK-SAME: %[[VAL_1:.*]]: tensor<4xf32, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>> {
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>,
+// CHECK-SAME: %[[VAL_1:.*]]: tensor<4xf32, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>> {
// CHECK: %[[VAL_2:.*]] = arith.constant 32 : index
// CHECK: %[[VAL_3:.*]] = arith.constant 3 : index
// CHECK: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK: %[[VAL_5:.*]] = arith.constant 1 : index
-// CHECK: %[[VAL_6:.*]] = bufferization.alloc_tensor() : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK: %[[VAL_6:.*]] = tensor.empty() : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf32>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<4xf32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf32>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_6]] : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf32>
@@ -132,7 +131,7 @@ func.func @mul_inv_sparse1d(%arga: tensor<32xf32, #SpVec>,
// CHECK: }
func.func @mul_inv_enc_dense1d(%arga: tensor<32xf32, #EncDenseVec>,
%argb: tensor<4xf32, #EncDenseVec>) -> tensor<32xf32, #EncDenseVec> {
-  %argx = bufferization.alloc_tensor() : tensor<32xf32, #EncDenseVec>
+  %argx = tensor.empty() : tensor<32xf32, #EncDenseVec>
%0 = linalg.generic #trait1
ins(%arga, %argb: tensor<32xf32, #EncDenseVec>, tensor<4xf32, #EncDenseVec>)
outs(%argx: tensor<32xf32, #EncDenseVec>) {
@@ -155,9 +154,9 @@ func.func @mul_inv_enc_dense1d(%arga: tensor<32xf32, #EncDenseVec>,
}

// CHECK-LABEL: func @and_affine_dense1d(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>>,
-// CHECK-SAME: %[[VAL_1:.*]]: tensor<34xi32>,
-// CHECK-SAME: %[[VAL_2:.*]]: tensor<32xi32>) -> tensor<32xi32> {
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>>,
+// CHECK-SAME: %[[VAL_1:.*]]: tensor<34xi32>,
+// CHECK-SAME: %[[VAL_2:.*]]: tensor<32xi32>) -> tensor<32xi32> {
// CHECK-DAG: %[[ZERO:.*]] = arith.constant 0 : i32
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
@@ -195,12 +194,12 @@ func.func @and_affine_dense1d(%arga: tensor<32xi32, #SpVec>,
}

// CHECK-LABEL: func.func @and_affine_sparse1d(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>>,
-// CHECK-SAME: %[[VAL_1:.*]]: tensor<34xi32, #sparse_tensor.encoding<{{{.*}}}>>)
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>>,
+// CHECK-SAME: %[[VAL_1:.*]]: tensor<34xi32, #sparse_tensor.encoding<{{{.*}}}>>)
// CHECK: %[[VAL_2:.*]] = arith.constant 0 : index
// CHECK: %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_4:.*]] = arith.constant 2 : index
-// CHECK: %[[VAL_5:.*]] = bufferization.alloc_tensor() : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK: %[[VAL_5:.*]] = tensor.empty() : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xi32>
@@ -234,7 +233,7 @@ func.func @and_affine_dense1d(%arga: tensor<32xi32, #SpVec>,
// CHECK: return %[[VAL_33]] : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>>
func.func @and_affine_sparse1d(%arga: tensor<32xi32, #SpVec>,
%argb: tensor<34xi32, #SpVec>) -> tensor<32xi32, #SpVec> {
-  %argx = bufferization.alloc_tensor() : tensor<32xi32, #SpVec>
+  %argx = tensor.empty() : tensor<32xi32, #SpVec>
%0 = linalg.generic #trait2
ins(%arga, %argb: tensor<32xi32, #SpVec>, tensor<34xi32, #SpVec>)
outs(%argx: tensor<32xi32, #SpVec>) {
@@ -256,9 +255,9 @@ func.func @and_affine_sparse1d(%arga: tensor<32xi32, #SpVec>,
}

// CHECK-LABEL: func @mul_affine_dense2d(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>>,
-// CHECK-SAME: %[[VAL_1:.*]]: tensor<34x19xf64>,
-// CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16xf64>) -> tensor<32x16xf64> {
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>>,
+// CHECK-SAME: %[[VAL_1:.*]]: tensor<34x19xf64>,
+// CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16xf64>) -> tensor<32x16xf64> {
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 32 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index
@@ -304,8 +303,8 @@ func.func @mul_affine_dense2d(%arga: tensor<32x16xf64, #CSR>,


// CHECK-LABEL: func.func @mul_affine_sparse2d(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>>,
-// CHECK-SAME: %[[VAL_1:.*]]: tensor<34x19xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>> {
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>>,
+// CHECK-SAME: %[[VAL_1:.*]]: tensor<34x19xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>> {
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 32 : index
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
@@ -314,7 +313,7 @@ func.func @mul_affine_dense2d(%arga: tensor<32x16xf64, #CSR>,
// CHECK-DAG: %[[VAL_7:.*]] = arith.constant 3 : index
// CHECK-DAG: %[[VAL_TRUE:.*]] = arith.constant true
// CHECK-DAG: %[[VAL_FALSE:.*]] = arith.constant false
-// CHECK: %[[VAL_8:.*]] = bufferization.alloc_tensor() : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK: %[[VAL_8:.*]] = tensor.empty() : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf64>
@@ -360,7 +359,7 @@ func.func @mul_affine_dense2d(%arga: tensor<32x16xf64, #CSR>,
// CHECK: return %[[VAL_45]] : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>>
func.func @mul_affine_sparse2d(%arga: tensor<32x16xf64, #CSR>,
%argb: tensor<34x19xf64, #CSR>) -> tensor<32x16xf64, #CSR> {
-  %argx = bufferization.alloc_tensor() : tensor<32x16xf64, #CSR>
+  %argx = tensor.empty() : tensor<32x16xf64, #CSR>
%0 = linalg.generic #trait3
ins(%arga, %argb: tensor<32x16xf64, #CSR>, tensor<34x19xf64, #CSR>)
outs(%argx: tensor<32x16xf64, #CSR>) {
@@ -383,9 +382,9 @@ func.func @mul_affine_sparse2d(%arga: tensor<32x16xf64, #CSR>,
}

// CHECK-LABEL: func.func @mul_affine_dense_dim_2d(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<34x16xf64, #sparse_tensor.encoding
-// CHECK-SAME: %[[VAL_1:.*]]: tensor<32x19xf64, #sparse_tensor.encoding<{{{.*}}}>>,
-// CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16xf64>) -> tensor<32x16xf64> {
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<34x16xf64, #sparse_tensor.encoding
+// CHECK-SAME: %[[VAL_1:.*]]: tensor<32x19xf64, #sparse_tensor.encoding<{{{.*}}}>>,
+// CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16xf64>) -> tensor<32x16xf64> {
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 19 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
@@ -447,9 +446,9 @@ func.func @mul_affine_dense_dim_2d(%arga: tensor<34x16xf64, #CSR>,
}

// CHECK-LABEL: func.func @mul_const_affine_dense_dim_2d(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<34x16xf64,
-// CHECK-SAME: %[[VAL_1:.*]]: tensor<32x19xf64, #sparse_tensor.encoding<{{{.*}}}>>,
-// CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16xf64>) -> tensor<32x16xf64> {
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<34x16xf64,
+// CHECK-SAME: %[[VAL_1:.*]]: tensor<32x19xf64, #sparse_tensor.encoding<{{{.*}}}>>,
+// CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16xf64>) -> tensor<32x16xf64> {
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 19 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 2 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index
4 changes: 2 additions & 2 deletions mlir/test/Dialect/SparseTensor/sparse_broadcast.mlir
@@ -16,7 +16,7 @@
// CHECK-DAG: %[[TMP_c3:.*]] = arith.constant 3 : index
// CHECK-DAG: %[[TMP_c0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[TMP_c1:.*]] = arith.constant 1 : index
-// CHECK: %[[TMP_0:.*]] = bufferization.alloc_tensor()
+// CHECK: %[[TMP_0:.*]] = tensor.empty()
// CHECK: %[[TMP_1:.*]] = sparse_tensor.positions %[[TMP_arg0]] {level = 0 : index}
// CHECK: %[[TMP_2:.*]] = sparse_tensor.coordinates %[[TMP_arg0]] {level = 0 : index}
// CHECK: %[[TMP_3:.*]] = sparse_tensor.positions %[[TMP_arg0]] {level = 1 : index}
@@ -44,7 +44,7 @@
// CHECK: return %[[TMP_8]]
module @func_sparse {
func.func public @main(%arg0: tensor<4x5xi32, #DCSR>) -> tensor<4x3x5xi32, #SparseTensor> {
-    %0 = bufferization.alloc_tensor() : tensor<4x3x5xi32, #SparseTensor>
+    %0 = tensor.empty() : tensor<4x3x5xi32, #SparseTensor>
%1 = linalg.generic #trait
ins(%arg0 : tensor<4x5xi32, #DCSR>) outs(%0 : tensor<4x3x5xi32, #SparseTensor>) {
^bb0(%in: i32, %out: i32):
6 changes: 3 additions & 3 deletions mlir/test/Dialect/SparseTensor/sparse_expand.mlir
@@ -67,7 +67,7 @@
func.func @kernel(%arga: tensor<?x?xf64, #DCSC>) -> tensor<?xf64, #SV> {
%c0 = arith.constant 0 : index
%n = tensor.dim %arga, %c0 : tensor<?x?xf64, #DCSC>
-  %v = bufferization.alloc_tensor(%n) : tensor<?xf64, #SV>
+  %v = tensor.empty(%n) : tensor<?xf64, #SV>
%0 = linalg.generic #rowsum
ins(%arga: tensor<?x?xf64, #DCSC>)
outs(%v: tensor<?xf64, #SV>) {
@@ -119,7 +119,7 @@ func.func @kernel(%arga: tensor<?x?xf64, #DCSC>) -> tensor<?xf64, #SV> {
//
func.func @matmul1(%A: tensor<8x2xf64, #CSR>,
%B: tensor<2x4xf64, #CSR>) -> tensor<8x4xf64, #CSR> {
-  %C = bufferization.alloc_tensor() : tensor<8x4xf64, #CSR>
+  %C = tensor.empty() : tensor<8x4xf64, #CSR>
%D = linalg.matmul
ins(%A, %B: tensor<8x2xf64, #CSR>, tensor<2x4xf64, #CSR>)
outs(%C: tensor<8x4xf64, #CSR>) -> tensor<8x4xf64, #CSR>
@@ -167,7 +167,7 @@ func.func @matmul1(%A: tensor<8x2xf64, #CSR>,
//
func.func @matmul2(%A: tensor<8x2xf64, #CSC>,
%B: tensor<2x4xf64, #CSC>) -> tensor<8x4xf64, #CSC> {
-  %C = bufferization.alloc_tensor() : tensor<8x4xf64, #CSC>
+  %C = tensor.empty() : tensor<8x4xf64, #CSC>
%D = linalg.matmul
ins(%A, %B: tensor<8x2xf64, #CSC>, tensor<2x4xf64, #CSC>)
outs(%C: tensor<8x4xf64, #CSC>) -> tensor<8x4xf64, #CSC>