Skip to content
This repository was archived by the owner on Jan 25, 2023. It is now read-only.

[MLIR] array GetItem/SetItem multidim argument #184

Merged
merged 4 commits on
Feb 16, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
80 changes: 59 additions & 21 deletions mlir-compiler/mlir-compiler/src/pipelines/plier_to_linalg.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -223,6 +223,23 @@ struct CallLowerer
PyLinalgResolver linalg_resolver;
};

// Coerce `value` to the builtin index type if it is not one already.
// Emits a plier::CastOp at `loc` and re-schedules the std lowering
// pipeline so the freshly created cast gets lowered in a later run.
// Returns `value` untouched when it is already an index.
mlir::Value index_cast(mlir::Value value, mlir::Location loc, mlir::OpBuilder& builder)
{
    if (value.getType().isa<mlir::IndexType>())
    {
        return value; // nothing to do
    }
    auto dst_type = mlir::IndexType::get(value.getContext());
    auto casted = builder.create<plier::CastOp>(loc, dst_type, value);
    rerun_std_pipeline(casted);
    return casted;
}

// True if `type` may be used as a getitem/setitem index: a single integer
// or index value, or a tuple of them (multi-dimensional access).
bool isValidGetitemIndex(mlir::Type type)
{
    if (type.isa<mlir::TupleType>())
    {
        return true; // multi-dim index, e.g. a[i, j]
    }
    return type.isa<mlir::IntegerType>() || type.isa<mlir::IndexType>();
}

template<typename T>
struct GetitemOpLowering : public mlir::OpRewritePattern<T>
{
Expand All @@ -241,24 +258,36 @@ struct GetitemOpLowering : public mlir::OpRewritePattern<T>
{
return mlir::failure();
}
if (!index.getType().template isa<mlir::IndexType>() &&
!index.getType().template isa<mlir::IntegerType>())
if (!isValidGetitemIndex(index.getType()))
{
return mlir::failure();
}
auto loc = op.getLoc();
if (index.getType().template isa<mlir::IntegerType>())

llvm::SmallVector<mlir::Value, 8> indices;
if (auto tuple_type = index.getType().template dyn_cast<mlir::TupleType>())
{
indices.resize(tuple_type.size());
for (auto it : llvm::enumerate(tuple_type))
{
auto getitem_ind = rewriter.create<mlir::ConstantIndexOp>(loc, it.index());
auto ind = rewriter.create<plier::GetItemOp>(loc, index, getitem_ind);
indices[it.index()] = index_cast(ind, loc, rewriter);
}
}
else
{
index = rewriter.create<mlir::IndexCastOp>(loc, index, mlir::IndexType::get(op.getContext()));
indices.push_back(index_cast(index, loc, rewriter));
}

mlir::Value res;
if (is_memref)
{
res = rewriter.create<mlir::LoadOp>(loc, val, index);
res = rewriter.create<mlir::LoadOp>(loc, val, indices);
}
else if (is_tensor)
{
res = rewriter.create<mlir::tensor::ExtractOp>(loc, val, index);
res = rewriter.create<mlir::tensor::ExtractOp>(loc, val, indices);
}
else
{
Expand Down Expand Up @@ -328,18 +357,6 @@ bool replace_ssa_value(mlir::Value value, mlir::Value new_value, mlir::PatternRe
llvm_unreachable("Unhandled parent op");
}

// NOTE(review): byte-for-byte duplicate of the index_cast helper defined
// earlier in this file; this copy is the one the diff deletes after the
// helper was hoisted above GetitemOpLowering so both lowerings can share it.
// Casts `value` to the builtin index type via plier::CastOp when needed and
// re-runs the std pipeline so the new cast is lowered.
mlir::Value index_cast(mlir::Value value, mlir::Location loc, mlir::OpBuilder& builder)
{
if (!value.getType().isa<mlir::IndexType>())
{
auto index_type = mlir::IndexType::get(value.getContext());
auto res = builder.create<plier::CastOp>(loc, index_type, value);
rerun_std_pipeline(res);
return res;
}
return value;
}

template<typename T>
struct SetitemOpLoweringSSA : public mlir::OpRewritePattern<T>
{
Expand Down Expand Up @@ -407,6 +424,12 @@ struct SetitemOpLowering : public mlir::OpRewritePattern<T>
return op.getOperand(0).getType();
};

auto index = op.index();
if (!isValidGetitemIndex(index.getType()))
{
return mlir::failure();
}

if (auto target_type = get_target_type().template dyn_cast<mlir::RankedTensorType>())
{
auto target = op.getOperand(0);
Expand Down Expand Up @@ -452,18 +475,33 @@ struct SetitemOpLowering : public mlir::OpRewritePattern<T>
return mlir::failure();
}
auto target = op.getOperand(0);
auto index = op.getOperand(1);
auto value = op.getOperand(2);
auto loc = op.getLoc();
auto ind = index_cast(index, loc, rewriter);
auto elem_type = target.getType().template cast<mlir::MemRefType>().getElementType();
if (value.getType() != elem_type)
{
// TODO
value = rewriter.create<plier::CastOp>(loc, elem_type, value);
rerun_std_pipeline(op);
}
auto store = rewriter.create<mlir::StoreOp>(loc, value, target, ind);

llvm::SmallVector<mlir::Value, 8> indices;
if (auto tuple_type = index.getType().template dyn_cast<mlir::TupleType>())
{
indices.resize(tuple_type.size());
for (auto it : llvm::enumerate(tuple_type))
{
auto getitem_ind = rewriter.create<mlir::ConstantIndexOp>(loc, it.index());
auto ind = rewriter.create<plier::GetItemOp>(loc, index, getitem_ind);
indices[it.index()] = index_cast(ind, loc, rewriter);
}
rerun_std_pipeline(op);
}
else
{
indices.push_back(index_cast(index, loc, rewriter));
}
rewriter.create<mlir::StoreOp>(loc, value, target, indices);
rewriter.eraseOp(op);
return mlir::success();
}
Expand Down
22 changes: 22 additions & 0 deletions mlir-compiler/mlir-compiler/src/pipelines/plier_to_std.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1105,6 +1105,27 @@ struct FixupWhileTypes : public mlir::OpRewritePattern<mlir::scf::WhileOp>
}
};

// Rewrites a plier::BuildTupleOp whose result is still untyped so that its
// result becomes a concrete mlir::TupleType assembled from the operand
// types. Fires only once every operand has been converted away from the
// opaque plier::PyType, i.e. once the element types are known.
struct PropagateBuildTupleTypes : public mlir::OpRewritePattern<plier::BuildTupleOp>
{
    PropagateBuildTupleTypes(mlir::TypeConverter &/*typeConverter*/,
                             mlir::MLIRContext *context):
        OpRewritePattern(context) {}

    mlir::LogicalResult matchAndRewrite(
        plier::BuildTupleOp op, mlir::PatternRewriter &rewriter) const override
    {
        // Already carries a concrete tuple type - nothing to propagate.
        if (op.getType().isa<mlir::TupleType>())
        {
            return mlir::failure();
        }
        // Wait until no operand is still an unconverted python type.
        auto is_py_type = [](mlir::Type t){ return t.isa<plier::PyType>(); };
        if (llvm::any_of(op.getOperandTypes(), is_py_type))
        {
            return mlir::failure();
        }

        auto tuple_type = mlir::TupleType::get(op.getContext(), op.getOperandTypes());
        rewriter.replaceOpWithNewOp<plier::BuildTupleOp>(op, tuple_type, op.getOperands());
        return mlir::success();
    }
};

template<typename Op>
struct FoldTupleGetitem : public mlir::OpRewritePattern<Op>
{
Expand Down Expand Up @@ -1352,6 +1373,7 @@ void PlierToStdPass::runOnOperation()
ScfIfRewrite,
ScfWhileRewrite,
FixupWhileTypes,
PropagateBuildTupleTypes,
FoldTupleGetitem<plier::GetItemOp>,
FoldTupleGetitem<plier::StaticGetItemOp>
>(type_converter, context);
Expand Down
11 changes: 10 additions & 1 deletion numba/mlir/tests/test_numpy.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ def py_func(a, b, c):
arr3 = np.asarray([7,8,9])
assert_equal(py_func(arr1, arr2, arr3), jit_func(arr1, arr2, arr3))

def test_setitem(self):
def test_setitem1(self):
def py_func(a, b):
a[b] = 42
return a[b]
Expand All @@ -87,6 +87,15 @@ def py_func(a, b):
arr = np.asarray([1,2,3])
assert_equal(py_func(arr, 1), jit_func(arr, 1))

def test_setitem2(self):
    # Multi-dimensional setitem/getitem: a[b, c] exercises the tuple-index
    # lowering path; compare the compiled result against plain Python.
    def py_func(a, b, c):
        a[b, c] = 42
        return a[b, c]

    jit_func = njit(py_func)
    src = np.asarray([[1,2,3],[4,5,6]])
    assert_equal(py_func(src, 1, 2), jit_func(src, 1, 2))

def test_setitem_loop(self):
def py_func(a):
for i in range(len(a)):
Expand Down