diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
index 6fcba25a0f297..9761ab12134ad 100644
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
@@ -264,9 +264,7 @@ def Bufferization_MaterializeInDestinationOp
       return ::llvm::cast<TensorType>(getResult().getType());
     }

-    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-      return {1, 2};  // `dest` operand
-    }
+    MutableOperandRange getDpsInitsMutable() { return getDestMutable(); }
   }];

   let assemblyFormat = "$source `in` $dest attr-dict `:` type($source)";
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
index 839861c2369ca..9ca029b489ad1 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
@@ -555,12 +555,12 @@ def LinalgStructuredInterface
        are expection. For example, in `map` output operand isn't used in
        the block.
      }],
-      /*retTy=*/"OpOperandVector",
+      /*retTy=*/"::llvm::SmallVector<::mlir::OpOperand *>",
      /*methodName=*/"getOpOperandsMatchingBBargs",
      /*args=*/(ins),
      /*methodBody=*/"",
      /*defaultImplementation=*/[{
-        OpOperandVector result;
+        ::llvm::SmallVector<::mlir::OpOperand *> result;
        result.reserve($_op->getNumOperands());
        llvm::transform(
          this->getOperation()->getOpOperands(),
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td
index 4d06747a05d63..da12e7c83b22b 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td
@@ -149,14 +149,7 @@ def Linalg_SoftmaxOp : Linalg_Op<"softmax",
     int64_t getOutputOperandRank() {
       return getOutputOperandType().getRank();
     }
-    // Method to implement DestinationStyleOpInterface.
-    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-      std::pair<unsigned, unsigned> outputsIndexAndLength =
-          getODSOperandIndexAndLength(1);
-      return std::make_pair(
-          outputsIndexAndLength.first,
-          outputsIndexAndLength.first + outputsIndexAndLength.second);
-    }
+    MutableOperandRange getDpsInitsMutable() { return getOutputMutable(); }
   }];
   let hasVerifier = 1;
 }
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
index c8d579949dc4e..21a5e5cc47aeb 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
@@ -207,10 +207,8 @@ def GenericOp : LinalgStructuredBase_Op<"generic", [
         getRegionBuilder() {
       return nullptr;
     }
-    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-      int64_t getNumOperands = this->getNumOperands();
-      return {getNumOperands - getOutputs().size(), getNumOperands};
-    }
+
+    MutableOperandRange getDpsInitsMutable() { return getOutputsMutable(); }
   }];

   let hasCanonicalizer = 1;
@@ -283,11 +281,9 @@ def MapOp : LinalgStructuredBase_Op<"map", [
     }

     // Implement functions necessary for DestinationStyleOpInterface.
-    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-      int64_t getNumOperands = this->getNumOperands();
-      return {getNumOperands - 1, getNumOperands};
-    }
-    OpOperandVector getOpOperandsMatchingBBargs() {
+    MutableOperandRange getDpsInitsMutable() { return getInitMutable(); }
+
+    SmallVector<OpOperand *> getOpOperandsMatchingBBargs() {
       return getDpsInputOperands();
     }
@@ -381,9 +377,7 @@ def ReduceOp : LinalgStructuredBase_Op<"reduce", [
         getRegionBuilder() {
       return nullptr;
     }
-    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-      return {getInits().size(), getNumOperands()};
-    }
+    MutableOperandRange getDpsInitsMutable() { return getInitsMutable(); }
   }];

   let hasCustomAssemblyFormat = 1;
@@ -446,10 +440,7 @@ def TransposeOp : LinalgStructuredBase_Op<"transpose", [
     }

     // Implement functions necessary for DestinationStyleOpInterface.
-    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-      int64_t getNumOperands = this->getNumOperands();
-      return {getNumOperands - 1, getNumOperands};
-    }
+    MutableOperandRange getDpsInitsMutable() { return getInitMutable(); }

     static std::function<void(mlir::ImplicitLocOpBuilder &, mlir::Block &,
                               mlir::ArrayRef<mlir::NamedAttribute>)>
@@ -517,10 +508,7 @@ def BroadcastOp : LinalgStructuredBase_Op<"broadcast", [
     }

     // Implement functions necessary for DestinationStyleOpInterface.
-    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-      int64_t getNumOperands = this->getNumOperands();
-      return {getNumOperands - 1, getNumOperands};
-    }
+    MutableOperandRange getDpsInitsMutable() { return getInitMutable(); }

     static std::function<void(mlir::ImplicitLocOpBuilder &, mlir::Block &,
                               mlir::ArrayRef<mlir::NamedAttribute>)>
diff --git a/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td b/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td
index d1c33d8b4c03c..86a250b77dcc8 100644
--- a/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td
+++ b/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td
@@ -750,9 +750,7 @@ def Tensor_InsertOp : Tensor_Op<"insert", [
   }];

   let extraClassDeclaration = [{
-    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-      return {1, 2};  // `dest` operand
-    }
+    MutableOperandRange getDpsInitsMutable() { return getDestMutable(); }
   }];

   let hasFolder = 1;
@@ -892,9 +890,7 @@ def Tensor_InsertSliceOp : Tensor_OpWithOffsetSizesAndStrides<"insert_slice", [
     /// and `strides` operands.
     static unsigned getOffsetSizeAndStrideStartOperandIndex() { return 2; }

-    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-      return {1, 2};  // `dest` operand
-    }
+    MutableOperandRange getDpsInitsMutable() { return getDestMutable(); }
   }];

   let hasCanonicalizer = 1;
@@ -1714,10 +1710,7 @@ class Tensor_RelayoutOp<string mnemonic, list<Trait> traits = []> :
     RankedTensorType getDestType() {
      return ::llvm::cast<RankedTensorType>(getDest().getType());
    };

-    /// Return position for init operand. Init operand is `dest`.
-    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-      return {1, 2}; // `dest` operand
-    }
+    MutableOperandRange getDpsInitsMutable() { return getDestMutable(); }

    /// Interface method for ConditionallySpeculatable.
    Speculation::Speculatability getSpeculatability();
diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
index aba85f86a7eef..701eefcc1e7da 100644
--- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
+++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
@@ -1330,8 +1330,8 @@ def Vector_TransferReadOp :
    // MaskableOpInterface methods.
    bool supportsPassthru() { return true; }

-    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-      return {0, 0};  // empty range (no init operands)
+    MutableOperandRange getDpsInitsMutable() {
+      return MutableOperandRange(getOperation(), /*start=*/0, /*length=*/0);
     }
   }];
@@ -1494,9 +1494,7 @@ def Vector_TransferWriteOp :
    /// ops of other dialects.
    Value getValue() { return getVector(); }

-    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-      return {1, 2};  // `source` operand
-    }
+    MutableOperandRange getDpsInitsMutable() { return getSourceMutable(); }
   }];

   let hasFolder = 1;
diff --git a/mlir/include/mlir/Interfaces/DestinationStyleOpInterface.h b/mlir/include/mlir/Interfaces/DestinationStyleOpInterface.h
index 9ace95c6f3d3b..6649371f3ed32 100644
--- a/mlir/include/mlir/Interfaces/DestinationStyleOpInterface.h
+++ b/mlir/include/mlir/Interfaces/DestinationStyleOpInterface.h
@@ -17,11 +17,6 @@
 #include "llvm/ADT/SmallVector.h"

 namespace mlir {
-/// OpOperand vector that implicitly converts to a Value vector.
-struct OpOperandVector : public llvm::SmallVector<OpOperand *> {
-  operator SmallVector<Value>();
-};
-
 namespace detail {
 /// Verify that `op` conforms to the invariants of DestinationStyleOpInterface
 LogicalResult verifyDestinationStyleOpInterface(Operation *op);
diff --git a/mlir/include/mlir/Interfaces/DestinationStyleOpInterface.td b/mlir/include/mlir/Interfaces/DestinationStyleOpInterface.td
index 8558e0279e3e5..4c52d803e1147 100644
--- a/mlir/include/mlir/Interfaces/DestinationStyleOpInterface.td
+++ b/mlir/include/mlir/Interfaces/DestinationStyleOpInterface.td
@@ -13,16 +13,16 @@ include "mlir/IR/OpBase.td"

 def DestinationStyleOpInterface : OpInterface<"DestinationStyleOpInterface"> {
   let description = [{
-    Ops that are in destination style have designated init operands, which act
+    Ops that are in destination style have designated "init" operands, which act
     as initial tensor values for the results of the operation or the init
     buffers to which the results of the op will be written.

     Init operands must be ranked tensors or ranked memrefs. Input operands can
     have any type. All non-init operands are DPS inputs.

-    It is assumed that the init operands of the op are the operands at
-    position [start, end). The positions are defined by getDpsInitsPositionRange
-    method.
+    The init operands of this op are specified by the MutableOperandRange that
+    the `getDpsInitsMutable` interface method returns. This implies that the
+    init operands must be a consecutive range of operands.

     If the op has "tensor semantics", then the input operands are either ranked
     tensors or other non-tensor/memref types ("scalars"). The init operands are
@@ -50,241 +50,157 @@ def DestinationStyleOpInterface : OpInterface<"DestinationStyleOpInterface"> {
     Example of an op that is not in destination style: `%r = tensor.pad %t`.
     This op is not in destination style because `%r` and `%t` have different
     shape.
-
-    Each op that wants to implement DestinationStyleOpInterface needs to define
-    the getDpsInitsPositionRange() method.
   }];

   let cppNamespace = "::mlir";

   let methods = [
-    // This method has to be defined for every DPS op.
     InterfaceMethod<
      /*desc=*/"Return start and end indices of the init operands range.",
-      /*retTy=*/"std::pair<int64_t, int64_t>",
-      /*methodName=*/"getDpsInitsPositionRange",
-      /*args=*/(ins),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/""
-    >,
-    //===------------------------------------------------------------------===//
-    // Operands handling.
-    //===------------------------------------------------------------------===//
-    // The operand list is assumed to start with the input operands and end
-    // with the init operands. Therefore, all methods to access the inputs
-    // and inits can be expressed if the number of init operands is know.
-    InterfaceMethod<
-      /*desc=*/"Return the number of inits.",
-      /*retTy=*/"int64_t",
-      /*methodName=*/"getNumDpsInits",
-      /*args=*/(ins),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        auto [start, end] = $_op.getDpsInitsPositionRange();
-        return end - start;
-      }]
-    >,
-    InterfaceMethod<
-      /*desc=*/"Return the init operands.",
-      /*retTy=*/"::mlir::OpOperandVector",
-      /*methodName=*/"getDpsInitOperands",
-      /*args=*/(ins),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        auto [start, end] = $_op.getDpsInitsPositionRange();
-
-        ::mlir::OpOperandVector result;
-        result.reserve(end - start);
-        for (int i = start; i < end; ++i)
-          result.push_back(&$_op->getOpOperand(i));
-        return result;
-      }]
-    >,
-    InterfaceMethod<
-      /*desc=*/"Return the `i`-th init operand.",
-      /*retTy=*/"::mlir::OpOperand *",
-      /*methodName=*/"getDpsInitOperand",
-      /*args=*/(ins "int64_t":$i),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        assert(i >= 0 && i < $_op.getNumDpsInits());
-        auto [start, end] = $_op.getDpsInitsPositionRange();
-        return &$_op->getOpOperand(start + i);
-      }]
-    >,
-    InterfaceMethod<
-      /*desc=*/"Set the `i`-th init operand.",
-      /*retTy=*/"void",
-      /*methodName=*/"setDpsInitOperand",
-      /*args=*/(ins "int64_t":$i, "::mlir::Value":$value),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        assert(i >= 0 && i < $_op.getNumDpsInits());
-        auto [start, end] = $_op.getDpsInitsPositionRange();
-        $_op->setOperand(start + i, value);
-      }]
-    >,
-    InterfaceMethod<
-      /*desc=*/"Return the number of inputs.",
-      /*retTy=*/"int64_t",
-      /*methodName=*/"getNumDpsInputs",
-      /*args=*/(ins),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        return $_op.getNumOperands() - $_op.getNumDpsInits();
-      }]
+      /*retTy=*/"::mlir::MutableOperandRange",
+      /*methodName=*/"getDpsInitsMutable",
+      /*args=*/(ins)
    >,
-    InterfaceMethod<
-      /*desc=*/"Return the input operands.",
-      /*retTy=*/"::mlir::OpOperandVector",
-      /*methodName=*/"getDpsInputOperands",
-      /*args=*/(ins),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        auto [start, end] = $_op.getDpsInitsPositionRange();
-        int64_t numInits = end - start;
-        int64_t numOperands = $_op.getNumOperands();
+  ];

-        ::mlir::OpOperandVector result;
-        result.reserve(numOperands - numInits);
-        for (int i = 0; i < start; ++i)
+  let extraSharedClassDeclaration = [{
+    ::mlir::OperandRange getDpsInits() {
+      return $_op.getDpsInitsMutable();
+    }
+
+    /// Return the number of DPS inits.
+    int64_t getNumDpsInits() { return $_op.getDpsInits().size(); }
+
+    /// Return the `i`-th DPS init.
+    ::mlir::OpOperand *getDpsInitOperand(int64_t i) {
+      return &$_op.getDpsInitsMutable()[i];
+    }
+
+    /// Set the `i`-th DPS init.
+    void setDpsInitOperand(int64_t i, Value value) {
+      assert(i >= 0 && i < $_op.getNumDpsInits() && "invalid index");
+      $_op->setOperand($_op.getDpsInits().getBeginOperandIndex() + i, value);
+    }
+
+    /// Return the number of DPS inputs.
+    int64_t getNumDpsInputs() {
+      return $_op->getNumOperands() - $_op.getNumDpsInits();
+    }
+
+    /// Return the DPS input operands.
+    ::llvm::SmallVector<::mlir::OpOperand *> getDpsInputOperands() {
+      ::llvm::SmallVector<::mlir::OpOperand *> result;
+      int64_t numOperands = $_op->getNumOperands();
+      ::mlir::OperandRange range = $_op.getDpsInits();
+      if (range.empty()) {
+        result.reserve(numOperands);
+        for (int64_t i = 0; i < numOperands; ++i)
          result.push_back(&$_op->getOpOperand(i));
-        for (int i = end; i < numOperands; ++i)
-          result.push_back(&$_op->getOpOperand(end + i));
-        return result;
-      }]
-    >,
-    InterfaceMethod<
-      /*desc=*/[{ Return the `i`-th input operand. }],
-      /*retTy=*/"::mlir::OpOperand *",
-      /*methodName=*/"getDpsInputOperand",
-      /*args=*/(ins "int64_t":$i),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        assert(i >= 0 && i < getNumDpsInputs());
-        auto [start, end] = $_op.getDpsInitsPositionRange();
-        return &$_op->getOpOperand(i < start ? i : i + end - start) ;
-      }]
-    >,
-    //===------------------------------------------------------------------===//
-    // Input and DpsInit arguments handling.
-    //===------------------------------------------------------------------===//
-    InterfaceMethod<
-      /*desc=*/"Return true if `opOperand` is an input.",
-      /*retTy=*/"bool",
-      /*methodName=*/"isDpsInput",
-      /*args=*/(ins "::mlir::OpOperand *":$opOperand),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        auto [start, end] = $_op.getDpsInitsPositionRange();
-        auto operandNumber = opOperand->getOperandNumber();
-        return operandNumber < start || operandNumber >= end;
-      }]
-    >,
-    InterfaceMethod<
-      /*desc=*/"Return true if `opOperand` is an init.",
-      /*retTy=*/"bool",
-      /*methodName=*/"isDpsInit",
-      /*args=*/(ins "::mlir::OpOperand *":$opOperand),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        auto [start, end] = $_op.getDpsInitsPositionRange();
-        auto operandNumber = opOperand->getOperandNumber();
-        return operandNumber >= start && operandNumber < end;
-      }]
-    >,
-    InterfaceMethod<
-      /*desc=*/[{
-        Return true if the `opOperand` is a scalar value. A scalar is defined
-        as neither a memref nor a tensor value.
-      }],
-      /*retTy=*/"bool",
-      /*methodName=*/"isScalar",
-      /*args=*/(ins "::mlir::OpOperand *":$opOperand),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        assert(opOperand->getOwner() == $_op.getOperation());
-        return !::llvm::isa<MemRefType, TensorType>(opOperand->get().getType());
-      }]
-    >,
-    InterfaceMethod<
-      /*desc=*/"Return the OpResult that is tied to the given OpOperand.",
-      /*retTy=*/"::mlir::OpResult",
-      /*methodName=*/"getTiedOpResult",
-      /*args=*/(ins "::mlir::OpOperand *":$opOperand),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        assert(opOperand->getOwner() == $_op.getOperation());
-
-        auto [start, end] = $_op.getDpsInitsPositionRange();
-        int64_t resultIndex = opOperand->getOperandNumber() - start;
+      }
+      int64_t firstInitPos = range.getBeginOperandIndex();
+      int64_t numInits = range.size();
+      result.reserve(numOperands - numInits);
+      for (int64_t i = 0; i < firstInitPos; ++i)
+        result.push_back(&$_op->getOpOperand(i));
+      for (int64_t i = firstInitPos + numInits; i < numOperands; ++i)
+        result.push_back(&$_op->getOpOperand(i));
+      return result;
+    }
+
+    /// Return the DPS inputs as values.
+    ::llvm::SmallVector<::mlir::Value> getDpsInputs() {
+      return ::llvm::to_vector(::llvm::map_range(
+          $_op.getDpsInputOperands(), [](OpOperand *o) { return o->get(); }));
+    }
+
+    /// Return the `i`-th DPS input operand.
+    ::mlir::OpOperand *getDpsInputOperand(int64_t i) {
+      ::mlir::OperandRange range = $_op.getDpsInits();
+      if (range.empty())
+        return &$_op->getOpOperand(i);
+      int64_t firstInitPos = range.getBeginOperandIndex();
+      int64_t numInits = range.size();
+      assert(i >= 0 && i < $_op->getNumOperands() - numInits
+             && "invalid index");
+      return &$_op->getOpOperand(
+          i < firstInitPos ? i : i + firstInitPos + numInits);
+    }
+
+    /// Return "true" if `opOperand` is an "input".
+    bool isDpsInput(::mlir::OpOperand *opOperand) {
+      assert(opOperand->getOwner() == $_op && "invalid operand");
+      return !$_op.isDpsInit(opOperand);
+    }
+
+    /// Return "true" if `opOperand` is an "init".
+    bool isDpsInit(::mlir::OpOperand *opOperand) {
+      assert(opOperand->getOwner() == $_op && "invalid operand");
+      ::mlir::OperandRange range = $_op.getDpsInits();
+      if (range.empty())
+        return false;
+      auto operandNumber = opOperand->getOperandNumber();
+      return operandNumber >= range.getBeginOperandIndex()
+          && operandNumber < range.getBeginOperandIndex() + range.size();
+    }
+
+    /// Return "true" if `opOperand` is a scalar value. A scalar is defined as
+    /// neither a MemRef nor a tensor value.
+    bool isScalar(::mlir::OpOperand *opOperand) {
+      assert(opOperand->getOwner() == $_op && "invalid operand");
+      return !::llvm::isa<MemRefType, TensorType>(opOperand->get().getType());
+    }
+
+    /// Return the OpResult that is tied to the given OpOperand.
+    ::mlir::OpResult getTiedOpResult(::mlir::OpOperand *opOperand) {
+      assert(opOperand->getOwner() == $_op && "invalid operand");
+      ::mlir::OperandRange range = $_op.getDpsInits();
+      assert(!range.empty() && "op has no inits");
+      int64_t resultIndex =
+          opOperand->getOperandNumber() - range.getBeginOperandIndex();
      assert(resultIndex >= 0 &&
-             resultIndex < $_op->getNumResults() );
+             resultIndex < $_op->getNumResults());
      return $_op->getResult(resultIndex);
-      }]
-    >,
-    InterfaceMethod<
-      /*desc=*/"Return the OpOperand that is tied to the given OpResult.",
-      /*retTy=*/"::mlir::OpOperand *",
-      /*methodName=*/"getTiedOpOperand",
-      /*args=*/(ins "::mlir::OpResult":$opResult),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        assert(opResult.getDefiningOp() == $_op.getOperation());
-        return $_op.getDpsInitOperand(opResult.getResultNumber());
-      }]
-    >,
-    //===------------------------------------------------------------------===//
-    // Other interface methods.
-    //===------------------------------------------------------------------===//
-    InterfaceMethod<
-      /*desc=*/[{
-        Return whether the op has buffer semantics. That is the case if the op
-        has no tensor operands and at least one memref operand.
-      }],
-      /*retTy=*/"bool",
-      /*methodName=*/"hasBufferSemantics",
-      /*args=*/(ins),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        // No tensors.
-        auto isTensor = [](Value v){
-          return ::llvm::isa<::mlir::RankedTensorType>(v.getType());
-        };
-        if (::llvm::any_of($_op->getOperands(), isTensor))
-          return false;
-        // At least one memref.
-        auto isMemref = [](Value v){
-          return ::llvm::isa<::mlir::MemRefType>(v.getType());
-        };
-        return llvm::any_of($_op->getOperands(), isMemref);
-      }]
-    >,
-    InterfaceMethod<
-      /*desc=*/[{
-        Return whether the op has tensor semantics. That is the case if the op
-        has no memref operands and at least one tensor operand.
-      }],
-      /*retTy=*/"bool",
-      /*methodName=*/"hasTensorSemantics",
-      /*args=*/(ins),
-      /*methodBody=*/"",
-      /*defaultImplementation=*/[{
-        // No memrefs.
-        auto isMemref = [](Value v){
-          return ::llvm::isa<::mlir::MemRefType>(v.getType());
-        };
-        if (::llvm::any_of($_op->getOperands(), isMemref))
-          return false;
-        // At least one tensor.
-        auto isTensor = [](Value v){
-          return ::llvm::isa<::mlir::RankedTensorType>(v.getType());
-        };
-        return llvm::any_of($_op->getOperands(), isTensor);
-      }]
-    >
-  ];
+    }
+
+    /// Return the OpOperand that is tied to the given OpResult.
+    ::mlir::OpOperand *getTiedOpOperand(::mlir::OpResult opResult) {
+      assert(opResult.getDefiningOp() == $_op && "invalid opresult");
+      return $_op.getDpsInitOperand(opResult.getResultNumber());
+    }
+
+    /// Return whether the op has buffer semantics. That is the case if the op
+    /// has no ranked tensor operands and at least one memref operand.
+    bool hasBufferSemantics() {
+      // No tensors.
+      auto isTensor = [](Value v){
+        return ::llvm::isa<::mlir::RankedTensorType>(v.getType());
+      };
+      if (::llvm::any_of($_op->getOperands(), isTensor))
+        return false;
+      // At least one memref.
+      auto isMemref = [](Value v){
+        return ::llvm::isa<::mlir::MemRefType>(v.getType());
+      };
+      return llvm::any_of($_op->getOperands(), isMemref);
+    }
+
+    /// Return whether the op has tensor semantics. That is the case if the op
+    /// has no memref operands and at least one ranked tensor operand.
+    bool hasTensorSemantics() {
+      // No memrefs.
+      auto isMemref = [](Value v){
+        return ::llvm::isa<::mlir::MemRefType>(v.getType());
+      };
+      if (::llvm::any_of($_op->getOperands(), isMemref))
+        return false;
+      // At least one tensor.
+      auto isTensor = [](Value v){
+        return ::llvm::isa<::mlir::RankedTensorType>(v.getType());
+      };
+      return llvm::any_of($_op->getOperands(), isTensor);
+    }
+  }];

   let verify = [{ return detail::verifyDestinationStyleOpInterface($_op); }];
   let verifyWithRegions = 1;
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
index 95a766b337571..ea50e1232a4c7 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
@@ -904,8 +904,8 @@ getResultsPositionInLoopsToShapeMap(LinalgOp &op) {
   int64_t outputRankSum = 0;
   for (OpOperand *input : op.getDpsInputOperands())
     inputRankSum += op.getRank(input);
-  for (OpOperand *output : op.getDpsInitOperands())
-    outputRankSum += op.getRank(output);
+  for (OpOperand &output : op.getDpsInitsMutable())
+    outputRankSum += op.getRank(&output);
   return {inputRankSum, inputRankSum + outputRankSum};
 }

@@ -948,19 +948,18 @@ LinalgOp::reifyResultShapes(OpBuilder &b,
                                           createFlatListOfOperandDims(b, loc));
   int64_t pos = 0;
   ArrayRef<AffineExpr> shapeExprs = resultShapesFromInputShapesMap.getResults();
-  for (OpOperand *opOperand : getDpsInitOperands()) {
+  for (OpOperand &opOperand : getDpsInitsMutable()) {
     SmallVector<OpFoldResult> shapes;
-    for (int64_t dim : llvm::seq<int64_t>(0, getRank(opOperand))) {
-      auto shapedType = llvm::cast<ShapedType>(opOperand->get().getType());
+    for (int64_t dim : llvm::seq<int64_t>(0, getRank(&opOperand))) {
+      auto shapedType = llvm::cast<ShapedType>(opOperand.get().getType());
       if (!shapedType.isDynamicDim(dim)) {
         // Static dim: Return IntegerAttr.
         shapes.push_back(b.getIndexAttr(shapedType.getDimSize(dim)));
       } else {
         // Dynamic dim: Return Value.
-        OpFoldResult ofr =
-            checkDimExpr.visit(shapeExprs[pos])
-                ? createOrFoldDimOp(b, loc, opOperand->get(), dim)
-                : allResultDimValues[pos];
+        OpFoldResult ofr = checkDimExpr.visit(shapeExprs[pos])
+                               ? createOrFoldDimOp(b, loc, opOperand.get(), dim)
+                               : allResultDimValues[pos];
         shapes.push_back(getValueOrCreateConstantIndexOp(b, loc, ofr));
       }
       pos++;
@@ -977,7 +976,7 @@ int64_t LinalgOp::getIndexingMapIndex(OpOperand *opOperand) {
   auto dpsIface = cast<DestinationStyleOpInterface>(*this->getOperation());
   if (!dpsIface.isDpsInput(opOperand))
     return operandNumber;
-  auto [start, end] = dpsIface.getDpsInitsPositionRange();
+  unsigned start = dpsIface.getDpsInits().getBeginOperandIndex();
   assert(!dpsIface.isDpsInit(opOperand));
   // Account for potential inputs that are not DPS and may not appear in
   // `indexingMaps`.
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index f87aa4559e10a..5871c59e1d35d 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -948,8 +948,7 @@ void GenericOp::print(OpAsmPrinter &p) {
   }

   // Printing is shared with named ops, except for the region and attributes
-  printCommonStructuredOpParts(p, SmallVector<Value>(getDpsInputOperands()),
-                               SmallVector<Value>(getDpsInitOperands()));
+  printCommonStructuredOpParts(p, getDpsInputs(), getDpsInits());

   genericAttrNames.push_back("operandSegmentSizes");
   genericAttrNamesSet.insert(genericAttrNames.back());
@@ -1044,20 +1043,20 @@ ParseResult GenericOp::parse(OpAsmParser &parser, OperationState &result) {

 static void getGenericEffectsImpl(
     SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
        &effects,
-    ValueRange results, const OpOperandVector &inputOperands,
-    const OpOperandVector &outputOperands) {
-  for (auto *operand : inputOperands) {
-    if (!llvm::isa<MemRefType>(operand->get().getType()))
+    ValueRange results, const ValueRange inputOperands,
+    ValueRange outputOperands) {
+  for (auto operand : inputOperands) {
+    if (!llvm::isa<MemRefType>(operand.getType()))
       continue;
-    effects.emplace_back(MemoryEffects::Read::get(), operand->get(),
+    effects.emplace_back(MemoryEffects::Read::get(), operand,
                         SideEffects::DefaultResource::get());
   }
-  for (auto *operand : outputOperands) {
-    if (!llvm::isa<MemRefType>(operand->get().getType()))
+  for (auto operand : outputOperands) {
+    if (!llvm::isa<MemRefType>(operand.getType()))
      continue;
-    effects.emplace_back(MemoryEffects::Read::get(), operand->get(),
+    effects.emplace_back(MemoryEffects::Read::get(), operand,
                         SideEffects::DefaultResource::get());
-    effects.emplace_back(MemoryEffects::Write::get(), operand->get(),
+    effects.emplace_back(MemoryEffects::Write::get(), operand,
                         SideEffects::DefaultResource::get());
   }
 }
@@ -1065,8 +1064,8 @@ static void getGenericEffectsImpl(
 void GenericOp::getEffects(
     SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
        &effects) {
-  getGenericEffectsImpl(effects, getOperation()->getResults(),
-                        getDpsInputOperands(), getDpsInitOperands());
+  getGenericEffectsImpl(effects, getOperation()->getResults(), getDpsInputs(),
+                        getDpsInits());
 }

 LogicalResult GenericOp::verify() { return success(); }
@@ -1345,8 +1344,7 @@ void MapOp::print(OpAsmPrinter &p) {
     printShortForm(p, payloadOp);
   }

-  printCommonStructuredOpParts(p, SmallVector<Value>(getDpsInputOperands()),
-                               SmallVector<Value>(getDpsInitOperands()));
+  printCommonStructuredOpParts(p, getDpsInputs(), getDpsInits());
   p.printOptionalAttrDict((*this)->getAttrs());

   if (!payloadOp) {
@@ -1414,8 +1412,8 @@ ArrayAttr MapOp::getIndexingMaps() {
 void MapOp::getEffects(
     SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
        &effects) {
-  getGenericEffectsImpl(effects, getOperation()->getResults(),
-                        getDpsInputOperands(), getDpsInitOperands());
+  getGenericEffectsImpl(effects, getOperation()->getResults(), getDpsInputs(),
+                        getDpsInits());
 }

 //===----------------------------------------------------------------------===//
@@ -1483,8 +1481,8 @@ ArrayAttr ReduceOp::getIndexingMaps() {
 void ReduceOp::getEffects(
     SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
        &effects) {
-  getGenericEffectsImpl(effects, getOperation()->getResults(),
-                        getDpsInputOperands(), getDpsInitOperands());
+  getGenericEffectsImpl(effects, getOperation()->getResults(), getDpsInputs(),
+                        getDpsInits());
 }

 static ParseResult parseDenseI64ArrayAttr(OpAsmParser &parser,
@@ -1547,8 +1545,7 @@ void ReduceOp::print(OpAsmPrinter &p) {
     printShortForm(p, payloadOp);
   }

-  printCommonStructuredOpParts(p, SmallVector<Value>(getDpsInputOperands()),
-                               SmallVector<Value>(getDpsInitOperands()));
+  printCommonStructuredOpParts(p, getDpsInputs(), getDpsInits());
   printDenseI64ArrayAttr(p, getDimensionsAttrName(), getDimensions());
   p.printOptionalAttrDict((*this)->getAttrs(), {getDimensionsAttrName()});
   if (!payloadOp) {
@@ -1638,11 +1635,10 @@ LogicalResult ReduceOp::verify() {
   }

   // Check that the last block arguments match the element type of the outputs.
-  for (auto [output, bbArg] :
-       llvm::zip(getDpsInitOperands(),
-                 block->getArguments().take_back(getNumDpsInits()))) {
+  for (auto [output, bbArg] : llvm::zip(
+           getDpsInits(), block->getArguments().take_back(getNumDpsInits()))) {
     auto outputElementType =
-        llvm::cast<ShapedType>(output->get().getType()).getElementType();
+        llvm::cast<ShapedType>(output.getType()).getElementType();
     if (outputElementType != bbArg.getType())
       return emitOpError()
              << "output element type " << outputElementType
@@ -1712,8 +1708,7 @@ void TransposeOp::getAsmResultNames(
 }

 void TransposeOp::print(OpAsmPrinter &p) {
-  printCommonStructuredOpParts(p, SmallVector<Value>(getDpsInputOperands()),
-                               SmallVector<Value>(getDpsInitOperands()));
+  printCommonStructuredOpParts(p, getDpsInputs(), getDpsInits());
   printDenseI64ArrayAttr(p, getPermutationAttrName(), getPermutation());
   p.printOptionalAttrDict((*this)->getAttrs(), {getPermutationAttrName()});
 }
@@ -1771,8 +1766,8 @@ ArrayAttr TransposeOp::getIndexingMaps() {
 void TransposeOp::getEffects(
     SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
        &effects) {
-  getGenericEffectsImpl(effects, getOperation()->getResults(),
-                        getDpsInputOperands(), getDpsInitOperands());
+  getGenericEffectsImpl(effects, getOperation()->getResults(), getDpsInputs(),
+                        getDpsInits());
 }

 //===----------------------------------------------------------------------===//
@@ -1826,8 +1821,7 @@ void BroadcastOp::getAsmResultNames(
 }

 void BroadcastOp::print(OpAsmPrinter &p) {
-  printCommonStructuredOpParts(p, SmallVector<Value>(getDpsInputOperands()),
-                               SmallVector<Value>(getDpsInitOperands()));
+  printCommonStructuredOpParts(p, getDpsInputs(), getDpsInits());
   printDenseI64ArrayAttr(p, getDimensionsAttrName(), getDimensions());
   p.printOptionalAttrDict((*this)->getAttrs(), {getDimensionsAttrName()});
 }
@@ -1894,8 +1888,8 @@ ArrayAttr BroadcastOp::getIndexingMaps() {
 void BroadcastOp::getEffects(
     SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
        &effects) {
-  getGenericEffectsImpl(effects, getOperation()->getResults(),
-                        getDpsInputOperands(), getDpsInitOperands());
+  getGenericEffectsImpl(effects, getOperation()->getResults(), getDpsInputs(),
+                        getDpsInits());
 }

 //===----------------------------------------------------------------------===//
@@ -2126,8 +2120,9 @@ struct FoldTensorCastConsumerOp : public OpRewritePattern<tensor::CastOp> {
     OpOperand *outOperand = linalgOp.getDpsInitOperand(resultNumber);
     Value newOperand =
         rewriter.create<tensor::CastOp>(loc, resultType, outOperand->get());
-    SmallVector<Value> newOperands{linalgOp.getDpsInputOperands()};
-    SmallVector<Value> outputOperands{linalgOp.getDpsInitOperands()};
+    SmallVector<Value> newOperands = linalgOp.getDpsInputs();
+    SmallVector<Value> outputOperands(linalgOp.getDpsInits().begin(),
+                                      linalgOp.getDpsInits().end());
     outputOperands[resultNumber] = newOperand;
     newOperands.append(outputOperands.begin(), outputOperands.end());
@@ -2399,8 +2394,8 @@ SoftmaxOp::reifyResultShapes(OpBuilder &b,
 void SoftmaxOp::getEffects(
     SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
        &effects) {
-  getGenericEffectsImpl(effects, getOperation()->getResults(),
-                        getDpsInputOperands(), getDpsInitOperands());
+  getGenericEffectsImpl(effects, getOperation()->getResults(), getDpsInputs(),
+                        getDpsInits());
 }

 // Helper functions for softmax decomposition.
diff --git a/mlir/lib/Dialect/Linalg/Transforms/BubbleUpExtractSlice.cpp b/mlir/lib/Dialect/Linalg/Transforms/BubbleUpExtractSlice.cpp
index 684d823d9f3df..28377279b7ce9 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/BubbleUpExtractSlice.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/BubbleUpExtractSlice.cpp
@@ -119,9 +119,9 @@ struct BubbleUpExtractSliceOpPattern
         /*omitPartialTileCheck=*/true);

     SmallVector<Type> resultTensorTypes;
-    for (OpOperand *opOperand : linalgOp.getDpsInitOperands())
+    for (OpOperand &opOperand : linalgOp.getDpsInitsMutable())
       resultTensorTypes.push_back(
-          tiledOperands[opOperand->getOperandNumber()].getType());
+          tiledOperands[opOperand.getOperandNumber()].getType());

     Operation *newOp =
         clone(rewriter, linalgOp, resultTensorTypes, tiledOperands);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/ConstantFold.cpp b/mlir/lib/Dialect/Linalg/Transforms/ConstantFold.cpp
index 6b06c32d22eba..4322b6e77eb8f 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ConstantFold.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ConstantFold.cpp
@@ -97,8 +97,8 @@ class FoldConstantBase : public OpRewritePattern<GenericOp> {
                    [](AffineMap map) { return map.isPermutation(); }))
       return failure();

-    for (OpOperand *operand : genericOp.getDpsInitOperands()) {
-      if (genericOp.payloadUsesValueFromOperand(operand))
+    for (OpOperand &operand : genericOp.getDpsInitsMutable()) {
+      if (genericOp.payloadUsesValueFromOperand(&operand))
         return failure();
     }
diff --git a/mlir/lib/Dialect/Linalg/Transforms/DecomposeLinalgOps.cpp b/mlir/lib/Dialect/Linalg/Transforms/DecomposeLinalgOps.cpp
index 42f87a16c92f3..eae03924fb5c7 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/DecomposeLinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/DecomposeLinalgOps.cpp
@@ -235,8 +235,8 @@ DecomposeLinalgOp::createResidualGenericOp(GenericOp genericOp,
     indexingMaps.push_back(
         peeledGenericOp.getIndexingMapMatchingResult(result));
   }
-  for (OpOperand *outOperand : genericOp.getDpsInitOperands())
-    indexingMaps.push_back(genericOp.getMatchingIndexingMap(outOperand));
+  for (OpOperand &outOperand : genericOp.getDpsInitsMutable())
+    indexingMaps.push_back(genericOp.getMatchingIndexingMap(&outOperand));

   auto indexingMapAttr = rewriter.getAffineMapArrayAttr(indexingMaps);
   return rewriter.create<GenericOp>(
@@ -263,8 +263,8 @@ DecomposeLinalgOp::matchAndRewrite(GenericOp genericOp,
         genericOp, "only operations with tensor semantics are handled");
   }

-  if (llvm::any_of(genericOp.getDpsInitOperands(), [&](OpOperand *outOperand) {
-        return !genericOp.getMatchingIndexingMap(outOperand).isPermutation();
+  if (llvm::any_of(genericOp.getDpsInitsMutable(), [&](OpOperand &outOperand) {
+        return !genericOp.getMatchingIndexingMap(&outOperand).isPermutation();
       })) {
     return rewriter.notifyMatchFailure(
         genericOp, "unhandled decomposition of generic op with out operand not "
diff --git a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
index fa901dfd1f87c..2e3610b7c08d9 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
@@ -88,12 +88,12 @@ struct MoveInitOperandsToInput : public OpRewritePattern<GenericOp> {
     if (genericOp.getNumParallelLoops() != genericOp.getNumLoops())
       return failure();

-    auto outputOperands = genericOp.getDpsInitOperands();
+    auto outputOperands = genericOp.getDpsInitsMutable();
     SetVector<OpOperand *> candidates;
-    for (OpOperand *op : outputOperands) {
-      if (genericOp.getMatchingBlockArgument(op).use_empty())
+    for (OpOperand &op : outputOperands) {
+      if (genericOp.getMatchingBlockArgument(&op).use_empty())
         continue;
-      candidates.insert(op);
+      candidates.insert(&op);
     }

     if (candidates.empty())
@@ -101,7 +101,7 @@ struct MoveInitOperandsToInput : public OpRewritePattern<GenericOp> {

     // Compute the modified indexing maps.
     int64_t origNumInput = genericOp.getNumDpsInputs();
-    SmallVector<Value> newInputOperands = genericOp.getDpsInputOperands();
+    SmallVector<Value> newInputOperands = genericOp.getDpsInputs();
     SmallVector<AffineMap> indexingMaps = genericOp.getIndexingMapsArray();
     SmallVector<AffineMap> newIndexingMaps;
     newIndexingMaps.append(indexingMaps.begin(),
@@ -114,7 +114,8 @@ struct MoveInitOperandsToInput : public OpRewritePattern<GenericOp> {
                           indexingMaps.end());

     Location loc = genericOp.getLoc();
-    SmallVector<Value> newOutputOperands = outputOperands;
+    SmallVector<Value> newOutputOperands =
+        llvm::to_vector(genericOp.getDpsInits());
     for (OpOperand *op : candidates) {
       OpBuilder::InsertionGuard guard(rewriter);
       rewriter.setInsertionPointAfterValue(op->get());
@@ -122,7 +123,7 @@ struct MoveInitOperandsToInput : public OpRewritePattern<GenericOp> {
       auto empty = rewriter.create<tensor::EmptyOp>(
           loc, tensor::getMixedSizes(rewriter, loc, op->get()), elemType);

-      auto [start, end] = genericOp.getDpsInitsPositionRange();
+      unsigned start = genericOp.getDpsInits().getBeginOperandIndex();
       newOutputOperands[op->getOperandNumber() - start] = empty.getResult();
     }

@@ -145,9 +146,9 @@ struct MoveInitOperandsToInput : public OpRewritePattern<GenericOp> {
       mapper.map(bbarg, block->addArgument(bbarg.getType(), loc));
     }

-    for (OpOperand *op : outputOperands) {
-      BlockArgument bbarg = genericOp.getMatchingBlockArgument(op);
-      if (candidates.count(op))
+    for (OpOperand &op : outputOperands) {
+      BlockArgument bbarg = genericOp.getMatchingBlockArgument(&op);
+      if (candidates.count(&op))
         block->addArgument(bbarg.getType(), loc);
       else
         mapper.map(bbarg, block->addArgument(bbarg.getType(), loc));
diff --git a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
index 6a01c24f02699..17346607fa9cd 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
@@ -340,23 +340,23 @@ mlir::linalg::fuseElementwiseOps(RewriterBase &rewriter,
   }

   // 6. Collect all of the producer outputs.
-  for (const auto &opOperand : llvm::enumerate(producer.getDpsInitOperands())) {
+  for (const auto &opOperand : llvm::enumerate(producer.getDpsInitsMutable())) {
     if (!preservedProducerResults.count(opOperand.index()))
       continue;

-    fusedOutputOperands.push_back(opOperand.value()->get());
+    fusedOutputOperands.push_back(opOperand.value().get());
     AffineMap map = getIndexingMapOfProducerOperandsInCoordinatesOfFusedOp(
-        opOperand.value(), producerResultIndexMap,
+        &opOperand.value(), producerResultIndexMap,
         consumer.getMatchingIndexingMap(fusedOperand));
     fusedIndexMaps.push_back(map);
-    fusedResultTypes.push_back(opOperand.value()->get().getType());
+    fusedResultTypes.push_back(opOperand.value().get().getType());
   }

   // 7. All of consumer's output operands (skip operands: added by the builder).
-  for (OpOperand *opOperand : consumer.getDpsInitOperands()) {
-    fusedOutputOperands.push_back(opOperand->get());
-    fusedIndexMaps.push_back(consumer.getMatchingIndexingMap(opOperand));
-    Type resultType = opOperand->get().getType();
+  for (OpOperand &opOperand : consumer.getDpsInitsMutable()) {
+    fusedOutputOperands.push_back(opOperand.get());
+    fusedIndexMaps.push_back(consumer.getMatchingIndexingMap(&opOperand));
+    Type resultType = opOperand.get().getType();
     if (!isa<MemRefType>(resultType))
       fusedResultTypes.push_back(resultType);
   }
@@ -812,12 +812,12 @@ fuseWithReshapeByExpansion(GenericOp genericOp, Operation *reshapeOp,
   Location loc = genericOp.getLoc();
   SmallVector<Value> outputs;
-  for (OpOperand *opOperand : genericOp.getDpsInitOperands()) {
-    AffineMap indexingMap = genericOp.getMatchingIndexingMap(opOperand);
-    auto opOperandType = cast<RankedTensorType>(opOperand->get().getType());
+  for (OpOperand &opOperand : genericOp.getDpsInitsMutable()) {
+    AffineMap indexingMap = genericOp.getMatchingIndexingMap(&opOperand);
+    auto opOperandType = cast<RankedTensorType>(opOperand.get().getType());
     RankedTensorType expandedOutputType =
         getExpandedType(opOperandType, indexingMap, expansionInfo);
-    if (expandedOutputType != opOperand->get().getType()) {
+    if (expandedOutputType != opOperand.get().getType()) {
       SmallVector<ReassociationIndices> reassociation =
           getReassociationForExpansion(indexingMap, expansionInfo);
       if (failed(reshapeLikeShapesAreCompatible(
@@ -829,10 +829,10 @@ fuseWithReshapeByExpansion(GenericOp genericOp, Operation *reshapeOp,
               /*isExpandingReshape=*/true)))
         return std::nullopt;
       outputs.push_back(rewriter.create<tensor::ExpandShapeOp>(
-          genericOp.getLoc(), expandedOutputType, opOperand->get(),
+          genericOp.getLoc(), expandedOutputType, opOperand.get(),
           reassociation));
     } else {
-      outputs.push_back(opOperand->get());
+      outputs.push_back(opOperand.get());
     }
   }
@@ -1495,9 +1495,9 @@ FailureOr<SmallVector<Value>> mlir::linalg::collapseGenericOpIterationDims(
   SmallVector<Value> outputOperands;
   resultTypes.reserve(genericOp.getNumDpsInits());
   outputOperands.reserve(genericOp.getNumDpsInits());
-  for (OpOperand *output : genericOp.getDpsInitOperands()) {
-    Value newOutput =
-        getCollapsedOpOperand(loc, genericOp, output, collapsingInfo, rewriter);
+  for (OpOperand &output : genericOp.getDpsInitsMutable()) {
+    Value newOutput = getCollapsedOpOperand(loc, genericOp, &output,
+                                            collapsingInfo, rewriter);
     outputOperands.push_back(newOutput);
     resultTypes.push_back(newOutput.getType());
   }
@@ -1703,9 +1703,9 @@ class FoldScalarOrSplatConstant : public OpRewritePattern<GenericOp> {
       fusedOperands.push_back(inputValue);
       fusedLocs.push_back(inputValue.getLoc());
     }
-    for (OpOperand *outputOperand : genericOp.getDpsInitOperands())
+    for (OpOperand &outputOperand : genericOp.getDpsInitsMutable())
       fusedIndexMaps.push_back(
-          genericOp.getMatchingIndexingMap(outputOperand));
+          genericOp.getMatchingIndexingMap(&outputOperand));

     // Check if the operation shapes to loops map is computable.
     if (!inversePermutation(concatAffineMaps(fusedIndexMaps))) {
@@ -1763,9 +1763,9 @@ struct RemoveOutsDependency : public OpRewritePattern<GenericOp> {
       rewriter.startRootUpdate(op);
       bool modifiedOutput = false;
       Location loc = op.getLoc();
-      for (OpOperand *opOperand : op.getDpsInitOperands()) {
-        if (!op.payloadUsesValueFromOperand(opOperand)) {
-          Value operandVal = opOperand->get();
+      for (OpOperand &opOperand : op.getDpsInitsMutable()) {
+        if (!op.payloadUsesValueFromOperand(&opOperand)) {
+          Value operandVal = opOperand.get();
           auto operandType = dyn_cast<RankedTensorType>(operandVal.getType());
           if (!operandType)
             continue;
@@ -1783,7 +1783,7 @@ struct RemoveOutsDependency : public OpRewritePattern<GenericOp> {
               tensor::getMixedSizes(rewriter, loc, operandVal);
           Value emptyTensor = rewriter.create<tensor::EmptyOp>(
               loc, mixedSizes, operandType.getElementType());
-          op->setOperand(opOperand->getOperandNumber(), emptyTensor);
+          op->setOperand(opOperand.getOperandNumber(), emptyTensor);
         }
       }
       if (!modifiedOutput) {
diff --git a/mlir/lib/Dialect/Linalg/Transforms/EliminateEmptyTensors.cpp b/mlir/lib/Dialect/Linalg/Transforms/EliminateEmptyTensors.cpp
index 4b754065e3189..5a8320bdb2875 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/EliminateEmptyTensors.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/EliminateEmptyTensors.cpp
@@ -22,17 +22,17 @@ using namespace mlir::linalg;
 /// Get an output operand that matches the given input operand and can be used
 /// to eliminate a tensor.empty op.
 static OpOperand *getUnusedOutOperand(LinalgOp op, OpOperand *in) {
-  for (OpOperand *operand : op.getDpsInitOperands()) {
+  for (OpOperand &operand : op.getDpsInitsMutable()) {
     // Operand must be unused.
-    if (op.payloadUsesValueFromOperand(operand))
+    if (op.payloadUsesValueFromOperand(&operand))
       continue;
     // Types must match.
-    if (operand->get().getType() != in->get().getType())
+    if (operand.get().getType() != in->get().getType())
       continue;
     // Indexing maps must match.
-    if (op.getMatchingIndexingMap(operand) != op.getMatchingIndexingMap(in))
+    if (op.getMatchingIndexingMap(&operand) != op.getMatchingIndexingMap(in))
      continue;
-    return operand;
+    return &operand;
   }
   return nullptr;
 }
diff --git a/mlir/lib/Dialect/Linalg/Transforms/EraseUnusedOperandsAndResults.cpp b/mlir/lib/Dialect/Linalg/Transforms/EraseUnusedOperandsAndResults.cpp
index c89fc5b9da8d3..4e54e48c914ae 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/EraseUnusedOperandsAndResults.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/EraseUnusedOperandsAndResults.cpp
@@ -184,10 +184,11 @@ struct DeduplicateAndRemoveDeadOperandsAndResults
     // If the op doesn't have tensor semantics or outputs should not be removed,
     // keep all the outputs as preserved.
     if (!genericOp.hasTensorSemantics() || !removeOutputs) {
-      for (const auto &en : llvm::enumerate(genericOp.getDpsInitOperands())) {
+      for (const auto &en : llvm::enumerate(genericOp.getDpsInitsMutable())) {
         origToNewPos[en.index()] = newOutputOperands.size();
-        newOutputOperands.push_back(en.value()->get());
-        newIndexingMaps.push_back(genericOp.getMatchingIndexingMap(en.value()));
+        newOutputOperands.push_back(en.value().get());
+        newIndexingMaps.push_back(
+            genericOp.getMatchingIndexingMap(&en.value()));
       }
       return origToNewPos;
     }
@@ -198,25 +199,25 @@ struct DeduplicateAndRemoveDeadOperandsAndResults
     // computation.
     auto yieldOp = cast<YieldOp>(genericOp.getBody()->getTerminator());
     for (const auto &outputOpOperand :
-         llvm::enumerate(genericOp.getDpsInitOperands())) {
-      OpResult result = genericOp.getTiedOpResult(outputOpOperand.value());
+         llvm::enumerate(genericOp.getDpsInitsMutable())) {
+      OpResult result = genericOp.getTiedOpResult(&outputOpOperand.value());
       AffineMap indexingMap =
-          genericOp.getMatchingIndexingMap(outputOpOperand.value());
-      auto key = std::make_tuple(outputOpOperand.value()->get(), indexingMap,
+          genericOp.getMatchingIndexingMap(&outputOpOperand.value());
+      auto key = std::make_tuple(outputOpOperand.value().get(), indexingMap,
                                  yieldOp->getOperand(outputOpOperand.index()));
       if (isResultValueDead(genericOp, result)) {
         // Check if the opoperand can be dropped without affecting loop
         // bound computation. Add the operand to the list of dropped op
         // operand for checking. If it cannot be dropped, need to pop the
         // value back.
-        droppedOpOperands.push_back(outputOpOperand.value());
+        droppedOpOperands.push_back(&outputOpOperand.value());
         if (genericOp.canOpOperandsBeDropped(droppedOpOperands)) {
           continue;
         }
         droppedOpOperands.pop_back();
       }

-      if (!genericOp.payloadUsesValueFromOperand(outputOpOperand.value())) {
+      if (!genericOp.payloadUsesValueFromOperand(&outputOpOperand.value())) {
         // The out operand can also be dropped if it is computed redundantly
         // by another result, the conditions for that are
         // - The same operand is used as the out operand
@@ -225,16 +226,16 @@ struct DeduplicateAndRemoveDeadOperandsAndResults
         auto it = dedupedOutpts.find(key);
         if (it != dedupedOutpts.end()) {
           origToNewPos[outputOpOperand.index()] = it->second;
-          droppedOpOperands.push_back(outputOpOperand.value());
+          droppedOpOperands.push_back(&outputOpOperand.value());
           continue;
         }
       }

       origToNewPos[outputOpOperand.index()] = newOutputOperands.size();
       dedupedOutpts[key] = newOutputOperands.size();
-      newOutputOperands.push_back(outputOpOperand.value()->get());
+      newOutputOperands.push_back(outputOpOperand.value().get());
       newIndexingMaps.push_back(
-          genericOp.getMatchingIndexingMap(outputOpOperand.value()));
+          genericOp.getMatchingIndexingMap(&outputOpOperand.value()));
     }

     return origToNewPos;
   }
@@ -254,7 +255,8 @@ struct DeduplicateAndRemoveDeadOperandsAndResults
     // Replace all arguments in the original op, with arguments from the
     // canonicalized op.
     auto updateReplacements =
-        [&](OpOperandVector &origOperands, OpOperandVector &newOperands,
+        [&](SmallVector<OpOperand *> &origOperands,
+            SmallVector<OpOperand *> &newOperands,
             const llvm::SmallDenseMap<unsigned, unsigned> &map) {
           for (const auto &origOperand : llvm::enumerate(origOperands)) {
             auto it = map.find(origOperand.index());
@@ -266,12 +268,17 @@ struct DeduplicateAndRemoveDeadOperandsAndResults
       }
     };

-    OpOperandVector origInputOperands = genericOp.getDpsInputOperands();
-    OpOperandVector newInputOperands = newOp.getDpsInputOperands();
+    SmallVector<OpOperand *> origInputOperands =
+        genericOp.getDpsInputOperands();
+    SmallVector<OpOperand *> newInputOperands = newOp.getDpsInputOperands();
     updateReplacements(origInputOperands, newInputOperands, origInsToNewInsPos);

-    OpOperandVector origOutputOperands = genericOp.getDpsInitOperands();
-    OpOperandVector newOutputOperands = newOp.getDpsInitOperands();
+    SmallVector<OpOperand *> origOutputOperands =
+        llvm::to_vector(llvm::map_range(genericOp.getDpsInitsMutable(),
+                                        [](OpOperand &o) { return &o; }));
+    SmallVector<OpOperand *> newOutputOperands =
+        llvm::to_vector(llvm::map_range(newOp.getDpsInitsMutable(),
+                                        [](OpOperand &o) { return &o; }));
     updateReplacements(origOutputOperands, newOutputOperands,
                        origOutsToNewOutsPos);
@@ -316,7 +323,7 @@ struct RemoveUnusedCycleInGenericOp : public OpRewritePattern<GenericOp> {
     bool hasRemovedCycles = false;
     // Iterate over output operands and remove any unused cycles.
     for (const auto &outputOpOperand :
-         llvm::enumerate(genericOp.getDpsInitOperands())) {
+         llvm::enumerate(genericOp.getDpsInits())) {

       // Check that result from out operand is dead.
       Value result = genericOp.getResult(outputOpOperand.index());
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
index ae0461965c478..d83ec725e0820 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
@@ -150,8 +150,8 @@ static LinalgOp fuse(OpBuilder &b, LinalgOp producer,
   // fully dynamic at construction time.
   SmallVector<Type> resultTypes;
   resultTypes.reserve(producer->getNumResults());
-  for (OpOperand *operand : producer.getDpsInitOperands()) {
-    auto tensorType = dyn_cast<RankedTensorType>(operand->get().getType());
+  for (Value operand : producer.getDpsInits()) {
+    auto tensorType = dyn_cast<RankedTensorType>(operand.getType());
     if (!tensorType)
       continue;
     unsigned rank = tensorType.getRank();
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp
index 2382903bf3785..1d9ce4144f998 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp
@@ -55,8 +55,8 @@ FailureOr<GenericOp> mlir::linalg::generalizeNamedOp(RewriterBase &rewriter,
   if (failed(generalizeNamedOpPrecondition(linalgOp)))
     return rewriter.notifyMatchFailure(linalgOp, "preconditions not met");

-  SmallVector<Value> inputs = linalgOp.getDpsInputOperands();
-  SmallVector<Value> outputs = linalgOp.getDpsInitOperands();
+  SmallVector<Value> inputs = linalgOp.getDpsInputs();
+  ValueRange outputs = linalgOp.getDpsInits();
   SmallVector<AffineMap> indexingMaps = linalgOp.getIndexingMapsArray();
   SmallVector<utils::IteratorType> iterators = linalgOp.getIteratorTypesArray();
   SmallVector<Type> resultTypes = linalgOp.hasTensorSemantics()
diff --git a/mlir/lib/Dialect/Linalg/Transforms/InlineScalarOperands.cpp b/mlir/lib/Dialect/Linalg/Transforms/InlineScalarOperands.cpp
index 9cf8f7c4e1ca0..cc39fe932c24b 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/InlineScalarOperands.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/InlineScalarOperands.cpp
@@ -54,8 +54,9 @@ struct InlineScalarOperands : public OpRewritePattern<GenericOp> {
     if (scalarOperands.empty())
       return failure();

-    for (OpOperand *opOperand : genericOp.getDpsInitOperands())
-      newIndexingMaps.emplace_back(genericOp.getMatchingIndexingMap(opOperand));
+    for (OpOperand &opOperand : genericOp.getDpsInitsMutable())
+      newIndexingMaps.emplace_back(
+          genericOp.getMatchingIndexingMap(&opOperand));

     Location loc = genericOp->getLoc();
     SmallVector<Value> outputOperands = genericOp.getOutputs();
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
index 72b684aaa864c..79e295b937b93 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
@@ -149,11 +149,12 @@ static void emitScalarImplementation(OpBuilder &b, Location loc,
         b.create<LoadOpTy>(loc, inputOperand->get(), indexing));
   }
   // 1.b. Emit load from output views.
-  for (OpOperand *outputOperand : linalgOp.getDpsInitOperands()) {
+  for (OpOperand &outputOperand : linalgOp.getDpsInitsMutable()) {
     SmallVector<Value> indexing = makeCanonicalAffineApplies(
-        b, loc, linalgOp.getMatchingIndexingMap(outputOperand), allIvsPlusDims);
+        b, loc, linalgOp.getMatchingIndexingMap(&outputOperand),
+        allIvsPlusDims);
     indexedValues.push_back(
-        b.create<LoadOpTy>(loc, outputOperand->get(), indexing));
+        b.create<LoadOpTy>(loc, outputOperand.get(), indexing));
   }

   // TODO: When a region inliner exists, use it.
@@ -161,13 +162,13 @@ static void emitScalarImplementation(OpBuilder &b, Location loc,
   // 3. Emit store.
   SmallVector<SmallVector<Value>, 8> indexing;
   SmallVector<Value> outputBuffers;
-  for (OpOperand *outputOperand : linalgOp.getDpsInitOperands()) {
-    if (!isa<MemRefType>(outputOperand->get().getType()))
+  for (OpOperand &outputOperand : linalgOp.getDpsInitsMutable()) {
+    if (!isa<MemRefType>(outputOperand.get().getType()))
       continue;
     indexing.push_back(makeCanonicalAffineApplies(
-        b, loc, linalgOp.getMatchingIndexingMap(outputOperand),
+        b, loc, linalgOp.getMatchingIndexingMap(&outputOperand),
         allIvsPlusDims));
-    outputBuffers.push_back(outputOperand->get());
+    outputBuffers.push_back(outputOperand.get());
   }
   inlineRegionAndEmitStore<LoadOpTy, StoreOpTy>(b, loc, linalgOp, indexedValues,
                                                 indexing, outputBuffers);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Padding.cpp b/mlir/lib/Dialect/Linalg/Transforms/Padding.cpp
index 8fe745d97ca3d..a74a3c2c50040 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Padding.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Padding.cpp
@@ -238,18 +238,18 @@ linalg::rewriteAsPaddedOp(RewriterBase &rewriter, LinalgOp opToPad,
              opToPad.getNumDpsInits() &&
         "expected matching number of results");
     for (auto it :
-         llvm::zip(paddedSubtensorResults, opToPad.getDpsInitOperands())) {
+         llvm::zip(paddedSubtensorResults, opToPad.getDpsInitsMutable())) {
       if (options.copyBackOp == LinalgPaddingOptions::CopyBackOp::LinalgCopy) {
         replacements.push_back(rewriter
                                    .create<linalg::CopyOp>(loc, std::get<0>(it),
-                                                           std::get<1>(it)->get())
+                                                           std::get<1>(it).get())
                                    .getResult(0));
       } else if (options.copyBackOp ==
                  LinalgPaddingOptions::CopyBackOp::
                      BufferizationMaterializeInDestination) {
         replacements.push_back(
             rewriter.create<bufferization::MaterializeInDestinationOp>(
-                loc, std::get<0>(it), std::get<1>(it)->get()));
+                loc, std::get<0>(it), std::get<1>(it).get()));
       } else {
         llvm_unreachable("unsupported copy back op");
       }
diff --git a/mlir/lib/Dialect/Linalg/Transforms/SplitReduction.cpp b/mlir/lib/Dialect/Linalg/Transforms/SplitReduction.cpp
index 6c859b6cb70eb..6559c86c9e0ff 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/SplitReduction.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/SplitReduction.cpp
@@ -192,8 +192,7 @@ FailureOr<SplitReductionResult> mlir::linalg::splitReduction(
   auto reduction = b.create<GenericOp>(
       loc, op->getResultTypes(), ValueRange({genericOp.getResult(0)}),
-      SmallVector<Value>{op.getDpsInitOperands()}, reductionMaps,
-      reductionIteratorTypes,
+      op.getDpsInits(), reductionMaps, reductionIteratorTypes,
       [reductionOp](OpBuilder &b, Location loc, ValueRange inputs) {
         Operation *clonedReductionOp = b.clone(*reductionOp);
         clonedReductionOp->setOperand(0, inputs[0]);
@@ -308,8 +307,8 @@ FailureOr<SplitReductionResult> mlir::linalg::splitReductionByScaling(
   SmallVector<Operation *> emptyOrAllocTensorOps;
   SmallVector<linalg::FillOp> fillOps;
   fillOps.reserve(op.getNumDpsInits());
-  for (auto it : llvm::zip(op.getDpsInitOperands(), neutralElements)) {
-    Value rankedTensor = std::get<0>(it)->get();
+  for (auto it : llvm::zip(op.getDpsInitsMutable(), neutralElements)) {
+    Value rankedTensor = std::get<0>(it).get();
     auto t = cast<RankedTensorType>(rankedTensor.getType());
     RankedTensorType newT = RankedTensorType::Builder(t).insertDim(
         reductionDimSize / splitFactor, insertSplitDimension);
@@ -345,13 +344,13 @@ FailureOr<SplitReductionResult> mlir::linalg::splitReductionByScaling(
   // TODO: a subset of these may not reduce along reducePos and should be
   // reindexed: k -> k * splitFactor + k', when multi-reduction support is
   // available.
-  for (OpOperand *o : op.getDpsInitOperands())
-    newMaps.push_back(insertParallelDim(op, *o, reductionDimPos,
+  for (OpOperand &o : op.getDpsInitsMutable())
+    newMaps.push_back(insertParallelDim(op, o, reductionDimPos,
                                        reductionDimSize / splitFactor));

   // Step 3. Handle operands.
   // Compute the new input tensors.
-  SmallVector<Value> newInputs(op.getDpsInputOperands());
+  SmallVector<Value> newInputs = op.getDpsInputs();
   // Add a single shape-only tensor to carry the dimensions without resorting to
   // more complex inversions.
   newInputs.push_back(b.create<tensor::EmptyOp>(
@@ -380,10 +379,10 @@ FailureOr<SplitReductionResult> mlir::linalg::splitReductionByScaling(
   // TODO: all results can be handled in a single GenericOp, when
   // multi-reduction support is available.
   SmallVector<Value> results;
-  for (auto it : llvm::zip(genericOp->getResults(), op.getDpsInitOperands(),
-                           combinerOps)) {
+  for (auto it :
+       llvm::zip(genericOp->getResults(), op.getDpsInits(), combinerOps)) {
     Value reindexedOutput = std::get<0>(it);
-    Value originalOutput = std::get<1>(it)->get();
+    Value originalOutput = std::get<1>(it);
     auto originalOutputType = cast<RankedTensorType>(originalOutput.getType());
     Operation *combinerOp = std::get<2>(it);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
index 2c6afd4c2e6d9..472e6fa3ab27b 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -368,14 +368,14 @@ static FailureOr<ForallTilingResult> tileToForallOpImpl(
   Operation *clonedOp = b.clone(*op.getOperation());
   auto destinationStyleOp = dyn_cast<DestinationStyleOpInterface>(clonedOp);
   if (destinationStyleOp) {
-    for (OpOperand *outOperand : destinationStyleOp.getDpsInitOperands()) {
+    for (OpOperand &outOperand : destinationStyleOp.getDpsInitsMutable()) {
       // Swap tensor inits with the corresponding block argument of the
       // scf.forall op. Memref inits remain as is.
-      if (outOperand->get().getType().isa<TensorType>()) {
-        auto *it = llvm::find(dest, outOperand->get());
+      if (outOperand.get().getType().isa<TensorType>()) {
+        auto *it = llvm::find(dest, outOperand.get());
         assert(it != dest.end() && "could not find destination tensor");
         unsigned destNum = std::distance(dest.begin(), it);
-        outOperand->set(destBbArgs[destNum]);
+        outOperand.set(destBbArgs[destNum]);
       }
     }
   }
@@ -702,8 +702,8 @@ FailureOr<linalg::ForallReductionTilingResult> linalg::tileReductionUsingForall(
     b.setInsertionPoint(forallOp.getTerminator());

     SmallVector<Value> tiledDpsInitOperands;
-    for (OpOperand *initOperand : destinationStyleOp.getDpsInitOperands()) {
-      auto *it = llvm::find(dest, initOperand->get());
+    for (Value initOperand : destinationStyleOp.getDpsInits()) {
+      auto *it = llvm::find(dest, initOperand);
       assert(it != dest.end() && "dest operand not found in dest");
       unsigned destNum = std::distance(dest.begin(), it);
       SmallVector<OpFoldResult> strides(numThreads.size(), b.getIndexAttr(1));
@@ -714,7 +714,7 @@ FailureOr<linalg::ForallReductionTilingResult> linalg::tileReductionUsingForall(
       outOffsets[reductionDim] = forallOp.getInductionVars().front();
       // TODO: use SubsetExtractOpInterface once it is available.
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
index 2c6afd4c2e6d9..472e6fa3ab27b 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -368,14 +368,14 @@ static FailureOr<ForallTilingResult> tileToForallOpImpl(
   Operation *clonedOp = b.clone(*op.getOperation());
   auto destinationStyleOp = dyn_cast<DestinationStyleOpInterface>(clonedOp);
   if (destinationStyleOp) {
-    for (OpOperand *outOperand : destinationStyleOp.getDpsInitOperands()) {
+    for (OpOperand &outOperand : destinationStyleOp.getDpsInitsMutable()) {
       // Swap tensor inits with the corresponding block argument of the
       // scf.forall op. Memref inits remain as is.
-      if (outOperand->get().getType().isa<TensorType>()) {
-        auto *it = llvm::find(dest, outOperand->get());
+      if (outOperand.get().getType().isa<TensorType>()) {
+        auto *it = llvm::find(dest, outOperand.get());
         assert(it != dest.end() && "could not find destination tensor");
         unsigned destNum = std::distance(dest.begin(), it);
-        outOperand->set(destBbArgs[destNum]);
+        outOperand.set(destBbArgs[destNum]);
       }
     }
   }
@@ -702,8 +702,8 @@ FailureOr<linalg::ForallReductionTilingResult> linalg::tileReductionUsingForall(
   b.setInsertionPoint(forallOp.getTerminator());
   SmallVector<Value> tiledDpsInitOperands;
-  for (OpOperand *initOperand : destinationStyleOp.getDpsInitOperands()) {
-    auto *it = llvm::find(dest, initOperand->get());
+  for (Value initOperand : destinationStyleOp.getDpsInits()) {
+    auto *it = llvm::find(dest, initOperand);
     assert(it != dest.end() && "dest operand not found in dest");
     unsigned destNum = std::distance(dest.begin(), it);
     SmallVector<OpFoldResult> strides(numThreads.size(), b.getIndexAttr(1));
@@ -714,7 +714,7 @@ FailureOr<linalg::ForallReductionTilingResult> linalg::tileReductionUsingForall(
     outOffsets[reductionDim] = forallOp.getInductionVars().front();
     // TODO: use SubsetExtractOpInterface once it is available.
     tiledDpsInitOperands.push_back(b.create<tensor::ExtractSliceOp>(
-        loc, cast<RankedTensorType>(initOperand->get().getType()),
+        loc, cast<RankedTensorType>(initOperand.getType()),
         destBbArgs[destNum], outOffsets, sizes, strides));
   }
@@ -724,9 +724,9 @@ FailureOr<linalg::ForallReductionTilingResult> linalg::tileReductionUsingForall(
   Operation *clonedOp = b.clone(*op.getOperation());
   b.updateRootInPlace(clonedOp, [&]() {
     for (auto [initOperandPtr, tiledInitValue] : llvm::zip_equal(
-             cast<DestinationStyleOpInterface>(clonedOp).getDpsInitOperands(),
+             cast<DestinationStyleOpInterface>(clonedOp).getDpsInitsMutable(),
              tiledDpsInitOperands)) {
-      initOperandPtr->set(tiledInitValue);
+      initOperandPtr.set(tiledInitValue);
     }
   });
diff --git a/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp b/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
index cedaa4344a295..5f566d8b10aef 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
@@ -335,7 +335,7 @@ struct LinalgOpPartialReductionInterface
     }

     // Step 1: Extract a slice of the input operands.
-    SmallVector<Value> valuesToTile = linalgOp.getDpsInputOperands();
+    SmallVector<Value> valuesToTile = linalgOp.getDpsInputs();
     SmallVector<Value, 4> tiledOperands = makeTiledShapes(
         b, loc, linalgOp, valuesToTile, offsets, sizes, {}, true);
@@ -397,8 +397,7 @@ struct LinalgOpPartialReductionInterface
     auto reduction = b.create<GenericOp>(
         loc, op->getResultTypes(), ValueRange({partialReduce[0]}),
-        SmallVector<Value>{linalgOp.getDpsInitOperands()}, reductionMaps,
-        reductionIteratorTypes,
+        linalgOp.getDpsInits(), reductionMaps, reductionIteratorTypes,
         [reductionOp](OpBuilder &b, Location loc, ValueRange inputs) {
           Operation *clonedReductionOp = b.clone(*reductionOp);
           clonedReductionOp->setOperand(0, inputs[0]);
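
Both rewiring loops in the Tiling.cpp hunks above mutate init operands in
place; since the loop variable is now an `OpOperand &`, the write becomes
`.set(...)` instead of `->set(...)`. A condensed sketch of that swap, assuming
`newInits` carries exactly one replacement value per init (names illustrative):

  // Replace every init of `op` with the matching value from `newInits`.
  static void replaceInits(DestinationStyleOpInterface op, ValueRange newInits) {
    for (auto [init, repl] : llvm::zip_equal(op.getDpsInitsMutable(), newInits))
      init.set(repl); // `init` is an OpOperand &; zip_equal asserts equal sizes
  }
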
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
index a2d219f669905..49fe937741c77 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
@@ -554,11 +554,13 @@ FailureOr<PackResult> linalg::pack(RewriterBase &rewriter,
   // Step 2. Propagate packing to all LinalgOp operands.
   SmallVector<Value> inputsAndInits, results;
-  for (const auto &operandsList :
-       {linalgOp.getDpsInputOperands(), linalgOp.getDpsInitOperands()}) {
-    for (OpOperand *opOperandPtr : operandsList) {
-      int64_t pos = opOperandPtr->getOperandNumber();
-      Value operand = opOperandPtr->get();
+  SmallVector<OpOperand *> initOperands = llvm::to_vector(llvm::map_range(
+      linalgOp.getDpsInitsMutable(), [](OpOperand &o) { return &o; }));
+  SmallVector<OpOperand *> inputOperands = linalgOp.getDpsInputOperands();
+  for (const auto &operandsList : {inputOperands, initOperands}) {
+    for (OpOperand *opOperand : operandsList) {
+      int64_t pos = opOperand->getOperandNumber();
+      Value operand = opOperand->get();
       SmallVector<int64_t> innerPos =
           listOfPackedOperandsDim.extractPackedDimsForOperand(pos);
       SmallVector<OpFoldResult> innerPackSizes =
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index 51a83c35e4cda..6d8a96a3ad23f 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -1450,12 +1450,12 @@ static LogicalResult reductionPreconditions(LinalgOp op) {
     LDBG("reduction precondition failed: no reduction iterator\n");
     return failure();
   }
-  for (OpOperand *opOperand : op.getDpsInitOperands()) {
-    AffineMap indexingMap = op.getMatchingIndexingMap(opOperand);
+  for (OpOperand &opOperand : op.getDpsInitsMutable()) {
+    AffineMap indexingMap = op.getMatchingIndexingMap(&opOperand);
     if (indexingMap.isPermutation())
       continue;

-    Operation *reduceOp = matchLinalgReduction(opOperand);
+    Operation *reduceOp = matchLinalgReduction(&opOperand);
     if (!reduceOp || !getCombinerOpKind(reduceOp)) {
       LDBG("reduction precondition failed: reduction detection failed\n");
       return failure();
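
The pack() hunk above shows the escape hatch for code that still wants one
homogeneous `SmallVector<OpOperand *>` covering inputs and inits alike: the
pointer list is rebuilt from the mutable range with llvm::map_range. Sketch
(the helper name is illustrative):

  // Rebuild the OpOperand* list older code obtained from getDpsInitOperands().
  static SmallVector<OpOperand *>
  getInitOperandPtrs(DestinationStyleOpInterface op) {
    return llvm::to_vector(llvm::map_range(
        op.getDpsInitsMutable(), [](OpOperand &o) { return &o; }));
  }
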
diff --git a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
index eb62dcaba139e..f177235acff72 100644
--- a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
@@ -174,8 +174,8 @@ bool isElementwise(LinalgOp op) {
     return false;

   // TODO: relax the restrictions on indexing map.
-  for (OpOperand *opOperand : op.getDpsInitOperands()) {
-    if (!op.getMatchingIndexingMap(opOperand).isPermutation())
+  for (OpOperand &opOperand : op.getDpsInitsMutable()) {
+    if (!op.getMatchingIndexingMap(&opOperand).isPermutation())
       return false;
   }
   return hasOnlyScalarElementwiseOp(op->getRegion(0));
@@ -321,10 +321,9 @@ void GenerateLoopNest<scf::ForOp>::doit(
   assert((procInfo.empty() || (procInfo.size() == loopRanges.size())) &&
          "expected as many entries for proc info as number of loops, even if "
          "they are null entries");
-  SmallVector<Value> iterArgInitValues = linalgOp.hasBufferSemantics()
-                                             ? SmallVector<Value>{}
-                                             : linalgOp.getDpsInitOperands();
-
+  SmallVector<Value> iterArgInitValues;
+  if (!linalgOp.hasBufferSemantics())
+    llvm::append_range(iterArgInitValues, linalgOp.getDpsInits());
   SmallVector<Value> lbs, ubs, steps;
   unpackRanges(b, loc, loopRanges, lbs, ubs, steps);
   LoopNest loopNest = mlir::scf::buildLoopNest(
@@ -334,7 +333,7 @@ void GenerateLoopNest<scf::ForOp>::doit(
                "expect the number of output tensors and iter args to match");
         SmallVector<Value> operandValuesToUse = linalgOp->getOperands();
         if (!iterArgs.empty()) {
-          operandValuesToUse = linalgOp.getDpsInputOperands();
+          operandValuesToUse = linalgOp.getDpsInputs();
           operandValuesToUse.append(iterArgs.begin(), iterArgs.end());
         }
         return bodyBuilderFn(b, loc, ivs, operandValuesToUse);
@@ -362,9 +361,9 @@ void GenerateLoopNest<AffineForOp>::doit(
                        ValueRange)>
         bodyBuilderFn,
     ArrayRef<linalg::ProcInfo> /*procInfo*/) {
-  SmallVector<Value> iterArgInitValues = linalgOp.hasBufferSemantics()
-                                             ? SmallVector<Value>{}
-                                             : linalgOp.getDpsInitOperands();
+  SmallVector<Value> iterArgInitValues;
+  if (!linalgOp.hasBufferSemantics())
+    llvm::append_range(iterArgInitValues, linalgOp.getDpsInits());
   assert(iterArgInitValues.empty() && "unexpected AffineForOp init values");
   SmallVector<Value> lbs, ubs, steps;
   unpackRanges(b, loc, loopRanges, lbs, ubs, steps);
@@ -529,9 +528,9 @@ void GenerateLoopNest<scf::ParallelOp>::doit(
                        ValueRange)>
         bodyBuilderFn,
     ArrayRef<linalg::ProcInfo> procInfo) {
-  SmallVector<Value> iterArgInitValues = linalgOp.hasBufferSemantics()
-                                             ? SmallVector<Value>{}
-                                             : linalgOp.getDpsInitOperands();
+  SmallVector<Value> iterArgInitValues;
+  if (!linalgOp.hasBufferSemantics())
+    llvm::append_range(iterArgInitValues, linalgOp.getDpsInits());
   assert(iterArgInitValues.empty() && "unexpected ParallelOp init values");
   // This function may be passed more iterator types than ranges.
   assert(iteratorTypes.size() >= loopRanges.size() &&
@@ -742,8 +741,8 @@ SmallVector<Type> getTensorOutputTypes(LinalgOp op, ValueRange operands) {
   if (op.hasBufferSemantics())
     return {};
   return llvm::to_vector(
-      llvm::map_range(op.getDpsInitOperands(), [&](OpOperand *opOperand) {
-        return operands[opOperand->getOperandNumber()].getType();
+      llvm::map_range(op.getDpsInitsMutable(), [&](OpOperand &opOperand) {
+        return operands[opOperand.getOperandNumber()].getType();
       }));
 }
@@ -756,10 +755,10 @@ SmallVector<Value> insertSlicesBack(OpBuilder &builder, Location loc,
   tensorResults.reserve(results.size());
   // Insert a insert_slice for each output tensor.
   unsigned resultIdx = 0;
-  for (OpOperand *opOperand : op.getDpsInitOperands()) {
+  for (OpOperand &opOperand : op.getDpsInitsMutable()) {
     // TODO: use an interface/adaptor to avoid leaking position in
     // `tiledOperands`.
-    Value outputTensor = operands[opOperand->getOperandNumber()];
+    Value outputTensor = operands[opOperand.getOperandNumber()];
     if (auto sliceOp = outputTensor.getDefiningOp<tensor::ExtractSliceOp>()) {
       Value inserted = builder.create<tensor::InsertSliceOp>(
           loc, sliceOp.getSource().getType(), results[resultIdx],
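
The three GenerateLoopNest<...>::doit hunks above all replace the same ternary,
which depended on OpOperandVector's implicit conversion to SmallVector<Value>;
with that conversion operator removed (see the DestinationStyleOpInterface.cpp
hunk below), the tensor-semantics case now appends explicitly. A minimal sketch
(illustrative helper, not MLIR API):

  // Inits become loop iter_args only for ops with tensor semantics.
  static SmallVector<Value> getIterArgInits(linalg::LinalgOp linalgOp) {
    SmallVector<Value> inits;
    if (!linalgOp.hasBufferSemantics())
      llvm::append_range(inits, linalgOp.getDpsInits());
    return inits;
  }
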
diff --git a/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp b/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
index 6cfba3fef15eb..ab59eac2ac4d6 100644
--- a/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
@@ -255,7 +255,8 @@ yieldTiledValues(RewriterBase &rewriter, ArrayRef<Value> initValues,
   for (auto tiledOp : tilingResult.tiledOps) {
     if (auto dstOp = dyn_cast<DestinationStyleOpInterface>(tiledOp)) {
       auto innerMostLoop = loops.back();
-      SmallVector<Value> tiledOpDestinationTensors = dstOp.getDpsInitOperands();
+      SmallVector<Value> tiledOpDestinationTensors =
+          llvm::to_vector(dstOp.getDpsInits());
       updateDestinationOperandsForTiledOp(rewriter, tiledOpDestinationTensors,
                                           innerMostLoop.getRegionIterArgs());
     }
@@ -447,7 +448,7 @@ mlir::scf::tileReductionUsingScf(RewriterBase &b,
   auto dstOp = cast<DestinationStyleOpInterface>(parallelOp);
   auto innerMostLoop = loops.back();
-  SmallVector<Value> destinationTensors = dstOp.getDpsInitOperands();
+  SmallVector<Value> destinationTensors = llvm::to_vector(dstOp.getDpsInits());
   assert(destinationTensors.size() ==
              innerMostLoop.getRegionIterArgs().size() &&
          "unexpected number of outputs");
diff --git a/mlir/lib/Interfaces/DestinationStyleOpInterface.cpp b/mlir/lib/Interfaces/DestinationStyleOpInterface.cpp
index f344ea656b247..4e5ef66887cad 100644
--- a/mlir/lib/Interfaces/DestinationStyleOpInterface.cpp
+++ b/mlir/lib/Interfaces/DestinationStyleOpInterface.cpp
@@ -14,14 +14,6 @@ namespace mlir {
 #include "mlir/Interfaces/DestinationStyleOpInterface.cpp.inc"
 } // namespace mlir

-OpOperandVector::operator SmallVector<Value>() {
-  SmallVector<Value> result;
-  result.reserve(this->size());
-  llvm::transform(*this, std::back_inserter(result),
-                  [](OpOperand *opOperand) { return opOperand->get(); });
-  return result;
-}
-
 namespace {
 size_t getNumTensorResults(Operation *op) {
   size_t numTensorResults = 0;
@@ -39,13 +31,13 @@ LogicalResult detail::verifyDestinationStyleOpInterface(Operation *op) {
       cast<DestinationStyleOpInterface>(op);

   SmallVector<OpOperand *> outputTensorOperands;
-  for (OpOperand *operand : dstStyleOp.getDpsInitOperands()) {
-    Type type = operand->get().getType();
+  for (OpOperand &operand : dstStyleOp.getDpsInitsMutable()) {
+    Type type = operand.get().getType();
     if (isa<RankedTensorType>(type)) {
-      outputTensorOperands.push_back(operand);
+      outputTensorOperands.push_back(&operand);
     } else if (!isa<MemRefType>(type)) {
       return op->emitOpError("expected that operand #")
-             << operand->getOperandNumber()
+             << operand.getOperandNumber()
              << " is a ranked tensor or a ranked memref";
     }
   }
diff --git a/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp b/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp
index 167ed80552067..e41481a9e5136 100644
--- a/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp
+++ b/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp
@@ -26,7 +26,7 @@ static void addOperands(Operation *op, SetVector<Value> &operandSet) {
     return;
   TypeSwitch<Operation *, void>(op)
       .Case<linalg::LinalgOp>([&](linalg::LinalgOp linalgOp) {
-        SmallVector<Value> inputOperands{linalgOp.getDpsInputOperands()};
+        SmallVector<Value> inputOperands = linalgOp.getDpsInputs();
         operandSet.insert(inputOperands.begin(), inputOperands.end());
       })
       .Default([&](Operation *operation) {
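
In the TileUsingInterface.cpp hunks above the destination-tensor vectors are
edited afterwards, so an owned copy is required, and the formerly implicit
OpOperandVector conversion (deleted in the interface hunk above) becomes an
explicit llvm::to_vector. Sketch (illustrative helper):

  // Owned copy of the init values, safe to mutate independently of the op.
  static SmallVector<Value> copyInits(DestinationStyleOpInterface dstOp) {
    return llvm::to_vector(dstOp.getDpsInits());
  }
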
diff --git a/mlir/test/lib/Dialect/Test/TestOps.td b/mlir/test/lib/Dialect/Test/TestOps.td
index 354a43c244e3b..6887f151eef76 100644
--- a/mlir/test/lib/Dialect/Test/TestOps.td
+++ b/mlir/test/lib/Dialect/Test/TestOps.td
@@ -2350,9 +2350,8 @@ def TestDestinationStyleOp :
   }];

   let extraClassDeclaration = [{
-    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-      int64_t numOperands = this->getNumOperands();
-      return {numOperands - getOutputs().size(), numOperands};
+    mlir::MutableOperandRange getDpsInitsMutable() {
+      return getOutputsMutable();
     }
   }];
 }
@@ -2412,9 +2411,8 @@ def TestLinalgConvOp :
       return "";
     }

-    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-      int64_t getNumOperands = this->getNumOperands();
-      return {getNumOperands - 1, getNumOperands};
+    mlir::MutableOperandRange getDpsInitsMutable() {
+      return getOutputsMutable();
     }
   }];
 }
@@ -2474,9 +2472,8 @@ def TestLinalgFillOp :
       return "";
     }

-    std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-      int64_t getNumOperands = this->getNumOperands();
-      return {getNumOperands - 1, getNumOperands};
+    mlir::MutableOperandRange getDpsInitsMutable() {
+      return getOutputsMutable();
     }
   }];
 }
diff --git a/mlir/test/mlir-linalg-ods-gen/test-linalg-ods-yaml-gen.yaml b/mlir/test/mlir-linalg-ods-gen/test-linalg-ods-yaml-gen.yaml
index 29075cb78a1cc..ab7b86125f693 100644
--- a/mlir/test/mlir-linalg-ods-gen/test-linalg-ods-yaml-gen.yaml
+++ b/mlir/test/mlir-linalg-ods-gen/test-linalg-ods-yaml-gen.yaml
@@ -82,9 +82,8 @@ structured_op: !LinalgStructuredOpConfig
 # ODS:    buildStructuredOp($_builder, $_state, resultTensorTypes,
 # ODS-NEXT:  attributes, Test1Op::getRegionBuilder())

-# ODS:      std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
-# ODS-NEXT:   int64_t getNumOperands = this->getNumOperands();
-# ODS-NEXT:   return {getNumOperands - getOutputs().size(), getNumOperands};
+# ODS:      MutableOperandRange getDpsInitsMutable() {
+# ODS-NEXT:   return getOutputsMutable()
 # ODS-NEXT: }

 # IMPL-LABEL:  void Test1Op::regionBuilder(ImplicitLocOpBuilder &b,
diff --git a/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp b/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp
index 61cb5537f1df6..664167e4f6c34 100644
--- a/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp
+++ b/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp
@@ -563,9 +563,8 @@ def {0} : LinalgStructuredBase_Op<"{1}", !listconcat([AttrSizedOperandSegments],
         return regionBuilder;
       }

-      std::pair<int64_t, int64_t> getDpsInitsPositionRange() {{
-        int64_t getNumOperands = this->getNumOperands();
-        return {{getNumOperands - getOutputs().size(), getNumOperands};
+      ::mlir::MutableOperandRange getDpsInitsMutable() {{
+        return getOutputsMutable();
       }

       // Generic methods.
@@ -661,7 +660,7 @@ void {0}::getEffects(SmallVectorImpl<
                          SideEffects::EffectInstance<MemoryEffects::Effect>> &effects) {{
   if (hasTensorSemantics()) return;
   getGenericEffectsImpl(effects,
-    getOperation()->getResults(), getDpsInputOperands(), getDpsInitOperands());
+    getOperation()->getResults(), getDpsInputs(), getDpsInits());
 }
 )FMT";
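
A closing note on the shape of the migration: getDpsInitsMutable() is now the
single hook an op author provides (hand-written above, TableGen-generated for
the YAML-backed ops), and the read-only accessors follow from it because
MutableOperandRange converts to OperandRange. A paraphrased sketch of that
derivation, not the interface's verbatim default methods:

  // Read-only views derived from the one op-provided hook.
  static OperandRange initsOf(DestinationStyleOpInterface op) {
    return op.getDpsInitsMutable(); // MutableOperandRange -> OperandRange
  }
  static int64_t numInitsOf(DestinationStyleOpInterface op) {
    return initsOf(op).size();
  }
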