From 582d1bf84aa8d52a63b8b11245fbc71be0bcb640 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Wed, 18 Dec 2024 15:32:54 -0800 Subject: [PATCH 1/3] [SelectionDAG] Rename SDNode::uses() to users(). This function is most often used in range based loops or algorithms where the iterator is implicitly dereferenced. The dereference returns an SDNode * of the user rather than SDUse * so users() is a better name. I've long beeen annoyed that we can't write a range based loop over SDUse when we need getOperandNo. I plan to rename use_iterator to user_iterator and add a use_iterator that returns SDUse* on dereference. This will make it more like IR. --- llvm/include/llvm/CodeGen/SelectionDAGNodes.h | 10 +++- llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 36 ++++++------ .../lib/CodeGen/SelectionDAG/InstrEmitter.cpp | 6 +- llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp | 4 +- .../CodeGen/SelectionDAG/LegalizeTypes.cpp | 4 +- .../CodeGen/SelectionDAG/ScheduleDAGFast.cpp | 2 +- .../SelectionDAG/ScheduleDAGSDNodes.cpp | 2 +- .../lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 14 ++--- .../CodeGen/SelectionDAG/SelectionDAGISel.cpp | 2 +- .../Target/AArch64/AArch64ISelDAGToDAG.cpp | 12 ++-- .../Target/AArch64/AArch64ISelLowering.cpp | 16 ++--- llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 10 ++-- llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 6 +- llvm/lib/Target/ARM/ARMISelLowering.cpp | 14 ++--- .../Target/Hexagon/HexagonISelDAGToDAGHVX.cpp | 6 +- .../LoongArch/LoongArchISelLowering.cpp | 2 +- llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp | 2 +- llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 8 +-- llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp | 10 ++-- llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 58 +++++++++---------- llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 2 +- llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 6 +- .../Target/SystemZ/SystemZISelDAGToDAG.cpp | 2 +- .../Target/SystemZ/SystemZISelLowering.cpp | 12 ++-- llvm/lib/Target/VE/VEISelLowering.cpp | 4 +- llvm/lib/Target/X86/X86ISelDAGToDAG.cpp | 4 +- llvm/lib/Target/X86/X86ISelLowering.cpp | 53 ++++++++--------- llvm/lib/Target/X86/X86ISelLoweringCall.cpp | 2 +- 28 files changed, 157 insertions(+), 152 deletions(-) diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h index 61f3c6329efce..b525872f9dd2a 100644 --- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h +++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h @@ -750,7 +750,7 @@ END_TWO_BYTE_PACK() bool use_empty() const { return UseList == nullptr; } /// Return true if there is exactly one use of this node. - bool hasOneUse() const { return hasSingleElement(uses()); } + bool hasOneUse() const { return hasSingleElement(users()); } /// Return the number of uses of this node. This method takes /// time proportional to the number of uses. @@ -844,10 +844,14 @@ END_TWO_BYTE_PACK() static use_iterator use_end() { return use_iterator(nullptr); } - inline iterator_range uses() { + // Dereferencing use_iterator returns the user SDNode* making it closer to a + // user_iterator thus this function is called users() to reflect that. + // FIXME: Rename to user_iterator and introduce a use_iterator that returns + // SDUse*. 
+ inline iterator_range users() { return make_range(use_begin(), use_end()); } - inline iterator_range uses() const { + inline iterator_range users() const { return make_range(use_begin(), use_end()); } diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index 10fc8eecaff90..9b0dc853ac037 100644 --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -202,7 +202,7 @@ namespace { /// When an instruction is simplified, add all users of the instruction to /// the work lists because they might get more simplified now. void AddUsersToWorklist(SDNode *N) { - for (SDNode *Node : N->uses()) + for (SDNode *Node : N->users()) AddToWorklist(Node); } @@ -1113,7 +1113,7 @@ bool DAGCombiner::reassociationCanBreakAddressingModePattern(unsigned Opc, : N1.getConstantOperandVal(1))); if (Opc == ISD::SUB) ScalableOffset = -ScalableOffset; - if (all_of(N->uses(), [&](SDNode *Node) { + if (all_of(N->users(), [&](SDNode *Node) { if (auto *LoadStore = dyn_cast(Node); LoadStore && LoadStore->getBasePtr().getNode() == N) { TargetLoweringBase::AddrMode AM; @@ -1151,7 +1151,7 @@ bool DAGCombiner::reassociationCanBreakAddressingModePattern(unsigned Opc, return false; const int64_t CombinedValue = CombinedValueIntVal.getSExtValue(); - for (SDNode *Node : N->uses()) { + for (SDNode *Node : N->users()) { if (auto *LoadStore = dyn_cast(Node)) { // Is x[offset2] already not a legal addressing mode? If so then // reassociating the constants breaks nothing (we test offset2 because @@ -1176,7 +1176,7 @@ bool DAGCombiner::reassociationCanBreakAddressingModePattern(unsigned Opc, if (GA->getOpcode() == ISD::GlobalAddress && TLI.isOffsetFoldingLegal(GA)) return false; - for (SDNode *Node : N->uses()) { + for (SDNode *Node : N->users()) { auto *LoadStore = dyn_cast(Node); if (!LoadStore) return false; @@ -4720,7 +4720,7 @@ SDValue DAGCombiner::useDivRem(SDNode *Node) { SDValue Op0 = Node->getOperand(0); SDValue Op1 = Node->getOperand(1); SDValue combined; - for (SDNode *User : Op0->uses()) { + for (SDNode *User : Op0->users()) { if (User == Node || User->getOpcode() == ISD::DELETED_NODE || User->use_empty()) continue; @@ -10369,7 +10369,7 @@ static SDValue combineShiftToMULH(SDNode *N, const SDLoc &DL, SelectionDAG &DAG, unsigned MulLoHiOp = IsSignExt ? ISD::SMUL_LOHI : ISD::UMUL_LOHI; if (!ShiftOperand.hasOneUse() && TLI.isOperationLegalOrCustom(MulLoHiOp, NarrowVT) && - llvm::any_of(ShiftOperand->uses(), UserOfLowerBits)) { + llvm::any_of(ShiftOperand->users(), UserOfLowerBits)) { return SDValue(); } @@ -13570,7 +13570,7 @@ static SDValue tryToFoldExtOfLoad(SelectionDAG &DAG, DAGCombiner &Combiner, if (NonNegZExt) { assert(ExtLoadType == ISD::ZEXTLOAD && ExtOpc == ISD::ZERO_EXTEND && "Unexpected load type or opcode"); - for (SDNode *User : N0->uses()) { + for (SDNode *User : N0->users()) { if (User->getOpcode() == ISD::SETCC) { ISD::CondCode CC = cast(User->getOperand(2))->get(); if (ISD::isSignedIntSetCC(CC)) { @@ -17673,7 +17673,7 @@ SDValue DAGCombiner::combineRepeatedFPDivisors(SDNode *N) { // Find all FDIV users of the same divisor. // Use a set because duplicates may be present in the user list. SetVector Users; - for (auto *U : N1->uses()) { + for (auto *U : N1->users()) { if (U->getOpcode() == ISD::FDIV && U->getOperand(1) == N1) { // Skip X/sqrt(X) that has not been simplified to sqrt(X) yet. 
if (U->getOperand(1).getOpcode() == ISD::FSQRT && @@ -18965,7 +18965,7 @@ bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) { // Now check for #3 and #4. bool RealUse = false; - for (SDNode *Use : Ptr->uses()) { + for (SDNode *Use : Ptr->users()) { if (Use == N) continue; if (SDNode::hasPredecessorHelper(Use, Visited, Worklist, MaxSteps)) @@ -19089,7 +19089,7 @@ static bool shouldCombineToPostInc(SDNode *N, SDValue Ptr, SDNode *PtrUse, SmallPtrSet Visited; unsigned MaxSteps = SelectionDAG::getHasPredecessorMaxSteps(); - for (SDNode *Use : BasePtr->uses()) { + for (SDNode *Use : BasePtr->users()) { if (Use == Ptr.getNode()) continue; @@ -19110,7 +19110,7 @@ static bool shouldCombineToPostInc(SDNode *N, SDValue Ptr, SDNode *PtrUse, // If all the uses are load / store addresses, then don't do the // transformation. if (Use->getOpcode() == ISD::ADD || Use->getOpcode() == ISD::SUB) { - for (SDNode *UseUse : Use->uses()) + for (SDNode *UseUse : Use->users()) if (canFoldInAddressingMode(Use, UseUse, DAG, TLI)) return false; } @@ -19136,7 +19136,7 @@ static SDNode *getPostIndexedLoadStoreOp(SDNode *N, bool &IsLoad, // nor a successor of N. Otherwise, if Op is folded that would // create a cycle. unsigned MaxSteps = SelectionDAG::getHasPredecessorMaxSteps(); - for (SDNode *Op : Ptr->uses()) { + for (SDNode *Op : Ptr->users()) { // Check for #1. if (!shouldCombineToPostInc(N, Ptr, Op, BasePtr, Offset, AM, DAG, TLI)) continue; @@ -20515,7 +20515,7 @@ bool DAGCombiner::isMulAddWithConstProfitable(SDNode *MulNode, SDValue AddNode, return true; // Walk all the users of the constant with which we're multiplying. - for (SDNode *Use : ConstNode->uses()) { + for (SDNode *Use : ConstNode->users()) { if (Use == MulNode) // This use is the one we're on right now. Skip it. continue; @@ -22902,7 +22902,7 @@ bool DAGCombiner::refineExtractVectorEltIntoMultipleNarrowExtractVectorElts( // Did we fail to model any of the users of the Producer? bool ProducerIsLeaf = false; // Look at each user of this Producer. - for (SDNode *User : E.Producer->uses()) { + for (SDNode *User : E.Producer->users()) { switch (User->getOpcode()) { // TODO: support ISD::BITCAST // TODO: support ISD::ANY_EXTEND @@ -23176,13 +23176,13 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) { // If only EXTRACT_VECTOR_ELT nodes use the source vector we can // simplify it based on the (valid) extraction indices. - if (llvm::all_of(VecOp->uses(), [&](SDNode *Use) { + if (llvm::all_of(VecOp->users(), [&](SDNode *Use) { return Use->getOpcode() == ISD::EXTRACT_VECTOR_ELT && Use->getOperand(0) == VecOp && isa(Use->getOperand(1)); })) { APInt DemandedElts = APInt::getZero(NumElts); - for (SDNode *Use : VecOp->uses()) { + for (SDNode *Use : VecOp->users()) { auto *CstElt = cast(Use->getOperand(1)); if (CstElt->getAPIntValue().ult(NumElts)) DemandedElts.setBit(CstElt->getZExtValue()); @@ -27302,7 +27302,7 @@ SDValue DAGCombiner::visitGET_FPENV_MEM(SDNode *N) { // Check if the memory, where FP state is written to, is used only in a single // load operation. LoadSDNode *LdNode = nullptr; - for (auto *U : Ptr->uses()) { + for (auto *U : Ptr->users()) { if (U == N) continue; if (auto *Ld = dyn_cast(U)) { @@ -27352,7 +27352,7 @@ SDValue DAGCombiner::visitSET_FPENV_MEM(SDNode *N) { // Check if the address of FP state is used also in a store operation only. 
StoreSDNode *StNode = nullptr; - for (auto *U : Ptr->uses()) { + for (auto *U : Ptr->users()) { if (U == N) continue; if (auto *St = dyn_cast(U)) { diff --git a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp index 9c7085cc7e7a8..8e313fb21eede 100644 --- a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp @@ -105,7 +105,7 @@ void InstrEmitter::EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone, if (TLI->isTypeLegal(VT)) UseRC = TLI->getRegClassFor(VT, Node->isDivergent()); - for (SDNode *User : Node->uses()) { + for (SDNode *User : Node->users()) { bool Match = true; if (User->getOpcode() == ISD::CopyToReg && User->getOperand(2).getNode() == Node && @@ -225,7 +225,7 @@ void InstrEmitter::CreateVirtualRegisters(SDNode *Node, } if (!VRBase && !IsClone && !IsCloned) - for (SDNode *User : Node->uses()) { + for (SDNode *User : Node->users()) { if (User->getOpcode() == ISD::CopyToReg && User->getOperand(2).getNode() == Node && User->getOperand(2).getResNo() == i) { @@ -502,7 +502,7 @@ void InstrEmitter::EmitSubregNode(SDNode *Node, VRBaseMapType &VRBaseMap, // If the node is only used by a CopyToReg and the dest reg is a vreg, use // the CopyToReg'd destination register instead of creating a new vreg. - for (SDNode *User : Node->uses()) { + for (SDNode *User : Node->users()) { if (User->getOpcode() == ISD::CopyToReg && User->getOperand(2).getNode() == Node) { Register DestReg = cast(User->getOperand(1))->getReg(); diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp index ca87168929f96..595a410101eca 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp @@ -1394,7 +1394,7 @@ SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) { Visited.insert(Op.getNode()); Worklist.push_back(Idx.getNode()); SDValue StackPtr, Ch; - for (SDNode *User : Vec.getNode()->uses()) { + for (SDNode *User : Vec.getNode()->users()) { if (StoreSDNode *ST = dyn_cast(User)) { if (ST->isIndexed() || ST->isTruncatingStore() || ST->getValue() != Vec) @@ -2293,7 +2293,7 @@ static bool useSinCos(SDNode *Node) { ? ISD::FCOS : ISD::FSIN; SDValue Op0 = Node->getOperand(0); - for (const SDNode *User : Op0.getNode()->uses()) { + for (const SDNode *User : Op0.getNode()->users()) { if (User == Node) continue; // The other user might have been turned into sincos already. diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp index cb6d3fe4db8a4..c7d29ec1a836c 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp @@ -189,7 +189,7 @@ void DAGTypeLegalizer::PerformExpensiveChecks() { #ifndef NDEBUG // Checked that NewNodes are only used by other NewNodes. 
for (SDNode *N : NewNodes) { - for (SDNode *U : N->uses()) + for (SDNode *U : N->users()) assert(U->getNodeId() == NewNode && "NewNode used by non-NewNode!"); } #endif @@ -399,7 +399,7 @@ bool DAGTypeLegalizer::run() { assert(N->getNodeId() == ReadyToProcess && "Node ID recalculated?"); N->setNodeId(Processed); - for (SDNode *User : N->uses()) { + for (SDNode *User : N->users()) { int NodeId = User->getNodeId(); // This node has two options: it can either be a new node or its Node ID diff --git a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp index 70a7438440191..26eba4b257fb9 100644 --- a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp @@ -756,7 +756,7 @@ void ScheduleDAGLinearize::Schedule() { // Glue user must be scheduled together with the glue operand. So other // users of the glue operand must be treated as its users. SDNode *ImmGUser = Glue->getGluedUser(); - for (const SDNode *U : Glue->uses()) + for (const SDNode *U : Glue->users()) if (U == ImmGUser) --Degree; GUser->setNodeId(UDegree + Degree); diff --git a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp index 31939ae5922ec..2e59dbf2f7028 100644 --- a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp @@ -388,7 +388,7 @@ void ScheduleDAGSDNodes::BuildSchedUnits() { // There are either zero or one users of the Glue result. bool HasGlueUse = false; - for (SDNode *U : N->uses()) + for (SDNode *U : N->users()) if (GlueVal.isOperandOf(U)) { HasGlueUse = true; assert(N->getNodeId() == -1 && "Node already inserted!"); diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 0fb5c4d5c4cb9..bd9e5d4dce8ec 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -2556,7 +2556,7 @@ bool SelectionDAG::expandMultipleResultFPLibCall( // destination pointers can be used instead of creating stack allocations. SDValue StoresInChain; SmallVector ResultStores(NumResults); - for (SDNode *User : Node->uses()) { + for (SDNode *User : Node->users()) { if (!ISD::isNormalStore(User)) continue; auto *ST = cast(User); @@ -7933,7 +7933,7 @@ SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) { ArgChains.push_back(Chain); // Add a chain value for each stack argument. - for (SDNode *U : getEntryNode().getNode()->uses()) + for (SDNode *U : getEntryNode().getNode()->users()) if (LoadSDNode *L = dyn_cast(U)) if (FrameIndexSDNode *FI = dyn_cast(L->getBasePtr())) if (FI->getIndex() < 0) @@ -11926,7 +11926,7 @@ void SelectionDAG::updateDivergence(SDNode *N) { bool IsDivergent = calculateDivergence(N); if (N->SDNodeBits.IsDivergent != IsDivergent) { N->SDNodeBits.IsDivergent = IsDivergent; - llvm::append_range(Worklist, N->uses()); + llvm::append_range(Worklist, N->users()); } } while (!Worklist.empty()); } @@ -11942,7 +11942,7 @@ void SelectionDAG::CreateTopologicalOrder(std::vector &Order) { } for (size_t I = 0; I != Order.size(); ++I) { SDNode *N = Order[I]; - for (auto *U : N->uses()) { + for (auto *U : N->users()) { unsigned &UnsortedOps = Degree[U]; if (0 == --UnsortedOps) Order.push_back(U); @@ -12071,7 +12071,7 @@ unsigned SelectionDAG::AssignTopologicalOrder() { checkForCycles(N, this); // N is in sorted position, so all its uses have one less operand // that needs to be sorted. 
- for (SDNode *P : N->uses()) { + for (SDNode *P : N->users()) { unsigned Degree = P->getNodeId(); assert(Degree != 0 && "Invalid node degree"); --Degree; @@ -12489,7 +12489,7 @@ bool SDNode::hasAnyUseOfValue(unsigned Value) const { /// isOnlyUserOf - Return true if this node is the only use of N. bool SDNode::isOnlyUserOf(const SDNode *N) const { bool Seen = false; - for (const SDNode *User : N->uses()) { + for (const SDNode *User : N->users()) { if (User == this) Seen = true; else @@ -12502,7 +12502,7 @@ bool SDNode::isOnlyUserOf(const SDNode *N) const { /// Return true if the only users of N are contained in Nodes. bool SDNode::areOnlyUsersOf(ArrayRef Nodes, const SDNode *N) { bool Seen = false; - for (const SDNode *User : N->uses()) { + for (const SDNode *User : N->users()) { if (llvm::is_contained(Nodes, User)) Seen = true; else diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp index 35aa7b87bc3b7..9147fb1c2badf 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp @@ -1225,7 +1225,7 @@ void SelectionDAGISel::EnforceNodeIdInvariant(SDNode *Node) { while (!Nodes.empty()) { SDNode *N = Nodes.pop_back_val(); - for (auto *U : N->uses()) { + for (auto *U : N->users()) { auto UId = U->getNodeId(); if (UId > 0) { InvalidateNodeId(U); diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp index 5df61b3722037..f831f8de70547 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp @@ -679,9 +679,9 @@ static bool isWorthFoldingSHL(SDValue V) { // operation. If yes, do not try to fold this node into the address // computation, since the computation will be kept. const SDNode *Node = V.getNode(); - for (SDNode *UI : Node->uses()) + for (SDNode *UI : Node->users()) if (!isa(*UI)) - for (SDNode *UII : UI->uses()) + for (SDNode *UII : UI->users()) if (!isa(*UII)) return false; return true; @@ -1012,7 +1012,7 @@ bool AArch64DAGToDAGISel::SelectArithUXTXRegister(SDValue N, SDValue &Reg, /// a single pseudo-instruction for an ADRP/ADD pair so over-aggressive folding /// leads to duplicated ADRP instructions. static bool isWorthFoldingADDlow(SDValue N) { - for (auto *Use : N->uses()) { + for (auto *Use : N->users()) { if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE && Use->getOpcode() != ISD::ATOMIC_LOAD && Use->getOpcode() != ISD::ATOMIC_STORE) @@ -1245,7 +1245,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size, // operation. If yes, do not try to fold this node into the address // computation, since the computation will be kept. const SDNode *Node = N.getNode(); - for (SDNode *UI : Node->uses()) { + for (SDNode *UI : Node->users()) { if (!isa(*UI)) return false; } @@ -1329,7 +1329,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size, // operation. If yes, do not try to fold this node into the address // computation, since the computation will be kept. 
const SDNode *Node = N.getNode(); - for (SDNode *UI : Node->uses()) { + for (SDNode *UI : Node->users()) { if (!isa(*UI)) return false; } @@ -3031,7 +3031,7 @@ static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) { } APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0); - for (SDNode *Node : Op.getNode()->uses()) { + for (SDNode *Node : Op.getNode()->users()) { // A use cannot produce useful bits APInt UsefulBitsForUse = APInt(UsefulBits); getUsefulBitsForUse(Node, UsefulBitsForUse, Op, Depth); diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp index cb6ba06bd4425..5865dbe1307ba 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -6464,7 +6464,7 @@ bool AArch64TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const { return false; unsigned NumExtMaskedLoads = 0; - for (auto *U : Ld->getMask()->uses()) + for (auto *U : Ld->getMask()->users()) if (isa(U)) NumExtMaskedLoads++; @@ -8559,7 +8559,7 @@ SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain, ArgChains.push_back(Chain); // Add a chain value for each stack argument corresponding - for (SDNode *U : DAG.getEntryNode().getNode()->uses()) + for (SDNode *U : DAG.getEntryNode().getNode()->users()) if (LoadSDNode *L = dyn_cast(U)) if (FrameIndexSDNode *FI = dyn_cast(L->getBasePtr())) if (FI->getIndex() < 0) { @@ -19586,7 +19586,7 @@ static SDValue performANDSETCCCombine(SDNode *N, // Checks if the current node (N) is used by any SELECT instruction and // returns an empty SDValue to avoid applying the optimization to prevent // incorrect results - for (auto U : N->uses()) + for (auto U : N->users()) if (U->getOpcode() == ISD::SELECT) return SDValue(); @@ -24761,7 +24761,7 @@ static SDValue tryToWidenSetCCOperands(SDNode *Op, SelectionDAG &DAG) { EVT UseMVT = FirstUse->getValueType(0); if (UseMVT.getScalarSizeInBits() <= Op0MVT.getScalarSizeInBits()) return SDValue(); - if (any_of(Op->uses(), [&UseMVT](const SDNode *N) { + if (any_of(Op->users(), [&UseMVT](const SDNode *N) { return N->getOpcode() != ISD::VSELECT || N->getValueType(0) != UseMVT; })) return SDValue(); @@ -25335,7 +25335,7 @@ static SDValue performGlobalAddressCombine(SDNode *N, SelectionDAG &DAG, return SDValue(); uint64_t MinOffset = -1ull; - for (SDNode *N : GN->uses()) { + for (SDNode *N : GN->users()) { if (N->getOpcode() != ISD::ADD) return SDValue(); auto *C = dyn_cast(N->getOperand(0)); @@ -26054,7 +26054,7 @@ static SDValue tryCombineMULLWithUZP1(SDNode *N, HasFoundMULLow = false; // Find ExtractLow. 
- for (SDNode *User : ExtractHighSrcVec.getNode()->uses()) { + for (SDNode *User : ExtractHighSrcVec.getNode()->users()) { if (User == ExtractHigh.getNode()) continue; @@ -26561,7 +26561,7 @@ bool AArch64TargetLowering::isUsedByReturnOnly(SDNode *N, return false; bool HasRet = false; - for (SDNode *Node : Copy->uses()) { + for (SDNode *Node : Copy->users()) { if (Node->getOpcode() != AArch64ISD::RET_GLUE) return false; HasRet = true; @@ -29650,7 +29650,7 @@ Value *AArch64TargetLowering::createComplexDeinterleavingIR( bool AArch64TargetLowering::preferScalarizeSplat(SDNode *N) const { unsigned Opc = N->getOpcode(); if (ISD::isExtOpcode(Opc)) { - if (any_of(N->uses(), + if (any_of(N->users(), [&](SDNode *Use) { return Use->getOpcode() == ISD::MUL; })) return false; } diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp index 48e9af9fe507f..c129759f3d3c7 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp @@ -772,7 +772,7 @@ bool AMDGPUTargetLowering::allUsesHaveSourceMods(const SDNode *N, assert(!N->use_empty()); // XXX - Should this limit number of uses to check? - for (const SDNode *U : N->uses()) { + for (const SDNode *U : N->users()) { if (!hasSourceMods(U)) return false; @@ -1348,7 +1348,7 @@ SDValue AMDGPUTargetLowering::addTokenForArgument(SDValue Chain, ArgChains.push_back(Chain); // Add a chain value for each stack argument corresponding - for (SDNode *U : DAG.getEntryNode().getNode()->uses()) { + for (SDNode *U : DAG.getEntryNode().getNode()->users()) { if (LoadSDNode *L = dyn_cast(U)) { if (FrameIndexSDNode *FI = dyn_cast(L->getBasePtr())) { if (FI->getIndex() < 0) { @@ -3814,7 +3814,7 @@ static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0, uint32_t Offset, } static bool hasVolatileUser(SDNode *Val) { - for (SDNode *U : Val->uses()) { + for (SDNode *U : Val->users()) { if (MemSDNode *M = dyn_cast(U)) { if (M->isVolatile()) return true; @@ -4338,7 +4338,7 @@ SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N, if (!AddOp) return SDValue(); - if (V.hasOneUse() || all_of(V->uses(), [](const SDNode *U) -> bool { + if (V.hasOneUse() || all_of(V->users(), [](const SDNode *U) -> bool { return U->getOpcode() == ISD::MUL; })) return AddOp; @@ -4927,7 +4927,7 @@ SDValue AMDGPUTargetLowering::performFNegCombine(SDNode *N, SDValue Neg = DAG.getNode(ISD::FNEG, SL, VT, Res); DAG.ReplaceAllUsesWith(N0, Neg); - for (SDNode *U : Neg->uses()) + for (SDNode *U : Neg->users()) DCI.AddToWorklist(U); } diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp index 7da93f90341d2..3871a42fcb424 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -12544,7 +12544,7 @@ SDValue SITargetLowering::performOrCombine(SDNode *N, return true; // If we have any non-vectorized use, then it is a candidate for v_perm - for (auto *VUse : OrUse->uses()) { + for (auto *VUse : OrUse->users()) { if (!VUse->getValueType(0).isVector()) return true; @@ -12558,7 +12558,7 @@ SDValue SITargetLowering::performOrCombine(SDNode *N, return false; }; - if (!any_of(N->uses(), usesCombinedOperand)) + if (!any_of(N->users(), usesCombinedOperand)) return SDValue(); uint32_t LHSMask = getPermuteMask(LHS); @@ -13895,7 +13895,7 @@ SDValue SITargetLowering::tryFoldToMad64_32(SDNode *N, // part of full-rate 64-bit ops). 
if (!Subtarget->hasFullRate64Ops()) { unsigned NumUsers = 0; - for (SDNode *Use : LHS->uses()) { + for (SDNode *Use : LHS->users()) { // There is a use that does not feed into addition, so the multiply can't // be removed. We prefer MUL + ADD + ADDC over MAD + MUL. if (Use->getOpcode() != ISD::ADD) diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp index 2b20154042fe2..764d3c879f2d6 100644 --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -3467,7 +3467,7 @@ bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { SDNode *VMov = Copy; // f64 returned in a pair of GPRs. SmallPtrSet Copies; - for (SDNode *U : VMov->uses()) { + for (SDNode *U : VMov->users()) { if (U->getOpcode() != ISD::CopyToReg) return false; Copies.insert(U); @@ -3475,7 +3475,7 @@ bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { if (Copies.size() > 2) return false; - for (SDNode *U : VMov->uses()) { + for (SDNode *U : VMov->users()) { SDValue UseChain = U->getOperand(0); if (Copies.count(UseChain.getNode())) // Second CopyToReg @@ -3507,7 +3507,7 @@ bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { } bool HasRet = false; - for (const SDNode *U : Copy->uses()) { + for (const SDNode *U : Copy->users()) { if (U->getOpcode() != ARMISD::RET_GLUE && U->getOpcode() != ARMISD::INTRET_GLUE) return false; @@ -7958,7 +7958,7 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, // generate a vdup of the constant. if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == SplatBitSize && (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32) && - all_of(BVN->uses(), + all_of(BVN->users(), [BVN](const SDNode *U) { return IsQRMVEInstruction(U, BVN); })) { EVT DupVT = SplatBitSize == 32 ? MVT::v4i32 : SplatBitSize == 16 ? MVT::v8i16 @@ -13970,7 +13970,7 @@ static SDValue PerformSHLSimplify(SDNode *N, return SDValue(); // Check that all the users could perform the shl themselves. - for (auto *U : N->uses()) { + for (auto *U : N->users()) { switch(U->getOpcode()) { default: return SDValue(); @@ -15574,13 +15574,13 @@ PerformExtractEltToVMOVRRD(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { return SDValue(); // Find another extract, of Lane + 1 - auto OtherIt = find_if(Op0->uses(), [&](SDNode *V) { + auto OtherIt = find_if(Op0->users(), [&](SDNode *V) { return V->getOpcode() == ISD::EXTRACT_VECTOR_ELT && isa(V->getOperand(1)) && V->getConstantOperandVal(1) == Lane + 1 && V->getOperand(0).getResNo() == ResNo; }); - if (OtherIt == Op0->uses().end()) + if (OtherIt == Op0->users().end()) return SDValue(); // For float extracts, we need to be converting to a i32 for both vector diff --git a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp index c1937ff70f366..db9aa7e18f5e7 100644 --- a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp @@ -1756,7 +1756,7 @@ void HvxSelector::select(SDNode *ISelN) { // Don't want to select N0 if it's shared with another node, except if // it's shared with other ISELs. 
auto IsISelN = [](SDNode *T) { return T->getOpcode() == HexagonISD::ISEL; }; - if (llvm::all_of(N0->uses(), IsISelN)) + if (llvm::all_of(N0->users(), IsISelN)) SubNodes.insert(N0); } if (SubNodes.empty()) { @@ -1775,7 +1775,7 @@ void HvxSelector::select(SDNode *ISelN) { return true; if (T->use_empty() || NonDom.count(T)) return false; - for (SDNode *U : T->uses()) { + for (SDNode *U : T->users()) { // If T is reachable from a known non-dominated node, then T itself // is non-dominated. if (!Rec(U, Rec)) { @@ -1814,7 +1814,7 @@ void HvxSelector::select(SDNode *ISelN) { for (unsigned I = 0; I != TmpQ.size(); ++I) { SDNode *S = TmpQ[I]; - for (SDNode *U : S->uses()) { + for (SDNode *U : S->users()) { if (U == ISelN) continue; auto F = OpCount.find(U); diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp index 104e601de044b..e32ed41c2893c 100644 --- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp +++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp @@ -5351,7 +5351,7 @@ bool LoongArchTargetLowering::isUsedByReturnOnly(SDNode *N, // The copy must be used by a LoongArchISD::RET, and nothing else. bool HasRet = false; - for (SDNode *Node : Copy->uses()) { + for (SDNode *Node : Copy->users()) { if (Node->getOpcode() != LoongArchISD::RET) return false; HasRet = true; diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp index 94e90a84a2d41..c838b21cbf75e 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp @@ -318,7 +318,7 @@ bool NVPTXDAGToDAGISel::tryEXTRACT_VECTOR_ELEMENT(SDNode *N) { return false; // Find and record all uses of this vector that extract element 0 or 1. SmallVector E0, E1; - for (auto *U : Vector.getNode()->uses()) { + for (auto *U : Vector.getNode()->users()) { if (U->getOpcode() != ISD::EXTRACT_VECTOR_ELT) continue; if (U->getOperand(0) != Vector) diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp index a033a8247fac5..5c1f717694a4c 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp @@ -4495,7 +4495,7 @@ PerformFADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, // int numUses = 0; int nonAddCount = 0; - for (const SDNode *User : N0.getNode()->uses()) { + for (const SDNode *User : N0.getNode()->users()) { numUses++; if (User->getOpcode() != ISD::FADD) ++nonAddCount; @@ -4523,7 +4523,7 @@ PerformFADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, opIsLive = true; if (!opIsLive) - for (const SDNode *User : left->uses()) { + for (const SDNode *User : left->users()) { int orderNo3 = User->getIROrder(); if (orderNo3 > orderNo) { opIsLive = true; @@ -4532,7 +4532,7 @@ PerformFADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, } if (!opIsLive) - for (const SDNode *User : right->uses()) { + for (const SDNode *User : right->users()) { int orderNo3 = User->getIROrder(); if (orderNo3 > orderNo) { opIsLive = true; @@ -4730,7 +4730,7 @@ static SDValue PerformREMCombine(SDNode *N, const SDValue &Num = N->getOperand(0); const SDValue &Den = N->getOperand(1); - for (const SDNode *U : Num->uses()) { + for (const SDNode *U : Num->users()) { if (U->getOpcode() == DivOpc && U->getOperand(0) == Num && U->getOperand(1) == Den) { // Num % Den -> Num - (Num / Den) * Den diff --git a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp index 
2475b8ad11f10..277c1414d7160 100644 --- a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp @@ -750,7 +750,7 @@ static bool canOptimizeTLSDFormToXForm(SelectionDAG *CurDAG, SDValue Base) { // Base is expected to be an ADD_TLS node. if (Base.getOpcode() != PPCISD::ADD_TLS) return false; - for (auto *ADDTLSUse : Base.getNode()->uses()) { + for (auto *ADDTLSUse : Base.getNode()->users()) { // The optimization to convert the D-Form load/store into its X-Form // counterpart should only occur if the source value offset of the load/ // store is 0. This also means that The offset should always be undefined. @@ -3986,7 +3986,7 @@ static bool allUsesExtend(SDValue Compare, SelectionDAG *CurDAG) { return true; // We want the value in a GPR if it is being extended, used for a select, or // used in logical operations. - for (auto *CompareUse : Compare.getNode()->uses()) + for (auto *CompareUse : Compare.getNode()->users()) if (CompareUse->getOpcode() != ISD::SIGN_EXTEND && CompareUse->getOpcode() != ISD::ZERO_EXTEND && CompareUse->getOpcode() != ISD::SELECT && @@ -6701,7 +6701,7 @@ void PPCDAGToDAGISel::PostprocessISelDAG() { // be folded with the isel so that we don't need to materialize a register // containing zero. bool PPCDAGToDAGISel::AllUsersSelectZero(SDNode *N) { - for (const SDNode *User : N->uses()) { + for (const SDNode *User : N->users()) { if (!User->isMachineOpcode()) return false; if (User->getMachineOpcode() != PPC::SELECT_I4 && @@ -6731,7 +6731,7 @@ bool PPCDAGToDAGISel::AllUsersSelectZero(SDNode *N) { void PPCDAGToDAGISel::SwapAllSelectUsers(SDNode *N) { SmallVector ToReplace; - for (SDNode *User : N->uses()) { + for (SDNode *User : N->users()) { assert((User->getMachineOpcode() == PPC::SELECT_I4 || User->getMachineOpcode() == PPC::SELECT_I8) && "Must have all select users"); @@ -7382,7 +7382,7 @@ void PPCDAGToDAGISel::PeepholePPC64ZExt() { // (except for the original INSERT_SUBREG), then abort the transformation. bool OutsideUse = false; for (SDNode *PN : ToPromote) { - for (SDNode *UN : PN->uses()) { + for (SDNode *UN : PN->users()) { if (!ToPromote.count(UN) && UN != ISR.getNode()) { OutsideUse = true; break; diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp index 69bc2cce6c2c7..199e1f41cfc05 100644 --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -2687,7 +2687,7 @@ static bool provablyDisjointOr(SelectionDAG &DAG, const SDValue &N) { bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG) const { - for (SDNode *U : N->uses()) { + for (SDNode *U : N->users()) { if (MemSDNode *Memop = dyn_cast(U)) { if (Memop->getMemoryVT() == MVT::f64) { Base = N.getOperand(0); @@ -12033,7 +12033,7 @@ SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const { SDValue PPCTargetLowering::LowerUaddo(SDValue Op, SelectionDAG &DAG) const { // Default to target independent lowering if there is a logical user of the // carry-bit. 
- for (SDNode *U : Op->uses()) { + for (SDNode *U : Op->users()) { if (U->getOpcode() == ISD::SELECT) return SDValue(); if (ISD::isBitwiseLogicOp(U->getOpcode())) { @@ -14290,7 +14290,7 @@ static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) { if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG)) return true; - for (SDNode *U : LoadRoot->uses()) + for (SDNode *U : LoadRoot->users()) if (((isa(U) && cast(U)->getChain().getNode() == LoadRoot) || U->getOpcode() == ISD::TokenFactor) && @@ -14352,7 +14352,7 @@ SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N, // If all users of SETCC extend its value to a legal integer type // then we replace SETCC with a subtraction - for (const SDNode *U : N->uses()) + for (const SDNode *U : N->users()) if (U->getOpcode() != ISD::ZERO_EXTEND) return SDValue(); @@ -14531,7 +14531,7 @@ SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N, if (isa(Inputs[i])) continue; - for (const SDNode *User : Inputs[i].getNode()->uses()) { + for (const SDNode *User : Inputs[i].getNode()->users()) { if (User != N && !Visited.count(User)) return SDValue(); @@ -14552,7 +14552,7 @@ SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N, } for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { - for (const SDNode *User : PromOps[i].getNode()->uses()) { + for (const SDNode *User : PromOps[i].getNode()->users()) { if (User != N && !Visited.count(User)) return SDValue(); @@ -14736,7 +14736,7 @@ SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N, if (isa(Inputs[i])) continue; - for (SDNode *User : Inputs[i].getNode()->uses()) { + for (SDNode *User : Inputs[i].getNode()->users()) { if (User != N && !Visited.count(User)) return SDValue(); @@ -14758,7 +14758,7 @@ SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N, } for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { - for (SDNode *User : PromOps[i].getNode()->uses()) { + for (SDNode *User : PromOps[i].getNode()->users()) { if (User != N && !Visited.count(User)) return SDValue(); @@ -16556,35 +16556,35 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, APInt::getAllOnes(Bits /* alignment */) .zext(Add.getScalarValueSizeInBits()))) { SDNode *BasePtr = Add->getOperand(0).getNode(); - for (SDNode *U : BasePtr->uses()) { - if (U->getOpcode() == ISD::INTRINSIC_WO_CHAIN && - U->getConstantOperandVal(0) == IID) { - // We've found another LVSL/LVSR, and this address is an aligned - // multiple of that one. The results will be the same, so use the - // one we've just found instead. - - return SDValue(U, 0); - } + for (SDNode *U : BasePtr->users()) { + if (U->getOpcode() == ISD::INTRINSIC_WO_CHAIN && + U->getConstantOperandVal(0) == IID) { + // We've found another LVSL/LVSR, and this address is an aligned + // multiple of that one. The results will be the same, so use the + // one we've just found instead. 
+ + return SDValue(U, 0); + } } } if (isa(Add->getOperand(1))) { SDNode *BasePtr = Add->getOperand(0).getNode(); - for (SDNode *U : BasePtr->uses()) { - if (U->getOpcode() == ISD::ADD && - isa(U->getOperand(1)) && - (Add->getConstantOperandVal(1) - U->getConstantOperandVal(1)) % - (1ULL << Bits) == - 0) { - SDNode *OtherAdd = U; - for (SDNode *V : OtherAdd->uses()) { - if (V->getOpcode() == ISD::INTRINSIC_WO_CHAIN && - V->getConstantOperandVal(0) == IID) { - return SDValue(V, 0); + for (SDNode *U : BasePtr->users()) { + if (U->getOpcode() == ISD::ADD && + isa(U->getOperand(1)) && + (Add->getConstantOperandVal(1) - U->getConstantOperandVal(1)) % + (1ULL << Bits) == + 0) { + SDNode *OtherAdd = U; + for (SDNode *V : OtherAdd->users()) { + if (V->getOpcode() == ISD::INTRINSIC_WO_CHAIN && + V->getConstantOperandVal(0) == IID) { + return SDValue(V, 0); + } } } } - } } } diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp index ccf34b8a6b2b0..eb9845e869182 100644 --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp @@ -2614,7 +2614,7 @@ static bool selectConstantAddr(SelectionDAG *CurDAG, const SDLoc &DL, // Is this ADD instruction only used as the base pointer of scalar loads and // stores? static bool isWorthFoldingAdd(SDValue Add) { - for (auto *Use : Add->uses()) { + for (auto *Use : Add->users()) { if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE && Use->getOpcode() != ISD::ATOMIC_LOAD && Use->getOpcode() != ISD::ATOMIC_STORE) diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index b703eb90e8ef3..e82190e77d590 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -16246,7 +16246,7 @@ static SDValue performSRACombine(SDNode *N, SelectionDAG &DAG, // All users should be a shift by constant less than or equal to 32. This // ensures we'll do this optimization for each of them to produce an // add/sub+sext_inreg they can all share. - for (SDNode *U : N0->uses()) { + for (SDNode *U : N0->users()) { if (U->getOpcode() != ISD::SRA || !isa(U->getOperand(1)) || U->getConstantOperandVal(1) > 32) @@ -18310,7 +18310,7 @@ bool RISCVTargetLowering::isDesirableToCommuteWithShift( // LD/ST, it can still complete the folding optimization operation performed // above. auto isUsedByLdSt = [](const SDNode *X, const SDNode *User) { - for (SDNode *Use : X->uses()) { + for (SDNode *Use : X->users()) { // This use is the one we're on right now. Skip it if (Use == User || Use->getOpcode() == ISD::SELECT) continue; @@ -20447,7 +20447,7 @@ bool RISCVTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { // The copy must be used by a RISCVISD::RET_GLUE, and nothing else. 
bool HasRet = false; - for (SDNode *Node : Copy->uses()) { + for (SDNode *Node : Copy->users()) { if (Node->getOpcode() != RISCVISD::RET_GLUE) return false; HasRet = true; diff --git a/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp index 403d238aa5b52..210e3c5426f46 100644 --- a/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp +++ b/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp @@ -1890,7 +1890,7 @@ SystemZDAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *CCRegUser = nullptr; if (CCUser->getOpcode() == ISD::CopyToReg || cast(CCUser->getOperand(1))->getReg() == SystemZ::CC) { - for (auto *U : CCUser->uses()) { + for (auto *U : CCUser->users()) { if (CCRegUser == nullptr) CCRegUser = U; else if (CCRegUser != U) diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp index becc3936eef89..47008af3479ee 100644 --- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp +++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp @@ -2910,7 +2910,7 @@ static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) { if (C.CCMask == SystemZ::CCMASK_CMP_EQ || C.CCMask == SystemZ::CCMASK_CMP_NE) { - for (SDNode *N : C.Op0->uses()) { + for (SDNode *N : C.Op0->users()) { if (N->getOpcode() == ISD::SUB && ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) || (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) { @@ -2936,7 +2936,7 @@ static void adjustForFNeg(Comparison &C) { return; auto *C1 = dyn_cast(C.Op1); if (C1 && C1->isZero()) { - for (SDNode *N : C.Op0->uses()) { + for (SDNode *N : C.Op0->users()) { if (N->getOpcode() == ISD::FNEG) { C.Op0 = SDValue(N, 0); C.CCMask = SystemZ::reverseCCMask(C.CCMask); @@ -2960,7 +2960,7 @@ static void adjustForLTGFR(Comparison &C) { if (C1 && C1->getZExtValue() == 32) { SDValue ShlOp0 = C.Op0.getOperand(0); // See whether X has any SIGN_EXTEND_INREG uses. 
- for (SDNode *N : ShlOp0->uses()) { + for (SDNode *N : ShlOp0->users()) { if (N->getOpcode() == ISD::SIGN_EXTEND_INREG && cast(N->getOperand(1))->getVT() == MVT::i32) { C.Op0 = SDValue(N, 0); @@ -7289,7 +7289,7 @@ static bool isVectorElementSwap(ArrayRef M, EVT VT) { } static bool isOnlyUsedByStores(SDValue StoredVal, SelectionDAG &DAG) { - for (auto *U : StoredVal->uses()) { + for (auto *U : StoredVal->users()) { if (StoreSDNode *ST = dyn_cast(U)) { EVT CurrMemVT = ST->getMemoryVT().getScalarType(); if (CurrMemVT.isRound() && CurrMemVT.getStoreSize() <= 16) @@ -7668,7 +7668,7 @@ SDValue SystemZTargetLowering::combineFP_ROUND( Op0.getOperand(1).getOpcode() == ISD::Constant && Op0.getConstantOperandVal(1) == 0) { SDValue Vec = Op0.getOperand(0); - for (auto *U : Vec->uses()) { + for (auto *U : Vec->users()) { if (U != Op0.getNode() && U->hasOneUse() && U->getOpcode() == ISD::EXTRACT_VECTOR_ELT && U->getOperand(0) == Vec && @@ -7732,7 +7732,7 @@ SDValue SystemZTargetLowering::combineFP_EXTEND( Op0.getOperand(1).getOpcode() == ISD::Constant && Op0.getConstantOperandVal(1) == 0) { SDValue Vec = Op0.getOperand(0); - for (auto *U : Vec->uses()) { + for (auto *U : Vec->users()) { if (U != Op0.getNode() && U->hasOneUse() && U->getOpcode() == ISD::EXTRACT_VECTOR_ELT && U->getOperand(0) == Vec && diff --git a/llvm/lib/Target/VE/VEISelLowering.cpp b/llvm/lib/Target/VE/VEISelLowering.cpp index a56b5a2ac9a3e..87c1625c11454 100644 --- a/llvm/lib/Target/VE/VEISelLowering.cpp +++ b/llvm/lib/Target/VE/VEISelLowering.cpp @@ -2951,7 +2951,7 @@ static bool isI32Insn(const SDNode *User, const SDNode *N) { static bool isI32InsnAllUses(const SDNode *User, const SDNode *N) { // Check all use of User node. If all of them are safe, optimize // truncate to extract_subreg. - for (const SDNode *U : User->uses()) { + for (const SDNode *U : User->users()) { switch (U->getOpcode()) { default: // If the use is an instruction which treats the source operand as i32, @@ -3002,7 +3002,7 @@ SDValue VETargetLowering::combineTRUNCATE(SDNode *N, return SDValue(); // Check all use of this TRUNCATE. - for (const SDNode *User : N->uses()) { + for (const SDNode *User : N->users()) { // Make sure that we're not going to replace TRUNCATE for non i32 // instructions. // diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp index 76ef207f7d47d..bb20e6ecf281b 100644 --- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp +++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp @@ -370,7 +370,7 @@ namespace { return false; // Walk all the users of the immediate. 
- for (const SDNode *User : N->uses()) { + for (const SDNode *User : N->users()) { if (UseCount >= 2) break; @@ -1095,7 +1095,7 @@ void X86DAGToDAGISel::PreprocessISelDAG() { SDNode *MaxLd = nullptr; SDValue Ptr = Ld->getBasePtr(); SDValue Chain = Ld->getChain(); - for (SDNode *User : Ptr->uses()) { + for (SDNode *User : Ptr->users()) { auto *UserLd = dyn_cast(User); MVT UserVT = User->getSimpleValueType(0); if (User != N && UserLd && ISD::isNormalLoad(User) && diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 35c0974733aba..4bd65dc6ade40 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -7397,7 +7397,7 @@ static Constant *getConstantVector(MVT VT, const APInt &SplatValue, } static bool isFoldableUseOfShuffle(SDNode *N) { - for (auto *U : N->uses()) { + for (auto *U : N->users()) { unsigned Opc = U->getOpcode(); // VPERMV/VPERMV3 shuffles can never fold their index operands. if (Opc == X86ISD::VPERMV && U->getOperand(0).getNode() == N) @@ -16004,7 +16004,7 @@ static SDValue lowerShufflePairAsUNPCKAndPermute(const SDLoc &DL, MVT VT, // Find the intersection between shuffle users of V1 and V2. SmallVector Shuffles; - for (SDNode *User : V1->uses()) + for (SDNode *User : V1->users()) if (User->getOpcode() == ISD::VECTOR_SHUFFLE && User->getOperand(0) == V1 && User->getOperand(1) == V2) Shuffles.push_back(User); @@ -18280,7 +18280,7 @@ static APInt getExtractedDemandedElts(SDNode *N) { MVT VT = N->getSimpleValueType(0); unsigned NumElts = VT.getVectorNumElements(); APInt DemandedElts = APInt::getZero(NumElts); - for (SDNode *User : N->uses()) { + for (SDNode *User : N->users()) { switch (User->getOpcode()) { case X86ISD::PEXTRB: case X86ISD::PEXTRW: @@ -22143,7 +22143,7 @@ static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) { // If this is a FABS and it has an FNEG user, bail out to fold the combination // into an FNABS. We'll lower the FABS after that if it is still in use. if (IsFABS) - for (SDNode *User : Op->uses()) + for (SDNode *User : Op->users()) if (User->getOpcode() == ISD::FNEG) return Op; @@ -22888,7 +22888,7 @@ static bool hasNonFlagsUse(SDValue Op) { // using an RMW op or only the flags are used. Otherwise, leave // the node alone and emit a 'cmp' or 'test' instruction. static bool isProfitableToUseFlagOp(SDValue Op) { - for (SDNode *U : Op->uses()) + for (SDNode *U : Op->users()) if (U->getOpcode() != ISD::CopyToReg && U->getOpcode() != ISD::SETCC && U->getOpcode() != ISD::STORE) @@ -41712,7 +41712,7 @@ static SDValue combineTargetShuffle(SDValue N, const SDLoc &DL, // Share broadcast with the longest vector and extract low subvector (free). // Ensure the same SDValue from the SDNode use is being used. - for (SDNode *User : Src->uses()) + for (SDNode *User : Src->users()) if (User != N.getNode() && User->getOpcode() == X86ISD::VBROADCAST && Src == User->getOperand(0) && User->getValueSizeInBits(0).getFixedValue() > @@ -42910,7 +42910,7 @@ bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode( // If we reuse the shift amount just for sse shift amounts then we know that // only the bottom 64-bits are only ever used. 
- bool AssumeSingleUse = llvm::all_of(Amt->uses(), [&Amt](SDNode *Use) { + bool AssumeSingleUse = llvm::all_of(Amt->users(), [&Amt](SDNode *Use) { unsigned UseOpc = Use->getOpcode(); return (UseOpc == X86ISD::VSHL || UseOpc == X86ISD::VSRL || UseOpc == X86ISD::VSRA) && @@ -45670,7 +45670,7 @@ combineExtractFromVectorLoad(SDNode *N, EVT VecVT, SDValue SrcVec, uint64_t Idx, const TargetLowering &TLI = DAG.getTargetLoweringInfo(); EVT VT = N->getValueType(0); - bool LikelyUsedAsVector = any_of(N->uses(), [](SDNode *Use) { + bool LikelyUsedAsVector = any_of(N->users(), [](SDNode *Use) { return Use->getOpcode() == ISD::STORE || Use->getOpcode() == ISD::INSERT_VECTOR_ELT || Use->getOpcode() == ISD::SCALAR_TO_VECTOR; @@ -46338,7 +46338,7 @@ static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG, return false; }; // TODO: Can we drop the oneuse check for constant extracts? - if (all_of(InputVector->uses(), IsBoolExtract) && + if (all_of(InputVector->users(), IsBoolExtract) && (IsVar || BoolExtracts.size() > 1)) { EVT BCVT = EVT::getIntegerVT(*DAG.getContext(), NumSrcElts); if (SDValue BC = @@ -46754,7 +46754,7 @@ static SDValue combineVSelectToBLENDV(SDNode *N, SelectionDAG &DAG, // the generic VSELECT anymore. Otherwise, we may perform wrong // optimizations as we messed with the actual expectation for the vector // boolean values. - for (SDNode *U : Cond->uses()) { + for (SDNode *U : Cond->users()) { if (U->getOpcode() == X86ISD::BLENDV) continue; @@ -49937,7 +49937,7 @@ static SDValue combineCompareEqual(SDNode *N, SelectionDAG &DAG, (VT == MVT::f16 && Subtarget.hasFP16())) { bool ExpectingFlags = false; // Check for any users that want flags: - for (const SDNode *U : N->uses()) { + for (const SDNode *U : N->users()) { if (ExpectingFlags) break; @@ -50765,7 +50765,7 @@ static SDValue combineX86SubCmpForFlags(SDNode *N, SDValue Flag, return SDValue(); // Check the only user of flag is `brcond ne`. - SDNode *BrCond = *Flag->uses().begin(); + SDNode *BrCond = *Flag->use_begin(); if (BrCond->getOpcode() != X86ISD::BRCOND) return SDValue(); unsigned CondNo = 2; @@ -52179,7 +52179,7 @@ static SDValue combineConstantPoolLoads(SDNode *N, const SDLoc &dl, // Look through all other loads/broadcasts in the chain for another constant // pool entry. - for (SDNode *User : Chain->uses()) { + for (SDNode *User : Chain->users()) { auto *UserLd = dyn_cast(User); if (User != N && UserLd && (User->getOpcode() == X86ISD::SUBV_BROADCAST_LOAD || @@ -52289,7 +52289,7 @@ static SDValue combineLoad(SDNode *N, SelectionDAG &DAG, (RegVT.is128BitVector() || RegVT.is256BitVector())) { SDValue Ptr = Ld->getBasePtr(); SDValue Chain = Ld->getChain(); - for (SDNode *User : Chain->uses()) { + for (SDNode *User : Chain->users()) { auto *UserLd = dyn_cast(User); if (User != N && UserLd && User->getOpcode() == X86ISD::SUBV_BROADCAST_LOAD && @@ -53150,8 +53150,8 @@ static bool isHorizontalBinOp(unsigned HOpcode, SDValue &LHS, SDValue &RHS, return User->getOpcode() == HOpcode && User->getValueType(0) == VT; }; ForceHorizOp = - ForceHorizOp || (llvm::any_of(NewLHS->uses(), FoundHorizUser) && - llvm::any_of(NewRHS->uses(), FoundHorizUser)); + ForceHorizOp || (llvm::any_of(NewLHS->users(), FoundHorizUser) && + llvm::any_of(NewRHS->users(), FoundHorizUser)); // Assume a SingleSource HOP if we only shuffle one input and don't need to // shuffle the result. 
@@ -54878,7 +54878,7 @@ static SDValue promoteExtBeforeAdd(SDNode *Ext, SelectionDAG &DAG, // of single 'add' instructions, but the cost model for selecting an LEA // currently has a high threshold. bool HasLEAPotential = false; - for (auto *User : Ext->uses()) { + for (auto *User : Ext->users()) { if (User->getOpcode() == ISD::ADD || User->getOpcode() == ISD::SHL) { HasLEAPotential = true; break; @@ -55066,10 +55066,11 @@ static SDValue getInvertedVectorForFMA(SDValue V, SelectionDAG &DAG) { // Check if we can eliminate V. We assume if a value is only used in FMAs, we // can eliminate it. Since this function is invoked for each FMA with this // vector. - auto IsNotFMA = [](SDNode *Use) { - return Use->getOpcode() != ISD::FMA && Use->getOpcode() != ISD::STRICT_FMA; + auto IsNotFMA = [](SDNode *User) { + return User->getOpcode() != ISD::FMA && + User->getOpcode() != ISD::STRICT_FMA; }; - if (llvm::any_of(V->uses(), IsNotFMA)) + if (llvm::any_of(V->users(), IsNotFMA)) return SDValue(); SmallVector Ops; @@ -55090,7 +55091,7 @@ static SDValue getInvertedVectorForFMA(SDValue V, SelectionDAG &DAG) { // If an inverted version cannot be eliminated, choose it instead of the // original version. - if (llvm::any_of(NV->uses(), IsNotFMA)) + if (llvm::any_of(NV->users(), IsNotFMA)) return SDValue(NV, 0); // If the inverted version also can be eliminated, we have to consistently @@ -56183,7 +56184,7 @@ static SDValue combineSIntToFP(SDNode *N, SelectionDAG &DAG, static bool needCarryOrOverflowFlag(SDValue Flags) { assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!"); - for (const SDNode *User : Flags->uses()) { + for (const SDNode *User : Flags->users()) { X86::CondCode CC; switch (User->getOpcode()) { default: @@ -56218,7 +56219,7 @@ static bool needCarryOrOverflowFlag(SDValue Flags) { static bool onlyZeroFlagUsed(SDValue Flags) { assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!"); - for (const SDNode *User : Flags->uses()) { + for (const SDNode *User : Flags->users()) { unsigned CCOpNo; switch (User->getOpcode()) { default: @@ -56829,7 +56830,7 @@ static SDValue pushAddIntoCmovOfConsts(SDNode *N, const SDLoc &DL, // TODO: If target has "slow3OpsLEA", do this even without the trailing memop? if (OtherOp.getOpcode() == ISD::ADD && OtherOp.hasOneUse() && !isa(OtherOp.getOperand(0)) && - all_of(N->uses(), [&](SDNode *Use) { + all_of(N->users(), [&](SDNode *Use) { auto *MemNode = dyn_cast(Use); return MemNode && MemNode->getBasePtr().getNode() == N; })) { @@ -58485,7 +58486,7 @@ static SDValue combineScalarToVector(SDNode *N, SelectionDAG &DAG, // See if we're broadcasting the scalar value, in which case just reuse that. // Ensure the same SDValue from the SDNode use is being used. if (VT.getScalarType() == Src.getValueType()) - for (SDNode *User : Src->uses()) + for (SDNode *User : Src->users()) if (User->getOpcode() == X86ISD::VBROADCAST && Src == User->getOperand(0)) { unsigned SizeInBits = VT.getFixedSizeInBits(); @@ -58881,7 +58882,7 @@ static SDValue combineBROADCAST_LOAD(SDNode *N, SelectionDAG &DAG, // Look at other users of our base pointer and try to find a wider broadcast. // The input chain and the size of the memory VT must match. 
- for (SDNode *User : Ptr->uses()) + for (SDNode *User : Ptr->users()) if (User != N && User->getOpcode() == N->getOpcode() && cast(User)->getBasePtr() == Ptr && cast(User)->getChain() == Chain && diff --git a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp index 05a5a36ce5cbe..df12ea2f79df5 100644 --- a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp +++ b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp @@ -955,7 +955,7 @@ bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { return false; bool HasRet = false; - for (const SDNode *U : Copy->uses()) { + for (const SDNode *U : Copy->users()) { if (U->getOpcode() != X86ISD::RET_GLUE) return false; // If we are returning more than one value, we can definitely From 085cb298557989362d485eabf60199ffe8feeadf Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Wed, 18 Dec 2024 16:09:05 -0800 Subject: [PATCH 2/3] fixup! Rename variables Use->User. Fix M68k use. --- llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 44 +++++++++---------- .../Target/AArch64/AArch64ISelDAGToDAG.cpp | 10 ++--- llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 10 ++--- llvm/lib/Target/M68k/M68kISelLowering.cpp | 2 +- llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 18 ++++---- 5 files changed, 42 insertions(+), 42 deletions(-) diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index 9b0dc853ac037..ebce0ebe8f81c 100644 --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -18965,15 +18965,15 @@ bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) { // Now check for #3 and #4. bool RealUse = false; - for (SDNode *Use : Ptr->users()) { - if (Use == N) + for (SDNode *User : Ptr->users()) { + if (User == N) continue; - if (SDNode::hasPredecessorHelper(Use, Visited, Worklist, MaxSteps)) + if (SDNode::hasPredecessorHelper(User, Visited, Worklist, MaxSteps)) return false; // If Ptr may be folded in addressing mode of other use, then it's // not profitable to do this transformation. - if (!canFoldInAddressingMode(Ptr.getNode(), Use, DAG, TLI)) + if (!canFoldInAddressingMode(Ptr.getNode(), User, DAG, TLI)) RealUse = true; } @@ -19089,19 +19089,19 @@ static bool shouldCombineToPostInc(SDNode *N, SDValue Ptr, SDNode *PtrUse, SmallPtrSet Visited; unsigned MaxSteps = SelectionDAG::getHasPredecessorMaxSteps(); - for (SDNode *Use : BasePtr->users()) { - if (Use == Ptr.getNode()) + for (SDNode *User : BasePtr->users()) { + if (User == Ptr.getNode()) continue; // No if there's a later user which could perform the index instead. - if (isa(Use)) { + if (isa(User)) { bool IsLoad = true; bool IsMasked = false; SDValue OtherPtr; - if (getCombineLoadStoreParts(Use, ISD::POST_INC, ISD::POST_DEC, IsLoad, + if (getCombineLoadStoreParts(User, ISD::POST_INC, ISD::POST_DEC, IsLoad, IsMasked, OtherPtr, TLI)) { SmallVector Worklist; - Worklist.push_back(Use); + Worklist.push_back(User); if (SDNode::hasPredecessorHelper(N, Visited, Worklist, MaxSteps)) return false; } @@ -19109,9 +19109,9 @@ static bool shouldCombineToPostInc(SDNode *N, SDValue Ptr, SDNode *PtrUse, // If all the uses are load / store addresses, then don't do the // transformation. 
- if (Use->getOpcode() == ISD::ADD || Use->getOpcode() == ISD::SUB) { - for (SDNode *UseUse : Use->users()) - if (canFoldInAddressingMode(Use, UseUse, DAG, TLI)) + if (User->getOpcode() == ISD::ADD || User->getOpcode() == ISD::SUB) { + for (SDNode *UserUser : User->users()) + if (canFoldInAddressingMode(User, UserUser, DAG, TLI)) return false; } } @@ -20515,24 +20515,24 @@ bool DAGCombiner::isMulAddWithConstProfitable(SDNode *MulNode, SDValue AddNode, return true; // Walk all the users of the constant with which we're multiplying. - for (SDNode *Use : ConstNode->users()) { - if (Use == MulNode) // This use is the one we're on right now. Skip it. + for (SDNode *User : ConstNode->users()) { + if (User == MulNode) // This use is the one we're on right now. Skip it. continue; - if (Use->getOpcode() == ISD::MUL) { // We have another multiply use. + if (User->getOpcode() == ISD::MUL) { // We have another multiply use. SDNode *OtherOp; SDNode *MulVar = AddNode.getOperand(0).getNode(); // OtherOp is what we're multiplying against the constant. - if (Use->getOperand(0) == ConstNode) - OtherOp = Use->getOperand(1).getNode(); + if (User->getOperand(0) == ConstNode) + OtherOp = User->getOperand(1).getNode(); else - OtherOp = Use->getOperand(0).getNode(); + OtherOp = User->getOperand(0).getNode(); // Check to see if multiply is with the same operand of our "add". // // ConstNode = CONST - // Use = ConstNode * A <-- visiting Use. OtherOp is A. + // User = ConstNode * A <-- visiting User. OtherOp is A. // ... // AddNode = (A + c1) <-- MulVar is A. // = AddNode * ConstNode <-- current visiting instruction. @@ -20550,7 +20550,7 @@ bool DAGCombiner::isMulAddWithConstProfitable(SDNode *MulNode, SDValue AddNode, // ... = AddNode * ConstNode <-- current visiting instruction. // ... // OtherOp = (A + c2) - // Use = OtherOp * ConstNode <-- visiting Use. + // User = OtherOp * ConstNode <-- visiting User. // // If we make this transformation, we will have a common // multiply (CONST * A) after we also do the same transformation @@ -23182,8 +23182,8 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) { isa(Use->getOperand(1)); })) { APInt DemandedElts = APInt::getZero(NumElts); - for (SDNode *Use : VecOp->users()) { - auto *CstElt = cast(Use->getOperand(1)); + for (SDNode *User : VecOp->users()) { + auto *CstElt = cast(User->getOperand(1)); if (CstElt->getAPIntValue().ult(NumElts)) DemandedElts.setBit(CstElt->getZExtValue()); } diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp index f831f8de70547..ff3ca8a24fc04 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp @@ -1012,15 +1012,15 @@ bool AArch64DAGToDAGISel::SelectArithUXTXRegister(SDValue N, SDValue &Reg, /// a single pseudo-instruction for an ADRP/ADD pair so over-aggressive folding /// leads to duplicated ADRP instructions. static bool isWorthFoldingADDlow(SDValue N) { - for (auto *Use : N->users()) { - if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE && - Use->getOpcode() != ISD::ATOMIC_LOAD && - Use->getOpcode() != ISD::ATOMIC_STORE) + for (auto *User : N->users()) { + if (User->getOpcode() != ISD::LOAD && User->getOpcode() != ISD::STORE && + User->getOpcode() != ISD::ATOMIC_LOAD && + User->getOpcode() != ISD::ATOMIC_STORE) return false; // ldar and stlr have much more restrictive addressing modes (just a // register). 
- if (isStrongerThanMonotonic(cast(Use)->getSuccessOrdering())) + if (isStrongerThanMonotonic(cast(User)->getSuccessOrdering())) return false; } diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp index 3871a42fcb424..2b8cc5b4e33a4 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -12544,15 +12544,15 @@ SDValue SITargetLowering::performOrCombine(SDNode *N, return true; // If we have any non-vectorized use, then it is a candidate for v_perm - for (auto *VUse : OrUse->users()) { - if (!VUse->getValueType(0).isVector()) + for (auto *VUser : OrUse->users()) { + if (!VUser->getValueType(0).isVector()) return true; // If the use of a vector is a store, then combining via a v_perm // is beneficial. // TODO -- whitelist more uses for (auto VectorwiseOp : {ISD::STORE, ISD::CopyToReg, ISD::CopyFromReg}) - if (VUse->getOpcode() == VectorwiseOp) + if (VUser->getOpcode() == VectorwiseOp) return true; } return false; @@ -13895,10 +13895,10 @@ SDValue SITargetLowering::tryFoldToMad64_32(SDNode *N, // part of full-rate 64-bit ops). if (!Subtarget->hasFullRate64Ops()) { unsigned NumUsers = 0; - for (SDNode *Use : LHS->users()) { + for (SDNode *User : LHS->users()) { // There is a use that does not feed into addition, so the multiply can't // be removed. We prefer MUL + ADD + ADDC over MAD + MUL. - if (Use->getOpcode() != ISD::ADD) + if (User->getOpcode() != ISD::ADD) return SDValue(); // We prefer 2xMAD over MUL + 2xADD + 2xADDC (code density), and prefer diff --git a/llvm/lib/Target/M68k/M68kISelLowering.cpp b/llvm/lib/Target/M68k/M68kISelLowering.cpp index ff966baecf27d..98ed46d91da60 100644 --- a/llvm/lib/Target/M68k/M68kISelLowering.cpp +++ b/llvm/lib/Target/M68k/M68kISelLowering.cpp @@ -1990,7 +1990,7 @@ SDValue M68kTargetLowering::EmitTest(SDValue Op, unsigned M68kCC, case ISD::XOR: // Due to the ISEL shortcoming noted above, be conservative if this op is // likely to be selected as part of a load-modify-store instruction. - for (const auto *U : Op.getNode()->uses()) + for (const auto *U : Op.getNode()->users()) if (U->getOpcode() == ISD::STORE) goto default_case; diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp index eb9845e869182..4393d33021760 100644 --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp @@ -2614,21 +2614,21 @@ static bool selectConstantAddr(SelectionDAG *CurDAG, const SDLoc &DL, // Is this ADD instruction only used as the base pointer of scalar loads and // stores? static bool isWorthFoldingAdd(SDValue Add) { - for (auto *Use : Add->users()) { - if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE && - Use->getOpcode() != ISD::ATOMIC_LOAD && - Use->getOpcode() != ISD::ATOMIC_STORE) + for (auto *User : Add->users()) { + if (User->getOpcode() != ISD::LOAD && User->getOpcode() != ISD::STORE && + User->getOpcode() != ISD::ATOMIC_LOAD && + User->getOpcode() != ISD::ATOMIC_STORE) return false; - EVT VT = cast(Use)->getMemoryVT(); + EVT VT = cast(User)->getMemoryVT(); if (!VT.isScalarInteger() && VT != MVT::f16 && VT != MVT::f32 && VT != MVT::f64) return false; // Don't allow stores of the value. It must be used as the address. 
- if (Use->getOpcode() == ISD::STORE && - cast(Use)->getValue() == Add) + if (User->getOpcode() == ISD::STORE && + cast(User)->getValue() == Add) return false; - if (Use->getOpcode() == ISD::ATOMIC_STORE && - cast(Use)->getVal() == Add) + if (User->getOpcode() == ISD::ATOMIC_STORE && + cast(User)->getVal() == Add) return false; } From b9cc2e6a8582b6690de9c12dce7cb067f441aa6e Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Wed, 18 Dec 2024 17:14:06 -0800 Subject: [PATCH 3/3] [SelectionDAG] Add SDNode::user_begin() and use it in some places. Most of these are just places that want the first user and aren't iterating over the whole list. While there I changed some use_size() == 1 to hasOneUse() which is more efficient. This is part of an effort to rename use_iterator to user_iterator and provide a use_iterator that dereferences to SDUse. This patch helps reduce the diff on later patches. --- llvm/include/llvm/CodeGen/SelectionDAGNodes.h | 5 +++ llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 38 +++++++++---------- .../Target/AArch64/AArch64ISelLowering.cpp | 20 +++++----- llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 6 +-- llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 2 +- llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp | 8 ++-- llvm/lib/Target/ARM/ARMISelLowering.cpp | 29 +++++++------- .../Target/Hexagon/HexagonISelDAGToDAG.cpp | 4 +- .../LoongArch/LoongArchISelLowering.cpp | 2 +- llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp | 4 +- llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 8 ++-- llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 8 ++-- .../Target/SystemZ/SystemZISelDAGToDAG.cpp | 2 +- .../Target/SystemZ/SystemZISelLowering.cpp | 6 +-- llvm/lib/Target/X86/X86ISelLowering.cpp | 36 +++++++++--------- llvm/lib/Target/X86/X86ISelLoweringCall.cpp | 2 +- 16 files changed, 92 insertions(+), 88 deletions(-) diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h index b525872f9dd2a..77c04369f3e92 100644 --- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h +++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h @@ -844,6 +844,11 @@ END_TWO_BYTE_PACK() static use_iterator use_end() { return use_iterator(nullptr); } + /// Provide iteration support to walk over all users of an SDNode. + /// For now, this should only be used to get a pointer to the first user. + /// FIXME: Rename use_iterator to user_iterator. Add user_end(). + use_iterator user_begin() const { return use_iterator(UseList); } + // Dereferencing use_iterator returns the user SDNode* making it closer to a // user_iterator thus this function is called users() to reflect that. // FIXME: Rename to user_iterator and introduce a use_iterator that returns diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index ebce0ebe8f81c..85009439c37b3 100644 --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -2136,8 +2136,8 @@ SDValue DAGCombiner::visitTokenFactor(SDNode *N) { // If the sole user is a token factor, we should make sure we have a // chance to merge them together. This prevents TF chains from inhibiting // optimizations. - if (N->hasOneUse() && N->use_begin()->getOpcode() == ISD::TokenFactor) - AddToWorklist(*(N->use_begin())); + if (N->hasOneUse() && N->user_begin()->getOpcode() == ISD::TokenFactor) + AddToWorklist(*(N->user_begin())); SmallVector TFs; // List of token factors to visit. SmallVector Ops; // Ops for replacing token factor. 
@@ -10906,15 +10906,15 @@ SDValue DAGCombiner::visitSRL(SDNode *N) { // which we plan to do. This workaround can be removed once the DAG is // processed in topological order. if (N->hasOneUse()) { - SDNode *Use = *N->use_begin(); + SDNode *User = *N->user_begin(); // Look pass the truncate. - if (Use->getOpcode() == ISD::TRUNCATE && Use->hasOneUse()) - Use = *Use->use_begin(); + if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) + User = *User->user_begin(); - if (Use->getOpcode() == ISD::BRCOND || Use->getOpcode() == ISD::AND || - Use->getOpcode() == ISD::OR || Use->getOpcode() == ISD::XOR) - AddToWorklist(Use); + if (User->getOpcode() == ISD::BRCOND || User->getOpcode() == ISD::AND || + User->getOpcode() == ISD::OR || User->getOpcode() == ISD::XOR) + AddToWorklist(User); } // Try to transform this shift into a multiply-high if @@ -12917,7 +12917,7 @@ SDValue DAGCombiner::visitSETCC(SDNode *N) { // also lend itself to numerous combines and, as a result, it is desired // we keep the argument to a brcond as a setcc as much as possible. bool PreferSetCC = - N->hasOneUse() && N->use_begin()->getOpcode() == ISD::BRCOND; + N->hasOneUse() && N->user_begin()->getOpcode() == ISD::BRCOND; ISD::CondCode Cond = cast(N->getOperand(2))->get(); EVT VT = N->getValueType(0); @@ -14825,7 +14825,7 @@ SDValue DAGCombiner::reduceLoadWidth(SDNode *N) { // If the SRL is only used by a masking AND, we may be able to adjust // the ExtVT to make the AND redundant. - SDNode *Mask = *(SRL->use_begin()); + SDNode *Mask = *(SRL->user_begin()); if (SRL.hasOneUse() && Mask->getOpcode() == ISD::AND && isa(Mask->getOperand(1))) { unsigned Offset, ActiveBits; @@ -15364,7 +15364,7 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) { } // If this is anyext(trunc), don't fold it, allow ourselves to be folded. - if (N->hasOneUse() && (N->use_begin()->getOpcode() == ISD::ANY_EXTEND)) + if (N->hasOneUse() && (N->user_begin()->getOpcode() == ISD::ANY_EXTEND)) return SDValue(); // Fold extract-and-trunc into a narrow extract. For example: @@ -18370,7 +18370,7 @@ SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) { return FoldedVOp; // If this is fp_round(fpextend), don't fold it, allow ourselves to be folded. 
- if (N->hasOneUse() && N->use_begin()->getOpcode() == ISD::FP_ROUND) + if (N->hasOneUse() && N->user_begin()->getOpcode() == ISD::FP_ROUND) return SDValue(); // fold (fp_extend c1fp) -> c1fp @@ -19847,17 +19847,17 @@ struct LoadedSlice { bool canMergeExpensiveCrossRegisterBankCopy() const { if (!Inst || !Inst->hasOneUse()) return false; - SDNode *Use = *Inst->use_begin(); - if (Use->getOpcode() != ISD::BITCAST) + SDNode *User = *Inst->user_begin(); + if (User->getOpcode() != ISD::BITCAST) return false; assert(DAG && "Missing context"); const TargetLowering &TLI = DAG->getTargetLoweringInfo(); - EVT ResVT = Use->getValueType(0); + EVT ResVT = User->getValueType(0); const TargetRegisterClass *ResRC = - TLI.getRegClassFor(ResVT.getSimpleVT(), Use->isDivergent()); + TLI.getRegClassFor(ResVT.getSimpleVT(), User->isDivergent()); const TargetRegisterClass *ArgRC = - TLI.getRegClassFor(Use->getOperand(0).getValueType().getSimpleVT(), - Use->getOperand(0)->isDivergent()); + TLI.getRegClassFor(User->getOperand(0).getValueType().getSimpleVT(), + User->getOperand(0)->isDivergent()); if (ArgRC == ResRC || !TLI.isOperationLegal(ISD::LOAD, ResVT)) return false; @@ -20069,7 +20069,7 @@ bool DAGCombiner::SliceUpLoad(SDNode *N) { if (User->getOpcode() == ISD::SRL && User->hasOneUse() && isa(User->getOperand(1))) { Shift = User->getConstantOperandVal(1); - User = *User->use_begin(); + User = *User->user_begin(); } // At this point, User is a Truncate, iff we encountered, trunc or diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp index 5865dbe1307ba..494506def33a3 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -18109,9 +18109,9 @@ bool AArch64TargetLowering::shouldFoldConstantShiftPairToMask( if (N->getOpcode() == ISD::SHL && N->hasOneUse()) { if (auto C2 = dyn_cast(N->getOperand(1))) { unsigned ShlAmt = C2->getZExtValue(); - if (auto ShouldADD = *N->use_begin(); + if (auto ShouldADD = *N->user_begin(); ShouldADD->getOpcode() == ISD::ADD && ShouldADD->hasOneUse()) { - if (auto ShouldLOAD = dyn_cast(*ShouldADD->use_begin())) { + if (auto ShouldLOAD = dyn_cast(*ShouldADD->user_begin())) { unsigned ByteVT = ShouldLOAD->getMemoryVT().getSizeInBits() / 8; if ((1ULL << ShlAmt) == ByteVT && isIndexedLoadLegal(ISD::PRE_INC, ShouldLOAD->getMemoryVT())) @@ -18902,8 +18902,8 @@ static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG, return SDValue(); // Conservatively do not lower to shift+add+shift if the mul might be // folded into madd or msub. - if (N->hasOneUse() && (N->use_begin()->getOpcode() == ISD::ADD || - N->use_begin()->getOpcode() == ISD::SUB)) + if (N->hasOneUse() && (N->user_begin()->getOpcode() == ISD::ADD || + N->user_begin()->getOpcode() == ISD::SUB)) return SDValue(); } // Use ShiftedConstValue instead of ConstValue to support both shift+add/sub @@ -21803,7 +21803,7 @@ static SDValue tryCombineWhileLo(SDNode *N, if (HalfSize < 2) return SDValue(); - auto It = N->use_begin(); + auto It = N->user_begin(); SDNode *Lo = *It++; SDNode *Hi = *It; @@ -23402,7 +23402,7 @@ static SDValue performPostLD1Combine(SDNode *N, // TODO: This could be expanded to more operations if they reliably use the // index variants. 
if (N->hasOneUse()) { - unsigned UseOpc = N->use_begin()->getOpcode(); + unsigned UseOpc = N->user_begin()->getOpcode(); if (UseOpc == ISD::FMUL || UseOpc == ISD::FMA) return SDValue(); } @@ -24755,7 +24755,7 @@ static SDValue tryToWidenSetCCOperands(SDNode *Op, SelectionDAG &DAG) { // Make sure that all uses of Op are VSELECTs with result matching types where // the result type has a larger element type than the SetCC operand. - SDNode *FirstUse = *Op->use_begin(); + SDNode *FirstUse = *Op->user_begin(); if (FirstUse->getOpcode() != ISD::VSELECT) return SDValue(); EVT UseMVT = FirstUse->getValueType(0); @@ -25905,7 +25905,7 @@ static SDValue performFPExtendCombine(SDNode *N, SelectionDAG &DAG, EVT VT = N->getValueType(0); // If this is fp_round(fpextend), don't fold it, allow ourselves to be folded. - if (N->hasOneUse() && N->use_begin()->getOpcode() == ISD::FP_ROUND) + if (N->hasOneUse() && N->user_begin()->getOpcode() == ISD::FP_ROUND) return SDValue(); auto hasValidElementTypeForFPExtLoad = [](EVT VT) { @@ -26072,7 +26072,7 @@ static SDValue tryCombineMULLWithUZP1(SDNode *N, // Check ExtractLow's user. if (HasFoundMULLow) { - SDNode *ExtractLowUser = *ExtractLow.getNode()->use_begin(); + SDNode *ExtractLowUser = *ExtractLow.getNode()->user_begin(); if (ExtractLowUser->getOpcode() != N->getOpcode()) { HasFoundMULLow = false; } else { @@ -26549,7 +26549,7 @@ bool AArch64TargetLowering::isUsedByReturnOnly(SDNode *N, return false; SDValue TCChain = Chain; - SDNode *Copy = *N->use_begin(); + SDNode *Copy = *N->user_begin(); if (Copy->getOpcode() == ISD::CopyToReg) { // If the copy has a glue operand, we conservatively assume it isn't safe to // perform a tail call. diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp index c129759f3d3c7..a716d185e392a 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp @@ -1088,9 +1088,9 @@ bool AMDGPUTargetLowering::isDesirableToCommuteWithShift( return true; // If only user is a i32 right-shift, then don't destroy a BFE pattern. - if (N->getValueType(0) == MVT::i32 && N->use_size() == 1 && - (N->use_begin()->getOpcode() == ISD::SRA || - N->use_begin()->getOpcode() == ISD::SRL)) + if (N->getValueType(0) == MVT::i32 && N->hasOneUse() && + (N->user_begin()->getOpcode() == ISD::SRA || + N->user_begin()->getOpcode() == ISD::SRL)) return false; // Don't destroy or(shl(load_zext(),c), load_zext()) patterns. 
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp index 2b8cc5b4e33a4..f83ccf6d8280b 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -16896,7 +16896,7 @@ bool SITargetLowering::isReassocProfitable(SelectionDAG &DAG, SDValue N0, // Check if we have a good chance to form the memory access pattern with the // base and offset return (DAG.isBaseWithConstantOffset(N0) && - hasMemSDNodeUser(*N0->use_begin())); + hasMemSDNodeUser(*N0->user_begin())); } bool SITargetLowering::isReassocProfitable(MachineRegisterInfo &MRI, diff --git a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp index b03221a440039..9ad46df159c20 100644 --- a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp +++ b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp @@ -505,14 +505,14 @@ bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const { if (!N->hasOneUse()) return false; - SDNode *Use = *N->use_begin(); - if (Use->getOpcode() == ISD::CopyToReg) + SDNode *User = *N->user_begin(); + if (User->getOpcode() == ISD::CopyToReg) return true; - if (Use->isMachineOpcode()) { + if (User->isMachineOpcode()) { const ARMBaseInstrInfo *TII = static_cast( CurDAG->getSubtarget().getInstrInfo()); - const MCInstrDesc &MCID = TII->get(Use->getMachineOpcode()); + const MCInstrDesc &MCID = TII->get(User->getMachineOpcode()); if (MCID.mayStore()) return true; unsigned Opcode = MCID.getOpcode(); diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp index 764d3c879f2d6..88293c1b1101a 100644 --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -3456,7 +3456,7 @@ bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { return false; SDValue TCChain = Chain; - SDNode *Copy = *N->use_begin(); + SDNode *Copy = *N->user_begin(); if (Copy->getOpcode() == ISD::CopyToReg) { // If the copy has a glue operand, we conservatively assume it isn't safe to // perform a tail call. @@ -3494,7 +3494,7 @@ bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { // f32 returned in a single GPR. if (!Copy->hasOneUse()) return false; - Copy = *Copy->use_begin(); + Copy = *Copy->user_begin(); if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0)) return false; // If the copy has a glue operand, we conservatively assume it isn't safe to @@ -15356,7 +15356,7 @@ PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { assert(EltVT == MVT::f32 && "Unexpected type!"); // Check 1.2. - SDNode *Use = *N->use_begin(); + SDNode *Use = *N->user_begin(); if (Use->getOpcode() != ISD::BITCAST || Use->getValueType(0).isFloatingPoint()) return SDValue(); @@ -15561,9 +15561,8 @@ PerformExtractEltToVMOVRRD(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { !isa(Ext.getOperand(1)) || Ext.getConstantOperandVal(1) % 2 != 0) return SDValue(); - if (Ext->use_size() == 1 && - (Ext->use_begin()->getOpcode() == ISD::SINT_TO_FP || - Ext->use_begin()->getOpcode() == ISD::UINT_TO_FP)) + if (Ext->hasOneUse() && (Ext->user_begin()->getOpcode() == ISD::SINT_TO_FP || + Ext->user_begin()->getOpcode() == ISD::UINT_TO_FP)) return SDValue(); SDValue Op0 = Ext.getOperand(0); @@ -15587,11 +15586,11 @@ PerformExtractEltToVMOVRRD(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { // lanes. 
SDValue OtherExt(*OtherIt, 0); if (OtherExt.getValueType() != MVT::i32) { - if (OtherExt->use_size() != 1 || - OtherExt->use_begin()->getOpcode() != ISD::BITCAST || - OtherExt->use_begin()->getValueType(0) != MVT::i32) + if (!OtherExt->hasOneUse() || + OtherExt->user_begin()->getOpcode() != ISD::BITCAST || + OtherExt->user_begin()->getValueType(0) != MVT::i32) return SDValue(); - OtherExt = SDValue(*OtherExt->use_begin(), 0); + OtherExt = SDValue(*OtherExt->user_begin(), 0); } // Convert the type to a f64 and extract with a VMOVRRD. @@ -18326,9 +18325,9 @@ static SDValue PerformHWLoopCombine(SDNode *N, SelectionDAG &DAG = DCI.DAG; SDValue Elements = Int.getOperand(2); unsigned IntOp = Int->getConstantOperandVal(1); - assert((N->hasOneUse() && N->use_begin()->getOpcode() == ISD::BR) - && "expected single br user"); - SDNode *Br = *N->use_begin(); + assert((N->hasOneUse() && N->user_begin()->getOpcode() == ISD::BR) && + "expected single br user"); + SDNode *Br = *N->user_begin(); SDValue OtherTarget = Br->getOperand(1); // Update the unconditional branch to branch to the given Dest. @@ -19330,10 +19329,10 @@ bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const { // If there's more than one user instruction, the loadext is desirable no // matter what. There can be two uses by the same instruction. if (ExtVal->use_empty() || - !ExtVal->use_begin()->isOnlyUserOf(ExtVal.getNode())) + !ExtVal->user_begin()->isOnlyUserOf(ExtVal.getNode())) return true; - SDNode *U = *ExtVal->use_begin(); + SDNode *U = *ExtVal->user_begin(); if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB || U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHLIMM)) return false; diff --git a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp index 2c20db16b055f..2a267e52610b3 100644 --- a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp @@ -1097,7 +1097,7 @@ static bool isMemOPCandidate(SDNode *I, SDNode *U) { SDValue S1 = U->getOperand(1); SDValue SY = (S0.getNode() == I) ? 
S1 : S0; - SDNode *UUse = *U->use_begin(); + SDNode *UUse = *U->user_begin(); if (UUse->getNumValues() != 1) return false; @@ -2431,7 +2431,7 @@ void HexagonDAGToDAGISel::rebalanceAddressTrees() { Worklist.push_back(N->getOperand(1).getNode()); // Not a root if it has only one use and same opcode as its parent - if (N->hasOneUse() && Opcode == N->use_begin()->getOpcode()) + if (N->hasOneUse() && Opcode == N->user_begin()->getOpcode()) continue; // This root node has already been processed diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp index e32ed41c2893c..7f67def73ca2b 100644 --- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp +++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp @@ -5340,7 +5340,7 @@ bool LoongArchTargetLowering::isUsedByReturnOnly(SDNode *N, if (!N->hasNUsesOfValue(1, 0)) return false; - SDNode *Copy = *N->use_begin(); + SDNode *Copy = *N->user_begin(); if (Copy->getOpcode() != ISD::CopyToReg) return false; diff --git a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp index 277c1414d7160..5445a0a06bef1 100644 --- a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp @@ -6610,7 +6610,7 @@ void PPCDAGToDAGISel::foldBoolExts(SDValue &Res, SDNode *&N) { SDValue ConstFalse = CurDAG->getConstant(0, dl, VT); do { - SDNode *User = *N->use_begin(); + SDNode *User = *N->user_begin(); if (User->getNumOperands() != 2) break; @@ -7564,7 +7564,7 @@ static void reduceVSXSwap(SDNode *N, SelectionDAG *DAG) { while (V->isMachineOpcode() && V->getMachineOpcode() == TargetOpcode::COPY_TO_REGCLASS) { // All values in the chain should have single use. - if (V->use_empty() || !V->use_begin()->isOnlyUserOf(V.getNode())) + if (V->use_empty() || !V->user_begin()->isOnlyUserOf(V.getNode())) return SDValue(); V = V->getOperand(0); } diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp index 199e1f41cfc05..3b3842bb14456 100644 --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -16331,7 +16331,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, if (!LD->hasNUsesOfValue(2, 0)) return false; - auto UI = LD->use_begin(); + auto UI = LD->user_begin(); while (UI.getUse().getResNo() != 0) ++UI; SDNode *Trunc = *UI++; while (UI.getUse().getResNo() != 0) ++UI; @@ -16349,14 +16349,14 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, !RightShift->hasOneUse()) return false; - SDNode *Trunc2 = *RightShift->use_begin(); + SDNode *Trunc2 = *RightShift->user_begin(); if (Trunc2->getOpcode() != ISD::TRUNCATE || Trunc2->getValueType(0) != MVT::i32 || !Trunc2->hasOneUse()) return false; - SDNode *Bitcast = *Trunc->use_begin(); - SDNode *Bitcast2 = *Trunc2->use_begin(); + SDNode *Bitcast = *Trunc->user_begin(); + SDNode *Bitcast2 = *Trunc2->user_begin(); if (Bitcast->getOpcode() != ISD::BITCAST || Bitcast->getValueType(0) != MVT::f32) diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index e82190e77d590..abc960ead5d5d 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -8295,10 +8295,10 @@ SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const { return V; if (Op.hasOneUse()) { - unsigned UseOpc = Op->use_begin()->getOpcode(); + unsigned UseOpc = Op->user_begin()->getOpcode(); if (isBinOp(UseOpc) && 
DAG.isSafeToSpeculativelyExecute(UseOpc)) { - SDNode *BinOp = *Op->use_begin(); - if (SDValue NewSel = foldBinOpIntoSelectIfProfitable(*Op->use_begin(), + SDNode *BinOp = *Op->user_begin(); + if (SDValue NewSel = foldBinOpIntoSelectIfProfitable(*Op->user_begin(), DAG, Subtarget)) { DAG.ReplaceAllUsesWith(BinOp, &NewSel); // Opcode check is necessary because foldBinOpIntoSelectIfProfitable @@ -20428,7 +20428,7 @@ bool RISCVTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { if (!N->hasNUsesOfValue(1, 0)) return false; - SDNode *Copy = *N->use_begin(); + SDNode *Copy = *N->user_begin(); if (Copy->getOpcode() == ISD::BITCAST) { return isUsedByReturnOnly(Copy, Chain); diff --git a/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp index 210e3c5426f46..884d3a0614a8e 100644 --- a/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp +++ b/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp @@ -1886,7 +1886,7 @@ SystemZDAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, // physical CC register, which in turn is glued and chained to the // actual instruction that uses the CC value. Bail out if we have // anything else than that. - SDNode *CCUser = *U->use_begin(); + SDNode *CCUser = *U->user_begin(); SDNode *CCRegUser = nullptr; if (CCUser->getOpcode() == ISD::CopyToReg || cast(CCUser->getOperand(1))->getReg() == SystemZ::CC) { diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp index 47008af3479ee..331d3a4d494c9 100644 --- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp +++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp @@ -7117,7 +7117,7 @@ static bool isI128MovedToParts(LoadSDNode *LD, SDNode *&LoPart, if (User->getOpcode() == ISD::SRL && User->getOperand(1).getOpcode() == ISD::Constant && User->getConstantOperandVal(1) == 64 && User->hasOneUse()) { - User = *User->use_begin(); + User = *User->user_begin(); IsLoPart = false; } if (User->getOpcode() != ISD::TRUNCATE || User->getValueType(0) != MVT::i64) @@ -7674,7 +7674,7 @@ SDValue SystemZTargetLowering::combineFP_ROUND( U->getOperand(0) == Vec && U->getOperand(1).getOpcode() == ISD::Constant && U->getConstantOperandVal(1) == 1) { - SDValue OtherRound = SDValue(*U->use_begin(), 0); + SDValue OtherRound = SDValue(*U->user_begin(), 0); if (OtherRound.getOpcode() == N->getOpcode() && OtherRound.getOperand(OpNo) == SDValue(U, 0) && OtherRound.getValueType() == MVT::f32) { @@ -7738,7 +7738,7 @@ SDValue SystemZTargetLowering::combineFP_EXTEND( U->getOperand(0) == Vec && U->getOperand(1).getOpcode() == ISD::Constant && U->getConstantOperandVal(1) == 2) { - SDValue OtherExtend = SDValue(*U->use_begin(), 0); + SDValue OtherExtend = SDValue(*U->user_begin(), 0); if (OtherExtend.getOpcode() == N->getOpcode() && OtherExtend.getOperand(OpNo) == SDValue(U, 0) && OtherExtend.getValueType() == MVT::f64) { diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 4bd65dc6ade40..3d8af69380125 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -2766,12 +2766,12 @@ bool X86::mayFoldLoadIntoBroadcastFromMem(SDValue Op, MVT EltVT, } bool X86::mayFoldIntoStore(SDValue Op) { - return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin()); + return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->user_begin()); } bool X86::mayFoldIntoZeroExtend(SDValue Op) { if (Op.hasOneUse()) { - unsigned Opcode = Op.getNode()->use_begin()->getOpcode(); + 
unsigned Opcode = Op.getNode()->user_begin()->getOpcode(); return (ISD::ZERO_EXTEND == Opcode); } return false; @@ -3215,7 +3215,7 @@ bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load, // If this use is not an extract + store, it's probably worth splitting. if (UI->getOpcode() != ISD::EXTRACT_SUBVECTOR || !UI->hasOneUse() || - UI->use_begin()->getOpcode() != ISD::STORE) + UI->user_begin()->getOpcode() != ISD::STORE) return true; } // All non-chain uses are extract + store. @@ -18212,7 +18212,7 @@ static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) { // because a MOVSSmr can be used instead, which is smaller and faster. if (!Op.hasOneUse()) return SDValue(); - SDNode *User = *Op.getNode()->use_begin(); + SDNode *User = *Op.getNode()->user_begin(); if ((User->getOpcode() != ISD::STORE || isNullConstant(Idx)) && (User->getOpcode() != ISD::BITCAST || User->getValueType(0) != MVT::i32)) @@ -22873,8 +22873,8 @@ static bool hasNonFlagsUse(SDValue Op) { unsigned UOpNo = UI.getOperandNo(); if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) { // Look pass truncate. - UOpNo = User->use_begin().getOperandNo(); - User = *User->use_begin(); + UOpNo = User->user_begin().getOperandNo(); + User = *User->user_begin(); } if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC && @@ -25265,7 +25265,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { // have a fall-through edge, because this requires an explicit // jmp when the condition is false. if (Op.getNode()->hasOneUse()) { - SDNode *User = *Op.getNode()->use_begin(); + SDNode *User = *Op.getNode()->user_begin(); // Look for an unconditional branch following this conditional branch. // We need this because we need to reverse the successors in order // to implement FCMP_OEQ. @@ -39423,8 +39423,8 @@ static SDValue combineX86ShuffleChain(ArrayRef Inputs, SDValue Root, // from being reused. bool IsMaskedShuffle = false; if (RootSizeInBits == 512 || (Subtarget.hasVLX() && RootSizeInBits >= 128)) { - if (Root.hasOneUse() && Root->use_begin()->getOpcode() == ISD::VSELECT && - Root->use_begin()->getOperand(0).getScalarValueSizeInBits() == 1) { + if (Root.hasOneUse() && Root->user_begin()->getOpcode() == ISD::VSELECT && + Root->user_begin()->getOperand(0).getScalarValueSizeInBits() == 1) { IsMaskedShuffle = true; } } @@ -48982,7 +48982,7 @@ static SDValue combineMul(SDNode *N, SelectionDAG &DAG, (SignMulAmt >= 0 && (MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)))) { if (isPowerOf2_64(MulAmt2) && !(SignMulAmt >= 0 && N->hasOneUse() && - N->use_begin()->getOpcode() == ISD::ADD)) + N->user_begin()->getOpcode() == ISD::ADD)) // If second multiplifer is pow2, issue it first. We want the multiply // by 3, 5, or 9 to be folded into the addressing mode unless the lone // use is an add. Only do this for positive multiply amounts since the @@ -50765,7 +50765,7 @@ static SDValue combineX86SubCmpForFlags(SDNode *N, SDValue Flag, return SDValue(); // Check the only user of flag is `brcond ne`. 
- SDNode *BrCond = *Flag->use_begin(); + SDNode *BrCond = *Flag->user_begin(); if (BrCond->getOpcode() != X86ISD::BRCOND) return SDValue(); unsigned CondNo = 2; @@ -53176,9 +53176,9 @@ static SDValue combineToHorizontalAddSub(SDNode *N, SelectionDAG &DAG, auto MergableHorizOp = [N](unsigned HorizOpcode) { return N->hasOneUse() && - N->use_begin()->getOpcode() == ISD::VECTOR_SHUFFLE && - (N->use_begin()->getOperand(0).getOpcode() == HorizOpcode || - N->use_begin()->getOperand(1).getOpcode() == HorizOpcode); + N->user_begin()->getOpcode() == ISD::VECTOR_SHUFFLE && + (N->user_begin()->getOperand(0).getOpcode() == HorizOpcode || + N->user_begin()->getOperand(1).getOpcode() == HorizOpcode); }; switch (Opcode) { @@ -56422,7 +56422,7 @@ static SDValue combineX86AddSub(SDNode *N, SelectionDAG &DAG, if (Negate) { // Bail if this is only used by a user of the x86 add/sub. if (GenericAddSub->hasOneUse() && - GenericAddSub->use_begin()->isOnlyUserOf(N)) + GenericAddSub->user_begin()->isOnlyUserOf(N)) return; Op = DAG.getNegative(Op, DL, VT); } @@ -59419,7 +59419,7 @@ bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const { auto IsFoldableRMW = [](SDValue Load, SDValue Op) { if (!Op.hasOneUse()) return false; - SDNode *User = *Op->use_begin(); + SDNode *User = *Op->user_begin(); if (!ISD::isNormalStore(User)) return false; auto *Ld = cast(Load); @@ -59432,7 +59432,7 @@ bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const { return false; if (!Op.hasOneUse()) return false; - SDNode *User = *Op->use_begin(); + SDNode *User = *Op->user_begin(); if (User->getOpcode() != ISD::ATOMIC_STORE) return false; auto *Ld = cast(Load); @@ -59443,7 +59443,7 @@ bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const { auto IsFoldableZext = [](SDValue Op) { if (!Op.hasOneUse()) return false; - SDNode *User = *Op->use_begin(); + SDNode *User = *Op->user_begin(); EVT VT = User->getValueType(0); return (User->getOpcode() == ISD::ZERO_EXTEND && (VT == MVT::i32 || VT == MVT::i64)); diff --git a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp index df12ea2f79df5..b1c1ab4aa855d 100644 --- a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp +++ b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp @@ -944,7 +944,7 @@ bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { return false; SDValue TCChain = Chain; - SDNode *Copy = *N->use_begin(); + SDNode *Copy = *N->user_begin(); if (Copy->getOpcode() == ISD::CopyToReg) { // If the copy has a glue operand, we conservatively assume it isn't safe to // perform a tail call.
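
Illustrative usage (not part of the patch series): the sketch below shows how call sites read once users() and user_begin() from these patches are available. countAddUsers and soleUserIsStore are hypothetical helpers invented only for this example; the SDNode API they call is the one introduced or renamed above, everything else is assumed context.

    // C++ sketch, assuming the usual SelectionDAG headers are available.
    #include "llvm/CodeGen/SelectionDAGNodes.h"

    using namespace llvm;

    // Range-based iteration over users(): each element is the user SDNode*,
    // which is what the loop variable name now honestly reflects.
    static unsigned countAddUsers(const SDNode *N) {
      unsigned Count = 0;
      for (const SDNode *User : N->users())
        if (User->getOpcode() == ISD::ADD)
          ++Count;
      return Count;
    }

    // When only the first user matters, pair hasOneUse() with user_begin()
    // rather than spelling use_size() == 1 followed by use_begin().
    static bool soleUserIsStore(const SDNode *N) {
      return N->hasOneUse() && N->user_begin()->getOpcode() == ISD::STORE;
    }

As noted in the PATCH 3/3 message, hasOneUse() is preferred over use_size() == 1 because it does not need to walk the whole use list, and user_begin() is intended only for fetching the first user until the full user_iterator rename lands.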