diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index b525872f9dd2a..77c04369f3e92 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -844,6 +844,11 @@ END_TWO_BYTE_PACK()
 
   static use_iterator use_end() { return use_iterator(nullptr); }
 
+  /// Provide iteration support to walk over all users of an SDNode.
+  /// For now, this should only be used to get a pointer to the first user.
+  /// FIXME: Rename use_iterator to user_iterator. Add user_end().
+  use_iterator user_begin() const { return use_iterator(UseList); }
+
   // Dereferencing use_iterator returns the user SDNode* making it closer to a
   // user_iterator thus this function is called users() to reflect that.
   // FIXME: Rename to user_iterator and introduce a use_iterator that returns
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index ebce0ebe8f81c..85009439c37b3 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -2136,8 +2136,8 @@ SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
   // If the sole user is a token factor, we should make sure we have a
   // chance to merge them together. This prevents TF chains from inhibiting
   // optimizations.
-  if (N->hasOneUse() && N->use_begin()->getOpcode() == ISD::TokenFactor)
-    AddToWorklist(*(N->use_begin()));
+  if (N->hasOneUse() && N->user_begin()->getOpcode() == ISD::TokenFactor)
+    AddToWorklist(*(N->user_begin()));
 
   SmallVector<SDNode *, 8> TFs;  // List of token factors to visit.
   SmallVector<SDValue, 8> Ops;   // Ops for replacing token factor.
@@ -10906,15 +10906,15 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
   // which we plan to do. This workaround can be removed once the DAG is
   // processed in topological order.
   if (N->hasOneUse()) {
-    SDNode *Use = *N->use_begin();
+    SDNode *User = *N->user_begin();
 
     // Look past the truncate.
-    if (Use->getOpcode() == ISD::TRUNCATE && Use->hasOneUse())
-      Use = *Use->use_begin();
+    if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse())
+      User = *User->user_begin();
 
-    if (Use->getOpcode() == ISD::BRCOND || Use->getOpcode() == ISD::AND ||
-        Use->getOpcode() == ISD::OR || Use->getOpcode() == ISD::XOR)
-      AddToWorklist(Use);
+    if (User->getOpcode() == ISD::BRCOND || User->getOpcode() == ISD::AND ||
+        User->getOpcode() == ISD::OR || User->getOpcode() == ISD::XOR)
+      AddToWorklist(User);
   }
 
   // Try to transform this shift into a multiply-high if
@@ -12917,7 +12917,7 @@ SDValue DAGCombiner::visitSETCC(SDNode *N) {
   // also lend itself to numerous combines and, as a result, it is desired
   // we keep the argument to a brcond as a setcc as much as possible.
   bool PreferSetCC =
-      N->hasOneUse() && N->use_begin()->getOpcode() == ISD::BRCOND;
+      N->hasOneUse() && N->user_begin()->getOpcode() == ISD::BRCOND;
 
   ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(2))->get();
   EVT VT = N->getValueType(0);
@@ -14825,7 +14825,7 @@ SDValue DAGCombiner::reduceLoadWidth(SDNode *N) {
 
       // If the SRL is only used by a masking AND, we may be able to adjust
       // the ExtVT to make the AND redundant.
-      SDNode *Mask = *(SRL->use_begin());
+      SDNode *Mask = *(SRL->user_begin());
       if (SRL.hasOneUse() && Mask->getOpcode() == ISD::AND &&
           isa<ConstantSDNode>(Mask->getOperand(1))) {
         unsigned Offset, ActiveBits;
@@ -15364,7 +15364,7 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
   }
 
   // If this is anyext(trunc), don't fold it, allow ourselves to be folded.
-  if (N->hasOneUse() && (N->use_begin()->getOpcode() == ISD::ANY_EXTEND))
+  if (N->hasOneUse() && (N->user_begin()->getOpcode() == ISD::ANY_EXTEND))
     return SDValue();
 
   // Fold extract-and-trunc into a narrow extract. For example:
@@ -18370,7 +18370,7 @@ SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) {
       return FoldedVOp;
 
   // If this is fp_round(fpextend), don't fold it, allow ourselves to be folded.
-  if (N->hasOneUse() && N->use_begin()->getOpcode() == ISD::FP_ROUND)
+  if (N->hasOneUse() && N->user_begin()->getOpcode() == ISD::FP_ROUND)
     return SDValue();
 
   // fold (fp_extend c1fp) -> c1fp
@@ -19847,17 +19847,17 @@ struct LoadedSlice {
   bool canMergeExpensiveCrossRegisterBankCopy() const {
     if (!Inst || !Inst->hasOneUse())
       return false;
-    SDNode *Use = *Inst->use_begin();
-    if (Use->getOpcode() != ISD::BITCAST)
+    SDNode *User = *Inst->user_begin();
+    if (User->getOpcode() != ISD::BITCAST)
       return false;
     assert(DAG && "Missing context");
     const TargetLowering &TLI = DAG->getTargetLoweringInfo();
-    EVT ResVT = Use->getValueType(0);
+    EVT ResVT = User->getValueType(0);
     const TargetRegisterClass *ResRC =
-        TLI.getRegClassFor(ResVT.getSimpleVT(), Use->isDivergent());
+        TLI.getRegClassFor(ResVT.getSimpleVT(), User->isDivergent());
     const TargetRegisterClass *ArgRC =
-        TLI.getRegClassFor(Use->getOperand(0).getValueType().getSimpleVT(),
-                           Use->getOperand(0)->isDivergent());
+        TLI.getRegClassFor(User->getOperand(0).getValueType().getSimpleVT(),
+                           User->getOperand(0)->isDivergent());
     if (ArgRC == ResRC || !TLI.isOperationLegal(ISD::LOAD, ResVT))
       return false;
 
@@ -20069,7 +20069,7 @@ bool DAGCombiner::SliceUpLoad(SDNode *N) {
     if (User->getOpcode() == ISD::SRL && User->hasOneUse() &&
         isa<ConstantSDNode>(User->getOperand(1))) {
       Shift = User->getConstantOperandVal(1);
-      User = *User->use_begin();
+      User = *User->user_begin();
    }
 
    // At this point, User is a Truncate, iff we encountered, trunc or
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 5865dbe1307ba..494506def33a3 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -18109,9 +18109,9 @@ bool AArch64TargetLowering::shouldFoldConstantShiftPairToMask(
   if (N->getOpcode() == ISD::SHL && N->hasOneUse()) {
     if (auto C2 = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
       unsigned ShlAmt = C2->getZExtValue();
-      if (auto ShouldADD = *N->use_begin();
+      if (auto ShouldADD = *N->user_begin();
           ShouldADD->getOpcode() == ISD::ADD && ShouldADD->hasOneUse()) {
-        if (auto ShouldLOAD = dyn_cast<LoadSDNode>(*ShouldADD->use_begin())) {
+        if (auto ShouldLOAD = dyn_cast<LoadSDNode>(*ShouldADD->user_begin())) {
           unsigned ByteVT = ShouldLOAD->getMemoryVT().getSizeInBits() / 8;
           if ((1ULL << ShlAmt) == ByteVT &&
               isIndexedLoadLegal(ISD::PRE_INC, ShouldLOAD->getMemoryVT()))
@@ -18902,8 +18902,8 @@ static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG,
       return SDValue();
     // Conservatively do not lower to shift+add+shift if the mul might be
     // folded into madd or msub.
-    if (N->hasOneUse() && (N->use_begin()->getOpcode() == ISD::ADD ||
-                           N->use_begin()->getOpcode() == ISD::SUB))
+    if (N->hasOneUse() && (N->user_begin()->getOpcode() == ISD::ADD ||
+                           N->user_begin()->getOpcode() == ISD::SUB))
       return SDValue();
   }
   // Use ShiftedConstValue instead of ConstValue to support both shift+add/sub
@@ -21803,7 +21803,7 @@ static SDValue tryCombineWhileLo(SDNode *N,
   if (HalfSize < 2)
     return SDValue();
 
-  auto It = N->use_begin();
+  auto It = N->user_begin();
   SDNode *Lo = *It++;
   SDNode *Hi = *It;
 
@@ -23402,7 +23402,7 @@ static SDValue performPostLD1Combine(SDNode *N,
   // TODO: This could be expanded to more operations if they reliably use the
   // index variants.
   if (N->hasOneUse()) {
-    unsigned UseOpc = N->use_begin()->getOpcode();
+    unsigned UseOpc = N->user_begin()->getOpcode();
     if (UseOpc == ISD::FMUL || UseOpc == ISD::FMA)
       return SDValue();
   }
@@ -24755,7 +24755,7 @@ static SDValue tryToWidenSetCCOperands(SDNode *Op, SelectionDAG &DAG) {
 
   // Make sure that all uses of Op are VSELECTs with result matching types where
   // the result type has a larger element type than the SetCC operand.
-  SDNode *FirstUse = *Op->use_begin();
+  SDNode *FirstUse = *Op->user_begin();
   if (FirstUse->getOpcode() != ISD::VSELECT)
     return SDValue();
   EVT UseMVT = FirstUse->getValueType(0);
@@ -25905,7 +25905,7 @@ static SDValue performFPExtendCombine(SDNode *N, SelectionDAG &DAG,
   EVT VT = N->getValueType(0);
 
   // If this is fp_round(fpextend), don't fold it, allow ourselves to be folded.
-  if (N->hasOneUse() && N->use_begin()->getOpcode() == ISD::FP_ROUND)
+  if (N->hasOneUse() && N->user_begin()->getOpcode() == ISD::FP_ROUND)
     return SDValue();
 
   auto hasValidElementTypeForFPExtLoad = [](EVT VT) {
@@ -26072,7 +26072,7 @@ static SDValue tryCombineMULLWithUZP1(SDNode *N,
 
   // Check ExtractLow's user.
   if (HasFoundMULLow) {
-    SDNode *ExtractLowUser = *ExtractLow.getNode()->use_begin();
+    SDNode *ExtractLowUser = *ExtractLow.getNode()->user_begin();
     if (ExtractLowUser->getOpcode() != N->getOpcode()) {
       HasFoundMULLow = false;
     } else {
@@ -26549,7 +26549,7 @@ bool AArch64TargetLowering::isUsedByReturnOnly(SDNode *N,
     return false;
 
   SDValue TCChain = Chain;
-  SDNode *Copy = *N->use_begin();
+  SDNode *Copy = *N->user_begin();
   if (Copy->getOpcode() == ISD::CopyToReg) {
     // If the copy has a glue operand, we conservatively assume it isn't safe to
     // perform a tail call.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index c129759f3d3c7..a716d185e392a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -1088,9 +1088,9 @@ bool AMDGPUTargetLowering::isDesirableToCommuteWithShift(
     return true;
 
   // If the only user is an i32 right-shift, then don't destroy a BFE pattern.
-  if (N->getValueType(0) == MVT::i32 && N->use_size() == 1 &&
-      (N->use_begin()->getOpcode() == ISD::SRA ||
-       N->use_begin()->getOpcode() == ISD::SRL))
+  if (N->getValueType(0) == MVT::i32 && N->hasOneUse() &&
+      (N->user_begin()->getOpcode() == ISD::SRA ||
+       N->user_begin()->getOpcode() == ISD::SRL))
     return false;
 
   // Don't destroy or(shl(load_zext(),c), load_zext()) patterns.
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 2b8cc5b4e33a4..f83ccf6d8280b 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -16896,7 +16896,7 @@ bool SITargetLowering::isReassocProfitable(SelectionDAG &DAG, SDValue N0,
   // Check if we have a good chance to form the memory access pattern with the
   // base and offset
   return (DAG.isBaseWithConstantOffset(N0) &&
-          hasMemSDNodeUser(*N0->use_begin()));
+          hasMemSDNodeUser(*N0->user_begin()));
 }
 
 bool SITargetLowering::isReassocProfitable(MachineRegisterInfo &MRI,
diff --git a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
index b03221a440039..9ad46df159c20 100644
--- a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -505,14 +505,14 @@ bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const {
   if (!N->hasOneUse())
     return false;
 
-  SDNode *Use = *N->use_begin();
-  if (Use->getOpcode() == ISD::CopyToReg)
+  SDNode *User = *N->user_begin();
+  if (User->getOpcode() == ISD::CopyToReg)
     return true;
 
-  if (Use->isMachineOpcode()) {
+  if (User->isMachineOpcode()) {
    const ARMBaseInstrInfo *TII = static_cast<const ARMBaseInstrInfo *>(
        CurDAG->getSubtarget().getInstrInfo());
-    const MCInstrDesc &MCID = TII->get(Use->getMachineOpcode());
+    const MCInstrDesc &MCID = TII->get(User->getMachineOpcode());
    if (MCID.mayStore())
      return true;
    unsigned Opcode = MCID.getOpcode();
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 764d3c879f2d6..88293c1b1101a 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -3456,7 +3456,7 @@ bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
     return false;
 
   SDValue TCChain = Chain;
-  SDNode *Copy = *N->use_begin();
+  SDNode *Copy = *N->user_begin();
   if (Copy->getOpcode() == ISD::CopyToReg) {
     // If the copy has a glue operand, we conservatively assume it isn't safe to
     // perform a tail call.
@@ -3494,7 +3494,7 @@ bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
   // f32 returned in a single GPR.
   if (!Copy->hasOneUse())
     return false;
-  Copy = *Copy->use_begin();
+  Copy = *Copy->user_begin();
   if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0))
     return false;
   // If the copy has a glue operand, we conservatively assume it isn't safe to
@@ -15356,7 +15356,7 @@ PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
   assert(EltVT == MVT::f32 && "Unexpected type!");
 
   // Check 1.2.
-  SDNode *Use = *N->use_begin();
+  SDNode *Use = *N->user_begin();
   if (Use->getOpcode() != ISD::BITCAST ||
       Use->getValueType(0).isFloatingPoint())
     return SDValue();
@@ -15561,9 +15561,8 @@ PerformExtractEltToVMOVRRD(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
       !isa<ConstantSDNode>(Ext.getOperand(1)) ||
       Ext.getConstantOperandVal(1) % 2 != 0)
     return SDValue();
-  if (Ext->use_size() == 1 &&
-      (Ext->use_begin()->getOpcode() == ISD::SINT_TO_FP ||
-       Ext->use_begin()->getOpcode() == ISD::UINT_TO_FP))
+  if (Ext->hasOneUse() && (Ext->user_begin()->getOpcode() == ISD::SINT_TO_FP ||
+                           Ext->user_begin()->getOpcode() == ISD::UINT_TO_FP))
     return SDValue();
 
   SDValue Op0 = Ext.getOperand(0);
@@ -15587,11 +15586,11 @@ PerformExtractEltToVMOVRRD(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
     // lanes.
     SDValue OtherExt(*OtherIt, 0);
     if (OtherExt.getValueType() != MVT::i32) {
-      if (OtherExt->use_size() != 1 ||
-          OtherExt->use_begin()->getOpcode() != ISD::BITCAST ||
-          OtherExt->use_begin()->getValueType(0) != MVT::i32)
+      if (!OtherExt->hasOneUse() ||
+          OtherExt->user_begin()->getOpcode() != ISD::BITCAST ||
+          OtherExt->user_begin()->getValueType(0) != MVT::i32)
         return SDValue();
-      OtherExt = SDValue(*OtherExt->use_begin(), 0);
+      OtherExt = SDValue(*OtherExt->user_begin(), 0);
     }
 
     // Convert the type to a f64 and extract with a VMOVRRD.
@@ -18326,9 +18325,9 @@ static SDValue PerformHWLoopCombine(SDNode *N,
   SelectionDAG &DAG = DCI.DAG;
   SDValue Elements = Int.getOperand(2);
   unsigned IntOp = Int->getConstantOperandVal(1);
-  assert((N->hasOneUse() && N->use_begin()->getOpcode() == ISD::BR)
-         && "expected single br user");
-  SDNode *Br = *N->use_begin();
+  assert((N->hasOneUse() && N->user_begin()->getOpcode() == ISD::BR) &&
+         "expected single br user");
+  SDNode *Br = *N->user_begin();
   SDValue OtherTarget = Br->getOperand(1);
 
   // Update the unconditional branch to branch to the given Dest.
@@ -19330,10 +19329,10 @@ bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
   // If there's more than one user instruction, the loadext is desirable no
   // matter what. There can be two uses by the same instruction.
   if (ExtVal->use_empty() ||
-      !ExtVal->use_begin()->isOnlyUserOf(ExtVal.getNode()))
+      !ExtVal->user_begin()->isOnlyUserOf(ExtVal.getNode()))
     return true;
 
-  SDNode *U = *ExtVal->use_begin();
+  SDNode *U = *ExtVal->user_begin();
   if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB ||
       U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHLIMM))
     return false;
diff --git a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
index 2c20db16b055f..2a267e52610b3 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
@@ -1097,7 +1097,7 @@ static bool isMemOPCandidate(SDNode *I, SDNode *U) {
   SDValue S1 = U->getOperand(1);
   SDValue SY = (S0.getNode() == I) ? S1 : S0;
 
-  SDNode *UUse = *U->use_begin();
+  SDNode *UUse = *U->user_begin();
   if (UUse->getNumValues() != 1)
     return false;
 
@@ -2431,7 +2431,7 @@ void HexagonDAGToDAGISel::rebalanceAddressTrees() {
       Worklist.push_back(N->getOperand(1).getNode());
 
     // Not a root if it has only one use and same opcode as its parent
-    if (N->hasOneUse() && Opcode == N->use_begin()->getOpcode())
+    if (N->hasOneUse() && Opcode == N->user_begin()->getOpcode())
       continue;
 
     // This root node has already been processed
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index e32ed41c2893c..7f67def73ca2b 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -5340,7 +5340,7 @@ bool LoongArchTargetLowering::isUsedByReturnOnly(SDNode *N,
   if (!N->hasNUsesOfValue(1, 0))
     return false;
 
-  SDNode *Copy = *N->use_begin();
+  SDNode *Copy = *N->user_begin();
   if (Copy->getOpcode() != ISD::CopyToReg)
     return false;
 
diff --git a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index 277c1414d7160..5445a0a06bef1 100644
--- a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -6610,7 +6610,7 @@ void PPCDAGToDAGISel::foldBoolExts(SDValue &Res, SDNode *&N) {
   SDValue ConstFalse = CurDAG->getConstant(0, dl, VT);
 
   do {
-    SDNode *User = *N->use_begin();
+    SDNode *User = *N->user_begin();
     if (User->getNumOperands() != 2)
       break;
 
@@ -7564,7 +7564,7 @@ static void reduceVSXSwap(SDNode *N, SelectionDAG *DAG) {
   while (V->isMachineOpcode() &&
          V->getMachineOpcode() == TargetOpcode::COPY_TO_REGCLASS) {
     // All values in the chain should have single use.
-    if (V->use_empty() || !V->use_begin()->isOnlyUserOf(V.getNode()))
+    if (V->use_empty() || !V->user_begin()->isOnlyUserOf(V.getNode()))
       return SDValue();
     V = V->getOperand(0);
   }
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 199e1f41cfc05..3b3842bb14456 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -16331,7 +16331,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
       if (!LD->hasNUsesOfValue(2, 0))
         return false;
 
-      auto UI = LD->use_begin();
+      auto UI = LD->user_begin();
       while (UI.getUse().getResNo() != 0) ++UI;
       SDNode *Trunc = *UI++;
       while (UI.getUse().getResNo() != 0) ++UI;
@@ -16349,14 +16349,14 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
          !RightShift->hasOneUse())
        return false;
 
-      SDNode *Trunc2 = *RightShift->use_begin();
+      SDNode *Trunc2 = *RightShift->user_begin();
      if (Trunc2->getOpcode() != ISD::TRUNCATE ||
          Trunc2->getValueType(0) != MVT::i32 ||
          !Trunc2->hasOneUse())
        return false;
 
-      SDNode *Bitcast = *Trunc->use_begin();
-      SDNode *Bitcast2 = *Trunc2->use_begin();
+      SDNode *Bitcast = *Trunc->user_begin();
+      SDNode *Bitcast2 = *Trunc2->user_begin();
 
      if (Bitcast->getOpcode() != ISD::BITCAST ||
          Bitcast->getValueType(0) != MVT::f32)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 9383e700ade86..f0afd26598d6d 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -8300,10 +8300,10 @@ SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
     return V;
 
   if (Op.hasOneUse()) {
-    unsigned UseOpc = Op->use_begin()->getOpcode();
+    unsigned UseOpc = Op->user_begin()->getOpcode();
     if (isBinOp(UseOpc) && DAG.isSafeToSpeculativelyExecute(UseOpc)) {
-      SDNode *BinOp = *Op->use_begin();
-      if (SDValue NewSel = foldBinOpIntoSelectIfProfitable(*Op->use_begin(),
+      SDNode *BinOp = *Op->user_begin();
+      if (SDValue NewSel = foldBinOpIntoSelectIfProfitable(*Op->user_begin(),
                                                            DAG, Subtarget)) {
         DAG.ReplaceAllUsesWith(BinOp, &NewSel);
         // Opcode check is necessary because foldBinOpIntoSelectIfProfitable
@@ -20492,7 +20492,7 @@ bool RISCVTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
   if (!N->hasNUsesOfValue(1, 0))
     return false;
 
-  SDNode *Copy = *N->use_begin();
+  SDNode *Copy = *N->user_begin();
 
   if (Copy->getOpcode() == ISD::BITCAST) {
     return isUsedByReturnOnly(Copy, Chain);
diff --git a/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
index 210e3c5426f46..884d3a0614a8e 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
@@ -1886,7 +1886,7 @@ SystemZDAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U,
   // physical CC register, which in turn is glued and chained to the
   // actual instruction that uses the CC value. Bail out if we have
   // anything else than that.
-  SDNode *CCUser = *U->use_begin();
+  SDNode *CCUser = *U->user_begin();
   SDNode *CCRegUser = nullptr;
   if (CCUser->getOpcode() == ISD::CopyToReg &&
       cast<RegisterSDNode>(CCUser->getOperand(1))->getReg() == SystemZ::CC) {
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 47008af3479ee..331d3a4d494c9 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -7117,7 +7117,7 @@ static bool isI128MovedToParts(LoadSDNode *LD, SDNode *&LoPart,
     if (User->getOpcode() == ISD::SRL &&
         User->getOperand(1).getOpcode() == ISD::Constant &&
         User->getConstantOperandVal(1) == 64 && User->hasOneUse()) {
-      User = *User->use_begin();
+      User = *User->user_begin();
       IsLoPart = false;
     }
     if (User->getOpcode() != ISD::TRUNCATE || User->getValueType(0) != MVT::i64)
@@ -7674,7 +7674,7 @@ SDValue SystemZTargetLowering::combineFP_ROUND(
         U->getOperand(0) == Vec &&
         U->getOperand(1).getOpcode() == ISD::Constant &&
         U->getConstantOperandVal(1) == 1) {
-      SDValue OtherRound = SDValue(*U->use_begin(), 0);
+      SDValue OtherRound = SDValue(*U->user_begin(), 0);
       if (OtherRound.getOpcode() == N->getOpcode() &&
           OtherRound.getOperand(OpNo) == SDValue(U, 0) &&
           OtherRound.getValueType() == MVT::f32) {
@@ -7738,7 +7738,7 @@ SDValue SystemZTargetLowering::combineFP_EXTEND(
         U->getOperand(0) == Vec &&
         U->getOperand(1).getOpcode() == ISD::Constant &&
         U->getConstantOperandVal(1) == 2) {
-      SDValue OtherExtend = SDValue(*U->use_begin(), 0);
+      SDValue OtherExtend = SDValue(*U->user_begin(), 0);
       if (OtherExtend.getOpcode() == N->getOpcode() &&
           OtherExtend.getOperand(OpNo) == SDValue(U, 0) &&
           OtherExtend.getValueType() == MVT::f64) {
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 4bd65dc6ade40..3d8af69380125 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -2766,12 +2766,12 @@ bool X86::mayFoldLoadIntoBroadcastFromMem(SDValue Op, MVT EltVT,
 }
 
 bool X86::mayFoldIntoStore(SDValue Op) {
-  return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
+  return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->user_begin());
 }
 
 bool X86::mayFoldIntoZeroExtend(SDValue Op) {
   if (Op.hasOneUse()) {
-    unsigned Opcode = Op.getNode()->use_begin()->getOpcode();
+    unsigned Opcode = Op.getNode()->user_begin()->getOpcode();
     return (ISD::ZERO_EXTEND == Opcode);
   }
   return false;
@@ -3215,7 +3215,7 @@ bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
 
     // If this use is not an extract + store, it's probably worth splitting.
     if (UI->getOpcode() != ISD::EXTRACT_SUBVECTOR || !UI->hasOneUse() ||
-        UI->use_begin()->getOpcode() != ISD::STORE)
+        UI->user_begin()->getOpcode() != ISD::STORE)
       return true;
   }
   // All non-chain uses are extract + store.
@@ -18212,7 +18212,7 @@ static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
   // because a MOVSSmr can be used instead, which is smaller and faster.
   if (!Op.hasOneUse())
     return SDValue();
-  SDNode *User = *Op.getNode()->use_begin();
+  SDNode *User = *Op.getNode()->user_begin();
   if ((User->getOpcode() != ISD::STORE || isNullConstant(Idx)) &&
       (User->getOpcode() != ISD::BITCAST ||
        User->getValueType(0) != MVT::i32))
@@ -22873,8 +22873,8 @@ static bool hasNonFlagsUse(SDValue Op) {
     unsigned UOpNo = UI.getOperandNo();
     if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
       // Look past the truncate.
-      UOpNo = User->use_begin().getOperandNo();
-      User = *User->use_begin();
+      UOpNo = User->user_begin().getOperandNo();
+      User = *User->user_begin();
     }
 
     if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
@@ -25265,7 +25265,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
   // have a fall-through edge, because this requires an explicit
   // jmp when the condition is false.
   if (Op.getNode()->hasOneUse()) {
-    SDNode *User = *Op.getNode()->use_begin();
+    SDNode *User = *Op.getNode()->user_begin();
     // Look for an unconditional branch following this conditional branch.
     // We need this because we need to reverse the successors in order
     // to implement FCMP_OEQ.
@@ -39423,8 +39423,8 @@ static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
   // from being reused.
   bool IsMaskedShuffle = false;
   if (RootSizeInBits == 512 || (Subtarget.hasVLX() && RootSizeInBits >= 128)) {
-    if (Root.hasOneUse() && Root->use_begin()->getOpcode() == ISD::VSELECT &&
-        Root->use_begin()->getOperand(0).getScalarValueSizeInBits() == 1) {
+    if (Root.hasOneUse() && Root->user_begin()->getOpcode() == ISD::VSELECT &&
+        Root->user_begin()->getOperand(0).getScalarValueSizeInBits() == 1) {
       IsMaskedShuffle = true;
     }
   }
@@ -48982,7 +48982,7 @@ static SDValue combineMul(SDNode *N, SelectionDAG &DAG,
        (SignMulAmt >= 0 && (MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)))) {
     if (isPowerOf2_64(MulAmt2) &&
         !(SignMulAmt >= 0 && N->hasOneUse() &&
-          N->use_begin()->getOpcode() == ISD::ADD))
+          N->user_begin()->getOpcode() == ISD::ADD))
       // If the second multiplier is pow2, issue it first. We want the multiply
      // by 3, 5, or 9 to be folded into the addressing mode unless the lone
      // use is an add. Only do this for positive multiply amounts since the
@@ -50765,7 +50765,7 @@ static SDValue combineX86SubCmpForFlags(SDNode *N, SDValue Flag,
     return SDValue();
 
   // Check the only user of flag is `brcond ne`.
-  SDNode *BrCond = *Flag->use_begin();
+  SDNode *BrCond = *Flag->user_begin();
   if (BrCond->getOpcode() != X86ISD::BRCOND)
     return SDValue();
   unsigned CondNo = 2;
@@ -53176,9 +53176,9 @@ static SDValue combineToHorizontalAddSub(SDNode *N, SelectionDAG &DAG,
 
   auto MergableHorizOp = [N](unsigned HorizOpcode) {
     return N->hasOneUse() &&
-           N->use_begin()->getOpcode() == ISD::VECTOR_SHUFFLE &&
-           (N->use_begin()->getOperand(0).getOpcode() == HorizOpcode ||
-            N->use_begin()->getOperand(1).getOpcode() == HorizOpcode);
+           N->user_begin()->getOpcode() == ISD::VECTOR_SHUFFLE &&
+           (N->user_begin()->getOperand(0).getOpcode() == HorizOpcode ||
+            N->user_begin()->getOperand(1).getOpcode() == HorizOpcode);
   };
 
   switch (Opcode) {
@@ -56422,7 +56422,7 @@ static SDValue combineX86AddSub(SDNode *N, SelectionDAG &DAG,
     if (Negate) {
       // Bail if this is only used by a user of the x86 add/sub.
       if (GenericAddSub->hasOneUse() &&
-          GenericAddSub->use_begin()->isOnlyUserOf(N))
+          GenericAddSub->user_begin()->isOnlyUserOf(N))
         return;
       Op = DAG.getNegative(Op, DL, VT);
     }
@@ -59419,7 +59419,7 @@ bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
   auto IsFoldableRMW = [](SDValue Load, SDValue Op) {
     if (!Op.hasOneUse())
       return false;
-    SDNode *User = *Op->use_begin();
+    SDNode *User = *Op->user_begin();
     if (!ISD::isNormalStore(User))
       return false;
     auto *Ld = cast<LoadSDNode>(Load);
@@ -59432,7 +59432,7 @@ bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
       return false;
     if (!Op.hasOneUse())
       return false;
-    SDNode *User = *Op->use_begin();
+    SDNode *User = *Op->user_begin();
     if (User->getOpcode() != ISD::ATOMIC_STORE)
       return false;
     auto *Ld = cast<LoadSDNode>(Load);
@@ -59443,7 +59443,7 @@ bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
   auto IsFoldableZext = [](SDValue Op) {
     if (!Op.hasOneUse())
       return false;
-    SDNode *User = *Op->use_begin();
+    SDNode *User = *Op->user_begin();
     EVT VT = User->getValueType(0);
     return (User->getOpcode() == ISD::ZERO_EXTEND &&
             (VT == MVT::i32 || VT == MVT::i64));
diff --git a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
index df12ea2f79df5..b1c1ab4aa855d 100644
--- a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
+++ b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
@@ -944,7 +944,7 @@ bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
     return false;
 
   SDValue TCChain = Chain;
-  SDNode *Copy = *N->use_begin();
+  SDNode *Copy = *N->user_begin();
   if (Copy->getOpcode() == ISD::CopyToReg) {
     // If the copy has a glue operand, we conservatively assume it isn't safe to
     // perform a tail call.
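
Usage sketch: the rename is mechanical. Dereferencing `use_begin()` already yields the user `SDNode*` (not the `SDUse`), and `user_begin()` simply names that correctly. The snippet below is illustrative only, assuming a hypothetical single-use `SDNode *N` inside a DAG combine; it mirrors the `visitSRL` hunk above and is not part of the patch.

    // Sketch only; N is a hypothetical node with at least one use.
    if (N->hasOneUse()) {
      SDNode *User = *N->user_begin();   // previously *N->use_begin()
      if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse())
        User = *User->user_begin();      // look past a single-use truncate
      // ... inspect User->getOpcode() to decide whether the combine applies ...
    }

Per the new header comment, `user_begin()` should for now only be used to fetch a pointer to the first user; iterating over all users still goes through `users()` until `user_iterator`/`user_end()` exist.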