diff --git a/llvm/include/llvm/ADT/TinyPtrVector.h b/llvm/include/llvm/ADT/TinyPtrVector.h index fa2bcd8933a0a..501500793509a 100644 --- a/llvm/include/llvm/ADT/TinyPtrVector.h +++ b/llvm/include/llvm/ADT/TinyPtrVector.h @@ -135,7 +135,7 @@ class TinyPtrVector { // implicit conversion operator to ArrayRef. operator ArrayRef() const { if (Val.isNull()) - return std::nullopt; + return {}; if (isa(Val)) return *Val.getAddrOfPtr1(); return *cast(Val); @@ -144,7 +144,7 @@ class TinyPtrVector { // implicit conversion operator to MutableArrayRef. operator MutableArrayRef() { if (Val.isNull()) - return std::nullopt; + return {}; if (isa(Val)) return *Val.getAddrOfPtr1(); return *cast(Val); diff --git a/llvm/include/llvm/Analysis/ScalarEvolution.h b/llvm/include/llvm/Analysis/ScalarEvolution.h index 89f9395959779..44fb249d584d8 100644 --- a/llvm/include/llvm/Analysis/ScalarEvolution.h +++ b/llvm/include/llvm/Analysis/ScalarEvolution.h @@ -1130,11 +1130,10 @@ class ScalarEvolution { /// as arguments and asserts enforce that internally. /*implicit*/ ExitLimit(const SCEV *E); - ExitLimit( - const SCEV *E, const SCEV *ConstantMaxNotTaken, - const SCEV *SymbolicMaxNotTaken, bool MaxOrZero, - ArrayRef *> PredSetList = - std::nullopt); + ExitLimit(const SCEV *E, const SCEV *ConstantMaxNotTaken, + const SCEV *SymbolicMaxNotTaken, bool MaxOrZero, + ArrayRef *> + PredSetList = {}); ExitLimit(const SCEV *E, const SCEV *ConstantMaxNotTaken, const SCEV *SymbolicMaxNotTaken, bool MaxOrZero, diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h index 3411163549de2..cd69a8a371b6e 100644 --- a/llvm/include/llvm/Analysis/TargetTransformInfo.h +++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h @@ -1279,8 +1279,7 @@ class TargetTransformInfo { TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput, TTI::OperandValueInfo Opd1Info = {TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Opd2Info = {TTI::OK_AnyValue, TTI::OP_None}, - ArrayRef Args = std::nullopt, - const Instruction *CxtI = nullptr, + ArrayRef Args = {}, const Instruction *CxtI = nullptr, const TargetLibraryInfo *TLibInfo = nullptr) const; /// Returns the cost estimation for alternating opcode pattern that can be @@ -1303,11 +1302,12 @@ class TargetTransformInfo { /// passed through \p Args, which helps improve the cost estimation in some /// cases, like in broadcast loads. /// NOTE: For subvector extractions Tp represents the source type. - InstructionCost getShuffleCost( - ShuffleKind Kind, VectorType *Tp, ArrayRef Mask = std::nullopt, - TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput, int Index = 0, - VectorType *SubTp = nullptr, ArrayRef Args = std::nullopt, - const Instruction *CxtI = nullptr) const; + InstructionCost + getShuffleCost(ShuffleKind Kind, VectorType *Tp, ArrayRef Mask = {}, + TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput, + int Index = 0, VectorType *SubTp = nullptr, + ArrayRef Args = {}, + const Instruction *CxtI = nullptr) const; /// Represents a hint about the context in which a cast is used. 
/// diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h index 2819af30cd170..c592bc8f6ba2a 100644 --- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h +++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h @@ -605,7 +605,7 @@ class TargetTransformInfoImplBase { ArrayRef Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, - ArrayRef Args = std::nullopt, + ArrayRef Args = {}, const Instruction *CxtI = nullptr) const { return 1; } @@ -1176,7 +1176,7 @@ class TargetTransformInfoImplCRTPBase : public TargetTransformInfoImplBase { Cost += static_cast(this)->getArithmeticInstrCost( Instruction::Add, GEP->getType(), CostKind, {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None}, - std::nullopt); + {}); } else { SmallVector Indices(GEP->indices()); Cost += static_cast(this)->getGEPCost(GEP->getSourceElementType(), diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h index 2f2a6a09ffc44..7198e134a2d26 100644 --- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h +++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h @@ -897,8 +897,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase { unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Opd1Info = {TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Opd2Info = {TTI::OK_AnyValue, TTI::OP_None}, - ArrayRef Args = std::nullopt, - const Instruction *CxtI = nullptr) { + ArrayRef Args = {}, const Instruction *CxtI = nullptr) { // Check if any of the operands are vector operands. const TargetLoweringBase *TLI = getTLI(); int ISD = TLI->InstructionOpcodeToISD(Opcode); @@ -1023,7 +1022,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase { ArrayRef Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, - ArrayRef Args = std::nullopt, + ArrayRef Args = {}, const Instruction *CxtI = nullptr) { switch (improveShuffleKindFromMask(Kind, Mask, Tp, Index, SubTp)) { case TTI::SK_Broadcast: @@ -1657,9 +1656,9 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase { if (isa(RetTy)) return BaseT::getIntrinsicInstrCost(ICA, CostKind); unsigned Index = cast(Args[1])->getZExtValue(); - return thisT()->getShuffleCost( - TTI::SK_ExtractSubvector, cast(Args[0]->getType()), - std::nullopt, CostKind, Index, cast(RetTy)); + return thisT()->getShuffleCost(TTI::SK_ExtractSubvector, + cast(Args[0]->getType()), {}, + CostKind, Index, cast(RetTy)); } case Intrinsic::vector_insert: { // FIXME: Handle case where a scalable vector is inserted into a scalable @@ -1668,19 +1667,19 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase { return BaseT::getIntrinsicInstrCost(ICA, CostKind); unsigned Index = cast(Args[2])->getZExtValue(); return thisT()->getShuffleCost( - TTI::SK_InsertSubvector, cast(Args[0]->getType()), - std::nullopt, CostKind, Index, cast(Args[1]->getType())); + TTI::SK_InsertSubvector, cast(Args[0]->getType()), {}, + CostKind, Index, cast(Args[1]->getType())); } case Intrinsic::vector_reverse: { - return thisT()->getShuffleCost( - TTI::SK_Reverse, cast(Args[0]->getType()), std::nullopt, - CostKind, 0, cast(RetTy)); + return thisT()->getShuffleCost(TTI::SK_Reverse, + cast(Args[0]->getType()), {}, + CostKind, 0, cast(RetTy)); } case Intrinsic::vector_splice: { unsigned Index = cast(Args[2])->getZExtValue(); - return thisT()->getShuffleCost( - TTI::SK_Splice, cast(Args[0]->getType()), std::nullopt, - CostKind, Index, cast(RetTy)); + return 
thisT()->getShuffleCost(TTI::SK_Splice, + cast(Args[0]->getType()), {}, + CostKind, Index, cast(RetTy)); } case Intrinsic::vector_reduce_add: case Intrinsic::vector_reduce_mul: @@ -2600,9 +2599,8 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase { while (NumVecElts > MVTLen) { NumVecElts /= 2; VectorType *SubTy = FixedVectorType::get(ScalarTy, NumVecElts); - ShuffleCost += - thisT()->getShuffleCost(TTI::SK_ExtractSubvector, Ty, std::nullopt, - CostKind, NumVecElts, SubTy); + ShuffleCost += thisT()->getShuffleCost(TTI::SK_ExtractSubvector, Ty, {}, + CostKind, NumVecElts, SubTy); ArithCost += thisT()->getArithmeticInstrCost(Opcode, SubTy, CostKind); Ty = SubTy; ++LongVectorCount; @@ -2618,7 +2616,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase { // By default reductions need one shuffle per reduction level. ShuffleCost += NumReduxLevels * thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty, - std::nullopt, CostKind, 0, Ty); + {}, CostKind, 0, Ty); ArithCost += NumReduxLevels * thisT()->getArithmeticInstrCost(Opcode, Ty, CostKind); return ShuffleCost + ArithCost + @@ -2691,9 +2689,8 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase { NumVecElts /= 2; auto *SubTy = FixedVectorType::get(ScalarTy, NumVecElts); - ShuffleCost += - thisT()->getShuffleCost(TTI::SK_ExtractSubvector, Ty, std::nullopt, - CostKind, NumVecElts, SubTy); + ShuffleCost += thisT()->getShuffleCost(TTI::SK_ExtractSubvector, Ty, {}, + CostKind, NumVecElts, SubTy); IntrinsicCostAttributes Attrs(IID, SubTy, {SubTy, SubTy}, FMF); MinMaxCost += getIntrinsicInstrCost(Attrs, CostKind); @@ -2709,7 +2706,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase { // architecture-dependent length. ShuffleCost += NumReduxLevels * thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty, - std::nullopt, CostKind, 0, Ty); + {}, CostKind, 0, Ty); IntrinsicCostAttributes Attrs(IID, Ty, {Ty, Ty}, FMF); MinMaxCost += NumReduxLevels * getIntrinsicInstrCost(Attrs, CostKind); // The last min/max should be in vector registers and we counted it above. diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h b/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h index 6f79313c41862..1f969788d2088 100644 --- a/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h +++ b/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h @@ -409,20 +409,21 @@ class CallLowering { /// \p Handler to move them to the assigned locations. /// /// \return True if everything has succeeded, false otherwise. - bool determineAndHandleAssignments( - ValueHandler &Handler, ValueAssigner &Assigner, - SmallVectorImpl &Args, MachineIRBuilder &MIRBuilder, - CallingConv::ID CallConv, bool IsVarArg, - ArrayRef ThisReturnRegs = std::nullopt) const; + bool + determineAndHandleAssignments(ValueHandler &Handler, ValueAssigner &Assigner, + SmallVectorImpl &Args, + MachineIRBuilder &MIRBuilder, + CallingConv::ID CallConv, bool IsVarArg, + ArrayRef ThisReturnRegs = {}) const; /// Use \p Handler to insert code to handle the argument/return values /// represented by \p Args. It's expected determineAssignments previously /// processed these arguments to populate \p CCState and \p ArgLocs. 
- bool - handleAssignments(ValueHandler &Handler, SmallVectorImpl &Args, - CCState &CCState, SmallVectorImpl &ArgLocs, - MachineIRBuilder &MIRBuilder, - ArrayRef ThisReturnRegs = std::nullopt) const; + bool handleAssignments(ValueHandler &Handler, SmallVectorImpl &Args, + CCState &CCState, + SmallVectorImpl &ArgLocs, + MachineIRBuilder &MIRBuilder, + ArrayRef ThisReturnRegs = {}) const; /// Check whether parameters to a call that are passed in callee saved /// registers are the same as from the calling function. This needs to be diff --git a/llvm/include/llvm/CodeGen/LiveRangeEdit.h b/llvm/include/llvm/CodeGen/LiveRangeEdit.h index 0950c20325fb0..3a4a76d99c5c2 100644 --- a/llvm/include/llvm/CodeGen/LiveRangeEdit.h +++ b/llvm/include/llvm/CodeGen/LiveRangeEdit.h @@ -237,7 +237,7 @@ class LiveRangeEdit : private MachineRegisterInfo::Delegate { /// allocator. These registers should not be split into new intervals /// as currently those new intervals are not guaranteed to spill. void eliminateDeadDefs(SmallVectorImpl &Dead, - ArrayRef RegsBeingSpilled = std::nullopt); + ArrayRef RegsBeingSpilled = {}); /// calculateRegClassAndHint - Recompute register class and hint for each new /// register. diff --git a/llvm/include/llvm/CodeGen/MachineTraceMetrics.h b/llvm/include/llvm/CodeGen/MachineTraceMetrics.h index a5e78d47724d8..c7d97597d551c 100644 --- a/llvm/include/llvm/CodeGen/MachineTraceMetrics.h +++ b/llvm/include/llvm/CodeGen/MachineTraceMetrics.h @@ -295,9 +295,9 @@ class MachineTraceMetrics : public MachineFunctionPass { /// classes are included. For the caller to account for extra machine /// instructions, it must first resolve each instruction's scheduling class. unsigned getResourceLength( - ArrayRef Extrablocks = std::nullopt, - ArrayRef ExtraInstrs = std::nullopt, - ArrayRef RemoveInstrs = std::nullopt) const; + ArrayRef Extrablocks = {}, + ArrayRef ExtraInstrs = {}, + ArrayRef RemoveInstrs = {}) const; /// Return the length of the (data dependency) critical path through the /// trace. diff --git a/llvm/include/llvm/CodeGen/TargetInstrInfo.h b/llvm/include/llvm/CodeGen/TargetInstrInfo.h index a3bfc63f2a479..c944a96aee61a 100644 --- a/llvm/include/llvm/CodeGen/TargetInstrInfo.h +++ b/llvm/include/llvm/CodeGen/TargetInstrInfo.h @@ -2007,7 +2007,7 @@ class TargetInstrInfo : public MCInstrInfo { /// defined by this method. virtual ArrayRef> getSerializableTargetIndices() const { - return std::nullopt; + return {}; } /// Decompose the machine operand's target flags into two values - the direct @@ -2024,7 +2024,7 @@ class TargetInstrInfo : public MCInstrInfo { /// defined by this method. virtual ArrayRef> getSerializableDirectMachineOperandTargetFlags() const { - return std::nullopt; + return {}; } /// Return an array that contains the bitmask target flag values and their @@ -2034,7 +2034,7 @@ class TargetInstrInfo : public MCInstrInfo { /// defined by this method. virtual ArrayRef> getSerializableBitmaskMachineOperandTargetFlags() const { - return std::nullopt; + return {}; } /// Return an array that contains the MMO target flag values and their @@ -2044,7 +2044,7 @@ class TargetInstrInfo : public MCInstrInfo { /// defined by this method. virtual ArrayRef> getSerializableMachineMemOperandTargetFlags() const { - return std::nullopt; + return {}; } /// Determines whether \p Inst is a tail call instruction. 
Override this diff --git a/llvm/include/llvm/IR/DIBuilder.h b/llvm/include/llvm/IR/DIBuilder.h index 97ea38f041baa..a5655f630fc4f 100644 --- a/llvm/include/llvm/IR/DIBuilder.h +++ b/llvm/include/llvm/IR/DIBuilder.h @@ -772,7 +772,7 @@ namespace llvm { /// Create a new descriptor for the specified /// variable which has a complex address expression for its address. /// \param Addr An array of complex address operations. - DIExpression *createExpression(ArrayRef Addr = std::nullopt); + DIExpression *createExpression(ArrayRef Addr = {}); /// Create an expression for a variable that does not have an address, but /// does have a constant value. diff --git a/llvm/include/llvm/IR/DebugInfoMetadata.h b/llvm/include/llvm/IR/DebugInfoMetadata.h index e8fdc0bacc663..d2b4e900438d3 100644 --- a/llvm/include/llvm/IR/DebugInfoMetadata.h +++ b/llvm/include/llvm/IR/DebugInfoMetadata.h @@ -138,7 +138,7 @@ class DINode : public MDNode { protected: DINode(LLVMContext &C, unsigned ID, StorageType Storage, unsigned Tag, - ArrayRef Ops1, ArrayRef Ops2 = std::nullopt) + ArrayRef Ops1, ArrayRef Ops2 = {}) : MDNode(C, ID, Storage, Ops1, Ops2) { assert(Tag < 1u << 16); SubclassData16 = Tag; @@ -311,7 +311,7 @@ class DIAssignID : public MDNode { friend class MDNode; DIAssignID(LLVMContext &C, StorageType Storage) - : MDNode(C, DIAssignIDKind, Storage, std::nullopt) {} + : MDNode(C, DIAssignIDKind, Storage, {}) {} ~DIAssignID() { dropAllReferences(); } @@ -2730,7 +2730,7 @@ class DIExpression : public MDNode { std::vector Elements; DIExpression(LLVMContext &C, StorageType Storage, ArrayRef Elements) - : MDNode(C, DIExpressionKind, Storage, std::nullopt), + : MDNode(C, DIExpressionKind, Storage, {}), Elements(Elements.begin(), Elements.end()) {} ~DIExpression() = default; @@ -3776,8 +3776,7 @@ class DIMacroNode : public MDNode { protected: DIMacroNode(LLVMContext &C, unsigned ID, StorageType Storage, unsigned MIType, - ArrayRef Ops1, - ArrayRef Ops2 = std::nullopt) + ArrayRef Ops1, ArrayRef Ops2 = {}) : MDNode(C, ID, Storage, Ops1, Ops2) { assert(MIType < 1u << 16); SubclassData16 = MIType; diff --git a/llvm/include/llvm/IR/DerivedTypes.h b/llvm/include/llvm/IR/DerivedTypes.h index d31654ac131d2..975c142f1a457 100644 --- a/llvm/include/llvm/IR/DerivedTypes.h +++ b/llvm/include/llvm/IR/DerivedTypes.h @@ -733,16 +733,16 @@ class TargetExtType : public Type { /// Return a target extension type having the specified name and optional /// type and integer parameters. static TargetExtType *get(LLVMContext &Context, StringRef Name, - ArrayRef Types = std::nullopt, - ArrayRef Ints = std::nullopt); + ArrayRef Types = {}, + ArrayRef Ints = {}); /// Return a target extension type having the specified name and optional /// type and integer parameters, or an appropriate Error if it fails the /// parameters check. - static Expected - getOrError(LLVMContext &Context, StringRef Name, - ArrayRef Types = std::nullopt, - ArrayRef Ints = std::nullopt); + static Expected getOrError(LLVMContext &Context, + StringRef Name, + ArrayRef Types = {}, + ArrayRef Ints = {}); /// Check that a newly created target extension type has the expected number /// of type parameters and integer parameters, returning the type itself if OK diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h index 4193fcd125401..8f83dede4a0cb 100644 --- a/llvm/include/llvm/IR/IRBuilder.h +++ b/llvm/include/llvm/IR/IRBuilder.h @@ -221,7 +221,7 @@ class IRBuilderBase { /// Set nosanitize metadata. 
void SetNoSanitizeMetadata() { AddOrRemoveMetadataToCopy(llvm::LLVMContext::MD_nosanitize, - llvm::MDNode::get(getContext(), std::nullopt)); + llvm::MDNode::get(getContext(), {})); } /// Collect metadata with IDs \p MetadataKinds from \p Src which should be @@ -638,8 +638,7 @@ class IRBuilderBase { Value *ArraySize, Function *MallocF = nullptr, const Twine &Name = ""); /// Generate the IR for a call to the builtin free function. - CallInst *CreateFree(Value *Source, - ArrayRef Bundles = std::nullopt); + CallInst *CreateFree(Value *Source, ArrayRef Bundles = {}); CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val, Value *Size, Align Alignment, @@ -852,9 +851,8 @@ class IRBuilderBase { /// /// The optional argument \p OpBundles specifies operand bundles that are /// added to the call instruction. - CallInst * - CreateAssumption(Value *Cond, - ArrayRef OpBundles = std::nullopt); + CallInst *CreateAssumption(Value *Cond, + ArrayRef OpBundles = {}); /// Create a llvm.experimental.noalias.scope.decl intrinsic call. Instruction *CreateNoAliasScopeDeclaration(Value *Scope); @@ -1194,7 +1192,7 @@ class IRBuilderBase { } InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee, BasicBlock *NormalDest, BasicBlock *UnwindDest, - ArrayRef Args = std::nullopt, + ArrayRef Args = {}, const Twine &Name = "") { InvokeInst *II = InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args); @@ -1212,8 +1210,7 @@ class IRBuilderBase { } InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest, - BasicBlock *UnwindDest, - ArrayRef Args = std::nullopt, + BasicBlock *UnwindDest, ArrayRef Args = {}, const Twine &Name = "") { return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(), NormalDest, UnwindDest, Args, Name); @@ -1223,7 +1220,7 @@ class IRBuilderBase { CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee, BasicBlock *DefaultDest, ArrayRef IndirectDests, - ArrayRef Args = std::nullopt, + ArrayRef Args = {}, const Twine &Name = "") { return Insert(CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests, Args), Name); @@ -1241,7 +1238,7 @@ class IRBuilderBase { CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest, ArrayRef IndirectDests, - ArrayRef Args = std::nullopt, + ArrayRef Args = {}, const Twine &Name = "") { return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(), DefaultDest, IndirectDests, Args, Name); @@ -1277,7 +1274,7 @@ class IRBuilderBase { } CleanupPadInst *CreateCleanupPad(Value *ParentPad, - ArrayRef Args = std::nullopt, + ArrayRef Args = {}, const Twine &Name = "") { return Insert(CleanupPadInst::Create(ParentPad, Args), Name); } @@ -2439,8 +2436,8 @@ class IRBuilderBase { public: CallInst *CreateCall(FunctionType *FTy, Value *Callee, - ArrayRef Args = std::nullopt, - const Twine &Name = "", MDNode *FPMathTag = nullptr) { + ArrayRef Args = {}, const Twine &Name = "", + MDNode *FPMathTag = nullptr) { CallInst *CI = CallInst::Create(FTy, Callee, Args, DefaultOperandBundles); if (IsFPConstrained) setConstrainedFPCallAttr(CI); @@ -2460,8 +2457,7 @@ class IRBuilderBase { return Insert(CI, Name); } - CallInst *CreateCall(FunctionCallee Callee, - ArrayRef Args = std::nullopt, + CallInst *CreateCall(FunctionCallee Callee, ArrayRef Args = {}, const Twine &Name = "", MDNode *FPMathTag = nullptr) { return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args, Name, FPMathTag); @@ -2700,17 +2696,17 @@ class IRBuilder : public IRBuilderBase { public: IRBuilder(LLVMContext &C, FolderTy Folder, InserterTy Inserter = 
InserterTy(), MDNode *FPMathTag = nullptr, - ArrayRef OpBundles = std::nullopt) + ArrayRef OpBundles = {}) : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles), Folder(Folder), Inserter(Inserter) {} explicit IRBuilder(LLVMContext &C, MDNode *FPMathTag = nullptr, - ArrayRef OpBundles = std::nullopt) + ArrayRef OpBundles = {}) : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles) {} explicit IRBuilder(BasicBlock *TheBB, FolderTy Folder, MDNode *FPMathTag = nullptr, - ArrayRef OpBundles = std::nullopt) + ArrayRef OpBundles = {}) : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter, FPMathTag, OpBundles), Folder(Folder) { @@ -2718,14 +2714,14 @@ class IRBuilder : public IRBuilderBase { } explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = nullptr, - ArrayRef OpBundles = std::nullopt) + ArrayRef OpBundles = {}) : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter, FPMathTag, OpBundles) { SetInsertPoint(TheBB); } explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr, - ArrayRef OpBundles = std::nullopt) + ArrayRef OpBundles = {}) : IRBuilderBase(IP->getContext(), this->Folder, this->Inserter, FPMathTag, OpBundles) { SetInsertPoint(IP); @@ -2733,7 +2729,7 @@ class IRBuilder : public IRBuilderBase { IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, FolderTy Folder, MDNode *FPMathTag = nullptr, - ArrayRef OpBundles = std::nullopt) + ArrayRef OpBundles = {}) : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter, FPMathTag, OpBundles), Folder(Folder) { @@ -2742,7 +2738,7 @@ class IRBuilder : public IRBuilderBase { IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, MDNode *FPMathTag = nullptr, - ArrayRef OpBundles = std::nullopt) + ArrayRef OpBundles = {}) : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter, FPMathTag, OpBundles) { SetInsertPoint(TheBB, IP); diff --git a/llvm/include/llvm/IR/Instruction.h b/llvm/include/llvm/IR/Instruction.h index a12d5d9d8fe94..108f44e7c5ad3 100644 --- a/llvm/include/llvm/IR/Instruction.h +++ b/llvm/include/llvm/IR/Instruction.h @@ -433,7 +433,7 @@ class Instruction : public User, /// convenience method for passes to do so. /// dropUBImplyingAttrsAndUnknownMetadata should be used instead of /// this API if the Instruction being modified is a call. - void dropUnknownNonDebugMetadata(ArrayRef KnownIDs = std::nullopt); + void dropUnknownNonDebugMetadata(ArrayRef KnownIDs = {}); /// @} /// Adds an !annotation metadata node with \p Annotation to this instruction. 
diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h index e89739a555266..75a059760f48f 100644 --- a/llvm/include/llvm/IR/Instructions.h +++ b/llvm/include/llvm/IR/Instructions.h @@ -1421,8 +1421,7 @@ class CallInst : public CallBase { inline CallInst(FunctionType *Ty, Value *Func, ArrayRef Args, const Twine &NameStr, AllocInfo AllocInfo, InsertPosition InsertBefore) - : CallInst(Ty, Func, Args, std::nullopt, NameStr, AllocInfo, - InsertBefore) {} + : CallInst(Ty, Func, Args, {}, NameStr, AllocInfo, InsertBefore) {} explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr, AllocInfo AllocInfo, InsertPosition InsertBefore); @@ -1457,12 +1456,12 @@ class CallInst : public CallBase { const Twine &NameStr, InsertPosition InsertBefore = nullptr) { IntrusiveOperandsAllocMarker AllocMarker{ComputeNumOperands(Args.size())}; - return new (AllocMarker) CallInst(Ty, Func, Args, std::nullopt, NameStr, - AllocMarker, InsertBefore); + return new (AllocMarker) + CallInst(Ty, Func, Args, {}, NameStr, AllocMarker, InsertBefore); } static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef Args, - ArrayRef Bundles = std::nullopt, + ArrayRef Bundles = {}, const Twine &NameStr = "", InsertPosition InsertBefore = nullptr) { IntrusiveOperandsAndDescriptorAllocMarker AllocMarker{ @@ -1480,7 +1479,7 @@ class CallInst : public CallBase { } static CallInst *Create(FunctionCallee Func, ArrayRef Args, - ArrayRef Bundles = std::nullopt, + ArrayRef Bundles = {}, const Twine &NameStr = "", InsertPosition InsertBefore = nullptr) { return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles, @@ -3648,14 +3647,13 @@ class InvokeInst : public CallBase { InsertPosition InsertBefore = nullptr) { IntrusiveOperandsAllocMarker AllocMarker{ ComputeNumOperands(unsigned(Args.size()))}; - return new (AllocMarker) - InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt, - AllocMarker, NameStr, InsertBefore); + return new (AllocMarker) InvokeInst(Ty, Func, IfNormal, IfException, Args, + {}, AllocMarker, NameStr, InsertBefore); } static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef Args, - ArrayRef Bundles = std::nullopt, + ArrayRef Bundles = {}, const Twine &NameStr = "", InsertPosition InsertBefore = nullptr) { IntrusiveOperandsAndDescriptorAllocMarker AllocMarker{ @@ -3672,12 +3670,12 @@ class InvokeInst : public CallBase { const Twine &NameStr, InsertPosition InsertBefore = nullptr) { return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, - IfException, Args, std::nullopt, NameStr, InsertBefore); + IfException, Args, {}, NameStr, InsertBefore); } static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef Args, - ArrayRef Bundles = std::nullopt, + ArrayRef Bundles = {}, const Twine &NameStr = "", InsertPosition InsertBefore = nullptr) { return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, @@ -3805,15 +3803,15 @@ class CallBrInst : public CallBase { IntrusiveOperandsAllocMarker AllocMarker{ ComputeNumOperands(Args.size(), IndirectDests.size())}; return new (AllocMarker) - CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, std::nullopt, - AllocMarker, NameStr, InsertBefore); + CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, {}, AllocMarker, + NameStr, InsertBefore); } static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef IndirectDests, ArrayRef Args, - ArrayRef Bundles = std::nullopt, - const 
Twine &NameStr = "", InsertPosition InsertBefore = nullptr) { + ArrayRef Bundles = {}, const Twine &NameStr = "", + InsertPosition InsertBefore = nullptr) { IntrusiveOperandsAndDescriptorAllocMarker AllocMarker{ ComputeNumOperands(Args.size(), IndirectDests.size(), CountBundleInputs(Bundles)), @@ -3835,7 +3833,7 @@ class CallBrInst : public CallBase { static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, ArrayRef IndirectDests, ArrayRef Args, - ArrayRef Bundles = std::nullopt, + ArrayRef Bundles = {}, const Twine &NameStr = "", InsertPosition InsertBefore = nullptr) { return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, @@ -4163,8 +4161,7 @@ class CleanupPadInst : public FuncletPadInst { NameStr, InsertBefore) {} public: - static CleanupPadInst *Create(Value *ParentPad, - ArrayRef Args = std::nullopt, + static CleanupPadInst *Create(Value *ParentPad, ArrayRef Args = {}, const Twine &NameStr = "", InsertPosition InsertBefore = nullptr) { IntrusiveOperandsAllocMarker AllocMarker{unsigned(1 + Args.size())}; diff --git a/llvm/include/llvm/IR/Intrinsics.h b/llvm/include/llvm/IR/Intrinsics.h index 4f5801e1d996c..4bd7fda77f313 100644 --- a/llvm/include/llvm/IR/Intrinsics.h +++ b/llvm/include/llvm/IR/Intrinsics.h @@ -73,8 +73,7 @@ namespace Intrinsic { std::string getNameNoUnnamedTypes(ID Id, ArrayRef Tys); /// Return the function type for an intrinsic. - FunctionType *getType(LLVMContext &Context, ID id, - ArrayRef Tys = std::nullopt); + FunctionType *getType(LLVMContext &Context, ID id, ArrayRef Tys = {}); /// Returns true if the intrinsic can be overloaded. bool isOverloaded(ID id); @@ -89,8 +88,7 @@ namespace Intrinsic { /// using iAny, fAny, vAny, or iPTRAny). For a declaration of an overloaded /// intrinsic, Tys must provide exactly one type for each overloaded type in /// the intrinsic. - Function *getDeclaration(Module *M, ID id, - ArrayRef Tys = std::nullopt); + Function *getDeclaration(Module *M, ID id, ArrayRef Tys = {}); /// Looks up Name in NameTable via binary search. NameTable must be sorted /// and all entries must start with "llvm.". If NameTable contains an exact diff --git a/llvm/include/llvm/IR/Metadata.h b/llvm/include/llvm/IR/Metadata.h index 495db79362f09..7088276754516 100644 --- a/llvm/include/llvm/IR/Metadata.h +++ b/llvm/include/llvm/IR/Metadata.h @@ -1181,7 +1181,7 @@ class MDNode : public Metadata { protected: MDNode(LLVMContext &Context, unsigned ID, StorageType Storage, - ArrayRef Ops1, ArrayRef Ops2 = std::nullopt); + ArrayRef Ops1, ArrayRef Ops2 = {}); ~MDNode() = default; void *operator new(size_t Size, size_t NumOps, StorageType Storage); diff --git a/llvm/include/llvm/Object/COFFImportFile.h b/llvm/include/llvm/Object/COFFImportFile.h index 649fb4930934d..e24eb4c380bfa 100644 --- a/llvm/include/llvm/Object/COFFImportFile.h +++ b/llvm/include/llvm/Object/COFFImportFile.h @@ -135,10 +135,10 @@ struct COFFShortExport { /// linking both ARM64EC and pure ARM64 objects, and the linker will pick only /// the exports relevant to the target platform. For non-hybrid targets, /// the NativeExports parameter should not be used. 
-Error writeImportLibrary( - StringRef ImportName, StringRef Path, ArrayRef Exports, - COFF::MachineTypes Machine, bool MinGW, - ArrayRef NativeExports = std::nullopt); +Error writeImportLibrary(StringRef ImportName, StringRef Path, + ArrayRef Exports, + COFF::MachineTypes Machine, bool MinGW, + ArrayRef NativeExports = {}); } // namespace object } // namespace llvm diff --git a/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h b/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h index 5fc497db8df54..fa07b3a9e8b14 100644 --- a/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h +++ b/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h @@ -660,7 +660,7 @@ class CounterMappingContext { public: CounterMappingContext(ArrayRef Expressions, - ArrayRef CounterValues = std::nullopt) + ArrayRef CounterValues = {}) : Expressions(Expressions), CounterValues(CounterValues) {} void setCounts(ArrayRef Counts) { CounterValues = Counts; } @@ -970,7 +970,7 @@ class CoverageMapping { /// Ignores non-instrumented object files unless all are not instrumented. static Expected> load(ArrayRef ObjectFilenames, StringRef ProfileFilename, - vfs::FileSystem &FS, ArrayRef Arches = std::nullopt, + vfs::FileSystem &FS, ArrayRef Arches = {}, StringRef CompilationDir = "", const object::BuildIDFetcher *BIDFetcher = nullptr, bool CheckBinaryIDs = false); diff --git a/llvm/include/llvm/ProfileData/InstrProf.h b/llvm/include/llvm/ProfileData/InstrProf.h index c4270478565d9..b0b2258735e2a 100644 --- a/llvm/include/llvm/ProfileData/InstrProf.h +++ b/llvm/include/llvm/ProfileData/InstrProf.h @@ -959,7 +959,7 @@ struct InstrProfRecord { ArrayRef getValueSitesForKind(uint32_t ValueKind) const { if (!ValueData) - return std::nullopt; + return {}; assert(IPVK_First <= ValueKind && ValueKind <= IPVK_Last && "Unknown value kind!"); return (*ValueData)[ValueKind - IPVK_First]; diff --git a/llvm/include/llvm/ProfileData/InstrProfCorrelator.h b/llvm/include/llvm/ProfileData/InstrProfCorrelator.h index c873597e42095..ed8642495cd74 100644 --- a/llvm/include/llvm/ProfileData/InstrProfCorrelator.h +++ b/llvm/include/llvm/ProfileData/InstrProfCorrelator.h @@ -40,7 +40,7 @@ class InstrProfCorrelator { static llvm::Expected> get(StringRef Filename, ProfCorrelatorKind FileKind, const object::BuildIDFetcher *BIDFetcher = nullptr, - const ArrayRef BIs = std::nullopt); + const ArrayRef BIs = {}); /// Construct a ProfileData vector used to correlate raw instrumentation data /// to their functions. 
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp index e06863b6deb90..c7f14ed2edf45 100644 --- a/llvm/lib/Analysis/ScalarEvolution.cpp +++ b/llvm/lib/Analysis/ScalarEvolution.cpp @@ -4519,7 +4519,7 @@ bool ScalarEvolution::containsAddRecurrence(const SCEV *S) { ArrayRef ScalarEvolution::getSCEVValues(const SCEV *S) { ExprValueMapType::iterator SI = ExprValueMap.find_as(S); if (SI == ExprValueMap.end()) - return std::nullopt; + return {}; return SI->second.getArrayRef(); } @@ -8681,7 +8681,7 @@ bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero( } ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E) - : ExitLimit(E, E, E, false, std::nullopt) {} + : ExitLimit(E, E, E, false, {}) {} ScalarEvolution::ExitLimit::ExitLimit( const SCEV *E, const SCEV *ConstantMaxNotTaken, diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp index 66edf1e5a2957..08b917fdb260a 100644 --- a/llvm/lib/AsmParser/LLParser.cpp +++ b/llvm/lib/AsmParser/LLParser.cpp @@ -948,7 +948,7 @@ bool LLParser::parseMDNodeID(MDNode *&Result) { // Otherwise, create MDNode forward reference. auto &FwdRef = ForwardRefMDNodes[MID]; - FwdRef = std::make_pair(MDTuple::getTemporary(Context, std::nullopt), IDLoc); + FwdRef = std::make_pair(MDTuple::getTemporary(Context, {}), IDLoc); Result = FwdRef.first.get(); NumberedMetadata[MID].reset(Result); diff --git a/llvm/lib/Bitcode/Reader/MetadataLoader.cpp b/llvm/lib/Bitcode/Reader/MetadataLoader.cpp index 5b3b63c2d1e4e..ada1597b39920 100644 --- a/llvm/lib/Bitcode/Reader/MetadataLoader.cpp +++ b/llvm/lib/Bitcode/Reader/MetadataLoader.cpp @@ -222,7 +222,7 @@ Metadata *BitcodeReaderMetadataList::getMetadataFwdRef(unsigned Idx) { // Create and return a placeholder, which will later be RAUW'd. ++NumMDNodeTemporary; - Metadata *MD = MDNode::getTemporary(Context, std::nullopt).release(); + Metadata *MD = MDNode::getTemporary(Context, {}).release(); MetadataPtrs[Idx].reset(MD); return MD; } @@ -304,7 +304,7 @@ Metadata *BitcodeReaderMetadataList::upgradeTypeRef(Metadata *MaybeUUID) { auto &Ref = OldTypeRefs.Unknown[UUID]; if (!Ref) - Ref = MDNode::getTemporary(Context, std::nullopt); + Ref = MDNode::getTemporary(Context, {}); return Ref.get(); } @@ -321,7 +321,7 @@ Metadata *BitcodeReaderMetadataList::upgradeTypeRefArray(Metadata *MaybeTuple) { // resolveTypeRefArrays() will be resolve this forward reference. OldTypeRefs.Arrays.emplace_back( std::piecewise_construct, std::forward_as_tuple(Tuple), - std::forward_as_tuple(MDTuple::getTemporary(Context, std::nullopt))); + std::forward_as_tuple(MDTuple::getTemporary(Context, {}))); return OldTypeRefs.Arrays.back().second.get(); } @@ -1331,8 +1331,7 @@ Error MetadataLoader::MetadataLoaderImpl::parseOneMetadata( // If this isn't a LocalAsMetadata record, we're dropping it. This used // to be legal, but there's no upgrade path. 
auto dropRecord = [&] { - MetadataList.assignValue(MDNode::get(Context, std::nullopt), - NextMetadataNo); + MetadataList.assignValue(MDNode::get(Context, {}), NextMetadataNo); NextMetadataNo++; }; if (Record.size() != 2) { diff --git a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp index f184f320e89c9..5cba2cbc241e4 100644 --- a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp @@ -2042,7 +2042,7 @@ TypeIndex CodeViewDebug::lowerTypeFunction(const DISubroutineType *Ty) { ReturnAndArgTypeIndices.back() = TypeIndex::None(); } TypeIndex ReturnTypeIndex = TypeIndex::Void(); - ArrayRef ArgTypeIndices = std::nullopt; + ArrayRef ArgTypeIndices = {}; if (!ReturnAndArgTypeIndices.empty()) { auto ReturnAndArgTypesRef = ArrayRef(ReturnAndArgTypeIndices); ReturnTypeIndex = ReturnAndArgTypesRef.front(); diff --git a/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp b/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp index 788cdfe3bb13d..59257fd6aadd5 100644 --- a/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp +++ b/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp @@ -966,7 +966,7 @@ class MemLocFragmentFill { auto &Ctx = Fn.getContext(); for (auto &FragMemLoc : FragMemLocs) { - DIExpression *Expr = DIExpression::get(Ctx, std::nullopt); + DIExpression *Expr = DIExpression::get(Ctx, {}); if (FragMemLoc.SizeInBits != *Aggregates[FragMemLoc.Var].first->getSizeInBits()) Expr = *DIExpression::createFragmentExpression( @@ -1398,7 +1398,7 @@ ArrayRef AssignmentTrackingLowering::getContainedFragments(VariableID Var) const { auto R = VarContains.find(Var); if (R == VarContains.end()) - return std::nullopt; + return {}; return R->second; } @@ -1638,7 +1638,7 @@ void AssignmentTrackingLowering::processUntaggedInstruction( // // DIExpression: Add fragment and offset. DebugVariable V = FnVarLocs->getVariable(Var); - DIExpression *DIE = DIExpression::get(I.getContext(), std::nullopt); + DIExpression *DIE = DIExpression::get(I.getContext(), {}); if (auto Frag = V.getFragment()) { auto R = DIExpression::createFragmentExpression(DIE, Frag->OffsetInBits, Frag->SizeInBits); @@ -2419,7 +2419,7 @@ bool AssignmentTrackingLowering::run(FunctionVarLocsBuilder *FnVarLocsBuilder) { // built appropriately rather than always using an empty DIExpression. // The assert below is a reminder. 
assert(Simple); - VarLoc.Expr = DIExpression::get(Fn.getContext(), std::nullopt); + VarLoc.Expr = DIExpression::get(Fn.getContext(), {}); DebugVariable Var = FnVarLocs->getVariable(VarLoc.VariableID); FnVarLocs->addSingleLocVar(Var, VarLoc.Expr, VarLoc.DL, VarLoc.Values); InsertedAnyIntrinsics = true; diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp index 5710bda2b2cf8..d770933fb4fab 100644 --- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp +++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp @@ -2198,7 +2198,7 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID, for (const auto &Arg : CI.args()) for (auto VReg : getOrCreateVRegs(*Arg)) VRegs.push_back(VReg); - MIRBuilder.buildInstr(TargetOpcode::FAKE_USE, std::nullopt, VRegs); + MIRBuilder.buildInstr(TargetOpcode::FAKE_USE, {}, VRegs); return true; } case Intrinsic::dbg_declare: { diff --git a/llvm/lib/CodeGen/ImplicitNullChecks.cpp b/llvm/lib/CodeGen/ImplicitNullChecks.cpp index 5ad003ed31802..c586e07c7d042 100644 --- a/llvm/lib/CodeGen/ImplicitNullChecks.cpp +++ b/llvm/lib/CodeGen/ImplicitNullChecks.cpp @@ -801,7 +801,7 @@ void ImplicitNullChecks::rewriteNullChecks( // Insert an *unconditional* branch to not-null successor - we expect // block placement to remove fallthroughs later. TII->insertBranch(*NC.getCheckBlock(), NC.getNotNullSucc(), nullptr, - /*Cond=*/std::nullopt, DL); + /*Cond=*/{}, DL); NumImplicitNullChecks++; } diff --git a/llvm/lib/CodeGen/InlineSpiller.cpp b/llvm/lib/CodeGen/InlineSpiller.cpp index 81ae805d64e1e..409879a8b49bc 100644 --- a/llvm/lib/CodeGen/InlineSpiller.cpp +++ b/llvm/lib/CodeGen/InlineSpiller.cpp @@ -1697,7 +1697,7 @@ void HoistSpillHelper::hoistAllSpills() { RMEnt->removeOperand(i - 1); } } - Edit.eliminateDeadDefs(SpillsToRm, std::nullopt); + Edit.eliminateDeadDefs(SpillsToRm, {}); } } diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp index 47b220172602d..27f0a9331a3e3 100644 --- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp +++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp @@ -599,7 +599,7 @@ bool MIParser::error(StringRef::iterator Loc, const Twine &Msg) { // Create a diagnostic for a YAML string literal. Error = SMDiagnostic(SM, SMLoc(), Buffer.getBufferIdentifier(), 1, Loc - Source.data(), SourceMgr::DK_Error, Msg.str(), - Source, std::nullopt, std::nullopt); + Source, {}, {}); return true; } @@ -1399,7 +1399,7 @@ bool MIParser::parseMetadata(Metadata *&MD) { // Forward reference. auto &FwdRef = PFS.MachineForwardRefMDNodes[ID]; FwdRef = std::make_pair( - MDTuple::getTemporary(MF.getFunction().getContext(), std::nullopt), Loc); + MDTuple::getTemporary(MF.getFunction().getContext(), {}), Loc); PFS.MachineMetadataNodes[ID].reset(FwdRef.first.get()); MD = FwdRef.first.get(); diff --git a/llvm/lib/CodeGen/MIRParser/MIRParser.cpp b/llvm/lib/CodeGen/MIRParser/MIRParser.cpp index a5d6a40392d0c..d506cd1879648 100644 --- a/llvm/lib/CodeGen/MIRParser/MIRParser.cpp +++ b/llvm/lib/CodeGen/MIRParser/MIRParser.cpp @@ -1072,7 +1072,7 @@ SMDiagnostic MIRParserImpl::diagFromMIStringDiag(const SMDiagnostic &Error, (HasQuote ? 1 : 0)); // TODO: Translate any source ranges as well. 
- return SM.GetMessage(Loc, Error.getKind(), Error.getMessage(), std::nullopt, + return SM.GetMessage(Loc, Error.getKind(), Error.getMessage(), {}, Error.getFixIts()); } diff --git a/llvm/lib/CodeGen/MachineOutliner.cpp b/llvm/lib/CodeGen/MachineOutliner.cpp index 36c325cb422c6..6006ea7be806b 100644 --- a/llvm/lib/CodeGen/MachineOutliner.cpp +++ b/llvm/lib/CodeGen/MachineOutliner.cpp @@ -1007,8 +1007,7 @@ MachineFunction *MachineOutliner::createOutlinedFunction( DISubprogram *OutlinedSP = DB.createFunction( Unit /* Context */, F->getName(), StringRef(Dummy), Unit /* File */, 0 /* Line 0 is reserved for compiler-generated code. */, - DB.createSubroutineType( - DB.getOrCreateTypeArray(std::nullopt)), /* void type */ + DB.createSubroutineType(DB.getOrCreateTypeArray({})), /* void type */ 0, /* Line 0 is reserved for compiler-generated code. */ DINode::DIFlags::FlagArtificial /* Compiler-generated code. */, /* Outlined code is optimized code by definition. */ diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 3918da3ef031b..9b96dbb666198 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -1739,7 +1739,7 @@ SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL, unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant; SDVTList VTs = getVTList(EltVT); FoldingSetNodeID ID; - AddNodeIDNode(ID, Opc, VTs, std::nullopt); + AddNodeIDNode(ID, Opc, VTs, {}); ID.AddPointer(Elt); ID.AddBoolean(isO); void *IP = nullptr; @@ -1816,7 +1816,7 @@ SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL, unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP; SDVTList VTs = getVTList(EltVT); FoldingSetNodeID ID; - AddNodeIDNode(ID, Opc, VTs, std::nullopt); + AddNodeIDNode(ID, Opc, VTs, {}); ID.AddPointer(&V); void *IP = nullptr; SDNode *N = nullptr; @@ -1874,7 +1874,7 @@ SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, SDVTList VTs = getVTList(VT); FoldingSetNodeID ID; - AddNodeIDNode(ID, Opc, VTs, std::nullopt); + AddNodeIDNode(ID, Opc, VTs, {}); ID.AddPointer(GV); ID.AddInteger(Offset); ID.AddInteger(TargetFlags); @@ -1893,7 +1893,7 @@ SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) { unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex; SDVTList VTs = getVTList(VT); FoldingSetNodeID ID; - AddNodeIDNode(ID, Opc, VTs, std::nullopt); + AddNodeIDNode(ID, Opc, VTs, {}); ID.AddInteger(FI); void *IP = nullptr; if (SDNode *E = FindNodeOrInsertPos(ID, IP)) @@ -1912,7 +1912,7 @@ SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget, unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable; SDVTList VTs = getVTList(VT); FoldingSetNodeID ID; - AddNodeIDNode(ID, Opc, VTs, std::nullopt); + AddNodeIDNode(ID, Opc, VTs, {}); ID.AddInteger(JTI); ID.AddInteger(TargetFlags); void *IP = nullptr; @@ -1944,7 +1944,7 @@ SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT, unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool; SDVTList VTs = getVTList(VT); FoldingSetNodeID ID; - AddNodeIDNode(ID, Opc, VTs, std::nullopt); + AddNodeIDNode(ID, Opc, VTs, {}); ID.AddInteger(Alignment->value()); ID.AddInteger(Offset); ID.AddPointer(C); @@ -1972,7 +1972,7 @@ SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT, unsigned Opc = isTarget ? 
ISD::TargetConstantPool : ISD::ConstantPool; SDVTList VTs = getVTList(VT); FoldingSetNodeID ID; - AddNodeIDNode(ID, Opc, VTs, std::nullopt); + AddNodeIDNode(ID, Opc, VTs, {}); ID.AddInteger(Alignment->value()); ID.AddInteger(Offset); C->addSelectionDAGCSEId(ID); @@ -1990,7 +1990,7 @@ SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT, SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) { FoldingSetNodeID ID; - AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), std::nullopt); + AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), {}); ID.AddPointer(MBB); void *IP = nullptr; if (SDNode *E = FindNodeOrInsertPos(ID, IP)) @@ -2295,7 +2295,7 @@ SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) { SDValue SelectionDAG::getRegister(Register Reg, EVT VT) { SDVTList VTs = getVTList(VT); FoldingSetNodeID ID; - AddNodeIDNode(ID, ISD::Register, VTs, std::nullopt); + AddNodeIDNode(ID, ISD::Register, VTs, {}); ID.AddInteger(Reg.id()); void *IP = nullptr; if (SDNode *E = FindNodeOrInsertPos(ID, IP)) @@ -2310,7 +2310,7 @@ SDValue SelectionDAG::getRegister(Register Reg, EVT VT) { SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) { FoldingSetNodeID ID; - AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), std::nullopt); + AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), {}); ID.AddPointer(RegMask); void *IP = nullptr; if (SDNode *E = FindNodeOrInsertPos(ID, IP)) @@ -2353,7 +2353,7 @@ SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT, SDVTList VTs = getVTList(VT); FoldingSetNodeID ID; - AddNodeIDNode(ID, Opc, VTs, std::nullopt); + AddNodeIDNode(ID, Opc, VTs, {}); ID.AddPointer(BA); ID.AddInteger(Offset); ID.AddInteger(TargetFlags); @@ -2369,7 +2369,7 @@ SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT, SDValue SelectionDAG::getSrcValue(const Value *V) { FoldingSetNodeID ID; - AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), std::nullopt); + AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), {}); ID.AddPointer(V); void *IP = nullptr; @@ -2384,7 +2384,7 @@ SDValue SelectionDAG::getSrcValue(const Value *V) { SDValue SelectionDAG::getMDNode(const MDNode *MD) { FoldingSetNodeID ID; - AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), std::nullopt); + AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), {}); ID.AddPointer(MD); void *IP = nullptr; @@ -5944,7 +5944,7 @@ static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT, SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) { SDVTList VTs = getVTList(VT); FoldingSetNodeID ID; - AddNodeIDNode(ID, Opcode, VTs, std::nullopt); + AddNodeIDNode(ID, Opcode, VTs, {}); void *IP = nullptr; if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) return SDValue(E, 0); @@ -10428,7 +10428,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList) { - return getNode(Opcode, DL, VTList, std::nullopt); + return getNode(Opcode, DL, VTList, ArrayRef()); } SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, @@ -10695,7 +10695,7 @@ void SelectionDAG::setNodeMemRefs(MachineSDNode *N, SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT) { SDVTList VTs = getVTList(VT); - return SelectNodeTo(N, MachineOpc, VTs, std::nullopt); + return SelectNodeTo(N, MachineOpc, VTs, {}); } SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, @@ 
-10736,7 +10736,7 @@ SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1, EVT VT2) { SDVTList VTs = getVTList(VT1, VT2); - return SelectNodeTo(N, MachineOpc, VTs, std::nullopt); + return SelectNodeTo(N, MachineOpc, VTs, {}); } SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, @@ -10903,7 +10903,7 @@ SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) { MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT) { SDVTList VTs = getVTList(VT); - return getMachineNode(Opcode, dl, VTs, std::nullopt); + return getMachineNode(Opcode, dl, VTs, {}); } MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index a719ff859e778..c04dc403f4588 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -3178,10 +3178,9 @@ SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) { const TargetLowering &TLI = DAG.getTargetLoweringInfo(); TargetLowering::MakeLibCallOptions CallOptions; CallOptions.setDiscardResult(true); - SDValue Chain = - TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid, - std::nullopt, CallOptions, getCurSDLoc()) - .second; + SDValue Chain = TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, + MVT::isVoid, {}, CallOptions, getCurSDLoc()) + .second; // On PS4/PS5, the "return address" must still be within the calling // function, even if it's at the very end, so emit an explicit TRAP here. // Passing 'true' for doesNotReturn above won't generate the trap for us. diff --git a/llvm/lib/CodeGen/SplitKit.cpp b/llvm/lib/CodeGen/SplitKit.cpp index b671e51038753..d270d6ef62152 100644 --- a/llvm/lib/CodeGen/SplitKit.cpp +++ b/llvm/lib/CodeGen/SplitKit.cpp @@ -1462,7 +1462,7 @@ void SplitEditor::deleteRematVictims() { if (Dead.empty()) return; - Edit->eliminateDeadDefs(Dead, std::nullopt); + Edit->eliminateDeadDefs(Dead, {}); } void SplitEditor::forceRecomputeVNI(const VNInfo &ParentVNI) { diff --git a/llvm/lib/ExecutionEngine/ExecutionEngine.cpp b/llvm/lib/ExecutionEngine/ExecutionEngine.cpp index 8297d15b1580e..f09975331bba8 100644 --- a/llvm/lib/ExecutionEngine/ExecutionEngine.cpp +++ b/llvm/lib/ExecutionEngine/ExecutionEngine.cpp @@ -395,7 +395,7 @@ void ExecutionEngine::runStaticConstructorsDestructors(Module &module, // Execute the ctor/dtor function! if (Function *F = dyn_cast(FP)) - runFunction(F, std::nullopt); + runFunction(F, {}); // FIXME: It is marginally lame that we just do nothing here if we see an // entry we don't recognize. 
It might not be unreasonable for the verifier diff --git a/llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp b/llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp index d4235cfa2ccf5..2b9149e825fdb 100644 --- a/llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp +++ b/llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp @@ -69,7 +69,7 @@ Interpreter::~Interpreter() { void Interpreter::runAtExitHandlers () { while (!AtExitHandlers.empty()) { - callFunction(AtExitHandlers.back(), std::nullopt); + callFunction(AtExitHandlers.back(), {}); AtExitHandlers.pop_back(); run(); } diff --git a/llvm/lib/Frontend/OpenMP/OMP.cpp b/llvm/lib/Frontend/OpenMP/OMP.cpp index 5720655442be3..fdb09678d7a4c 100644 --- a/llvm/lib/Frontend/OpenMP/OMP.cpp +++ b/llvm/lib/Frontend/OpenMP/OMP.cpp @@ -83,7 +83,7 @@ namespace llvm::omp { ArrayRef getLeafConstructs(Directive D) { auto Idx = static_cast(D); if (Idx >= Directive_enumSize) - return std::nullopt; + return {}; const auto *Row = LeafConstructTable[LeafConstructTableOrdering[Idx]]; return ArrayRef(&Row[2], static_cast(Row[1])); } diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp index 515b74cbb7588..722587e23bfd3 100644 --- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp +++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp @@ -6606,7 +6606,7 @@ static Function *createOutlinedFunction( // TODO: We are using nullopt for arguments at the moment. This will // need to be updated when debug data is being generated for variables. DISubroutineType *Ty = - DB.createSubroutineType(DB.getOrCreateTypeArray(std::nullopt)); + DB.createSubroutineType(DB.getOrCreateTypeArray({})); DISubprogram::DISPFlags SPFlags = DISubprogram::SPFlagDefinition | DISubprogram::SPFlagOptimized | DISubprogram::SPFlagLocalToUnit; diff --git a/llvm/lib/IR/Constants.cpp b/llvm/lib/IR/Constants.cpp index 6d035d5373295..d6c00a4b54782 100644 --- a/llvm/lib/IR/Constants.cpp +++ b/llvm/lib/IR/Constants.cpp @@ -2521,7 +2521,7 @@ Constant *ConstantExpr::getGetElementPtr(Type *Ty, Constant *C, } const ConstantExprKeyType Key(Instruction::GetElementPtr, ArgVec, NW.getRaw(), - std::nullopt, Ty, InRange); + {}, Ty, InRange); LLVMContextImpl *pImpl = C->getContext().pImpl; return pImpl->ExprConstants.getOrCreate(ReqTy, Key); diff --git a/llvm/lib/IR/ConstantsContext.h b/llvm/lib/IR/ConstantsContext.h index 6afc86ffc73ab..aaaab0b7a918a 100644 --- a/llvm/lib/IR/ConstantsContext.h +++ b/llvm/lib/IR/ConstantsContext.h @@ -398,7 +398,7 @@ struct ConstantExprKeyType { static ArrayRef getShuffleMaskIfValid(const ConstantExpr *CE) { if (CE->getOpcode() == Instruction::ShuffleVector) return CE->getShuffleMask(); - return std::nullopt; + return {}; } static Type *getSourceElementTypeIfValid(const ConstantExpr *CE) { @@ -417,7 +417,7 @@ struct ConstantExprKeyType { public: ConstantExprKeyType(unsigned Opcode, ArrayRef Ops, unsigned short SubclassOptionalData = 0, - ArrayRef ShuffleMask = std::nullopt, + ArrayRef ShuffleMask = {}, Type *ExplicitTy = nullptr, std::optional InRange = std::nullopt) : Opcode(Opcode), SubclassOptionalData(SubclassOptionalData), Ops(Ops), diff --git a/llvm/lib/IR/DebugInfo.cpp b/llvm/lib/IR/DebugInfo.cpp index e50b6f6335ef5..50b29ae4f4167 100644 --- a/llvm/lib/IR/DebugInfo.cpp +++ b/llvm/lib/IR/DebugInfo.cpp @@ -2039,16 +2039,14 @@ static void emitDbgAssign(AssignmentInfo Info, Value *Val, Value *Dest, StoreToWholeVariable = FragStartBit <= VarStartBit && FragEndBit >= *Size; } - DIExpression *Expr = - DIExpression::get(StoreLikeInst.getContext(), 
std::nullopt); + DIExpression *Expr = DIExpression::get(StoreLikeInst.getContext(), {}); if (!StoreToWholeVariable) { auto R = DIExpression::createFragmentExpression(Expr, FragStartBit, FragEndBit - FragStartBit); assert(R.has_value() && "failed to create fragment expression"); Expr = *R; } - DIExpression *AddrExpr = - DIExpression::get(StoreLikeInst.getContext(), std::nullopt); + DIExpression *AddrExpr = DIExpression::get(StoreLikeInst.getContext(), {}); if (StoreLikeInst.getParent()->IsNewDbgInfoFormat) { auto *Assign = DbgVariableRecord::createLinkedDVRAssign( &StoreLikeInst, Val, VarRec.Var, Expr, Dest, AddrExpr, VarRec.DL); diff --git a/llvm/lib/IR/DebugInfoMetadata.cpp b/llvm/lib/IR/DebugInfoMetadata.cpp index 416cebbe52723..fb4326e554713 100644 --- a/llvm/lib/IR/DebugInfoMetadata.cpp +++ b/llvm/lib/IR/DebugInfoMetadata.cpp @@ -1954,7 +1954,7 @@ DIExpression *DIExpression::append(const DIExpression *Expr, NewOps.append(Ops.begin(), Ops.end()); // Ensure that the new opcodes are only appended once. - Ops = std::nullopt; + Ops = {}; } Op.appendToVector(NewOps); } diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp index 486ee99b355da..8bf695e835c36 100644 --- a/llvm/lib/IR/IRBuilder.cpp +++ b/llvm/lib/IR/IRBuilder.cpp @@ -346,8 +346,8 @@ CallInst *IRBuilderBase::CreateMalloc(Type *IntPtrTy, Type *AllocTy, Value *AllocSize, Value *ArraySize, Function *MallocF, const Twine &Name) { - return CreateMalloc(IntPtrTy, AllocTy, AllocSize, ArraySize, std::nullopt, - MallocF, Name); + return CreateMalloc(IntPtrTy, AllocTy, AllocSize, ArraySize, {}, MallocF, + Name); } /// CreateFree - Generate the IR for a call to the builtin free function. diff --git a/llvm/lib/IR/MDBuilder.cpp b/llvm/lib/IR/MDBuilder.cpp index 000027754d13e..26c8ab9fc36c8 100644 --- a/llvm/lib/IR/MDBuilder.cpp +++ b/llvm/lib/IR/MDBuilder.cpp @@ -66,9 +66,7 @@ MDNode *MDBuilder::createBranchWeights(ArrayRef Weights, return MDNode::get(Context, Vals); } -MDNode *MDBuilder::createUnpredictable() { - return MDNode::get(Context, std::nullopt); -} +MDNode *MDBuilder::createUnpredictable() { return MDNode::get(Context, {}); } MDNode *MDBuilder::createFunctionEntryCount( uint64_t Count, bool Synthetic, diff --git a/llvm/lib/IR/Metadata.cpp b/llvm/lib/IR/Metadata.cpp index 2599de3437ba2..ea2d1dc8440bc 100644 --- a/llvm/lib/IR/Metadata.cpp +++ b/llvm/lib/IR/Metadata.cpp @@ -82,7 +82,7 @@ static Metadata *canonicalizeMetadataForValue(LLVMContext &Context, Metadata *MD) { if (!MD) // !{} - return MDNode::get(Context, std::nullopt); + return MDNode::get(Context, {}); // Return early if this isn't a single-operand MDNode. auto *N = dyn_cast(MD); @@ -91,7 +91,7 @@ static Metadata *canonicalizeMetadataForValue(LLVMContext &Context, if (!N->getOperand(0)) // !{} - return MDNode::get(Context, std::nullopt); + return MDNode::get(Context, {}); if (auto *C = dyn_cast(N->getOperand(0))) // Look through the MDNode. 
@@ -1733,7 +1733,7 @@ void Instruction::setAAMetadata(const AAMDNodes &N) { void Instruction::setNoSanitizeMetadata() { setMetadata(llvm::LLVMContext::MD_nosanitize, - llvm::MDNode::get(getContext(), std::nullopt)); + llvm::MDNode::get(getContext(), {})); } void Instruction::getAllMetadataImpl( diff --git a/llvm/lib/IR/Type.cpp b/llvm/lib/IR/Type.cpp index a4e3ebdd339cc..3784ad28d7219 100644 --- a/llvm/lib/IR/Type.cpp +++ b/llvm/lib/IR/Type.cpp @@ -349,7 +349,7 @@ FunctionType *FunctionType::get(Type *ReturnType, } FunctionType *FunctionType::get(Type *Result, bool isVarArg) { - return get(Result, std::nullopt, isVarArg); + return get(Result, {}, isVarArg); } bool FunctionType::isValidReturnType(Type *RetTy) { @@ -515,7 +515,7 @@ StructType *StructType::create(LLVMContext &Context, StringRef Name) { } StructType *StructType::get(LLVMContext &Context, bool isPacked) { - return get(Context, std::nullopt, isPacked); + return get(Context, {}, isPacked); } StructType *StructType::create(LLVMContext &Context, ArrayRef Elements, diff --git a/llvm/lib/MC/MCCodeView.cpp b/llvm/lib/MC/MCCodeView.cpp index 792a132d6fb86..86f5016e86daa 100644 --- a/llvm/lib/MC/MCCodeView.cpp +++ b/llvm/lib/MC/MCCodeView.cpp @@ -338,9 +338,9 @@ CodeViewContext::getLineExtentIncludingInlinees(unsigned FuncId) { ArrayRef CodeViewContext::getLinesForExtent(size_t L, size_t R) { if (R <= L) - return std::nullopt; + return {}; if (L >= MCCVLines.size()) - return std::nullopt; + return {}; return ArrayRef(&MCCVLines[L], R - L); } diff --git a/llvm/lib/MC/MCDwarf.cpp b/llvm/lib/MC/MCDwarf.cpp index 2db59f8fc9442..0dd113797385d 100644 --- a/llvm/lib/MC/MCDwarf.cpp +++ b/llvm/lib/MC/MCDwarf.cpp @@ -290,7 +290,7 @@ void MCDwarfDwoLineTable::Emit(MCStreamer &MCOS, MCDwarfLineTableParams Params, return; std::optional NoLineStr(std::nullopt); MCOS.switchSection(Section); - MCOS.emitLabel(Header.Emit(&MCOS, Params, std::nullopt, NoLineStr).second); + MCOS.emitLabel(Header.Emit(&MCOS, Params, {}, NoLineStr).second); } std::pair diff --git a/llvm/lib/MC/MCParser/AsmParser.cpp b/llvm/lib/MC/MCParser/AsmParser.cpp index 66e52fe2d08f8..58496f054419f 100644 --- a/llvm/lib/MC/MCParser/AsmParser.cpp +++ b/llvm/lib/MC/MCParser/AsmParser.cpp @@ -5714,7 +5714,7 @@ bool AsmParser::parseDirectiveRept(SMLoc DirectiveLoc, StringRef Dir) { raw_svector_ostream OS(Buf); while (Count--) { // Note that the AtPseudoVariable is disabled for instantiations of .rep(t). - if (expandMacro(OS, *M, std::nullopt, std::nullopt, false)) + if (expandMacro(OS, *M, {}, {}, false)) return true; } instantiateMacroLikeBody(M, DirectiveLoc, OS); diff --git a/llvm/lib/MC/MCParser/MasmParser.cpp b/llvm/lib/MC/MCParser/MasmParser.cpp index 9f619c5018b50..fef904685da93 100644 --- a/llvm/lib/MC/MCParser/MasmParser.cpp +++ b/llvm/lib/MC/MCParser/MasmParser.cpp @@ -6955,8 +6955,7 @@ bool MasmParser::parseDirectiveRepeat(SMLoc DirectiveLoc, StringRef Dir) { SmallString<256> Buf; raw_svector_ostream OS(Buf); while (Count--) { - if (expandMacro(OS, M->Body, std::nullopt, std::nullopt, M->Locals, - getTok().getLoc())) + if (expandMacro(OS, M->Body, {}, {}, M->Locals, getTok().getLoc())) return true; } instantiateMacroLikeBody(M, DirectiveLoc, OS); @@ -6989,8 +6988,7 @@ bool MasmParser::parseDirectiveWhile(SMLoc DirectiveLoc) { if (Condition) { // Instantiate the macro, then resume at this directive to recheck the // condition. 
- if (expandMacro(OS, M->Body, std::nullopt, std::nullopt, M->Locals, - getTok().getLoc())) + if (expandMacro(OS, M->Body, {}, {}, M->Locals, getTok().getLoc())) return true; instantiateMacroLikeBody(M, DirectiveLoc, /*ExitLoc=*/DirectiveLoc, OS); } diff --git a/llvm/lib/Object/BuildID.cpp b/llvm/lib/Object/BuildID.cpp index d3c58a919731f..89d6bc3ab550d 100644 --- a/llvm/lib/Object/BuildID.cpp +++ b/llvm/lib/Object/BuildID.cpp @@ -62,7 +62,7 @@ BuildIDRef llvm::object::getBuildID(const ObjectFile *Obj) { return ::getBuildID(O->getELFFile()); if (auto *O = dyn_cast>(Obj)) return ::getBuildID(O->getELFFile()); - return std::nullopt; + return {}; } std::optional BuildIDFetcher::fetch(BuildIDRef BuildID) const { diff --git a/llvm/lib/Object/MachOObjectFile.cpp b/llvm/lib/Object/MachOObjectFile.cpp index 8fa3f67ea00f3..46ed3b2ff7c58 100644 --- a/llvm/lib/Object/MachOObjectFile.cpp +++ b/llvm/lib/Object/MachOObjectFile.cpp @@ -4906,12 +4906,12 @@ MachOObjectFile::getLinkOptHintsLoadCommand() const { ArrayRef MachOObjectFile::getDyldInfoRebaseOpcodes() const { if (!DyldInfoLoadCmd) - return std::nullopt; + return {}; auto DyldInfoOrErr = getStructOrErr(*this, DyldInfoLoadCmd); if (!DyldInfoOrErr) - return std::nullopt; + return {}; MachO::dyld_info_command DyldInfo = DyldInfoOrErr.get(); const uint8_t *Ptr = reinterpret_cast(getPtr(*this, DyldInfo.rebase_off)); @@ -4920,12 +4920,12 @@ ArrayRef MachOObjectFile::getDyldInfoRebaseOpcodes() const { ArrayRef MachOObjectFile::getDyldInfoBindOpcodes() const { if (!DyldInfoLoadCmd) - return std::nullopt; + return {}; auto DyldInfoOrErr = getStructOrErr(*this, DyldInfoLoadCmd); if (!DyldInfoOrErr) - return std::nullopt; + return {}; MachO::dyld_info_command DyldInfo = DyldInfoOrErr.get(); const uint8_t *Ptr = reinterpret_cast(getPtr(*this, DyldInfo.bind_off)); @@ -4934,12 +4934,12 @@ ArrayRef MachOObjectFile::getDyldInfoBindOpcodes() const { ArrayRef MachOObjectFile::getDyldInfoWeakBindOpcodes() const { if (!DyldInfoLoadCmd) - return std::nullopt; + return {}; auto DyldInfoOrErr = getStructOrErr(*this, DyldInfoLoadCmd); if (!DyldInfoOrErr) - return std::nullopt; + return {}; MachO::dyld_info_command DyldInfo = DyldInfoOrErr.get(); const uint8_t *Ptr = reinterpret_cast(getPtr(*this, DyldInfo.weak_bind_off)); @@ -4948,12 +4948,12 @@ ArrayRef MachOObjectFile::getDyldInfoWeakBindOpcodes() const { ArrayRef MachOObjectFile::getDyldInfoLazyBindOpcodes() const { if (!DyldInfoLoadCmd) - return std::nullopt; + return {}; auto DyldInfoOrErr = getStructOrErr(*this, DyldInfoLoadCmd); if (!DyldInfoOrErr) - return std::nullopt; + return {}; MachO::dyld_info_command DyldInfo = DyldInfoOrErr.get(); const uint8_t *Ptr = reinterpret_cast(getPtr(*this, DyldInfo.lazy_bind_off)); @@ -4962,12 +4962,12 @@ ArrayRef MachOObjectFile::getDyldInfoLazyBindOpcodes() const { ArrayRef MachOObjectFile::getDyldInfoExportsTrie() const { if (!DyldInfoLoadCmd) - return std::nullopt; + return {}; auto DyldInfoOrErr = getStructOrErr(*this, DyldInfoLoadCmd); if (!DyldInfoOrErr) - return std::nullopt; + return {}; MachO::dyld_info_command DyldInfo = DyldInfoOrErr.get(); const uint8_t *Ptr = reinterpret_cast(getPtr(*this, DyldInfo.export_off)); @@ -5248,12 +5248,12 @@ MachOObjectFile::getDyldChainedFixupTargets() const { ArrayRef MachOObjectFile::getDyldExportsTrie() const { if (!DyldExportsTrieLoadCmd) - return std::nullopt; + return {}; auto DyldExportsTrieOrError = getStructOrErr( *this, DyldExportsTrieLoadCmd); if (!DyldExportsTrieOrError) - return std::nullopt; + return {}; 
MachO::linkedit_data_command DyldExportsTrie = DyldExportsTrieOrError.get(); const uint8_t *Ptr = reinterpret_cast(getPtr(*this, DyldExportsTrie.dataoff)); @@ -5277,7 +5277,7 @@ SmallVector MachOObjectFile::getFunctionStarts() const { ArrayRef MachOObjectFile::getUuid() const { if (!UuidLoadCmd) - return std::nullopt; + return {}; // Returning a pointer is fine as uuid doesn't need endian swapping. const char *Ptr = UuidLoadCmd + offsetof(MachO::uuid_command, uuid); return ArrayRef(reinterpret_cast(Ptr), 16); diff --git a/llvm/lib/Option/ArgList.cpp b/llvm/lib/Option/ArgList.cpp index 72003e3a52591..6e164150d2e5e 100644 --- a/llvm/lib/Option/ArgList.cpp +++ b/llvm/lib/Option/ArgList.cpp @@ -134,7 +134,7 @@ void ArgList::AddAllArgsExcept(ArgStringList &Output, /// This is a nicer interface when you don't have a list of Ids to exclude. void ArgList::addAllArgs(ArgStringList &Output, ArrayRef Ids) const { - ArrayRef Exclude = std::nullopt; + ArrayRef Exclude = {}; AddAllArgsExcept(Output, Ids, Exclude); } diff --git a/llvm/lib/Support/YAMLParser.cpp b/llvm/lib/Support/YAMLParser.cpp index fdd0ed6e682eb..d2bedbaf0119f 100644 --- a/llvm/lib/Support/YAMLParser.cpp +++ b/llvm/lib/Support/YAMLParser.cpp @@ -258,9 +258,8 @@ class Scanner { Token getNext(); void printError(SMLoc Loc, SourceMgr::DiagKind Kind, const Twine &Message, - ArrayRef Ranges = std::nullopt) { - SM.PrintMessage(Loc, Kind, Message, Ranges, /* FixIts= */ std::nullopt, - ShowColors); + ArrayRef Ranges = {}) { + SM.PrintMessage(Loc, Kind, Message, Ranges, /* FixIts= */ {}, ShowColors); } void setError(const Twine &Message, StringRef::iterator Position) { diff --git a/llvm/lib/TableGen/Parser.cpp b/llvm/lib/TableGen/Parser.cpp index 73a3362924129..2c3726a339bb8 100644 --- a/llvm/lib/TableGen/Parser.cpp +++ b/llvm/lib/TableGen/Parser.cpp @@ -28,7 +28,7 @@ bool llvm::TableGenParseFile(SourceMgr &InputSrcMgr, RecordKeeper &Records) { auto *MainFileBuffer = SrcMgr.getMemoryBuffer(SrcMgr.getMainFileID()); Records.saveInputFilename(MainFileBuffer->getBufferIdentifier().str()); - TGParser Parser(SrcMgr, /*Macros=*/std::nullopt, Records, + TGParser Parser(SrcMgr, /*Macros=*/{}, Records, /*NoWarnOnUnusedTemplateArgs=*/false, /*TrackReferenceLocs=*/true); bool ParseResult = Parser.ParseFile(); diff --git a/llvm/lib/TableGen/TGParser.cpp b/llvm/lib/TableGen/TGParser.cpp index d1d46105c814e..1a60c2a567a29 100644 --- a/llvm/lib/TableGen/TGParser.cpp +++ b/llvm/lib/TableGen/TGParser.cpp @@ -3294,7 +3294,7 @@ Init *TGParser::ParseDeclaration(Record *CurRec, SMLoc ValLoc = Lex.getLoc(); Init *Val = ParseValue(CurRec, Type); if (!Val || - SetValue(CurRec, ValLoc, DeclName, std::nullopt, Val, + SetValue(CurRec, ValLoc, DeclName, {}, Val, /*AllowSelfAssignment=*/false, /*OverrideDefLoc=*/false)) { // Return the name, even if an error is thrown. This is so that we can // continue to make some progress, even without the value having been diff --git a/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp b/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp index 773c309a0943e..047e38261a6fc 100644 --- a/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp +++ b/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp @@ -96,8 +96,8 @@ bool AArch64StorePairSuppress::shouldAddSTPToBlock(const MachineBasicBlock *BB) SingleSCDesc->isValid() && !SingleSCDesc->isVariant()) { // Compute the new critical resource length after replacing 2 separate // STRDui with one STPDi. 
- unsigned ResLenWithSTP = BBTrace.getResourceLength( - std::nullopt, PairSCDesc, {SingleSCDesc, SingleSCDesc}); + unsigned ResLenWithSTP = + BBTrace.getResourceLength({}, PairSCDesc, {SingleSCDesc, SingleSCDesc}); if (ResLenWithSTP > ResLength) { LLVM_DEBUG(dbgs() << " Suppress STP in BB: " << BB->getNumber() << " resources " << ResLength << " -> " << ResLenWithSTP diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h index 4a6457d7a7dbf..22bba21eedcc5 100644 --- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h +++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h @@ -203,8 +203,7 @@ class AArch64TTIImpl : public BasicTTIImplBase { unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None}, - ArrayRef Args = std::nullopt, - const Instruction *CxtI = nullptr); + ArrayRef Args = {}, const Instruction *CxtI = nullptr); InstructionCost getAddressComputationCost(Type *Ty, ScalarEvolution *SE, const SCEV *Ptr); @@ -399,7 +398,7 @@ class AArch64TTIImpl : public BasicTTIImplBase { ArrayRef Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, - ArrayRef Args = std::nullopt, + ArrayRef Args = {}, const Instruction *CxtI = nullptr); InstructionCost getScalarizationOverhead(VectorType *Ty, diff --git a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp index b4d2a3388c1df..5aee7804de3e3 100644 --- a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp +++ b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp @@ -1450,7 +1450,8 @@ bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, if (!determineAndHandleAssignments( UsingReturnedArg ? ReturnedArgHandler : Handler, Assigner, InArgs, MIRBuilder, Info.CallConv, Info.IsVarArg, - UsingReturnedArg ? ArrayRef(OutArgs[0].Regs) : std::nullopt)) + UsingReturnedArg ? 
ArrayRef(OutArgs[0].Regs) + : ArrayRef())) return false; } diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp index bfb57b1edf508..7bd618b2d9660 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp @@ -1043,7 +1043,7 @@ AMDGPUPromoteAllocaImpl::getLocalSizeYZ(IRBuilder<> &Builder) { Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, DispatchPtr, 2); LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, Align(4)); - MDNode *MD = MDNode::get(Mod->getContext(), std::nullopt); + MDNode *MD = MDNode::get(Mod->getContext(), {}); LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD); LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD); ST.makeLIDRangeMetadata(LoadZU); diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp index f860b13994512..57a97f029c867 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp @@ -1603,7 +1603,7 @@ bool GCNTargetMachine::parseMachineFunctionInfo( Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1, RegName.Value.size(), SourceMgr::DK_Error, "incorrect register class for field", RegName.Value, - std::nullopt, std::nullopt); + {}, {}); SourceRange = RegName.SourceRange; return true; }; diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h index 01df2e6caaba1..76785ee456a41 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h +++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h @@ -156,8 +156,7 @@ class GCNTTIImpl final : public BasicTTIImplBase { unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None}, - ArrayRef Args = std::nullopt, - const Instruction *CxtI = nullptr); + ArrayRef Args = {}, const Instruction *CxtI = nullptr); InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I = nullptr); @@ -235,7 +234,7 @@ class GCNTTIImpl final : public BasicTTIImplBase { ArrayRef Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, - ArrayRef Args = std::nullopt, + ArrayRef Args = {}, const Instruction *CxtI = nullptr); bool areInlineCompatible(const Function *Caller, diff --git a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp index 4818f1b13caa6..2237b2e78c417 100644 --- a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp +++ b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp @@ -739,7 +739,7 @@ bool SIMachineFunctionInfo::initializeBaseYamlFields( Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1, 1, SourceMgr::DK_Error, toString(FIOrErr.takeError()), - "", std::nullopt, std::nullopt); + "", {}, {}); SourceRange = YamlMFI.ScavengeFI->SourceRange; return true; } diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h index bea088065172e..528f082dde32c 100644 --- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h +++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h @@ -220,7 +220,7 @@ class ARMTTIImpl : public BasicTTIImplBase { ArrayRef Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, - ArrayRef Args = std::nullopt, + ArrayRef Args = {}, const Instruction *CxtI = nullptr); bool preferInLoopReduction(unsigned Opcode, 
Type *Ty, @@ -256,8 +256,7 @@ class ARMTTIImpl : public BasicTTIImplBase { unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None}, - ArrayRef Args = std::nullopt, - const Instruction *CxtI = nullptr); + ArrayRef Args = {}, const Instruction *CxtI = nullptr); InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment, diff --git a/llvm/lib/Target/BPF/BPFTargetTransformInfo.h b/llvm/lib/Target/BPF/BPFTargetTransformInfo.h index 13dcae7e67bcf..9d0db33d9a1fd 100644 --- a/llvm/lib/Target/BPF/BPFTargetTransformInfo.h +++ b/llvm/lib/Target/BPF/BPFTargetTransformInfo.h @@ -59,8 +59,7 @@ class BPFTTIImpl : public BasicTTIImplBase { unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None}, - ArrayRef Args = std::nullopt, - const Instruction *CxtI = nullptr) { + ArrayRef Args = {}, const Instruction *CxtI = nullptr) { int ISD = TLI->InstructionOpcodeToISD(Opcode); if (ISD == ISD::ADD && CostKind == TTI::TCK_RecipThroughput) return SCEVCheapExpansionBudget.getValue() + 1; diff --git a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h index b6f36b920da77..4a1cfe03d48a7 100644 --- a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h +++ b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h @@ -121,8 +121,7 @@ class HexagonTTIImpl : public BasicTTIImplBase { InstructionCost getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, ArrayRef Mask, TTI::TargetCostKind CostKind, int Index, - Type *SubTp, - ArrayRef Args = std::nullopt, + Type *SubTp, ArrayRef Args = {}, const Instruction *CxtI = nullptr); InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, @@ -141,8 +140,7 @@ class HexagonTTIImpl : public BasicTTIImplBase { unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None}, - ArrayRef Args = std::nullopt, - const Instruction *CxtI = nullptr); + ArrayRef Args = {}, const Instruction *CxtI = nullptr); InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, diff --git a/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp b/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp index 273bd7b2b361e..f4e495266eae3 100644 --- a/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp +++ b/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp @@ -142,8 +142,8 @@ class HexagonVectorCombine { Value *createHvxIntrinsic(IRBuilderBase &Builder, Intrinsic::ID IntID, Type *RetTy, ArrayRef Args, - ArrayRef ArgTys = std::nullopt, - ArrayRef MDSources = std::nullopt) const; + ArrayRef ArgTys = {}, + ArrayRef MDSources = {}) const; SmallVector splitVectorElements(IRBuilderBase &Builder, Value *Vec, unsigned ToWidth) const; Value *joinVectorElements(IRBuilderBase &Builder, ArrayRef Values, @@ -318,26 +318,24 @@ class AlignVectors { Value *createLoad(IRBuilderBase &Builder, Type *ValTy, Value *Ptr, Value *Predicate, int Alignment, Value *Mask, - Value *PassThru, - ArrayRef MDSources = std::nullopt) const; + Value *PassThru, ArrayRef MDSources = {}) const; Value *createSimpleLoad(IRBuilderBase &Builder, Type *ValTy, Value *Ptr, int Alignment, - ArrayRef 
MDSources = std::nullopt) const; + ArrayRef MDSources = {}) const; Value *createStore(IRBuilderBase &Builder, Value *Val, Value *Ptr, Value *Predicate, int Alignment, Value *Mask, - ArrayRef MDSources = std ::nullopt) const; + ArrayRef MDSources = {}) const; Value *createSimpleStore(IRBuilderBase &Builder, Value *Val, Value *Ptr, int Alignment, - ArrayRef MDSources = std ::nullopt) const; + ArrayRef MDSources = {}) const; Value *createPredicatedLoad(IRBuilderBase &Builder, Type *ValTy, Value *Ptr, Value *Predicate, int Alignment, - ArrayRef MDSources = std::nullopt) const; - Value * - createPredicatedStore(IRBuilderBase &Builder, Value *Val, Value *Ptr, - Value *Predicate, int Alignment, - ArrayRef MDSources = std::nullopt) const; + ArrayRef MDSources = {}) const; + Value *createPredicatedStore(IRBuilderBase &Builder, Value *Val, Value *Ptr, + Value *Predicate, int Alignment, + ArrayRef MDSources = {}) const; DepList getUpwardDeps(Instruction *In, Instruction *Base) const; bool createAddressGroups(); @@ -767,8 +765,8 @@ auto AlignVectors::createPredicatedLoad(IRBuilderBase &Builder, Type *ValTy, auto V6_vL32b_pred_ai = HVC.HST.getIntrinsicId(Hexagon::V6_vL32b_pred_ai); // FIXME: This may not put the offset from Ptr into the vmem offset. return HVC.createHvxIntrinsic(Builder, V6_vL32b_pred_ai, ValTy, - {Predicate, Ptr, HVC.getConstInt(0)}, - std::nullopt, MDSources); + {Predicate, Ptr, HVC.getConstInt(0)}, {}, + MDSources); } auto AlignVectors::createStore(IRBuilderBase &Builder, Value *Val, Value *Ptr, @@ -838,8 +836,8 @@ auto AlignVectors::createPredicatedStore(IRBuilderBase &Builder, Value *Val, auto V6_vS32b_pred_ai = HVC.HST.getIntrinsicId(Hexagon::V6_vS32b_pred_ai); // FIXME: This may not put the offset from Ptr into the vmem offset. return HVC.createHvxIntrinsic(Builder, V6_vS32b_pred_ai, nullptr, - {Predicate, Ptr, HVC.getConstInt(0), Val}, - std::nullopt, MDSources); + {Predicate, Ptr, HVC.getConstInt(0), Val}, {}, + MDSources); } auto AlignVectors::getUpwardDeps(Instruction *In, Instruction *Base) const diff --git a/llvm/lib/Target/Lanai/LanaiTargetTransformInfo.h b/llvm/lib/Target/Lanai/LanaiTargetTransformInfo.h index 7dec962789d53..5fe63e4a2e031 100644 --- a/llvm/lib/Target/Lanai/LanaiTargetTransformInfo.h +++ b/llvm/lib/Target/Lanai/LanaiTargetTransformInfo.h @@ -94,8 +94,7 @@ class LanaiTTIImpl : public BasicTTIImplBase { unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None}, - ArrayRef Args = std::nullopt, - const Instruction *CxtI = nullptr) { + ArrayRef Args = {}, const Instruction *CxtI = nullptr) { int ISD = TLI->InstructionOpcodeToISD(Opcode); switch (ISD) { diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h index 4160f5f6bfae7..86140daa7be48 100644 --- a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h +++ b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h @@ -98,8 +98,7 @@ class NVPTXTTIImpl : public BasicTTIImplBase { unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None}, - ArrayRef Args = std::nullopt, - const Instruction *CxtI = nullptr); + ArrayRef Args = {}, const Instruction *CxtI = nullptr); void getUnrollingPreferences(Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, diff --git 
a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h index 5be093f81d3f6..126ccb2b3096e 100644 --- a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h +++ b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h @@ -106,13 +106,11 @@ class PPCTTIImpl : public BasicTTIImplBase { unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None}, - ArrayRef Args = std::nullopt, - const Instruction *CxtI = nullptr); + ArrayRef Args = {}, const Instruction *CxtI = nullptr); InstructionCost getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, ArrayRef Mask, TTI::TargetCostKind CostKind, int Index, - Type *SubTp, - ArrayRef Args = std::nullopt, + Type *SubTp, ArrayRef Args = {}, const Instruction *CxtI = nullptr); InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp index 5d280b44630ae..c87e59a0dd001 100644 --- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp @@ -1943,8 +1943,7 @@ InstructionCost RISCVTTIImpl::getPointersChainCost( continue; Cost += getArithmeticInstrCost(Instruction::Add, GEP->getType(), CostKind, {TTI::OK_AnyValue, TTI::OP_None}, - {TTI::OK_AnyValue, TTI::OP_None}, - std::nullopt); + {TTI::OK_AnyValue, TTI::OP_None}, {}); } else { SmallVector Indices(GEP->indices()); Cost += getGEPCost(GEP->getSourceElementType(), GEP->getPointerOperand(), diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h index 763b89bfec0a6..308fbc55b2d59 100644 --- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h +++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h @@ -146,7 +146,7 @@ class RISCVTTIImpl : public BasicTTIImplBase { ArrayRef Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, - ArrayRef Args = std::nullopt, + ArrayRef Args = {}, const Instruction *CxtI = nullptr); InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, @@ -210,8 +210,7 @@ class RISCVTTIImpl : public BasicTTIImplBase { unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None}, - ArrayRef Args = std::nullopt, - const Instruction *CxtI = nullptr); + ArrayRef Args = {}, const Instruction *CxtI = nullptr); bool isElementTypeLegalForScalableVector(Type *Ty) const { return TLI->isLegalElementTypeForRVV(TLI->getValueType(DL, Ty)); diff --git a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.h b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.h index 47db8f132337f..9294fada1eb77 100644 --- a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.h +++ b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.h @@ -89,13 +89,12 @@ class SystemZTTIImpl : public BasicTTIImplBase { unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None}, - ArrayRef Args = std::nullopt, - const Instruction *CxtI = nullptr); + ArrayRef Args = {}, const Instruction *CxtI = nullptr); InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp, ArrayRef Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, - 
ArrayRef Args = std::nullopt, + ArrayRef Args = {}, const Instruction *CxtI = nullptr); unsigned getVectorTruncCost(Type *SrcTy, Type *DstTy); unsigned getVectorBitmaskConversionCost(Type *SrcTy, Type *DstTy); diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp index 17bec8e2a6a45..c040e560be605 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp @@ -767,7 +767,7 @@ void WebAssemblyLowerEmscriptenEHSjLj::wrapTestSetjmp( // Output parameter assignment Label = LabelPHI; EndBB = EndBB1; - LongjmpResult = IRB.CreateCall(GetTempRet0F, std::nullopt, "longjmp_result"); + LongjmpResult = IRB.CreateCall(GetTempRet0F, {}, "longjmp_result"); } void WebAssemblyLowerEmscriptenEHSjLj::rebuildSSA(Function &F) { @@ -1222,7 +1222,7 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runEHOnFunction(Function &F) { CallInst *FMCI = IRB.CreateCall(FMCF, FMCArgs, "fmc"); Value *Poison = PoisonValue::get(LPI->getType()); Value *Pair0 = IRB.CreateInsertValue(Poison, FMCI, 0, "pair0"); - Value *TempRet0 = IRB.CreateCall(GetTempRet0F, std::nullopt, "tempret0"); + Value *TempRet0 = IRB.CreateCall(GetTempRet0F, {}, "tempret0"); Value *Pair1 = IRB.CreateInsertValue(Pair0, TempRet0, 1, "pair1"); LPI->replaceAllUsesWith(Pair1); diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h b/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h index 269922cc3ea84..ac3a333991684 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h +++ b/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h @@ -63,8 +63,7 @@ class WebAssemblyTTIImpl final : public BasicTTIImplBase { unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None}, - ArrayRef Args = std::nullopt, - const Instruction *CxtI = nullptr); + ArrayRef Args = {}, const Instruction *CxtI = nullptr); using BaseT::getVectorInstrCost; InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp index e8a9da1f17d96..564bfd63f3c27 100644 --- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp +++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp @@ -453,8 +453,8 @@ namespace { // Create zero. SDVTList VTs = CurDAG->getVTList(MVT::i32, MVT::i32); - SDValue Zero = SDValue( - CurDAG->getMachineNode(X86::MOV32r0, dl, VTs, std::nullopt), 0); + SDValue Zero = + SDValue(CurDAG->getMachineNode(X86::MOV32r0, dl, VTs, {}), 0); if (VT == MVT::i64) { Zero = SDValue( CurDAG->getMachineNode( @@ -5826,8 +5826,8 @@ void X86DAGToDAGISel::Select(SDNode *Node) { } else { // Zero out the high part, effectively zero extending the input. 
SDVTList VTs = CurDAG->getVTList(MVT::i32, MVT::i32); - SDValue ClrNode = SDValue( - CurDAG->getMachineNode(X86::MOV32r0, dl, VTs, std::nullopt), 0); + SDValue ClrNode = + SDValue(CurDAG->getMachineNode(X86::MOV32r0, dl, VTs, {}), 0); switch (NVT.SimpleTy) { case MVT::i16: ClrNode = diff --git a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp index ab1eeb4111ccd..43dcaefc623be 100644 --- a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp +++ b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp @@ -1435,14 +1435,14 @@ static ArrayRef get64BitArgumentXMMs(MachineFunction &MF, // in their paired GPR. So we only need to save the GPR to their home // slots. // TODO: __vectorcall will change this. - return std::nullopt; + return {}; } bool isSoftFloat = Subtarget.useSoftFloat(); if (isSoftFloat || !Subtarget.hasSSE1()) // Kernel mode asks for SSE to be disabled, so there are no XMM argument // registers. - return std::nullopt; + return {}; static const MCPhysReg XMMArgRegs64Bit[] = { X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, diff --git a/llvm/lib/Target/X86/X86LowerAMXType.cpp b/llvm/lib/Target/X86/X86LowerAMXType.cpp index 9921e14ed95e1..919e1eb3e38e9 100644 --- a/llvm/lib/Target/X86/X86LowerAMXType.cpp +++ b/llvm/lib/Target/X86/X86LowerAMXType.cpp @@ -254,8 +254,8 @@ void X86LowerAMXType::combineLoadBitcast(LoadInst *LD, BitCastInst *Bitcast) { Value *I8Ptr = LD->getOperand(0); std::array Args = {Row, Col, I8Ptr, Stride}; - Value *NewInst = Builder.CreateIntrinsic(Intrinsic::x86_tileloadd64_internal, - std::nullopt, Args); + Value *NewInst = + Builder.CreateIntrinsic(Intrinsic::x86_tileloadd64_internal, {}, Args); Bitcast->replaceAllUsesWith(NewInst); } @@ -280,8 +280,7 @@ void X86LowerAMXType::combineBitcastStore(BitCastInst *Bitcast, StoreInst *ST) { Value *Stride = Builder.getInt64(64); Value *I8Ptr = ST->getOperand(1); std::array Args = {Row, Col, I8Ptr, Stride, Tile}; - Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, std::nullopt, - Args); + Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, {}, Args); if (Bitcast->hasOneUse()) return; // %13 = bitcast x86_amx %src to <256 x i32> @@ -330,8 +329,8 @@ bool X86LowerAMXType::transformBitcast(BitCastInst *Bitcast) { Value *Row = nullptr, *Col = nullptr; std::tie(Row, Col) = getShape(II, OpNo); std::array Args = {Row, Col, I8Ptr, Stride}; - Value *NewInst = Builder.CreateIntrinsic( - Intrinsic::x86_tileloadd64_internal, std::nullopt, Args); + Value *NewInst = + Builder.CreateIntrinsic(Intrinsic::x86_tileloadd64_internal, {}, Args); Bitcast->replaceAllUsesWith(NewInst); } else { // %2 = bitcast x86_amx %src to <256 x i32> @@ -348,8 +347,7 @@ bool X86LowerAMXType::transformBitcast(BitCastInst *Bitcast) { Value *Row = II->getOperand(0); Value *Col = II->getOperand(1); std::array Args = {Row, Col, I8Ptr, Stride, Src}; - Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, std::nullopt, - Args); + Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, {}, Args); Value *NewInst = Builder.CreateLoad(Bitcast->getType(), AllocaAddr); Bitcast->replaceAllUsesWith(NewInst); } @@ -480,8 +478,8 @@ static Instruction *createTileStore(Instruction *TileDef, Value *Ptr) { Value *Stride = Builder.getInt64(64); std::array Args = {Row, Col, Ptr, Stride, TileDef}; - Instruction *TileStore = Builder.CreateIntrinsic( - Intrinsic::x86_tilestored64_internal, std::nullopt, Args); + Instruction *TileStore = + Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, {}, Args); return TileStore; } 
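Aside on the recurring `ArrayRef<...> Args = {}` defaults and the empty type lists passed to Builder.CreateIntrinsic in the X86LowerAMXType hunks above: both spellings hand the callee an empty range. The sketch below uses MiniRef, a hypothetical stripped-down stand-in for llvm::ArrayRef (only the constructors relevant here, not the real class), to show that `= {}` and an explicit `{}` argument select the default constructor, so call sites that previously passed std::nullopt see the same empty range, while non-empty braced lists keep binding to the initializer-list constructor.

```cpp
// Self-contained sketch of why the `= {}` defaults in this patch are
// behavior-preserving. MiniRef is a hypothetical stand-in for llvm::ArrayRef,
// reduced to the constructors that matter here; it is not the real class.
#include <cassert>
#include <cstddef>
#include <initializer_list>

template <typename T> class MiniRef {
  const T *Data = nullptr;
  size_t Length = 0;

public:
  MiniRef() = default;                 // what `{}` and `= {}` select: empty
  MiniRef(std::initializer_list<T> IL) // what `{1, 2, 3}` selects
      : Data(IL.begin()), Length(IL.size()) {}
  size_t size() const { return Length; }
  bool empty() const { return Length == 0; }
};

// Mirrors the signatures changed above: the default argument is an empty range.
static size_t countArgs(MiniRef<int> Args = {}) { return Args.size(); }

int main() {
  assert(countArgs() == 0);          // omitted argument: empty, as before
  assert(countArgs({}) == 0);        // explicit {}: also empty
  assert(countArgs({1, 2, 3}) == 3); // non-empty braced list still works
  return 0;
}
```

The real llvm::ArrayRef also accepts std::nullopt through an implicit constructor, which is why the old spelling compiled; the sketch leaves that constructor out because the patch stops relying on it.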
@@ -505,8 +503,8 @@ static void replaceWithTileLoad(Use &U, Value *Ptr, bool IsPHI = false) { Value *Stride = Builder.getInt64(64); std::array Args = {Row, Col, Ptr, Stride}; - Value *TileLoad = Builder.CreateIntrinsic(Intrinsic::x86_tileloadd64_internal, - std::nullopt, Args); + Value *TileLoad = + Builder.CreateIntrinsic(Intrinsic::x86_tileloadd64_internal, {}, Args); UserI->replaceUsesOfWith(V, TileLoad); } @@ -799,7 +797,7 @@ bool X86LowerAMXCast::optimizeAMXCastFromPhi( auto *Block = OldPN->getIncomingBlock(I); BasicBlock::iterator Iter = Block->getTerminator()->getIterator(); Instruction *NewInst = Builder.CreateIntrinsic( - Intrinsic::x86_tilezero_internal, std::nullopt, {Row, Col}); + Intrinsic::x86_tilezero_internal, {}, {Row, Col}); NewInst->moveBefore(&*Iter); NewInst = Builder.CreateIntrinsic(Intrinsic::x86_cast_tile_to_vector, {IncValue->getType()}, {NewInst}); @@ -942,8 +940,7 @@ bool X86LowerAMXCast::combineCastStore(IntrinsicInst *Cast, StoreInst *ST) { Value *Stride = Builder.CreateSExt(Col, Builder.getInt64Ty()); Value *I8Ptr = Builder.CreateBitCast(ST->getOperand(1), Builder.getPtrTy()); std::array Args = {Row, Col, I8Ptr, Stride, Tile}; - Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, std::nullopt, - Args); + Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, {}, Args); return true; } @@ -987,8 +984,8 @@ bool X86LowerAMXCast::combineLoadCast(IntrinsicInst *Cast, LoadInst *LD) { } std::array Args = {Row, Col, I8Ptr, Stride}; - Value *NewInst = Builder.CreateIntrinsic(Intrinsic::x86_tileloadd64_internal, - std::nullopt, Args); + Value *NewInst = + Builder.CreateIntrinsic(Intrinsic::x86_tileloadd64_internal, {}, Args); Cast->replaceAllUsesWith(NewInst); return EraseLoad; @@ -1175,8 +1172,8 @@ bool X86LowerAMXCast::transformAMXCast(IntrinsicInst *AMXCast) { std::tie(Row, Col) = getShape(II, OpNo); std::array Args = { Row, Col, I8Ptr, Builder.CreateSExt(Col, Builder.getInt64Ty())}; - Value *NewInst = Builder.CreateIntrinsic( - Intrinsic::x86_tileloadd64_internal, std::nullopt, Args); + Value *NewInst = + Builder.CreateIntrinsic(Intrinsic::x86_tileloadd64_internal, {}, Args); AMXCast->replaceAllUsesWith(NewInst); AMXCast->eraseFromParent(); } else { @@ -1195,8 +1192,7 @@ bool X86LowerAMXCast::transformAMXCast(IntrinsicInst *AMXCast) { Value *Col = II->getOperand(1); std::array Args = { Row, Col, I8Ptr, Builder.CreateSExt(Col, Builder.getInt64Ty()), Src}; - Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, std::nullopt, - Args); + Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, {}, Args); Value *NewInst = Builder.CreateLoad(AMXCast->getType(), AllocaAddr); AMXCast->replaceAllUsesWith(NewInst); AMXCast->eraseFromParent(); diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp index cb9ee64a677a7..0fa138cefc3b8 100644 --- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp +++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp @@ -1594,9 +1594,8 @@ InstructionCost X86TTIImpl::getShuffleCost( auto *SubTy = FixedVectorType::get(BaseTp->getElementType(), SubLT.second.getVectorNumElements()); int ExtractIndex = alignDown((Index % NumElts), NumSubElts); - InstructionCost ExtractCost = - getShuffleCost(TTI::SK_ExtractSubvector, VecTy, std::nullopt, - CostKind, ExtractIndex, SubTy); + InstructionCost ExtractCost = getShuffleCost( + TTI::SK_ExtractSubvector, VecTy, {}, CostKind, ExtractIndex, SubTy); // If the original size is 32-bits or more, we can use pshufd. 
Otherwise // if we have SSSE3 we can use pshufb. @@ -1753,7 +1752,7 @@ InstructionCost X86TTIImpl::getShuffleCost( InstructionCost NumOfShuffles = (NumOfSrcs - 1) * NumOfDests; return NumOfShuffles * getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy, - std::nullopt, CostKind, 0, nullptr); + {}, CostKind, 0, nullptr); } return BaseT::getShuffleCost(Kind, BaseTp, Mask, CostKind, Index, SubTp); @@ -4742,8 +4741,8 @@ InstructionCost X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, EVT VT = TLI->getValueType(DL, Val); if (VT.getScalarType() != MScalarTy || VT.getSizeInBits() >= 128) SubTy = FixedVectorType::get(ScalarType, SubNumElts); - ShuffleCost = getShuffleCost(TTI::SK_PermuteTwoSrc, SubTy, std::nullopt, - CostKind, 0, SubTy); + ShuffleCost = + getShuffleCost(TTI::SK_PermuteTwoSrc, SubTy, {}, CostKind, 0, SubTy); } int IntOrFpCost = ScalarType->isFloatingPointTy() ? 0 : 1; return ShuffleCost + IntOrFpCost + RegisterFileMoveCost; @@ -4821,8 +4820,8 @@ X86TTIImpl::getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts, // FIXME: we don't need to extract if all non-demanded elements // are legalization-inserted padding. if (!LaneEltMask.isAllOnes()) - Cost += getShuffleCost(TTI::SK_ExtractSubvector, Ty, std::nullopt, - CostKind, I * NumEltsPerLane, LaneTy); + Cost += getShuffleCost(TTI::SK_ExtractSubvector, Ty, {}, CostKind, + I * NumEltsPerLane, LaneTy); Cost += BaseT::getScalarizationOverhead(LaneTy, LaneEltMask, Insert, /*Extract*/ false, CostKind); } @@ -4839,8 +4838,8 @@ X86TTIImpl::getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts, if (!AffectedLanes[I] || (Lane == 0 && FullyAffectedLegalVectors[LegalVec])) continue; - Cost += getShuffleCost(TTI::SK_InsertSubvector, Ty, std::nullopt, - CostKind, I * NumEltsPerLane, LaneTy); + Cost += getShuffleCost(TTI::SK_InsertSubvector, Ty, {}, CostKind, + I * NumEltsPerLane, LaneTy); } } } @@ -4899,8 +4898,8 @@ X86TTIImpl::getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts, NumEltsPerLane, I * NumEltsPerLane); if (LaneEltMask.isZero()) continue; - Cost += getShuffleCost(TTI::SK_ExtractSubvector, Ty, std::nullopt, - CostKind, I * NumEltsPerLane, LaneTy); + Cost += getShuffleCost(TTI::SK_ExtractSubvector, Ty, {}, CostKind, + I * NumEltsPerLane, LaneTy); Cost += BaseT::getScalarizationOverhead( LaneTy, LaneEltMask, /*Insert*/ false, Extract, CostKind); } @@ -5018,7 +5017,7 @@ X86TTIImpl::getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, unsigned NumDstVectorsDemanded = DemandedDstVectors.popcount(); InstructionCost SingleShuffleCost = getShuffleCost( - TTI::SK_PermuteSingleSrc, SingleDstVecTy, /*Mask=*/std::nullopt, CostKind, + TTI::SK_PermuteSingleSrc, SingleDstVecTy, /*Mask=*/{}, CostKind, /*Index=*/0, /*SubTp=*/nullptr); return NumDstVectorsDemanded * SingleShuffleCost; } @@ -5146,8 +5145,7 @@ InstructionCost X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, if (!Is0thSubVec) Cost += getShuffleCost(IsLoad ? TTI::ShuffleKind::SK_InsertSubvector : TTI::ShuffleKind::SK_ExtractSubvector, - VTy, std::nullopt, CostKind, NumEltDone(), - CurrVecTy); + VTy, {}, CostKind, NumEltDone(), CurrVecTy); } // While we can directly load/store ZMM, YMM, and 64-bit halves of XMM, @@ -5235,17 +5233,17 @@ X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy, Align Alignment, if (VT.isSimple() && Ty != VT.getSimpleVT() && LT.second.getVectorNumElements() == NumElem) // Promotion requires extend/truncate for data and a shuffle for mask. 
- Cost += getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVTy, std::nullopt, - CostKind, 0, nullptr) + - getShuffleCost(TTI::SK_PermuteTwoSrc, MaskTy, std::nullopt, - CostKind, 0, nullptr); + Cost += + getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVTy, {}, CostKind, 0, + nullptr) + + getShuffleCost(TTI::SK_PermuteTwoSrc, MaskTy, {}, CostKind, 0, nullptr); else if (LT.first * Ty.getVectorNumElements() > NumElem) { auto *NewMaskTy = FixedVectorType::get(MaskTy->getElementType(), Ty.getVectorNumElements()); // Expanding requires fill mask with zeroes - Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, std::nullopt, - CostKind, 0, MaskTy); + Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, {}, CostKind, 0, + MaskTy); } // Pre-AVX512 - each maskmov load costs 2 + store costs ~8. @@ -5510,9 +5508,8 @@ X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy, // If we're reducing from 256/512 bits, use an extract_subvector. if (Size > 128) { auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts); - ReductionCost += - getShuffleCost(TTI::SK_ExtractSubvector, Ty, std::nullopt, CostKind, - NumVecElts, SubTy); + ReductionCost += getShuffleCost(TTI::SK_ExtractSubvector, Ty, {}, + CostKind, NumVecElts, SubTy); Ty = SubTy; } else if (Size == 128) { // Reducing from 128 bits is a permute of v2f64/v2i64. @@ -5523,8 +5520,8 @@ X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy, else ShufTy = FixedVectorType::get(Type::getInt64Ty(ValVTy->getContext()), 2); - ReductionCost += getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, - std::nullopt, CostKind, 0, nullptr); + ReductionCost += getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, {}, + CostKind, 0, nullptr); } else if (Size == 64) { // Reducing from 64 bits is a shuffle of v4f32/v4i32. FixedVectorType *ShufTy; @@ -5534,8 +5531,8 @@ X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy, else ShufTy = FixedVectorType::get(Type::getInt32Ty(ValVTy->getContext()), 4); - ReductionCost += getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, - std::nullopt, CostKind, 0, nullptr); + ReductionCost += getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, {}, + CostKind, 0, nullptr); } else { // Reducing from smaller size is a shift by immediate. auto *ShiftTy = FixedVectorType::get( @@ -5693,8 +5690,8 @@ X86TTIImpl::getMinMaxReductionCost(Intrinsic::ID IID, VectorType *ValTy, // If we're reducing from 256/512 bits, use an extract_subvector. if (Size > 128) { auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts); - MinMaxCost += getShuffleCost(TTI::SK_ExtractSubvector, Ty, std::nullopt, - CostKind, NumVecElts, SubTy); + MinMaxCost += getShuffleCost(TTI::SK_ExtractSubvector, Ty, {}, CostKind, + NumVecElts, SubTy); Ty = SubTy; } else if (Size == 128) { // Reducing from 128 bits is a permute of v2f64/v2i64. @@ -5704,8 +5701,8 @@ X86TTIImpl::getMinMaxReductionCost(Intrinsic::ID IID, VectorType *ValTy, FixedVectorType::get(Type::getDoubleTy(ValTy->getContext()), 2); else ShufTy = FixedVectorType::get(Type::getInt64Ty(ValTy->getContext()), 2); - MinMaxCost += getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, - std::nullopt, CostKind, 0, nullptr); + MinMaxCost += getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, {}, + CostKind, 0, nullptr); } else if (Size == 64) { // Reducing from 64 bits is a shuffle of v4f32/v4i32. 
FixedVectorType *ShufTy; @@ -5713,8 +5710,8 @@ X86TTIImpl::getMinMaxReductionCost(Intrinsic::ID IID, VectorType *ValTy, ShufTy = FixedVectorType::get(Type::getFloatTy(ValTy->getContext()), 4); else ShufTy = FixedVectorType::get(Type::getInt32Ty(ValTy->getContext()), 4); - MinMaxCost += getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, - std::nullopt, CostKind, 0, nullptr); + MinMaxCost += getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, {}, + CostKind, 0, nullptr); } else { // Reducing from smaller size is a shift by immediate. auto *ShiftTy = FixedVectorType::get( @@ -6470,8 +6467,8 @@ InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX512( TTI::ShuffleKind ShuffleKind = (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc; - InstructionCost ShuffleCost = getShuffleCost( - ShuffleKind, SingleMemOpTy, std::nullopt, CostKind, 0, nullptr); + InstructionCost ShuffleCost = + getShuffleCost(ShuffleKind, SingleMemOpTy, {}, CostKind, 0, nullptr); unsigned NumOfLoadsInInterleaveGrp = Indices.size() ? Indices.size() : Factor; @@ -6528,7 +6525,7 @@ InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX512( // shuffle. unsigned NumOfSources = Factor; // The number of values to be merged. InstructionCost ShuffleCost = getShuffleCost( - TTI::SK_PermuteTwoSrc, SingleMemOpTy, std::nullopt, CostKind, 0, nullptr); + TTI::SK_PermuteTwoSrc, SingleMemOpTy, {}, CostKind, 0, nullptr); unsigned NumOfShufflesPerStore = NumOfSources - 1; // The SK_MergeTwoSrc shuffle clobbers one of src operands. diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.h b/llvm/lib/Target/X86/X86TargetTransformInfo.h index b619090e8e1e0..8ea67dcbe5166 100644 --- a/llvm/lib/Target/X86/X86TargetTransformInfo.h +++ b/llvm/lib/Target/X86/X86TargetTransformInfo.h @@ -140,8 +140,7 @@ class X86TTIImpl : public BasicTTIImplBase { unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None}, - ArrayRef Args = std::nullopt, - const Instruction *CxtI = nullptr); + ArrayRef Args = {}, const Instruction *CxtI = nullptr); InstructionCost getAltInstrCost(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1, const SmallBitVector &OpcodeMask, @@ -151,7 +150,7 @@ class X86TTIImpl : public BasicTTIImplBase { ArrayRef Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, - ArrayRef Args = std::nullopt, + ArrayRef Args = {}, const Instruction *CxtI = nullptr); InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, diff --git a/llvm/lib/Transforms/Coroutines/CoroAnnotationElide.cpp b/llvm/lib/Transforms/Coroutines/CoroAnnotationElide.cpp index c3f258eb083da..ed036f254a1cf 100644 --- a/llvm/lib/Transforms/Coroutines/CoroAnnotationElide.cpp +++ b/llvm/lib/Transforms/Coroutines/CoroAnnotationElide.cpp @@ -78,7 +78,7 @@ static void processCall(CallBase *CB, Function *Caller, Function *NewCallee, } else if (auto *II = dyn_cast(CB)) { NewCB = InvokeInst::Create(NewCallee->getFunctionType(), NewCallee, II->getNormalDest(), II->getUnwindDest(), - NewArgs, std::nullopt, "", NewCBInsertPt); + NewArgs, {}, "", NewCBInsertPt); } else { llvm_unreachable("CallBase should either be Call or Invoke!"); } diff --git a/llvm/lib/Transforms/IPO/IROutliner.cpp b/llvm/lib/Transforms/IPO/IROutliner.cpp index aecae7c59afa4..ec72741c06e73 100644 --- a/llvm/lib/Transforms/IPO/IROutliner.cpp +++ b/llvm/lib/Transforms/IPO/IROutliner.cpp @@ -678,11 +678,9 @@ Function 
*IROutliner::createFunction(Module &M, OutlinableGroup &Group, Mg.getNameWithPrefix(MangledNameStream, F, false); DISubprogram *OutlinedSP = DB.createFunction( - Unit /* Context */, F->getName(), Dummy, - Unit /* File */, + Unit /* Context */, F->getName(), Dummy, Unit /* File */, 0 /* Line 0 is reserved for compiler-generated code. */, - DB.createSubroutineType( - DB.getOrCreateTypeArray(std::nullopt)), /* void type */ + DB.createSubroutineType(DB.getOrCreateTypeArray({})), /* void type */ 0, /* Line 0 is reserved for compiler-generated code. */ DINode::DIFlags::FlagArtificial /* Compiler-generated code. */, /* Outlined code is optimized code by definition. */ diff --git a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp index 19bc841b10529..36a1841b36346 100644 --- a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp +++ b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp @@ -860,8 +860,8 @@ void llvm::updatePublicTypeTestCalls(Module &M, for (Use &U : make_early_inc_range(PublicTypeTestFunc->uses())) { auto *CI = cast(U.getUser()); auto *NewCI = CallInst::Create( - TypeTestFunc, {CI->getArgOperand(0), CI->getArgOperand(1)}, - std::nullopt, "", CI->getIterator()); + TypeTestFunc, {CI->getArgOperand(0), CI->getArgOperand(1)}, {}, "", + CI->getIterator()); CI->replaceAllUsesWith(NewCI); CI->eraseFromParent(); } diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp index 61011d55227e7..f7a9406791801 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -3067,7 +3067,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) { LHS->getOpcode() == Instruction::Load && LHS->getType()->isPointerTy() && isValidAssumeForContext(II, LHS, &DT)) { - MDNode *MD = MDNode::get(II->getContext(), std::nullopt); + MDNode *MD = MDNode::get(II->getContext(), {}); LHS->setMetadata(LLVMContext::MD_nonnull, MD); LHS->setMetadata(LLVMContext::MD_noundef, MD); return RemoveConditionFromAssume(II); diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp index aa3f3fbdaeffa..eea3a010d72d6 100644 --- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp +++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp @@ -3333,8 +3333,8 @@ Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) { // Replace invoke with a NOP intrinsic to maintain the original CFG Module *M = II->getModule(); Function *F = Intrinsic::getDeclaration(M, Intrinsic::donothing); - InvokeInst::Create(F, II->getNormalDest(), II->getUnwindDest(), - std::nullopt, "", II->getParent()); + InvokeInst::Create(F, II->getNormalDest(), II->getUnwindDest(), {}, "", + II->getParent()); } // Remove debug intrinsics which describe the value contained within the diff --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp index 20fdf28011ca4..3c3cc2599aee2 100644 --- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp @@ -1175,7 +1175,7 @@ bool DataFlowSanitizer::initializeModule(Module &M) { PointerType::getUnqual(*Ctx), IntptrTy}; DFSanSetLabelFnTy = FunctionType::get(Type::getVoidTy(*Ctx), DFSanSetLabelArgs, /*isVarArg=*/false); - DFSanNonzeroLabelFnTy = FunctionType::get(Type::getVoidTy(*Ctx), std::nullopt, + DFSanNonzeroLabelFnTy = 
FunctionType::get(Type::getVoidTy(*Ctx), {}, /*isVarArg=*/false); DFSanVarargWrapperFnTy = FunctionType::get( Type::getVoidTy(*Ctx), PointerType::getUnqual(*Ctx), /*isVarArg=*/false); diff --git a/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp b/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp index 014e049ed0d8e..fe882164656df 100644 --- a/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp +++ b/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp @@ -1131,7 +1131,7 @@ Value *InstrLowerer::getCounterAddress(InstrProfCntrInstBase *I) { BiasLI = EntryBuilder.CreateLoad(Int64Ty, Bias, "profc_bias"); // Bias doesn't change after startup. BiasLI->setMetadata(LLVMContext::MD_invariant_load, - MDNode::get(M.getContext(), std::nullopt)); + MDNode::get(M.getContext(), {})); } auto *Add = Builder.CreateAdd(Builder.CreatePtrToInt(Addr, Int64Ty), BiasLI); return Builder.CreateIntToPtr(Add, Addr->getType()); @@ -1212,7 +1212,7 @@ Value *InstrLowerer::getBitmapAddress(InstrProfMCDCTVBitmapUpdate *I) { auto *BiasLI = EntryBuilder.CreateLoad(Int64Ty, Bias, "profbm_bias"); // Assume BiasLI invariant (in the function at least) BiasLI->setMetadata(LLVMContext::MD_invariant_load, - MDNode::get(M.getContext(), std::nullopt)); + MDNode::get(M.getContext(), {})); // Add Bias to Bitmaps and put it before the intrinsic. IRBuilder<> Builder(I); diff --git a/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp b/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp index e0f43e31288a3..2c8b4e76312a0 100644 --- a/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp +++ b/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp @@ -476,8 +476,8 @@ bool ObjCARCContract::tryToPeepholeInstruction( RVInstMarker->getString(), /*Constraints=*/"", /*hasSideEffects=*/true); - objcarc::createCallInstWithColors(IA, std::nullopt, "", - Inst->getIterator(), BlockColors); + objcarc::createCallInstWithColors(IA, {}, "", Inst->getIterator(), + BlockColors); } decline_rv_optimization: return false; diff --git a/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp b/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp index 72e860d7dcfa6..a2434675a7b5a 100644 --- a/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp +++ b/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp @@ -994,7 +994,7 @@ void ObjCARCOpt::OptimizeIndividualCallImpl(Function &F, Instruction *Inst, CallInst *NewCall = CallInst::Create(Decl, Call->getArgOperand(0), "", Call->getIterator()); NewCall->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease), - MDNode::get(C, std::nullopt)); + MDNode::get(C, {})); LLVM_DEBUG(dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) " "since x is otherwise unused.\nOld: " diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp index a37f295abbd31..a304f7b056f5f 100644 --- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp +++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp @@ -505,8 +505,8 @@ static void shortenAssignment(Instruction *Inst, Value *OriginalDest, // Failed to create a fragment expression for this so discard the value, // making this a kill location. 
auto *Expr = *DIExpression::createFragmentExpression( - DIExpression::get(Assign->getContext(), std::nullopt), - DeadFragment.OffsetInBits, DeadFragment.SizeInBits); + DIExpression::get(Assign->getContext(), {}), DeadFragment.OffsetInBits, + DeadFragment.SizeInBits); Assign->setExpression(Expr); Assign->setKillLocation(); }; diff --git a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp index d0a83a8242ddc..8af85d27adcf6 100644 --- a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp +++ b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp @@ -1380,7 +1380,7 @@ class LowerMatrixIntrinsics { for (unsigned I = 1; I < N; ++I) EmbedCost += TTI.getShuffleCost(TTI::SK_Splice, FixedVectorType::get(EltTy, 1), - std::nullopt, TTI::TCK_RecipThroughput); + {}, TTI::TCK_RecipThroughput); return EmbedCost; } @@ -1402,7 +1402,7 @@ class LowerMatrixIntrinsics { for (unsigned I = 1; I < N; ++I) EmbedCost -= TTI.getShuffleCost(TTI::SK_Splice, FixedVectorType::get(EltTy, 1), - std::nullopt, TTI::TCK_RecipThroughput); + {}, TTI::TCK_RecipThroughput); return EmbedCost; } diff --git a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp index ef6bbd37295ca..e3c12c971b9ab 100644 --- a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp +++ b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp @@ -236,7 +236,7 @@ static ArrayRef GetDeoptBundleOperands(const CallBase *Call) { if (!DeoptBundle) { assert(AllowStatepointWithNoDeoptInfo && "Found non-leaf call without deopt info!"); - return std::nullopt; + return {}; } return DeoptBundle->Inputs; diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp index d0186da1bc5e2..92589ab17da31 100644 --- a/llvm/lib/Transforms/Scalar/SROA.cpp +++ b/llvm/lib/Transforms/Scalar/SROA.cpp @@ -429,7 +429,7 @@ static void migrateDebugInfo(AllocaInst *OldAlloca, bool IsSplit, // discard the value component of this dbg.assign as the value cannot // be computed with the new fragment. Expr = *DIExpression::createFragmentExpression( - DIExpression::get(Expr->getContext(), std::nullopt), + DIExpression::get(Expr->getContext(), {}), NewFragment.OffsetInBits, NewFragment.SizeInBits); SetKillLocation = true; } @@ -445,8 +445,7 @@ static void migrateDebugInfo(AllocaInst *OldAlloca, bool IsSplit, ::Value *NewValue = Value ? 
                            Value : DbgAssign->getValue();
   auto *NewAssign = UnwrapDbgInstPtr(
       DIB.insertDbgAssign(Inst, NewValue, DbgAssign->getVariable(), Expr,
-                          Dest,
-                          DIExpression::get(Expr->getContext(), std::nullopt),
+                          Dest, DIExpression::get(Expr->getContext(), {}),
                           DbgAssign->getDebugLoc()),
       DbgAssign);
diff --git a/llvm/lib/Transforms/Utils/CodeExtractor.cpp b/llvm/lib/Transforms/Utils/CodeExtractor.cpp
index 895b588a9e5ac..e4291152cd83f 100644
--- a/llvm/lib/Transforms/Utils/CodeExtractor.cpp
+++ b/llvm/lib/Transforms/Utils/CodeExtractor.cpp
@@ -1553,8 +1553,7 @@ static void fixupDebugInfoPostExtraction(Function &OldFunc, Function &NewFunc,
   assert(OldSP->getUnit() && "Missing compile unit for subprogram");
   DIBuilder DIB(*OldFunc.getParent(), /*AllowUnresolved=*/false,
                 OldSP->getUnit());
-  auto SPType =
-      DIB.createSubroutineType(DIB.getOrCreateTypeArray(std::nullopt));
+  auto SPType = DIB.createSubroutineType(DIB.getOrCreateTypeArray({}));
   DISubprogram::DISPFlags SPFlags = DISubprogram::SPFlagDefinition |
                                     DISubprogram::SPFlagOptimized |
                                     DISubprogram::SPFlagLocalToUnit;
diff --git a/llvm/lib/Transforms/Utils/Debugify.cpp b/llvm/lib/Transforms/Utils/Debugify.cpp
index fcc82eadac36c..e5e2aa6556930 100644
--- a/llvm/lib/Transforms/Utils/Debugify.cpp
+++ b/llvm/lib/Transforms/Utils/Debugify.cpp
@@ -115,8 +115,7 @@ bool llvm::applyDebugifyMetadata(
       continue;

     bool InsertedDbgVal = false;
-    auto SPType =
-        DIB.createSubroutineType(DIB.getOrCreateTypeArray(std::nullopt));
+    auto SPType = DIB.createSubroutineType(DIB.getOrCreateTypeArray({}));
     DISubprogram::DISPFlags SPFlags =
         DISubprogram::SPFlagDefinition | DISubprogram::SPFlagOptimized;
     if (F.hasPrivateLinkage() || F.hasInternalLinkage())
diff --git a/llvm/lib/Transforms/Utils/InlineFunction.cpp b/llvm/lib/Transforms/Utils/InlineFunction.cpp
index 2e05fa80464b8..52da65ce01b82 100644
--- a/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -1042,7 +1042,7 @@ void ScopedAliasMetadataDeepCloner::clone() {
   SmallVector DummyNodes;
   for (const MDNode *I : MD) {
-    DummyNodes.push_back(MDTuple::getTemporary(I->getContext(), std::nullopt));
+    DummyNodes.push_back(MDTuple::getTemporary(I->getContext(), {}));
     MDMap[I].reset(DummyNodes.back().get());
   }
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index c85c819263e2a..725b512fb86e7 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -3638,7 +3638,7 @@ void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
   unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
   if (BitWidth == OldLI.getType()->getScalarSizeInBits() &&
       !getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
-    MDNode *NN = MDNode::get(OldLI.getContext(), std::nullopt);
+    MDNode *NN = MDNode::get(OldLI.getContext(), {});
     NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
   }
 }
diff --git a/llvm/lib/Transforms/Utils/ValueMapper.cpp b/llvm/lib/Transforms/Utils/ValueMapper.cpp
index 56e0eca7cea56..3faea48466ba9 100644
--- a/llvm/lib/Transforms/Utils/ValueMapper.cpp
+++ b/llvm/lib/Transforms/Utils/ValueMapper.cpp
@@ -391,9 +391,8 @@ Value *Mapper::mapValue(const Value *V) {
     // ensures metadata operands only reference defined SSA values.
     return (Flags & RF_IgnoreMissingLocals)
                ? nullptr
-               : MetadataAsValue::get(
-                     V->getContext(),
-                     MDTuple::get(V->getContext(), std::nullopt));
+               : MetadataAsValue::get(V->getContext(),
+                                      MDTuple::get(V->getContext(), {}));
   }
   if (auto *AL = dyn_cast(MD)) {
     SmallVector MappedArgs;
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 0fa7c2add1faa..8c6c132a72ba4 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -5672,8 +5672,8 @@ LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
   bool Reverse = ConsecutiveStride < 0;
   if (Reverse)
-    Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy,
-                               std::nullopt, CostKind, 0);
+    Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, {},
+                               CostKind, 0);
   return Cost;
 }
@@ -5752,8 +5752,8 @@ LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
     assert(!Legal->isMaskRequired(I) &&
            "Reverse masked interleaved access not supported.");
     Cost += Group->getNumMembers() *
-            TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy,
-                               std::nullopt, CostKind, 0);
+            TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, {},
+                               CostKind, 0);
   }
   return Cost;
 }
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 377bd08ee8ff1..ddeb97463281c 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -1293,7 +1293,7 @@ class BoUpSLP {

   /// \returns the vectorization cost of the subtree that starts at \p VL.
   /// A negative number means that this is profitable.
-  InstructionCost getTreeCost(ArrayRef VectorizedVals = std::nullopt);
+  InstructionCost getTreeCost(ArrayRef VectorizedVals = {});

   /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
   /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
@@ -1792,7 +1792,7 @@ class BoUpSLP {
         // Recursively calculate the cost at each level
         int TmpScore =
             getScoreAtLevelRec(I1->getOperand(OpIdx1), I2->getOperand(OpIdx2),
-                               I1, I2, CurrLevel + 1, std::nullopt);
+                               I1, I2, CurrLevel + 1, {});
         // Look for the best score.
         if (TmpScore > LookAheadHeuristics::ScoreFail &&
             TmpScore > MaxTmpScore) {
@@ -2601,7 +2601,7 @@ class BoUpSLP {
         int Score = LookAhead.getScoreAtLevelRec(Candidates[I].first,
                                                  Candidates[I].second,
                                                  /*U1=*/nullptr, /*U2=*/nullptr,
-                                                 /*CurrLevel=*/1, std::nullopt);
+                                                 /*CurrLevel=*/1, {});
         if (Score > BestScore) {
           BestScore = Score;
           Index = I;
@@ -3364,8 +3364,8 @@ class BoUpSLP {
                           std::optional Bundle,
                           const InstructionsState &S,
                           const EdgeInfo &UserTreeIdx,
-                          ArrayRef ReuseShuffleIndices = std::nullopt,
-                          ArrayRef ReorderIndices = std::nullopt) {
+                          ArrayRef ReuseShuffleIndices = {},
+                          ArrayRef ReorderIndices = {}) {
     TreeEntry::EntryState EntryState =
         Bundle ? TreeEntry::Vectorize : TreeEntry::NeedToGather;
     return newTreeEntry(VL, EntryState, Bundle, S, UserTreeIdx,
@@ -3377,8 +3377,8 @@ class BoUpSLP {
                           std::optional Bundle,
                           const InstructionsState &S,
                           const EdgeInfo &UserTreeIdx,
-                          ArrayRef ReuseShuffleIndices = std::nullopt,
-                          ArrayRef ReorderIndices = std::nullopt) {
+                          ArrayRef ReuseShuffleIndices = {},
+                          ArrayRef ReorderIndices = {}) {
     assert(((!Bundle && EntryState == TreeEntry::NeedToGather) ||
             (Bundle && EntryState != TreeEntry::NeedToGather)) &&
            "Need to vectorize gather entry?");
@@ -4678,10 +4678,10 @@ getGEPCosts(const TargetTransformInfo &TTI, ArrayRef Ptrs,
 /// subvector pattern.
 static InstructionCost
 getShuffleCost(const TargetTransformInfo &TTI, TTI::ShuffleKind Kind,
-               VectorType *Tp, ArrayRef Mask = std::nullopt,
+               VectorType *Tp, ArrayRef Mask = {},
                TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
                int Index = 0, VectorType *SubTp = nullptr,
-               ArrayRef Args = std::nullopt) {
+               ArrayRef Args = {}) {
   if (Kind != TTI::SK_PermuteTwoSrc)
     return TTI.getShuffleCost(Kind, Tp, Mask, CostKind, Index, SubTp, Args);
   int NumSrcElts = Tp->getElementCount().getKnownMinValue();
@@ -4841,8 +4841,7 @@ BoUpSLP::LoadsState BoUpSLP::canVectorizeLoads(
           TTI.getScalarizationOverhead(
               PtrVecTy, APInt::getOneBitSet(VecTy->getNumElements(), 0),
               /*Insert=*/true, /*Extract=*/false, CostKind) +
-          ::getShuffleCost(TTI, TTI::SK_Broadcast, PtrVecTy, std::nullopt,
-                           CostKind);
+          ::getShuffleCost(TTI, TTI::SK_Broadcast, PtrVecTy, {}, CostKind);
       // The cost of scalar loads.
       InstructionCost ScalarLoadsCost =
           std::accumulate(VL.begin(), VL.end(), InstructionCost(),
@@ -4941,7 +4940,7 @@ BoUpSLP::LoadsState BoUpSLP::canVectorizeLoads(
                     SubVecTy, APInt::getOneBitSet(VF, 0),
                     /*Insert=*/true, /*Extract=*/false, CostKind) +
                 ::getShuffleCost(TTI, TTI::SK_Broadcast, SubVecTy,
-                                 std::nullopt, CostKind);
+                                 {}, CostKind);
           }
           switch (LS) {
           case LoadsState::Vectorize:
@@ -7405,7 +7404,7 @@ void BoUpSLP::buildTree_rec(ArrayRef VL, unsigned Depth,
       if (IsIdentity)
         CurrentOrder.clear();
       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
-                                   std::nullopt, CurrentOrder);
+                                   {}, CurrentOrder);
       LLVM_DEBUG(dbgs() << "SLP: added inserts bundle.\n");

       TE->setOperandsInOrder();
@@ -8756,9 +8755,8 @@ class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
              "SK_ExtractSubvector index out of range");
       Cost += ::getShuffleCost(
           TTI, TTI::SK_ExtractSubvector,
-          getWidenedType(ScalarTy, alignTo(NumElts, EltsPerVector)),
-          std::nullopt, CostKind, Idx,
-          getWidenedType(ScalarTy, EltsPerVector));
+          getWidenedType(ScalarTy, alignTo(NumElts, EltsPerVector)), {},
+          CostKind, Idx, getWidenedType(ScalarTy, EltsPerVector));
     }
     // Second attempt to check, if just a permute is better estimated than
     // subvector extract.
@@ -9411,9 +9409,8 @@ class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
       }
       Cost += ::getShuffleCost(
           TTI, TTI::SK_InsertSubvector,
-          FixedVectorType::get(ScalarTy, CommonMask.size()), std::nullopt,
-          CostKind, Idx,
-          FixedVectorType::get(ScalarTy, E->getVectorFactor()));
+          FixedVectorType::get(ScalarTy, CommonMask.size()), {}, CostKind,
+          Idx, FixedVectorType::get(ScalarTy, E->getVectorFactor()));
       if (!CommonMask.empty()) {
         std::iota(std::next(CommonMask.begin(), Idx),
                   std::next(CommonMask.begin(), Idx + E->getVectorFactor()),
@@ -9824,9 +9821,8 @@ BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef VectorizedVals,
     if (!InMask.all() && NumScalars != NumElts && !IsWholeSubvector) {
       if (InsertVecSz != VecSz) {
         auto *ActualVecTy = getWidenedType(ScalarTy, VecSz);
-        Cost += ::getShuffleCost(*TTI, TTI::SK_InsertSubvector, ActualVecTy,
-                                 std::nullopt, CostKind, OffsetBeg - Offset,
-                                 InsertVecTy);
+        Cost += ::getShuffleCost(*TTI, TTI::SK_InsertSubvector, ActualVecTy, {},
+                                 CostKind, OffsetBeg - Offset, InsertVecTy);
       } else {
         for (unsigned I = 0, End = OffsetBeg - Offset; I < End; ++I)
           Mask[I] = InMask.test(I) ? PoisonMaskElem : I;
@@ -10021,7 +10017,7 @@ BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef VectorizedVals,
       TTI::OperandValueInfo Op1Info = getOperandInfo(E->getOperand(0));
       TTI::OperandValueInfo Op2Info = getOperandInfo(E->getOperand(OpIdx));
       return TTI->getArithmeticInstrCost(ShuffleOrOp, VecTy, CostKind, Op1Info,
-                                         Op2Info, std::nullopt, nullptr, TLI) +
+                                         Op2Info, {}, nullptr, TLI) +
              CommonCost;
     };
     return GetCostDiff(GetScalarCost, GetVectorCost);
@@ -12681,7 +12677,7 @@ Value *BoUpSLP::vectorizeOperand(TreeEntry *E, unsigned NodeIdx,
             return std::make_pair(VectorizableTree[P.first].get(), P.second);
           });
-      return ShuffleBuilder.finalize(std::nullopt, SubVectors);
+      return ShuffleBuilder.finalize({}, SubVectors);
     };
     Value *V = vectorizeTree(VE, PostponedPHIs);
     if (VF * getNumElements(VL[0]->getType()) !=
@@ -13248,7 +13244,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) {
                    E->ReorderIndices.size());
         ShuffleBuilder.add(V, Mask);
       } else if (E->State == TreeEntry::StridedVectorize && IsReverseOrder) {
-        ShuffleBuilder.addOrdered(V, std::nullopt);
+        ShuffleBuilder.addOrdered(V, {});
       } else {
         ShuffleBuilder.addOrdered(V, E->ReorderIndices);
       }
@@ -14657,7 +14653,7 @@ BoUpSLP::vectorizeTree(const ExtraValueToDebugLocsMap &ExternallyUsedValues,
     ShuffleBuilder.add(V1, CombinedMask1);
     if (V2)
       ShuffleBuilder.add(V2, CombinedMask2);
-    return ShuffleBuilder.finalize(std::nullopt, std::nullopt);
+    return ShuffleBuilder.finalize({}, {});
   };

   auto &&ResizeToVF = [&CreateShuffle](Value *Vec, ArrayRef Mask,
@@ -15784,8 +15780,7 @@ bool BoUpSLP::collectValuesToDemote(
     return false;
   };
   auto TryProcessInstruction =
-      [&](unsigned &BitWidth,
-          ArrayRef Operands = std::nullopt,
+      [&](unsigned &BitWidth, ArrayRef Operands = {},
           function_ref Checker = {}) {
         if (Operands.empty()) {
           if (!IsTruncRoot)
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index ecdf0b526f608..d8d233ad7b19b 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -2257,8 +2257,7 @@ InstructionCost VPWidenMemoryRecipe::computeCost(ElementCount VF,
     return Cost;

   return Cost += Ctx.TTI.getShuffleCost(TargetTransformInfo::SK_Reverse,
-                                        cast(Ty), std::nullopt,
-                                        CostKind, 0);
+                                        cast(Ty), {}, CostKind, 0);
 }

 void VPWidenLoadRecipe::execute(VPTransformState &State) {
diff --git a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
index 58701bfa60a33..a2ab5d9666407 100644
--- a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
+++ b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
@@ -1560,7 +1560,7 @@ bool VectorCombine::foldShuffleOfCastops(Instruction &I) {
   InstructionCost OldCost = CostC0 + CostC1;
   OldCost +=
       TTI.getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc, CastDstTy,
-                         OldMask, CostKind, 0, nullptr, std::nullopt, &I);
+                         OldMask, CostKind, 0, nullptr, {}, &I);

   InstructionCost NewCost = TTI.getShuffleCost(
       TargetTransformInfo::SK_PermuteTwoSrc, CastSrcTy, NewMask, CostKind);
diff --git a/llvm/tools/llvm-reduce/deltas/ReduceOpcodes.cpp b/llvm/tools/llvm-reduce/deltas/ReduceOpcodes.cpp
index c4812e59e8b85..fb8729c36a6f2 100644
--- a/llvm/tools/llvm-reduce/deltas/ReduceOpcodes.cpp
+++ b/llvm/tools/llvm-reduce/deltas/ReduceOpcodes.cpp
@@ -29,7 +29,7 @@ static bool shouldIgnoreArgument(const Value *V) {

 static Value *replaceIntrinsic(Module &M, IntrinsicInst *II,
                                Intrinsic::ID NewIID,
-                               ArrayRef Tys = std::nullopt) {
+                               ArrayRef Tys = {}) {
   Function *NewFunc = Intrinsic::getDeclaration(&M, NewIID, Tys);
   II->setCalledFunction(NewFunc);
   return II;
diff --git a/llvm/unittests/tools/llvm-mca/MCATestBase.h b/llvm/unittests/tools/llvm-mca/MCATestBase.h
index 443a208c164f3..66e20a45c96ce 100644
--- a/llvm/unittests/tools/llvm-mca/MCATestBase.h
+++ b/llvm/unittests/tools/llvm-mca/MCATestBase.h
@@ -74,7 +74,7 @@ class MCATestBase : public ::testing::Test {
   /// `llvm-mca` tool to verify result correctness.
   /// This function only displays on SummaryView by default.
   virtual Error runBaselineMCA(json::Object &Result, ArrayRef Insts,
-                               ArrayRef Views = std::nullopt,
+                               ArrayRef Views = {},
                                const mca::PipelineOptions *PO = nullptr);
 };

diff --git a/llvm/utils/TableGen/DAGISelMatcherGen.cpp b/llvm/utils/TableGen/DAGISelMatcherGen.cpp
index 5cb393ae7a538..981fb506b5825 100644
--- a/llvm/utils/TableGen/DAGISelMatcherGen.cpp
+++ b/llvm/utils/TableGen/DAGISelMatcherGen.cpp
@@ -692,8 +692,8 @@ void MatcherGen::EmitResultLeafAsOperand(const TreePatternNode &N,
     auto IDOperandNo = NextRecordedOperandNo++;
     Record *ImpDef = Def->getRecords().getDef("IMPLICIT_DEF");
     CodeGenInstruction &II = CGP.getTargetInfo().getInstruction(ImpDef);
-    AddMatcher(new EmitNodeMatcher(II, ResultVT, std::nullopt, false, false,
-                                   false, false, -1, IDOperandNo));
+    AddMatcher(new EmitNodeMatcher(II, ResultVT, {}, false, false, false,
+                                   false, -1, IDOperandNo));
     ResultOps.push_back(IDOperandNo);
     return;
   }
diff --git a/llvm/utils/TableGen/RegisterInfoEmitter.cpp b/llvm/utils/TableGen/RegisterInfoEmitter.cpp
index e076832674bde..95df22ad4af08 100644
--- a/llvm/utils/TableGen/RegisterInfoEmitter.cpp
+++ b/llvm/utils/TableGen/RegisterInfoEmitter.cpp
@@ -1720,7 +1720,7 @@ void RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
     OS << " };\n";
     OS << " return ArrayRef(Masks);\n";
   } else {
-    OS << " return std::nullopt;\n";
+    OS << " return {};\n";
   }

   OS << "}\n\n";
@@ -1800,7 +1800,7 @@ void RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
     OS << " };\n";
     OS << " return ArrayRef(Names);\n";
   } else {
-    OS << " return std::nullopt;\n";
+    OS << " return {};\n";
   }

   OS << "}\n\n";
diff --git a/llvm/utils/TableGen/SubtargetEmitter.cpp b/llvm/utils/TableGen/SubtargetEmitter.cpp
index 394e2eb42c15d..f17b746da4308 100644
--- a/llvm/utils/TableGen/SubtargetEmitter.cpp
+++ b/llvm/utils/TableGen/SubtargetEmitter.cpp
@@ -2019,11 +2019,11 @@ void SubtargetEmitter::run(raw_ostream &OS) {
   if (NumFeatures)
     OS << Target << "FeatureKV, ";
   else
-    OS << "std::nullopt, ";
+    OS << "{}, ";
   if (NumProcs)
     OS << Target << "SubTypeKV, ";
   else
-    OS << "std::nullopt, ";
+    OS << "{}, ";
   OS << '\n';
   OS.indent(22);
   OS << Target << "WriteProcResTable, " << Target << "WriteLatencyTable, "
@@ -2125,11 +2125,11 @@ void SubtargetEmitter::run(raw_ostream &OS) {
   if (NumFeatures)
     OS << "ArrayRef(" << Target << "FeatureKV, " << NumFeatures << "), ";
   else
-    OS << "std::nullopt, ";
+    OS << "{}, ";
   if (NumProcs)
     OS << "ArrayRef(" << Target << "SubTypeKV, " << NumProcs << "), ";
   else
-    OS << "std::nullopt, ";
+    OS << "{}, ";
   OS << '\n';
   OS.indent(24);
   OS << Target << "WriteProcResTable, " << Target << "WriteLatencyTable, "
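
Note: every hunk above applies the same mechanical pattern: an empty ArrayRef (or empty metadata/type array) that used to be spelled std::nullopt is now spelled {}, which default-constructs the same empty range without going through the std::nullopt_t overload. The snippet below is a minimal, self-contained sketch of that pattern; it is not part of the patch, the file name, sumOf helper, and main are hypothetical, and it assumes the LLVM headers are available on the include path.

// empty_arrayref_sketch.cpp -- illustrative only, not from the patch.
#include "llvm/ADT/ArrayRef.h"
#include <cassert>

// A default argument written as "= {}" default-constructs an empty ArrayRef,
// the same empty range the older "= std::nullopt" spelling produced.
static int sumOf(llvm::ArrayRef<int> Vals = {}) {
  int Sum = 0;
  for (int V : Vals)
    Sum += V;
  return Sum;
}

int main() {
  llvm::ArrayRef<int> Empty = {}; // empty: size() == 0
  assert(Empty.empty());
  assert(sumOf() == 0);           // default-argument path, no elements visited
  int Data[3] = {1, 2, 3};
  assert(sumOf(Data) == 6);       // implicit conversion from a C array
  return 0;
}

Spelling the empty default as {} keeps these signatures independent of ArrayRef's std::nullopt_t constructor, which appears to be the point of the change.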