diff --git a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
index b44c48afe705b..22424b11afb58 100644
--- a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
@@ -64,8 +64,8 @@ class AArch64ExpandPseudo : public MachineFunctionPass {
                             MachineBasicBlock::iterator &NextMBBI);
   bool expandMultiVecPseudo(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MBBI,
-                            TargetRegisterClass ContiguousClass,
-                            TargetRegisterClass StridedClass,
+                            const TargetRegisterClass &ContiguousClass,
+                            const TargetRegisterClass &StridedClass,
                             unsigned ContiguousOpc, unsigned StridedOpc);
   bool expandFormTuplePseudo(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MBBI,
@@ -1121,7 +1121,8 @@ AArch64ExpandPseudo::expandCondSMToggle(MachineBasicBlock &MBB,
 
 bool AArch64ExpandPseudo::expandMultiVecPseudo(
     MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
-    TargetRegisterClass ContiguousClass, TargetRegisterClass StridedClass,
+    const TargetRegisterClass &ContiguousClass,
+    const TargetRegisterClass &StridedClass,
     unsigned ContiguousOp, unsigned StridedOpc) {
   MachineInstr &MI = *MBBI;
   Register Tuple = MI.getOperand(0).getReg();
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index a082a1ebe95bf..89a8c981a330d 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -4252,7 +4252,7 @@ class TagStoreEdit {
   }
   // Add an instruction to be replaced. Instructions must be added in the
   // ascending order of Offset, and have to be adjacent.
-  void addInstruction(TagStoreInstr I) {
+  void addInstruction(const TagStoreInstr &I) {
     assert((TagStores.empty() ||
             TagStores.back().Offset + TagStores.back().Size == I.Offset) &&
            "Non-adjacent tag store instructions.");
diff --git a/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp b/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
index 17adda15d9fc8..0edb5c436808f 100644
--- a/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
@@ -38,8 +38,8 @@ SDValue AArch64SelectionDAGInfo::EmitMOPS(unsigned Opcode, SelectionDAG &DAG,
                                           SDValue Dst, SDValue SrcOrValue,
                                           SDValue Size, Align Alignment,
                                           bool isVolatile,
-                                          MachinePointerInfo DstPtrInfo,
-                                          MachinePointerInfo SrcPtrInfo) const {
+                                          const MachinePointerInfo &DstPtrInfo,
+                                          const MachinePointerInfo &SrcPtrInfo) const {
   // Get the constant size of the copy/set.
   uint64_t ConstSize = 0;
diff --git a/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.h b/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.h
index 7efe49c720655..fe3fe7705def5 100644
--- a/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.h
@@ -26,8 +26,8 @@ class AArch64SelectionDAGInfo : public SelectionDAGTargetInfo {
   SDValue EmitMOPS(unsigned Opcode, SelectionDAG &DAG, const SDLoc &DL,
                    SDValue Chain, SDValue Dst, SDValue SrcOrValue, SDValue Size,
                    Align Alignment, bool isVolatile,
-                   MachinePointerInfo DstPtrInfo,
-                   MachinePointerInfo SrcPtrInfo) const;
+                   const MachinePointerInfo &DstPtrInfo,
+                   const MachinePointerInfo &SrcPtrInfo) const;
 
   SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
                                   SDValue Chain, SDValue Dst, SDValue Src,
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index e2389145cf33f..a45df57dfac44 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -121,7 +121,7 @@ class TailFoldingOption {
     return Bits;
   }
 
-  void reportError(std::string Opt) {
+  void reportError(const std::string &Opt) {
    errs() << "invalid argument '" << Opt
           << "' to -sve-tail-folding=; the option should be of the form\n"
              " (disabled|all|default|simple)[+(reductions|recurrences"
diff --git a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index d3eda48f3276e..bae7c12e02a9e 100644
--- a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -2455,7 +2455,7 @@ class AArch64Operand : public MCParsedAsmOperand {
   }
 
   static std::unique_ptr<AArch64Operand>
-  CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
+  CreateFPImm(const APFloat &Val, bool IsExact, SMLoc S, MCContext &Ctx) {
     auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
     Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
     Op->FPImm.IsExact = IsExact;
@@ -3837,7 +3837,7 @@ static const struct Extension {
     {"sme-tmop", {AArch64::FeatureSME_TMOP}},
 };
 
-static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
+static void setRequiredFeatureString(const FeatureBitset &FBS, std::string &Str) {
   if (FBS[AArch64::HasV8_0aOps])
     Str += "ARMv8a";
   if (FBS[AArch64::HasV8_1aOps])
diff --git a/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h b/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h
index 9671fa3b3d92f..49e823615b003 100644
--- a/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h
+++ b/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h
@@ -373,7 +373,7 @@ struct SysAlias {
   constexpr SysAlias(const char *N, uint16_t E, FeatureBitset F)
       : Name(N), Encoding(E), FeaturesRequired(F) {}
 
-  bool haveFeatures(FeatureBitset ActiveFeatures) const {
+  bool haveFeatures(const FeatureBitset &ActiveFeatures) const {
    return ActiveFeatures[llvm::AArch64::FeatureAll] ||
           (FeaturesRequired & ActiveFeatures) == FeaturesRequired;
   }
@@ -634,7 +634,7 @@ struct PHint {
   unsigned Encoding;
   FeatureBitset FeaturesRequired;
 
-  bool haveFeatures(FeatureBitset ActiveFeatures) const {
+  bool haveFeatures(const FeatureBitset &ActiveFeatures) const {
    return ActiveFeatures[llvm::AArch64::FeatureAll] ||
           (FeaturesRequired & ActiveFeatures) == FeaturesRequired;
   }
@@ -753,7 +753,7 @@ namespace AArch64SysReg {
     bool Writeable;
     FeatureBitset FeaturesRequired;
 
-    bool haveFeatures(FeatureBitset ActiveFeatures) const {
+    bool haveFeatures(const FeatureBitset &ActiveFeatures) const {
      return ActiveFeatures[llvm::AArch64::FeatureAll] ||
             (FeaturesRequired & ActiveFeatures) == FeaturesRequired;
     }
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUDelayedMCExpr.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUDelayedMCExpr.cpp
index ceb475d77cb32..789ec58845856 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUDelayedMCExpr.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUDelayedMCExpr.cpp
@@ -12,8 +12,8 @@
 
 using namespace llvm;
 
-static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type,
-                                MCValue Val) {
+static msgpack::DocNode getNode(const msgpack::DocNode &DN, msgpack::Type Type,
+                                const MCValue &Val) {
   msgpack::Document *Doc = DN.getDocument();
   switch (Type) {
   default:
diff --git a/llvm/lib/Target/ARM/ARMInstructionSelector.cpp b/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
index 2d3cb71fbc3fd..1054ed45a41ed 100644
--- a/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
+++ b/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
@@ -44,13 +44,13 @@ class ARMInstructionSelector : public InstructionSelector {
   struct CmpConstants;
   struct InsertInfo;
 
-  bool selectCmp(CmpConstants Helper, MachineInstrBuilder &MIB,
+  bool selectCmp(const CmpConstants &Helper, MachineInstrBuilder &MIB,
                  MachineRegisterInfo &MRI) const;
 
   // Helper for inserting a comparison sequence that sets \p ResReg to either 1
   // if \p LHSReg and \p RHSReg are in the relationship defined by \p Cond, or
   // \p PrevRes otherwise. In essence, it computes PrevRes OR (LHS Cond RHS).
-  bool insertComparison(CmpConstants Helper, InsertInfo I, unsigned ResReg,
+  bool insertComparison(const CmpConstants &Helper, InsertInfo I, unsigned ResReg,
                         ARMCC::CondCodes Cond, unsigned LHSReg, unsigned RHSReg,
                         unsigned PrevRes) const;
 
@@ -525,7 +525,7 @@ bool ARMInstructionSelector::validReg(MachineRegisterInfo &MRI, unsigned Reg,
   return true;
 }
 
-bool ARMInstructionSelector::selectCmp(CmpConstants Helper,
+bool ARMInstructionSelector::selectCmp(const CmpConstants &Helper,
                                        MachineInstrBuilder &MIB,
                                        MachineRegisterInfo &MRI) const {
   const InsertInfo I(MIB);
@@ -572,7 +572,7 @@ bool ARMInstructionSelector::selectCmp(CmpConstants Helper,
   return true;
 }
 
-bool ARMInstructionSelector::insertComparison(CmpConstants Helper, InsertInfo I,
+bool ARMInstructionSelector::insertComparison(const CmpConstants &Helper, InsertInfo I,
                                               unsigned ResReg,
                                               ARMCC::CondCodes Cond,
                                               unsigned LHSReg, unsigned RHSReg,
diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
index 357654615e002..e2586926cefe4 100644
--- a/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
+++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
@@ -27,13 +27,15 @@ class ARMMachObjectWriter : public MCMachObjectTargetWriter {
   void recordARMScatteredRelocation(MachObjectWriter *Writer,
                                     const MCAssembler &Asm,
                                     const MCFragment *Fragment,
-                                    const MCFixup &Fixup, MCValue Target,
+                                    const MCFixup &Fixup,
+                                    const MCValue &Target,
                                     unsigned Type, unsigned Log2Size,
                                     uint64_t &FixedValue);
   void recordARMScatteredHalfRelocation(MachObjectWriter *Writer,
                                         const MCAssembler &Asm,
                                         const MCFragment *Fragment,
-                                        const MCFixup &Fixup, MCValue Target,
+                                        const MCFixup &Fixup,
+                                        const MCValue &Target,
                                         uint64_t &FixedValue);
 
   bool requiresExternRelocation(MachObjectWriter *Writer,
@@ -130,7 +132,7 @@ static bool getARMFixupKindMachOInfo(unsigned Kind, unsigned &RelocType,
 
 void ARMMachObjectWriter::recordARMScatteredHalfRelocation(
     MachObjectWriter *Writer, const MCAssembler &Asm,
-    const MCFragment *Fragment, const MCFixup &Fixup, MCValue Target,
+    const MCFragment *Fragment, const MCFixup &Fixup, const MCValue &Target,
     uint64_t &FixedValue) {
   uint32_t FixupOffset = Asm.getFragmentOffset(*Fragment) + Fixup.getOffset();
 
@@ -240,7 +242,7 @@ void ARMMachObjectWriter::recordARMScatteredHalfRelocation(
 
 void ARMMachObjectWriter::recordARMScatteredRelocation(
     MachObjectWriter *Writer, const MCAssembler &Asm,
-    const MCFragment *Fragment, const MCFixup &Fixup, MCValue Target,
+    const MCFragment *Fragment, const MCFixup &Fixup, const MCValue &Target,
     unsigned Type, unsigned Log2Size, uint64_t &FixedValue) {
   uint32_t FixupOffset = Asm.getFragmentOffset(*Fragment) + Fixup.getOffset();
 
diff --git a/llvm/lib/Target/ARM/Utils/ARMBaseInfo.h b/llvm/lib/Target/ARM/Utils/ARMBaseInfo.h
index dc4f811e075c6..0d895e600b105 100644
--- a/llvm/lib/Target/ARM/Utils/ARMBaseInfo.h
+++ b/llvm/lib/Target/ARM/Utils/ARMBaseInfo.h
@@ -196,12 +196,12 @@ namespace ARMSysReg {
     FeatureBitset FeaturesRequired;
 
     // return true if FeaturesRequired are all present in ActiveFeatures
-    bool hasRequiredFeatures(FeatureBitset ActiveFeatures) const {
+    bool hasRequiredFeatures(const FeatureBitset &ActiveFeatures) const {
       return (FeaturesRequired & ActiveFeatures) == FeaturesRequired;
     }
 
    // returns true if TestFeatures are all present in FeaturesRequired
-    bool isInRequiredFeatures(FeatureBitset TestFeatures) const {
+    bool isInRequiredFeatures(const FeatureBitset &TestFeatures) const {
      return (FeaturesRequired & TestFeatures) == TestFeatures;
     }
   };
diff --git a/llvm/lib/Target/AVR/MCTargetDesc/AVRAsmBackend.cpp b/llvm/lib/Target/AVR/MCTargetDesc/AVRAsmBackend.cpp
index fbed25157a44e..c392b13e1920d 100644
--- a/llvm/lib/Target/AVR/MCTargetDesc/AVRAsmBackend.cpp
+++ b/llvm/lib/Target/AVR/MCTargetDesc/AVRAsmBackend.cpp
@@ -32,7 +32,7 @@ namespace adjust {
 using namespace llvm;
 
 static void unsigned_width(unsigned Width, uint64_t Value,
-                           std::string Description, const MCFixup &Fixup,
+                           const std::string &Description, const MCFixup &Fixup,
                            MCContext *Ctx) {
   if (!isUIntN(Width, Value)) {
     std::string Diagnostic = "out of range " + Description;
diff --git a/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp b/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp
index 86ce6b4e05ed2..3c95714ef78ba 100644
--- a/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp
@@ -202,7 +202,7 @@ namespace {
         Pos = std::distance(B->begin(), It);
       }
     }
-    bool operator<(Loc A) const {
+    bool operator<(const Loc &A) const {
      if (Block != A.Block)
        return Block->getNumber() < A.Block->getNumber();
      if (A.Pos == -1)
diff --git a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp
index db9aa7e18f5e7..d5def5342d8de 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp
@@ -949,7 +949,7 @@ namespace llvm {
   void selectRor(SDNode *N);
   void selectVAlign(SDNode *N);
 
-  static SmallVector getPerfectCompletions(ShuffleMask SM,
+  static SmallVector getPerfectCompletions(const ShuffleMask &SM,
                                            unsigned Width);
   static SmallVector completeToPerfect(
       ArrayRef Completions, unsigned Width);
@@ -966,22 +966,22 @@ namespace llvm {
     None,
     PackMux,
   };
-  OpRef concats(OpRef Va, OpRef Vb, ResultStack &Results);
+  OpRef concats(const OpRef &Va, const OpRef &Vb, ResultStack &Results);
   OpRef funnels(OpRef Va, OpRef Vb, int Amount, ResultStack &Results);
 
   OpRef packs(ShuffleMask SM, OpRef Va, OpRef Vb, ResultStack &Results,
              MutableArrayRef NewMask, unsigned Options = None);
-  OpRef packp(ShuffleMask SM, OpRef Va, OpRef Vb, ResultStack &Results,
+  OpRef packp(const ShuffleMask &SM, const OpRef &Va, const OpRef &Vb, ResultStack &Results,
              MutableArrayRef NewMask);
-  OpRef vmuxs(ArrayRef Bytes, OpRef Va, OpRef Vb,
+  OpRef vmuxs(ArrayRef Bytes, const OpRef &Va, const OpRef &Vb,
              ResultStack &Results);
-  OpRef vmuxp(ArrayRef Bytes, OpRef Va, OpRef Vb,
+  OpRef vmuxp(ArrayRef Bytes, const OpRef &Va, const OpRef &Vb,
              ResultStack &Results);
 
-  OpRef shuffs1(ShuffleMask SM, OpRef Va, ResultStack &Results);
-  OpRef shuffs2(ShuffleMask SM, OpRef Va, OpRef Vb, ResultStack &Results);
-  OpRef shuffp1(ShuffleMask SM, OpRef Va, ResultStack &Results);
-  OpRef shuffp2(ShuffleMask SM, OpRef Va, OpRef Vb, ResultStack &Results);
+  OpRef shuffs1(ShuffleMask SM, const OpRef &Va, ResultStack &Results);
+  OpRef shuffs2(const ShuffleMask &SM, const OpRef &Va, const OpRef &Vb, ResultStack &Results);
+  OpRef shuffp1(const ShuffleMask &SM, const OpRef &Va, ResultStack &Results);
+  OpRef shuffp2(const ShuffleMask &SM, const OpRef &Va, const OpRef &Vb, ResultStack &Results);
 
   OpRef butterfly(ShuffleMask SM, OpRef Va, ResultStack &Results);
   OpRef contracting(ShuffleMask SM, OpRef Va, OpRef Vb, ResultStack &Results);
@@ -1048,7 +1048,7 @@ static bool isLowHalfOnly(ArrayRef Mask) {
   return llvm::all_of(Mask.drop_front(L / 2), [](int M) { return M < 0; });
 }
 
-static SmallVector getInputSegmentList(ShuffleMask SM,
+static SmallVector getInputSegmentList(const ShuffleMask &SM,
                                        unsigned SegLen) {
   assert(isPowerOf2_32(SegLen));
   SmallVector SegList;
@@ -1068,7 +1068,7 @@ static SmallVector getInputSegmentList(ShuffleMask SM,
   return SegList;
 }
 
-static SmallVector getOutputSegmentMap(ShuffleMask SM,
+static SmallVector getOutputSegmentMap(const ShuffleMask &SM,
                                        unsigned SegLen) {
   // Calculate the layout of the output segments in terms of the input
   // segments.
@@ -1213,7 +1213,7 @@ void HvxSelector::materialize(const ResultStack &Results) {
   DAG.RemoveDeadNodes();
 }
 
-OpRef HvxSelector::concats(OpRef Lo, OpRef Hi, ResultStack &Results) {
+OpRef HvxSelector::concats(const OpRef &Lo, const OpRef &Hi, ResultStack &Results) {
  DEBUG_WITH_TYPE("isel", {dbgs() << __func__ << '\n';});
  const SDLoc &dl(Results.InpNode);
  Results.push(TargetOpcode::REG_SEQUENCE, getPairVT(MVT::i8), {
@@ -1496,7 +1496,7 @@ OpRef HvxSelector::packs(ShuffleMask SM, OpRef Va, OpRef Vb,
 // Va, Vb are vector pairs. If SM only uses two single vectors from Va/Vb,
 // pack these vectors into a pair, and remap SM into NewMask to use the
 // new pair instead.
-OpRef HvxSelector::packp(ShuffleMask SM, OpRef Va, OpRef Vb,
+OpRef HvxSelector::packp(const ShuffleMask &SM, const OpRef &Va, const OpRef &Vb,
                          ResultStack &Results, MutableArrayRef NewMask) {
  DEBUG_WITH_TYPE("isel", {dbgs() << __func__ << '\n';});
  SmallVector SegList = getInputSegmentList(SM.Mask, HwLen);
@@ -1533,7 +1533,7 @@ OpRef HvxSelector::packp(ShuffleMask SM, OpRef Va, OpRef Vb,
   return concats(Out[0], Out[1], Results);
 }
 
-OpRef HvxSelector::vmuxs(ArrayRef Bytes, OpRef Va, OpRef Vb,
+OpRef HvxSelector::vmuxs(ArrayRef Bytes, const OpRef &Va, const OpRef &Vb,
                          ResultStack &Results) {
  DEBUG_WITH_TYPE("isel", {dbgs() << __func__ << '\n';});
  MVT ByteTy = getSingleVT(MVT::i8);
@@ -1546,7 +1546,7 @@ OpRef HvxSelector::vmuxs(ArrayRef Bytes, OpRef Va, OpRef Vb,
   return OpRef::res(Results.top());
 }
 
-OpRef HvxSelector::vmuxp(ArrayRef Bytes, OpRef Va, OpRef Vb,
+OpRef HvxSelector::vmuxp(ArrayRef Bytes, const OpRef &Va, const OpRef &Vb,
                          ResultStack &Results) {
  DEBUG_WITH_TYPE("isel", {dbgs() << __func__ << '\n';});
  size_t S = Bytes.size() / 2;
@@ -1555,7 +1555,7 @@ OpRef HvxSelector::vmuxp(ArrayRef Bytes, OpRef Va, OpRef Vb,
   return concats(L, H, Results);
 }
 
-OpRef HvxSelector::shuffs1(ShuffleMask SM, OpRef Va, ResultStack &Results) {
+OpRef HvxSelector::shuffs1(ShuffleMask SM, const OpRef &Va, ResultStack &Results) {
  DEBUG_WITH_TYPE("isel", {dbgs() << __func__ << '\n';});
  unsigned VecLen = SM.Mask.size();
  assert(HwLen == VecLen);
@@ -1598,7 +1598,7 @@ OpRef HvxSelector::shuffs1(ShuffleMask SM, OpRef Va, ResultStack &Results) {
   return butterfly(SM, Va, Results);
 }
 
-OpRef HvxSelector::shuffs2(ShuffleMask SM, OpRef Va, OpRef Vb,
+OpRef HvxSelector::shuffs2(const ShuffleMask &SM, const OpRef &Va, const OpRef &Vb,
                            ResultStack &Results) {
  DEBUG_WITH_TYPE("isel", {dbgs() << __func__ << '\n';});
  if (isUndef(SM.Mask))
@@ -1633,7 +1633,7 @@ OpRef HvxSelector::shuffs2(ShuffleMask SM, OpRef Va, OpRef Vb,
   return vmuxs(Bytes, L, R, Results);
 }
 
-OpRef HvxSelector::shuffp1(ShuffleMask SM, OpRef Va, ResultStack &Results) {
+OpRef HvxSelector::shuffp1(const ShuffleMask &SM, const OpRef &Va, ResultStack &Results) {
  DEBUG_WITH_TYPE("isel", {dbgs() << __func__ << '\n';});
  int VecLen = SM.Mask.size();
 
@@ -1676,7 +1676,7 @@ OpRef HvxSelector::shuffp1(ShuffleMask SM, OpRef Va, ResultStack &Results) {
   return OpRef::fail();
 }
 
-OpRef HvxSelector::shuffp2(ShuffleMask SM, OpRef Va, OpRef Vb,
+OpRef HvxSelector::shuffp2(const ShuffleMask &SM, const OpRef &Va, const OpRef &Vb,
                            ResultStack &Results) {
  DEBUG_WITH_TYPE("isel", {dbgs() << __func__ << '\n';});
  if (isUndef(SM.Mask))
@@ -1917,7 +1917,7 @@ bool HvxSelector::scalarizeShuffle(ArrayRef Mask, const SDLoc &dl,
   return true;
 }
 
-SmallVector HvxSelector::getPerfectCompletions(ShuffleMask SM,
+SmallVector HvxSelector::getPerfectCompletions(const ShuffleMask &SM,
                                                unsigned Width) {
   auto possibilities = [](ArrayRef Bs, unsigned Width) -> uint32_t {
     unsigned Impossible = ~(1u << Width) + 1;
diff --git a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
index cb756246b8d11..7f85694bc2bfa 100644
--- a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
@@ -1822,7 +1822,7 @@ void NVPTXAsmPrinter::printFPConstant(const ConstantFP *Fp, raw_ostream &O) {
   } else
     llvm_unreachable("unsupported fp type");
 
-  APInt API = APF.bitcastToAPInt();
+  const APInt &API = APF.bitcastToAPInt();
   O << lead << format_hex_no_prefix(API.getZExtValue(), numHex,
                                     /*Upper=*/true);
 }
diff --git a/llvm/lib/Target/NVPTX/NVPTXMCExpr.cpp b/llvm/lib/Target/NVPTX/NVPTXMCExpr.cpp
index 95125eb41bc05..5a9f2dd969435 100644
--- a/llvm/lib/Target/NVPTX/NVPTXMCExpr.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXMCExpr.cpp
@@ -51,7 +51,7 @@ void NVPTXFloatMCExpr::printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const {
     break;
   }
 
-  APInt API = APF.bitcastToAPInt();
+  const APInt &API = APF.bitcastToAPInt();
   OS << format_hex_no_prefix(API.getZExtValue(), NumHex, /*Upper=*/true);
 }
diff --git a/llvm/lib/TargetParser/ARMTargetParser.cpp b/llvm/lib/TargetParser/ARMTargetParser.cpp
index 9bcfa6ca62c97..86db9d75fbd45 100644
--- a/llvm/lib/TargetParser/ARMTargetParser.cpp
+++ b/llvm/lib/TargetParser/ARMTargetParser.cpp
@@ -650,7 +650,7 @@ StringRef ARM::getARMCPUForArch(const llvm::Triple &Triple, StringRef MArch) {
   llvm_unreachable("invalid arch name");
 }
 
-void ARM::PrintSupportedExtensions(StringMap DescMap) {
+void ARM::PrintSupportedExtensions(StringMap &DescMap) {
   outs() << "All available -march extensions for ARM\n\n"
          << " " << left_justify("Name", 20)
         << (DescMap.empty() ? "\n" : "Description\n");
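Note (illustration only, not part of the patch): every hunk above replaces a by-value parameter with a `const` reference for a type that is expensive to copy (`FeatureBitset`, `APFloat`, `MachinePointerInfo`, `std::string`, and similar). A minimal standalone sketch of the pattern, using a hypothetical `FeatureSet` stand-in rather than any LLVM type:

```cpp
// Hypothetical stand-in types; nothing here is taken from the patch itself.
#include <bitset>
#include <cstdio>

using FeatureSet = std::bitset<512>; // large object, cheap to read, costly to copy

// Before: the whole bitset is copied into each parameter on every call.
static bool hasFeaturesByValue(FeatureSet Required, FeatureSet Active) {
  return (Required & Active) == Required;
}

// After: read-only access through const references, no copies, same result.
static bool hasFeatures(const FeatureSet &Required, const FeatureSet &Active) {
  return (Required & Active) == Required;
}

int main() {
  FeatureSet Required, Active;
  Required.set(3);
  Active.set(3);
  Active.set(7);
  // Both variants agree; only the argument-passing convention differs.
  std::printf("%d %d\n", hasFeaturesByValue(Required, Active),
              hasFeatures(Required, Active)); // prints: 1 1
  return 0;
}
```

The intent is the same as in the diff: no behavior change at call sites, just avoiding copies of large objects that the callee only reads.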