AArch64TTIImpl(const AArch64TargetMachine *TM, const Function &F) | llvm::AArch64TTIImpl | inline explicit |
addrspacesMayAlias(unsigned AS0, unsigned AS1) const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
adjustInliningThreshold(const CallBase *CB) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::adjustInliningThreshold(const CallBase *CB) const | llvm::TargetTransformInfoImplBase | inline |
allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth, unsigned AddressSpace, Align Alignment, unsigned *Fast) const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
areInlineCompatible(const Function *Caller, const Function *Callee) const | llvm::AArch64TTIImpl | |
areTypesABICompatible(const Function *Caller, const Function *Callee, const ArrayRef< Type * > &Types) const | llvm::AArch64TTIImpl | |
BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline explicit protected |
canHaveNonUndefGlobalInitializerInAddressSpace(unsigned AS) const | llvm::TargetTransformInfoImplBase | inline |
canMacroFuseCmp() const | llvm::TargetTransformInfoImplBase | inline |
canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC, TargetLibraryInfo *LibInfo) const | llvm::TargetTransformInfoImplBase | inline |
collectFlatAddressOperands(SmallVectorImpl< int > &OpIndexes, Intrinsic::ID IID) const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
DL | llvm::BasicTTIImplBase< AArch64TTIImpl > | protected |
enableAggressiveInterleaving(bool LoopHasReductions) const | llvm::TargetTransformInfoImplBase | inline |
enableInterleavedAccessVectorization() | llvm::AArch64TTIImpl | inline |
BasicTTIImplBase< AArch64TTIImpl >::enableInterleavedAccessVectorization() const | llvm::TargetTransformInfoImplBase | inline |
enableMaskedInterleavedAccessVectorization() | llvm::AArch64TTIImpl | inline |
BasicTTIImplBase< AArch64TTIImpl >::enableMaskedInterleavedAccessVectorization() const | llvm::TargetTransformInfoImplBase | inline |
enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const | llvm::AArch64TTIImpl | |
enableOrderedReductions() const | llvm::AArch64TTIImpl | inline |
enableScalableVectorization() const | llvm::AArch64TTIImpl | |
enableSelectOptimize() | llvm::AArch64TTIImpl | inline |
BasicTTIImplBase< AArch64TTIImpl >::enableSelectOptimize() const | llvm::TargetTransformInfoImplBase | inline |
enableWritePrefetching() const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline virtual |
forceScalarizeMaskedGather(VectorType *DataType, Align Alignment) const | llvm::TargetTransformInfoImplBase | inline |
forceScalarizeMaskedScatter(VectorType *DataType, Align Alignment) const | llvm::TargetTransformInfoImplBase | inline |
getAddressComputationCost(Type *Ty, ScalarEvolution *SE, const SCEV *Ptr) | llvm::AArch64TTIImpl | |
llvm::TargetTransformInfoImplCRTPBase::getAddressComputationCost(Type *Tp, ScalarEvolution *, const SCEV *) const | llvm::TargetTransformInfoImplBase | inline |
getAltInstrCost(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1, const SmallBitVector &OpcodeMask, TTI::TargetCostKind CostKind) const | llvm::TargetTransformInfoImplBase | inline |
getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) | llvm::AArch64TTIImpl | |
llvm::TargetTransformInfoImplCRTPBase::getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Opd1Info, TTI::OperandValueInfo Opd2Info, ArrayRef< const Value * > Args, const Instruction *CxtI=nullptr) const | llvm::TargetTransformInfoImplBase | inline |
getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind) | llvm::AArch64TTIImpl | |
llvm::TargetTransformInfoImplCRTPBase::getArithmeticReductionCost(unsigned, VectorType *, std::optional< FastMathFlags > FMF, TTI::TargetCostKind) const | llvm::TargetTransformInfoImplBase | inline |
getArithmeticReductionCostSVE(unsigned Opcode, VectorType *ValTy, TTI::TargetCostKind CostKind) | llvm::AArch64TTIImpl | |
getAssumedAddrSpace(const Value *V) const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
getAtomicMemIntrinsicMaxElementSize() const | llvm::TargetTransformInfoImplBase | inline |
getBranchMispredictPenalty() const | llvm::TargetTransformInfoImplBase | inline |
getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline virtual |
getCacheLineSize() const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline virtual |
getCacheSize(TargetTransformInfo::CacheLevel Level) const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline virtual |
getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
getCallInstrCost(Function *F, Type *RetTy, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::getCallInstrCost(Function *F, Type *RetTy, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const | llvm::TargetTransformInfoImplBase | inline |
getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) | llvm::AArch64TTIImpl | |
llvm::TargetTransformInfoImplCRTPBase::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I) const | llvm::TargetTransformInfoImplBase | inline |
getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) | llvm::AArch64TTIImpl | |
llvm::TargetTransformInfoImplCRTPBase::getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const | llvm::TargetTransformInfoImplBase | inline |
getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) | llvm::AArch64TTIImpl | |
llvm::TargetTransformInfoImplCRTPBase::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info, const Instruction *I) const | llvm::TargetTransformInfoImplBase | inline |
getConstantStrideStep(ScalarEvolution *SE, const SCEV *Ptr) const | llvm::TargetTransformInfoImplBase | inline protected |
getCostOfKeepingLiveOverCall(ArrayRef< Type * > Tys) | llvm::AArch64TTIImpl | |
BasicTTIImplBase< AArch64TTIImpl >::getCostOfKeepingLiveOverCall(ArrayRef< Type * > Tys) const | llvm::TargetTransformInfoImplBase | inline |
getDataLayout() const | llvm::TargetTransformInfoImplBase | inline |
getEpilogueVectorizationMinVF() const | llvm::AArch64TTIImpl | |
BasicTTIImplBase< AArch64TTIImpl >::getEpilogueVectorizationMinVF() | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
getEstimatedNumberOfCaseClusters(const SwitchInst &SI, unsigned &JumpTableSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::getEstimatedNumberOfCaseClusters(const SwitchInst &SI, unsigned &JTSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const | llvm::TargetTransformInfoImplBase | inline |
getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind) const | llvm::TargetTransformInfoImplBase | inline |
getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index) | llvm::AArch64TTIImpl | |
llvm::TargetTransformInfoImplCRTPBase::getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index) const | llvm::TargetTransformInfoImplBase | inline |
getFlatAddressSpace() | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::getFlatAddressSpace() const | llvm::TargetTransformInfoImplBase | inline |
getFPOpCost(Type *Ty) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::getFPOpCost(Type *Ty) const | llvm::TargetTransformInfoImplBase | inline |
getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) | llvm::AArch64TTIImpl | |
llvm::TargetTransformInfoImplCRTPBase::getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const | llvm::TargetTransformInfoImplBase | inline |
getGEPCost(Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType, TTI::TargetCostKind CostKind) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::getGEPCost(Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType, TTI::TargetCostKind CostKind) const | llvm::TargetTransformInfoImplCRTPBase< T > | inline |
getGISelRematGlobalCost() const | llvm::AArch64TTIImpl | inline |
getInlineCallPenalty(const Function *F, const CallBase &Call, unsigned DefaultCallPenalty) const | llvm::AArch64TTIImpl | |
getInlinerVectorBonusPercent() const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
getInliningCostBenefitAnalysisProfitableMultiplier() const | llvm::TargetTransformInfoImplBase | inline |
getInliningCostBenefitAnalysisSavingsMultiplier() const | llvm::TargetTransformInfoImplBase | inline |
getInliningLastCallToStaticBonus() const | llvm::TargetTransformInfoImplBase | inline |
getInliningThresholdMultiplier() const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
getInstructionCost(const User *U, ArrayRef< const Value * > Operands, TTI::TargetCostKind CostKind) | llvm::TargetTransformInfoImplCRTPBase< T > | inline |
getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond=false, bool UseMaskForGaps=false) | llvm::AArch64TTIImpl | |
llvm::TargetTransformInfoImplCRTPBase::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) const | llvm::TargetTransformInfoImplBase | inline |
getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty) const | llvm::TargetTransformInfoImplBase | inline |
getIntImmCost(int64_t Val) | llvm::AArch64TTIImpl | |
getIntImmCost(const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind) | llvm::AArch64TTIImpl | |
BasicTTIImplBase< AArch64TTIImpl >::getIntImmCost(const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind) const | llvm::TargetTransformInfoImplBase | inline |
getIntImmCostInst(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind, Instruction *Inst=nullptr) | llvm::AArch64TTIImpl | |
BasicTTIImplBase< AArch64TTIImpl >::getIntImmCostInst(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind, Instruction *Inst=nullptr) const | llvm::TargetTransformInfoImplBase | inline |
getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind) | llvm::AArch64TTIImpl | |
BasicTTIImplBase< AArch64TTIImpl >::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind) const | llvm::TargetTransformInfoImplBase | inline |
getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) | llvm::AArch64TTIImpl | |
llvm::TargetTransformInfoImplCRTPBase::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const | llvm::TargetTransformInfoImplBase | inline |
getLoadStoreVecRegBitWidth(unsigned AddrSpace) const | llvm::TargetTransformInfoImplBase | inline |
getLoadVectorFactor(unsigned VF, unsigned LoadSize, unsigned ChainSizeInBytes, VectorType *VecTy) const | llvm::TargetTransformInfoImplBase | inline |
getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind) | llvm::AArch64TTIImpl | |
llvm::TargetTransformInfoImplCRTPBase::getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind) const | llvm::TargetTransformInfoImplBase | inline |
getMaximumVF(unsigned ElemWidth, unsigned Opcode) const | llvm::TargetTransformInfoImplBase | inline |
getMaxInterleaveFactor(ElementCount VF) | llvm::AArch64TTIImpl | |
llvm::TargetTransformInfoImplCRTPBase::getMaxInterleaveFactor(ElementCount VF) const | llvm::TargetTransformInfoImplBase | inline |
getMaxMemIntrinsicInlineSizeThreshold() const | llvm::TargetTransformInfoImplBase | inline |
getMaxNumArgs() const | llvm::TargetTransformInfoImplBase | inline |
getMaxNumElements(ElementCount VF) const | llvm::AArch64TTIImpl | inline |
getMaxPrefetchIterationsAhead() const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline virtual |
getMaxVScale() const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
getMemcpyCost(const Instruction *I) const | llvm::TargetTransformInfoImplBase | inline |
getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length, unsigned SrcAddrSpace, unsigned DestAddrSpace, Align SrcAlign, Align DestAlign, std::optional< uint32_t > AtomicElementSize) const | llvm::TargetTransformInfoImplBase | inline |
getMemcpyLoopResidualLoweringType(SmallVectorImpl< Type * > &OpsOut, LLVMContext &Context, unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace, Align SrcAlign, Align DestAlign, std::optional< uint32_t > AtomicCpySize) const | llvm::TargetTransformInfoImplBase | inline |
getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) | llvm::AArch64TTIImpl | |
llvm::TargetTransformInfoImplCRTPBase::getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo, const Instruction *I) const | llvm::TargetTransformInfoImplBase | inline |
getMinimumVF(unsigned ElemWidth, bool IsScalable) const | llvm::TargetTransformInfoImplBase | inline |
getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind) | llvm::AArch64TTIImpl | |
llvm::TargetTransformInfoImplCRTPBase::getMinMaxReductionCost(Intrinsic::ID IID, VectorType *, FastMathFlags, TTI::TargetCostKind) const | llvm::TargetTransformInfoImplBase | inline |
getMinPageSize() const | llvm::AArch64TTIImpl | inline |
getMinPrefetchStride(unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline virtual |
getMinTripCountTailFoldingThreshold() const | llvm::AArch64TTIImpl | inline |
getMinVectorRegisterBitWidth() const | llvm::AArch64TTIImpl | inline |
getMulAccReductionCost(bool IsUnsigned, Type *ResTy, VectorType *Ty, TTI::TargetCostKind CostKind) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::getMulAccReductionCost(bool IsUnsigned, Type *ResTy, VectorType *Ty, TTI::TargetCostKind CostKind) const | llvm::TargetTransformInfoImplBase | inline |
getNumberOfParts(Type *Tp) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::getNumberOfParts(Type *Tp) const | llvm::TargetTransformInfoImplBase | inline |
getNumberOfRegisters(unsigned ClassID) const | llvm::AArch64TTIImpl | inline |
getNumBytesToPadGlobalArray(unsigned Size, Type *ArrayType) const | llvm::TargetTransformInfoImplBase | inline |
getOperandsScalarizationOverhead(ArrayRef< const Value * > Args, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::getOperandsScalarizationOverhead(ArrayRef< const Value * > Args, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const | llvm::TargetTransformInfoImplBase | inline |
getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst, Type *ExpectedType) | llvm::AArch64TTIImpl | |
BasicTTIImplBase< AArch64TTIImpl >::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst, Type *ExpectedType) const | llvm::TargetTransformInfoImplBase | inline |
getOrderedReductionCost(unsigned Opcode, VectorType *Ty, TTI::TargetCostKind CostKind) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
getPeelingPreferences(Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP) | llvm::AArch64TTIImpl | |
llvm::TargetTransformInfoImplCRTPBase::getPeelingPreferences(Loop *, ScalarEvolution &, TTI::PeelingPreferences &) const | llvm::TargetTransformInfoImplBase | inline |
getPointersChainCost(ArrayRef< const Value * > Ptrs, const Value *Base, const TTI::PointersChainInfo &Info, Type *AccessTy, TTI::TargetCostKind CostKind) | llvm::TargetTransformInfoImplCRTPBase< T > | inline |
getPopcntSupport(unsigned TyWidth) | llvm::AArch64TTIImpl | |
BasicTTIImplBase< AArch64TTIImpl >::getPopcntSupport(unsigned IntTyWidthInBit) const | llvm::TargetTransformInfoImplBase | inline |
getPredicatedAddrSpace(const Value *V) const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
getPredictableBranchThreshold() const | llvm::TargetTransformInfoImplBase | inline |
getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const | llvm::TargetTransformInfoImplBase | inline |
getPreferredExpandedReductionShuffle(const IntrinsicInst *II) const | llvm::TargetTransformInfoImplBase | inline |
getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
getPreferredTailFoldingStyle(bool IVUpdateMayOverflow) const | llvm::AArch64TTIImpl | inline |
BasicTTIImplBase< AArch64TTIImpl >::getPreferredTailFoldingStyle(bool IVUpdateMayOverflow=true) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
getPrefetchDistance() const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline virtual |
getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const | llvm::AArch64TTIImpl | |
getRegisterClassForType(bool Vector, Type *Ty=nullptr) const | llvm::TargetTransformInfoImplBase | inline |
getRegisterClassName(unsigned ClassID) const | llvm::TargetTransformInfoImplBase | inline |
getRegUsageForType(Type *Ty) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::getRegUsageForType(Type *Ty) const | llvm::TargetTransformInfoImplBase | inline |
getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts, TTI::TargetCostKind CostKind) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, ArrayRef< Value * > VL={}) | llvm::AArch64TTIImpl | |
BasicTTIImplBase< AArch64TTIImpl >::getScalarizationOverhead(VectorType *InTy, bool Insert, bool Extract, TTI::TargetCostKind CostKind) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
BasicTTIImplBase< AArch64TTIImpl >::getScalarizationOverhead(VectorType *RetTy, ArrayRef< const Value * > Args, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, ArrayRef< Value * > VL={}) const | llvm::TargetTransformInfoImplBase | inline |
getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, StackOffset BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace) const | llvm::AArch64TTIImpl | |
BasicTTIImplBase< AArch64TTIImpl >::getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, StackOffset BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp, ArrayRef< int > Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) | llvm::AArch64TTIImpl | |
llvm::TargetTransformInfoImplCRTPBase::getShuffleCost(TTI::ShuffleKind Kind, VectorType *Ty, ArrayRef< int > Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const | llvm::TargetTransformInfoImplBase | inline |
getSpliceCost(VectorType *Tp, int Index) | llvm::AArch64TTIImpl | |
getStoreMinimumVF(unsigned VF, Type *ScalarMemTy, Type *ScalarValTy) const | llvm::AArch64TTIImpl | inline |
getStoreVectorFactor(unsigned VF, unsigned StoreSize, unsigned ChainSizeInBytes, VectorType *VecTy) const | llvm::TargetTransformInfoImplBase | inline |
getStridedMemoryOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::getStridedMemoryOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const | llvm::TargetTransformInfoImplBase | inline |
getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) | llvm::AArch64TTIImpl | |
BasicTTIImplBase< AArch64TTIImpl >::getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const | llvm::TargetTransformInfoImplBase | inline |
getTreeReductionCost(unsigned Opcode, VectorType *Ty, TTI::TargetCostKind CostKind) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
getTypeLegalizationCost(Type *Ty) const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
getUnrollingPreferences(Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE) | llvm::AArch64TTIImpl | |
llvm::TargetTransformInfoImplCRTPBase::getUnrollingPreferences(Loop *, ScalarEvolution &, TTI::UnrollingPreferences &, OptimizationRemarkEmitter *) const | llvm::TargetTransformInfoImplBase | inline |
getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, Value *Op0, Value *Op1) | llvm::AArch64TTIImpl | |
getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, Value *Scalar, ArrayRef< std::tuple< Value *, User *, int > > ScalarUserAndIdx) | llvm::AArch64TTIImpl | |
getVectorInstrCost(const Instruction &I, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) | llvm::AArch64TTIImpl | |
llvm::TargetTransformInfoImplCRTPBase::getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, Value *Op0, Value *Op1) const | llvm::TargetTransformInfoImplBase | inline |
llvm::TargetTransformInfoImplCRTPBase::getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, Value *Scalar, ArrayRef< std::tuple< Value *, User *, int > > ScalarUserAndIdx) const | llvm::TargetTransformInfoImplBase | inline |
llvm::TargetTransformInfoImplCRTPBase::getVectorInstrCost(const Instruction &I, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) const | llvm::TargetTransformInfoImplBase | inline |
getVectorSplitCost() | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
getVPLegalizationStrategy(const VPIntrinsic &PI) const | llvm::TargetTransformInfoImplBase | inline |
getVPMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, const Instruction *I) const | llvm::TargetTransformInfoImplBase | inline |
getVScaleForTuning() const | llvm::AArch64TTIImpl | inline |
hasActiveVectorLength(unsigned Opcode, Type *DataType, Align Alignment) const | llvm::TargetTransformInfoImplBase | inline |
hasArmWideBranch(bool) const | llvm::TargetTransformInfoImplBase | inline |
hasBranchDivergence(const Function *F=nullptr) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::hasBranchDivergence(const Function *F=nullptr) const | llvm::TargetTransformInfoImplBase | inline |
hasConditionalLoadStoreForType(Type *Ty) const | llvm::TargetTransformInfoImplBase | inline |
hasDivRemOp(Type *DataType, bool IsSigned) const | llvm::TargetTransformInfoImplBase | inline |
hasVolatileVariant(Instruction *I, unsigned AddrSpace) const | llvm::TargetTransformInfoImplBase | inline |
haveFastSqrt(Type *Ty) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::haveFastSqrt(Type *Ty) const | llvm::TargetTransformInfoImplBase | inline |
improveShuffleKindFromMask(TTI::ShuffleKind Kind, ArrayRef< int > Mask, VectorType *Ty, int &Index, VectorType *&SubTy) const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const | llvm::AArch64TTIImpl | |
BasicTTIImplBase< AArch64TTIImpl >::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
isAlwaysUniform(const Value *V) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::isAlwaysUniform(const Value *V) const | llvm::TargetTransformInfoImplBase | inline |
isConstantStridedAccessLessThan(ScalarEvolution *SE, const SCEV *Ptr, int64_t MergeDistance) const | llvm::TargetTransformInfoImplBase | inline protected |
isElementTypeLegalForScalableVector(Type *Ty) const | llvm::AArch64TTIImpl | inline |
isExpensiveToSpeculativelyExecute(const Instruction *I) | llvm::TargetTransformInfoImplCRTPBase< T > | inline |
isExtPartOfAvgExpr(const Instruction *ExtUser, Type *Dst, Type *Src) | llvm::AArch64TTIImpl | |
isFCmpOrdCheaperThanFCmpZero(Type *Ty) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::isFCmpOrdCheaperThanFCmpZero(Type *Ty) const | llvm::TargetTransformInfoImplBase | inline |
isFPVectorizationPotentiallyUnsafe() const | llvm::TargetTransformInfoImplBase | inline |
isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const | llvm::TargetTransformInfoImplBase | inline |
isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty, const DataLayout &DL) const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty, const DataLayout &DL) const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
isLegalAddImmediate(int64_t imm) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::isLegalAddImmediate(int64_t Imm) const | llvm::TargetTransformInfoImplBase | inline |
isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace, Instruction *I=nullptr, int64_t ScalableOffset=0) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace, Instruction *I=nullptr, int64_t ScalableOffset=0) const | llvm::TargetTransformInfoImplBase | inline |
isLegalAddScalableImmediate(int64_t Imm) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::isLegalAddScalableImmediate(int64_t Imm) const | llvm::TargetTransformInfoImplBase | inline |
isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1, const SmallBitVector &OpcodeMask) const | llvm::TargetTransformInfoImplBase | inline |
isLegalBroadcastLoad(Type *ElementTy, ElementCount NumElements) const | llvm::AArch64TTIImpl | inline |
isLegalICmpImmediate(int64_t imm) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::isLegalICmpImmediate(int64_t Imm) const | llvm::TargetTransformInfoImplBase | inline |
isLegalInterleavedAccessType(VectorType *VTy, unsigned Factor, Align Alignment, unsigned AddrSpace) | llvm::TargetTransformInfoImplBase | inline |
isLegalMaskedCompressStore(Type *DataType, Align Alignment) const | llvm::TargetTransformInfoImplBase | inline |
isLegalMaskedExpandLoad(Type *DataType, Align Alignment) const | llvm::TargetTransformInfoImplBase | inline |
isLegalMaskedGather(Type *DataType, Align Alignment) const | llvm::AArch64TTIImpl | inline |
isLegalMaskedGatherScatter(Type *DataType) const | llvm::AArch64TTIImpl | inline |
isLegalMaskedLoad(Type *DataType, Align Alignment) | llvm::AArch64TTIImpl | inline |
BasicTTIImplBase< AArch64TTIImpl >::isLegalMaskedLoad(Type *DataType, Align Alignment) const | llvm::TargetTransformInfoImplBase | inline |
isLegalMaskedLoadStore(Type *DataType, Align Alignment) | llvm::AArch64TTIImpl | inline |
isLegalMaskedScatter(Type *DataType, Align Alignment) const | llvm::AArch64TTIImpl | inline |
isLegalMaskedStore(Type *DataType, Align Alignment) | llvm::AArch64TTIImpl | inline |
BasicTTIImplBase< AArch64TTIImpl >::isLegalMaskedStore(Type *DataType, Align Alignment) const | llvm::TargetTransformInfoImplBase | inline |
isLegalMaskedVectorHistogram(Type *AddrType, Type *DataType) const | llvm::TargetTransformInfoImplBase | inline |
isLegalNTLoad(Type *DataType, Align Alignment) | llvm::AArch64TTIImpl | inline |
BasicTTIImplBase< AArch64TTIImpl >::isLegalNTLoad(Type *DataType, Align Alignment) const | llvm::TargetTransformInfoImplBase | inline |
isLegalNTStore(Type *DataType, Align Alignment) | llvm::AArch64TTIImpl | inline |
BasicTTIImplBase< AArch64TTIImpl >::isLegalNTStore(Type *DataType, Align Alignment) const | llvm::TargetTransformInfoImplBase | inline |
isLegalNTStoreLoad(Type *DataType, Align Alignment) | llvm::AArch64TTIImpl | inline |
isLegalStridedLoadStore(Type *DataType, Align Alignment) const | llvm::TargetTransformInfoImplBase | inline |
isLegalToVectorizeLoad(LoadInst *LI) const | llvm::TargetTransformInfoImplBase | inline |
isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const | llvm::TargetTransformInfoImplBase | inline |
isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc, ElementCount VF) const | llvm::AArch64TTIImpl | |
isLegalToVectorizeStore(StoreInst *SI) const | llvm::TargetTransformInfoImplBase | inline |
isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const | llvm::TargetTransformInfoImplBase | inline |
isLoweredToCall(const Function *F) const | llvm::TargetTransformInfoImplBase | inline |
isLSRCostLess(const TargetTransformInfo::LSRCost &C1, const TargetTransformInfo::LSRCost &C2) | llvm::AArch64TTIImpl | |
BasicTTIImplBase< AArch64TTIImpl >::isLSRCostLess(TTI::LSRCost C1, TTI::LSRCost C2) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::isLSRCostLess(const TTI::LSRCost &C1, const TTI::LSRCost &C2) const | llvm::TargetTransformInfoImplBase | inline |
isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
isNumRegsMajorCostOfLSR() | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::isNumRegsMajorCostOfLSR() const | llvm::TargetTransformInfoImplBase | inline |
isProfitableLSRChainElement(Instruction *I) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::isProfitableLSRChainElement(Instruction *I) const | llvm::TargetTransformInfoImplBase | inline |
isProfitableToHoist(Instruction *I) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::isProfitableToHoist(Instruction *I) const | llvm::TargetTransformInfoImplBase | inline |
isProfitableToSinkOperands(Instruction *I, SmallVectorImpl< Use * > &Ops) const | llvm::AArch64TTIImpl | |
isSingleThreaded() const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
isSourceOfDivergence(const Value *V) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::isSourceOfDivergence(const Value *V) const | llvm::TargetTransformInfoImplBase | inline |
isStridedAccess(const SCEV *Ptr) const | llvm::TargetTransformInfoImplBase | inlineprotected |
isTargetIntrinsicTriviallyScalarizable(Intrinsic::ID ID) const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, int OpdIdx) const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx) const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
isTargetIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID, int RetIdx) const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
isTruncateFree(Type *Ty1, Type *Ty2) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::isTruncateFree(Type *Ty1, Type *Ty2) const | llvm::TargetTransformInfoImplBase | inline |
isTypeLegal(Type *Ty) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::isTypeLegal(Type *Ty) const | llvm::TargetTransformInfoImplBase | inline |
isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
isVectorShiftByScalarCheap(Type *Ty) const | llvm::TargetTransformInfoImplBase | inline |
isVScaleKnownToBeAPowerOfTwo() const | llvm::AArch64TTIImpl | inline |
LSRWithInstrQueries() const | llvm::TargetTransformInfoImplBase | inline |
minRequiredElementSize(const Value *Val, bool &isSigned) const | llvm::TargetTransformInfoImplBase | inlineprotected |
preferEpilogueVectorization() const | llvm::TargetTransformInfoImplBase | inline |
preferFixedOverScalableIfEqualCost() const | llvm::AArch64TTIImpl | inline |
preferInLoopReduction(unsigned Opcode, Type *Ty, TTI::ReductionFlags Flags) const | llvm::TargetTransformInfoImplBase | inline |
preferPredicatedReductionSelect(unsigned Opcode, Type *Ty, TTI::ReductionFlags Flags) const | llvm::AArch64TTIImpl | inline |
preferPredicateOverEpilogue(TailFoldingInfo *TFI) | llvm::AArch64TTIImpl | |
llvm::TargetTransformInfoImplCRTPBase::preferPredicateOverEpilogue(TailFoldingInfo *TFI) const | llvm::TargetTransformInfoImplBase | inline |
prefersVectorizedAddressing() const | llvm::AArch64TTIImpl | |
preferToKeepConstantsAttached(const Instruction &Inst, const Function &Fn) const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV, Value *NewV) const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
shouldBuildLookupTables() | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::shouldBuildLookupTables() const | llvm::TargetTransformInfoImplBase | inline |
shouldBuildLookupTablesForConstant(Constant *C) const | llvm::TargetTransformInfoImplBase | inline |
shouldBuildRelLookupTables() const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
shouldConsiderAddressTypePromotion(const Instruction &I, bool &AllowPromotionWithoutCommonHeader) | llvm::AArch64TTIImpl | |
BasicTTIImplBase< AArch64TTIImpl >::shouldConsiderAddressTypePromotion(const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const | llvm::TargetTransformInfoImplBase | inline |
shouldDropLSRSolutionIfLessProfitable() const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
shouldExpandReduction(const IntrinsicInst *II) const | llvm::AArch64TTIImpl | inline |
shouldMaximizeVectorBandwidth(TargetTransformInfo::RegisterKind K) const | llvm::AArch64TTIImpl | |
shouldPrefetchAddressSpace(unsigned AS) const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inlinevirtual |
shouldTreatInstructionLikeSelect(const Instruction *I) | llvm::AArch64TTIImpl | |
simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
llvm::TargetTransformInfoImplCRTPBase::simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) const | llvm::TargetTransformInfoImplBase | inline |
simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const | llvm::AArch64TTIImpl | |
BasicTTIImplBase< AArch64TTIImpl >::simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
supportsEfficientVectorElementLoadStore() const | llvm::TargetTransformInfoImplBase | inline |
supportsScalableVectors() const | llvm::AArch64TTIImpl | inline |
supportsTailCallFor(const CallBase *CB) const | llvm::TargetTransformInfoImplCRTPBase< T > | inline |
supportsTailCalls() const | llvm::TargetTransformInfoImplBase | inline |
TargetTransformInfoImplBase(const DataLayout &DL) | llvm::TargetTransformInfoImplBase | inlineexplicitprotected |
TargetTransformInfoImplBase(const TargetTransformInfoImplBase &Arg)=default | llvm::TargetTransformInfoImplBase | |
TargetTransformInfoImplBase(TargetTransformInfoImplBase &&Arg) | llvm::TargetTransformInfoImplBase | inline |
TargetTransformInfoImplCRTPBase(const DataLayout &DL) | llvm::TargetTransformInfoImplCRTPBase< T > | inlineexplicitprotected |
useAA() const | llvm::BasicTTIImplBase< AArch64TTIImpl > | inline |
useColdCCForColdCall(Function &F) const | llvm::TargetTransformInfoImplBase | inline |
useNeonVector(const Type *Ty) const | llvm::AArch64TTIImpl | |
~BasicTTIImplBase()=default | llvm::BasicTTIImplBase< AArch64TTIImpl > | protectedvirtual |