|
| BPFTTIImpl (const BPFTargetMachine *TM, const Function &F) |
|
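BPFTTIImpl is not normally constructed by hand; a pass asks the analysis framework for a function's TargetTransformInfo, and the BPF target machine answers with this implementation. A minimal sketch, assuming a new-pass-manager FunctionAnalysisManager that was registered with the BPF TargetMachine's TargetIRAnalysis:

```cpp
// Minimal sketch, assuming FAM was set up with TM->getTargetIRAnalysis()
// for a BPF TargetMachine; the returned TargetTransformInfo dispatches to
// the BPFTTIImpl hooks listed on this page.
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"

using namespace llvm;

static TargetTransformInfo &getTTIForFunction(Function &F,
                                              FunctionAnalysisManager &FAM) {
  return FAM.getResult<TargetIRAnalysis>(F);
}
```
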
int | getIntImmCost (const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind) |
|
InstructionCost | getCmpSelInstrCost (unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, const llvm::Instruction *I=nullptr) |
|
InstructionCost | getArithmeticInstrCost (unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args=std::nullopt, const Instruction *CxtI=nullptr) |
|
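The cost hooks above (getIntImmCost, getCmpSelInstrCost, getArithmeticInstrCost) are the ones BPF overrides. A minimal sketch of how mid-end code queries them through the public TargetTransformInfo interface; the types, immediate value, and cost kind below are illustrative, not BPF policy:

```cpp
// Illustrative queries against the public TargetTransformInfo interface.
#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Type.h"

using namespace llvm;

static void queryBPFCostHooks(const TargetTransformInfo &TTI,
                              LLVMContext &Ctx) {
  Type *I64 = Type::getInt64Ty(Ctx);
  auto Kind = TargetTransformInfo::TCK_RecipThroughput;

  // Cost of materializing a 64-bit immediate.
  InstructionCost ImmCost = TTI.getIntImmCost(APInt(64, 0x12345678), I64, Kind);

  // Cost of a 64-bit add.
  InstructionCost AddCost =
      TTI.getArithmeticInstrCost(Instruction::Add, I64, Kind);

  // Cost of an integer equality compare.
  InstructionCost CmpCost = TTI.getCmpSelInstrCost(
      Instruction::ICmp, I64, Type::getInt1Ty(Ctx), CmpInst::ICMP_EQ, Kind);

  (void)ImmCost; (void)AddCost; (void)CmpCost;
}
```
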
TTI::MemCmpExpansionOptions | enableMemCmpExpansion (bool OptSize, bool IsZeroCmp) const |
|
unsigned | getMaxNumArgs () const |
|
bool | allowsMisalignedMemoryAccesses (LLVMContext &Context, unsigned BitWidth, unsigned AddressSpace, Align Alignment, unsigned *Fast) const |
|
bool | hasBranchDivergence (const Function *F=nullptr) |
|
bool | isSourceOfDivergence (const Value *V) |
|
bool | isAlwaysUniform (const Value *V) |
|
bool | isValidAddrSpaceCast (unsigned FromAS, unsigned ToAS) const |
|
bool | addrspacesMayAlias (unsigned AS0, unsigned AS1) const |
|
unsigned | getFlatAddressSpace () |
|
bool | collectFlatAddressOperands (SmallVectorImpl< int > &OpIndexes, Intrinsic::ID IID) const |
|
bool | isNoopAddrSpaceCast (unsigned FromAS, unsigned ToAS) const |
|
unsigned | getAssumedAddrSpace (const Value *V) const |
|
bool | isSingleThreaded () const |
|
std::pair< const Value *, unsigned > | getPredicatedAddrSpace (const Value *V) const |
|
Value * | rewriteIntrinsicWithAddressSpace (IntrinsicInst *II, Value *OldV, Value *NewV) const |
|
bool | isLegalAddImmediate (int64_t imm) |
|
bool | isLegalAddScalableImmediate (int64_t Imm) |
|
bool | isLegalICmpImmediate (int64_t imm) |
|
bool | isLegalAddressingMode (Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace, Instruction *I=nullptr, int64_t ScalableOffset=0) |
|
int64_t | getPreferredLargeGEPBaseOffset (int64_t MinOffset, int64_t MaxOffset) |
|
unsigned | getStoreMinimumVF (unsigned VF, Type *ScalarMemTy, Type *ScalarValTy) const |
|
bool | isIndexedLoadLegal (TTI::MemIndexedMode M, Type *Ty, const DataLayout &DL) const |
|
bool | isIndexedStoreLegal (TTI::MemIndexedMode M, Type *Ty, const DataLayout &DL) const |
|
bool | isLSRCostLess (TTI::LSRCost C1, TTI::LSRCost C2) |
|
bool | isNumRegsMajorCostOfLSR () |
|
bool | shouldDropLSRSolutionIfLessProfitable () const |
|
bool | isProfitableLSRChainElement (Instruction *I) |
|
InstructionCost | getScalingFactorCost (Type *Ty, GlobalValue *BaseGV, StackOffset BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace) |
|
bool | isTruncateFree (Type *Ty1, Type *Ty2) |
|
bool | isProfitableToHoist (Instruction *I) |
|
bool | useAA () const |
|
bool | isTypeLegal (Type *Ty) |
|
unsigned | getRegUsageForType (Type *Ty) |
|
InstructionCost | getGEPCost (Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType, TTI::TargetCostKind CostKind) |
|
unsigned | getEstimatedNumberOfCaseClusters (const SwitchInst &SI, unsigned &JumpTableSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) |
|
bool | shouldBuildLookupTables () |
|
bool | shouldBuildRelLookupTables () const |
|
bool | haveFastSqrt (Type *Ty) |
|
bool | isFCmpOrdCheaperThanFCmpZero (Type *Ty) |
|
InstructionCost | getFPOpCost (Type *Ty) |
|
bool | preferToKeepConstantsAttached (const Instruction &Inst, const Function &Fn) const |
|
unsigned | getInliningThresholdMultiplier () const |
|
unsigned | adjustInliningThreshold (const CallBase *CB) |
|
unsigned | getCallerAllocaCost (const CallBase *CB, const AllocaInst *AI) const |
|
int | getInlinerVectorBonusPercent () const |
|
void | getUnrollingPreferences (Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE) |
|
void | getPeelingPreferences (Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP) |
|
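getUnrollingPreferences and getPeelingPreferences let a target steer the LoopUnroll pass by editing the preference structures they are handed. A hypothetical sketch of the kind of adjustments an override might make; the field values are illustrative and not BPF's actual policy:

```cpp
// Hypothetical tuning of the unroller's preferences (illustrative values).
#include "llvm/Analysis/TargetTransformInfo.h"

using namespace llvm;

static void tuneUnrolling(TargetTransformInfo::UnrollingPreferences &UP) {
  UP.Partial = true;   // allow partial unrolling when the trip count is unknown
  UP.Runtime = false;  // but avoid runtime unrolling (illustrative choice)
  UP.MaxCount = 4;     // cap the unroll factor (illustrative value)
}
```
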
bool | isHardwareLoopProfitable (Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) |
|
bool | preferPredicateOverEpilogue (TailFoldingInfo *TFI) |
|
TailFoldingStyle | getPreferredTailFoldingStyle (bool IVUpdateMayOverflow=true) |
|
std::optional< Instruction * > | instCombineIntrinsic (InstCombiner &IC, IntrinsicInst &II) |
|
std::optional< Value * > | simplifyDemandedUseBitsIntrinsic (InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) |
|
std::optional< Value * > | simplifyDemandedVectorEltsIntrinsic (InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) |
|
virtual std::optional< unsigned > | getCacheSize (TargetTransformInfo::CacheLevel Level) const |
|
virtual std::optional< unsigned > | getCacheAssociativity (TargetTransformInfo::CacheLevel Level) const |
|
virtual unsigned | getCacheLineSize () const |
|
virtual unsigned | getPrefetchDistance () const |
|
virtual unsigned | getMinPrefetchStride (unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const |
|
virtual unsigned | getMaxPrefetchIterationsAhead () const |
|
virtual bool | enableWritePrefetching () const |
|
virtual bool | shouldPrefetchAddressSpace (unsigned AS) const |
|
TypeSize | getRegisterBitWidth (TargetTransformInfo::RegisterKind K) const |
|
std::optional< unsigned > | getMaxVScale () const |
|
std::optional< unsigned > | getVScaleForTuning () const |
|
bool | isVScaleKnownToBeAPowerOfTwo () const |
|
InstructionCost | getScalarizationOverhead (VectorType *InTy, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind) |
| Estimate the overhead of scalarizing an instruction.
|
|
InstructionCost | getScalarizationOverhead (VectorType *InTy, bool Insert, bool Extract, TTI::TargetCostKind CostKind) |
| Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
|
|
InstructionCost | getScalarizationOverhead (VectorType *RetTy, ArrayRef< const Value * > Args, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) |
| Estimate the overhead of scalarizing the inputs and outputs of an instruction, with return type RetTy and arguments Args of type Tys.
|
|
InstructionCost | getOperandsScalarizationOverhead (ArrayRef< const Value * > Args, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) |
| Estimate the overhead of scalarizing an instruction's unique non-constant operands.
| Estimate the overhead of scalarizing an instruction's unique non-constant operands.
|
|
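A minimal sketch of querying the first overload through the public TargetTransformInfo interface, asking what it costs to insert and then extract every lane of a <4 x i32> value; the type and cost kind are illustrative:

```cpp
// Illustrative scalarization-overhead query (not BPF-specific).
#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DerivedTypes.h"

using namespace llvm;

static InstructionCost scalarizationCost(const TargetTransformInfo &TTI,
                                         LLVMContext &Ctx) {
  auto *VecTy = FixedVectorType::get(Type::getInt32Ty(Ctx), 4);
  APInt AllElts = APInt::getAllOnes(4); // every lane is demanded
  // Insert = true, Extract = true: account both for building the vector from
  // scalars and for pulling the scalars back out.
  return TTI.getScalarizationOverhead(
      VecTy, AllElts, /*Insert=*/true, /*Extract=*/true,
      TargetTransformInfo::TCK_RecipThroughput);
}
```
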
std::pair< InstructionCost, MVT > | getTypeLegalizationCost (Type *Ty) const |
| Estimate the cost of type-legalization and the legalized type.
|
|
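The returned pair is the idiom most BasicTTIImplBase cost hooks are built on. A fragment-level sketch, assuming it sits inside a class derived from BasicTTIImplBase (getTypeLegalizationCost is a member there and cannot be called free-standing); the per-piece cost is illustrative:

```cpp
// Fragment of a hypothetical cost hook inside a BasicTTIImplBase-derived class.
InstructionCost getHypotheticalOpCost(Type *Ty) {
  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
  // LT.first  : how many legal-type pieces the value is split into
  // LT.second : the legalized machine value type of each piece
  return LT.first; // assume one unit of cost per legalized piece (illustrative)
}
```
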
unsigned | getMaxInterleaveFactor (ElementCount VF) |
|
InstructionCost | getArithmeticInstrCost (unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Opd1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Opd2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args=std::nullopt, const Instruction *CxtI=nullptr) |
|
TTI::ShuffleKind | improveShuffleKindFromMask (TTI::ShuffleKind Kind, ArrayRef< int > Mask, VectorType *Ty, int &Index, VectorType *&SubTy) const |
|
InstructionCost | getShuffleCost (TTI::ShuffleKind Kind, VectorType *Tp, ArrayRef< int > Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef< const Value * > Args=std::nullopt, const Instruction *CxtI=nullptr) |
|
InstructionCost | getCastInstrCost (unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) |
|
InstructionCost | getExtractWithExtendCost (unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index) |
|
InstructionCost | getCFInstrCost (unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) |
|
InstructionCost | getCmpSelInstrCost (unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) |
|
InstructionCost | getVectorInstrCost (unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, Value *Op0, Value *Op1) |
|
InstructionCost | getVectorInstrCost (const Instruction &I, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) |
|
InstructionCost | getReplicationShuffleCost (Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts, TTI::TargetCostKind CostKind) |
|
InstructionCost | getMemoryOpCost (unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) |
|
InstructionCost | getMaskedMemoryOpCost (unsigned Opcode, Type *DataTy, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind) |
|
InstructionCost | getGatherScatterOpCost (unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) |
|
InstructionCost | getStridedMemoryOpCost (unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) |
|
InstructionCost | getInterleavedMemoryOpCost (unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond=false, bool UseMaskForGaps=false) |
|
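A minimal sketch comparing what a target reports for a plain vector load versus a masked load of the same type, the kind of query the loop vectorizer makes when weighing predication; the type, alignment, and address space are illustrative:

```cpp
// Illustrative memory-op cost comparison (not BPF-specific).
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Support/Alignment.h"

using namespace llvm;

static void compareLoadCosts(const TargetTransformInfo &TTI, LLVMContext &Ctx) {
  auto *VecTy = FixedVectorType::get(Type::getInt32Ty(Ctx), 4);
  auto Kind = TargetTransformInfo::TCK_RecipThroughput;

  InstructionCost Plain = TTI.getMemoryOpCost(
      Instruction::Load, VecTy, Align(16), /*AddressSpace=*/0, Kind);
  InstructionCost Masked = TTI.getMaskedMemoryOpCost(
      Instruction::Load, VecTy, Align(16), /*AddressSpace=*/0, Kind);

  (void)Plain; (void)Masked;
}
```
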
InstructionCost | getIntrinsicInstrCost (const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) |
| Get intrinsic cost based on arguments.
|
|
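A minimal sketch of building an IntrinsicCostAttributes descriptor and costing an llvm.ctpop.i64 call through the public interface; the chosen intrinsic and cost kind are illustrative:

```cpp
// Illustrative intrinsic-cost query (not BPF-specific).
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"

using namespace llvm;

static InstructionCost ctpopCost(const TargetTransformInfo &TTI,
                                 LLVMContext &Ctx) {
  Type *I64 = Type::getInt64Ty(Ctx);
  // Argument and return type are both i64 for llvm.ctpop.i64.
  IntrinsicCostAttributes ICA(Intrinsic::ctpop, I64, {I64});
  return TTI.getIntrinsicInstrCost(ICA,
                                   TargetTransformInfo::TCK_RecipThroughput);
}
```
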
InstructionCost | getTypeBasedIntrinsicInstrCost (const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) |
| Get intrinsic cost based on argument types.
|
|
InstructionCost | getCallInstrCost (Function *F, Type *RetTy, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) |
| Compute the cost of the given call instruction.
|
|
unsigned | getNumberOfParts (Type *Tp) |
|
InstructionCost | getAddressComputationCost (Type *Ty, ScalarEvolution *, const SCEV *) |
|
InstructionCost | getTreeReductionCost (unsigned Opcode, VectorType *Ty, TTI::TargetCostKind CostKind) |
| Try to calculate arithmetic and shuffle op costs for reduction intrinsics.
|
|
InstructionCost | getOrderedReductionCost (unsigned Opcode, VectorType *Ty, TTI::TargetCostKind CostKind) |
| Try to calculate the cost of performing strict (in-order) reductions, which involves doing a sequence of floating point additions in lane order, starting with an initial value.
|
|
InstructionCost | getArithmeticReductionCost (unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind) |
|
InstructionCost | getMinMaxReductionCost (Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind) |
| Try to calculate op costs for min/max reduction operations.
|
|
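A minimal sketch of costing an add reduction and a signed-max reduction over <8 x i32>, as the vectorizers do when weighing a vector reduction against scalar code; the types and cost kind are illustrative:

```cpp
// Illustrative reduction-cost queries (not BPF-specific).
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/FMF.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include <optional>

using namespace llvm;

static void reductionCosts(const TargetTransformInfo &TTI, LLVMContext &Ctx) {
  auto *VecTy = FixedVectorType::get(Type::getInt32Ty(Ctx), 8);
  auto Kind = TargetTransformInfo::TCK_RecipThroughput;

  // Integer add reduction: no fast-math flags apply.
  InstructionCost AddRdx = TTI.getArithmeticReductionCost(
      Instruction::Add, VecTy, /*FMF=*/std::nullopt, Kind);

  // Signed-max reduction, identified by the scalar smax intrinsic ID.
  InstructionCost MaxRdx =
      TTI.getMinMaxReductionCost(Intrinsic::smax, VecTy, FastMathFlags(), Kind);

  (void)AddRdx; (void)MaxRdx;
}
```
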
InstructionCost | getExtendedReductionCost (unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind) |
|
InstructionCost | getMulAccReductionCost (bool IsUnsigned, Type *ResTy, VectorType *Ty, TTI::TargetCostKind CostKind) |
|
InstructionCost | getVectorSplitCost () |
|
InstructionCost | getGEPCost (Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType, TTI::TargetCostKind CostKind) |
|
InstructionCost | getPointersChainCost (ArrayRef< const Value * > Ptrs, const Value *Base, const TTI::PointersChainInfo &Info, Type *AccessTy, TTI::TargetCostKind CostKind) |
|
InstructionCost | getInstructionCost (const User *U, ArrayRef< const Value * > Operands, TTI::TargetCostKind CostKind) |
|
bool | isExpensiveToSpeculativelyExecute (const Instruction *I) |
|
bool | supportsTailCallFor (const CallBase *CB) const |
|
InstructionCost | getGEPCost (Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType, TTI::TargetCostKind CostKind) const |
|
| TargetTransformInfoImplBase (const TargetTransformInfoImplBase &Arg)=default |
|
| TargetTransformInfoImplBase (TargetTransformInfoImplBase &&Arg) |
|
const DataLayout & | getDataLayout () const |
|
InstructionCost | getGEPCost (Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType, TTI::TargetCostKind CostKind) const |
|
unsigned | getEstimatedNumberOfCaseClusters (const SwitchInst &SI, unsigned &JTSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const |
|
unsigned | getInliningThresholdMultiplier () const |
|
unsigned | getInliningCostBenefitAnalysisSavingsMultiplier () const |
|
unsigned | getInliningCostBenefitAnalysisProfitableMultiplier () const |
|
unsigned | adjustInliningThreshold (const CallBase *CB) const |
|
unsigned | getCallerAllocaCost (const CallBase *CB, const AllocaInst *AI) const |
|
int | getInlinerVectorBonusPercent () const |
|
InstructionCost | getMemcpyCost (const Instruction *I) const |
|
uint64_t | getMaxMemIntrinsicInlineSizeThreshold () const |
|
BranchProbability | getPredictableBranchThreshold () const |
|
InstructionCost | getBranchMispredictPenalty () const |
|
bool | hasBranchDivergence (const Function *F=nullptr) const |
|
bool | isSourceOfDivergence (const Value *V) const |
|
bool | isAlwaysUniform (const Value *V) const |
|
bool | isValidAddrSpaceCast (unsigned FromAS, unsigned ToAS) const |
|
bool | addrspacesMayAlias (unsigned AS0, unsigned AS1) const |
|
unsigned | getFlatAddressSpace () const |
|
bool | collectFlatAddressOperands (SmallVectorImpl< int > &OpIndexes, Intrinsic::ID IID) const |
|
bool | isNoopAddrSpaceCast (unsigned, unsigned) const |
|
bool | canHaveNonUndefGlobalInitializerInAddressSpace (unsigned AS) const |
|
unsigned | getAssumedAddrSpace (const Value *V) const |
|
bool | isSingleThreaded () const |
|
std::pair< const Value *, unsigned > | getPredicatedAddrSpace (const Value *V) const |
|
Value * | rewriteIntrinsicWithAddressSpace (IntrinsicInst *II, Value *OldV, Value *NewV) const |
|
bool | isLoweredToCall (const Function *F) const |
|
bool | isHardwareLoopProfitable (Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const |
|
bool | preferPredicateOverEpilogue (TailFoldingInfo *TFI) const |
|
TailFoldingStyle | getPreferredTailFoldingStyle (bool IVUpdateMayOverflow=true) const |
|
std::optional< Instruction * > | instCombineIntrinsic (InstCombiner &IC, IntrinsicInst &II) const |
|
std::optional< Value * > | simplifyDemandedUseBitsIntrinsic (InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) const |
|
std::optional< Value * > | simplifyDemandedVectorEltsIntrinsic (InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const |
|
void | getUnrollingPreferences (Loop *, ScalarEvolution &, TTI::UnrollingPreferences &, OptimizationRemarkEmitter *) const |
|
void | getPeelingPreferences (Loop *, ScalarEvolution &, TTI::PeelingPreferences &) const |
|
bool | isLegalAddImmediate (int64_t Imm) const |
|
bool | isLegalAddScalableImmediate (int64_t Imm) const |
|
bool | isLegalICmpImmediate (int64_t Imm) const |
|
bool | isLegalAddressingMode (Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace, Instruction *I=nullptr, int64_t ScalableOffset=0) const |
|
bool | isLSRCostLess (const TTI::LSRCost &C1, const TTI::LSRCost &C2) const |
|
bool | isNumRegsMajorCostOfLSR () const |
|
bool | shouldDropLSRSolutionIfLessProfitable () const |
|
bool | isProfitableLSRChainElement (Instruction *I) const |
|
bool | canMacroFuseCmp () const |
|
bool | canSaveCmp (Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC, TargetLibraryInfo *LibInfo) const |
|
TTI::AddressingModeKind | getPreferredAddressingMode (const Loop *L, ScalarEvolution *SE) const |
|
bool | isLegalMaskedStore (Type *DataType, Align Alignment) const |
|
bool | isLegalMaskedLoad (Type *DataType, Align Alignment) const |
|
bool | isLegalNTStore (Type *DataType, Align Alignment) const |
|
bool | isLegalNTLoad (Type *DataType, Align Alignment) const |
|
bool | isLegalBroadcastLoad (Type *ElementTy, ElementCount NumElements) const |
|
bool | isLegalMaskedScatter (Type *DataType, Align Alignment) const |
|
bool | isLegalMaskedGather (Type *DataType, Align Alignment) const |
|
bool | forceScalarizeMaskedGather (VectorType *DataType, Align Alignment) const |
|
bool | forceScalarizeMaskedScatter (VectorType *DataType, Align Alignment) const |
|
bool | isLegalMaskedCompressStore (Type *DataType, Align Alignment) const |
|
bool | isLegalAltInstr (VectorType *VecTy, unsigned Opcode0, unsigned Opcode1, const SmallBitVector &OpcodeMask) const |
|
bool | isLegalMaskedExpandLoad (Type *DataType, Align Alignment) const |
|
bool | isLegalStridedLoadStore (Type *DataType, Align Alignment) const |
|
bool | isLegalMaskedVectorHistogram (Type *AddrType, Type *DataType) const |
|
bool | enableOrderedReductions () const |
|
bool | hasDivRemOp (Type *DataType, bool IsSigned) const |
|
bool | hasVolatileVariant (Instruction *I, unsigned AddrSpace) const |
|
bool | prefersVectorizedAddressing () const |
|
InstructionCost | getScalingFactorCost (Type *Ty, GlobalValue *BaseGV, StackOffset BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace) const |
|
bool | LSRWithInstrQueries () const |
|
bool | isTruncateFree (Type *Ty1, Type *Ty2) const |
|
bool | isProfitableToHoist (Instruction *I) const |
|
bool | useAA () const |
|
bool | isTypeLegal (Type *Ty) const |
|
unsigned | getRegUsageForType (Type *Ty) const |
|
bool | shouldBuildLookupTables () const |
|
bool | shouldBuildLookupTablesForConstant (Constant *C) const |
|
bool | shouldBuildRelLookupTables () const |
|
bool | useColdCCForColdCall (Function &F) const |
|
InstructionCost | getScalarizationOverhead (VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind) const |
|
InstructionCost | getOperandsScalarizationOverhead (ArrayRef< const Value * > Args, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const |
|
bool | supportsEfficientVectorElementLoadStore () const |
|
bool | supportsTailCalls () const |
|
bool | enableAggressiveInterleaving (bool LoopHasReductions) const |
|
TTI::MemCmpExpansionOptions | enableMemCmpExpansion (bool OptSize, bool IsZeroCmp) const |
|
bool | enableSelectOptimize () const |
|
bool | shouldTreatInstructionLikeSelect (const Instruction *I) |
|
bool | enableInterleavedAccessVectorization () const |
|
bool | enableMaskedInterleavedAccessVectorization () const |
|
bool | isFPVectorizationPotentiallyUnsafe () const |
|
bool | allowsMisalignedMemoryAccesses (LLVMContext &Context, unsigned BitWidth, unsigned AddressSpace, Align Alignment, unsigned *Fast) const |
|
TTI::PopcntSupportKind | getPopcntSupport (unsigned IntTyWidthInBit) const |
|
bool | haveFastSqrt (Type *Ty) const |
|
bool | isExpensiveToSpeculativelyExecute (const Instruction *I) |
|
bool | isFCmpOrdCheaperThanFCmpZero (Type *Ty) const |
|
InstructionCost | getFPOpCost (Type *Ty) const |
|
InstructionCost | getIntImmCodeSizeCost (unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty) const |
|
InstructionCost | getIntImmCost (const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind) const |
|
InstructionCost | getIntImmCostInst (unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind, Instruction *Inst=nullptr) const |
|
InstructionCost | getIntImmCostIntrin (Intrinsic::ID IID, unsigned Idx, const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind) const |
|
bool | preferToKeepConstantsAttached (const Instruction &Inst, const Function &Fn) const |
|
unsigned | getNumberOfRegisters (unsigned ClassID) const |
|
bool | hasConditionalLoadStoreForType (Type *Ty) const |
|
unsigned | getRegisterClassForType (bool Vector, Type *Ty=nullptr) const |
|
const char * | getRegisterClassName (unsigned ClassID) const |
|
TypeSize | getRegisterBitWidth (TargetTransformInfo::RegisterKind K) const |
|
unsigned | getMinVectorRegisterBitWidth () const |
|
std::optional< unsigned > | getMaxVScale () const |
|
std::optional< unsigned > | getVScaleForTuning () const |
|
bool | isVScaleKnownToBeAPowerOfTwo () const |
|
bool | shouldMaximizeVectorBandwidth (TargetTransformInfo::RegisterKind K) const |
|
ElementCount | getMinimumVF (unsigned ElemWidth, bool IsScalable) const |
|
unsigned | getMaximumVF (unsigned ElemWidth, unsigned Opcode) const |
|
unsigned | getStoreMinimumVF (unsigned VF, Type *, Type *) const |
|
bool | shouldConsiderAddressTypePromotion (const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const |
|
unsigned | getCacheLineSize () const |
|
std::optional< unsigned > | getCacheSize (TargetTransformInfo::CacheLevel Level) const |
|
std::optional< unsigned > | getCacheAssociativity (TargetTransformInfo::CacheLevel Level) const |
|
std::optional< unsigned > | getMinPageSize () const |
|
unsigned | getPrefetchDistance () const |
|
unsigned | getMinPrefetchStride (unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const |
|
unsigned | getMaxPrefetchIterationsAhead () const |
|
bool | enableWritePrefetching () const |
|
bool | shouldPrefetchAddressSpace (unsigned AS) const |
|
unsigned | getMaxInterleaveFactor (ElementCount VF) const |
|
InstructionCost | getArithmeticInstrCost (unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Opd1Info, TTI::OperandValueInfo Opd2Info, ArrayRef< const Value * > Args, const Instruction *CxtI=nullptr) const |
|
InstructionCost | getAltInstrCost (VectorType *VecTy, unsigned Opcode0, unsigned Opcode1, const SmallBitVector &OpcodeMask, TTI::TargetCostKind CostKind) const |
|
InstructionCost | getShuffleCost (TTI::ShuffleKind Kind, VectorType *Ty, ArrayRef< int > Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef< const Value * > Args=std::nullopt, const Instruction *CxtI=nullptr) const |
|
InstructionCost | getCastInstrCost (unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I) const |
|
InstructionCost | getExtractWithExtendCost (unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index) const |
|
InstructionCost | getCFInstrCost (unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const |
|
InstructionCost | getCmpSelInstrCost (unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, const Instruction *I) const |
|
InstructionCost | getVectorInstrCost (unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, Value *Op0, Value *Op1) const |
|
InstructionCost | getVectorInstrCost (const Instruction &I, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) const |
|
unsigned | getReplicationShuffleCost (Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts, TTI::TargetCostKind CostKind) |
|
InstructionCost | getMemoryOpCost (unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo, const Instruction *I) const |
|
InstructionCost | getVPMemoryOpCost (unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, const Instruction *I) const |
|
InstructionCost | getMaskedMemoryOpCost (unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind) const |
|
InstructionCost | getGatherScatterOpCost (unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const |
|
InstructionCost | getStridedMemoryOpCost (unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const |
|
unsigned | getInterleavedMemoryOpCost (unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) const |
|
InstructionCost | getIntrinsicInstrCost (const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const |
|
InstructionCost | getCallInstrCost (Function *F, Type *RetTy, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const |
|
unsigned | getNumberOfParts (Type *Tp) const |
|
InstructionCost | getAddressComputationCost (Type *Tp, ScalarEvolution *, const SCEV *) const |
|
InstructionCost | getArithmeticReductionCost (unsigned, VectorType *, std::optional< FastMathFlags > FMF, TTI::TargetCostKind) const |
|
InstructionCost | getMinMaxReductionCost (Intrinsic::ID IID, VectorType *, FastMathFlags, TTI::TargetCostKind) const |
|
InstructionCost | getExtendedReductionCost (unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind) const |
|
InstructionCost | getMulAccReductionCost (bool IsUnsigned, Type *ResTy, VectorType *Ty, TTI::TargetCostKind CostKind) const |
|
InstructionCost | getCostOfKeepingLiveOverCall (ArrayRef< Type * > Tys) const |
|
bool | getTgtMemIntrinsic (IntrinsicInst *Inst, MemIntrinsicInfo &Info) const |
|
unsigned | getAtomicMemIntrinsicMaxElementSize () const |
|
Value * | getOrCreateResultFromMemIntrinsic (IntrinsicInst *Inst, Type *ExpectedType) const |
|
Type * | getMemcpyLoopLoweringType (LLVMContext &Context, Value *Length, unsigned SrcAddrSpace, unsigned DestAddrSpace, Align SrcAlign, Align DestAlign, std::optional< uint32_t > AtomicElementSize) const |
|
void | getMemcpyLoopResidualLoweringType (SmallVectorImpl< Type * > &OpsOut, LLVMContext &Context, unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace, Align SrcAlign, Align DestAlign, std::optional< uint32_t > AtomicCpySize) const |
|
bool | areInlineCompatible (const Function *Caller, const Function *Callee) const |
|
unsigned | getInlineCallPenalty (const Function *F, const CallBase &Call, unsigned DefaultCallPenalty) const |
|
bool | areTypesABICompatible (const Function *Caller, const Function *Callee, const ArrayRef< Type * > &Types) const |
|
bool | isIndexedLoadLegal (TTI::MemIndexedMode Mode, Type *Ty, const DataLayout &DL) const |
|
bool | isIndexedStoreLegal (TTI::MemIndexedMode Mode, Type *Ty, const DataLayout &DL) const |
|
unsigned | getLoadStoreVecRegBitWidth (unsigned AddrSpace) const |
|
bool | isLegalToVectorizeLoad (LoadInst *LI) const |
|
bool | isLegalToVectorizeStore (StoreInst *SI) const |
|
bool | isLegalToVectorizeLoadChain (unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const |
|
bool | isLegalToVectorizeStoreChain (unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const |
|
bool | isLegalToVectorizeReduction (const RecurrenceDescriptor &RdxDesc, ElementCount VF) const |
|
bool | isElementTypeLegalForScalableVector (Type *Ty) const |
|
unsigned | getLoadVectorFactor (unsigned VF, unsigned LoadSize, unsigned ChainSizeInBytes, VectorType *VecTy) const |
|
unsigned | getStoreVectorFactor (unsigned VF, unsigned StoreSize, unsigned ChainSizeInBytes, VectorType *VecTy) const |
|
bool | preferFixedOverScalableIfEqualCost () const |
|
bool | preferInLoopReduction (unsigned Opcode, Type *Ty, TTI::ReductionFlags Flags) const |
|
bool | preferPredicatedReductionSelect (unsigned Opcode, Type *Ty, TTI::ReductionFlags Flags) const |
|
bool | preferEpilogueVectorization () const |
|
bool | shouldExpandReduction (const IntrinsicInst *II) const |
|
TTI::ReductionShuffle | getPreferredExpandedReductionShuffle (const IntrinsicInst *II) const |
|
unsigned | getGISelRematGlobalCost () const |
|
unsigned | getMinTripCountTailFoldingThreshold () const |
|
bool | supportsScalableVectors () const |
|
bool | enableScalableVectorization () const |
|
bool | hasActiveVectorLength (unsigned Opcode, Type *DataType, Align Alignment) const |
|
TargetTransformInfo::VPLegalization | getVPLegalizationStrategy (const VPIntrinsic &PI) const |
|
bool | hasArmWideBranch (bool) const |
|
unsigned | getMaxNumArgs () const |
|