LLVM 11.0.0git
llvm::AArch64TTIImpl Class Reference

#include "Target/AArch64/AArch64TargetTransformInfo.h"

Inheritance diagram for llvm::AArch64TTIImpl (graph omitted)
Collaboration diagram for llvm::AArch64TTIImpl (graph omitted)

Public Member Functions

 AArch64TTIImpl (const AArch64TargetMachine *TM, const Function &F)
 
bool areInlineCompatible (const Function *Caller, const Function *Callee) const
 
Scalar TTI Implementations
int getIntImmCost (int64_t Val)
 Calculate the cost of materializing a 64-bit value. More...
 
int getIntImmCost (const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind)
 Calculate the cost of materializing the given constant. More...
 
int getIntImmCostInst (unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind)
 
int getIntImmCostIntrin (Intrinsic::ID IID, unsigned Idx, const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind)
 
TTI::PopcntSupportKind getPopcntSupport (unsigned TyWidth)
 
Vector TTI Implementations
bool enableInterleavedAccessVectorization ()
 
unsigned getNumberOfRegisters (unsigned ClassID) const
 
unsigned getRegisterBitWidth (bool Vector) const
 
unsigned getMinVectorRegisterBitWidth ()
 
unsigned getMaxInterleaveFactor (unsigned VF)
 
int getCastInstrCost (unsigned Opcode, Type *Dst, Type *Src, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
 
int getExtractWithExtendCost (unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index)
 
unsigned getCFInstrCost (unsigned Opcode, TTI::TargetCostKind CostKind)
 
int getVectorInstrCost (unsigned Opcode, Type *Val, unsigned Index)
 
int getArithmeticInstrCost (unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, TTI::OperandValueKind Opd1Info=TTI::OK_AnyValue, TTI::OperandValueKind Opd2Info=TTI::OK_AnyValue, TTI::OperandValueProperties Opd1PropInfo=TTI::OP_None, TTI::OperandValueProperties Opd2PropInfo=TTI::OP_None, ArrayRef< const Value *> Args=ArrayRef< const Value *>(), const Instruction *CxtI=nullptr)
 
int getAddressComputationCost (Type *Ty, ScalarEvolution *SE, const SCEV *Ptr)
 
int getCmpSelInstrCost (unsigned Opcode, Type *ValTy, Type *CondTy, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
 
TTI::MemCmpExpansionOptions enableMemCmpExpansion (bool OptSize, bool IsZeroCmp) const
 
int getMemoryOpCost (unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
 
int getCostOfKeepingLiveOverCall (ArrayRef< Type *> Tys)
 
void getUnrollingPreferences (Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP)
 
Value * getOrCreateResultFromMemIntrinsic (IntrinsicInst *Inst, Type *ExpectedType)
 
bool getTgtMemIntrinsic (IntrinsicInst *Inst, MemIntrinsicInfo &Info)
 
bool isLegalMaskedLoadStore (Type *DataType, Align Alignment)
 
bool isLegalMaskedLoad (Type *DataType, Align Alignment)
 
bool isLegalMaskedStore (Type *DataType, Align Alignment)
 
bool isLegalNTStore (Type *DataType, Align Alignment)
 
int getInterleavedMemoryOpCost (unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency, bool UseMaskForCond=false, bool UseMaskForGaps=false)
 
bool shouldConsiderAddressTypePromotion (const Instruction &I, bool &AllowPromotionWithoutCommonHeader)
 See if I should be considered for address type promotion. More...
 
bool shouldExpandReduction (const IntrinsicInst *II) const
 
unsigned getGISelRematGlobalCost () const
 
bool useReductionIntrinsic (unsigned Opcode, Type *Ty, TTI::ReductionFlags Flags) const
 
int getArithmeticReductionCost (unsigned Opcode, VectorType *Ty, bool IsPairwiseForm, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput)
 
int getShuffleCost (TTI::ShuffleKind Kind, VectorType *Tp, int Index, VectorType *SubTp)
 
- Public Member Functions inherited from llvm::BasicTTIImplBase< AArch64TTIImpl >
bool allowsMisalignedMemoryAccesses (LLVMContext &Context, unsigned BitWidth, unsigned AddressSpace, unsigned Alignment, bool *Fast) const
 
bool hasBranchDivergence ()
 
bool useGPUDivergenceAnalysis ()
 
bool isSourceOfDivergence (const Value *V)
 
bool isAlwaysUniform (const Value *V)
 
unsigned getFlatAddressSpace ()
 
bool collectFlatAddressOperands (SmallVectorImpl< int > &OpIndexes, Intrinsic::ID IID) const
 
bool isNoopAddrSpaceCast (unsigned FromAS, unsigned ToAS) const
 
Value * rewriteIntrinsicWithAddressSpace (IntrinsicInst *II, Value *OldV, Value *NewV) const
 
bool isLegalAddImmediate (int64_t imm)
 
bool isLegalICmpImmediate (int64_t imm)
 
bool isLegalAddressingMode (Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace, Instruction *I=nullptr)
 
bool isIndexedLoadLegal (TTI::MemIndexedMode M, Type *Ty, const DataLayout &DL) const
 
bool isIndexedStoreLegal (TTI::MemIndexedMode M, Type *Ty, const DataLayout &DL) const
 
bool isLSRCostLess (TTI::LSRCost C1, TTI::LSRCost C2)
 
bool isProfitableLSRChainElement (Instruction *I)
 
int getScalingFactorCost (Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace)
 
bool isTruncateFree (Type *Ty1, Type *Ty2)
 
bool isProfitableToHoist (Instruction *I)
 
bool useAA () const
 
bool isTypeLegal (Type *Ty)
 
int getGEPCost (Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands)
 
unsigned getEstimatedNumberOfCaseClusters (const SwitchInst &SI, unsigned &JumpTableSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
 
bool shouldBuildLookupTables ()
 
bool haveFastSqrt (Type *Ty)
 
bool isFCmpOrdCheaperThanFCmpZero (Type *Ty)
 
unsigned getFPOpCost (Type *Ty)
 
unsigned getInliningThresholdMultiplier ()
 
int getInlinerVectorBonusPercent ()
 
void getUnrollingPreferences (Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP)
 
bool isHardwareLoopProfitable (Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo)
 
bool preferPredicateOverEpilogue (Loop *L, LoopInfo *LI, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *TLI, DominatorTree *DT, const LoopAccessInfo *LAI)
 
bool emitGetActiveLaneMask ()
 
int getInstructionLatency (const Instruction *I)
 
virtual Optional< unsigned > getCacheSize (TargetTransformInfo::CacheLevel Level) const
 
virtual Optional< unsigned > getCacheAssociativity (TargetTransformInfo::CacheLevel Level) const
 
virtual unsigned getCacheLineSize () const
 
virtual unsigned getPrefetchDistance () const
 
virtual unsigned getMinPrefetchStride (unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const
 
virtual unsigned getMaxPrefetchIterationsAhead () const
 
virtual bool enableWritePrefetching () const
 
unsigned getRegisterBitWidth (bool Vector) const
 
unsigned getScalarizationOverhead (VectorType *InTy, const APInt &DemandedElts, bool Insert, bool Extract)
 Estimate the overhead of scalarizing an instruction. More...
 
unsigned getScalarizationOverhead (VectorType *InTy, bool Insert, bool Extract)
 Helper wrapper for the DemandedElts variant of getScalarizationOverhead. More...
 
unsigned getScalarizationOverhead (VectorType *InTy, ArrayRef< const Value * > Args)
 
unsigned getOperandsScalarizationOverhead (ArrayRef< const Value * > Args, unsigned VF)
 Estimate the overhead of scalarizing an instructions unique non-constant operands. More...
 
unsigned getMaxInterleaveFactor (unsigned VF)
 
unsigned getArithmeticInstrCost (unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, TTI::OperandValueKind Opd1Info=TTI::OK_AnyValue, TTI::OperandValueKind Opd2Info=TTI::OK_AnyValue, TTI::OperandValueProperties Opd1PropInfo=TTI::OP_None, TTI::OperandValueProperties Opd2PropInfo=TTI::OP_None, ArrayRef< const Value * > Args=ArrayRef< const Value * >(), const Instruction *CxtI=nullptr)
 
unsigned getShuffleCost (TTI::ShuffleKind Kind, VectorType *Tp, int Index, VectorType *SubTp)
 
unsigned getCastInstrCost (unsigned Opcode, Type *Dst, Type *Src, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
 
unsigned getExtractWithExtendCost (unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index)
 
unsigned getCFInstrCost (unsigned Opcode, TTI::TargetCostKind CostKind)
 
unsigned getCmpSelInstrCost (unsigned Opcode, Type *ValTy, Type *CondTy, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
 
unsigned getVectorInstrCost (unsigned Opcode, Type *Val, unsigned Index)
 
unsigned getMemoryOpCost (unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
 
unsigned getInterleavedMemoryOpCost (unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond=false, bool UseMaskForGaps=false)
 
unsigned getIntrinsicInstrCost (const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind)
 Get intrinsic cost based on arguments. More...
 
unsigned getTypeBasedIntrinsicInstrCost (const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind)
 Get intrinsic cost based on argument types. More...
 
unsigned getCallInstrCost (Function *F, Type *RetTy, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency)
 Compute a cost of the given call instruction. More...
 
unsigned getNumberOfParts (Type *Tp)
 
unsigned getAddressComputationCost (Type *Ty, ScalarEvolution *, const SCEV *)
 
unsigned getArithmeticReductionCost (unsigned Opcode, VectorType *Ty, bool IsPairwise, TTI::TargetCostKind CostKind)
 Try to calculate arithmetic and shuffle op costs for reduction operations. More...
 
unsigned getMinMaxReductionCost (VectorType *Ty, VectorType *CondTy, bool IsPairwise, bool IsUnsigned, TTI::TargetCostKind CostKind)
 Try to calculate op costs for min/max reduction operations. More...
 
unsigned getVectorSplitCost ()
 
- Public Member Functions inherited from llvm::TargetTransformInfoImplCRTPBase< AArch64TTIImpl >
int getGEPCost (Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency)
 
int getUserCost (const User *U, ArrayRef< const Value * > Operands, TTI::TargetCostKind CostKind)
 
int getInstructionLatency (const Instruction *I)
 
- Public Member Functions inherited from llvm::TargetTransformInfoImplBase
 TargetTransformInfoImplBase (const TargetTransformInfoImplBase &Arg)
 
 TargetTransformInfoImplBase (TargetTransformInfoImplBase &&Arg)
 
const DataLayout & getDataLayout () const
 
int getGEPCost (Type *PointeeType, const Value *Ptr, ArrayRef< const Value *> Operands, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency)
 
unsigned getEstimatedNumberOfCaseClusters (const SwitchInst &SI, unsigned &JTSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
 
unsigned getInliningThresholdMultiplier ()
 
int getInlinerVectorBonusPercent ()
 
unsigned getMemcpyCost (const Instruction *I)
 
bool hasBranchDivergence ()
 
bool useGPUDivergenceAnalysis ()
 
bool isSourceOfDivergence (const Value *V)
 
bool isAlwaysUniform (const Value *V)
 
unsigned getFlatAddressSpace ()
 
bool collectFlatAddressOperands (SmallVectorImpl< int > &OpIndexes, Intrinsic::ID IID) const
 
bool isNoopAddrSpaceCast (unsigned, unsigned) const
 
Value * rewriteIntrinsicWithAddressSpace (IntrinsicInst *II, Value *OldV, Value *NewV) const
 
bool isLoweredToCall (const Function *F)
 
bool isHardwareLoopProfitable (Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo)
 
bool preferPredicateOverEpilogue (Loop *L, LoopInfo *LI, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *TLI, DominatorTree *DT, const LoopAccessInfo *LAI) const
 
bool emitGetActiveLaneMask () const
 
void getUnrollingPreferences (Loop *, ScalarEvolution &, TTI::UnrollingPreferences &)
 
bool isLegalAddImmediate (int64_t Imm)
 
bool isLegalICmpImmediate (int64_t Imm)
 
bool isLegalAddressingMode (Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace, Instruction *I=nullptr)
 
bool isLSRCostLess (TTI::LSRCost &C1, TTI::LSRCost &C2)
 
bool isProfitableLSRChainElement (Instruction *I)
 
bool canMacroFuseCmp ()
 
bool canSaveCmp (Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC, TargetLibraryInfo *LibInfo)
 
bool shouldFavorPostInc () const
 
bool shouldFavorBackedgeIndex (const Loop *L) const
 
bool isLegalMaskedStore (Type *DataType, Align Alignment)
 
bool isLegalMaskedLoad (Type *DataType, Align Alignment)
 
bool isLegalNTStore (Type *DataType, Align Alignment)
 
bool isLegalNTLoad (Type *DataType, Align Alignment)
 
bool isLegalMaskedScatter (Type *DataType, Align Alignment)
 
bool isLegalMaskedGather (Type *DataType, Align Alignment)
 
bool isLegalMaskedCompressStore (Type *DataType)
 
bool isLegalMaskedExpandLoad (Type *DataType)
 
bool hasDivRemOp (Type *DataType, bool IsSigned)
 
bool hasVolatileVariant (Instruction *I, unsigned AddrSpace)
 
bool prefersVectorizedAddressing ()
 
int getScalingFactorCost (Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace)
 
bool LSRWithInstrQueries ()
 
bool isTruncateFree (Type *Ty1, Type *Ty2)
 
bool isProfitableToHoist (Instruction *I)
 
bool useAA ()
 
bool isTypeLegal (Type *Ty)
 
bool shouldBuildLookupTables ()
 
bool shouldBuildLookupTablesForConstant (Constant *C)
 
bool useColdCCForColdCall (Function &F)
 
unsigned getScalarizationOverhead (VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract)
 
unsigned getOperandsScalarizationOverhead (ArrayRef< const Value *> Args, unsigned VF)
 
bool supportsEfficientVectorElementLoadStore ()
 
bool enableAggressiveInterleaving (bool LoopHasReductions)
 
TTI::MemCmpExpansionOptions enableMemCmpExpansion (bool OptSize, bool IsZeroCmp) const
 
bool enableInterleavedAccessVectorization ()
 
bool enableMaskedInterleavedAccessVectorization ()
 
bool isFPVectorizationPotentiallyUnsafe ()
 
bool allowsMisalignedMemoryAccesses (LLVMContext &Context, unsigned BitWidth, unsigned AddressSpace, unsigned Alignment, bool *Fast)
 
TTI::PopcntSupportKind getPopcntSupport (unsigned IntTyWidthInBit)
 
bool haveFastSqrt (Type *Ty)
 
bool isFCmpOrdCheaperThanFCmpZero (Type *Ty)
 
unsigned getFPOpCost (Type *Ty)
 
int getIntImmCodeSizeCost (unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty)
 
unsigned getIntImmCost (const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind)
 
unsigned getIntImmCostInst (unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind)
 
unsigned getIntImmCostIntrin (Intrinsic::ID IID, unsigned Idx, const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind)
 
unsigned getNumberOfRegisters (unsigned ClassID) const
 
unsigned getRegisterClassForType (bool Vector, Type *Ty=nullptr) const
 
const char * getRegisterClassName (unsigned ClassID) const
 
unsigned getRegisterBitWidth (bool Vector) const
 
unsigned getMinVectorRegisterBitWidth ()
 
bool shouldMaximizeVectorBandwidth (bool OptSize) const
 
unsigned getMinimumVF (unsigned ElemWidth) const
 
bool shouldConsiderAddressTypePromotion (const Instruction &I, bool &AllowPromotionWithoutCommonHeader)
 
unsigned getCacheLineSize () const
 
llvm::Optional< unsigned > getCacheSize (TargetTransformInfo::CacheLevel Level) const
 
llvm::Optional< unsigned > getCacheAssociativity (TargetTransformInfo::CacheLevel Level) const
 
unsigned getPrefetchDistance () const
 
unsigned getMinPrefetchStride (unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const
 
unsigned getMaxPrefetchIterationsAhead () const
 
bool enableWritePrefetching () const
 
unsigned getMaxInterleaveFactor (unsigned VF)
 
unsigned getArithmeticInstrCost (unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueKind Opd1Info, TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo, TTI::OperandValueProperties Opd2PropInfo, ArrayRef< const Value *> Args, const Instruction *CxtI=nullptr)
 
unsigned getShuffleCost (TTI::ShuffleKind Kind, VectorType *Ty, int Index, VectorType *SubTp)
 
unsigned getCastInstrCost (unsigned Opcode, Type *Dst, Type *Src, TTI::TargetCostKind CostKind, const Instruction *I)
 
unsigned getExtractWithExtendCost (unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index)
 
unsigned getCFInstrCost (unsigned Opcode, TTI::TargetCostKind CostKind)
 
unsigned getCmpSelInstrCost (unsigned Opcode, Type *ValTy, Type *CondTy, TTI::TargetCostKind CostKind, const Instruction *I) const
 
unsigned getVectorInstrCost (unsigned Opcode, Type *Val, unsigned Index)
 
unsigned getMemoryOpCost (unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, const Instruction *I) const
 
unsigned getMaskedMemoryOpCost (unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind)
 
unsigned getGatherScatterOpCost (unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I=nullptr)
 
unsigned getInterleavedMemoryOpCost (unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps)
 
unsigned getIntrinsicInstrCost (const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind)
 
unsigned getCallInstrCost (Function *F, Type *RetTy, ArrayRef< Type *> Tys, TTI::TargetCostKind CostKind)
 
unsigned getNumberOfParts (Type *Tp)
 
unsigned getAddressComputationCost (Type *Tp, ScalarEvolution *, const SCEV *)
 
unsigned getArithmeticReductionCost (unsigned, VectorType *, bool, TTI::TargetCostKind)
 
unsigned getMinMaxReductionCost (VectorType *, VectorType *, bool, bool, TTI::TargetCostKind)
 
unsigned getCostOfKeepingLiveOverCall (ArrayRef< Type *> Tys)
 
bool getTgtMemIntrinsic (IntrinsicInst *Inst, MemIntrinsicInfo &Info)
 
unsigned getAtomicMemIntrinsicMaxElementSize () const
 
Value * getOrCreateResultFromMemIntrinsic (IntrinsicInst *Inst, Type *ExpectedType)
 
Type * getMemcpyLoopLoweringType (LLVMContext &Context, Value *Length, unsigned SrcAddrSpace, unsigned DestAddrSpace, unsigned SrcAlign, unsigned DestAlign) const
 
void getMemcpyLoopResidualLoweringType (SmallVectorImpl< Type *> &OpsOut, LLVMContext &Context, unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace, unsigned SrcAlign, unsigned DestAlign) const
 
bool areInlineCompatible (const Function *Caller, const Function *Callee) const
 
bool areFunctionArgsABICompatible (const Function *Caller, const Function *Callee, SmallPtrSetImpl< Argument *> &Args) const
 
bool isIndexedLoadLegal (TTI::MemIndexedMode Mode, Type *Ty, const DataLayout &DL) const
 
bool isIndexedStoreLegal (TTI::MemIndexedMode Mode, Type *Ty, const DataLayout &DL) const
 
unsigned getLoadStoreVecRegBitWidth (unsigned AddrSpace) const
 
bool isLegalToVectorizeLoad (LoadInst *LI) const
 
bool isLegalToVectorizeStore (StoreInst *SI) const
 
bool isLegalToVectorizeLoadChain (unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const
 
bool isLegalToVectorizeStoreChain (unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const
 
unsigned getLoadVectorFactor (unsigned VF, unsigned LoadSize, unsigned ChainSizeInBytes, VectorType *VecTy) const
 
unsigned getStoreVectorFactor (unsigned VF, unsigned StoreSize, unsigned ChainSizeInBytes, VectorType *VecTy) const
 
bool useReductionIntrinsic (unsigned Opcode, Type *Ty, TTI::ReductionFlags Flags) const
 
bool shouldExpandReduction (const IntrinsicInst *II) const
 
unsigned getGISelRematGlobalCost () const
 
bool hasActiveVectorLength () const
 

Additional Inherited Members

- Protected Types inherited from llvm::TargetTransformInfoImplBase
typedef TargetTransformInfo TTI
 
- Protected Member Functions inherited from llvm::BasicTTIImplBase< AArch64TTIImpl >
 BasicTTIImplBase (const TargetMachine *TM, const DataLayout &DL)
 
virtual ~BasicTTIImplBase ()=default
 
- Protected Member Functions inherited from llvm::TargetTransformInfoImplCRTPBase< AArch64TTIImpl >
 TargetTransformInfoImplCRTPBase (const DataLayout &DL)
 
- Protected Member Functions inherited from llvm::TargetTransformInfoImplBase
 TargetTransformInfoImplBase (const DataLayout &DL)
 
unsigned minRequiredElementSize (const Value *Val, bool &isSigned)
 
bool isStridedAccess (const SCEV *Ptr)
 
const SCEVConstant * getConstantStrideStep (ScalarEvolution *SE, const SCEV *Ptr)
 
bool isConstantStridedAccessLessThan (ScalarEvolution *SE, const SCEV *Ptr, int64_t MergeDistance)
 
- Protected Attributes inherited from llvm::TargetTransformInfoImplBase
const DataLayout & DL
 

Detailed Description

Definition at line 41 of file AArch64TargetTransformInfo.h.

Constructor & Destructor Documentation

◆ AArch64TTIImpl()

llvm::AArch64TTIImpl::AArch64TTIImpl ( const AArch64TargetMachine *TM, const Function &F )
inline explicit

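A minimal usage sketch, not part of the LLVM sources: passes do not normally construct this class directly but query costs through the generic TargetTransformInfo analysis, which is backed by AArch64TTIImpl when it was registered from an AArch64TargetMachine. The helper name below is illustrative.

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/PassManager.h"

using namespace llvm;

// Illustrative helper: reciprocal-throughput cost of an integer add on Ty.
// With an AArch64 target configured, the returned TargetTransformInfo
// forwards to AArch64TTIImpl(TM, F).
static int addCostFor(Function &F, FunctionAnalysisManager &FAM, Type *Ty) {
  TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
  return TTI.getArithmeticInstrCost(Instruction::Add, Ty,
                                    TargetTransformInfo::TCK_RecipThroughput);
}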
Member Function Documentation

◆ areInlineCompatible()

bool AArch64TTIImpl::areInlineCompatible ( const Function *Caller, const Function *Callee ) const

◆ enableInterleavedAccessVectorization()

bool llvm::AArch64TTIImpl::enableInterleavedAccessVectorization ( )
inline

Definition at line 87 of file AArch64TargetTransformInfo.h.

◆ enableMemCmpExpansion()

AArch64TTIImpl::TTI::MemCmpExpansionOptions AArch64TTIImpl::enableMemCmpExpansion ( bool OptSize, bool IsZeroCmp ) const

◆ getAddressComputationCost()

int AArch64TTIImpl::getAddressComputationCost ( Type *Ty, ScalarEvolution *SE, const SCEV *Ptr )

◆ getArithmeticInstrCost()

int AArch64TTIImpl::getArithmeticInstrCost ( unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput, TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue, TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue, TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None, TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None, ArrayRef< const Value *> Args = ArrayRef<const Value *>(), const Instruction *CxtI = nullptr )

◆ getArithmeticReductionCost()

int AArch64TTIImpl::getArithmeticReductionCost ( unsigned Opcode, VectorType *Ty, bool IsPairwiseForm, TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput )

◆ getCastInstrCost()

int AArch64TTIImpl::getCastInstrCost ( unsigned Opcode, Type *Dst, Type *Src, TTI::TargetCostKind CostKind, const Instruction *I = nullptr )

◆ getCFInstrCost()

unsigned AArch64TTIImpl::getCFInstrCost ( unsigned Opcode, TTI::TargetCostKind CostKind )

◆ getCmpSelInstrCost()

int AArch64TTIImpl::getCmpSelInstrCost ( unsigned Opcode, Type *ValTy, Type *CondTy, TTI::TargetCostKind CostKind, const Instruction *I = nullptr )

◆ getCostOfKeepingLiveOverCall()

int AArch64TTIImpl::getCostOfKeepingLiveOverCall ( ArrayRef< Type *>  Tys)

◆ getExtractWithExtendCost()

int AArch64TTIImpl::getExtractWithExtendCost ( unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index )

◆ getGISelRematGlobalCost()

unsigned llvm::AArch64TTIImpl::getGISelRematGlobalCost ( ) const
inline

◆ getInterleavedMemoryOpCost()

int AArch64TTIImpl::getInterleavedMemoryOpCost ( unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency, bool UseMaskForCond = false, bool UseMaskForGaps = false )

◆ getIntImmCost() [1/2]

int AArch64TTIImpl::getIntImmCost ( int64_t  Val)

Calculate the cost of materializing a 64-bit value.

This helper method might only calculate a fraction of a larger immediate. Therefore it is valid to return a cost of ZERO.

Definition at line 45 of file AArch64TargetTransformInfo.cpp.

References llvm::AArch64_IMM::expandMOVImm(), llvm::AArch64_AM::isLogicalImmediate(), and llvm::SmallVectorBase< SmallVectorSizeType< T > >::size().

Referenced by AArch64TTIImpl(), getIntImmCost(), getIntImmCostInst(), and getIntImmCostIntrin().
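A minimal sketch of how this kind of cost is queried from the generic TTI layer, assuming a TargetTransformInfo already obtained for an AArch64 function; the helper name and the TCK_CodeSize cost kind are illustrative choices, not part of LLVM.

#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"

using namespace llvm;

// Illustrative helper: cost of materializing a 64-bit constant. On AArch64
// this dispatches to AArch64TTIImpl::getIntImmCost, which roughly counts the
// MOVZ/MOVK (or logical-immediate) instructions needed.
static int immMaterializationCost(const TargetTransformInfo &TTI,
                                  LLVMContext &Ctx, uint64_t Value) {
  Type *I64 = Type::getInt64Ty(Ctx);
  APInt Imm(64, Value);
  return TTI.getIntImmCost(Imm, I64, TargetTransformInfo::TCK_CodeSize);
}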

◆ getIntImmCost() [2/2]

int AArch64TTIImpl::getIntImmCost ( const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind )

◆ getIntImmCostInst()

int AArch64TTIImpl::getIntImmCostInst ( unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind )

◆ getIntImmCostIntrin()

int AArch64TTIImpl::getIntImmCostIntrin ( Intrinsic::ID IID, unsigned Idx, const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind )

◆ getMaxInterleaveFactor()

unsigned AArch64TTIImpl::getMaxInterleaveFactor ( unsigned  VF)

◆ getMemoryOpCost()

int AArch64TTIImpl::getMemoryOpCost ( unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, const Instruction *I = nullptr )

◆ getMinVectorRegisterBitWidth()

unsigned llvm::AArch64TTIImpl::getMinVectorRegisterBitWidth ( )
inline

◆ getNumberOfRegisters()

unsigned llvm::AArch64TTIImpl::getNumberOfRegisters ( unsigned  ClassID) const
inline

Definition at line 89 of file AArch64TargetTransformInfo.h.

References llvm::AArch64Subtarget::hasNEON(), and llvm::Vector.

◆ getOrCreateResultFromMemIntrinsic()

Value * AArch64TTIImpl::getOrCreateResultFromMemIntrinsic ( IntrinsicInst *Inst, Type *ExpectedType )

◆ getPopcntSupport()

TargetTransformInfo::PopcntSupportKind AArch64TTIImpl::getPopcntSupport ( unsigned  TyWidth)

◆ getRegisterBitWidth()

unsigned llvm::AArch64TTIImpl::getRegisterBitWidth ( bool  Vector) const
inline

◆ getShuffleCost()

int AArch64TTIImpl::getShuffleCost ( TTI::ShuffleKind Kind, VectorType *Tp, int Index, VectorType *SubTp )

◆ getTgtMemIntrinsic()

bool AArch64TTIImpl::getTgtMemIntrinsic ( IntrinsicInst *Inst, MemIntrinsicInfo &Info )

◆ getUnrollingPreferences()

void AArch64TTIImpl::getUnrollingPreferences ( Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP )

◆ getVectorInstrCost()

int AArch64TTIImpl::getVectorInstrCost ( unsigned Opcode, Type *Val, unsigned Index )

◆ isLegalMaskedLoad()

bool llvm::AArch64TTIImpl::isLegalMaskedLoad ( Type *DataType, Align Alignment )
inline

Definition at line 177 of file AArch64TargetTransformInfo.h.

References isLegalMaskedLoadStore().

◆ isLegalMaskedLoadStore()

bool llvm::AArch64TTIImpl::isLegalMaskedLoadStore ( Type *DataType, Align Alignment )
inline

◆ isLegalMaskedStore()

bool llvm::AArch64TTIImpl::isLegalMaskedStore ( Type *DataType, Align Alignment )
inline

Definition at line 181 of file AArch64TargetTransformInfo.h.

References isLegalMaskedLoadStore().

◆ isLegalNTStore()

bool llvm::AArch64TTIImpl::isLegalNTStore ( Type *DataType, Align Alignment )
inline

◆ shouldConsiderAddressTypePromotion()

bool AArch64TTIImpl::shouldConsiderAddressTypePromotion ( const Instruction &I, bool &AllowPromotionWithoutCommonHeader )

See if I should be considered for address type promotion.

We check whether I is a sext with the right type that is used in memory accesses. If it is used in a "complex" getelementptr, we allow it to be promoted without finding other sext instructions that sign-extended the same initial value. A getelementptr is considered "complex" if it has more than 2 operands.

Definition at line 943 of file AArch64TargetTransformInfo.cpp.

References llvm::Function::getContext(), llvm::Type::getInt64Ty(), llvm::Instruction::getParent(), llvm::BasicBlock::getParent(), llvm::Value::getType(), and llvm::Value::users().

Referenced by isLegalNTStore().
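A minimal sketch of the shape of the check described above, not the actual implementation; the helper name and exact conditions are illustrative.

#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"

using namespace llvm;

// Illustrative helper: a sext of an index used in address computation is a
// promotion candidate; a GEP with more than 2 operands counts as "complex"
// and may be promoted without a common sign-extended header value.
static bool looksLikePromotionCandidate(const Instruction &I,
                                        bool &AllowWithoutCommonHeader) {
  AllowWithoutCommonHeader = false;
  if (!isa<SExtInst>(&I) || !I.getType()->isIntegerTy(64))
    return false;
  bool Considerable = false;
  for (const User *U : I.users()) {
    if (const auto *GEP = dyn_cast<GetElementPtrInst>(U)) {
      Considerable = true;
      if (GEP->getNumOperands() > 2)
        AllowWithoutCommonHeader = true;
    }
  }
  return Considerable;
}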

◆ shouldExpandReduction()

bool llvm::AArch64TTIImpl::shouldExpandReduction ( const IntrinsicInst *II ) const
inline

◆ useReductionIntrinsic()

bool AArch64TTIImpl::useReductionIntrinsic ( unsigned Opcode, Type *Ty, TTI::ReductionFlags Flags ) const

The documentation for this class was generated from the following files:
AArch64TargetTransformInfo.h
AArch64TargetTransformInfo.cpp