22 #ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
23 #define LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
31 #include "llvm/Support/DataTypes.h"
39 class ScalarEvolution;
358 bool HasBaseReg, int64_t Scale,
359 unsigned AddrSpace = 0)
const;
379 bool HasBaseReg, int64_t Scale,
380 unsigned AddrSpace = 0)
const;
432 unsigned Alignment = 1,
433 bool *
Fast =
nullptr)
const;
540 Type *SubTp =
nullptr)
const;
549 unsigned Index = -1)
const;
557 Type *CondTy =
nullptr)
const;
579 bool VariableMask,
unsigned Alignment)
const;
631 const SCEV *
Ptr =
nullptr)
const;
651 Type *ExpectedType)
const;
671 unsigned AddrSpace)
const;
676 unsigned AddrSpace)
const;
681 unsigned ChainSizeInBytes,
687 unsigned ChainSizeInBytes,
699 template <
typename T>
class Model;
701 std::unique_ptr<Concept> TTIImpl;
728 int64_t BaseOffset,
bool HasBaseReg,
730 unsigned AddrSpace) = 0;
736 int64_t BaseOffset,
bool HasBaseReg,
737 int64_t Scale,
unsigned AddrSpace) = 0;
788 unsigned AddressSpace) = 0;
791 unsigned AddressSpace) = 0;
793 Value *Ptr,
bool VariableMask,
794 unsigned Alignment) = 0;
799 unsigned AddressSpace) = 0;
801 bool IsPairwiseForm) = 0;
812 const SCEV *Ptr) = 0;
817 Type *ExpectedType) = 0;
825 unsigned AddrSpace)
const = 0;
828 unsigned AddrSpace)
const = 0;
830 unsigned ChainSizeInBytes,
833 unsigned ChainSizeInBytes,
837 template <
typename T>
842 Model(
T Impl) : Impl(std::move(Impl)) {}
845 const DataLayout &getDataLayout()
const override {
846 return Impl.getDataLayout();
850 return Impl.getOperationCost(Opcode, Ty, OpTy);
853 ArrayRef<const Value *> Operands)
override {
854 return Impl.getGEPCost(PointeeType, Ptr, Operands);
856 int getCallCost(FunctionType *FTy,
int NumArgs)
override {
857 return Impl.getCallCost(FTy, NumArgs);
859 int getCallCost(
const Function *
F,
int NumArgs)
override {
860 return Impl.getCallCost(F, NumArgs);
863 ArrayRef<const Value *> Arguments)
override {
864 return Impl.getCallCost(F, Arguments);
867 return Impl.getInliningThresholdMultiplier();
870 ArrayRef<Type *> ParamTys)
override {
871 return Impl.getIntrinsicCost(IID, RetTy, ParamTys);
874 ArrayRef<const Value *> Arguments)
override {
875 return Impl.getIntrinsicCost(IID, RetTy, Arguments);
877 int getUserCost(
const User *U)
override {
return Impl.getUserCost(U); }
880 return Impl.isSourceOfDivergence(V);
883 return Impl.isLoweredToCall(F);
886 return Impl.getUnrollingPreferences(L, UP);
889 return Impl.isLegalAddImmediate(Imm);
892 return Impl.isLegalICmpImmediate(Imm);
895 bool HasBaseReg, int64_t Scale,
896 unsigned AddrSpace)
override {
897 return Impl.isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
901 return Impl.isLegalMaskedStore(DataType);
904 return Impl.isLegalMaskedLoad(DataType);
907 return Impl.isLegalMaskedScatter(DataType);
910 return Impl.isLegalMaskedGather(DataType);
913 bool HasBaseReg, int64_t Scale,
914 unsigned AddrSpace)
override {
915 return Impl.getScalingFactorCost(Ty, BaseGV, BaseOffset, HasBaseReg,
919 return Impl.isFoldableMemAccessOffset(I, Offset);
922 return Impl.isTruncateFree(Ty1, Ty2);
925 return Impl.isProfitableToHoist(I);
929 unsigned getJumpBufSize()
override {
return Impl.getJumpBufSize(); }
931 return Impl.shouldBuildLookupTables();
934 return Impl.shouldBuildLookupTablesForConstant(C);
937 return Impl.enableAggressiveInterleaving(LoopHasReductions);
940 return Impl.enableInterleavedAccessVectorization();
943 return Impl.isFPVectorizationPotentiallyUnsafe();
947 unsigned Alignment,
bool *
Fast)
override {
948 return Impl.allowsMisalignedMemoryAccesses(Context, BitWidth, AddressSpace,
952 return Impl.getPopcntSupport(IntTyWidthInBit);
960 return Impl.getIntImmCodeSizeCost(Opc, Idx, Imm, Ty);
963 return Impl.getIntImmCost(Imm, Ty);
965 int getIntImmCost(
unsigned Opc,
unsigned Idx,
const APInt &Imm,
967 return Impl.getIntImmCost(Opc, Idx, Imm, Ty);
971 return Impl.getIntImmCost(IID, Idx, Imm, Ty);
974 return Impl.getNumberOfRegisters(Vector);
977 return Impl.getRegisterBitWidth(Vector);
981 return Impl.getCacheLineSize();
985 return Impl.getMinPrefetchStride();
988 return Impl.getMaxPrefetchIterationsAhead();
991 return Impl.getMaxInterleaveFactor(VF);
998 ArrayRef<const Value *>
Args)
override {
999 return Impl.getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
1000 Opd1PropInfo, Opd2PropInfo, Args);
1003 Type *SubTp)
override {
1004 return Impl.getShuffleCost(Kind, Tp, Index, SubTp);
1007 return Impl.getCastInstrCost(Opcode, Dst, Src);
1010 unsigned Index)
override {
1011 return Impl.getExtractWithExtendCost(Opcode, Dst, VecTy, Index);
1014 return Impl.getCFInstrCost(Opcode);
1017 return Impl.getCmpSelInstrCost(Opcode, ValTy, CondTy);
1020 return Impl.getVectorInstrCost(Opcode, Val, Index);
1023 unsigned AddressSpace)
override {
1024 return Impl.getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
1027 unsigned AddressSpace)
override {
1028 return Impl.getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
1031 Value *Ptr,
bool VariableMask,
1032 unsigned Alignment)
override {
1033 return Impl.getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
1037 ArrayRef<unsigned> Indices,
unsigned Alignment,
1038 unsigned AddressSpace)
override {
1039 return Impl.getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
1040 Alignment, AddressSpace);
1043 bool IsPairwiseForm)
override {
1044 return Impl.getReductionCost(Opcode, Ty, IsPairwiseForm);
1047 FastMathFlags FMF)
override {
1048 return Impl.getIntrinsicInstrCost(ID, RetTy, Tys, FMF);
1051 ArrayRef<Value *> Args,
1052 FastMathFlags FMF)
override {
1053 return Impl.getIntrinsicInstrCost(ID, RetTy, Args, FMF);
1056 ArrayRef<Type *> Tys)
override {
1057 return Impl.getCallInstrCost(F, RetTy, Tys);
1060 return Impl.getNumberOfParts(Tp);
1063 const SCEV *Ptr)
override {
1064 return Impl.getAddressComputationCost(Ty, SE, Ptr);
1067 return Impl.getCostOfKeepingLiveOverCall(Tys);
1070 MemIntrinsicInfo &Info)
override {
1071 return Impl.getTgtMemIntrinsic(Inst, Info);
1074 Type *ExpectedType)
override {
1075 return Impl.getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
1078 const Function *Callee)
const override {
1079 return Impl.areInlineCompatible(Caller, Callee);
1082 return Impl.getLoadStoreVecRegBitWidth(AddrSpace);
1085 return Impl.isLegalToVectorizeLoad(LI);
1088 return Impl.isLegalToVectorizeStore(SI);
1092 unsigned AddrSpace)
const override {
1093 return Impl.isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,
1098 unsigned AddrSpace)
const override {
1099 return Impl.isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,
1103 unsigned ChainSizeInBytes,
1104 VectorType *VecTy)
const override {
1105 return Impl.getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);
1108 unsigned ChainSizeInBytes,
1109 VectorType *VecTy)
const override {
1110 return Impl.getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
1114 template <
typename T>
1116 : TTIImpl(new
Model<
T>(Impl)) {}
1147 : TTICallback(Arg.TTICallback) {}
1149 : TTICallback(std::move(Arg.TTICallback)) {}
1151 TTICallback = RHS.TTICallback;
1155 TTICallback = std::move(RHS.TTICallback);
1175 std::function<Result(const Function &)> TTICallback;
1189 virtual void anchor();
A parsed version of the target data layout string and methods for querying it.
TargetIRAnalysis & operator=(const TargetIRAnalysis &RHS)
The main scalar evolution driver.
ImmutablePass * createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA)
Create an analysis pass wrapper around a TTI object.
Analysis pass providing the TargetTransformInfo.
bool IsSimple
True only if this memory operation is non-volatile, non-atomic, and unordered.
An instruction for reading from memory.
TargetIRAnalysis & operator=(TargetIRAnalysis &&RHS)
unsigned short MatchingId
Class to represent function types.
Function Alias Analysis false
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
An instruction for storing to memory.
A set of analyses that are preserved following a run of a transformation pass.
The instances of the Type class are immutable: once they are created, they are never changed...
This is an important class for using LLVM in a threaded context.
This is an important base class in LLVM.
A CRTP mix-in that provides informational APIs needed for analysis passes.
TargetIRAnalysis()
Default construct a target IR analysis.
TargetIRAnalysis(const TargetIRAnalysis &Arg)
ImmutablePass class - This class is used to provide information that does not need to be run...
static GCRegistry::Add< ShadowStackGC > C("shadow-stack","Very portable GC for uncooperative code generators")
Class to represent vector types.
Class for arbitrary precision integers.
Result run(const Function &F, FunctionAnalysisManager &)
This class represents an analyzed expression in the program.
Represents a single loop in the control flow graph.
TargetIRAnalysis(TargetIRAnalysis &&Arg)
API to communicate dependencies between analyses during invalidation.
TargetTransformInfo Result
LLVM Value Representation.
Print MemDeps of function.
Value * PtrVal
This is the pointer that the intrinsic is loading from or storing to.
Convenience struct for specifying and reasoning about fast-math flags.
A container for analyses that lazily runs them and caches their results.
This header defines various interfaces for pass management in LLVM.
Information about a load/store intrinsic defined by the target.
Fast - This calling convention attempts to make calls as fast as possible (e.g. by passing things in registers).
A special type used by analysis passes to provide an address that identifies that particular analysis...
A wrapper class for inspecting calls to intrinsic functions.