#define DEBUG_TYPE "tti"
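// Everything below forwards to the polymorphic TTIImpl member, which wraps a
// target-specific implementation (or NoTTIImpl when no target information is
// available) behind the Concept/Model type-erasure pair declared in
// TargetTransformInfo.h.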
TargetTransformInfo::TargetTransformInfo(const DataLayout &DL)
    : TTIImpl(new Model<NoTTIImpl>(NoTTIImpl(DL))) {}
TargetTransformInfo::TargetTransformInfo(TargetTransformInfo &&Arg)
    : TTIImpl(std::move(Arg.TTIImpl)) {}
TTIImpl = std::move(RHS.TTIImpl);
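// Each cost query below follows the same pattern: forward to the
// implementation, assert that the target never reports a negative cost, and
// return the result.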
int Cost = TTIImpl->getOperationCost(Opcode, Ty, OpTy);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
int Cost = TTIImpl->getCallCost(FTy, NumArgs);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
int Cost = TTIImpl->getCallCost(F, Arguments);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
return TTIImpl->getInliningThresholdMultiplier();
return TTIImpl->getGEPCost(PointeeType, Ptr, Operands);
int Cost = TTIImpl->getIntrinsicCost(IID, RetTy, Arguments);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
int Cost = TTIImpl->getUserCost(U);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
return TTIImpl->hasBranchDivergence();
return TTIImpl->isSourceOfDivergence(V);
return TTIImpl->isLoweredToCall(F);
return TTIImpl->getUnrollingPreferences(L, UP);
return TTIImpl->isLegalAddImmediate(Imm);
return TTIImpl->isLegalICmpImmediate(Imm);
bool TargetTransformInfo::isLegalAddressingMode(
    Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg,
    int64_t Scale, unsigned AddrSpace) const {
  return TTIImpl->isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
                                        Scale, AddrSpace);
}
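// Masked memory operation legality queries: whether the target can lower
// masked store/load and gather/scatter accesses for the given data type.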
return TTIImpl->isLegalMaskedStore(DataType);
return TTIImpl->isLegalMaskedLoad(DataType);
return TTIImpl->isLegalMaskedGather(DataType);
return TTIImpl->isLegalMaskedScatter(DataType);
int TargetTransformInfo::getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                              int64_t BaseOffset, bool HasBaseReg,
                                              int64_t Scale,
                                              unsigned AddrSpace) const {
  int Cost = TTIImpl->getScalingFactorCost(Ty, BaseGV, BaseOffset, HasBaseReg,
                                           Scale, AddrSpace);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}
return TTIImpl->isFoldableMemAccessOffset(I, Offset);
return TTIImpl->isTruncateFree(Ty1, Ty2);
return TTIImpl->isProfitableToHoist(I);
return TTIImpl->isTypeLegal(Ty);
return TTIImpl->getJumpBufAlignment();
return TTIImpl->getJumpBufSize();
return TTIImpl->shouldBuildLookupTables();
return TTIImpl->shouldBuildLookupTablesForConstant(C);
return TTIImpl->enableAggressiveInterleaving(LoopHasReductions);
return TTIImpl->enableInterleavedAccessVectorization();
return TTIImpl->isFPVectorizationPotentiallyUnsafe();
return TTIImpl->allowsMisalignedMemoryAccesses(Context, BitWidth, AddressSpace,
                                               Alignment, Fast);
return TTIImpl->getPopcntSupport(IntTyWidthInBit);
return TTIImpl->haveFastSqrt(Ty);
int Cost = TTIImpl->getFPOpCost(Ty);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
int Cost = TTIImpl->getIntImmCodeSizeCost(Opcode, Idx, Imm, Ty);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
int Cost = TTIImpl->getIntImmCost(Imm, Ty);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
int Cost = TTIImpl->getIntImmCost(Opcode, Idx, Imm, Ty);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
int Cost = TTIImpl->getIntImmCost(IID, Idx, Imm, Ty);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
return TTIImpl->getNumberOfRegisters(Vector);
return TTIImpl->getRegisterBitWidth(Vector);
return TTIImpl->getCacheLineSize();
return TTIImpl->getPrefetchDistance();
return TTIImpl->getMinPrefetchStride();
return TTIImpl->getMaxPrefetchIterationsAhead();
return TTIImpl->getMaxInterleaveFactor(VF);
int Cost = TTIImpl->getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                           Opd1PropInfo, Opd2PropInfo, Args);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
int Cost = TTIImpl->getShuffleCost(Kind, Ty, Index, SubTp);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
int Cost = TTIImpl->getCastInstrCost(Opcode, Dst, Src);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
int TargetTransformInfo::getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                                  VectorType *VecTy,
                                                  unsigned Index) const {
  int Cost = TTIImpl->getExtractWithExtendCost(Opcode, Dst, VecTy, Index);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}
int Cost = TTIImpl->getCFInstrCost(Opcode);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
int TargetTransformInfo::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                            Type *CondTy) const {
  int Cost = TTIImpl->getCmpSelInstrCost(Opcode, ValTy, CondTy);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}
int TargetTransformInfo::getVectorInstrCost(unsigned Opcode, Type *Val,
                                            unsigned Index) const {
  int Cost = TTIImpl->getVectorInstrCost(Opcode, Val, Index);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}
int Cost = TTIImpl->getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
int Cost = TTIImpl->getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
int TargetTransformInfo::getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                                Value *Ptr, bool VariableMask,
                                                unsigned Alignment) const {
  int Cost = TTIImpl->getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                             Alignment);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}
int Cost = TTIImpl->getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                               Alignment, AddressSpace);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
int Cost = TTIImpl->getIntrinsicInstrCost(ID, RetTy, Tys, FMF);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
int Cost = TTIImpl->getIntrinsicInstrCost(ID, RetTy, Args, FMF);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
int Cost = TTIImpl->getCallInstrCost(F, RetTy, Tys);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
return TTIImpl->getNumberOfParts(Tp);
int Cost = TTIImpl->getAddressComputationCost(Tp, SE, Ptr);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
int TargetTransformInfo::getReductionCost(unsigned Opcode, Type *Ty,
                                          bool IsPairwiseForm) const {
  int Cost = TTIImpl->getReductionCost(Opcode, Ty, IsPairwiseForm);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}
return TTIImpl->getCostOfKeepingLiveOverCall(Tys);
return TTIImpl->getTgtMemIntrinsic(Inst, Info);
return TTIImpl->getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
return TTIImpl->areInlineCompatible(Caller, Callee);
return TTIImpl->getLoadStoreVecRegBitWidth(AS);
return TTIImpl->isLegalToVectorizeLoad(LI);
return TTIImpl->isLegalToVectorizeStore(SI);
bool TargetTransformInfo::isLegalToVectorizeLoadChain(
    unsigned ChainSizeInBytes, unsigned Alignment, unsigned AddrSpace) const {
  return TTIImpl->isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,
                                              AddrSpace);
}
bool TargetTransformInfo::isLegalToVectorizeStoreChain(
    unsigned ChainSizeInBytes, unsigned Alignment, unsigned AddrSpace) const {
  return TTIImpl->isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,
                                               AddrSpace);
}
unsigned TargetTransformInfo::getLoadVectorFactor(unsigned VF, unsigned LoadSize,
    unsigned ChainSizeInBytes, VectorType *VecTy) const {
  return TTIImpl->getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);
}
unsigned TargetTransformInfo::getStoreVectorFactor(unsigned VF, unsigned StoreSize,
    unsigned ChainSizeInBytes, VectorType *VecTy) const {
  return TTIImpl->getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
}
TargetIRAnalysis::TargetIRAnalysis(std::function<Result(const Function &)> TTICallback)
    : TTICallback(std::move(TTICallback)) {}
return TTICallback(F);
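// Register the legacy TargetTransformInfoWrapperPass under the "tti" argument;
// the trailing flags mark it as not CFG-only and as an analysis pass.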
486 "Target Transform Information",
false,
true)
FunctionAnalysisManager DummyFAM;
TTI = TIRA.run(F, DummyFAM);
return *TTI;
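// Illustrative usage (a sketch, not part of this file): a legacy pass that
// declares a dependency on TargetTransformInfoWrapperPass in getAnalysisUsage()
// can query the per-function TTI like this, where F is the Function being
// processed and Callee is a hypothetical const Function*:
//
//   TargetTransformInfo &TTI =
//       getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
//   if (TTI.isLoweredToCall(Callee))
//     ; // e.g. account for call overhead in a cost model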