#define DEBUG_TYPE "tti"
    cl::desc("Recognize reduction patterns."));

    cl::desc("Use this to override the target cache line size when "
             "specified by the user."));

    cl::desc("Use this to override the target's minimum page size."));

        "Use this to override the target's predictable branch threshold (%)."));
    std::unique_ptr<const TargetTransformInfoImplBase> Impl)

      ScalarizationCost(ScalarizationCost), LibInfo(LibInfo) {

    FMF = FPMO->getFastMathFlags();

  ParamTys.insert(ParamTys.begin(), FTy->param_begin(), FTy->param_end());

    : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
  ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());

    : RetTy(Ty), IID(Id) {
  Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
  ParamTys.reserve(Arguments.size());

    : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost),
  ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
  Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
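// HardwareLoopInfo::isHardwareLoopCandidate: inspect the loop's exiting
// blocks, latch, predecessors and exit count to decide whether the loop can be
// converted into a hardware loop.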
  L->getExitingBlocks(ExitingBlocks);

  if (!L->isLoopLatch(BB)) {

  if (ConstEC->getValue()->isZero())

  bool NotAlways = false;

  if (!L->contains(Pred))

  if (!BI->isConditional())
    : TTIImpl(std::make_unique<NoTTIImpl>(DL)) {}

    : TTIImpl(std::move(Arg.TTIImpl)) {}

  TTIImpl = std::move(RHS.TTIImpl);
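// The TargetTransformInfo member functions below are thin wrappers that
// forward each query to the underlying TTIImpl; wrappers that compute an
// InstructionCost assert that the result is never negative.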
  return TTIImpl->getInliningThresholdMultiplier();

  return TTIImpl->getInliningCostBenefitAnalysisSavingsMultiplier();

  return TTIImpl->getInliningCostBenefitAnalysisProfitableMultiplier();

  return TTIImpl->getInliningLastCallToStaticBonus();

  return TTIImpl->adjustInliningThreshold(CB);

  return TTIImpl->getCallerAllocaCost(CB, AI);

  return TTIImpl->getInlinerVectorBonusPercent();

  return TTIImpl->getGEPCost(PointeeType, Ptr, Operands, AccessType, CostKind);

         "If pointers have same base address it has to be provided.");
  return TTIImpl->getPointersChainCost(Ptrs, Base, Info, AccessTy, CostKind);

  return TTIImpl->getEstimatedNumberOfCaseClusters(SI, JTSize, PSI, BFI);

         "TTI should not produce negative costs!");

             : TTIImpl->getPredictableBranchThreshold();

  return TTIImpl->getBranchMispredictPenalty();

  return TTIImpl->hasBranchDivergence(F);
  if (Call->hasFnAttr(Attribute::NoDivergenceSource))

  return TTIImpl->isSourceOfDivergence(V);

  return TTIImpl->isAlwaysUniform(V);

    unsigned ToAS) const {
  return TTIImpl->isValidAddrSpaceCast(FromAS, ToAS);

    unsigned ToAS) const {
  return TTIImpl->addrspacesMayAlias(FromAS, ToAS);

  return TTIImpl->getFlatAddressSpace();

  return TTIImpl->collectFlatAddressOperands(OpIndexes, IID);

    unsigned ToAS) const {
  return TTIImpl->isNoopAddrSpaceCast(FromAS, ToAS);

  return TTIImpl->canHaveNonUndefGlobalInitializerInAddressSpace(AS);

  return TTIImpl->getAssumedAddrSpace(V);

  return TTIImpl->isSingleThreaded();

std::pair<const Value *, unsigned>

  return TTIImpl->getPredicatedAddrSpace(V);

  return TTIImpl->rewriteIntrinsicWithAddressSpace(II, OldV, NewV);

  return TTIImpl->isLoweredToCall(F);

  return TTIImpl->isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);

  return TTIImpl->getEpilogueVectorizationMinVF();

  return TTIImpl->preferPredicateOverEpilogue(TFI);

    bool IVUpdateMayOverflow) const {
  return TTIImpl->getPreferredTailFoldingStyle(IVUpdateMayOverflow);
std::optional<Instruction *>

  return TTIImpl->instCombineIntrinsic(IC, II);

    bool &KnownBitsComputed) const {
  return TTIImpl->simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,

    SimplifyAndSetOp) const {
  return TTIImpl->simplifyDemandedVectorEltsIntrinsic(
      IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,

  return TTIImpl->getUnrollingPreferences(L, SE, UP, ORE);

  return TTIImpl->getPeelingPreferences(L, SE, PP);
  return TTIImpl->isLegalAddImmediate(Imm);

  return TTIImpl->isLegalAddScalableImmediate(Imm);

  return TTIImpl->isLegalICmpImmediate(Imm);

    bool HasBaseReg, int64_t Scale,
    int64_t ScalableOffset) const {
  return TTIImpl->isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
                                        Scale, AddrSpace, I, ScalableOffset);

  return TTIImpl->isLSRCostLess(C1, C2);

  return TTIImpl->isNumRegsMajorCostOfLSR();

  return TTIImpl->shouldDropLSRSolutionIfLessProfitable();

  return TTIImpl->isProfitableLSRChainElement(I);

  return TTIImpl->canMacroFuseCmp();

  return TTIImpl->canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);

  return TTIImpl->getPreferredAddressingMode(L, SE);

  return TTIImpl->isLegalMaskedStore(DataType, Alignment, AddressSpace);

  return TTIImpl->isLegalMaskedLoad(DataType, Alignment, AddressSpace);
    Align Alignment) const {
  return TTIImpl->isLegalNTStore(DataType, Alignment);

  return TTIImpl->isLegalNTLoad(DataType, Alignment);

  return TTIImpl->isLegalBroadcastLoad(ElementTy, NumElements);

    Align Alignment) const {
  return TTIImpl->isLegalMaskedGather(DataType, Alignment);

    VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
  return TTIImpl->isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask);

    Align Alignment) const {
  return TTIImpl->isLegalMaskedScatter(DataType, Alignment);

    Align Alignment) const {
  return TTIImpl->forceScalarizeMaskedGather(DataType, Alignment);

    Align Alignment) const {
  return TTIImpl->forceScalarizeMaskedScatter(DataType, Alignment);

    Align Alignment) const {
  return TTIImpl->isLegalMaskedCompressStore(DataType, Alignment);

    Align Alignment) const {
  return TTIImpl->isLegalMaskedExpandLoad(DataType, Alignment);

    Align Alignment) const {
  return TTIImpl->isLegalStridedLoadStore(DataType, Alignment);

    unsigned AddrSpace) const {
  return TTIImpl->isLegalInterleavedAccessType(VTy, Factor, Alignment,

    Type *DataType) const {
  return TTIImpl->isLegalMaskedVectorHistogram(AddrType, DataType);
  return TTIImpl->enableOrderedReductions();

  return TTIImpl->hasDivRemOp(DataType, IsSigned);

    unsigned AddrSpace) const {
  return TTIImpl->hasVolatileVariant(I, AddrSpace);

  return TTIImpl->prefersVectorizedAddressing();

    int64_t Scale, unsigned AddrSpace) const {
      Ty, BaseGV, BaseOffset, HasBaseReg, Scale, AddrSpace);
  assert(Cost >= 0 && "TTI should not produce negative costs!");

  return TTIImpl->LSRWithInstrQueries();

  return TTIImpl->isTruncateFree(Ty1, Ty2);

  return TTIImpl->isProfitableToHoist(I);

  return TTIImpl->isTypeLegal(Ty);

  return TTIImpl->getRegUsageForType(Ty);

  return TTIImpl->shouldBuildLookupTables();

  return TTIImpl->shouldBuildLookupTablesForConstant(C);

  return TTIImpl->shouldBuildRelLookupTables();

  return TTIImpl->useColdCCForColdCall(F);

  return TTIImpl->isTargetIntrinsicTriviallyScalarizable(ID);

  return TTIImpl->isTargetIntrinsicWithScalarOpAtArg(ID, ScalarOpdIdx);

  return TTIImpl->isTargetIntrinsicWithOverloadTypeAtArg(ID, OpdIdx);

  return TTIImpl->isTargetIntrinsicWithStructReturnOverloadAtField(ID, RetIdx);

  return TTIImpl->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,

  return TTIImpl->getOperandsScalarizationOverhead(Tys, CostKind);

  return TTIImpl->supportsEfficientVectorElementLoadStore();

  return TTIImpl->supportsTailCalls();

  return TTIImpl->supportsTailCallFor(CB);
    bool LoopHasReductions) const {
  return TTIImpl->enableAggressiveInterleaving(LoopHasReductions);

  return TTIImpl->enableMemCmpExpansion(OptSize, IsZeroCmp);

  return TTIImpl->enableSelectOptimize();

  return TTIImpl->shouldTreatInstructionLikeSelect(I);

  return TTIImpl->enableInterleavedAccessVectorization();

  return TTIImpl->enableMaskedInterleavedAccessVectorization();

  return TTIImpl->isFPVectorizationPotentiallyUnsafe();

    unsigned *Fast) const {
  return TTIImpl->allowsMisalignedMemoryAccesses(Context, BitWidth,

  return TTIImpl->getPopcntSupport(IntTyWidthInBit);

  return TTIImpl->haveFastSqrt(Ty);

  return TTIImpl->isExpensiveToSpeculativelyExecute(I);

  return TTIImpl->isFCmpOrdCheaperThanFCmpZero(Ty);

  assert(Cost >= 0 && "TTI should not produce negative costs!");
  assert(Cost >= 0 && "TTI should not produce negative costs!");

  assert(Cost >= 0 && "TTI should not produce negative costs!");

    unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty,
      TTIImpl->getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind, Inst);
  assert(Cost >= 0 && "TTI should not produce negative costs!");

      TTIImpl->getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");

  return TTIImpl->preferToKeepConstantsAttached(Inst, Fn);

  return TTIImpl->getNumberOfRegisters(ClassID);

    bool IsStore) const {
  return TTIImpl->hasConditionalLoadStoreForType(Ty, IsStore);

  return TTIImpl->getRegisterClassForType(Vector, Ty);

  return TTIImpl->getRegisterClassName(ClassID);

  return TTIImpl->getRegisterBitWidth(K);

  return TTIImpl->getMinVectorRegisterBitWidth();

  return TTIImpl->getMaxVScale();

  return TTIImpl->getVScaleForTuning();

  return TTIImpl->isVScaleKnownToBeAPowerOfTwo();
  return TTIImpl->shouldMaximizeVectorBandwidth(K);

    bool IsScalable) const {
  return TTIImpl->getMinimumVF(ElemWidth, IsScalable);

    unsigned Opcode) const {
  return TTIImpl->getMaximumVF(ElemWidth, Opcode);

    Type *ScalarValTy) const {
  return TTIImpl->getStoreMinimumVF(VF, ScalarMemTy, ScalarValTy);

    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
  return TTIImpl->shouldConsiderAddressTypePromotion(
      I, AllowPromotionWithoutCommonHeader);

             : TTIImpl->getCacheLineSize();

std::optional<unsigned>

  return TTIImpl->getCacheSize(Level);

std::optional<unsigned>

  return TTIImpl->getCacheAssociativity(Level);

             : TTIImpl->getMinPageSize();

  return TTIImpl->getPrefetchDistance();

    unsigned NumMemAccesses, unsigned NumStridedMemAccesses,
    unsigned NumPrefetches, bool HasCall) const {
  return TTIImpl->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
                                       NumPrefetches, HasCall);

  return TTIImpl->getMaxPrefetchIterationsAhead();

  return TTIImpl->enableWritePrefetching();

  return TTIImpl->shouldPrefetchAddressSpace(AS);

    unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType,
  return TTIImpl->getPartialReductionCost(Opcode, InputTypeA, InputTypeB,
                                          AccumType, VF, OpAExtend, OpBExtend,

  return TTIImpl->getMaxInterleaveFactor(VF);
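// Operand-info classification (getOperandInfo): constant operands are checked
// for uniform/splat values and for (negated) power-of-two elements so that
// targets can model cheaper lowerings; the result is returned as
// {OpInfo, OpProps}.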
    if (CI->getValue().isPowerOf2())

    else if (CI->getValue().isNegatedPowerOf2())

    if (ShuffleInst->isZeroEltSplat())

      if (CI->getValue().isPowerOf2())

      else if (CI->getValue().isNegatedPowerOf2())

    bool AllPow2 = true, AllNegPow2 = true;
    for (uint64_t I = 0, E = CDS->getNumElements(); I != E; ++I) {

        AllPow2 &= CI->getValue().isPowerOf2();
        AllNegPow2 &= CI->getValue().isNegatedPowerOf2();
        if (AllPow2 || AllNegPow2)

      AllPow2 = AllNegPow2 = false;

  return {OpInfo, OpProps};
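// For frem, if TargetLibraryInfo reports a vectorizable math-library function
// for the scalar type, the operation is expected to be lowered to that call,
// so it is costed as a call rather than as a native arithmetic instruction.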
  if (TLibInfo && Opcode == Instruction::FRem) {

        TLibInfo->getLibFunc(Instruction::FRem, Ty->getScalarType(), Func) &&

      TTIImpl->getArithmeticInstrCost(Opcode, Ty, CostKind,
  assert(Cost >= 0 && "TTI should not produce negative costs!");
    VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
      TTIImpl->getAltInstrCost(VecTy, Opcode0, Opcode1, OpcodeMask, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");

         "Expected the Mask to match the return size if given");
         "Expected the same scalar types");
      Kind, DstTy, SrcTy, Mask, CostKind, Index, SubTp, Args, CxtI);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  case Instruction::CastOps::ZExt:

  case Instruction::CastOps::SExt:
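// getCastContextHint helper: classify the memory access feeding an extend (or
// consuming a truncate) as a normal, masked, or gather/scatter access so the
// cast can be costed in the right context.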
  auto getLoadStoreKind = [](const Value *V, unsigned LdStOp, unsigned MaskedOp,
                             unsigned GatScatOp) {

    if (I->getOpcode() == LdStOp)

      if (II->getIntrinsicID() == MaskedOp)

      if (II->getIntrinsicID() == GatScatOp)

  switch (I->getOpcode()) {
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPExt:
    return getLoadStoreKind(I->getOperand(0), Instruction::Load,
                            Intrinsic::masked_load, Intrinsic::masked_gather);
  case Instruction::Trunc:
  case Instruction::FPTrunc:

      return getLoadStoreKind(*I->user_begin(), Instruction::Store,
                              Intrinsic::masked_store,
                              Intrinsic::masked_scatter);
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
      TTIImpl->getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");

      TTIImpl->getExtractWithExtendCost(Opcode, Dst, VecTy, Index, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");

  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  assert(Cost >= 0 && "TTI should not produce negative costs!");

  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
      Opcode, ValTy, CondTy, VecPred, CostKind, Op1Info, Op2Info, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");

  assert((Opcode == Instruction::InsertElement ||
          Opcode == Instruction::ExtractElement) &&
         "Expecting Opcode to be insertelement/extractelement.");
      TTIImpl->getVectorInstrCost(Opcode, Val, CostKind, Index, Op0, Op1);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
    ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx) const {
  assert((Opcode == Instruction::InsertElement ||
          Opcode == Instruction::ExtractElement) &&
         "Expecting Opcode to be insertelement/extractelement.");
      Opcode, Val, CostKind, Index, Scalar, ScalarUserAndIdx);
  assert(Cost >= 0 && "TTI should not produce negative costs!");

    unsigned Index) const {
  assert(Cost >= 0 && "TTI should not produce negative costs!");

    unsigned Index) const {
      TTIImpl->getIndexedVectorInstrCostFromEnd(Opcode, Val, CostKind, Index);
  assert(Cost >= 0 && "TTI should not produce negative costs!");

  assert((Opcode == Instruction::InsertValue ||
          Opcode == Instruction::ExtractValue) &&
         "Expecting Opcode to be insertvalue/extractvalue.");
  assert(Cost >= 0 && "TTI should not produce negative costs!");

    Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts,
      EltTy, ReplicationFactor, VF, DemandedDstElts, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");

  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  assert(Cost >= 0 && "TTI should not produce negative costs!");

  assert(Cost >= 0 && "TTI should not produce negative costs!");
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
      Opcode, DataTy, Ptr, VariableMask, Alignment, CostKind, I);
         "TTI should not produce negative costs!");

    unsigned Opcode, Type *DataTy, bool VariableMask, Align Alignment,
      Opcode, DataTy, VariableMask, Alignment, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");

    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
      Opcode, DataTy, Ptr, VariableMask, Alignment, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");

    bool UseMaskForCond, bool UseMaskForGaps) const {
      UseMaskForCond, UseMaskForGaps);
  assert(Cost >= 0 && "TTI should not produce negative costs!");

  assert(Cost >= 0 && "TTI should not produce negative costs!");

  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return TTIImpl->getNumberOfParts(Tp);

      TTIImpl->getAddressComputationCost(PtrTy, SE, Ptr, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");

  assert(Cost >= 0 && "TTI should not produce negative costs!");

  return TTIImpl->getMaxMemIntrinsicInlineSizeThreshold();

    unsigned Opcode, VectorType *Ty, std::optional<FastMathFlags> FMF,
      TTIImpl->getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");

      TTIImpl->getMinMaxReductionCost(IID, Ty, FMF, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");

  return TTIImpl->getExtendedReductionCost(Opcode, IsUnsigned, ResTy, Ty, FMF,

    bool IsUnsigned, unsigned RedOpcode, Type *ResTy, VectorType *Ty,
  return TTIImpl->getMulAccReductionCost(IsUnsigned, RedOpcode, ResTy, Ty,

  return TTIImpl->getCostOfKeepingLiveOverCall(Tys);
  return TTIImpl->getTgtMemIntrinsic(Inst, Info);

  return TTIImpl->getAtomicMemIntrinsicMaxElementSize();

  return TTIImpl->getOrCreateResultFromMemIntrinsic(Inst, ExpectedType,

    unsigned DestAddrSpace, Align SrcAlign, Align DestAlign,
    std::optional<uint32_t> AtomicElementSize) const {
  return TTIImpl->getMemcpyLoopLoweringType(Context, Length, SrcAddrSpace,
                                            DestAddrSpace, SrcAlign, DestAlign,

    unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
    std::optional<uint32_t> AtomicCpySize) const {
  TTIImpl->getMemcpyLoopResidualLoweringType(
      OpsOut, Context, RemainingBytes, SrcAddrSpace, DestAddrSpace, SrcAlign,
      DestAlign, AtomicCpySize);

  return TTIImpl->areInlineCompatible(Caller, Callee);

    unsigned DefaultCallPenalty) const {
  return TTIImpl->getInlineCallPenalty(F, Call, DefaultCallPenalty);
  return TTIImpl->areTypesABICompatible(Caller, Callee, Types);

  return TTIImpl->isIndexedLoadLegal(Mode, Ty);

  return TTIImpl->isIndexedStoreLegal(Mode, Ty);

  return TTIImpl->getLoadStoreVecRegBitWidth(AS);

  return TTIImpl->isLegalToVectorizeLoad(LI);

  return TTIImpl->isLegalToVectorizeStore(SI);

    unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
  return TTIImpl->isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,

    unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
  return TTIImpl->isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,

  return TTIImpl->isLegalToVectorizeReduction(RdxDesc, VF);

  return TTIImpl->isElementTypeLegalForScalableVector(Ty);

    unsigned ChainSizeInBytes,
  return TTIImpl->getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);

    unsigned ChainSizeInBytes,
  return TTIImpl->getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);

    bool IsEpilogue) const {
  return TTIImpl->preferFixedOverScalableIfEqualCost(IsEpilogue);

  return TTIImpl->preferInLoopReduction(Kind, Ty);

  return TTIImpl->preferAlternateOpcodeVectorization();

  return TTIImpl->preferPredicatedReductionSelect();

  return TTIImpl->preferEpilogueVectorization();

  return TTIImpl->shouldConsiderVectorizationRegPressure();

  return TTIImpl->getVPLegalizationStrategy(VPI);

  return TTIImpl->hasArmWideBranch(Thumb);

  return TTIImpl->getFeatureMask(F);

  return TTIImpl->isMultiversionedFunction(F);

  return TTIImpl->getMaxNumArgs();

  return TTIImpl->shouldExpandReduction(II);

  return TTIImpl->getPreferredExpandedReductionShuffle(II);

  return TTIImpl->getGISelRematGlobalCost();

  return TTIImpl->getMinTripCountTailFoldingThreshold();

  return TTIImpl->supportsScalableVectors();

  return TTIImpl->enableScalableVectorization();

  return TTIImpl->hasActiveVectorLength();

  return TTIImpl->isProfitableToSinkOperands(I, OpsToSink);

  return TTIImpl->isVectorShiftByScalarCheap(Ty);

  return TTIImpl->getNumBytesToPadGlobalArray(Size, ArrayType);

  return TTIImpl->collectKernelLaunchBounds(F, LB);

  return TTIImpl->allowVectorElementIndexingUsingGEP();
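// TargetIRAnalysis wraps a callback that produces the target-specific TTI for
// a function; the default callback simply constructs a conservative TTI from
// the function's DataLayout.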
    : TTICallback(std::move(TTICallback)) {}

  assert(!F.isIntrinsic() && "Should not request TTI for intrinsics");
  return TTICallback(F);

  return Result(F.getDataLayout());

                "Target Transform Information", false, true)

  TTI = TIRA.run(F, DummyFAM);