//===- llvm/Analysis/TargetTransformInfo.cpp ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfoImpl.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include <optional>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "tti"

static cl::opt<bool> EnableReduxCost("costmodel-reduxcost", cl::init(false),
                                     cl::Hidden,
                                     cl::desc("Recognize reduction patterns."));

static cl::opt<unsigned> CacheLineSize(
    "cache-line-size", cl::init(0), cl::Hidden,
    cl::desc("Use this to override the target cache line size when "
             "specified by the user."));

static cl::opt<unsigned> MinPageSize(
    "min-page-size", cl::init(0), cl::Hidden,
    cl::desc("Use this to override the target's minimum page size."));

static cl::opt<unsigned> PredictableBranchThreshold(
    "predictable-branch-threshold", cl::init(99), cl::Hidden,
    cl::desc(
        "Use this to override the target's predictable branch threshold (%)."));

namespace {
/// No-op implementation of the TTI interface using the utility base
/// classes.
///
/// This is used when no target specific information is available.
struct NoTTIImpl : TargetTransformInfoImplCRTPBase<NoTTIImpl> {
  explicit NoTTIImpl(const DataLayout &DL)
      : TargetTransformInfoImplCRTPBase<NoTTIImpl>(DL) {}
};
} // namespace

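// A TargetTransformInfo constructed directly from a DataLayout (see the
// constructor further below) is backed by this NoTTIImpl, so every cost query
// falls through to the conservative target-independent defaults of the CRTP
// base class. Sketch, with M standing in for some llvm::Module:
//
//   TargetTransformInfo DefaultTTI(M.getDataLayout());
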
bool HardwareLoopInfo::canAnalyze(LoopInfo &LI) {
  // If the loop has irreducible control flow, it cannot be converted to a
  // hardware loop.
  LoopBlocksRPO RPOT(L);
  RPOT.perform(&LI);
  if (containsIrreducibleCFG<const BasicBlock *>(RPOT, LI))
    return false;
  return true;
}

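// For illustration: the smallest irreducible shape is a two-block cycle with
// two entries, e.g. entry->A, entry->B, A->B, B->A. Neither A nor B dominates
// the other, so there is no natural loop header and no single program point
// where a hardware-loop counter could be installed.
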
IntrinsicCostAttributes::IntrinsicCostAttributes(
    Intrinsic::ID Id, const CallBase &CI, InstructionCost ScalarizationCost,
    bool TypeBasedOnly)
    : II(dyn_cast<IntrinsicInst>(&CI)), RetTy(CI.getType()), IID(Id),
      ScalarizationCost(ScalarizationCost) {

  if (const auto *FPMO = dyn_cast<FPMathOperator>(&CI))
    FMF = FPMO->getFastMathFlags();

  if (!TypeBasedOnly)
    Arguments.insert(Arguments.begin(), CI.arg_begin(), CI.arg_end());
  FunctionType *FTy = CI.getCalledFunction()->getFunctionType();
  ParamTys.insert(ParamTys.begin(), FTy->param_begin(), FTy->param_end());
}

IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
                                                 ArrayRef<Type *> Tys,
                                                 FastMathFlags Flags,
                                                 const IntrinsicInst *I,
                                                 InstructionCost ScalarCost)
    : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
  ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
}

IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *Ty,
                                                 ArrayRef<const Value *> Args)
    : RetTy(Ty), IID(Id) {

  Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
  ParamTys.reserve(Arguments.size());
  for (unsigned Idx = 0, Size = Arguments.size(); Idx != Size; ++Idx)
    ParamTys.push_back(Arguments[Idx]->getType());
}

IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
                                                 ArrayRef<const Value *> Args,
                                                 ArrayRef<Type *> Tys,
                                                 FastMathFlags Flags,
                                                 const IntrinsicInst *I,
                                                 InstructionCost ScalarCost)
    : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
  ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
  Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
}

HardwareLoopInfo::HardwareLoopInfo(Loop *L) : L(L) {
  // Match default options:
  // - hardware-loop-counter-bitwidth = 32
  // - hardware-loop-decrement = 1
  CountType = Type::getInt32Ty(L->getHeader()->getContext());
  LoopDecrement = ConstantInt::get(CountType, 1);
}

bool HardwareLoopInfo::isHardwareLoopCandidate(ScalarEvolution &SE,
                                               LoopInfo &LI, DominatorTree &DT,
                                               bool ForceNestedLoop,
                                               bool ForceHardwareLoopPHI) {
  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  for (BasicBlock *BB : ExitingBlocks) {
    // If we pass the updated counter back through a phi, we need to know
    // which latch the updated value will be coming from.
    if (!L->isLoopLatch(BB)) {
      if (ForceHardwareLoopPHI || CounterInReg)
        continue;
    }

    const SCEV *EC = SE.getExitCount(L, BB);
    if (isa<SCEVCouldNotCompute>(EC))
      continue;
    if (const SCEVConstant *ConstEC = dyn_cast<SCEVConstant>(EC)) {
      if (ConstEC->getValue()->isZero())
        continue;
    } else if (!SE.isLoopInvariant(EC, L))
      continue;

    if (SE.getTypeSizeInBits(EC->getType()) > CountType->getBitWidth())
      continue;

    // If this exiting block is contained in a nested loop, it is not eligible
    // for insertion of the branch-and-decrement since the inner loop would
    // end up messing up the value in the CTR.
    if (!IsNestingLegal && LI.getLoopFor(BB) != L && !ForceNestedLoop)
      continue;

    // We now have a loop-invariant count of loop iterations (which is not the
    // constant zero) for which we know that this loop will not exit via this
    // exiting block.

    // We need to make sure that this block will run on every loop iteration.
    // For this to be true, we must dominate all blocks with backedges. Such
    // blocks are in-loop predecessors to the header block.
    bool NotAlways = false;
    for (BasicBlock *Pred : predecessors(L->getHeader())) {
      if (!L->contains(Pred))
        continue;

      if (!DT.dominates(BB, Pred)) {
        NotAlways = true;
        break;
      }
    }

    if (NotAlways)
      continue;

    // Make sure this block ends with a conditional branch.
    Instruction *TI = BB->getTerminator();
    if (!TI)
      continue;

    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (!BI->isConditional())
        continue;

      ExitBranch = BI;
    } else
      continue;

    // Note that this block may not be the loop latch block, even if the loop
    // has a latch block.
    ExitBlock = BB;
    ExitCount = EC;
    break;
  }

  if (!ExitBlock)
    return false;
  return true;
}

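// Illustrative sketch (not code from this file): a qualifying loop looks like
//
//   for (i32 i = 0; i != n; ++i)   // exit count n is loop-invariant, nonzero,
//     body();                      // and no wider than CountType (i32)
//
// with the conditional exit branch in a block that executes every iteration.
// A target such as PowerPC can then lower the compare-and-branch to a
// branch-and-decrement on its CTR counter register.
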
TargetTransformInfo::TargetTransformInfo(const DataLayout &DL)
    : TTIImpl(new Model<NoTTIImpl>(NoTTIImpl(DL))) {}

TargetTransformInfo::~TargetTransformInfo() = default;

TargetTransformInfo::TargetTransformInfo(TargetTransformInfo &&Arg)
    : TTIImpl(std::move(Arg.TTIImpl)) {}

TargetTransformInfo &TargetTransformInfo::operator=(TargetTransformInfo &&RHS) {
  TTIImpl = std::move(RHS.TTIImpl);
  return *this;
}

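// Note: TTIImpl is the type-erased Concept/Model handle set up by the
// constructors above; TargetTransformInfo is therefore a movable value type,
// and each of the wrappers below simply forwards to the target's virtual
// implementation.
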
unsigned TargetTransformInfo::getInliningThresholdMultiplier() const {
  return TTIImpl->getInliningThresholdMultiplier();
}

unsigned
TargetTransformInfo::getInliningCostBenefitAnalysisSavingsMultiplier() const {
  return TTIImpl->getInliningCostBenefitAnalysisSavingsMultiplier();
}

unsigned
TargetTransformInfo::getInliningCostBenefitAnalysisProfitableMultiplier()
    const {
  return TTIImpl->getInliningCostBenefitAnalysisProfitableMultiplier();
}

unsigned
TargetTransformInfo::adjustInliningThreshold(const CallBase *CB) const {
  return TTIImpl->adjustInliningThreshold(CB);
}

unsigned TargetTransformInfo::getCallerAllocaCost(const CallBase *CB,
                                                  const AllocaInst *AI) const {
  return TTIImpl->getCallerAllocaCost(CB, AI);
}

int TargetTransformInfo::getInlinerVectorBonusPercent() const {
  return TTIImpl->getInlinerVectorBonusPercent();
}

InstructionCost TargetTransformInfo::getGEPCost(
    Type *PointeeType, const Value *Ptr, ArrayRef<const Value *> Operands,
    Type *AccessType, TTI::TargetCostKind CostKind) const {
  return TTIImpl->getGEPCost(PointeeType, Ptr, Operands, AccessType, CostKind);
}

InstructionCost TargetTransformInfo::getPointersChainCost(
    ArrayRef<const Value *> Ptrs, const Value *Base,
    const TTI::PointersChainInfo &Info, Type *AccessTy,
    TTI::TargetCostKind CostKind) const {
  assert((Base || !Info.isSameBase()) &&
         "If pointers have same base address it has to be provided.");
  return TTIImpl->getPointersChainCost(Ptrs, Base, Info, AccessTy, CostKind);
}

unsigned TargetTransformInfo::getEstimatedNumberOfCaseClusters(
    const SwitchInst &SI, unsigned &JTSize, ProfileSummaryInfo *PSI,
    BlockFrequencyInfo *BFI) const {
  return TTIImpl->getEstimatedNumberOfCaseClusters(SI, JTSize, PSI, BFI);
}

InstructionCost
TargetTransformInfo::getInstructionCost(const User *U,
                                        ArrayRef<const Value *> Operands,
                                        enum TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getInstructionCost(U, Operands, CostKind);
  assert((CostKind == TTI::TCK_RecipThroughput || Cost >= 0) &&
         "TTI should not produce negative costs!");
  return Cost;
}

BranchProbability TargetTransformInfo::getPredictableBranchThreshold() const {
  return PredictableBranchThreshold.getNumOccurrences() > 0
             ? BranchProbability(PredictableBranchThreshold, 100)
             : TTIImpl->getPredictableBranchThreshold();
}

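// As with CacheLineSize and MinPageSize above, the command-line flag wins
// when it is given; e.g. passing -predictable-branch-threshold=95 makes any
// branch skewed by more than 95% in one direction count as predictable,
// whatever the target's default threshold is.
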
bool TargetTransformInfo::hasBranchDivergence(const Function *F) const {
  return TTIImpl->hasBranchDivergence(F);
}

bool TargetTransformInfo::isSourceOfDivergence(const Value *V) const {
  return TTIImpl->isSourceOfDivergence(V);
}

bool TargetTransformInfo::isAlwaysUniform(const Value *V) const {
  return TTIImpl->isAlwaysUniform(V);
}

bool TargetTransformInfo::isValidAddrSpaceCast(unsigned FromAS,
                                               unsigned ToAS) const {
  return TTIImpl->isValidAddrSpaceCast(FromAS, ToAS);
}

bool TargetTransformInfo::addrspacesMayAlias(unsigned FromAS,
                                             unsigned ToAS) const {
  return TTIImpl->addrspacesMayAlias(FromAS, ToAS);
}

unsigned TargetTransformInfo::getFlatAddressSpace() const {
  return TTIImpl->getFlatAddressSpace();
}

bool TargetTransformInfo::collectFlatAddressOperands(
    SmallVectorImpl<int> &OpIndexes, Intrinsic::ID IID) const {
  return TTIImpl->collectFlatAddressOperands(OpIndexes, IID);
}

bool TargetTransformInfo::isNoopAddrSpaceCast(unsigned FromAS,
                                              unsigned ToAS) const {
  return TTIImpl->isNoopAddrSpaceCast(FromAS, ToAS);
}

bool TargetTransformInfo::canHaveNonUndefGlobalInitializerInAddressSpace(
    unsigned AS) const {
  return TTIImpl->canHaveNonUndefGlobalInitializerInAddressSpace(AS);
}

unsigned TargetTransformInfo::getAssumedAddrSpace(const Value *V) const {
  return TTIImpl->getAssumedAddrSpace(V);
}

bool TargetTransformInfo::isSingleThreaded() const {
  return TTIImpl->isSingleThreaded();
}

std::pair<const Value *, unsigned>
TargetTransformInfo::getPredicatedAddrSpace(const Value *V) const {
  return TTIImpl->getPredicatedAddrSpace(V);
}

Value *TargetTransformInfo::rewriteIntrinsicWithAddressSpace(
    IntrinsicInst *II, Value *OldV, Value *NewV) const {
  return TTIImpl->rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
}

bool TargetTransformInfo::isLoweredToCall(const Function *F) const {
  return TTIImpl->isLoweredToCall(F);
}

bool TargetTransformInfo::isHardwareLoopProfitable(
    Loop *L, ScalarEvolution &SE, AssumptionCache &AC,
    TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const {
  return TTIImpl->isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
}

bool TargetTransformInfo::preferPredicateOverEpilogue(
    TailFoldingInfo *TFI) const {
  return TTIImpl->preferPredicateOverEpilogue(TFI);
}

TailFoldingStyle TargetTransformInfo::getPreferredTailFoldingStyle(
    bool IVUpdateMayOverflow) const {
  return TTIImpl->getPreferredTailFoldingStyle(IVUpdateMayOverflow);
}

std::optional<Instruction *>
TargetTransformInfo::instCombineIntrinsic(InstCombiner &IC,
                                          IntrinsicInst &II) const {
  return TTIImpl->instCombineIntrinsic(IC, II);
}

std::optional<Value *> TargetTransformInfo::simplifyDemandedUseBitsIntrinsic(
    InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
    bool &KnownBitsComputed) const {
  return TTIImpl->simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
                                                   KnownBitsComputed);
}

std::optional<Value *> TargetTransformInfo::simplifyDemandedVectorEltsIntrinsic(
    InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
    APInt &UndefElts2, APInt &UndefElts3,
    std::function<void(Instruction *, unsigned, APInt, APInt &)>
        SimplifyAndSetOp) const {
  return TTIImpl->simplifyDemandedVectorEltsIntrinsic(
      IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
      SimplifyAndSetOp);
}

void TargetTransformInfo::getUnrollingPreferences(
    Loop *L, ScalarEvolution &SE, UnrollingPreferences &UP,
    OptimizationRemarkEmitter *ORE) const {
  return TTIImpl->getUnrollingPreferences(L, SE, UP, ORE);
}

void TargetTransformInfo::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                                PeelingPreferences &PP) const {
  return TTIImpl->getPeelingPreferences(L, SE, PP);
}

bool TargetTransformInfo::isLegalAddImmediate(int64_t Imm) const {
  return TTIImpl->isLegalAddImmediate(Imm);
}

bool TargetTransformInfo::isLegalAddScalableImmediate(int64_t Imm) const {
  return TTIImpl->isLegalAddScalableImmediate(Imm);
}

bool TargetTransformInfo::isLegalICmpImmediate(int64_t Imm) const {
  return TTIImpl->isLegalICmpImmediate(Imm);
}

bool TargetTransformInfo::isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                                int64_t BaseOffset,
                                                bool HasBaseReg, int64_t Scale,
                                                unsigned AddrSpace,
                                                Instruction *I,
                                                int64_t ScalableOffset) const {
  return TTIImpl->isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
                                        Scale, AddrSpace, I, ScalableOffset);
}

bool TargetTransformInfo::isLSRCostLess(const LSRCost &C1,
                                        const LSRCost &C2) const {
  return TTIImpl->isLSRCostLess(C1, C2);
}

bool TargetTransformInfo::isNumRegsMajorCostOfLSR() const {
  return TTIImpl->isNumRegsMajorCostOfLSR();
}

bool TargetTransformInfo::shouldFoldTerminatingConditionAfterLSR() const {
  return TTIImpl->shouldFoldTerminatingConditionAfterLSR();
}

bool TargetTransformInfo::isProfitableLSRChainElement(Instruction *I) const {
  return TTIImpl->isProfitableLSRChainElement(I);
}

bool TargetTransformInfo::canMacroFuseCmp() const {
  return TTIImpl->canMacroFuseCmp();
}

bool TargetTransformInfo::canSaveCmp(Loop *L, BranchInst **BI,
                                     ScalarEvolution *SE, LoopInfo *LI,
                                     DominatorTree *DT, AssumptionCache *AC,
                                     TargetLibraryInfo *LibInfo) const {
  return TTIImpl->canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);
}

TTI::AddressingModeKind
TargetTransformInfo::getPreferredAddressingMode(const Loop *L,
                                                ScalarEvolution *SE) const {
  return TTIImpl->getPreferredAddressingMode(L, SE);
}

bool TargetTransformInfo::isLegalMaskedStore(Type *DataType,
                                             Align Alignment) const {
  return TTIImpl->isLegalMaskedStore(DataType, Alignment);
}

bool TargetTransformInfo::isLegalMaskedLoad(Type *DataType,
                                            Align Alignment) const {
  return TTIImpl->isLegalMaskedLoad(DataType, Alignment);
}

bool TargetTransformInfo::isLegalNTStore(Type *DataType,
                                         Align Alignment) const {
  return TTIImpl->isLegalNTStore(DataType, Alignment);
}

bool TargetTransformInfo::isLegalNTLoad(Type *DataType, Align Alignment) const {
  return TTIImpl->isLegalNTLoad(DataType, Alignment);
}

bool TargetTransformInfo::isLegalBroadcastLoad(Type *ElementTy,
                                               ElementCount NumElements) const {
  return TTIImpl->isLegalBroadcastLoad(ElementTy, NumElements);
}

bool TargetTransformInfo::isLegalMaskedGather(Type *DataType,
                                              Align Alignment) const {
  return TTIImpl->isLegalMaskedGather(DataType, Alignment);
}

bool TargetTransformInfo::isLegalAltInstr(
    VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
    const SmallBitVector &OpcodeMask) const {
  return TTIImpl->isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask);
}

bool TargetTransformInfo::isLegalMaskedScatter(Type *DataType,
                                               Align Alignment) const {
  return TTIImpl->isLegalMaskedScatter(DataType, Alignment);
}

bool TargetTransformInfo::forceScalarizeMaskedGather(VectorType *DataType,
                                                     Align Alignment) const {
  return TTIImpl->forceScalarizeMaskedGather(DataType, Alignment);
}

bool TargetTransformInfo::forceScalarizeMaskedScatter(VectorType *DataType,
                                                      Align Alignment) const {
  return TTIImpl->forceScalarizeMaskedScatter(DataType, Alignment);
}

bool TargetTransformInfo::isLegalMaskedCompressStore(Type *DataType,
                                                     Align Alignment) const {
  return TTIImpl->isLegalMaskedCompressStore(DataType, Alignment);
}

bool TargetTransformInfo::isLegalMaskedExpandLoad(Type *DataType,
                                                  Align Alignment) const {
  return TTIImpl->isLegalMaskedExpandLoad(DataType, Alignment);
}

bool TargetTransformInfo::isLegalStridedLoadStore(Type *DataType,
                                                  Align Alignment) const {
  return TTIImpl->isLegalStridedLoadStore(DataType, Alignment);
}

bool TargetTransformInfo::isLegalMaskedVectorHistogram(Type *AddrType,
                                                       Type *DataType) const {
  return TTIImpl->isLegalMaskedVectorHistogram(AddrType, DataType);
}

bool TargetTransformInfo::enableOrderedReductions() const {
  return TTIImpl->enableOrderedReductions();
}

bool TargetTransformInfo::hasDivRemOp(Type *DataType, bool IsSigned) const {
  return TTIImpl->hasDivRemOp(DataType, IsSigned);
}

bool TargetTransformInfo::hasVolatileVariant(Instruction *I,
                                             unsigned AddrSpace) const {
  return TTIImpl->hasVolatileVariant(I, AddrSpace);
}

bool TargetTransformInfo::prefersVectorizedAddressing() const {
  return TTIImpl->prefersVectorizedAddressing();
}

InstructionCost TargetTransformInfo::getScalingFactorCost(
    Type *Ty, GlobalValue *BaseGV, StackOffset BaseOffset, bool HasBaseReg,
    int64_t Scale, unsigned AddrSpace) const {
  InstructionCost Cost = TTIImpl->getScalingFactorCost(
      Ty, BaseGV, BaseOffset, HasBaseReg, Scale, AddrSpace);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

bool TargetTransformInfo::LSRWithInstrQueries() const {
  return TTIImpl->LSRWithInstrQueries();
}

bool TargetTransformInfo::isTruncateFree(Type *Ty1, Type *Ty2) const {
  return TTIImpl->isTruncateFree(Ty1, Ty2);
}

bool TargetTransformInfo::isProfitableToHoist(Instruction *I) const {
  return TTIImpl->isProfitableToHoist(I);
}

bool TargetTransformInfo::useAA() const { return TTIImpl->useAA(); }

bool TargetTransformInfo::isTypeLegal(Type *Ty) const {
  return TTIImpl->isTypeLegal(Ty);
}

unsigned TargetTransformInfo::getRegUsageForType(Type *Ty) const {
  return TTIImpl->getRegUsageForType(Ty);
}

bool TargetTransformInfo::shouldBuildLookupTables() const {
  return TTIImpl->shouldBuildLookupTables();
}

bool TargetTransformInfo::shouldBuildLookupTablesForConstant(
    Constant *C) const {
  return TTIImpl->shouldBuildLookupTablesForConstant(C);
}

bool TargetTransformInfo::shouldBuildRelLookupTables() const {
  return TTIImpl->shouldBuildRelLookupTables();
}

bool TargetTransformInfo::useColdCCForColdCall(Function &F) const {
  return TTIImpl->useColdCCForColdCall(F);
}

InstructionCost TargetTransformInfo::getScalarizationOverhead(
    VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
    TTI::TargetCostKind CostKind) const {
  return TTIImpl->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
                                           CostKind);
}

InstructionCost TargetTransformInfo::getOperandsScalarizationOverhead(
    ArrayRef<const Value *> Args, ArrayRef<Type *> Tys,
    TTI::TargetCostKind CostKind) const {
  return TTIImpl->getOperandsScalarizationOverhead(Args, Tys, CostKind);
}

bool TargetTransformInfo::supportsEfficientVectorElementLoadStore() const {
  return TTIImpl->supportsEfficientVectorElementLoadStore();
}

bool TargetTransformInfo::supportsTailCalls() const {
  return TTIImpl->supportsTailCalls();
}

bool TargetTransformInfo::supportsTailCallFor(const CallBase *CB) const {
  return TTIImpl->supportsTailCallFor(CB);
}

bool TargetTransformInfo::enableAggressiveInterleaving(
    bool LoopHasReductions) const {
  return TTIImpl->enableAggressiveInterleaving(LoopHasReductions);
}

TargetTransformInfo::MemCmpExpansionOptions
TargetTransformInfo::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  return TTIImpl->enableMemCmpExpansion(OptSize, IsZeroCmp);
}

bool TargetTransformInfo::enableSelectOptimize() const {
  return TTIImpl->enableSelectOptimize();
}

bool TargetTransformInfo::shouldTreatInstructionLikeSelect(
    const Instruction *I) const {
  return TTIImpl->shouldTreatInstructionLikeSelect(I);
}

bool TargetTransformInfo::enableInterleavedAccessVectorization() const {
  return TTIImpl->enableInterleavedAccessVectorization();
}

bool TargetTransformInfo::enableMaskedInterleavedAccessVectorization() const {
  return TTIImpl->enableMaskedInterleavedAccessVectorization();
}

bool TargetTransformInfo::isFPVectorizationPotentiallyUnsafe() const {
  return TTIImpl->isFPVectorizationPotentiallyUnsafe();
}

bool
TargetTransformInfo::allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                                    unsigned BitWidth,
                                                    unsigned AddressSpace,
                                                    Align Alignment,
                                                    unsigned *Fast) const {
  return TTIImpl->allowsMisalignedMemoryAccesses(Context, BitWidth,
                                                 AddressSpace, Alignment, Fast);
}

TargetTransformInfo::PopcntSupportKind
TargetTransformInfo::getPopcntSupport(unsigned IntTyWidthInBit) const {
  return TTIImpl->getPopcntSupport(IntTyWidthInBit);
}

bool TargetTransformInfo::haveFastSqrt(Type *Ty) const {
  return TTIImpl->haveFastSqrt(Ty);
}

bool TargetTransformInfo::isExpensiveToSpeculativelyExecute(
    const Instruction *I) const {
  return TTIImpl->isExpensiveToSpeculativelyExecute(I);
}

bool TargetTransformInfo::isFCmpOrdCheaperThanFCmpZero(Type *Ty) const {
  return TTIImpl->isFCmpOrdCheaperThanFCmpZero(Ty);
}

InstructionCost TargetTransformInfo::getFPOpCost(Type *Ty) const {
  InstructionCost Cost = TTIImpl->getFPOpCost(Ty);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getIntImmCodeSizeCost(unsigned Opcode,
                                                           unsigned Idx,
                                                           const APInt &Imm,
                                                           Type *Ty) const {
  InstructionCost Cost = TTIImpl->getIntImmCodeSizeCost(Opcode, Idx, Imm, Ty);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getIntImmCost(const APInt &Imm, Type *Ty,
                                   TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getIntImmCost(Imm, Ty, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getIntImmCostInst(
    unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty,
    TTI::TargetCostKind CostKind, Instruction *Inst) const {
  InstructionCost Cost =
      TTIImpl->getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind, Inst);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                         const APInt &Imm, Type *Ty,
                                         TTI::TargetCostKind CostKind) const {
  InstructionCost Cost =
      TTIImpl->getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

bool TargetTransformInfo::preferToKeepConstantsAttached(
    const Instruction &Inst, const Function &Fn) const {
  return TTIImpl->preferToKeepConstantsAttached(Inst, Fn);
}

unsigned TargetTransformInfo::getNumberOfRegisters(unsigned ClassID) const {
  return TTIImpl->getNumberOfRegisters(ClassID);
}

unsigned TargetTransformInfo::getRegisterClassForType(bool Vector,
                                                      Type *Ty) const {
  return TTIImpl->getRegisterClassForType(Vector, Ty);
}

const char *TargetTransformInfo::getRegisterClassName(unsigned ClassID) const {
  return TTIImpl->getRegisterClassName(ClassID);
}

TypeSize TargetTransformInfo::getRegisterBitWidth(
    TargetTransformInfo::RegisterKind K) const {
  return TTIImpl->getRegisterBitWidth(K);
}

unsigned TargetTransformInfo::getMinVectorRegisterBitWidth() const {
  return TTIImpl->getMinVectorRegisterBitWidth();
}

std::optional<unsigned> TargetTransformInfo::getMaxVScale() const {
  return TTIImpl->getMaxVScale();
}

std::optional<unsigned> TargetTransformInfo::getVScaleForTuning() const {
  return TTIImpl->getVScaleForTuning();
}

bool TargetTransformInfo::isVScaleKnownToBeAPowerOfTwo() const {
  return TTIImpl->isVScaleKnownToBeAPowerOfTwo();
}

bool TargetTransformInfo::shouldMaximizeVectorBandwidth(
    TargetTransformInfo::RegisterKind K) const {
  return TTIImpl->shouldMaximizeVectorBandwidth(K);
}

ElementCount TargetTransformInfo::getMinimumVF(unsigned ElemWidth,
                                               bool IsScalable) const {
  return TTIImpl->getMinimumVF(ElemWidth, IsScalable);
}

unsigned TargetTransformInfo::getMaximumVF(unsigned ElemWidth,
                                           unsigned Opcode) const {
  return TTIImpl->getMaximumVF(ElemWidth, Opcode);
}

unsigned TargetTransformInfo::getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
                                                Type *ScalarValTy) const {
  return TTIImpl->getStoreMinimumVF(VF, ScalarMemTy, ScalarValTy);
}

bool TargetTransformInfo::shouldConsiderAddressTypePromotion(
    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
  return TTIImpl->shouldConsiderAddressTypePromotion(
      I, AllowPromotionWithoutCommonHeader);
}

unsigned TargetTransformInfo::getCacheLineSize() const {
  return CacheLineSize.getNumOccurrences() > 0 ? CacheLineSize
                                               : TTIImpl->getCacheLineSize();
}

std::optional<unsigned>
TargetTransformInfo::getCacheSize(CacheLevel Level) const {
  return TTIImpl->getCacheSize(Level);
}

std::optional<unsigned>
TargetTransformInfo::getCacheAssociativity(CacheLevel Level) const {
  return TTIImpl->getCacheAssociativity(Level);
}

std::optional<unsigned> TargetTransformInfo::getMinPageSize() const {
  return MinPageSize.getNumOccurrences() > 0 ? MinPageSize
                                             : TTIImpl->getMinPageSize();
}

unsigned TargetTransformInfo::getPrefetchDistance() const {
  return TTIImpl->getPrefetchDistance();
}

unsigned TargetTransformInfo::getMinPrefetchStride(
    unsigned NumMemAccesses, unsigned NumStridedMemAccesses,
    unsigned NumPrefetches, bool HasCall) const {
  return TTIImpl->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
                                       NumPrefetches, HasCall);
}

unsigned TargetTransformInfo::getMaxPrefetchIterationsAhead() const {
  return TTIImpl->getMaxPrefetchIterationsAhead();
}

bool TargetTransformInfo::enableWritePrefetching() const {
  return TTIImpl->enableWritePrefetching();
}

bool TargetTransformInfo::shouldPrefetchAddressSpace(unsigned AS) const {
  return TTIImpl->shouldPrefetchAddressSpace(AS);
}

unsigned TargetTransformInfo::getMaxInterleaveFactor(ElementCount VF) const {
  return TTIImpl->getMaxInterleaveFactor(VF);
}

TargetTransformInfo::OperandValueInfo
TargetTransformInfo::getOperandInfo(const Value *V) {
  OperandValueKind OpInfo = OK_AnyValue;
  OperandValueProperties OpProps = OP_None;

  if (isa<ConstantInt>(V) || isa<ConstantFP>(V)) {
    if (const auto *CI = dyn_cast<ConstantInt>(V)) {
      if (CI->getValue().isPowerOf2())
        OpProps = OP_PowerOf2;
      else if (CI->getValue().isNegatedPowerOf2())
        OpProps = OP_NegatedPowerOf2;
    }
    return {OK_UniformConstantValue, OpProps};
  }

  // A broadcast shuffle creates a uniform value.
  // TODO: Add support for non-zero index broadcasts.
  // TODO: Add support for different source vector width.
  if (const auto *ShuffleInst = dyn_cast<ShuffleVectorInst>(V))
    if (ShuffleInst->isZeroEltSplat())
      OpInfo = OK_UniformValue;

  const Value *Splat = getSplatValue(V);

  // Check for a splat of a constant or for a non-uniform vector of constants
  // and check if the constant(s) are all powers of two.
  if (isa<ConstantVector>(V) || isa<ConstantDataVector>(V)) {
    OpInfo = OK_NonUniformConstantValue;
    if (Splat) {
      OpInfo = OK_UniformConstantValue;
      if (auto *CI = dyn_cast<ConstantInt>(Splat)) {
        if (CI->getValue().isPowerOf2())
          OpProps = OP_PowerOf2;
        else if (CI->getValue().isNegatedPowerOf2())
          OpProps = OP_NegatedPowerOf2;
      }
    } else if (const auto *CDS = dyn_cast<ConstantDataSequential>(V)) {
      bool AllPow2 = true, AllNegPow2 = true;
      for (unsigned I = 0, E = CDS->getNumElements(); I != E; ++I) {
        if (auto *CI = dyn_cast<ConstantInt>(CDS->getElementAsConstant(I))) {
          AllPow2 &= CI->getValue().isPowerOf2();
          AllNegPow2 &= CI->getValue().isNegatedPowerOf2();
          if (AllPow2 || AllNegPow2)
            continue;
        }
        AllPow2 = AllNegPow2 = false;
        break;
      }
      OpProps = AllPow2 ? OP_PowerOf2 : OpProps;
      OpProps = AllNegPow2 ? OP_NegatedPowerOf2 : OpProps;
    }
  }

  // Check for a splat of a uniform value. This is not loop aware, so return
  // true only for the obviously uniform cases (argument, globalvalue)
  if (Splat && (isa<Argument>(Splat) || isa<GlobalValue>(Splat)))
    OpInfo = OK_UniformValue;

  return {OpInfo, OpProps};
}

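// Example classifications: the divisor `splat (i32 8)` in
// `udiv <4 x i32> %x, splat (i32 8)` yields
// {OK_UniformConstantValue, OP_PowerOf2}, letting targets cost the division
// as a shift; a non-splat constant vector whose lanes are all powers of two
// yields {OK_NonUniformConstantValue, OP_PowerOf2} instead.
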
InstructionCost TargetTransformInfo::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    OperandValueInfo Op1Info, OperandValueInfo Op2Info,
    ArrayRef<const Value *> Args, const Instruction *CxtI,
    const TargetLibraryInfo *TLibInfo) const {

  // Use call cost for frem instructions that have platform specific vector
  // math functions, as those will be replaced with calls later by
  // SelectionDAG or the ReplaceWithVecLib pass.
  if (TLibInfo && Opcode == Instruction::FRem) {
    VectorType *VecTy = dyn_cast<VectorType>(Ty);
    LibFunc Func;
    if (VecTy &&
        TLibInfo->getLibFunc(Instruction::FRem, Ty->getScalarType(), Func) &&
        TLibInfo->isFunctionVectorizable(TLibInfo->getName(Func),
                                         VecTy->getElementCount()))
      return getCallInstrCost(nullptr, VecTy, {VecTy, VecTy}, CostKind);
  }

  InstructionCost Cost =
      TTIImpl->getArithmeticInstrCost(Opcode, Ty, CostKind,
                                      Op1Info, Op2Info,
                                      Args, CxtI);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

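// Caller-side sketch (Int32Ty and V4I32Ty are placeholder types): a
// vectorizer weighing a scalar add against a 4-wide vector add might compute
//
//   InstructionCost Scalar =
//       TTI.getArithmeticInstrCost(Instruction::Add, Int32Ty, CostKind);
//   InstructionCost Vector =
//       TTI.getArithmeticInstrCost(Instruction::Add, V4I32Ty, CostKind);
//   bool Profitable = Vector <= Scalar * 4; // one vector op replaces four
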
InstructionCost TargetTransformInfo::getAltInstrCost(
    VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
    const SmallBitVector &OpcodeMask, TTI::TargetCostKind CostKind) const {
  InstructionCost Cost =
      TTIImpl->getAltInstrCost(VecTy, Opcode0, Opcode1, OpcodeMask, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getShuffleCost(
    ShuffleKind Kind, VectorType *Ty, ArrayRef<int> Mask,
    TTI::TargetCostKind CostKind, int Index, VectorType *SubTp,
    ArrayRef<const Value *> Args, const Instruction *CxtI) const {
  InstructionCost Cost = TTIImpl->getShuffleCost(Kind, Ty, Mask, CostKind,
                                                 Index, SubTp, Args, CxtI);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

TTI::CastContextHint
TargetTransformInfo::getCastContextHint(const Instruction *I) {
  if (!I)
    return CastContextHint::None;

  auto getLoadStoreKind = [](const Value *V, unsigned LdStOp, unsigned MaskedOp,
                             unsigned GatScatOp) {
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I)
      return CastContextHint::None;

    if (I->getOpcode() == LdStOp)
      return CastContextHint::Normal;

    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      if (II->getIntrinsicID() == MaskedOp)
        return TTI::CastContextHint::Masked;
      if (II->getIntrinsicID() == GatScatOp)
        return TTI::CastContextHint::GatherScatter;
    }

    return TTI::CastContextHint::None;
  };

  switch (I->getOpcode()) {
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPExt:
    return getLoadStoreKind(I->getOperand(0), Instruction::Load,
                            Intrinsic::masked_load, Intrinsic::masked_gather);
  case Instruction::Trunc:
  case Instruction::FPTrunc:
    if (I->hasOneUse())
      return getLoadStoreKind(*I->user_begin(), Instruction::Store,
                              Intrinsic::masked_store,
                              Intrinsic::masked_scatter);
    break;
  default:
    break;
  }

  return TTI::CastContextHint::None;
}

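// Example: for `%e = zext <4 x i16> %v to <4 x i32>` where %v is produced by
// an @llvm.masked.load, the hint is CastContextHint::Masked, so targets that
// fold the extension into the masked load can report a cheaper cast cost.
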
InstructionCost TargetTransformInfo::getCastInstrCost(
    unsigned Opcode, Type *Dst, Type *Src, CastContextHint CCH,
    TTI::TargetCostKind CostKind, const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  InstructionCost Cost =
      TTIImpl->getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getExtractWithExtendCost(
    unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index) const {
  InstructionCost Cost =
      TTIImpl->getExtractWithExtendCost(Opcode, Dst, VecTy, Index);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getCFInstrCost(
    unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  InstructionCost Cost = TTIImpl->getCFInstrCost(Opcode, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getCmpSelInstrCost(
    unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
    TTI::TargetCostKind CostKind, const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  InstructionCost Cost =
      TTIImpl->getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getVectorInstrCost(
    unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
    Value *Op0, Value *Op1) const {
  // FIXME: Assert that Opcode is either InsertElement or ExtractElement.
  // This is mentioned in the interface description and respected by all
  // callers, but never asserted upon.
  InstructionCost Cost =
      TTIImpl->getVectorInstrCost(Opcode, Val, CostKind, Index, Op0, Op1);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getVectorInstrCost(const Instruction &I, Type *Val,
                                        TTI::TargetCostKind CostKind,
                                        unsigned Index) const {
  // FIXME: Assert that Opcode is either InsertElement or ExtractElement.
  // This is mentioned in the interface description and respected by all
  // callers, but never asserted upon.
  InstructionCost Cost = TTIImpl->getVectorInstrCost(I, Val, CostKind, Index);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getReplicationShuffleCost(
    Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts,
    TTI::TargetCostKind CostKind) {
  InstructionCost Cost = TTIImpl->getReplicationShuffleCost(
      EltTy, ReplicationFactor, VF, DemandedDstElts, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getMemoryOpCost(
    unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
    TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo,
    const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  InstructionCost Cost = TTIImpl->getMemoryOpCost(
      Opcode, Src, Alignment, AddressSpace, CostKind, OpInfo, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getMaskedMemoryOpCost(
    unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
    TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getMaskedMemoryOpCost(Opcode, Src, Alignment,
                                                        AddressSpace, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getGatherScatterOpCost(
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const {
  InstructionCost Cost = TTIImpl->getGatherScatterOpCost(
      Opcode, DataTy, Ptr, VariableMask, Alignment, CostKind, I);
  assert((!Cost.isValid() || Cost >= 0) &&
         "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getStridedMemoryOpCost(
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const {
  InstructionCost Cost = TTIImpl->getStridedMemoryOpCost(
      Opcode, DataTy, Ptr, VariableMask, Alignment, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) const {
  InstructionCost Cost = TTIImpl->getInterleavedMemoryOpCost(
      Opcode, VecTy, Factor, Indices, Alignment, AddressSpace, CostKind,
      UseMaskForCond, UseMaskForGaps);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                           TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getIntrinsicInstrCost(ICA, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getCallInstrCost(Function *F, Type *RetTy,
                                      ArrayRef<Type *> Tys,
                                      TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getCallInstrCost(F, RetTy, Tys, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

unsigned TargetTransformInfo::getNumberOfParts(Type *Tp) const {
  return TTIImpl->getNumberOfParts(Tp);
}

InstructionCost
TargetTransformInfo::getAddressComputationCost(Type *Tp, ScalarEvolution *SE,
                                               const SCEV *Ptr) const {
  InstructionCost Cost = TTIImpl->getAddressComputationCost(Tp, SE, Ptr);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getMemcpyCost(const Instruction *I) const {
  InstructionCost Cost = TTIImpl->getMemcpyCost(I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

uint64_t TargetTransformInfo::getMaxMemIntrinsicInlineSizeThreshold() const {
  return TTIImpl->getMaxMemIntrinsicInlineSizeThreshold();
}

InstructionCost TargetTransformInfo::getArithmeticReductionCost(
    unsigned Opcode, VectorType *Ty, std::optional<FastMathFlags> FMF,
    TTI::TargetCostKind CostKind) const {
  InstructionCost Cost =
      TTIImpl->getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getMinMaxReductionCost(
    Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF,
    TTI::TargetCostKind CostKind) const {
  InstructionCost Cost =
      TTIImpl->getMinMaxReductionCost(IID, Ty, FMF, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getExtendedReductionCost(
    unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty,
    FastMathFlags FMF, TTI::TargetCostKind CostKind) const {
  return TTIImpl->getExtendedReductionCost(Opcode, IsUnsigned, ResTy, Ty, FMF,
                                           CostKind);
}

InstructionCost TargetTransformInfo::getMulAccReductionCost(
    bool IsUnsigned, Type *ResTy, VectorType *Ty,
    TTI::TargetCostKind CostKind) const {
  return TTIImpl->getMulAccReductionCost(IsUnsigned, ResTy, Ty, CostKind);
}

InstructionCost
TargetTransformInfo::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const {
  return TTIImpl->getCostOfKeepingLiveOverCall(Tys);
}

bool TargetTransformInfo::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                             MemIntrinsicInfo &Info) const {
  return TTIImpl->getTgtMemIntrinsic(Inst, Info);
}

unsigned TargetTransformInfo::getAtomicMemIntrinsicMaxElementSize() const {
  return TTIImpl->getAtomicMemIntrinsicMaxElementSize();
}

Value *TargetTransformInfo::getOrCreateResultFromMemIntrinsic(
    IntrinsicInst *Inst, Type *ExpectedType) const {
  return TTIImpl->getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
}

Type *TargetTransformInfo::getMemcpyLoopLoweringType(
    LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
    unsigned DestAddrSpace, unsigned SrcAlign, unsigned DestAlign,
    std::optional<uint32_t> AtomicElementSize) const {
  return TTIImpl->getMemcpyLoopLoweringType(Context, Length, SrcAddrSpace,
                                            DestAddrSpace, SrcAlign, DestAlign,
                                            AtomicElementSize);
}

void TargetTransformInfo::getMemcpyLoopResidualLoweringType(
    SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
    unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
    unsigned SrcAlign, unsigned DestAlign,
    std::optional<uint32_t> AtomicCpySize) const {
  TTIImpl->getMemcpyLoopResidualLoweringType(
      OpsOut, Context, RemainingBytes, SrcAddrSpace, DestAddrSpace, SrcAlign,
      DestAlign, AtomicCpySize);
}

bool TargetTransformInfo::areInlineCompatible(const Function *Caller,
                                              const Function *Callee) const {
  return TTIImpl->areInlineCompatible(Caller, Callee);
}

unsigned
TargetTransformInfo::getInlineCallPenalty(const Function *F,
                                          const CallBase &Call,
                                          unsigned DefaultCallPenalty) const {
  return TTIImpl->getInlineCallPenalty(F, Call, DefaultCallPenalty);
}

bool TargetTransformInfo::areTypesABICompatible(
    const Function *Caller, const Function *Callee,
    const ArrayRef<Type *> &Types) const {
  return TTIImpl->areTypesABICompatible(Caller, Callee, Types);
}

bool TargetTransformInfo::isIndexedLoadLegal(MemIndexedMode Mode,
                                             Type *Ty) const {
  return TTIImpl->isIndexedLoadLegal(Mode, Ty);
}

bool TargetTransformInfo::isIndexedStoreLegal(MemIndexedMode Mode,
                                              Type *Ty) const {
  return TTIImpl->isIndexedStoreLegal(Mode, Ty);
}

unsigned TargetTransformInfo::getLoadStoreVecRegBitWidth(unsigned AS) const {
  return TTIImpl->getLoadStoreVecRegBitWidth(AS);
}

bool TargetTransformInfo::isLegalToVectorizeLoad(LoadInst *LI) const {
  return TTIImpl->isLegalToVectorizeLoad(LI);
}

bool TargetTransformInfo::isLegalToVectorizeStore(StoreInst *SI) const {
  return TTIImpl->isLegalToVectorizeStore(SI);
}

bool TargetTransformInfo::isLegalToVectorizeLoadChain(
    unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
  return TTIImpl->isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,
                                              AddrSpace);
}

bool TargetTransformInfo::isLegalToVectorizeStoreChain(
    unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
  return TTIImpl->isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,
                                               AddrSpace);
}

bool TargetTransformInfo::isLegalToVectorizeReduction(
    const RecurrenceDescriptor &RdxDesc, ElementCount VF) const {
  return TTIImpl->isLegalToVectorizeReduction(RdxDesc, VF);
}

bool TargetTransformInfo::isElementTypeLegalForScalableVector(Type *Ty) const {
  return TTIImpl->isElementTypeLegalForScalableVector(Ty);
}

unsigned TargetTransformInfo::getLoadVectorFactor(unsigned VF,
                                                  unsigned LoadSize,
                                                  unsigned ChainSizeInBytes,
                                                  VectorType *VecTy) const {
  return TTIImpl->getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);
}

unsigned TargetTransformInfo::getStoreVectorFactor(unsigned VF,
                                                   unsigned StoreSize,
                                                   unsigned ChainSizeInBytes,
                                                   VectorType *VecTy) const {
  return TTIImpl->getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
}

bool TargetTransformInfo::preferInLoopReduction(unsigned Opcode, Type *Ty,
                                                ReductionFlags Flags) const {
  return TTIImpl->preferInLoopReduction(Opcode, Ty, Flags);
}

bool TargetTransformInfo::preferPredicatedReductionSelect(
    unsigned Opcode, Type *Ty, ReductionFlags Flags) const {
  return TTIImpl->preferPredicatedReductionSelect(Opcode, Ty, Flags);
}

bool TargetTransformInfo::preferEpilogueVectorization() const {
  return TTIImpl->preferEpilogueVectorization();
}

TargetTransformInfo::VPLegalization
TargetTransformInfo::getVPLegalizationStrategy(const VPIntrinsic &VPI) const {
  return TTIImpl->getVPLegalizationStrategy(VPI);
}

bool TargetTransformInfo::hasArmWideBranch(bool Thumb) const {
  return TTIImpl->hasArmWideBranch(Thumb);
}

unsigned TargetTransformInfo::getMaxNumArgs() const {
  return TTIImpl->getMaxNumArgs();
}

bool TargetTransformInfo::shouldExpandReduction(const IntrinsicInst *II) const {
  return TTIImpl->shouldExpandReduction(II);
}

unsigned TargetTransformInfo::getGISelRematGlobalCost() const {
  return TTIImpl->getGISelRematGlobalCost();
}

unsigned TargetTransformInfo::getMinTripCountTailFoldingThreshold() const {
  return TTIImpl->getMinTripCountTailFoldingThreshold();
}

bool TargetTransformInfo::supportsScalableVectors() const {
  return TTIImpl->supportsScalableVectors();
}

bool TargetTransformInfo::enableScalableVectorization() const {
  return TTIImpl->enableScalableVectorization();
}

bool TargetTransformInfo::hasActiveVectorLength(unsigned Opcode, Type *DataType,
                                                Align Alignment) const {
  return TTIImpl->hasActiveVectorLength(Opcode, DataType, Alignment);
}

TargetTransformInfo::Concept::~Concept() = default;

TargetIRAnalysis::TargetIRAnalysis() : TTICallback(&getDefaultTTI) {}

TargetIRAnalysis::TargetIRAnalysis(
    std::function<Result(const Function &)> TTICallback)
    : TTICallback(std::move(TTICallback)) {}

TargetIRAnalysis::Result TargetIRAnalysis::run(const Function &F,
                                               FunctionAnalysisManager &) {
  return TTICallback(F);
}

AnalysisKey TargetIRAnalysis::Key;

TargetIRAnalysis::Result TargetIRAnalysis::getDefaultTTI(const Function &F) {
  return Result(F.getParent()->getDataLayout());
}

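// Usage sketch under the new pass manager (MyPass is a placeholder): passes
// obtain the per-function result from their analysis manager instead of
// constructing it themselves:
//
//   PreservedAnalyses MyPass::run(Function &F, FunctionAnalysisManager &AM) {
//     TargetTransformInfo &TTI = AM.getResult<TargetIRAnalysis>(F);
//     ...
//   }
//
// Targets register a TTICallback that returns their own TTI subclass;
// getDefaultTTI above is only the no-target fallback.
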
// Register the basic pass.
INITIALIZE_PASS(TargetTransformInfoWrapperPass, "tti",
                "Target Transform Information", false, true)
char TargetTransformInfoWrapperPass::ID = 0;

void TargetTransformInfoWrapperPass::anchor() {}

TargetTransformInfoWrapperPass::TargetTransformInfoWrapperPass()
    : ImmutablePass(ID) {
  initializeTargetTransformInfoWrapperPassPass(
      *PassRegistry::getPassRegistry());
}

TargetTransformInfoWrapperPass::TargetTransformInfoWrapperPass(
    TargetIRAnalysis TIRA)
    : ImmutablePass(ID), TIRA(std::move(TIRA)) {
  initializeTargetTransformInfoWrapperPassPass(
      *PassRegistry::getPassRegistry());
}

TargetTransformInfo &TargetTransformInfoWrapperPass::getTTI(const Function &F) {
  FunctionAnalysisManager DummyFAM;
  TTI = TIRA.run(F, DummyFAM);
  return *TTI;
}

ImmutablePass *
llvm::createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA) {
  return new TargetTransformInfoWrapperPass(std::move(TIRA));
}
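
// Usage sketch for the legacy pass manager: an embedder typically seeds the
// wrapper with the target's analysis, e.g.
//
//   legacy::PassManager PM;
//   PM.add(createTargetTransformInfoWrapperPass(TM->getTargetIRAnalysis()));
//
// where TM is a TargetMachine pointer supplied by the embedder.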
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
static cl::opt< TargetTransformInfo::TargetCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(TargetTransformInfo::TCK_RecipThroughput), cl::values(clEnumValN(TargetTransformInfo::TCK_RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(TargetTransformInfo::TCK_Latency, "latency", "Instruction latency"), clEnumValN(TargetTransformInfo::TCK_CodeSize, "code-size", "Code size"), clEnumValN(TargetTransformInfo::TCK_SizeAndLatency, "size-latency", "Code size and latency")))
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
uint64_t Size
static cl::opt< bool > ForceNestedLoop("force-nested-hardware-loop", cl::Hidden, cl::init(false), cl::desc("Force allowance of nested hardware loops"))
static cl::opt< bool > ForceHardwareLoopPHI("force-hardware-loop-phi", cl::Hidden, cl::init(false), cl::desc("Force hardware loop counter to be updated through a phi"))
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
mir Rename Register Operands
Module.h This file contains the declarations for the Module class.
LLVMContext & Context
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:38
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
This file provides helpers for the implementation of a TargetTransformInfo-conforming class.
static cl::opt< unsigned > PredictableBranchThreshold("predictable-branch-threshold", cl::init(99), cl::Hidden, cl::desc("Use this to override the target's predictable branch threshold (%)."))
static cl::opt< bool > EnableReduxCost("costmodel-reduxcost", cl::init(false), cl::Hidden, cl::desc("Recognize reduction patterns."))
static cl::opt< unsigned > MinPageSize("min-page-size", cl::init(0), cl::Hidden, cl::desc("Use this to override the target's minimum page size."))
static cl::opt< unsigned > CacheLineSize("cache-line-size", cl::init(0), cl::Hidden, cl::desc("Use this to override the target cache line size when " "specified by the user."))
This pass exposes codegen information to IR-level passes.
Value * RHS
Class for arbitrary precision integers.
Definition: APInt.h:76
an instruction to allocate memory on the stack
Definition: Instructions.h:59
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:321
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
iterator end() const
Definition: ArrayRef.h:154
iterator begin() const
Definition: ArrayRef.h:153
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
LLVMContext & getContext() const
Get the context in which this basic block lives.
Definition: BasicBlock.cpp:168
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1494
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1742
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Definition: InstrTypes.h:1662
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
Definition: InstrTypes.h:1668
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:993
This is an important base class in LLVM.
Definition: Constant.h:41
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
Convenience struct for specifying and reasoning about fast-math flags.
Definition: FMF.h:20
Class to represent function types.
Definition: DerivedTypes.h:103
param_iterator param_begin() const
Definition: DerivedTypes.h:128
param_iterator param_end() const
Definition: DerivedTypes.h:129
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition: Function.h:202
ImmutablePass class - This class is used to provide information that does not need to be run.
Definition: Pass.h:282
The core instruction combiner logic.
Definition: InstCombiner.h:47
unsigned getBitWidth() const
Get the number of bits in this IntegerType.
Definition: DerivedTypes.h:72
IntrinsicCostAttributes(Intrinsic::ID Id, const CallBase &CI, InstructionCost ScalarCost=InstructionCost::getInvalid(), bool TypeBasedOnly=false)
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:47
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:54
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
An instruction for reading from memory.
Definition: Instructions.h:184
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
void getExitingBlocks(SmallVectorImpl< BlockT * > &ExitingBlocks) const
Return all blocks inside the loop that have successors outside of the loop.
BlockT * getHeader() const
bool isLoopLatch(const BlockT *BB) const
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
Definition: LoopIterator.h:172
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
Definition: LoopIterator.h:180
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:44
The optimization diagnostic interface.
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Analysis providing profile information.
The RecurrenceDescriptor is used to identify recurrences variables in a loop.
Definition: IVDescriptors.h:71
This class represents a constant integer value.
This class represents an analyzed expression in the program.
The main scalar evolution driver.
uint64_t getTypeSizeInBits(Type *Ty) const
Return the size in bits of the specified type, for which isSCEVable must return true.
bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
const SCEV * getExitCount(const Loop *L, const BasicBlock *ExitingBlock, ExitCountKind Kind=Exact)
Return the number of times the backedge executes before the given exit would be taken; if not exactly...
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
StackOffset holds a fixed and a scalable offset in bytes.
Definition: TypeSize.h:33
An instruction for storing to memory.
Definition: Instructions.h:317
Multiway switch.
Analysis pass providing the TargetTransformInfo.
Result run(const Function &F, FunctionAnalysisManager &)
TargetTransformInfo Result
TargetIRAnalysis()
Default construct a target IR analysis.
Provides information about what library functions are available for the current target.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
StringRef getName(LibFunc F) const
bool isFunctionVectorizable(StringRef F, const ElementCount &VF) const
CRTP base class for use as a mix-in that aids implementing a TargetTransformInfo-compatible class.
Wrapper pass for TargetTransformInfo.
TargetTransformInfoWrapperPass()
We must provide a default constructor for the pass but it should never be used.
TargetTransformInfo & getTTI(const Function &F)
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const
bool isLegalToVectorizeLoad(LoadInst *LI) const
std::optional< unsigned > getVScaleForTuning() const
static CastContextHint getCastContextHint(const Instruction *I)
Calculates a CastContextHint from I.
bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const
Return false if a AS0 address cannot possibly alias a AS1 address.
bool isLegalMaskedScatter(Type *DataType, Align Alignment) const
Return true if the target supports masked scatter.
InstructionCost getStridedMemoryOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, const Instruction *I=nullptr) const
InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts, TTI::TargetCostKind CostKind)
bool shouldBuildLookupTables() const
Return true if switches should be turned into lookup tables for the target.
bool isLegalToVectorizeStore(StoreInst *SI) const
bool enableAggressiveInterleaving(bool LoopHasReductions) const
Don't restrict interleaved unrolling to small loops.
void getMemcpyLoopResidualLoweringType(SmallVectorImpl< Type * > &OpsOut, LLVMContext &Context, unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace, unsigned SrcAlign, unsigned DestAlign, std::optional< uint32_t > AtomicCpySize=std::nullopt) const
bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const
Return true if it is faster to check if a floating-point value is NaN (or not-NaN) versus a compariso...
bool preferInLoopReduction(unsigned Opcode, Type *Ty, ReductionFlags Flags) const
bool supportsEfficientVectorElementLoadStore() const
If target has efficient vector element load/store instructions, it can return true here so that inser...
bool isAlwaysUniform(const Value *V) const
unsigned getAssumedAddrSpace(const Value *V) const
bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1, const TargetTransformInfo::LSRCost &C2) const
Return true if LSR cost of C1 is lower than C2.
bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment) const
Return true if the target supports masked expand load.
bool prefersVectorizedAddressing() const
Return true if target doesn't mind addresses in vectors.
bool hasBranchDivergence(const Function *F=nullptr) const
Return true if branch divergence exists.
MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const
InstructionCost getAddressComputationCost(Type *Ty, ScalarEvolution *SE=nullptr, const SCEV *Ptr=nullptr) const
void getUnrollingPreferences(Loop *L, ScalarEvolution &, UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE) const
Get target-customized preferences for the generic loop unrolling transformation.
bool shouldBuildLookupTablesForConstant(Constant *C) const
Return true if switches should be turned into lookup tables containing this constant value for the ta...
bool shouldFoldTerminatingConditionAfterLSR() const
Return true if LSR should attempts to replace a use of an otherwise dead primary IV in the latch cond...
InstructionCost getOperandsScalarizationOverhead(ArrayRef< const Value * > Args, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const
Estimate the overhead of scalarizing an instructions unique non-constant operands.
bool supportsTailCallFor(const CallBase *CB) const
If target supports tail call on CB.
std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const
Targets can implement their own combinations for target-specific intrinsics.
bool isProfitableLSRChainElement(Instruction *I) const
TypeSize getRegisterBitWidth(RegisterKind K) const
unsigned getInlineCallPenalty(const Function *F, const CallBase &Call, unsigned DefaultCallPenalty) const
Returns a penalty for invoking call Call in F.
bool isExpensiveToSpeculativelyExecute(const Instruction *I) const
Return true if the cost of the instruction is too high to speculatively execute and should be kept be...
bool isLegalMaskedGather(Type *DataType, Align Alignment) const
Return true if the target supports masked gather.
InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, OperandValueInfo OpdInfo={OK_AnyValue, OP_None}, const Instruction *I=nullptr) const
std::optional< unsigned > getMaxVScale() const
InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, bool UseMaskForCond=false, bool UseMaskForGaps=false) const
std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const
Can be used to implement target-specific instruction combining.
bool enableOrderedReductions() const
Return true if we should be enabling ordered reductions for the target.
unsigned getInliningCostBenefitAnalysisProfitableMultiplier() const
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const
InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
Calculate the cost of vector reduction intrinsics.
unsigned getAtomicMemIntrinsicMaxElementSize() const
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency, const Instruction *I=nullptr) const
bool LSRWithInstrQueries() const
Return true if the loop strength reduce pass should make Instruction* based TTI queries to isLegalAdd...
unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize, unsigned ChainSizeInBytes, VectorType *VecTy) const
VPLegalization getVPLegalizationStrategy(const VPIntrinsic &PI) const
bool shouldTreatInstructionLikeSelect(const Instruction *I) const
Should the Select Optimization pass treat the given instruction like a select, potentially converting...
bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, TTI::OperandValueInfo Opd1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Opd2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args=std::nullopt, const Instruction *CxtI=nullptr, const TargetLibraryInfo *TLibInfo=nullptr) const
This is an approximation of reciprocal throughput of a math/logic op.
bool shouldMaximizeVectorBandwidth(TargetTransformInfo::RegisterKind K) const
TailFoldingStyle getPreferredTailFoldingStyle(bool IVUpdateMayOverflow=true) const
Query the target what the preferred style of tail folding is.
InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType=nullptr, TargetCostKind CostKind=TCK_SizeAndLatency) const
Estimate the cost of a GEP operation when lowered.
bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const
unsigned getRegUsageForType(Type *Ty) const
Returns the estimated number of registers required to represent Ty.
bool isLegalBroadcastLoad(Type *ElementTy, ElementCount NumElements) const
\Returns true if the target supports broadcasting a load to a vector of type <NumElements x ElementTy...
bool isIndexedStoreLegal(enum MemIndexedMode Mode, Type *Ty) const
std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const
unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const
InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
Calculate the cost of an extended reduction pattern, similar to getArithmeticReductionCost of a reduc...
static OperandValueInfo getOperandInfo(const Value *V)
Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
InstructionCost getMulAccReductionCost(bool IsUnsigned, Type *ResTy, VectorType *Ty, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
Calculate the cost of an extended reduction pattern, similar to getArithmeticReductionCost of an Add ...
unsigned getRegisterClassForType(bool Vector, Type *Ty=nullptr) const
bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace=0, Instruction *I=nullptr, int64_t ScalableOffset=0) const
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const
Return hardware support for population count.
unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI, unsigned &JTSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const
bool isElementTypeLegalForScalableVector(Type *Ty) const
bool forceScalarizeMaskedGather(VectorType *Type, Align Alignment) const
Return true if the target forces scalarizing of llvm.masked.gather intrinsics.
unsigned getMaxPrefetchIterationsAhead() const
bool canHaveNonUndefGlobalInitializerInAddressSpace(unsigned AS) const
Return true if globals in this address space can have initializers other than undef.
ElementCount getMinimumVF(unsigned ElemWidth, bool IsScalable) const
InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm, Type *Ty, TargetCostKind CostKind) const
bool enableMaskedInterleavedAccessVectorization() const
Enable matching of interleaved access groups that contain predicated accesses or gaps and therefore v...
InstructionCost getIntImmCostInst(unsigned Opc, unsigned Idx, const APInt &Imm, Type *Ty, TargetCostKind CostKind, Instruction *Inst=nullptr) const
Return the expected cost of materialization for the given integer immediate of the specified type for...
bool isLegalStridedLoadStore(Type *DataType, Align Alignment) const
Return true if the target supports strided load.
TargetTransformInfo & operator=(TargetTransformInfo &&RHS)
InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF=FastMathFlags(), TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
TargetCostKind
The kind of cost model.
@ TCK_RecipThroughput
Reciprocal throughput.
bool areTypesABICompatible(const Function *Caller, const Function *Callee, const ArrayRef< Type * > &Types) const
bool enableSelectOptimize() const
Should the Select Optimization pass be enabled and ran.
bool collectFlatAddressOperands(SmallVectorImpl< int > &OpIndexes, Intrinsic::ID IID) const
Return any intrinsic address operand indexes which may be rewritten if they use a flat address space ...
OperandValueProperties
Additional properties of an operand's values.
InstructionCost getPointersChainCost(ArrayRef< const Value * > Ptrs, const Value *Base, const PointersChainInfo &Info, Type *AccessTy, TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
Estimate the cost of a chain of pointers (typically pointer operands of a chain of loads or stores wi...
bool isIndexedLoadLegal(enum MemIndexedMode Mode, Type *Ty) const
unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const
bool isSourceOfDivergence(const Value *V) const
Returns whether V is a source of divergence.
bool isLegalICmpImmediate(int64_t Imm) const
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
bool isTypeLegal(Type *Ty) const
Return true if this type is legal.
bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc, ElementCount VF) const
std::optional< unsigned > getCacheAssociativity(CacheLevel Level) const
bool isLegalNTLoad(Type *DataType, Align Alignment) const
Return true if the target supports nontemporal load.
InstructionCost getMemcpyCost(const Instruction *I) const
unsigned adjustInliningThreshold(const CallBase *CB) const
bool isLegalAddImmediate(int64_t Imm) const
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize, unsigned ChainSizeInBytes, VectorType *VecTy) const
InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC, TargetLibraryInfo *LibInfo) const
Return true if the target can save a compare for loop count, for example hardware loop saves a compar...
Value * rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV, Value *NewV) const
Rewrite intrinsic call II such that OldV will be replaced with NewV, which has a different address sp...
InstructionCost getCostOfKeepingLiveOverCall(ArrayRef< Type * > Tys) const
unsigned getMinPrefetchStride(unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const
Some HW prefetchers can handle accesses up to a certain constant stride.
bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty, ReductionFlags Flags) const
bool shouldPrefetchAddressSpace(unsigned AS) const
InstructionCost getIntImmCost(const APInt &Imm, Type *Ty, TargetCostKind CostKind) const
Return the expected cost of materializing for the given integer immediate of the specified type.
unsigned getMinVectorRegisterBitWidth() const
bool isLegalNTStore(Type *DataType, Align Alignment) const
Return true if the target supports nontemporal store.
unsigned getFlatAddressSpace() const
Returns the address space ID for a target's 'flat' address space.
bool preferToKeepConstantsAttached(const Instruction &Inst, const Function &Fn) const
It can be advantageous to detach complex constants from their uses to make their generation cheaper.
bool hasArmWideBranch(bool Thumb) const
const char * getRegisterClassName(unsigned ClassID) const
bool preferEpilogueVectorization() const
Return true if the loop vectorizer should consider vectorizing an otherwise scalar epilogue loop.
bool shouldConsiderAddressTypePromotion(const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const
BranchProbability getPredictableBranchThreshold() const
If a branch or a select condition is skewed in one direction by more than this factor,...
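As a sketch, one way a client might apply the threshold; the helper and its TakenProb parameter are hypothetical, and the two-sided check matches the "skewed in one direction" wording above.
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Support/BranchProbability.h"
using namespace llvm;
// A branch counts as predictable when its taken-probability is skewed
// past the target threshold in either direction.
static bool isPredictableBranch(const TargetTransformInfo &TTI,
                                BranchProbability TakenProb) {
  BranchProbability Threshold = TTI.getPredictableBranchThreshold();
  return TakenProb > Threshold || TakenProb < Threshold.getCompl();
}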
unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const
bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth, unsigned AddressSpace=0, Align Alignment=Align(1), unsigned *Fast=nullptr) const
Determine if the target supports unaligned memory accesses.
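A sketch of the query for a byte-aligned 64-bit access; the helper name is hypothetical, and the bit width, address space, and alignment are example values.
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Alignment.h"
using namespace llvm;
// Returns true only when the misaligned access is both legal and
// reported as fast by the target.
static bool canUseUnaligned64(const TargetTransformInfo &TTI,
                              LLVMContext &Ctx) {
  unsigned Fast = 0;
  bool Legal = TTI.allowsMisalignedMemoryAccesses(
      Ctx, /*BitWidth=*/64, /*AddressSpace=*/0, Align(1), &Fast);
  return Legal && Fast != 0;
}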
InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, const Instruction *I=nullptr) const
bool hasActiveVectorLength(unsigned Opcode, Type *DataType, Align Alignment) const
PopcntSupportKind
Flags indicating the kind of support for population count.
InstructionCost getIntImmCodeSizeCost(unsigned Opc, unsigned Idx, const APInt &Imm, Type *Ty) const
Return the expected cost for the given integer when optimising for size.
AddressingModeKind getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const
Return the preferred addressing mode LSR should make efforts to generate.
bool isLoweredToCall(const Function *F) const
Test whether calls to a function lower to actual program function calls.
bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const
bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const
Query the target whether it would be profitable to convert the given loop into a hardware loop.
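The profitability query is normally sandwiched between HardwareLoopInfo's own checks (canAnalyze and isHardwareLoopCandidate, listed later on this page). A sketch of that sequence, with every analysis assumed to come from the surrounding pass:
#include "llvm/Analysis/TargetTransformInfo.h"
using namespace llvm;
// Roughly mirrors the checks performed before a loop is converted to
// a hardware loop: analyzability, target profitability, loop shape.
static bool wantsHardwareLoop(Loop *L, ScalarEvolution &SE,
                              AssumptionCache &AC, TargetLibraryInfo *TLI,
                              LoopInfo &LI, DominatorTree &DT,
                              const TargetTransformInfo &TTI) {
  HardwareLoopInfo HWLoopInfo(L);
  if (!HWLoopInfo.canAnalyze(LI))
    return false; // e.g. irreducible control flow
  if (!TTI.isHardwareLoopProfitable(L, SE, AC, TLI, HWLoopInfo))
    return false; // target declined
  return HWLoopInfo.isHardwareLoopCandidate(SE, LI, DT);
}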
unsigned getInliningThresholdMultiplier() const
unsigned getNumberOfRegisters(unsigned ClassID) const
bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1, const SmallBitVector &OpcodeMask) const
Return true if this is an alternating opcode pattern that can be lowered to a single instruction on t...
bool isProfitableToHoist(Instruction *I) const
Return true if it is profitable to hoist an instruction in the then/else branches to before the if.
bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) const
Return true if the given instruction (assumed to be a memory access instruction) has a volatile varia...
bool isLegalMaskedCompressStore(Type *DataType, Align Alignment) const
Return true if the target supports masked compress store.
std::optional< unsigned > getMinPageSize() const
bool isFPVectorizationPotentiallyUnsafe() const
Indicate that it is potentially unsafe to automatically vectorize floating-point operations because t...
bool isLegalMaskedStore(Type *DataType, Align Alignment) const
Return true if the target supports masked store.
bool shouldBuildRelLookupTables() const
Return true if lookup tables should be turned into relative lookup tables.
unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy, Type *ScalarValTy) const
std::optional< unsigned > getCacheSize(CacheLevel Level) const
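A sketch reading L1 data-cache geometry, with an assumed 32 KiB fallback when the target does not model the level; the helper name is hypothetical.
#include "llvm/Analysis/TargetTransformInfo.h"
#include <optional>
using namespace llvm;
// Returns the L1D size in bytes, or a guessed default when unknown.
static unsigned l1DataCacheSize(const TargetTransformInfo &TTI) {
  using CacheLevel = TargetTransformInfo::CacheLevel;
  std::optional<unsigned> Size = TTI.getCacheSize(CacheLevel::L1D);
  // Associativity is available the same way, via getCacheAssociativity.
  return Size.value_or(32 * 1024);
}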
std::optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) const
Can be used to implement target-specific instruction combining.
bool isLegalAddScalableImmediate(int64_t Imm) const
Return true if adding the specified scalable immediate is legal, that is, the target has add instructi...
bool hasDivRemOp(Type *DataType, bool IsSigned) const
Return true if the target has a unified operation to calculate division and remainder.
InstructionCost getAltInstrCost(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1, const SmallBitVector &OpcodeMask, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
Returns the cost estimation for alternating opcode pattern that can be lowered to a single instructio...
bool enableInterleavedAccessVectorization() const
Enable matching of interleaved access groups.
unsigned getMinTripCountTailFoldingThreshold() const
InstructionCost getInstructionCost(const User *U, ArrayRef< const Value * > Operands, TargetCostKind CostKind) const
Estimate the cost of a given IR user when lowered.
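A sketch that totals this cost across a basic block; the helper is hypothetical, and an invalid result (InstructionCost tracks validity) means some instruction could not be costed.
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/BasicBlock.h"
using namespace llvm;
// Sum the size-and-latency cost of every instruction in BB.
static InstructionCost blockCost(const BasicBlock &BB,
                                 const TargetTransformInfo &TTI) {
  InstructionCost Total = 0;
  for (const Instruction &I : BB) {
    SmallVector<const Value *, 4> Operands(I.operand_values());
    Total += TTI.getInstructionCost(
        &I, Operands, TargetTransformInfo::TCK_SizeAndLatency);
  }
  return Total;
}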
unsigned getMaxInterleaveFactor(ElementCount VF) const
bool isNumRegsMajorCostOfLSR() const
Return true if LSR's major cost is the number of registers.
unsigned getInliningCostBenefitAnalysisSavingsMultiplier() const
bool isLegalMaskedVectorHistogram(Type *AddrType, Type *DataType) const
InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index) const
unsigned getGISelRematGlobalCost() const
MemIndexedMode
The type of load/store indexing.
bool areInlineCompatible(const Function *Caller, const Function *Callee) const
bool useColdCCForColdCall(Function &F) const
Return true if the input function, which is cold at all call sites, should use the coldcc calling conventi...
InstructionCost getFPOpCost(Type *Ty) const
Return the expected cost of supporting the floating point operation of the specified type.
bool supportsTailCalls() const
Return true if the target supports tail calls.
bool canMacroFuseCmp() const
Return true if the target can fuse a compare and branch.
Value * getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst, Type *ExpectedType) const
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Query the target whether the specified address space cast from FromAS to ToAS is valid.
unsigned getNumberOfParts(Type *Tp) const
Type * getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length, unsigned SrcAddrSpace, unsigned DestAddrSpace, unsigned SrcAlign, unsigned DestAlign, std::optional< uint32_t > AtomicElementSize=std::nullopt) const
InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, StackOffset BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace=0) const
Return the cost of the scaling factor used in the addressing mode represented by AM for this target,...
bool isTruncateFree(Type *Ty1, Type *Ty2) const
Return true if it's free to truncate a value of type Ty1 to type Ty2.
InstructionCost getShuffleCost(ShuffleKind Kind, VectorType *Tp, ArrayRef< int > Mask=std::nullopt, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, int Index=0, VectorType *SubTp=nullptr, ArrayRef< const Value * > Args=std::nullopt, const Instruction *CxtI=nullptr) const
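A sketch costing a lane reversal of <8 x float>; the helper is hypothetical, and the mask lists, for each result lane, the input lane it reads.
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DerivedTypes.h"
using namespace llvm;
// Cost of reversing the lanes of an <8 x float> vector.
static InstructionCost reverseShuffleCost(LLVMContext &Ctx,
                                          const TargetTransformInfo &TTI) {
  auto *VecTy = FixedVectorType::get(Type::getFloatTy(Ctx), 8);
  SmallVector<int, 8> Mask = {7, 6, 5, 4, 3, 2, 1, 0};
  return TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VecTy, Mask,
                            TargetTransformInfo::TCK_RecipThroughput);
}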
InstructionCost getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind) const
Estimate the overhead of scalarizing an instruction.
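A sketch of the insert/extract overhead when all four lanes of a <4 x i32> must cross between vector and scalar registers; the helper name is hypothetical.
#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DerivedTypes.h"
using namespace llvm;
// Inserts cover moving scalars into the vector; extracts cover the
// reverse direction. DemandedElts marks which lanes actually matter.
static InstructionCost fullScalarizationCost(LLVMContext &Ctx,
                                             const TargetTransformInfo &TTI) {
  auto *VecTy = FixedVectorType::get(Type::getInt32Ty(Ctx), 4);
  APInt DemandedElts = APInt::getAllOnes(4);
  return TTI.getScalarizationOverhead(
      VecTy, DemandedElts, /*Insert=*/true, /*Extract=*/true,
      TargetTransformInfo::TCK_RecipThroughput);
}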
bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const
Query the target whether it would be preferred to create a predicated vector loop, which can avoid the...
bool forceScalarizeMaskedScatter(VectorType *Type, Align Alignment) const
Return true if the target forces scalarizing of llvm.masked.scatter intrinsics.
bool haveFastSqrt(Type *Ty) const
Return true if the hardware has a fast square-root instruction.
bool shouldExpandReduction(const IntrinsicInst *II) const
TargetTransformInfo(T Impl)
Construct a TTI object using a type implementing the Concept API below.
uint64_t getMaxMemIntrinsicInlineSizeThreshold() const
Returns the maximum memset / memcpy size in bytes that still makes it profitable to inline the call.
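A sketch of the intended use; the helper is hypothetical and only handles the constant-length case.
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;
// Expand a memcpy inline only when its constant length stays under
// the target's threshold; unknown lengths are left as calls.
static bool shouldExpandMemCpyInline(const MemCpyInst &MCI,
                                     const TargetTransformInfo &TTI) {
  if (const auto *Len = dyn_cast<ConstantInt>(MCI.getLength()))
    return Len->getZExtValue() <=
           TTI.getMaxMemIntrinsicInlineSizeThreshold();
  return false;
}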
InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index=-1, Value *Op0=nullptr, Value *Op1=nullptr) const
ShuffleKind
The various kinds of shuffle patterns for vector queries.
void getPeelingPreferences(Loop *L, ScalarEvolution &SE, PeelingPreferences &PP) const
Get target-customized preferences for the generic loop peeling transformation.
InstructionCost getCallInstrCost(Function *F, Type *RetTy, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency) const
InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency, const Instruction *I=nullptr) const
CastContextHint
Represents a hint about the context in which a cast is used.
@ Masked
The cast is used with a masked load/store.
@ None
The cast is not used with a load/store of any kind.
@ Normal
The cast is used with a normal load/store.
@ GatherScatter
The cast is used with a gather/scatter.
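The hints above are consumed by cast-cost queries such as TTI::getCastInstrCost. A sketch costing a zero-extend fed by a normal (unmasked) load, which many targets can fold into an extending load; the helper is hypothetical.
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;
// Cost of zext <16 x i8> -> <16 x i32> in a plain load context.
static InstructionCost extendingLoadCost(LLVMContext &Ctx,
                                         const TargetTransformInfo &TTI) {
  auto *SrcTy = FixedVectorType::get(Type::getInt8Ty(Ctx), 16);
  auto *DstTy = FixedVectorType::get(Type::getInt32Ty(Ctx), 16);
  return TTI.getCastInstrCost(Instruction::ZExt, DstTy, SrcTy,
                              TargetTransformInfo::CastContextHint::Normal,
                              TargetTransformInfo::TCK_RecipThroughput);
}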
InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, const Instruction *I=nullptr) const
OperandValueKind
Additional information about an operand's possible values.
CacheLevel
The possible cache levels.
bool isLegalMaskedLoad(Type *DataType, Align Alignment) const
Return true if the target supports masked load.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
static IntegerType * getInt32Ty(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:348
This is the common base class for vector predication intrinsics.
LLVM Value Representation.
Definition: Value.h:74
Base class of all SIMD vector types.
Definition: DerivedTypes.h:403
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
Definition: DerivedTypes.h:641
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:450
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Length
Definition: DWP.cpp:456
void initializeTargetTransformInfoWrapperPassPass(PassRegistry &)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition: Casting.h:649
AddressSpace
Definition: NVPTXBaseInfo.h:21
Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
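A sketch of the scalarization this enables; the helper and its out-parameters are hypothetical.
#include "llvm/Analysis/VectorUtils.h"
using namespace llvm;
// If both vector operands are splats, hand back the underlying
// scalars so the caller can compute in scalar form and re-splat.
static bool bothOperandsSplat(Value *A, Value *B,
                              Value *&ScalarA, Value *&ScalarB) {
  ScalarA = getSplatValue(A); // nullptr when A is not a splat
  ScalarB = getSplatValue(B);
  return ScalarA && ScalarB;
}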
ImmutablePass * createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA)
Create an analysis pass wrapper around a TTI object.
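Under the legacy pass manager, this wrapper is how clients reach TTI. A sketch of a hypothetical pass (registration boilerplate omitted; the pass name and query are illustrative only).
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"
using namespace llvm;
namespace {
struct TTIQueryPass : FunctionPass {
  static char ID;
  TTIQueryPass() : FunctionPass(ID) {}
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.setPreservesAll();
  }
  bool runOnFunction(Function &F) override {
    // TTI is function-specific; fetch it per function.
    TargetTransformInfo &TTI =
        getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    (void)TTI.getFlatAddressSpace(); // any TTI query goes through here
    return false;
  }
};
} // namespace
char TTIQueryPass::ID = 0;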
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1849
auto predecessors(const MachineBasicBlock *BB)
InstructionCost Cost
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition: Analysis.h:26
Attributes of a target dependent hardware loop.
bool canAnalyze(LoopInfo &LI)
bool isHardwareLoopCandidate(ScalarEvolution &SE, LoopInfo &LI, DominatorTree &DT, bool ForceNestedLoop=false, bool ForceHardwareLoopPHI=false)
Information about a load/store intrinsic defined by the target.
Returns options for expansion of memcmp. IsZeroCmp is true if this is the expansion of memcmp(p1, p2, s) == 0.
Describe known properties for a set of pointers.
Flags describing the kind of vector reduction.
Parameters that control the generic loop unrolling transformation.