161#define LV_NAME "loop-vectorize"
162#define DEBUG_TYPE LV_NAME
172 "llvm.loop.vectorize.followup_vectorized";
174 "llvm.loop.vectorize.followup_epilogue";
177STATISTIC(LoopsVectorized,
"Number of loops vectorized");
178STATISTIC(LoopsAnalyzed,
"Number of loops analyzed for vectorization");
179STATISTIC(LoopsEpilogueVectorized,
"Number of epilogues vectorized");
183 cl::desc(
"Enable vectorization of epilogue loops."));
187 cl::desc(
"When epilogue vectorization is enabled, and a value greater than "
188 "1 is specified, forces the given VF for all applicable epilogue "
193 cl::desc(
"Only loops with vectorization factor equal to or larger than "
194 "the specified value are considered for epilogue vectorization."));
200 cl::desc(
"Loops with a constant trip count that is smaller than this "
201 "value are vectorized only if no scalar iteration overheads "
206 cl::desc(
"The maximum allowed number of runtime memory checks"));
210 cl::desc(
"Use the legacy cost model instead of the VPlan-based cost model. "
211 "This option will be removed in the future."));
227 "prefer-predicate-over-epilogue",
230 cl::desc(
"Tail-folding and predication preferences over creating a scalar "
234 "Don't tail-predicate loops, create scalar epilogue"),
236 "predicate-else-scalar-epilogue",
237 "prefer tail-folding, create scalar epilogue if tail "
240 "predicate-dont-vectorize",
241 "prefers tail-folding, don't attempt vectorization if "
242 "tail-folding fails.")));
245 "force-tail-folding-style",
cl::desc(
"Force the tail folding style"),
248 clEnumValN(TailFoldingStyle::None,
"none",
"Disable tail folding"),
250 TailFoldingStyle::Data,
"data",
251 "Create lane mask for data only, using active.lane.mask intrinsic"),
252 clEnumValN(TailFoldingStyle::DataWithoutLaneMask,
253 "data-without-lane-mask",
254 "Create lane mask with compare/stepvector"),
255 clEnumValN(TailFoldingStyle::DataAndControlFlow,
"data-and-control",
256 "Create lane mask using active.lane.mask intrinsic, and use "
257 "it for both data and control flow"),
258 clEnumValN(TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck,
259 "data-and-control-without-rt-check",
260 "Similar to data-and-control, but remove the runtime check"),
261 clEnumValN(TailFoldingStyle::DataWithEVL,
"data-with-evl",
262 "Use predicated EVL instructions for tail folding. If EVL "
263 "is unsupported, fallback to data-without-lane-mask.")));
267 cl::desc(
"Maximize bandwidth when selecting vectorization factor which "
268 "will be determined by the smallest type in loop."));
272 cl::desc(
"Enable vectorization on interleaved memory accesses in a loop"));
278 cl::desc(
"Enable vectorization on masked interleaved memory accesses in a loop"));
282 cl::desc(
"A flag that overrides the target's number of scalar registers."));
286 cl::desc(
"A flag that overrides the target's number of vector registers."));
290 cl::desc(
"A flag that overrides the target's max interleave factor for "
295 cl::desc(
"A flag that overrides the target's max interleave factor for "
296 "vectorized loops."));
300 cl::desc(
"A flag that overrides the target's expected cost for "
301 "an instruction to a single constant value. Mostly "
302 "useful for getting consistent testing."));
307 "Pretend that scalable vectors are supported, even if the target does "
308 "not support them. This flag should only be used for testing."));
313 "The cost of a loop that is considered 'small' by the interleaver."));
317 cl::desc(
"Enable the use of the block frequency analysis to access PGO "
318 "heuristics minimizing code growth in cold regions and being more "
319 "aggressive in hot regions."));
325 "Enable runtime interleaving until load/store ports are saturated"));
330 cl::desc(
"Max number of stores to be predicated behind an if."));
334 cl::desc(
"Count the induction variable only once when interleaving"));
338 cl::desc(
"Enable if predication of stores during vectorization."));
342 cl::desc(
"The maximum interleave count to use when interleaving a scalar "
343 "reduction in a nested loop."));
348 cl::desc(
"Prefer in-loop vector reductions, "
349 "overriding the targets preference."));
353 cl::desc(
"Enable the vectorisation of loops with in-order (strict) "
359 "Prefer predicating a reduction operation over an after loop select."));
364 cl::desc(
"Enable VPlan-native vectorization path with "
365 "support for outer loop vectorization."));
375 "Build VPlan for every supported loop nest in the function and bail "
376 "out right after the build (stress test the VPlan H-CFG construction "
377 "in the VPlan-native vectorization path)."));
381 cl::desc(
"Enable loop interleaving in Loop vectorization passes"));
384 cl::desc(
"Run the Loop vectorization passes"));
388 cl::desc(
"Use dot format instead of plain text when dumping VPlans"));
391 "force-widen-divrem-via-safe-divisor",
cl::Hidden,
393 "Override cost based safe divisor widening for div/rem instructions"));
396 "vectorizer-maximize-bandwidth-for-vector-calls",
cl::init(
true),
398 cl::desc(
"Try wider VFs if they enable the use of vector variants"));
417 return DL.getTypeAllocSizeInBits(Ty) !=
DL.getTypeSizeInBits(Ty);
448 unsigned Factor = Vals.
size();
449 assert(Factor > 1 &&
"Tried to interleave invalid number of vectors");
453 for (
Value *Val : Vals)
454 assert(Val->getType() == VecTy &&
"Tried to interleave mismatched types");
459 if (VecTy->isScalableTy()) {
460 VectorType *WideVecTy = VectorType::getDoubleElementsVectorType(VecTy);
461 return Builder.
CreateIntrinsic(WideVecTy, Intrinsic::vector_interleave2,
470 const unsigned NumElts = VecTy->getElementCount().getFixedValue();
477class GeneratedRTChecks;
521 this->MinProfitableTripCount = VecWidth;
537 virtual std::pair<BasicBlock *, Value *>
564 VPValue *BlockInMask,
bool NeedsMaskForGaps);
579 std::pair<BasicBlock *, Value *> AdditionalBypass = {
nullptr,
nullptr});
644 const SCEV2ValueTy &ExpandedSCEVs,
645 std::pair<BasicBlock *, Value *> AdditionalBypass = {
nullptr,
nullptr});
792 "A high UF for the epilogue loop is likely not beneficial.");
812 GeneratedRTChecks &Checks)
814 EPI.MainLoopVF,
EPI.MainLoopVF,
EPI.MainLoopUF, LVL,
821 const SCEV2ValueTy &ExpandedSCEVs)
final {
828 virtual std::pair<BasicBlock *, Value *>
852 GeneratedRTChecks &Check)
857 std::pair<BasicBlock *, Value *>
881 GeneratedRTChecks &Checks)
888 std::pair<BasicBlock *, Value *>
910 if (
I->getDebugLoc() !=
Empty)
911 return I->getDebugLoc();
913 for (
Use &
Op :
I->operands()) {
915 if (OpInst->getDebugLoc() !=
Empty)
916 return OpInst->getDebugLoc();
919 return I->getDebugLoc();
928 dbgs() <<
"LV: " << Prefix << DebugMsg;
950 CodeRegion =
I->getParent();
953 if (
I->getDebugLoc())
954 DL =
I->getDebugLoc();
971 return B.CreateElementCount(Ty, VF);
977 assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
"Invalid loop count");
991 <<
"loop not vectorized: " << OREMsg);
1012 "Vectorizing: ", TheLoop->
isInnermost() ?
"innermost loop" :
"outer loop",
1018 <<
"vectorized " << LoopType <<
"loop (vectorization width: "
1020 <<
", interleaved count: " <<
ore::NV(
"InterleaveCount", IC) <<
")";
1164 "Profitable to scalarize relevant only for VF > 1.");
1167 "cost-model should not be used for outer loops (in VPlan-native path)");
1169 auto Scalars = InstsToScalarize.find(VF);
1170 assert(Scalars != InstsToScalarize.end() &&
1171 "VF not yet analyzed for scalarization profitability");
1172 return Scalars->second.contains(
I);
1179 "cost-model should not be used for outer loops (in VPlan-native path)");
1183 if (isa<PseudoProbeInst>(
I))
1189 auto UniformsPerVF = Uniforms.find(VF);
1190 assert(UniformsPerVF != Uniforms.end() &&
1191 "VF not yet analyzed for uniformity");
1192 return UniformsPerVF->second.count(
I);
1199 "cost-model should not be used for outer loops (in VPlan-native path)");
1203 auto ScalarsPerVF = Scalars.find(VF);
1204 assert(ScalarsPerVF != Scalars.end() &&
1205 "Scalar values are not calculated for VF");
1206 return ScalarsPerVF->second.count(
I);
1212 return VF.
isVector() && MinBWs.contains(
I) &&
1234 WideningDecisions[std::make_pair(
I, VF)] = std::make_pair(W,
Cost);
1245 for (
unsigned i = 0; i < Grp->
getFactor(); ++i) {
1248 WideningDecisions[std::make_pair(
I, VF)] = std::make_pair(W,
Cost);
1250 WideningDecisions[std::make_pair(
I, VF)] = std::make_pair(W, 0);
1262 "cost-model should not be used for outer loops (in VPlan-native path)");
1264 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(
I, VF);
1265 auto Itr = WideningDecisions.
find(InstOnVF);
1266 if (Itr == WideningDecisions.
end())
1268 return Itr->second.first;
1275 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(
I, VF);
1277 "The cost is not calculated");
1278 return WideningDecisions[InstOnVF].second;
1291 std::optional<unsigned> MaskPos,
1294 CallWideningDecisions[std::make_pair(CI, VF)] = {Kind, Variant, IID,
1301 return CallWideningDecisions.
at(std::make_pair(CI, VF));
1309 auto *Trunc = dyn_cast<TruncInst>(
I);
1322 Value *
Op = Trunc->getOperand(0);
1342 if (VF.
isScalar() || Uniforms.contains(VF))
1346 collectLoopUniforms(VF);
1347 collectLoopScalars(VF);
1367 bool LI = isa<LoadInst>(V);
1368 bool SI = isa<StoreInst>(V);
1383 const RecurrenceDescriptor &RdxDesc = Reduction.second;
1384 return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1395 return ScalarCost < SafeDivisorCost;
1419 std::pair<InstructionCost, InstructionCost>
1447 LLVM_DEBUG(
dbgs() <<
"LV: Loop does not require scalar epilogue\n");
1454 dbgs() <<
"LV: Loop requires scalar epilogue: multiple exits\n");
1459 "interleaved group requires scalar epilogue\n");
1462 LLVM_DEBUG(
dbgs() <<
"LV: Loop does not require scalar epilogue\n");
1471 auto RequiresScalarEpilogue = [
this](
ElementCount VF) {
1474 bool IsRequired =
all_of(
Range, RequiresScalarEpilogue);
1476 (IsRequired ||
none_of(
Range, RequiresScalarEpilogue)) &&
1477 "all VFs in range must agree on whether a scalar epilogue is required");
1489 if (!ChosenTailFoldingStyle)
1491 return IVUpdateMayOverflow ? ChosenTailFoldingStyle->first
1492 : ChosenTailFoldingStyle->second;
1500 assert(!ChosenTailFoldingStyle &&
"Tail folding must not be selected yet.");
1502 ChosenTailFoldingStyle =
1508 ChosenTailFoldingStyle = std::make_pair(
1523 IsScalableVF && UserIC <= 1 &&
1532 ChosenTailFoldingStyle =
1537 <<
"LV: Preference for VP intrinsics indicated. Will "
1538 "not try to generate VP Intrinsics "
1540 ?
"since interleave count specified is greater than 1.\n"
1541 :
"due to non-interleaving reasons.\n"));
1567 return InLoopReductions.contains(Phi);
1582 WideningDecisions.
clear();
1583 CallWideningDecisions.
clear();
1611 std::optional<InstructionCost>
1616 unsigned NumPredStores = 0;
1625 bool FoldTailByMasking);
1630 ElementCount getMaximizedVFForTarget(
unsigned MaxTripCount,
1631 unsigned SmallestType,
1632 unsigned WidestType,
1634 bool FoldTailByMasking);
1638 bool isScalableVectorizationAllowed();
1642 ElementCount getMaxLegalScalableVF(
unsigned MaxSafeElements);
1688 PredicatedBBsAfterVectorization;
1701 std::optional<std::pair<TailFoldingStyle, TailFoldingStyle>>
1702 ChosenTailFoldingStyle;
1705 std::optional<bool> IsScalableVectorizationAllowed;
1739 ScalarCostsTy &ScalarCosts,
1765 std::pair<InstWidening, InstructionCost>>;
1767 DecisionList WideningDecisions;
1769 using CallDecisionList =
1772 CallDecisionList CallWideningDecisions;
1795 Ops, [
this, VF](
Value *V) {
return this->needsExtract(V, VF); }));
1853class GeneratedRTChecks {
1859 Value *SCEVCheckCond =
nullptr;
1867 Value *MemRuntimeCheckCond =
nullptr;
1876 bool CostTooHigh =
false;
1877 const bool AddBranchWeights;
1879 Loop *OuterLoop =
nullptr;
1884 bool AddBranchWeights)
1885 : DT(DT), LI(LI),
TTI(
TTI), SCEVExp(SE,
DL,
"scev.check"),
1886 MemCheckExp(SE,
DL,
"scev.check"), AddBranchWeights(AddBranchWeights) {}
1914 nullptr,
"vector.scevcheck");
1921 if (RtPtrChecking.Need) {
1922 auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
1923 MemCheckBlock =
SplitBlock(Pred, Pred->getTerminator(), DT, LI,
nullptr,
1926 auto DiffChecks = RtPtrChecking.getDiffChecks();
1928 Value *RuntimeVF =
nullptr;
1933 RuntimeVF = getRuntimeVF(B, B.getIntNTy(Bits), VF);
1939 MemCheckBlock->
getTerminator(), L, RtPtrChecking.getChecks(),
1942 assert(MemRuntimeCheckCond &&
1943 "no RT checks generated although RtPtrChecking "
1944 "claimed checks are required");
1947 if (!MemCheckBlock && !SCEVCheckBlock)
1957 if (SCEVCheckBlock) {
1962 if (MemCheckBlock) {
1969 if (MemCheckBlock) {
1973 if (SCEVCheckBlock) {
1979 OuterLoop =
L->getParentLoop();
1983 if (SCEVCheckBlock || MemCheckBlock)
1996 if (SCEVCheckBlock->getTerminator() == &
I)
2003 if (MemCheckBlock) {
2006 if (MemCheckBlock->getTerminator() == &
I)
2029 unsigned BestTripCount = 2;
2033 BestTripCount = SmallTC;
2037 BestTripCount = *EstimatedTC;
2040 BestTripCount = std::max(BestTripCount, 1U);
2044 NewMemCheckCost = std::max(*NewMemCheckCost.
getValue(),
2047 if (BestTripCount > 1)
2049 <<
"We expect runtime memory checks to be hoisted "
2050 <<
"out of the outer loop. Cost reduced from "
2051 << MemCheckCost <<
" to " << NewMemCheckCost <<
'\n');
2053 MemCheckCost = NewMemCheckCost;
2057 RTCheckCost += MemCheckCost;
2060 if (SCEVCheckBlock || MemCheckBlock)
2061 LLVM_DEBUG(
dbgs() <<
"Total cost of runtime checks: " << RTCheckCost
2069 ~GeneratedRTChecks() {
2073 SCEVCleaner.markResultUsed();
2075 if (!MemRuntimeCheckCond)
2076 MemCheckCleaner.markResultUsed();
2078 if (MemRuntimeCheckCond) {
2079 auto &SE = *MemCheckExp.
getSE();
2086 I.eraseFromParent();
2089 MemCheckCleaner.cleanup();
2090 SCEVCleaner.cleanup();
2093 SCEVCheckBlock->eraseFromParent();
2094 if (MemRuntimeCheckCond)
2095 MemCheckBlock->eraseFromParent();
2109 SCEVCheckCond =
nullptr;
2110 if (
auto *
C = dyn_cast<ConstantInt>(
Cond))
2121 SCEVCheckBlock->getTerminator()->eraseFromParent();
2122 SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
2123 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2130 if (AddBranchWeights)
2133 return SCEVCheckBlock;
2142 if (!MemRuntimeCheckCond)
2151 MemCheckBlock->moveBefore(LoopVectorPreHeader);
2158 if (AddBranchWeights) {
2162 MemCheckBlock->getTerminator()->setDebugLoc(
2163 Pred->getTerminator()->getDebugLoc());
2166 MemRuntimeCheckCond =
nullptr;
2167 return MemCheckBlock;
2173 return Style == TailFoldingStyle::Data ||
2174 Style == TailFoldingStyle::DataAndControlFlow ||
2175 Style == TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck;
2179 return Style == TailFoldingStyle::DataAndControlFlow ||
2180 Style == TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck;
2210 LLVM_DEBUG(
dbgs() <<
"LV: Loop hints prevent outer loop vectorization.\n");
2216 LLVM_DEBUG(
dbgs() <<
"LV: Not vectorizing: Interleave is not supported for "
2236 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
2246 for (
Loop *InnerL : L)
2268 ?
B.CreateSExtOrTrunc(
Index, StepTy)
2269 :
B.CreateCast(Instruction::SIToFP,
Index, StepTy);
2270 if (CastedIndex !=
Index) {
2272 Index = CastedIndex;
2282 assert(
X->getType() ==
Y->getType() &&
"Types don't match!");
2283 if (
auto *CX = dyn_cast<ConstantInt>(
X))
2286 if (
auto *CY = dyn_cast<ConstantInt>(
Y))
2289 return B.CreateAdd(
X,
Y);
2295 assert(
X->getType()->getScalarType() ==
Y->getType() &&
2296 "Types don't match!");
2297 if (
auto *CX = dyn_cast<ConstantInt>(
X))
2300 if (
auto *CY = dyn_cast<ConstantInt>(
Y))
2303 VectorType *XVTy = dyn_cast<VectorType>(
X->getType());
2304 if (XVTy && !isa<VectorType>(
Y->getType()))
2305 Y =
B.CreateVectorSplat(XVTy->getElementCount(),
Y);
2306 return B.CreateMul(
X,
Y);
2309 switch (InductionKind) {
2312 "Vector indices not supported for integer inductions yet");
2314 "Index type does not match StartValue type");
2315 if (isa<ConstantInt>(Step) && cast<ConstantInt>(Step)->isMinusOne())
2316 return B.CreateSub(StartValue,
Index);
2324 "Vector indices not supported for FP inductions yet");
2327 (InductionBinOp->
getOpcode() == Instruction::FAdd ||
2328 InductionBinOp->
getOpcode() == Instruction::FSub) &&
2329 "Original bin op should be defined for FP induction");
2332 return B.CreateBinOp(InductionBinOp->
getOpcode(), StartValue, MulExp,
2346 if (
F.hasFnAttribute(Attribute::VScaleRange))
2347 return F.getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();
2349 return std::nullopt;
2358 ElementCount VF, std::optional<unsigned> UF = std::nullopt) {
2360 unsigned MaxUF = UF ? *UF :
Cost->TTI.getMaxInterleaveFactor(VF);
2362 Type *IdxTy =
Cost->Legal->getWidestInductionType();
2363 APInt MaxUIntTripCount = cast<IntegerType>(IdxTy)->getMask();
2369 Cost->PSE.getSE()->getSmallConstantMaxTripCount(
Cost->TheLoop)) {
2372 std::optional<unsigned> MaxVScale =
2376 MaxVF *= *MaxVScale;
2379 return (MaxUIntTripCount - TC).ugt(MaxVF * MaxUF);
2427 VPValue *BlockInMask,
bool NeedsMaskForGaps) {
2433 unsigned InterleaveFactor = Group->
getFactor();
2442 "Reversed masked interleave-group not supported.");
2460 for (
unsigned Part = 0; Part < State.
UF; Part++) {
2462 if (
auto *
I = dyn_cast<Instruction>(AddrPart))
2477 bool InBounds =
false;
2479 InBounds =
gep->isInBounds();
2487 auto CreateGroupMask = [
this, &BlockInMask, &State, &InterleaveFactor](
2488 unsigned Part,
Value *MaskForGaps) ->
Value * {
2490 assert(!MaskForGaps &&
"Interleaved groups with gaps are not supported.");
2491 assert(InterleaveFactor == 2 &&
2492 "Unsupported deinterleave factor for scalable vectors");
2493 auto *BlockInMaskPart = State.
get(BlockInMask, Part);
2498 nullptr,
"interleaved.mask");
2504 Value *BlockInMaskPart = State.
get(BlockInMask, Part);
2508 "interleaved.mask");
2515 if (isa<LoadInst>(Instr)) {
2516 Value *MaskForGaps =
nullptr;
2517 if (NeedsMaskForGaps) {
2520 assert(MaskForGaps &&
"Mask for Gaps is required but it is null");
2525 for (
unsigned Part = 0; Part < State.
UF; Part++) {
2527 if (BlockInMask || MaskForGaps) {
2529 "masked interleaved groups are not allowed.");
2530 Value *GroupMask = CreateGroupMask(Part, MaskForGaps);
2533 GroupMask, PoisonVec,
"wide.masked.vec");
2542 if (VecTy->isScalableTy()) {
2543 assert(InterleaveFactor == 2 &&
2544 "Unsupported deinterleave factor for scalable vectors");
2546 for (
unsigned Part = 0; Part < State.
UF; ++Part) {
2550 Intrinsic::vector_deinterleave2, VecTy, NewLoads[Part],
2551 nullptr,
"strided.vec");
2553 for (
unsigned I = 0;
I < InterleaveFactor; ++
I) {
2561 if (Member->getType() != ScalarTy) {
2569 State.
set(VPDefs[J], StridedVec, Part);
2580 for (
unsigned I = 0;
I < InterleaveFactor; ++
I) {
2589 for (
unsigned Part = 0; Part < State.
UF; Part++) {
2591 NewLoads[Part], StrideMask,
"strided.vec");
2594 if (Member->getType() != ScalarTy) {
2603 State.
set(VPDefs[J], StridedVec, Part);
2614 Value *MaskForGaps =
2617 "masked interleaved groups are not allowed.");
2619 "masking gaps for scalable vectors is not yet supported.");
2620 for (
unsigned Part = 0; Part < State.
UF; Part++) {
2623 unsigned StoredIdx = 0;
2624 for (
unsigned i = 0; i < InterleaveFactor; i++) {
2626 "Fail to get a member from an interleaved store group");
2636 Value *StoredVec = State.
get(StoredValues[StoredIdx], Part);
2644 if (StoredVec->
getType() != SubVT)
2653 if (BlockInMask || MaskForGaps) {
2654 Value *GroupMask = CreateGroupMask(Part, MaskForGaps);
2669 assert(!Instr->getType()->isAggregateType() &&
"Can't handle vectors");
2673 if (isa<NoAliasScopeDeclInst>(Instr))
2678 bool IsVoidRetTy = Instr->getType()->isVoidTy();
2682 Cloned->
setName(Instr->getName() +
".cloned");
2687 "inferred type and type from generated instructions do not match");
2693 if (
auto DL = Instr->getDebugLoc())
2699 auto InputInstance = Instance;
2703 Cloned->
setOperand(
I.index(), State.
get(Operand, InputInstance));
2710 State.
set(RepRecipe, Cloned, Instance);
2713 if (
auto *
II = dyn_cast<AssumeInst>(Cloned))
2718 if (IfPredicateInstr)
2742 if (
Cost->foldTailByMasking()) {
2744 "VF*UF must be a power of 2 when folding tail by masking");
2776 auto *SrcVecTy = cast<VectorType>(V->getType());
2777 assert(
VF == SrcVecTy->getElementCount() &&
"Vector dimensions do not match");
2778 Type *SrcElemTy = SrcVecTy->getElementType();
2780 assert((
DL.getTypeSizeInBits(SrcElemTy) ==
DL.getTypeSizeInBits(DstElemTy)) &&
2781 "Vector elements must have same size");
2792 "Only one type should be a pointer type");
2794 "Only one type should be a floating point type");
2820 auto CreateStep = [&]() ->
Value * {
2845 Value *MaxUIntTripCount =
2846 ConstantInt::get(CountTy, cast<IntegerType>(CountTy)->getMask());
2860 "TC check is expected to dominate Bypass");
2875 if (!SCEVCheckBlock)
2881 "Cannot SCEV check stride or overflow when optimizing for size");
2896 return SCEVCheckBlock;
2915 "Cannot emit memory checks when optimizing for size, unless forced "
2921 <<
"Code-size may be reduced by not forcing "
2922 "vectorization, or by source-code modifications "
2923 "eliminating the need for runtime checks "
2924 "(e.g., adding 'restrict').";
2932 return MemCheckBlock;
2941 "multiple exit loop without required epilogue?");
2945 LI,
nullptr,
Twine(Prefix) +
"middle.block");
2948 nullptr,
Twine(Prefix) +
"scalar.ph");
2954 std::pair<BasicBlock *, Value *> AdditionalBypass) {
2960 Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
2961 if (OrigPhi == OldInduction) {
2968 if (
II.getInductionBinOp() && isa<FPMathOperator>(
II.getInductionBinOp()))
2969 B.setFastMathFlags(
II.getInductionBinOp()->getFastMathFlags());
2972 Step,
II.getKind(),
II.getInductionBinOp());
2976 if (AdditionalBypass.first) {
2977 B.SetInsertPoint(AdditionalBypass.first,
2978 AdditionalBypass.first->getFirstInsertionPt());
2979 EndValueFromAdditionalBypass =
2981 Step,
II.getKind(),
II.getInductionBinOp());
2982 EndValueFromAdditionalBypass->
setName(
"ind.end");
3002 if (AdditionalBypass.first)
3004 EndValueFromAdditionalBypass);
3011 const SCEV2ValueTy &ExpandedSCEVs) {
3012 const SCEV *Step =
ID.getStep();
3013 if (
auto *
C = dyn_cast<SCEVConstant>(Step))
3014 return C->getValue();
3015 if (
auto *U = dyn_cast<SCEVUnknown>(Step))
3016 return U->getValue();
3017 auto I = ExpandedSCEVs.find(Step);
3018 assert(
I != ExpandedSCEVs.end() &&
"SCEV must be expanded at this point");
3023 const SCEV2ValueTy &ExpandedSCEVs,
3024 std::pair<BasicBlock *, Value *> AdditionalBypass) {
3025 assert(((AdditionalBypass.first && AdditionalBypass.second) ||
3026 (!AdditionalBypass.first && !AdditionalBypass.second)) &&
3027 "Inconsistent information about additional bypass.");
3036 PHINode *OrigPhi = InductionEntry.first;
3045std::pair<BasicBlock *, Value *>
3047 const SCEV2ValueTy &ExpandedSCEVs) {
3133 assert(isa<PHINode>(UI) &&
"Expected LCSSA form");
3134 MissingVals[UI] = EndValue;
3142 auto *UI = cast<Instruction>(U);
3144 assert(isa<PHINode>(UI) &&
"Expected LCSSA form");
3148 if (
II.getInductionBinOp() && isa<FPMathOperator>(
II.getInductionBinOp()))
3149 B.setFastMathFlags(
II.getInductionBinOp()->getFastMathFlags());
3151 Value *CountMinusOne =
B.CreateSub(
3153 CountMinusOne->
setName(
"cmo");
3156 assert(StepVPV &&
"step must have been expanded during VPlan execution");
3158 : State.
get(StepVPV, {0, 0});
3161 II.getKind(),
II.getInductionBinOp());
3162 Escape->
setName(
"ind.escape");
3163 MissingVals[UI] = Escape;
3167 for (
auto &
I : MissingVals) {
3174 if (
PHI->getBasicBlockIndex(MiddleBlock) == -1) {
3175 PHI->addIncoming(
I.second, MiddleBlock);
3183struct CSEDenseMapInfo {
3185 return isa<InsertElementInst>(
I) || isa<ExtractElementInst>(
I) ||
3186 isa<ShuffleVectorInst>(
I) || isa<GetElementPtrInst>(
I);
3198 assert(canHandle(
I) &&
"Unknown instruction!");
3200 I->value_op_end()));
3204 if (
LHS == getEmptyKey() ||
RHS == getEmptyKey() ||
3205 LHS == getTombstoneKey() ||
RHS == getTombstoneKey())
3207 return LHS->isIdenticalTo(
RHS);
3218 if (!CSEDenseMapInfo::canHandle(&In))
3224 In.replaceAllUsesWith(V);
3225 In.eraseFromParent();
3239 return CallWideningDecisions.at(std::make_pair(CI, VF)).Cost;
3248 for (
auto &ArgOp : CI->
args())
3257 return std::min(ScalarCallCost, IntrinsicCost);
3259 return ScalarCallCost;
3272 assert(
ID &&
"Expected intrinsic call!");
3275 if (
auto *FPMO = dyn_cast<FPMathOperator>(CI))
3276 FMF = FPMO->getFastMathFlags();
3282 std::back_inserter(ParamTys),
3283 [&](
Type *Ty) { return MaybeVectorizeType(Ty, VF); });
3286 dyn_cast<IntrinsicInst>(CI));
3307 for (
PHINode &PN : Exit->phis())
3335 KV.second->fixPhi(Plan, State);
3375 auto isBlockOfUsePredicated = [&](
Use &U) ->
bool {
3376 auto *
I = cast<Instruction>(U.getUser());
3378 if (
auto *Phi = dyn_cast<PHINode>(
I))
3379 BB = Phi->getIncomingBlock(
3381 return BB == PredBB;
3392 Worklist.
insert(InstsToReanalyze.
begin(), InstsToReanalyze.
end());
3393 InstsToReanalyze.
clear();
3396 while (!Worklist.
empty()) {
3402 if (!
I || isa<PHINode>(
I) || !VectorLoop->contains(
I) ||
3403 I->mayHaveSideEffects() ||
I->mayReadFromMemory())
3411 if (
I->getParent() == PredBB) {
3412 Worklist.
insert(
I->op_begin(),
I->op_end());
3426 I->moveBefore(&*PredBB->getFirstInsertionPt());
3427 Worklist.
insert(
I->op_begin(),
I->op_end());
3439 for (
VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
3444 PHINode *NewPhi = cast<PHINode>(State.
get(VPPhi, 0));
3456void LoopVectorizationCostModel::collectLoopScalars(
ElementCount VF) {
3461 "This function should not be visited twice for the same VF");
3467 Scalars[VF].
insert(Uniforms[VF].begin(), Uniforms[VF].end());
3486 "Widening decision should be ready at this moment");
3487 if (
auto *Store = dyn_cast<StoreInst>(MemAccess))
3488 if (
Ptr == Store->getValueOperand())
3491 "Ptr is neither a value or pointer operand");
3497 auto isLoopVaryingBitCastOrGEP = [&](
Value *
V) {
3498 return ((isa<BitCastInst>(V) &&
V->getType()->isPointerTy()) ||
3499 isa<GetElementPtrInst>(V)) &&
3510 if (!isLoopVaryingBitCastOrGEP(
Ptr))
3515 auto *
I = cast<Instruction>(
Ptr);
3523 return isa<LoadInst>(U) || isa<StoreInst>(U);
3527 PossibleNonScalarPtrs.
insert(
I);
3545 for (
auto &
I : *BB) {
3546 if (
auto *Load = dyn_cast<LoadInst>(&
I)) {
3547 evaluatePtrUse(Load,
Load->getPointerOperand());
3548 }
else if (
auto *Store = dyn_cast<StoreInst>(&
I)) {
3549 evaluatePtrUse(Store,
Store->getPointerOperand());
3550 evaluatePtrUse(Store,
Store->getValueOperand());
3553 for (
auto *
I : ScalarPtrs)
3554 if (!PossibleNonScalarPtrs.
count(
I)) {
3562 auto ForcedScalar = ForcedScalars.
find(VF);
3563 if (ForcedScalar != ForcedScalars.
end())
3564 for (
auto *
I : ForcedScalar->second) {
3565 LLVM_DEBUG(
dbgs() <<
"LV: Found (forced) scalar instruction: " << *
I <<
"\n");
3574 while (
Idx != Worklist.
size()) {
3576 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
3578 auto *Src = cast<Instruction>(Dst->getOperand(0));
3580 auto *J = cast<Instruction>(U);
3581 return !TheLoop->contains(J) || Worklist.count(J) ||
3582 ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
3583 isScalarUse(J, Src));
3586 LLVM_DEBUG(
dbgs() <<
"LV: Found scalar instruction: " << *Src <<
"\n");
3593 auto *Ind = Induction.first;
3594 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
3603 auto IsDirectLoadStoreFromPtrIndvar = [&](
Instruction *Indvar,
3605 return Induction.second.getKind() ==
3607 (isa<LoadInst>(
I) || isa<StoreInst>(
I)) &&
3614 auto *I = cast<Instruction>(U);
3615 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
3616 IsDirectLoadStoreFromPtrIndvar(Ind, I);
3624 auto *IndUpdatePhi = dyn_cast<PHINode>(IndUpdate);
3630 auto ScalarIndUpdate =
3632 auto *I = cast<Instruction>(U);
3633 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
3634 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
3636 if (!ScalarIndUpdate)
3641 Worklist.
insert(IndUpdate);
3642 LLVM_DEBUG(
dbgs() <<
"LV: Found scalar instruction: " << *Ind <<
"\n");
3643 LLVM_DEBUG(
dbgs() <<
"LV: Found scalar instruction: " << *IndUpdate
3657 switch(
I->getOpcode()) {
3660 case Instruction::Call:
3663 return CallWideningDecisions.at(std::make_pair(cast<CallInst>(
I), VF))
3665 case Instruction::Load:
3666 case Instruction::Store: {
3678 case Instruction::UDiv:
3679 case Instruction::SDiv:
3680 case Instruction::SRem:
3681 case Instruction::URem: {
3697 switch(
I->getOpcode()) {
3700 case Instruction::Load:
3701 case Instruction::Store: {
3714 (isa<LoadInst>(
I) ||
3715 (isa<StoreInst>(
I) &&
3721 case Instruction::UDiv:
3722 case Instruction::SDiv:
3723 case Instruction::SRem:
3724 case Instruction::URem:
3728 case Instruction::Call:
3733std::pair<InstructionCost, InstructionCost>
3736 assert(
I->getOpcode() == Instruction::UDiv ||
3737 I->getOpcode() == Instruction::SDiv ||
3738 I->getOpcode() == Instruction::SRem ||
3739 I->getOpcode() == Instruction::URem);
3750 ScalarizationCost = 0;
3765 ScalarizationCost += getScalarizationOverhead(
I, VF,
CostKind);
3779 Instruction::Select, VecTy,
3785 Value *Op2 =
I->getOperand(1);
3794 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
3796 return {ScalarizationCost, SafeDivisorCost};
3803 "Decision should not be set yet.");
3805 assert(Group &&
"Must have a group.");
3809 auto &
DL =
I->getDataLayout();
3816 unsigned InterleaveFactor = Group->getFactor();
3817 bool ScalarNI =
DL.isNonIntegralPointerType(ScalarTy);
3818 for (
unsigned i = 0; i < InterleaveFactor; i++) {
3823 bool MemberNI =
DL.isNonIntegralPointerType(
MemberTy);
3825 if (MemberNI != ScalarNI) {
3828 }
else if (MemberNI && ScalarNI &&
3829 ScalarTy->getPointerAddressSpace() !=
3830 MemberTy->getPointerAddressSpace()) {
3840 bool PredicatedAccessRequiresMasking =
3843 bool LoadAccessWithGapsRequiresEpilogMasking =
3844 isa<LoadInst>(
I) && Group->requiresScalarEpilogue() &&
3846 bool StoreAccessWithGapsRequiresMasking =
3847 isa<StoreInst>(
I) && (Group->getNumMembers() < Group->getFactor());
3848 if (!PredicatedAccessRequiresMasking &&
3849 !LoadAccessWithGapsRequiresEpilogMasking &&
3850 !StoreAccessWithGapsRequiresMasking)
3857 "Masked interleave-groups for predicated accesses are not enabled.");
3859 if (Group->isReverse())
3871 assert((isa<LoadInst, StoreInst>(
I)) &&
"Invalid memory instruction");
3887 auto &
DL =
I->getDataLayout();
3894void LoopVectorizationCostModel::collectLoopUniforms(
ElementCount VF) {
3901 "This function should not be visited twice for the same VF");
3905 Uniforms[VF].
clear();
3913 auto isOutOfScope = [&](
Value *V) ->
bool {
3925 auto addToWorklistIfAllowed = [&](
Instruction *
I) ->
void {
3926 if (isOutOfScope(
I)) {
3933 dbgs() <<
"LV: Found not uniform due to requiring predication: " << *
I
3937 LLVM_DEBUG(
dbgs() <<
"LV: Found uniform instruction: " << *
I <<
"\n");
3947 auto *
Cmp = dyn_cast<Instruction>(E->getTerminator()->getOperand(0));
3949 addToWorklistIfAllowed(Cmp);
3958 if (PrevVF.isVector()) {
3959 auto Iter = Uniforms.
find(PrevVF);
3960 if (Iter != Uniforms.
end() && !Iter->second.contains(
I))
3965 if (isa<LoadInst>(
I))
3976 "Widening decision should be ready at this moment");
3978 if (isUniformMemOpUse(
I))
3981 return (WideningDecision ==
CM_Widen ||
3990 if (isa<StoreInst>(
I) &&
I->getOperand(0) ==
Ptr)
4006 for (
auto &
I : *BB) {
4008 switch (
II->getIntrinsicID()) {
4009 case Intrinsic::sideeffect:
4010 case Intrinsic::experimental_noalias_scope_decl:
4011 case Intrinsic::assume:
4012 case Intrinsic::lifetime_start:
4013 case Intrinsic::lifetime_end:
4015 addToWorklistIfAllowed(&
I);
4024 if (
auto *EVI = dyn_cast<ExtractValueInst>(&
I)) {
4025 assert(isOutOfScope(EVI->getAggregateOperand()) &&
4026 "Expected aggregate value to be loop invariant");
4027 addToWorklistIfAllowed(EVI);
4036 if (isUniformMemOpUse(&
I))
4037 addToWorklistIfAllowed(&
I);
4039 if (isVectorizedMemAccessUse(&
I,
Ptr))
4046 for (
auto *V : HasUniformUse) {
4047 if (isOutOfScope(V))
4049 auto *
I = cast<Instruction>(V);
4050 auto UsersAreMemAccesses =
4052 return isVectorizedMemAccessUse(cast<Instruction>(U), V);
4054 if (UsersAreMemAccesses)
4055 addToWorklistIfAllowed(
I);
4062 while (idx != Worklist.
size()) {
4065 for (
auto *OV :
I->operand_values()) {
4067 if (isOutOfScope(OV))
4071 auto *
OP = dyn_cast<PHINode>(OV);
4076 auto *OI = cast<Instruction>(OV);
4078 auto *J = cast<Instruction>(U);
4079 return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
4081 addToWorklistIfAllowed(OI);
4093 auto *Ind = Induction.first;
4094 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4099 auto *I = cast<Instruction>(U);
4100 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
4101 isVectorizedMemAccessUse(I, Ind);
4108 auto UniformIndUpdate =
4110 auto *I = cast<Instruction>(U);
4111 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
4112 isVectorizedMemAccessUse(I, IndUpdate);
4114 if (!UniformIndUpdate)
4118 addToWorklistIfAllowed(Ind);
4119 addToWorklistIfAllowed(IndUpdate);
4130 "runtime pointer checks needed. Enable vectorization of this "
4131 "loop with '#pragma clang loop vectorize(enable)' when "
4132 "compiling with -Os/-Oz",
4133 "CantVersionLoopWithOptForSize",
ORE,
TheLoop);
4139 "runtime SCEV checks needed. Enable vectorization of this "
4140 "loop with '#pragma clang loop vectorize(enable)' when "
4141 "compiling with -Os/-Oz",
4142 "CantVersionLoopWithOptForSize",
ORE,
TheLoop);
4149 "runtime stride == 1 checks needed. Enable vectorization of "
4150 "this loop without such check by compiling with -Os/-Oz",
4151 "CantVersionLoopWithOptForSize",
ORE,
TheLoop);
4158bool LoopVectorizationCostModel::isScalableVectorizationAllowed() {
4159 if (IsScalableVectorizationAllowed)
4160 return *IsScalableVectorizationAllowed;
4162 IsScalableVectorizationAllowed =
false;
4168 "ScalableVectorizationDisabled",
ORE,
TheLoop);
4172 LLVM_DEBUG(
dbgs() <<
"LV: Scalable vectorization is available\n");
4175 std::numeric_limits<ElementCount::ScalarTy>::max());
4186 "Scalable vectorization not supported for the reduction "
4187 "operations found in this loop.",
4199 "for all element types found in this loop.",
4206 "for safe distance analysis.",
4211 IsScalableVectorizationAllowed =
true;
4216LoopVectorizationCostModel::getMaxLegalScalableVF(
unsigned MaxSafeElements) {
4217 if (!isScalableVectorizationAllowed())
4221 std::numeric_limits<ElementCount::ScalarTy>::max());
4223 return MaxScalableVF;
4231 "Max legal vector width too small, scalable vectorization "
4235 return MaxScalableVF;
4239 unsigned MaxTripCount,
ElementCount UserVF,
bool FoldTailByMasking) {
4241 unsigned SmallestType, WidestType;
4248 unsigned MaxSafeElements =
4252 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements);
4254 LLVM_DEBUG(
dbgs() <<
"LV: The max safe fixed VF is: " << MaxSafeFixedVF
4256 LLVM_DEBUG(
dbgs() <<
"LV: The max safe scalable VF is: " << MaxSafeScalableVF
4261 auto MaxSafeUserVF =
4262 UserVF.
isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
4279 <<
" is unsafe, clamping to max safe VF="
4280 << MaxSafeFixedVF <<
".\n");
4285 <<
"User-specified vectorization factor "
4286 <<
ore::NV(
"UserVectorizationFactor", UserVF)
4287 <<
" is unsafe, clamping to maximum safe vectorization factor "
4288 <<
ore::NV(
"VectorizationFactor", MaxSafeFixedVF);
4290 return MaxSafeFixedVF;
4295 <<
" is ignored because scalable vectors are not "
4301 <<
"User-specified vectorization factor "
4302 <<
ore::NV(
"UserVectorizationFactor", UserVF)
4303 <<
" is ignored because the target does not support scalable "
4304 "vectors. The compiler will pick a more suitable value.";
4308 <<
" is unsafe. Ignoring scalable UserVF.\n");
4313 <<
"User-specified vectorization factor "
4314 <<
ore::NV(
"UserVectorizationFactor", UserVF)
4315 <<
" is unsafe. Ignoring the hint to let the compiler pick a "
4316 "more suitable value.";
4321 LLVM_DEBUG(
dbgs() <<
"LV: The Smallest and Widest types: " << SmallestType
4322 <<
" / " << WidestType <<
" bits.\n");
4327 getMaximizedVFForTarget(MaxTripCount, SmallestType, WidestType,
4328 MaxSafeFixedVF, FoldTailByMasking))
4332 getMaximizedVFForTarget(MaxTripCount, SmallestType, WidestType,
4333 MaxSafeScalableVF, FoldTailByMasking))
4334 if (MaxVF.isScalable()) {
4335 Result.ScalableVF = MaxVF;
4336 LLVM_DEBUG(
dbgs() <<
"LV: Found feasible scalable VF = " << MaxVF
4349 "Not inserting runtime ptr check for divergent target",
4350 "runtime pointer checks needed. Not enabled for divergent target",
4351 "CantVersionLoopWithDivergentTarget",
ORE,
TheLoop);
4360 "loop trip count is one, irrelevant for vectorization",
4365 switch (ScalarEpilogueStatus) {
4367 return computeFeasibleMaxVF(MaxTC, UserVF,
false);
4372 dbgs() <<
"LV: vector predicate hint/switch found.\n"
4373 <<
"LV: Not allowing scalar epilogue, creating predicated "
4374 <<
"vector loop.\n");
4381 dbgs() <<
"LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
4383 LLVM_DEBUG(
dbgs() <<
"LV: Not allowing scalar epilogue due to low trip "
4402 LLVM_DEBUG(
dbgs() <<
"LV: Cannot fold tail by masking: vectorize with a "
4403 "scalar epilogue instead.\n");
4405 return computeFeasibleMaxVF(MaxTC, UserVF,
false);
4416 "No decisions should have been taken at this point");
4426 std::optional<unsigned> MaxPowerOf2RuntimeVF =
4431 MaxPowerOf2RuntimeVF = std::max<unsigned>(
4432 *MaxPowerOf2RuntimeVF,
4435 MaxPowerOf2RuntimeVF = std::nullopt;
4438 if (MaxPowerOf2RuntimeVF && *MaxPowerOf2RuntimeVF > 0) {
4440 "MaxFixedVF must be a power of 2");
4441 unsigned MaxVFtimesIC =
4442 UserIC ? *MaxPowerOf2RuntimeVF * UserIC : *MaxPowerOf2RuntimeVF;
4446 BackedgeTakenCount, SE->
getOne(BackedgeTakenCount->
getType()));
4452 LLVM_DEBUG(
dbgs() <<
"LV: No tail will remain for any chosen VF.\n");
4466 <<
"LV: tail is folded with EVL, forcing unroll factor to be 1. Will "
4467 "try to generate VP Intrinsics with scalable vector "
4473 "Expected scalable vector factor.");
4483 LLVM_DEBUG(
dbgs() <<
"LV: Cannot fold tail by masking: vectorize with a "
4484 "scalar epilogue instead.\n");
4490 LLVM_DEBUG(
dbgs() <<
"LV: Can't fold tail by masking: don't vectorize\n");
4496 "Unable to calculate the loop count due to complex control flow",
4497 "unable to calculate the loop count due to complex control flow",
4503 "Cannot optimize for size and vectorize at the same time.",
4504 "cannot optimize for size and vectorize at the same time. "
4505 "Enable vectorization of this loop with '#pragma clang loop "
4506 "vectorize(enable)' when compiling with -Os/-Oz",
4511ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
4512 unsigned MaxTripCount,
unsigned SmallestType,
unsigned WidestType,
4514 bool ComputeScalableMaxVF = MaxSafeVF.
isScalable();
4522 "Scalable flags must match");
4530 ComputeScalableMaxVF);
4531 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
4533 << (MaxVectorElementCount * WidestType) <<
" bits.\n");
4535 if (!MaxVectorElementCount) {
4537 << (ComputeScalableMaxVF ?
"scalable" :
"fixed")
4538 <<
" vector registers.\n");
4542 unsigned WidestRegisterMinEC = MaxVectorElementCount.getKnownMinValue();
4543 if (MaxVectorElementCount.isScalable() &&
4547 WidestRegisterMinEC *= Min;
4556 if (MaxTripCount && MaxTripCount <= WidestRegisterMinEC &&
4564 LLVM_DEBUG(
dbgs() <<
"LV: Clamping the MaxVF to maximum power of two not "
4565 "exceeding the constant trip count: "
4566 << ClampedUpperTripCount <<
"\n");
4568 ClampedUpperTripCount,
4569 FoldTailByMasking ? MaxVectorElementCount.isScalable() :
false);
4582 ComputeScalableMaxVF);
4583 MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
4597 for (
int I = RUs.size() - 1;
I >= 0; --
I) {
4598 const auto &MLU = RUs[
I].MaxLocalUsers;
4599 if (
all_of(MLU, [&](
decltype(MLU.front()) &LU) {
4600 return LU.second <= TTI.getNumberOfRegisters(LU.first);
4610 <<
") with target's minimum: " << MinVF <<
'\n');
4626static std::optional<unsigned>
4628 const Function *Fn = L->getHeader()->getParent();
4632 auto Max = Attr.getVScaleRangeMax();
4633 if (Max && Min == Max)
4640bool LoopVectorizationPlanner::isMoreProfitable(
4648 unsigned EstimatedWidthA =
A.Width.getKnownMinValue();
4649 unsigned EstimatedWidthB =
B.Width.getKnownMinValue();
4651 if (
A.Width.isScalable())
4652 EstimatedWidthA *= *VScale;
4653 if (
B.Width.isScalable())
4654 EstimatedWidthB *= *VScale;
4661 A.Width.isScalable() && !
B.Width.isScalable();
4672 return CmpFn(CostA * EstimatedWidthB, CostB * EstimatedWidthA);
4674 auto GetCostForTC = [MaxTripCount,
this](
unsigned VF,
4686 return VectorCost *
divideCeil(MaxTripCount, VF);
4687 return VectorCost * (MaxTripCount / VF) + ScalarCost * (MaxTripCount % VF);
4690 auto RTCostA = GetCostForTC(EstimatedWidthA, CostA,
A.ScalarCost);
4691 auto RTCostB = GetCostForTC(EstimatedWidthB, CostB,
B.ScalarCost);
4692 return CmpFn(RTCostA, RTCostB);
4698 if (InvalidCosts.
empty())
4705 std::map<Instruction *, unsigned> Numbering;
4707 for (
auto &Pair : InvalidCosts)
4708 if (!Numbering.count(Pair.first))
4709 Numbering[Pair.first] =
I++;
4713 if (Numbering[
A.first] != Numbering[
B.first])
4714 return Numbering[
A.first] < Numbering[
B.first];
4715 const auto &
LHS =
A.second;
4716 const auto &
RHS =
B.second;
4717 return std::make_tuple(
LHS.isScalable(),
LHS.getKnownMinValue()) <
4718 std::make_tuple(
RHS.isScalable(),
RHS.getKnownMinValue());
4730 Subset =
Tail.take_front(1);
4739 if (Subset ==
Tail ||
Tail[Subset.size()].first !=
I) {
4740 std::string OutString;
4742 assert(!Subset.empty() &&
"Unexpected empty range");
4743 OS <<
"Instruction with invalid costs prevented vectorization at VF=(";
4744 for (
const auto &Pair : Subset)
4745 OS << (Pair.second == Subset.front().second ?
"" :
", ") << Pair.second;
4747 if (
auto *CI = dyn_cast<CallInst>(
I))
4748 OS <<
" call to " << CI->getCalledFunction()->getName();
4750 OS <<
" " <<
I->getOpcodeName();
4753 Tail =
Tail.drop_front(Subset.size());
4757 Subset =
Tail.take_front(Subset.size() + 1);
4758 }
while (!
Tail.empty());
4772 for (
VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
4781 switch (R.getVPDefID()) {
4782 case VPDef::VPDerivedIVSC:
4783 case VPDef::VPScalarIVStepsSC:
4784 case VPDef::VPScalarCastSC:
4785 case VPDef::VPReplicateSC:
4786 case VPDef::VPInstructionSC:
4787 case VPDef::VPCanonicalIVPHISC:
4788 case VPDef::VPVectorPointerSC:
4789 case VPDef::VPExpandSCEVSC:
4790 case VPDef::VPEVLBasedIVPHISC:
4791 case VPDef::VPPredInstPHISC:
4792 case VPDef::VPBranchOnMaskSC:
4794 case VPDef::VPReductionSC:
4795 case VPDef::VPActiveLaneMaskPHISC:
4796 case VPDef::VPWidenCallSC:
4797 case VPDef::VPWidenCanonicalIVSC:
4798 case VPDef::VPWidenCastSC:
4799 case VPDef::VPWidenGEPSC:
4800 case VPDef::VPWidenSC:
4801 case VPDef::VPWidenSelectSC:
4802 case VPDef::VPBlendSC:
4803 case VPDef::VPFirstOrderRecurrencePHISC:
4804 case VPDef::VPWidenPHISC:
4805 case VPDef::VPWidenIntOrFpInductionSC:
4806 case VPDef::VPWidenPointerInductionSC:
4807 case VPDef::VPReductionPHISC:
4808 case VPDef::VPInterleaveSC:
4809 case VPDef::VPWidenLoadEVLSC:
4810 case VPDef::VPWidenLoadSC:
4811 case VPDef::VPWidenStoreEVLSC:
4812 case VPDef::VPWidenStoreSC:
4818 auto WillWiden = [&
TTI, VF](
Type *ScalarTy) {
4836 if (R.getNumDefinedValues() == 0 &&
4837 !isa<VPWidenStoreRecipe, VPWidenStoreEVLRecipe, VPInterleaveRecipe>(
4846 R.getNumDefinedValues() >= 1 ? R.getVPValue(0) : R.getOperand(1);
4848 if (!Visited.
insert({ScalarTy}).second)
4850 if (WillWiden(ScalarTy))
4860 LLVM_DEBUG(
dbgs() <<
"LV: Scalar loop costs: " << ExpectedCost <<
".\n");
4861 assert(ExpectedCost.
isValid() &&
"Unexpected invalid cost for scalar loop");
4863 [](std::unique_ptr<VPlan> &
P) {
4866 "Expected Scalar VF to be a candidate");
4873 if (ForceVectorization &&
4874 (VPlans.
size() > 1 || !VPlans[0]->hasScalarVFOnly())) {
4882 for (
auto &
P : VPlans) {
4892 unsigned AssumedMinimumVscale =
4895 Candidate.Width.isScalable()
4896 ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale
4897 : Candidate.Width.getFixedValue();
4899 <<
" costs: " << (Candidate.Cost / Width));
4900 if (VF.isScalable())
4902 << AssumedMinimumVscale <<
")");
4909 <<
"LV: Not considering vector loop of width " << VF
4910 <<
" because it will not generate any vector instructions.\n");
4915 if (isMoreProfitable(Candidate, ScalarCost))
4916 ProfitableVFs.push_back(Candidate);
4918 if (isMoreProfitable(Candidate, ChosenFactor))
4919 ChosenFactor = Candidate;
4927 "There are conditional stores.",
4928 "store that is conditionally executed prevents vectorization",
4929 "ConditionalStore", ORE, OrigLoop);
4930 ChosenFactor = ScalarCost;
4934 !isMoreProfitable(ChosenFactor, ScalarCost))
dbgs()
4935 <<
"LV: Vectorization seems to be not beneficial, "
4936 <<
"but was forced by a user.\n");
4938 return ChosenFactor;
4941bool LoopVectorizationPlanner::isCandidateForEpilogueVectorization(
4946 [&](
PHINode &Phi) { return Legal->isFixedOrderRecurrence(&Phi); }))
4956 if (!OrigLoop->
contains(cast<Instruction>(U)))
4960 if (!OrigLoop->
contains(cast<Instruction>(U)))
4989 unsigned Multiplier = 1;
5001 LLVM_DEBUG(
dbgs() <<
"LEV: Epilogue vectorization is disabled.\n");
5006 LLVM_DEBUG(
dbgs() <<
"LEV: Unable to vectorize epilogue because no "
5007 "epilogue is allowed.\n");
5013 if (!isCandidateForEpilogueVectorization(MainLoopVF)) {
5014 LLVM_DEBUG(
dbgs() <<
"LEV: Unable to vectorize epilogue because the loop "
5015 "is not a supported candidate.\n");
5020 LLVM_DEBUG(
dbgs() <<
"LEV: Epilogue vectorization factor is forced.\n");
5023 return {ForcedEC, 0, 0};
5025 LLVM_DEBUG(
dbgs() <<
"LEV: Epilogue vectorization forced factor is not "
5034 dbgs() <<
"LEV: Epilogue vectorization skipped due to opt for size.\n");
5039 LLVM_DEBUG(
dbgs() <<
"LEV: Epilogue vectorization is not profitable for "
5051 EstimatedRuntimeVF *= *VScale;
5056 const SCEV *RemainingIterations =
nullptr;
5057 for (
auto &NextVF : ProfitableVFs) {
5064 if ((!NextVF.Width.isScalable() && MainLoopVF.
isScalable() &&
5071 if (!MainLoopVF.
isScalable() && !NextVF.Width.isScalable()) {
5073 if (!RemainingIterations) {
5080 SE.
getConstant(TCType, NextVF.Width.getKnownMinValue()),
5081 RemainingIterations))
5085 if (Result.Width.isScalar() || isMoreProfitable(NextVF, Result))
5091 << Result.Width <<
"\n");
5095std::pair<unsigned, unsigned>
5097 unsigned MinWidth = -1U;
5098 unsigned MaxWidth = 8;
5111 MaxWidth = std::min<unsigned>(
5112 MaxWidth, std::min<unsigned>(
5118 MinWidth = std::min<unsigned>(
5119 MinWidth,
DL.getTypeSizeInBits(
T->getScalarType()).getFixedValue());
5120 MaxWidth = std::max<unsigned>(
5121 MaxWidth,
DL.getTypeSizeInBits(
T->getScalarType()).getFixedValue());
5124 return {MinWidth, MaxWidth};
5132 for (
Instruction &
I : BB->instructionsWithoutDebug()) {
5140 if (!isa<LoadInst>(
I) && !isa<StoreInst>(
I) && !isa<PHINode>(
I))
5145 if (
auto *PN = dyn_cast<PHINode>(&
I)) {
5159 if (
auto *ST = dyn_cast<StoreInst>(&
I))
5160 T = ST->getValueOperand()->getType();
5163 "Expected the load/store/recurrence type to be sized");
5192 LLVM_DEBUG(
dbgs() <<
"LV: Preference for VP intrinsics indicated. "
5193 "Unroll factor forced to be 1.\n");
5206 if (LoopCost == 0) {
5208 assert(LoopCost.
isValid() &&
"Expected to have chosen a VF with valid cost");
5218 for (
auto& pair : R.MaxLocalUsers) {
5219 pair.second = std::max(pair.second, 1U);
5233 unsigned IC = UINT_MAX;
5235 for (
auto& pair : R.MaxLocalUsers) {
5247 unsigned MaxLocalUsers = pair.second;
5248 unsigned LoopInvariantRegs = 0;
5249 if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
5250 LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
5252 unsigned TmpIC =
llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs) /
5256 TmpIC =
llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs - 1) /
5257 std::max(1U, (MaxLocalUsers - 1)));
5260 IC = std::min(IC, TmpIC);
5278 EstimatedVF *= *VScale;
5280 assert(EstimatedVF >= 1 &&
"Estimated VF shouldn't be less than 1");
5286 unsigned AvailableTC =
5298 std::max(1u, std::min(AvailableTC / EstimatedVF, MaxInterleaveCount)));
5299 unsigned InterleaveCountLB =
bit_floor(std::max(
5300 1u, std::min(AvailableTC / (EstimatedVF * 2), MaxInterleaveCount)));
5301 MaxInterleaveCount = InterleaveCountLB;
5303 if (InterleaveCountUB != InterleaveCountLB) {
5304 unsigned TailTripCountUB =
5305 (AvailableTC % (EstimatedVF * InterleaveCountUB));
5306 unsigned TailTripCountLB =
5307 (AvailableTC % (EstimatedVF * InterleaveCountLB));
5310 if (TailTripCountUB == TailTripCountLB)
5311 MaxInterleaveCount = InterleaveCountUB;
5313 }
else if (BestKnownTC && *BestKnownTC > 0) {
5317 ? (*BestKnownTC) - 1
5325 MaxInterleaveCount =
bit_floor(std::max(
5326 1u, std::min(AvailableTC / (EstimatedVF * 2), MaxInterleaveCount)));
5329 assert(MaxInterleaveCount > 0 &&
5330 "Maximum interleave count must be greater than 0");
5334 if (IC > MaxInterleaveCount)
5335 IC = MaxInterleaveCount;
5338 IC = std::max(1u, IC);
5340 assert(IC > 0 &&
"Interleave count must be greater than 0.");
5344 if (VF.
isVector() && HasReductions) {
5345 LLVM_DEBUG(
dbgs() <<
"LV: Interleaving because of reductions.\n");
5353 bool ScalarInterleavingRequiresPredication =
5355 return Legal->blockNeedsPredication(BB);
5357 bool ScalarInterleavingRequiresRuntimePointerCheck =
5363 <<
"LV: IC is " << IC <<
'\n'
5364 <<
"LV: VF is " << VF <<
'\n');
5365 const bool AggressivelyInterleaveReductions =
5367 if (!ScalarInterleavingRequiresRuntimePointerCheck &&
5368 !ScalarInterleavingRequiresPredication && LoopCost <
SmallLoopCost) {
5372 unsigned SmallIC = std::min(IC, (
unsigned)llvm::bit_floor<uint64_t>(
5379 unsigned StoresIC = IC / (NumStores ? NumStores : 1);
5380 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
5386 bool HasSelectCmpReductions =
5389 const RecurrenceDescriptor &RdxDesc = Reduction.second;
5390 return RecurrenceDescriptor::isAnyOfRecurrenceKind(
5391 RdxDesc.getRecurrenceKind());
5393 if (HasSelectCmpReductions) {
5394 LLVM_DEBUG(
dbgs() <<
"LV: Not interleaving select-cmp reductions.\n");
5404 bool HasOrderedReductions =
5406 const RecurrenceDescriptor &RdxDesc = Reduction.second;
5407 return RdxDesc.isOrdered();
5409 if (HasOrderedReductions) {
5411 dbgs() <<
"LV: Not interleaving scalar ordered reductions.\n");
5416 SmallIC = std::min(SmallIC,
F);
5417 StoresIC = std::min(StoresIC,
F);
5418 LoadsIC = std::min(LoadsIC,
F);
5422 std::max(StoresIC, LoadsIC) > SmallIC) {
5424 dbgs() <<
"LV: Interleaving to saturate store or load ports.\n");
5425 return std::max(StoresIC, LoadsIC);
5430 if (VF.
isScalar() && AggressivelyInterleaveReductions) {
5434 return std::max(IC / 2, SmallIC);
5436 LLVM_DEBUG(
dbgs() <<
"LV: Interleaving to reduce branch cost.\n");
5443 if (AggressivelyInterleaveReductions) {
5493 for (
Instruction &
I : BB->instructionsWithoutDebug()) {
5497 for (
Value *U :
I.operands()) {
5498 auto *Instr = dyn_cast<Instruction>(U);
5509 LoopInvariants.
insert(Instr);
5514 EndPoint[Instr] = IdxToInstr.
size();
5532 LLVM_DEBUG(
dbgs() <<
"LV(REG): Calculating max register usage:\n");
5534 const auto &TTICapture =
TTI;
5541 for (
unsigned int i = 0, s = IdxToInstr.
size(); i < s; ++i) {
5545 InstrList &
List = TransposeEnds[i];
5560 for (
unsigned j = 0, e = VFs.
size(); j < e; ++j) {
5568 if (VFs[j].isScalar()) {
5569 for (
auto *Inst : OpenIntervals) {
5578 for (
auto *Inst : OpenIntervals) {
5591 RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
5597 auto &Entry = MaxUsages[j][pair.first];
5598 Entry = std::max(Entry, pair.second);
5603 << OpenIntervals.
size() <<
'\n');
5609 for (
unsigned i = 0, e = VFs.
size(); i < e; ++i) {
5615 for (
auto *Inst : LoopInvariants) {
5618 bool IsScalar =
all_of(Inst->users(), [&](
User *U) {
5619 auto *I = cast<Instruction>(U);
5620 return TheLoop != LI->getLoopFor(I->getParent()) ||
5621 isScalarAfterVectorization(I, VFs[i]);
5627 Invariant[ClassID] += GetRegUsage(Inst->getType(), VF);
5631 dbgs() <<
"LV(REG): VF = " << VFs[i] <<
'\n';
5632 dbgs() <<
"LV(REG): Found max usage: " << MaxUsages[i].
size()
5634 for (
const auto &pair : MaxUsages[i]) {
5635 dbgs() <<
"LV(REG): RegisterClass: "
5639 dbgs() <<
"LV(REG): Found invariant usage: " << Invariant.
size()
5641 for (
const auto &pair : Invariant) {
5642 dbgs() <<
"LV(REG): RegisterClass: "
5656bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(
Instruction *
I,
5667 "Expecting a scalar emulated instruction");
5668 return isa<LoadInst>(
I) ||
5669 (isa<StoreInst>(
I) &&
5686 PredicatedBBsAfterVectorization[VF].
clear();
5703 !useEmulatedMaskMemRefHack(&
I, VF) &&
5704 computePredInstDiscount(&
I, ScalarCosts, VF) >= 0)
5707 PredicatedBBsAfterVectorization[VF].
insert(BB);
5709 if (Pred->getSingleSuccessor() == BB)
5710 PredicatedBBsAfterVectorization[VF].
insert(Pred);
5719 "Instruction marked uniform-after-vectorization will be predicated");
5737 if (!
I->hasOneUse() || PredInst->
getParent() !=
I->getParent() ||
5756 for (
Use &U :
I->operands())
5757 if (
auto *J = dyn_cast<Instruction>(U.get()))
5769 while (!Worklist.
empty()) {
5773 if (ScalarCosts.contains(
I))
5803 for (
Use &U :
I->operands())
5804 if (
auto *J = dyn_cast<Instruction>(
U.get())) {
5806 "Instruction has non-scalar type");
5807 if (canBeScalarized(J))
5809 else if (needsExtract(J, VF)) {
5811 cast<VectorType>(
ToVectorTy(J->getType(), VF)),
5822 Discount += VectorCost - ScalarCost;
5823 ScalarCosts[
I] = ScalarCost;
5838 for (
Instruction &
I : BB->instructionsWithoutDebug()) {
5855 LLVM_DEBUG(
dbgs() <<
"LV: Found an estimated cost of " <<
C <<
" for VF "
5856 << VF <<
" For instruction: " <<
I <<
'\n');
5884 const Loop *TheLoop) {
5886 auto *Gep = dyn_cast<GetElementPtrInst>(
Ptr);
5892 auto SE = PSE.
getSE();
5893 unsigned NumOperands = Gep->getNumOperands();
5894 for (
unsigned i = 1; i < NumOperands; ++i) {
5895 Value *Opd = Gep->getOperand(i);
5897 !
Legal->isInductionVariable(Opd))
5906LoopVectorizationCostModel::getMemInstScalarizationCost(
Instruction *
I,
5909 "Scalarization cost of instruction implies vectorization.");
5956 if (useEmulatedMaskMemRefHack(
I, VF))
5966LoopVectorizationCostModel::getConsecutiveMemOpCost(
Instruction *
I,
5969 auto *VectorTy = cast<VectorType>(
ToVectorTy(ValTy, VF));
5975 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5976 "Stride should be 1 or -1 for consecutive memory access");
5988 bool Reverse = ConsecutiveStride < 0;
5996LoopVectorizationCostModel::getUniformMemOpCost(
Instruction *
I,
6001 auto *VectorTy = cast<VectorType>(
ToVectorTy(ValTy, VF));
6005 if (isa<LoadInst>(
I)) {
6017 (isLoopInvariantStoreValue
6024LoopVectorizationCostModel::getGatherScatterCost(
Instruction *
I,
6027 auto *VectorTy = cast<VectorType>(
ToVectorTy(ValTy, VF));
6038LoopVectorizationCostModel::getInterleaveGroupCost(
Instruction *
I,
6041 auto *VectorTy = cast<VectorType>(
ToVectorTy(ValTy, VF));
6046 assert(Group &&
"Fail to get an interleaved access group.");
6048 unsigned InterleaveFactor = Group->getFactor();
6053 for (
unsigned IF = 0;
IF < InterleaveFactor;
IF++)
6054 if (Group->getMember(IF))
6058 bool UseMaskForGaps =
6060 (isa<StoreInst>(
I) && (Group->getNumMembers() < Group->getFactor()));
6062 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
6065 if (Group->isReverse()) {
6068 "Reverse masked interleaved access not supported.");
6069 Cost += Group->getNumMembers() *
6076std::optional<InstructionCost>
6082 if (InLoopReductions.
empty() || VF.
isScalar() || !isa<VectorType>(Ty))
6083 return std::nullopt;
6084 auto *VectorTy = cast<VectorType>(Ty);
6101 return std::nullopt;
6112 if (!InLoopReductionImmediateChains.
count(RetI))
6113 return std::nullopt;
6117 Instruction *LastChain = InLoopReductionImmediateChains.
at(RetI);
6119 while (!isa<PHINode>(ReductionPhi))
6120 ReductionPhi = InLoopReductionImmediateChains.
at(ReductionPhi);
6152 : dyn_cast<Instruction>(RetI->
getOperand(1));
6157 if (RedOp && RdxDesc.
getOpcode() == Instruction::Add &&
6170 bool IsUnsigned = isa<ZExtInst>(Op0);
6187 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
6188 return I == RetI ? RedCost : 0;
6192 bool IsUnsigned = isa<ZExtInst>(RedOp);
6201 if (RedCost.
isValid() && RedCost < BaseCost + ExtCost)
6202 return I == RetI ? RedCost : 0;
6203 }
else if (RedOp && RdxDesc.
getOpcode() == Instruction::Add &&
6208 bool IsUnsigned = isa<ZExtInst>(Op0);
6231 if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
6232 Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;
6240 (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost))
6241 return I == RetI ? RedCost : 0;
6250 if (RedCost.
isValid() && RedCost < MulCost + BaseCost)
6251 return I == RetI ? RedCost : 0;
6255 return I == RetI ? std::optional<InstructionCost>(BaseCost) : std::nullopt;
6259LoopVectorizationCostModel::getMemoryInstructionCost(
Instruction *
I,
6289 if (!
RetTy->isVoidTy() &&
6311 for (
auto *V : filterExtractingOperands(Ops, VF))
6314 filterExtractingOperands(Ops, VF), Tys,
CostKind);
6336 auto isLegalToScalarize = [&]() {
6350 if (isa<LoadInst>(I))
6355 auto &SI = cast<StoreInst>(I);
6373 if (GatherScatterCost < ScalarizationCost)
6385 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6386 "Expected consecutive stride.");
6395 unsigned NumAccesses = 1;
6398 assert(Group && "Fail to get an interleaved access group.");
6404 NumAccesses = Group->getNumMembers();
6406 InterleaveCost = getInterleaveGroupCost(&I, VF);
6411 ? getGatherScatterCost(&I, VF) * NumAccesses
6415 getMemInstScalarizationCost(&I, VF) * NumAccesses;
6421 if (InterleaveCost <= GatherScatterCost &&
6422 InterleaveCost < ScalarizationCost) {
6424 Cost = InterleaveCost;
6425 } else if (GatherScatterCost < ScalarizationCost) {
6427 Cost = GatherScatterCost;
6430 Cost = ScalarizationCost;
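// Editor's note: the selection above is a three-way minimum that breaks ties
// in favour of interleaving. A standalone sketch of the same policy (names
// are illustrative, not LLVM API):
//   enum class MemWidening { Interleave, GatherScatter, Scalarize };
//   MemWidening pick(unsigned IL, unsigned GS, unsigned SC) {
//     if (IL <= GS && IL < SC)
//       return MemWidening::Interleave;
//     return GS < SC ? MemWidening::GatherScatter : MemWidening::Scalarize;
//   }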
6464 while (!Worklist.empty()) {
6466 for (auto &Op : I->operands())
6467 if (auto *InstOp = dyn_cast<Instruction>(Op))
6468 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
6469 AddrDefs.insert(InstOp).second)
6473 for (auto *I : AddrDefs) {
6474 if (isa<LoadInst>(I)) {
6488 for (unsigned I = 0; I < Group->getFactor(); ++I) {
6505 "Trying to set a vectorization decision for a scalar VF");
6524 for (
auto &ArgOp : CI->
args())
6529 for (
Type *ScalarTy : ScalarTys)
6538 std::nullopt, *RedCost);
6552 getScalarizationOverhead(CI, VF,
CostKind);
6558 bool UsesMask = false;
6564 if (Info.Shape.VF != VF)
6568 if (MaskRequired && !Info.isMasked())
6572 bool ParamsOk = true;
6574 switch (Param.ParamKind) {
6593 dyn_cast<SCEVAddRecExpr>(SE->getSCEV(ScalarParam));
6595 if (!SAR || SAR->getLoop() != TheLoop) {
6601 dyn_cast<SCEVConstant>(SAR->getStepRecurrence(*SE));
6629 if (VecFunc && UsesMask && !MaskRequired)
6649 if (VectorCost <= Cost) {
6654 if (IntrinsicCost <= Cost) {
6655 Cost = IntrinsicCost;
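// Editor's note: for a call the model keeps the cheapest of three options:
// scalarized call, a matching vector library variant (VecFunc), or a vector
// intrinsic; the two "<= Cost" comparisons above progressively lower Cost
// and record the corresponding widening decision.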
6674 return InstsToScalarize[VF][I];
6677 auto ForcedScalar = ForcedScalars.find(VF);
6678 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
6679 auto InstSet = ForcedScalar->second;
6680 if (InstSet.count(I))
6691 auto hasSingleCopyAfterVectorization = [this](Instruction *I,
6696 auto Scalarized = InstsToScalarize.find(VF);
6697 assert(Scalarized != InstsToScalarize.end() &&
6698 "VF not yet analyzed for scalarization profitability");
6699 return !Scalarized->second.count(I) &&
6701 auto *UI = cast<Instruction>(U);
6702 return !Scalarized->second.count(UI);
6705 (void)hasSingleCopyAfterVectorization;
6714 assert(I->getOpcode() == Instruction::GetElementPtr ||
6715 I->getOpcode() == Instruction::PHI ||
6716 (I->getOpcode() == Instruction::BitCast &&
6717 I->getType()->isPointerTy()) ||
6718 hasSingleCopyAfterVectorization(I, VF));
6728 switch (I->getOpcode()) {
6729 case Instruction::GetElementPtr:
6735 case Instruction::Br: {
6742 bool ScalarPredicatedBB = false;
6748 ScalarPredicatedBB = true;
6750 if (ScalarPredicatedBB) {
6772 case Instruction::PHI: {
6773 auto *Phi = cast<PHINode>(I);
6785 cast<VectorType>(VectorTy), Mask, CostKind,
6793 return (Phi->getNumIncomingValues() - 1) *
6795 Instruction::Select, ToVectorTy(Phi->getType(), VF),
6801 case Instruction::UDiv:
6802 case Instruction::SDiv:
6803 case Instruction::URem:
6804 case Instruction::SRem:
6808 ScalarCost : SafeDivisorCost;
6812 case Instruction::Add:
6813 case Instruction::FAdd:
6814 case Instruction::Sub:
6815 case Instruction::FSub:
6816 case Instruction::Mul:
6817 case Instruction::FMul:
6818 case Instruction::FDiv:
6819 case Instruction::FRem:
6820 case Instruction::Shl:
6821 case Instruction::LShr:
6822 case Instruction::AShr:
6823 case Instruction::And:
6824 case Instruction::Or:
6825 case Instruction::Xor: {
6829 if (I->getOpcode() == Instruction::Mul &&
6840 Value *Op2 = I->getOperand(1);
6849 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6852 case Instruction::FNeg: {
6855 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6856 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6857 I->getOperand(0), I);
6859 case Instruction::Select: {
6861 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
6864 const Value *Op0, *Op1;
6881 Type *CondTy = SI->getCondition()->getType();
6886 if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
6887 Pred = Cmp->getPredicate();
6891 case Instruction::ICmp:
6892 case Instruction::FCmp: {
6893 Type *ValTy = I->getOperand(0)->getType();
6894 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
6899 cast<CmpInst>(I)->getPredicate(), CostKind,
6902 case Instruction::Store:
6903 case Instruction::Load: {
6908 "CM decision should be taken at this point");
6915 return getMemoryInstructionCost(I, VF);
6917 case Instruction::BitCast:
6918 if (I->getType()->isPointerTy())
6921 case Instruction::ZExt:
6922 case Instruction::SExt:
6923 case Instruction::FPToUI:
6924 case Instruction::FPToSI:
6925 case Instruction::FPExt:
6926 case Instruction::PtrToInt:
6927 case Instruction::IntToPtr:
6928 case Instruction::SIToFP:
6929 case Instruction::UIToFP:
6930 case Instruction::Trunc:
6931 case Instruction::FPTrunc: {
6934 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
6935 "Expected a load or a store!");
6961 unsigned Opcode = I->getOpcode();
6964 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
6966 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
6967 CCH = ComputeCCH(Store);
6970 } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
6971 Opcode == Instruction::FPExt) {
6972 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
6973 CCH = ComputeCCH(Load);
6980 auto *Trunc = cast<TruncInst>(I);
6982 Trunc->getSrcTy(), CCH, CostKind, Trunc);
6989 Type *SrcScalarTy = I->getOperand(0)->getType();
6990 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
7001 (I->getOpcode() == Instruction::ZExt ||
7002 I->getOpcode() == Instruction::SExt))
7008 case Instruction::Call:
7010 case Instruction::ExtractValue:
7012 case Instruction::Alloca:
7034 if ((SI = dyn_cast<StoreInst>(&I)) &&
7043 if (Group->getInsertPos() == &I)
7046 DeadInterleavePointerOps.push_back(PointerOp);
7052 for (unsigned I = 0; I != DeadInterleavePointerOps.size(); ++I) {
7053 auto *Op = dyn_cast<Instruction>(DeadInterleavePointerOps[I]);
7055 Instruction *UI = cast<Instruction>(U);
7056 return !VecValuesToIgnore.contains(U) &&
7057 (!isAccessInterleaved(UI) ||
7058 getInterleavedAccessGroup(UI)->getInsertPos() == UI);
7062 DeadInterleavePointerOps.append(Op->op_begin(), Op->op_end());
7103 bool InLoop = !ReductionOperations.empty();
7106 InLoopReductions.insert(Phi);
7109 for (auto *I : ReductionOperations) {
7110 InLoopReductionImmediateChains[I] = LastChain;
7114 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
7115 << " reduction for phi: " << *Phi << "\n");
7123 return tryInsertInstruction(
7136 unsigned WidestType;
7145 unsigned N = RegSize.getKnownMinValue() / WidestType;
7166 << "overriding computed VF.\n");
7171 LLVM_DEBUG(dbgs() << "LV: Not vectorizing. Scalable VF requested, but "
7172 << "not supported by the target.\n");
7174 "Scalable vectorization requested but not supported by the target",
7175 "the scalable user-specified vectorization width for outer-loop "
7176 "vectorization cannot be used because the target does not support "
7177 "scalable vectors.",
7178 "ScalableVFUnfeasible", ORE, OrigLoop);
7183 "VF needs to be a power of two");
7185 << "VF " << VF << " to build VPlans.\n");
7192 return {VF, 0, 0};
7196 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
7197 "VPlan-native path.\n");
7201 std::optional<VectorizationFactor>
7209 return std::nullopt;
7216 << "LV: Invalidate all interleaved groups due to fold-tail by masking "
7217 "which requires masked-interleaved support.\n");
7231 if (!UserVF.isZero() && UserVFIsLegal) {
7233 "VF needs to be a power of two");
7239 buildVPlansWithVPRecipes(UserVF, UserVF);
7241 LLVM_DEBUG(dbgs() << "LV: No VPlan could be built for " << UserVF
7243 return std::nullopt;
7247 return {{UserVF, 0, 0}};
7250 "InvalidCost", ORE, OrigLoop);
7263 for (const auto &VF : VFCandidates) {
7278 return std::nullopt;
7280 [](std::unique_ptr<VPlan> &P) { return P->hasScalarVFOnly(); }))
7292 return std::nullopt;
7330 for (User *U : IV->users()) {
7331 auto *CI = cast<Instruction>(U);
7332 if (!CostCtx.CM.isOptimizableIVTruncate(CI, VF))
7337 if (!CostCtx.SkipCostComputation.insert(IVInst).second)
7341 dbgs() << "Cost of " << InductionCost << " for VF " << VF
7342 << ": induction instruction " << *IVInst << "\n";
7344 Cost += InductionCost;
7357 auto *Term = dyn_cast<BranchInst>(EB->getTerminator());
7360 if (auto *CondI = dyn_cast<Instruction>(Term->getOperand(0))) {
7361 ExitInstrs.insert(CondI);
7365 for (unsigned I = 0; I != ExitInstrs.size(); ++I) {
7368 !CostCtx.SkipCostComputation.insert(CondI).second)
7370 Cost += CostCtx.getLegacyCost(CondI, VF);
7372 auto *OpI = dyn_cast<Instruction>(Op);
7373 if (!OpI || any_of(OpI->users(), [&ExitInstrs, this](User *U) {
7374 return OrigLoop->contains(cast<Instruction>(U)->getParent()) &&
7375 !ExitInstrs.contains(cast<Instruction>(U));
7391 RdxDesc.getRecurrenceKind()))
7397 RdxDesc.getRecurrenceKind())) {
7399 RedPhi->users(), [](User *U) { return isa<SelectInst>(U); }));
7400 assert(!CostCtx.SkipCostComputation.contains(Select) &&
7401 "reduction op visited multiple times");
7402 CostCtx.SkipCostComputation.insert(Select);
7403 auto ReductionCost = CostCtx.getLegacyCost(Select, VF);
7404 LLVM_DEBUG(dbgs() << "Cost of " << ReductionCost << " for VF " << VF
7405 << ":\n any-of reduction " << *Select << "\n");
7406 Cost += ReductionCost;
7410 const auto &ChainOps = RdxDesc.getReductionOpChain(RedPhi, OrigLoop);
7415 for (auto *ChainOp : ChainOps) {
7416 for (Value *Op : ChainOp->operands()) {
7417 if (auto *I = dyn_cast<Instruction>(Op))
7418 ChainOpsAndOperands.insert(I);
7429 assert(!CostCtx.SkipCostComputation.contains(I) &&
7430 "reduction op visited multiple times");
7431 CostCtx.SkipCostComputation.insert(I);
7432 LLVM_DEBUG(dbgs() << "Cost of " << ReductionCost << " for VF " << VF
7433 << ":\n in-loop reduction " << *I << "\n");
7434 Cost += *ReductionCost;
7446 CostCtx.SkipCostComputation.insert(BB->getTerminator());
7447 auto BranchCost = CostCtx.getLegacyCost(BB->getTerminator(), VF);
7458 VPlan &FirstPlan = *VPlans[0];
7462 VPlan *BestPlan = &FirstPlan;
7465 "More than a single plan/VF w/o any plan having scalar VF");
7472 if (ForceVectorization) {
7479 for (auto &P : VPlans) {
7486 << "LV: Not considering vector loop of width " << VF
7487 << " because it will not generate any vector instructions.\n");
7493 if (isMoreProfitable(CurrentFactor, BestFactor)) {
7494 BestFactor = CurrentFactor;
7505 [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) ==
7507 "Best VF has not a single VPlan.");
7509 for (const VPlanPtr &Plan : VPlans) {
7510 if (Plan->hasVF(VF))
7520 bool IsUnrollMetadata = false;
7521 MDNode *LoopID = L->getLoopID();
7524 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
7525 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
7527 const auto *S = dyn_cast<MDString>(MD->getOperand(0));
7529 S && S->getString().starts_with("llvm.loop.unroll.disable");
7535 if (!IsUnrollMetadata) {
7537 LLVMContext &Context = L->getHeader()->getContext();
7540 MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
7546 L->setLoopID(NewLoopID);
7556 bool VectorizingEpilogue) {
7561 auto *PhiR = cast<VPReductionPHIRecipe>(RedResult->getOperand(0));
7567 dyn_cast<PHINode>(PhiR->getStartValue()->getUnderlyingValue());
7570 auto *Cmp = cast<ICmpInst>(PhiR->getStartValue()->getUnderlyingValue());
7573 ResumePhi = cast<PHINode>(Cmp->getOperand(0));
7575 assert((!VectorizingEpilogue || ResumePhi) &&
7576 "when vectorizing the epilogue loop, we need a resume phi from main "
7593 BCBlockPhi->addIncoming(FinalValue, Incoming);
7595 BCBlockPhi->addIncoming(ResumePhi->getIncomingValueForBlock(Incoming),
7601 auto *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
7605 int IncomingEdgeBlockIdx =
7607 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
7609 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
7610 OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
7612 OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
7614 ReductionResumeValues[&RdxDesc] = BCBlockPhi;
7617 std::pair<DenseMap<const SCEV *, Value *>,
7624 "Trying to execute plan with unsupported VF");
7626 "Trying to execute plan with unsupported UF");
7628 (IsEpilogueVectorization || !ExpandedSCEVs) &&
7629 "expanded SCEVs to reuse can only be used during epilogue vectorization");
7630 (void)IsEpilogueVectorization;
7635 << ", UF=" << BestUF << '\n');
7636 BestVPlan.setName("Final VPlan");
7653 assert(IsEpilogueVectorization && "should only re-use the existing trip "
7654 "count during epilogue vectorization");
7658 Value *CanonicalIVStartValue;
7659 std::tie(State.CFG.PrevBB, CanonicalIVStartValue) =
7662 #ifdef EXPENSIVE_CHECKS
7663 assert(DT->verify(DominatorTree::VerificationLevel::Fast));
7669 std::unique_ptr<LoopVersioning> LVer = nullptr;
7677 LVer = std::make_unique<LoopVersioning>(
7680 State.LVer = &*LVer;
7697 CanonicalIVStartValue, State);
7707 dyn_cast<VPInstruction>(&R), ReductionResumeValues, State, OrigLoop,
7716 std::optional<MDNode *> VectorizedLoopID =
7723 if (VectorizedLoopID)
7724 L->setLoopID(*VectorizedLoopID);
7747 cast<BranchInst>(State.CFG.VPBB2IRBB[ExitVPBB]->getTerminator());
7748 if (MiddleTerm->isConditional() &&
7752 assert(TripCount > 0 && "trip count should not be zero");
7753 const uint32_t Weights[] = {1, TripCount - 1};
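// Editor's note (editor's reading of this fragment): TripCount here is the
// number of scalar iterations covered per vector iteration, and the weights
// {1, TripCount - 1} annotate the middle block's conditional branch under
// the assumption that the remainder trip count is uniformly distributed, so
// the rarer outcome is expected about once per TripCount executions.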
7760 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
7762 for (const auto &Plan : VPlans)
7776 std::pair<BasicBlock *, Value *>
7778 const SCEV2ValueTy &ExpandedSCEVs) {
7818 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
7828 dbgs() << "intermediate fn:\n"
7836 assert(Bypass && "Expected valid bypass basic block.");
7857 TCCheckBlock->setName("vector.main.loop.iter.check");
7861 DT, LI, nullptr, "vector.ph");
7866 "TC check is expected to dominate Bypass");
7884 return TCCheckBlock;
7893 std::pair<BasicBlock *, Value *>
7895 const SCEV2ValueTy &ExpandedSCEVs) {
7903 nullptr, "vec.epilog.iter.check", true);
7905 VecEpilogueIterationCountCheck);
7910 "expected this to be saved from the previous pass.");
7928 VecEpilogueIterationCountCheck,
7952 for (PHINode &Phi : VecEpilogueIterationCountCheck->phis())
7955 for (PHINode *Phi : PhisInBlock) {
7957 Phi->replaceIncomingBlockWith(
7959 VecEpilogueIterationCountCheck);
7966 return EPI.EpilogueIterationCountCheck == IncB;
7978 Type *IdxTy = Legal->getWidestInductionType();
7982 EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
7993 {VecEpilogueIterationCountCheck,
8004 "Expected trip count to have been saved in the first pass.");
8008 "saved trip count does not dominate insertion point.");
8019 Value *CheckMinIters =
8023 "min.epilog.iters.check");
8029 unsigned EpilogueLoopStep =
8035 unsigned EstimatedSkipCount = std::min(MainLoopStep, EpilogueLoopStep);
8036 const uint32_t Weights[] = {EstimatedSkipCount,
8037 MainLoopStep - EstimatedSkipCount};
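// Editor's note: these weights bias the "skip the epilogue vector loop"
// branch. With MainLoopStep the scalar iterations per main vector iteration
// and EpilogueLoopStep the same for the epilogue, a remainder assumed
// uniform over [0, MainLoopStep) falls below EpilogueLoopStep in
// EstimatedSkipCount = min(MainLoopStep, EpilogueLoopStep) of the
// MainLoopStep equally likely cases.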
8047 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
8061 assert(!Range.isEmpty() && "Trying to test an empty VF range.");
8062 bool PredicateAtRangeStart = Predicate(Range.Start);
8065 if (Predicate(TmpVF) != PredicateAtRangeStart) {
8070 return PredicateAtRangeStart;
8080 auto MaxVFTimes2 = MaxVF * 2;
8082 VFRange SubRange = {VF, MaxVFTimes2};
8083 VPlans.push_back(buildVPlan(SubRange));
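// Editor's note: a sketch of the range splitting driving buildVPlan above,
// assuming power-of-two VFs (illustrative, not the exact control flow):
//   uint64_t VF = MinVF;
//   while (VF <= MaxVF) {
//     uint64_t End = 2 * VF;          // initial subrange [VF, 2*VF)
//     buildPlanForSubRange(VF, End);  // one plan covers every VF for which
//     VF = End;                       // the same recipes apply
//   }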
8091 if (auto *I = dyn_cast<Instruction>(Op)) {
8092 if (auto *R = Ingredient2Recipe.lookup(I))
8093 return R->getVPSingleValue();
8104 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
8106 if (ECEntryIt != EdgeMaskCache.end())
8107 return ECEntryIt->second;
8112 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
8113 assert(BI && "Unexpected terminator found");
8116 return EdgeMaskCache[Edge] = SrcMask;
8122 return EdgeMaskCache[Edge] = SrcMask;
8125 assert(EdgeMask && "No Edge Mask found for condition");
8137 return EdgeMaskCache[Edge] = EdgeMask;
8144 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
8146 assert(ECEntryIt != EdgeMaskCache.end() &&
8147 "looking up mask for edge which has not been created");
8148 return ECEntryIt->second;
8156 BlockMaskCache[Header] = nullptr;
8168 HeaderVPBB->insert(IV, NewInsertionPoint);
8175 BlockMaskCache[Header] = BlockMask;
8181 assert(BCEntryIt != BlockMaskCache.end() &&
8182 "Trying to access mask for block without one.");
8183 return BCEntryIt->second;
8187 assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
8188 assert(BlockMaskCache.count(BB) == 0 && "Mask for block already computed");
8190 "Loop header must have cached block mask");
8199 BlockMaskCache[BB] = EdgeMask;
8204 BlockMask = EdgeMask;
8208 BlockMask = Builder.createOr(BlockMask, EdgeMask, {});
8211 BlockMaskCache[BB] = BlockMask;
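// Editor's note: under predication a block's mask is the disjunction of its
// incoming edge masks, each of which is the source block's mask combined
// with the branch condition along that edge. In pseudo-form:
//   BlockMask(BB)     = OR over predecessors P of EdgeMask(P -> BB)
//   EdgeMask(P -> BB) = BlockMask(P) & (cond or !cond along that edge)
// which is what the createOr chain above accumulates.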
8217 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
8218 "Must be called with either a load or store");
8224 "CM decision should be taken at this point.");
8250 auto *GEP = dyn_cast<GetElementPtrInst>(
8251 Ptr->getUnderlyingValue()->stripPointerCasts());
8258 if (LoadInst *Load = dyn_cast<LoadInst>(I))
8276 "step must be loop invariant");
8280 if (auto *TruncI = dyn_cast<TruncInst>(PhiOrTrunc)) {
8283 assert(isa<PHINode>(PhiOrTrunc) && "must be a phi node here");
8294 *PSE.getSE(), *OrigLoop);
8320 auto isOptimizableIVTruncate =
8328 isOptimizableIVTruncate(I), Range)) {
8330 auto *Phi = cast<PHINode>(I->getOperand(0));
8341 unsigned NumIncoming = Phi->getNumIncomingValues();
8352 for (unsigned In = 0; In < NumIncoming; In++) {
8357 assert(In == 0 && "Both null and non-null edge masks found");
8359 "Distinct incoming values with one having a full mask");
8382 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8383 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
8384 ID == Intrinsic::pseudoprobe ||
8385 ID == Intrinsic::experimental_noalias_scope_decl))
8392 bool ShouldUseVectorIntrinsic =
8399 if (ShouldUseVectorIntrinsic)
8404 std::optional<unsigned> MaskPos;
8426 Variant = Decision.Variant;
8427 MaskPos = Decision.MaskPos;
8434 if (ShouldUseVectorCall) {
8435 if (MaskPos.has_value()) {
8450 Ops.insert(Ops.begin() + *MaskPos, Mask);
8462 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
8463 !isa<StoreInst>(I) && "Instruction should have been handled earlier");
8478 switch (I->getOpcode()) {
8481 case Instruction::SDiv:
8482 case Instruction::UDiv:
8483 case Instruction::SRem:
8484 case Instruction::URem: {
8492 auto *SafeRHS = Builder.createSelect(Mask, Ops[1], One, I->getDebugLoc());
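// Editor's note: the select above is the "safe divisor" transform for
// predicated div/rem: lanes that are masked off get divisor 1, so the wide
// divide cannot trap. Scalar equivalent (illustrative only):
//   int safe_rhs = lane_active ? rhs : 1;  // 1 can never fault
//   int q = lhs / safe_rhs;                // q is ignored when !lane_active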
8498 case Instruction::Add:
8499 case Instruction::And:
8500 case Instruction::AShr:
8501 case Instruction::FAdd:
8502 case Instruction::FCmp:
8503 case Instruction::FDiv:
8504 case Instruction::FMul:
8505 case Instruction::FNeg:
8506 case Instruction::FRem:
8507 case Instruction::FSub:
8508 case Instruction::ICmp:
8509 case Instruction::LShr:
8510 case Instruction::Mul:
8511 case Instruction::Or:
8512 case Instruction::Select:
8513 case Instruction::Shl:
8514 case Instruction::Sub:
8515 case Instruction::Xor:
8516 case Instruction::Freeze:
8524 auto *PN = cast<PHINode>(R->getUnderlyingValue());
8526 getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
8543 if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
8545 case Intrinsic::assume:
8546 case Intrinsic::lifetime_start:
8547 case Intrinsic::lifetime_end:
8569 VPValue *BlockInMask = nullptr;
8570 if (!IsPredicated) {
8574 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8585 assert((Range.Start.isScalar() || !IsUniform || !IsPredicated ||
8586 (Range.Start.isScalable() && isa<IntrinsicInst>(I))) &&
8587 "Should not predicate a uniform recipe");
8589 IsUniform, BlockInMask);
8600 if (auto Phi = dyn_cast<PHINode>(Instr)) {
8601 if (Phi->getParent() != OrigLoop->getHeader())
8604 if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands, Range)))
8610 "can only widen reductions and fixed-order recurrences here");
8628 PhisToFix.push_back(PhiRecipe);
8632 if (isa<TruncInst>(Instr) && (Recipe = tryToOptimizeInductionTruncate(
8641 if (auto *CI = dyn_cast<CallInst>(Instr))
8644 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8647 if (!shouldWiden(Instr, Range))
8650 if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8654 if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8659 if (auto *CI = dyn_cast<CastInst>(Instr)) {
8664 return tryToWiden(Instr, Operands, VPBB);
8667 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8671 auto MaxVFTimes2 = MaxVF * 2;
8673 VFRange SubRange = {VF, MaxVFTimes2};
8674 if (auto Plan = tryToBuildVPlanWithVPRecipes(SubRange)) {
8686 VPlans.push_back(std::move(Plan));
8696 Value *StartIdx = ConstantInt::get(IdxTy, 0);
8703 Header->insert(CanonicalIVPHI, Header->begin());
8708 Instruction::Add, {CanonicalIVPHI, &Plan.getVFxUF()}, {HasNUW, false}, DL,
8710 CanonicalIVPHI->addOperand(CanonicalIVIncrement);
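// Editor's note: the canonical IV recipes built above correspond, in scalar
// terms, to the vector loop's counter (a sketch, not the emitted IR):
//   for (uint64_t iv = 0; iv < VectorTripCount; iv += VF * UF)
//     ; // each vector iteration retires VF * UF scalar iterations
// The phi starts at StartIdx = 0, and the increment by VFxUF is added back
// to the phi as its backedge operand on the last line.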
8729 Value *IncomingValue =
8730 ExitPhi.getIncomingValueForBlock(ExitingBB);
8736 if ((isa<VPWidenIntOrFpInductionRecipe>(V) &&
8737 !cast<VPWidenIntOrFpInductionRecipe>(V)->getTruncInst()) ||
8738 isa<VPWidenPointerInductionRecipe>(V))
8760 if (isa<VPIRBasicBlock>(Succ))
8762 assert(!ScalarPHVPBB && "Two candidates for ScalarPHVPBB?");
8763 ScalarPHVPBB = cast<VPBasicBlock>(Succ);
8768 VPBuilder ScalarPHBuilder(ScalarPHVPBB);
8772 if (auto *Terminator = MiddleVPBB->getTerminator()) {
8773 auto *Condition = dyn_cast<VPInstruction>(Terminator->getOperand(0));
8774 assert((!Condition || Condition->getParent() == MiddleVPBB) &&
8775 "Condition expected in MiddleVPBB");
8776 MiddleBuilder.setInsertPoint(Condition ? Condition : Terminator);
8782 auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&HeaderPhi);
8788 {FOR->getBackedgeValue(), OneVPV},
8789 {}, "vector.recur.extract");
8792 "scalar.recur.init");
8793 Plan.addLiveOut(cast<PHINode>(FOR->getUnderlyingInstr()), ResumePhiRecipe);
8798 LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VFRange &Range) {
8813 bool RequiresScalarEpilogueCheck =
8828 bool IVUpdateMayOverflow = false;
8839 VPRecipeBuilder RecipeBuilder(*Plan, OrigLoop, TLI, Legal, CM, PSE, Builder);
8859 "Unsupported interleave factor for scalable vectors");
8864 InterleaveGroups.insert(IG);
8882 bool NeedsBlends = BB != HeaderBB && !BB->phis().empty();
8883 return Legal->blockNeedsPredication(BB) || NeedsBlends;
8888 if (VPBB != HeaderVPBB)
8892 if (VPBB == HeaderVPBB)
8893 RecipeBuilder.createHeaderMask();
8894 else if (NeedsMasks)
8895 RecipeBuilder.createBlockInMask(BB);
8902 auto *Phi = dyn_cast<PHINode>(Instr);
8903 if (Phi && Phi->getParent() == HeaderBB) {
8904 Operands.push_back(Plan->getOrAddLiveIn(
8907 auto OpRange = RecipeBuilder.mapToVPValues(Instr->operands());
8908 Operands = {OpRange.begin(), OpRange.end()};
8914 if ((SI = dyn_cast<StoreInst>(&I)) &&
8919 RecipeBuilder.tryToCreateWidenRecipe(Instr, Operands, Range, VPBB);
8921 Recipe = RecipeBuilder.handleReplication(Instr, Range);
8923 RecipeBuilder.setRecipe(Instr, Recipe);
8924 if (isa<VPHeaderPHIRecipe>(Recipe)) {
8935 "unexpected recipe needs moving");
8955 assert(isa<VPRegionBlock>(Plan->getVectorLoopRegion()) &&
8956 !Plan->getVectorLoopRegion()->getEntryBasicBlock()->empty() &&
8957 "entry block must be set to a VPRegionBlock having a non-empty entry "
8959 RecipeBuilder.fixHeaderPhis();
8969 adjustRecipesForReductions(Plan, RecipeBuilder, Range.Start);
8974 for (const auto *IG : InterleaveGroups) {
8976 cast<VPWidenMemoryRecipe>(RecipeBuilder.getRecipe(IG->getInsertPos()));
8978 for (unsigned i = 0; i < IG->getFactor(); ++i)
8979 if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) {
8980 auto *StoreR = cast<VPWidenStoreRecipe>(RecipeBuilder.getRecipe(SI));
8981 StoredValues.push_back(StoreR->getStoredValue());
8984 bool NeedsMaskForGaps =
8987 Recipe->getMask(), NeedsMaskForGaps);
8988 VPIG->insertBefore(Recipe);
8990 for (unsigned i = 0; i < IG->getFactor(); ++i)
8992 VPRecipeBase *MemberR = RecipeBuilder.getRecipe(Member);
8993 if (!Member->getType()->isVoidTy()) {
9004 Plan->setName("Initial VPlan");
9009 auto *StrideV = cast<SCEVUnknown>(Stride)->getValue();
9010 auto *ScevStride = dyn_cast<SCEVConstant>(PSE.getSCEV(StrideV));
9015 auto *CI = Plan->getOrAddLiveIn(
9016 ConstantInt::get(Stride->getType(), ScevStride->getAPInt()));
9017 if (VPValue *StrideVPV = Plan->getLiveIn(StrideV))
9023 if (!isa<SExtInst, ZExtInst>(U))
9025 VPValue *StrideVPV = Plan->getLiveIn(U);
9028 unsigned BW = U->getType()->getScalarSizeInBits();
9029 APInt C = isa<SExtInst>(U) ? ScevStride->getAPInt().sext(BW)
9030 : ScevStride->getAPInt().zext(BW);
9031 VPValue *CI = Plan->getOrAddLiveIn(ConstantInt::get(U->getType(), C));
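// Editor's note: this block exploits stride versioning. Once runtime checks
// guarantee a symbolic stride equals a constant (typically 1), the live-in
// for the stride value, and for any sext/zext of it, is replaced by that
// constant extended to the user's width, letting later simplification treat
// the strided access as consecutive.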
9049 bool WithoutRuntimeCheck =
9052 WithoutRuntimeCheck);
9068 *PSE.getSE(), true, false, OrigLoop);
9072 HCFGBuilder.buildHierarchicalCFG();
9080 *PSE.getSE(), *TLI);
9085 Plan->getVectorLoopRegion()->getExitingBasicBlock()->getTerminator();
9086 Term->eraseFromParent();
9110 void LoopVectorizationPlanner::adjustRecipesForReductions(
9112 VPRegionBlock *VectorLoopRegion = Plan->getVectorLoopRegion();
9119 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R))
9122 bool HasIntermediateStore = false;
9127 auto *IS2 = R2->getRecurrenceDescriptor().IntermediateStore;
9128 HasIntermediateStore |= IS1 || IS2;
9149 if (HasIntermediateStore && ReductionPHIList.size() > 1)
9151 R->moveBefore(*Header, Header->getFirstNonPhi());
9154 auto *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
9155 if (!PhiR || !PhiR->isInLoop() || (MinVF.isScalar() && !PhiR->isOrdered()))
9161 "AnyOf reductions are not allowed for in-loop reductions");
9166 for (unsigned I = 0; I != Worklist.size(); ++I) {
9169 auto *UserRecipe = dyn_cast<VPSingleDefRecipe>(U);
9171 assert(isa<VPLiveOut>(U) && "U must either be a VPSingleDef or VPLiveOut");
9175 Worklist.insert(UserRecipe);
9188 Instruction *CurrentLinkI = CurrentLink->getUnderlyingInstr();
9191 unsigned IndexOfFirstOperand;
9199 "Expected instruction to be a call to the llvm.fmuladd intrinsic");
9200 assert(((MinVF.isScalar() && isa<VPReplicateRecipe>(CurrentLink)) ||
9201 isa<VPWidenCallRecipe>(CurrentLink)) &&
9202 CurrentLink->getOperand(2) == PreviousLink &&
9203 "expected a call where the previous link is the added operand");
9211 {CurrentLink->getOperand(0), CurrentLink->getOperand(1)},
9213 LinkVPBB->insert(FMulRecipe, CurrentLink->getIterator());
9216 auto *Blend = dyn_cast<VPBlendRecipe>(CurrentLink);
9217 if (PhiR->isInLoop() && Blend) {
9218 assert(Blend->getNumIncomingValues() == 2 &&
9219 "Blend must have 2 incoming values");
9220 if (Blend->getIncomingValue(0) == PhiR)
9221 Blend->replaceAllUsesWith(Blend->getIncomingValue(1));
9223 assert(Blend->getIncomingValue(1) == PhiR &&
9224 "PhiR must be an operand of the blend");
9225 Blend->replaceAllUsesWith(Blend->getIncomingValue(0));
9231 if (isa<VPWidenRecipe>(CurrentLink)) {
9232 assert(isa<CmpInst>(CurrentLinkI) &&
9233 "need to have the compare of the select");
9236 assert(isa<VPWidenSelectRecipe>(CurrentLink) &&
9237 "must be a select recipe");
9238 IndexOfFirstOperand = 1;
9241 "Expected to replace a VPWidenSC");
9242 IndexOfFirstOperand = 0;
9247 CurrentLink->getOperand(IndexOfFirstOperand) == PreviousLink
9248 ? IndexOfFirstOperand + 1
9249 : IndexOfFirstOperand;
9250 VecOp = CurrentLink->getOperand(VecOpId);
9251 assert(VecOp != PreviousLink &&
9252 CurrentLink->getOperand(CurrentLink->getNumOperands() - 1 -
9253 (VecOpId - IndexOfFirstOperand)) ==
9255 "PreviousLink must be the operand other than VecOp");
9271 CurrentLink->replaceAllUsesWith(RedRecipe);
9272 PreviousLink = RedRecipe;
9281 Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
9294 return isa<VPWidenSelectRecipe>(U) ||
9295 (isa<VPReplicateRecipe>(U) &&
9296 cast<VPReplicateRecipe>(U)->getUnderlyingInstr()->getOpcode() ==
9297 Instruction::Select);
9303 for (unsigned I = 0; I != CmpR->getNumOperands(); ++I)
9304 if (CmpR->getOperand(I) == PhiR)
9312 if (Select->getOperand(1) == PhiR)
9315 Select->getVPSingleValue()->replaceAllUsesWith(Or);
9329 assert(OrigExitingVPV->getDefiningRecipe()->getParent() != LatchVPBB &&
9330 "reduction recipe must be defined before latch");
9332 std::optional<FastMathFlags> FMFs =
9339 return isa<VPInstruction>(&U) &&
9340 cast<VPInstruction>(&U)->getOpcode() ==
9357 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
9366 Trunc->insertAfter(NewExitingVPV->getDefiningRecipe());
9367 Extnd->insertAfter(Trunc);
9369 PhiR->setOperand(1, Extnd->getVPSingleValue());
9370 NewExitingVPV = Extnd;
9389 OrigExitingVPV->replaceUsesWithIf(
9390 FinalReductionResult,
9397 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
9400 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
9401 IG->getInsertPos()->printAsOperand(O, false);
9411 for (unsigned i = 0; i < IG->getFactor(); ++i) {
9412 if (!IG->getMember(i))
9415 O << "\n" << Indent << "  store ";
9417 O << " to index " << i;
9419 O << "\n" << Indent << "  ";
9421 O << " = load from index " << i;
9430 "Not a pointer induction according to InductionDescriptor!");
9432 "Unexpected type.");
9434 "Recipe should have been replaced");
9437 PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0, true));
9442 Type *ScStValueType = ScalarStartValue->getType();
9447 NewPointerPhi->addIncoming(ScalarStartValue, VectorPH);
9454 Value *NumUnrolledElems =
9465 NewPointerPhi->addIncoming(InductionGEP, VectorPH);
9470 for (unsigned Part = 0; Part < State.UF; ++Part) {
9472 Value *StartOffsetScalar =
9474 Value *StartOffset =
9481 "scalar step must be the same across all parts");
9488 State.set(this, GEP, Part);
9493 assert(!State.Instance && "VPDerivedIVRecipe being replicated.");
9504 Kind, cast_if_present<BinaryOperator>(FPBinOp));
9505 DerivedIV->setName("offset.idx");
9506 assert(DerivedIV != CanonicalIV && "IV didn't need transforming?");
9522 "uniform recipe shouldn't be predicated");
9528 if (State.Instance->Lane.isFirstLane()) {
9542 if ((isa<LoadInst>(UI) || isa<StoreInst>(UI)) &&
9544 return Op->isDefinedOutsideVectorRegions();
9548 for (unsigned Part = 1; Part < State.UF; ++Part)
9557 for (unsigned Part = 0; Part < State.UF; ++Part)
9564 if (isa<StoreInst>(UI) &&
9575 for (unsigned Part = 0; Part < State.UF; ++Part)
9576 for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9588 auto &Builder = State.Builder;
9590 for (unsigned Part = 0; Part < State.UF; ++Part) {
9592 Value *Mask = nullptr;
9593 if (auto *VPMask = getMask()) {
9596 Mask = State.get(VPMask, Part);
9598 Mask = Builder.CreateVectorReverse(Mask, "reverse");
9603 NewLI = Builder.CreateMaskedGather(DataTy, Addr, Alignment, Mask, nullptr,
9604 "wide.masked.gather");
9606 NewLI = Builder.CreateMaskedLoad(DataTy, Addr, Alignment, Mask,
9608 "wide.masked.load");
9610 NewLI = Builder.CreateAlignedLoad(DataTy, Addr, Alignment, "wide.load");
9615 NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
9616 State.set(this, NewLI, Part);
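// Editor's note: the three emission paths above map onto standard IR forms:
// non-consecutive addresses become an llvm.masked.gather, a consecutive but
// predicated access becomes an llvm.masked.load, and an unpredicated
// consecutive access is a plain wide load; reversed accesses additionally
// reverse both the mask and the loaded value.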
9625 Value *AllTrueMask =
9627 return Builder.CreateIntrinsic(ValTy, Intrinsic::experimental_vp_reverse,
9628 {Operand, AllTrueMask, EVL}, nullptr, Name);
9632 assert(State.UF == 1 && "Expected only UF == 1 when vectorizing with "
9633 "explicit vector length.");
9641 auto &Builder = State.Builder;
9646 Value *Mask = nullptr;
9648 Mask = State.get(VPMask, 0);
9652 Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
9657 Builder.CreateIntrinsic(DataTy, Intrinsic::vp_gather, {Addr, Mask, EVL},
9658 nullptr, "wide.masked.gather");
9663 Instruction::Load, DataTy, Addr, "vp.op.load"));
9671 State.set(this, Res, 0);
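// Editor's note: in the EVL (explicit vector length) path the tail is folded
// by the @llvm.vp.* intrinsics themselves: every operation carries a mask
// and an element count, so only UF == 1 is supported and no separate
// remainder loop is needed. Conceptually (simplified pseudo-IR):
//   %v = call <vscale x 4 x i32> @llvm.vp.load(ptr %p, mask %m, i32 %evl)
// processes just the first %evl lanes, which is how the final, partial
// vector iteration is expressed.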
9681 auto &Builder = State.Builder;
9684 for (unsigned Part = 0; Part < State.UF; ++Part) {
9686 Value *Mask = nullptr;
9687 if (auto *VPMask = getMask()) {
9690 Mask = State.get(VPMask, Part);
9692 Mask = Builder.CreateVectorReverse(Mask, "reverse");
9695 Value *StoredVal = State.get(StoredVPValue, Part);
9699 StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse");
9705 NewSI = Builder.CreateMaskedScatter(StoredVal, Addr, Alignment, Mask);
9707 NewSI = Builder.CreateMaskedStore(StoredVal, Addr, Alignment, Mask);
9709 NewSI = Builder.CreateAlignedStore(StoredVal, Addr, Alignment);
9715 assert(State.UF == 1 && "Expected only UF == 1 when vectorizing with "
9716 "explicit vector length.");
9723 auto &Builder = State.Builder;
9727 Value *StoredVal = State.get(StoredValue, 0);
9731 Value *Mask = nullptr;
9733 Mask = State.get(VPMask, 0);
9737 Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
9740 if (CreateScatter) {
9742 Intrinsic::vp_scatter,
9743 {StoredVal, Addr, Mask, EVL});
9749 {StoredVal, Addr}));
9818 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
9822 Function *F = L->getHeader()->getParent();
9828 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
9833 LoopVectorizationPlanner LVP(L, LI, DT, TLI, *TTI, LVL, CM, IAI, PSE, Hints,
9853 bool AddBranchWeights =
9855 GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, TTI,
9856 F->getDataLayout(), AddBranchWeights);
9858 VF.Width, 1, LVL, &CM, BFI, PSI, Checks);
9860 << L->getHeader()->getParent()->getName() << "\"\n");
9880 if (auto *S = dyn_cast<StoreInst>(&Inst)) {
9881 if (S->getValueOperand()->getType()->isFloatTy())
9891 while (!Worklist.empty()) {
9893 if (!L->contains(I))
9895 if (!Visited.insert(I).second)
9902 if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
9905 I->getDebugLoc(), L->getHeader())
9906 << "floating point conversion changes vector width. "
9907 << "Mixed floating point precision requires an up/down "
9908 << "cast that will negatively impact performance.";
9911 for (Use &Op : I->operands())
9912 if (auto *OpI = dyn_cast<Instruction>(Op))
9919 std::optional<unsigned> VScale, Loop *L,
9932 << "LV: Interleaving only is not profitable due to runtime checks\n");
9973 unsigned AssumedMinimumVscale = 1;
9975 AssumedMinimumVscale = *VScale;
9976 IntVF *= AssumedMinimumVscale;
9994 uint64_t MinTC = std::max(MinTC1, MinTC2);
9996 MinTC = alignTo(MinTC, IntVF);
10000 dbgs() << "LV: Minimum required TC for runtime checks to be profitable:"
10008 LLVM_DEBUG(dbgs() << "LV: Vectorization is not beneficial: expected "
10009 "trip count < minimum profitable VF ("
10020 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
10022 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
10027 "VPlan-native path is not enabled. Only process inner loops.");
10030 << L->getHeader()->getParent()->getName() << "' from "
10031 << L->getLocStr() << "\n");
10036 dbgs() << "LV: Loop hints:"
10047 Function *F = L->getHeader()->getParent();
10058 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
10069 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
10079 if (!L->isInnermost())
10083 assert(L->isInnermost() && "Inner loop expected.");
10093 if (UseInterleaved)
10105 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
10106 << "This loop is worth vectorizing only if no scalar "
10107 << "iteration overheads are incurred.");
10109 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
10122 LLVM_DEBUG(dbgs() << " But the target considers the trip count too "
10123 "small to consider vectorizing.\n");
10125 "The trip count is below the minimal threshold value.",
10126 "loop trip count is too low, avoiding vectorization",
10127 "LowTripCount", ORE, L);
10136 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
10138 "Can't vectorize when the NoImplicitFloat attribute is used",
10139 "loop not vectorized due to NoImplicitFloat attribute",
10140 "NoImplicitFloat", ORE, L);
10152 "Potentially unsafe FP op prevents vectorization",
10153 "loop not vectorized due to unsafe FP support.",
10154 "UnsafeFP", ORE, L);
10159 bool AllowOrderedReductions;
10169 ExactFPMathInst->getDebugLoc(),
10170 ExactFPMathInst->getParent())
10171 << "loop not vectorized: cannot prove it is safe to reorder "
10172 "floating-point operations";
10174 LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
10175 "reorder floating-point operations\n");
10181 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
10184 LoopVectorizationPlanner LVP(L, LI, DT, TLI, *TTI, &LVL, CM, IAI, PSE, Hints,
10192 std::optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
10197 bool AddBranchWeights =
10200 F->getDataLayout(), AddBranchWeights);
10206 unsigned SelectedIC = std::max(IC, UserIC);
10213 bool ForceVectorization =
10215 if (!ForceVectorization &&
10217 *PSE.getSE(), SEL)) {
10220 DEBUG_TYPE, "CantReorderMemOps", L->getStartLoc(),
10222 << "loop not vectorized: cannot prove it is safe to reorder "
10223 "memory operations";
10232 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
10233 bool VectorizeLoop = true, InterleaveLoop = true;
10235 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
10236 VecDiagMsg = std::make_pair(
10237 "VectorizationNotBeneficial",
10238 "the cost-model indicates that vectorization is not beneficial");
10239 VectorizeLoop = false;
10242 if (!MaybeVF && UserIC > 1) {
10245 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
10246 "interleaving should be avoided up front\n");
10247 IntDiagMsg = std::make_pair(
10248 "InterleavingAvoided",
10249 "Ignoring UserIC, because interleaving was avoided up front");
10250 InterleaveLoop = false;
10251 } else if (IC == 1 && UserIC <= 1) {
10254 IntDiagMsg = std::make_pair(
10255 "InterleavingNotBeneficial",
10256 "the cost-model indicates that interleaving is not beneficial");
10257 InterleaveLoop = false;
10259 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
10260 IntDiagMsg.second +=
10261 " and is explicitly disabled or interleave count is set to 1";
10263 } else if (IC > 1 && UserIC == 1) {
10266 dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
10267 IntDiagMsg = std::make_pair(
10268 "InterleavingBeneficialButDisabled",
10269 "the cost-model indicates that interleaving is beneficial "
10270 "but is explicitly disabled or interleave count is set to 1");
10271 InterleaveLoop = false;
10275 IC = UserIC > 0 ? UserIC : IC;
10279 if (!VectorizeLoop && !InterleaveLoop) {
10283 L->getStartLoc(), L->getHeader())
10284 << VecDiagMsg.second;
10288 L->getStartLoc(), L->getHeader())
10289 << IntDiagMsg.second;
10292 } else if (!VectorizeLoop && InterleaveLoop) {
10296 L->getStartLoc(), L->getHeader())
10297 << VecDiagMsg.second;
10299 } else if (VectorizeLoop && !InterleaveLoop) {
10301 << ") in " << L->getLocStr() << '\n');
10304 L->getStartLoc(), L->getHeader())
10305 << IntDiagMsg.second;
10307 } else if (VectorizeLoop && InterleaveLoop) {
10309 << ") in " << L->getLocStr() << '\n');
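// Editor's note: the four branches above enumerate the 2x2 decision matrix
// over (VectorizeLoop, InterleaveLoop): emit remarks and bail, interleave
// (unroll) without vectorizing, vectorize without interleaving, or do both;
// the diagnostic pair chosen earlier feeds the emitted remark in each case.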
10313 bool DisableRuntimeUnroll = false;
10314 MDNode *OrigLoopID = L->getLoopID();
10316 using namespace ore;
10317 if (!VectorizeLoop) {
10318 assert(IC > 1 && "interleave count should not be 1 or 0");
10321 InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
10327 "VPlan cost model and legacy cost model disagreed");
10333 << "interleaved loop (interleaved count: "
10334 << NV("InterleaveCount", IC) << ")";
10349 EPI, &LVL, &CM, BFI, PSI, Checks);
10351 std::unique_ptr<VPlan> BestMainPlan(
10353 const auto &[ExpandedSCEVs, ReductionResumeValues] = LVP.executePlan(
10368 Header->setName("vec.epilog.vector.body");
10378 auto *ExpandR = cast<VPExpandSCEVRecipe>(&R);
10380 ExpandedSCEVs.find(ExpandR->getSCEV())->second);
10384 ExpandR->eraseFromParent();
10391 if (isa<VPCanonicalIVPHIRecipe>(&R))
10394 Value *ResumeV = nullptr;
10396 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) {
10398 ReductionPhi->getRecurrenceDescriptor();
10400 ResumeV = ReductionResumeValues.find(&RdxDesc)->second;
10406 cast<Instruction>(ResumeV)->getParent()->getFirstNonPHI());
10416 if (auto *Ind = dyn_cast<VPWidenPointerInductionRecipe>(&R)) {
10417 IndPhi = cast<PHINode>(Ind->getUnderlyingValue());
10418 ID = &Ind->getInductionDescriptor();
10420 auto *WidenInd = cast<VPWidenIntOrFpInductionRecipe>(&R);
10421 IndPhi = WidenInd->getPHINode();
10422 ID = &WidenInd->getInductionDescriptor();
10429 assert(ResumeV && "Must have a resume value");
10431 cast<VPHeaderPHIRecipe>(&R)->setStartValue(StartVal);
10435 "DT not preserved correctly");
10437 DT, true, &ExpandedSCEVs);
10438 ++LoopsEpilogueVectorized;
10441 DisableRuntimeUnroll = true;
10448 "Plan should have a single VF");
10451 <<
"VF picked by VPlan cost model: " << Width <<
"\n");
10453 "VPlan cost model and legacy cost model disagreed");
10465 DisableRuntimeUnroll =
true;
10475 std::optional<MDNode *> RemainderLoopID =
10478 if (RemainderLoopID) {
10479 L->setLoopID(*RemainderLoopID);
10481 if (DisableRuntimeUnroll)
10520 bool Changed =
false, CFGChanged =
false;
10527 for (const auto &L : *LI)
10528 Changed |= CFGChanged |=
10539 LoopsAnalyzed += Worklist.size();
10542 while (!Worklist.empty()) {
10588 runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AC, LAIs, ORE, PSI);
10589 if (!Result.MadeAnyChange)
10603 if (Result.MadeCFGChange) {
10619 OS, MapClassName2PassName);
10622 OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
10623 OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
static unsigned getIntrinsicID(const SDNode *N)
AMDGPU Lower Kernel Arguments
amdgpu AMDGPU Register Bank Select
This file implements a class to represent arbitrary precision integral constant values and operations...
ReachingDefAnalysis InstSet & ToRemove
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static bool isEqual(const Function &Caller, const Function &Callee)
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This is the interface for LLVM's primary stateless and local alias analysis.
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
Analysis containing CSE Info
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static cl::opt< TargetTransformInfo::TargetCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(TargetTransformInfo::TCK_RecipThroughput), cl::values(clEnumValN(TargetTransformInfo::TCK_RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(TargetTransformInfo::TCK_Latency, "latency", "Instruction latency"), clEnumValN(TargetTransformInfo::TCK_CodeSize, "code-size", "Code size"), clEnumValN(TargetTransformInfo::TCK_SizeAndLatency, "size-latency", "Code size and latency")))
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define DEBUG_WITH_TYPE(TYPE, X)
DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.
This file defines DenseMapInfo traits for DenseMap.
This file defines the DenseMap class.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
This is the interface for a simple mod/ref and alias analysis over globals.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This defines the Use class.
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
This header provides classes for managing per-loop analyses.
static const char * VerboseDebug
loop Loop Strength Reduction
This file defines the LoopVectorizationLegality class.
This file provides a LoopVectorizationPlanner class.
static void collectSupportedLoops(Loop &L, LoopInfo *LI, OptimizationRemarkEmitter *ORE, SmallVectorImpl< Loop * > &V)
static cl::opt< unsigned > EpilogueVectorizationForceVF("epilogue-vectorization-force-VF", cl::init(1), cl::Hidden, cl::desc("When epilogue vectorization is enabled, and a value greater than " "1 is specified, forces the given VF for all applicable epilogue " "loops."))
static cl::opt< bool > UseLegacyCostModel("vectorize-use-legacy-cost-model", cl::init(false), cl::Hidden, cl::desc("Use the legacy cost model instead of the VPlan-based cost model. " "This option will be removed in the future."))
static ElementCount determineVPlanVF(const TargetTransformInfo &TTI, LoopVectorizationCostModel &CM)
static void createAndCollectMergePhiForReduction(VPInstruction *RedResult, DenseMap< const RecurrenceDescriptor *, Value * > &ReductionResumeValues, VPTransformState &State, Loop *OrigLoop, BasicBlock *LoopMiddleBlock, bool VectorizingEpilogue)
static std::optional< unsigned > getSmallBestKnownTC(ScalarEvolution &SE, Loop *L)
Returns "best known" trip count for the specified loop L as defined by the following procedure: 1) Re...
static cl::opt< unsigned > VectorizeMemoryCheckThreshold("vectorize-memory-check-threshold", cl::init(128), cl::Hidden, cl::desc("The maximum allowed number of runtime memory checks"))
static cl::opt< unsigned > TinyTripCountVectorThreshold("vectorizer-min-trip-count", cl::init(16), cl::Hidden, cl::desc("Loops with a constant trip count that is smaller than this " "value are vectorized only if no scalar iteration overheads " "are incurred."))
Loops with a known constant trip count below this number are vectorized only if no scalar iteration o...
static Instruction * createReverseEVL(IRBuilderBase &Builder, Value *Operand, Value *EVL, const Twine &Name)
Use all-true mask for reverse rather than actual mask, as it avoids a dependence w/o affecting the re...
static void debugVectorizationMessage(const StringRef Prefix, const StringRef DebugMsg, Instruction *I)
Write a DebugMsg about vectorization to the debug output stream.
static cl::opt< bool > EnableCondStoresVectorization("enable-cond-stores-vec", cl::init(true), cl::Hidden, cl::desc("Enable if predication of stores during vectorization."))
static Value * interleaveVectors(IRBuilderBase &Builder, ArrayRef< Value * > Vals, const Twine &Name)
Return a vector containing interleaved elements from multiple smaller input vectors.
static Value * emitTransformedIndex(IRBuilderBase &B, Value *Index, Value *StartValue, Value *Step, InductionDescriptor::InductionKind InductionKind, const BinaryOperator *InductionBinOp)
Compute the transformed value of Index at offset StartValue using step StepValue.
static DebugLoc getDebugLocFromInstOrOperands(Instruction *I)
Look for a meaningful debug location on the instruction or it's operands.
static void emitInvalidCostRemarks(SmallVector< InstructionVFPair > InvalidCosts, OptimizationRemarkEmitter *ORE, Loop *TheLoop)
static void addUsersInExitBlock(VPBasicBlock *HeaderVPBB, Loop *OrigLoop, VPRecipeBuilder &Builder, VPlan &Plan)
const char LLVMLoopVectorizeFollowupAll[]
static cl::opt< bool > ForceTargetSupportsScalableVectors("force-target-supports-scalable-vectors", cl::init(false), cl::Hidden, cl::desc("Pretend that scalable vectors are supported, even if the target does " "not support them. This flag should only be used for testing."))
static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, bool HasNUW, DebugLoc DL)
static std::optional< unsigned > getVScaleForTuning(const Loop *L, const TargetTransformInfo &TTI)
Convenience function that returns the value of vscale_range iff vscale_range.min == vscale_range....
static bool useActiveLaneMaskForControlFlow(TailFoldingStyle Style)
static constexpr uint32_t MemCheckBypassWeights[]
static Type * MaybeVectorizeType(Type *Elt, ElementCount VF)
cl::opt< unsigned > ForceTargetInstructionCost("force-target-instruction-cost", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's expected cost for " "an instruction to a single constant value. Mostly " "useful for getting consistent testing."))
std::optional< unsigned > getMaxVScale(const Function &F, const TargetTransformInfo &TTI)
static constexpr uint32_t MinItersBypassWeights[]
static cl::opt< unsigned > ForceTargetNumScalarRegs("force-target-num-scalar-regs", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's number of scalar registers."))
static cl::opt< bool > UseWiderVFIfCallVariantsPresent("vectorizer-maximize-bandwidth-for-vector-calls", cl::init(true), cl::Hidden, cl::desc("Try wider VFs if they enable the use of vector variants"))
static cl::opt< unsigned > SmallLoopCost("small-loop-cost", cl::init(20), cl::Hidden, cl::desc("The cost of a loop that is considered 'small' by the interleaver."))
static cl::opt< unsigned > ForceTargetNumVectorRegs("force-target-num-vector-regs", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's number of vector registers."))
static bool areRuntimeChecksProfitable(GeneratedRTChecks &Checks, VectorizationFactor &VF, std::optional< unsigned > VScale, Loop *L, ScalarEvolution &SE, ScalarEpilogueLowering SEL)
static bool isExplicitVecOuterLoop(Loop *OuterLp, OptimizationRemarkEmitter *ORE)
static cl::opt< bool > EnableIndVarRegisterHeur("enable-ind-var-reg-heur", cl::init(true), cl::Hidden, cl::desc("Count the induction variable only once when interleaving"))
static cl::opt< TailFoldingStyle > ForceTailFoldingStyle("force-tail-folding-style", cl::desc("Force the tail folding style"), cl::init(TailFoldingStyle::None), cl::values(clEnumValN(TailFoldingStyle::None, "none", "Disable tail folding"), clEnumValN(TailFoldingStyle::Data, "data", "Create lane mask for data only, using active.lane.mask intrinsic"), clEnumValN(TailFoldingStyle::DataWithoutLaneMask, "data-without-lane-mask", "Create lane mask with compare/stepvector"), clEnumValN(TailFoldingStyle::DataAndControlFlow, "data-and-control", "Create lane mask using active.lane.mask intrinsic, and use " "it for both data and control flow"), clEnumValN(TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck, "data-and-control-without-rt-check", "Similar to data-and-control, but remove the runtime check"), clEnumValN(TailFoldingStyle::DataWithEVL, "data-with-evl", "Use predicated EVL instructions for tail folding. If EVL " "is unsupported, fallback to data-without-lane-mask.")))
static cl::opt< bool > EnableEpilogueVectorization("enable-epilogue-vectorization", cl::init(true), cl::Hidden, cl::desc("Enable vectorization of epilogue loops."))
static ScalarEpilogueLowering getScalarEpilogueLowering(Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, LoopVectorizationLegality &LVL, InterleavedAccessInfo *IAI)
const char VerboseDebug[]
static cl::opt< bool > PreferPredicatedReductionSelect("prefer-predicated-reduction-select", cl::init(false), cl::Hidden, cl::desc("Prefer predicating a reduction operation over an after loop select."))
static VPWidenIntOrFpInductionRecipe * createWidenInductionRecipes(PHINode *Phi, Instruction *PhiOrTrunc, VPValue *Start, const InductionDescriptor &IndDesc, VPlan &Plan, ScalarEvolution &SE, Loop &OrigLoop)
Creates a VPWidenIntOrFpInductionRecpipe for Phi.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop, Instruction *I)
Create an analysis remark that explains why vectorization failed.
static constexpr uint32_t SCEVCheckBypassWeights[]
static cl::opt< bool > PreferInLoopReductions("prefer-inloop-reductions", cl::init(false), cl::Hidden, cl::desc("Prefer in-loop vector reductions, " "overriding the targets preference."))
static cl::opt< unsigned > EpilogueVectorizationMinVF("epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden, cl::desc("Only loops with vectorization factor equal to or larger than " "the specified value are considered for epilogue vectorization."))
const char LLVMLoopVectorizeFollowupVectorized[]
static cl::opt< bool > EnableLoadStoreRuntimeInterleave("enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, cl::desc("Enable runtime interleaving until load/store ports are saturated"))
static void addLiveOutsForFirstOrderRecurrences(VPlan &Plan)
Feed a resume value for every FOR from the vector loop to the scalar loop, if middle block branches t...
static cl::opt< bool > VPlanBuildStressTest("vplan-build-stress-test", cl::init(false), cl::Hidden, cl::desc("Build VPlan for every supported loop nest in the function and bail " "out right after the build (stress test the VPlan H-CFG construction " "in the VPlan-native vectorization path)."))
static bool hasIrregularType(Type *Ty, const DataLayout &DL)
A helper function that returns true if the given type is irregular.
static cl::opt< bool > LoopVectorizeWithBlockFrequency("loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, cl::desc("Enable the use of the block frequency analysis to access PGO " "heuristics minimizing code growth in cold regions and being more " "aggressive in hot regions."))
static Value * getExpandedStep(const InductionDescriptor &ID, const SCEV2ValueTy &ExpandedSCEVs)
Return the expanded step for ID using ExpandedSCEVs to look up SCEV expansion results.
const char LLVMLoopVectorizeFollowupEpilogue[]
static bool useActiveLaneMask(TailFoldingStyle Style)
static bool isIndvarOverflowCheckKnownFalse(const LoopVectorizationCostModel *Cost, ElementCount VF, std::optional< unsigned > UF=std::nullopt)
For the given VF and UF and maximum trip count computed for the loop, return whether the induction va...
static cl::opt< PreferPredicateTy::Option > PreferPredicateOverEpilogue("prefer-predicate-over-epilogue", cl::init(PreferPredicateTy::ScalarEpilogue), cl::Hidden, cl::desc("Tail-folding and predication preferences over creating a scalar " "epilogue loop."), cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue, "scalar-epilogue", "Don't tail-predicate loops, create scalar epilogue"), clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue, "predicate-else-scalar-epilogue", "prefer tail-folding, create scalar epilogue if tail " "folding fails."), clEnumValN(PreferPredicateTy::PredicateOrDontVectorize, "predicate-dont-vectorize", "prefers tail-folding, don't attempt vectorization if " "tail-folding fails.")))
static cl::opt< bool > EnableInterleavedMemAccesses("enable-interleaved-mem-accesses", cl::init(false), cl::Hidden, cl::desc("Enable vectorization on interleaved memory accesses in a loop"))
static cl::opt< bool > EnableMaskedInterleavedMemAccesses("enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden, cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"))
An interleave-group may need masking if it resides in a block that needs predication,...
static cl::opt< bool > ForceOrderedReductions("force-ordered-reductions", cl::init(false), cl::Hidden, cl::desc("Enable the vectorisation of loops with in-order (strict) " "FP reductions"))
static void cse(BasicBlock *BB)
Perform cse of induction variable instructions.
static const SCEV * getAddressAccessSCEV(Value *Ptr, LoopVectorizationLegality *Legal, PredicatedScalarEvolution &PSE, const Loop *TheLoop)
Gets Address Access SCEV after verifying that the access pattern is loop invariant except the inducti...
static cl::opt< cl::boolOrDefault > ForceSafeDivisor("force-widen-divrem-via-safe-divisor", cl::Hidden, cl::desc("Override cost based safe divisor widening for div/rem instructions"))
static cl::opt< unsigned > ForceTargetMaxVectorInterleaveFactor("force-target-max-vector-interleave", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's max interleave factor for " "vectorized loops."))
static bool processLoopInVPlanNativePath(Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints, LoopVectorizationRequirements &Requirements)
static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI)
static cl::opt< unsigned > NumberOfStoresToPredicate("vectorize-num-stores-pred", cl::init(1), cl::Hidden, cl::desc("Max number of stores to be predicated behind an if."))
The number of stores in a loop that are allowed to need predication.
static void AddRuntimeUnrollDisableMetaData(Loop *L)
static cl::opt< unsigned > MaxNestedScalarReductionIC("max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, cl::desc("The maximum interleave count to use when interleaving a scalar " "reduction in a nested loop."))
static cl::opt< unsigned > ForceTargetMaxScalarInterleaveFactor("force-target-max-scalar-interleave", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's max interleave factor for " "scalar loops."))
static cl::opt< bool > PrintVPlansInDotFormat("vplan-print-in-dot-format", cl::Hidden, cl::desc("Use dot format instead of plain text when dumping VPlans"))
static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE)
static bool willGenerateVectors(VPlan &Plan, ElementCount VF, const TargetTransformInfo &TTI)
Check if any recipe of Plan will generate a vector value, which will be assigned a vector register.
static cl::opt< bool > MaximizeBandwidth("vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden, cl::desc("Maximize bandwidth when selecting vectorization factor which " "will be determined by the smallest type in loop."))
This file implements a map that provides insertion order iteration.
Module.h This file contains the declarations for the Module class.
This file contains the declarations for profiling metadata utility functions.
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
This file defines the VPlanHCFGBuilder class which contains the public interface (buildHierarchicalCF...
This file declares the class VPlanVerifier, which contains utility functions to check the consistency...
This file contains the declarations of the Vectorization Plan base classes:
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
int64_t getSExtValue() const
Get sign extended value.
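A minimal sketch tying the APInt entries above together (hypothetical helper; only getAllOnes and getSExtValue from this listing are used):

  #include "llvm/ADT/APInt.h"
  using namespace llvm;

  // Build an all-ones value of the given width, then read it back as a
  // sign-extended 64-bit integer (an all-ones pattern reads as -1).
  int64_t allOnesAsSigned(unsigned Bits) {
    APInt Mask = APInt::getAllOnes(Bits);
    return Mask.getSExtValue(); // e.g. Bits == 8: 0xFF -> -1
  }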
A container for analyses that lazily runs them and caches their results.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
A function analysis which provides an AssumptionCache.
A cache of @llvm.assume calls within a function.
void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
unsigned getVScaleRangeMin() const
Returns the minimum value for the vscale_range attribute.
static Attribute getWithAlignment(LLVMContext &Context, Align Alignment)
Return a uniquified Attribute object that has the specific alignment set.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
InstListType::const_iterator getFirstNonPHIIt() const
Iterator returning form of getFirstNonPHI.
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Function * getParent() const
Return the enclosing method, or null if none.
InstListType::iterator iterator
Instruction iterators...
LLVMContext & getContext() const
Get the context in which this basic block lives.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
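A minimal sketch combining the BasicBlock accessors above (hypothetical helper; dyn_cast_or_null comes from LLVM's standard casting support):

  #include "llvm/IR/BasicBlock.h"
  #include "llvm/IR/Instructions.h"
  #include <iterator>
  using namespace llvm;

  // phis() iterates only the leading PHI nodes; getTerminator() is the
  // block's final instruction when the block is well formed.
  unsigned numPhisBeforeConditionalBranch(const BasicBlock &BB) {
    auto *BI = dyn_cast_or_null<BranchInst>(BB.getTerminator());
    if (!BI || !BI->isConditional())
      return 0;
    return std::distance(BB.phis().begin(), BB.phis().end());
  }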
BinaryOps getOpcode() const
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
bool isConditional() const
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
BasicBlock * getSuccessor(unsigned i) const
Value * getCondition() const
Represents analyses that only rely on functions' control flow.
bool isNoBuiltin() const
Return true if the call should not be treated as a call to a builtin.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Value * getArgOperand(unsigned i) const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind)
Adds the attribute to the indicated argument.
This class represents a function call, abstracting a target machine's calling convention.
static bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_UGT
unsigned greater than
@ ICMP_ULT
unsigned less than
@ ICMP_ULE
unsigned less or equal
static ConstantInt * getTrue(LLVMContext &Context)
static ConstantInt * getFalse(LLVMContext &Context)
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
An analysis that produces DemandedBits for a function.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
iterator find(const_arg_type_t< KeyT > Val)
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
const ValueT & at(const_arg_type_t< KeyT > Val) const
at - Return the entry for the specified key, or abort if no such entry exists.
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
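A minimal sketch of the DenseMap calls above (hypothetical counter helpers):

  #include "llvm/ADT/DenseMap.h"
  using namespace llvm;

  // insert() never overwrites an existing entry; it reports via the
  // returned bool whether the key was newly inserted.
  void increment(DenseMap<int, unsigned> &Counts, int Key) {
    auto [It, Inserted] = Counts.insert({Key, 1u});
    if (!Inserted)
      ++It->second;
  }

  // lookup() would return 0 for a missing key; at() aborts instead,
  // so guard it with contains().
  unsigned countOf(const DenseMap<int, unsigned> &Counts, int Key) {
    return Counts.contains(Key) ? Counts.at(Key) : 0;
  }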
Implements a dense probed hash-table based set.
DomTreeNodeBase * getIDom() const
Analysis pass which computes a DominatorTree.
bool verify(VerificationLevel VL=VerificationLevel::Full) const
verify - checks if the tree is correct.
void changeImmediateDominator(DomTreeNodeBase< NodeT > *N, DomTreeNodeBase< NodeT > *NewIDom)
changeImmediateDominator - This method is used to update the dominator tree information when a node's...
DomTreeNodeBase< NodeT > * addNewBlock(NodeT *BB, NodeT *DomBB)
Add a new node to the dominator tree information.
void eraseNode(NodeT *BB)
eraseNode - Removes a node from the dominator tree.
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
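A minimal sketch of a dominance query through the DominatorTree interface above (hypothetical helper):

  #include "llvm/IR/Dominators.h"
  using namespace llvm;

  // getNode() exposes the tree node for a block; properlyDominates(A, B)
  // is dominance with the extra requirement that A != B.
  bool singlePredProperlyDominates(DominatorTree &DT, BasicBlock *BB) {
    BasicBlock *Pred = BB->getSinglePredecessor();
    return Pred && DT.properlyDominates(DT.getNode(Pred), DT.getNode(BB));
  }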
constexpr bool isVector() const
One or more elements.
static constexpr ElementCount getScalable(ScalarTy MinVal)
static constexpr ElementCount getFixed(ScalarTy MinVal)
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
constexpr bool isScalar() const
Exactly one element.
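A minimal sketch of the ElementCount factories above (hypothetical demo; asserts stand in for real uses):

  #include "llvm/Support/TypeSize.h"
  #include <cassert>
  using namespace llvm;

  // A fixed VF of 4 lanes vs. a scalable VF of vscale x 4 lanes.
  void elementCountDemo() {
    ElementCount FixedVF = ElementCount::getFixed(4);
    ElementCount ScalableVF = ElementCount::getScalable(4);
    assert(FixedVF.isVector() && ScalableVF.isVector());
    assert(ElementCount::getFixed(1).isScalar());
    assert(ElementCount::get(4, /*Scalable=*/true) == ScalableVF);
  }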
BasicBlock * emitMinimumVectorEpilogueIterCountCheck(BasicBlock *Bypass, BasicBlock *Insert)
Emits an iteration count bypass check after the main vector loop has finished to see if there are any...
void printDebugTracesAtEnd() override
EpilogueVectorizerEpilogueLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &Checks)
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
std::pair< BasicBlock *, Value * > createEpilogueVectorizedLoopSkeleton(const SCEV2ValueTy &ExpandedSCEVs) final
Implements the interface for creating a vectorized skeleton using the epilogue loop strategy (ie the ...
A specialized derived class of inner loop vectorizer that performs vectorization of main loops in the...
EpilogueVectorizerMainLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
void printDebugTracesAtEnd() override
std::pair< BasicBlock *, Value * > createEpilogueVectorizedLoopSkeleton(const SCEV2ValueTy &ExpandedSCEVs) final
Implements the interface for creating a vectorized skeleton using the main loop strategy (ie the firs...
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
BasicBlock * emitIterationCountCheck(BasicBlock *Bypass, bool ForEpilogue)
Emits an iteration count bypass check once for the main loop (when ForEpilogue is false) and once for...
FastMathFlags getFastMathFlags() const
Convenience function for getting all the fast-math flags.
Convenience struct for specifying and reasoning about fast-math flags.
Class to represent function types.
param_iterator param_begin() const
param_iterator param_end() const
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
FunctionType * getFunctionType() const
Returns the FunctionType for me.
const DataLayout & getDataLayout() const
Get the data layout of the module this function belongs to.
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Common base class shared among various IRBuilders.
Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Value * CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name="")
Return a vector value that contains V broadcasted to NumElts elements.
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
ConstantInt * getTrue()
Get the constant value for i1 true.
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
CallInst * CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Load intrinsic.
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
BasicBlock::iterator GetInsertPoint() const
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Value * CreateVectorReverse(Value *V, const Twine &Name="")
Return a vector value that contains the vector V reversed.
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Value * CreateNeg(Value *V, const Twine &Name="", bool HasNSW=false)
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Value * CreateBitOrPointerCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
InstTy * Insert(InstTy *I, const Twine &Name="") const
Insert and return the specified instruction.
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
CallInst * CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment, Value *Mask)
Create a call to Masked Store intrinsic.
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
ConstantInt * getFalse()
Get the constant value for i1 false.
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Value * CreateURem(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateStepVector(Type *DstType, const Twine &Name="")
Creates a vector of type DstType with the linear sequence <0, 1, ...>
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
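A minimal sketch of the IRBuilder workflow the entries above describe: pick an insertion point, then chain Create* calls (hypothetical function builder):

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/Module.h"
  using namespace llvm;

  // Emit "return a + b" into a fresh i32(i32, i32) function.
  Function *emitAdd(Module &M) {
    LLVMContext &Ctx = M.getContext();
    IRBuilder<> B(Ctx);
    auto *FTy = FunctionType::get(B.getInt32Ty(),
                                  {B.getInt32Ty(), B.getInt32Ty()}, false);
    Function *F = Function::Create(FTy, Function::ExternalLinkage, "add", M);
    B.SetInsertPoint(BasicBlock::Create(Ctx, "entry", F));
    Value *Sum = B.CreateAdd(F->getArg(0), F->getArg(1), "sum");
    B.CreateRet(Sum);
    return F;
  }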
A struct for saving information about induction variables.
InductionKind getKind() const
const SCEV * getStep() const
InductionKind
This enum represents the kinds of inductions that we support.
@ IK_NoInduction
Not an induction variable.
@ IK_FpInduction
Floating point induction variable.
@ IK_PtrInduction
Pointer induction var. Step = C.
@ IK_IntInduction
Integer induction variable. Step = C.
const SmallVectorImpl< Instruction * > & getCastInsts() const
Returns a reference to the type cast instructions in the induction update chain, that are redundant w...
Value * getStartValue() const
An extension of the inner loop vectorizer that creates a skeleton for a vectorized loop that has its ...
InnerLoopAndEpilogueVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &Checks)
virtual std::pair< BasicBlock *, Value * > createEpilogueVectorizedLoopSkeleton(const SCEV2ValueTy &ExpandedSCEVs)=0
The interface for creating a vectorized skeleton using one of two different strategies,...
std::pair< BasicBlock *, Value * > createVectorizedLoopSkeleton(const SCEV2ValueTy &ExpandedSCEVs) final
Create a new empty loop that will contain vectorized instructions later on, while the old loop will b...
EpilogueLoopVectorizationInfo & EPI
Holds and updates state information required to vectorize the main loop and its epilogue in two separ...
InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, unsigned UnrollFactor, LoopVectorizationLegality *LVL, LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
InnerLoopVectorizer vectorizes loops which contain only one basic block to a specified vectorization ...
virtual void printDebugTracesAtStart()
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
PHINode * createInductionResumeValue(PHINode *OrigPhi, const InductionDescriptor &ID, Value *Step, ArrayRef< BasicBlock * > BypassBlocks, std::pair< BasicBlock *, Value * > AdditionalBypass={nullptr, nullptr})
Create a new phi node for the induction variable OrigPhi to resume iteration count in the scalar epil...
void scalarizeInstruction(const Instruction *Instr, VPReplicateRecipe *RepRecipe, const VPIteration &Instance, VPTransformState &State)
A helper function to scalarize a single Instruction in the innermost loop.
BasicBlock * LoopScalarBody
The scalar loop body.
Value * TripCount
Trip count of the original loop.
void sinkScalarOperands(Instruction *PredInst)
Iteratively sink the scalarized operands of a predicated instruction into the block that was created ...
const TargetLibraryInfo * TLI
Target Library Info.
DenseMap< PHINode *, Value * > IVEndValues
ElementCount MinProfitableTripCount
Value * createBitOrPointerCast(Value *V, VectorType *DstVTy, const DataLayout &DL)
Returns a bitcasted value to the requested vector type.
const TargetTransformInfo * TTI
Target Transform Info.
Value * VectorTripCount
Trip count of the widened loop (TripCount - TripCount % (VF*UF))
bool areSafetyChecksAdded()
InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, ElementCount VecWidth, ElementCount MinProfitableTripCount, unsigned UnrollFactor, LoopVectorizationLegality *LVL, LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
BasicBlock * emitSCEVChecks(BasicBlock *Bypass)
Emit a bypass check to see if all of the SCEV assumptions we've had to make are correct.
LoopVectorizationCostModel * Cost
The profitablity analysis.
SmallMapVector< const RecurrenceDescriptor *, PHINode *, 4 > ReductionResumeValues
BlockFrequencyInfo * BFI
BFI and PSI are used to check for profile guided size optimizations.
Value * getTripCount() const
Returns the original loop trip count.
BasicBlock * LoopMiddleBlock
Middle Block between the vector and the scalar.
OptimizationRemarkEmitter * ORE
Interface to emit optimization remarks.
SmallVector< Instruction *, 4 > PredicatedInstructions
Store instructions that were predicated.
BasicBlock * completeLoopSkeleton()
Complete the loop skeleton by adding debug MDs, creating appropriate conditional branches in the midd...
void createVectorLoopSkeleton(StringRef Prefix)
Emit basic blocks (prefixed with Prefix) for the iteration check, vector loop preheader,...
BasicBlock * emitMemRuntimeChecks(BasicBlock *Bypass)
Emit bypass checks to check any memory assumptions we may have made.
BasicBlock * LoopScalarPreHeader
The scalar-loop preheader.
LoopVectorizationLegality * Legal
The legality analysis.
void emitIterationCountCheck(BasicBlock *Bypass)
Emit a bypass check to see if the vector trip count is zero, including if it overflows.
PredicatedScalarEvolution & PSE
A wrapper around ScalarEvolution used to add runtime SCEV checks.
void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II, Value *VectorTripCount, Value *EndValue, BasicBlock *MiddleBlock, BasicBlock *VectorHeader, VPlan &Plan, VPTransformState &State)
Set up the values of the IVs correctly when exiting the vector loop.
void createInductionResumeValues(const SCEV2ValueTy &ExpandedSCEVs, std::pair< BasicBlock *, Value * > AdditionalBypass={nullptr, nullptr})
Create new phi nodes for the induction variables to resume iteration count in the scalar epilogue,...
void fixNonInductionPHIs(VPlan &Plan, VPTransformState &State)
Fix the non-induction PHIs in Plan.
DominatorTree * DT
Dominator Tree.
void setTripCount(Value *TC)
Used to set the trip count after ILV's construction and after the preheader block has been executed.
bool OptForSizeBasedOnProfile
BasicBlock * LoopVectorPreHeader
The vector-loop preheader.
virtual void printDebugTracesAtEnd()
AssumptionCache * AC
Assumption Cache.
Value * getOrCreateVectorTripCount(BasicBlock *InsertBlock)
Returns (and creates if needed) the trip count of the widened loop.
IRBuilder Builder
The builder that we use.
void vectorizeInterleaveGroup(const InterleaveGroup< Instruction > *Group, ArrayRef< VPValue * > VPDefs, VPTransformState &State, VPValue *Addr, ArrayRef< VPValue * > StoredValues, VPValue *BlockInMask, bool NeedsMaskForGaps)
Try to vectorize interleaved access group Group with the base address given in Addr,...
virtual std::pair< BasicBlock *, Value * > createVectorizedLoopSkeleton(const SCEV2ValueTy &ExpandedSCEVs)
Create a new empty loop that will contain vectorized instructions later on, while the old loop will b...
unsigned UF
The vectorization unroll factor to use.
void fixVectorizedLoop(VPTransformState &State, VPlan &Plan)
Fix the vectorized code, taking care of header phi's, live-outs, and more.
BasicBlock * LoopExitBlock
The unique ExitBlock of the scalar loop if one exists.
SmallVector< BasicBlock *, 4 > LoopBypassBlocks
A list of all bypass blocks. The first block is the entry of the loop.
GeneratedRTChecks & RTChecks
Structure to hold information about generated runtime checks, responsible for cleaning the checks,...
virtual ~InnerLoopVectorizer()=default
ElementCount VF
The vectorization SIMD factor to use.
Loop * OrigLoop
The original loop.
static InstructionCost getInvalid(CostType Val=0)
static InstructionCost getMax()
std::optional< CostType > getValue() const
This function is intended to be used as sparingly as possible, since the class provides the full rang...
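A minimal sketch of the InstructionCost validity idiom above (hypothetical helper):

  #include "llvm/Support/InstructionCost.h"
  #include <optional>
  using namespace llvm;

  // Invalidity is sticky through arithmetic: adding anything to a cost
  // built with getInvalid() stays invalid, and getValue() then returns
  // std::nullopt instead of a number.
  std::optional<InstructionCost::CostType> totalCost(InstructionCost A,
                                                     InstructionCost B) {
    InstructionCost Sum = A + B;
    return Sum.getValue();
  }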
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
void replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB)
Replace specified successor OldBB to point at the provided block.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
The group of interleaved loads/stores sharing the same stride and close to each other.
uint32_t getFactor() const
InstTy * getMember(uint32_t Index) const
Get the member with the given index Index.
uint32_t getIndex(const InstTy *Instr) const
Get the index for the given member.
InstTy * getInsertPos() const
void addMetadata(InstTy *NewInst) const
Add metadata (e.g. alias info) from the original instruction to NewInst.
Drive the analysis of interleaved memory accesses in the loop.
InterleaveGroup< Instruction > * getInterleaveGroup(const Instruction *Instr) const
Get the interleave group that Instr belongs to.
bool requiresScalarEpilogue() const
Returns true if an interleaved group that may access memory out-of-bounds requires a scalar epilogue ...
bool isInterleaved(Instruction *Instr) const
Check if Instr belongs to any interleave group.
bool invalidateGroups()
Invalidate groups, e.g., in case all blocks in loop will be predicated contrary to original assumptio...
iterator_range< SmallPtrSetIterator< llvm::InterleaveGroup< Instruction > * > > getInterleaveGroups()
void analyzeInterleaving(bool EnableMaskedInterleavedGroup)
Analyze the interleaved accesses and collect them in interleave groups.
void invalidateGroupsRequiringScalarEpilogue()
Invalidate groups that require a scalar epilogue (due to gaps).
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
This analysis provides dependence information for the memory accesses of a loop.
Drive the analysis of memory accesses in the loop.
const RuntimePointerChecking * getRuntimePointerChecking() const
unsigned getNumRuntimePointerChecks() const
Number of memchecks required to prove independence of otherwise may-alias pointers.
const DenseMap< Value *, const SCEV * > & getSymbolicStrides() const
If an access has a symbolic strides, this maps the pointer value to the stride symbol.
Analysis pass that exposes the LoopInfo for a function.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
void getExitBlocks(SmallVectorImpl< BlockT * > &ExitBlocks) const
Return all of the successor blocks of this loop.
void getExitingBlocks(SmallVectorImpl< BlockT * > &ExitingBlocks) const
Return all blocks inside the loop that have successors outside of the loop.
BlockT * getHeader() const
unsigned getLoopDepth() const
Return the nesting level of this loop.
void addBasicBlockToLoop(BlockT *NewBB, LoopInfoBase< BlockT, LoopT > &LI)
This method is used by other analyses to update loop information.
iterator_range< block_iterator > blocks() const
BlockT * getLoopPreheader() const
If there is a preheader for this loop, return it.
ArrayRef< BlockT * > getBlocks() const
Get a list of the basic blocks which make up this loop.
BlockT * getExitingBlock() const
If getExitingBlocks would return exactly one block, return that block.
bool isLoopExiting(const BlockT *BB) const
True if terminator in the block can branch to another block that is outside of the current loop.
BlockT * getUniqueExitBlock() const
If getUniqueExitBlocks would return exactly one block, return that block.
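A minimal sketch using the loop-shape queries above (hypothetical predicate over an llvm::Loop):

  #include "llvm/Analysis/LoopInfo.h"
  using namespace llvm;

  // An innermost loop with a preheader, a single latch, and a unique
  // exit block is the easy case for transforms such as vectorization.
  bool hasSimpleShape(const Loop &L) {
    return L.isInnermost() && L.getLoopPreheader() && L.getLoopLatch() &&
           L.getUniqueExitBlock();
  }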
Store the result of a depth first search within basic blocks contained by a single loop.
RPOIterator beginRPO() const
Reverse iterate over the cached postorder blocks.
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
RPOIterator endRPO() const
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
void removeBlock(BlockT *BB)
This method completely removes BB from all data structures, including all of the Loop objects it is n...
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
LoopVectorizationCostModel - estimates the expected speedups due to vectorization.
SmallPtrSet< Type *, 16 > ElementTypesInLoop
All element types found in the loop.
void collectElementTypesForWidening()
Collect all element types in the loop for which widening is needed.
bool canVectorizeReductions(ElementCount VF) const
Returns true if the target machine supports all of the reduction variables found for the given VF.
bool requiresScalarEpilogue(VFRange Range) const
Returns true if we're required to use a scalar epilogue for at least the final iteration of the origi...
bool isPredicatedInst(Instruction *I) const
Returns true if I is an instruction that needs to be predicated at runtime.
bool hasPredStores() const
void collectValuesToIgnore()
Collect values we want to ignore in the cost model.
void collectInLoopReductions()
Split reductions into those that happen in the loop, and those that happen outside.
std::pair< unsigned, unsigned > getSmallestAndWidestTypes()
bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be uniform after vectorization.
PredicatedScalarEvolution & PSE
Predicated scalar evolution analysis.
const LoopVectorizeHints * Hints
Loop Vectorize Hint.
std::optional< InstructionCost > getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy, TTI::TargetCostKind CostKind) const
Return the cost of instructions in an inloop reduction pattern, if I is part of that pattern.
const TargetTransformInfo & TTI
Vector target information.
LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, LoopVectorizationLegality *Legal, const TargetTransformInfo &TTI, const TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, const Function *F, const LoopVectorizeHints *Hints, InterleavedAccessInfo &IAI)
const Function * TheFunction
LoopVectorizationLegality * Legal
Vectorization legality.
bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const
Returns true if the target machine supports masked load operation for the given DataType and kind of ...
InstructionCost getInstructionCost(Instruction *I, ElementCount VF)
Returns the execution time cost of an instruction for a given vector width.
DemandedBits * DB
Demanded bits analysis.
bool interleavedAccessCanBeWidened(Instruction *I, ElementCount VF) const
Returns true if I is a memory instruction in an interleaved-group of memory accesses that can be vect...
const TargetLibraryInfo * TLI
Target Library Info.
bool memoryInstructionCanBeWidened(Instruction *I, ElementCount VF)
Returns true if I is a memory instruction with consecutive memory access that can be widened.
const InterleaveGroup< Instruction > * getInterleavedAccessGroup(Instruction *Instr) const
Get the interleaved access group that Instr belongs to.
InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const
Estimate cost of an intrinsic call instruction CI if it were vectorized with factor VF.
bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be scalar after vectorization.
bool isOptimizableIVTruncate(Instruction *I, ElementCount VF)
Return True if instruction I is an optimizable truncate whose operand is an induction variable.
FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC)
Loop * TheLoop
The loop that we evaluate.
TailFoldingStyle getTailFoldingStyle(bool IVUpdateMayOverflow=true) const
Returns the TailFoldingStyle that is best for the current loop.
InterleavedAccessInfo & InterleaveInfo
The interleave access information contains groups of interleaved accesses with the same stride and cl...
SmallPtrSet< const Value *, 16 > ValuesToIgnore
Values to ignore in the cost model.
void setVectorizedCallDecision(ElementCount VF)
A call may be vectorized in different ways depending on whether we have vectorized variants available...
void invalidateCostModelingDecisions()
Invalidates decisions already taken by the cost model.
bool isAccessInterleaved(Instruction *Instr) const
Check if Instr belongs to any interleaved access group.
bool selectUserVectorizationFactor(ElementCount UserVF)
Setup cost-based decisions for user vectorization factor.
OptimizationRemarkEmitter * ORE
Interface to emit optimization remarks.
bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const
Returns true if the target machine supports masked store operation for the given DataType and kind of...
LoopInfo * LI
Loop Info analysis.
bool requiresScalarEpilogue(bool IsVectorizing) const
Returns true if we're required to use a scalar epilogue for at least the final iteration of the origi...
SmallVector< RegisterUsage, 8 > calculateRegisterUsage(ArrayRef< ElementCount > VFs)
SmallPtrSet< const Value *, 16 > VecValuesToIgnore
Values to ignore in the cost model when VF > 1.
bool isInLoopReduction(PHINode *Phi) const
Returns true if the Phi is part of an inloop reduction.
bool isProfitableToScalarize(Instruction *I, ElementCount VF) const
void setWideningDecision(const InterleaveGroup< Instruction > *Grp, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for interleaving group Grp and vector ...
const MapVector< Instruction *, uint64_t > & getMinimalBitwidths() const
CallWideningDecision getCallWideningDecision(CallInst *CI, ElementCount VF) const
bool isLegalGatherOrScatter(Value *V, ElementCount VF)
Returns true if the target machine can represent V as a masked gather or scatter operation.
bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const
bool runtimeChecksRequired()
bool foldTailByMasking() const
Returns true if all loop blocks should be masked to fold tail loop.
bool foldTailWithEVL() const
Returns true if VP intrinsics with explicit vector length support should be generated in the tail fol...
bool isEpilogueVectorizationProfitable(const ElementCount VF) const
Returns true if epilogue vectorization is considered profitable, and false otherwise.
void collectUniformsAndScalars(ElementCount VF)
Collect Uniform and Scalar values for the given VF.
bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const
Returns true if the instructions in this block requires predication for any reason,...
void setCallWideningDecision(CallInst *CI, ElementCount VF, InstWidening Kind, Function *Variant, Intrinsic::ID IID, std::optional< unsigned > MaskPos, InstructionCost Cost)
void setTailFoldingStyles(bool IsScalableVF, unsigned UserIC)
Selects and saves a TailFoldingStyle for each of the two cases: the IV update may overflow, or it may not.
AssumptionCache * AC
Assumption cache.
void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for instruction I and vector width VF.
InstWidening
Decision that was taken during cost calculation for memory instruction.
bool isScalarWithPredication(Instruction *I, ElementCount VF) const
Returns true if I is an instruction which requires predication and for which our chosen predication s...
InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF) const
Estimate cost of a call instruction CI if it were vectorized with factor VF.
InstructionCost expectedCost(ElementCount VF, SmallVectorImpl< InstructionVFPair > *Invalid=nullptr)
Returns the expected execution cost.
bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) const
Returns true if we should use strict in-order reductions for the given RdxDesc.
std::pair< InstructionCost, InstructionCost > getDivRemSpeculationCost(Instruction *I, ElementCount VF) const
Return the costs for our two available strategies for lowering a div/rem operation which requires spe...
bool isDivRemScalarWithPredication(InstructionCost ScalarCost, InstructionCost SafeDivisorCost) const
Given costs for both strategies, return true if the scalar predication lowering should be used for di...
void setCostBasedWideningDecision(ElementCount VF)
Memory access instruction may be vectorized in more than one way.
InstWidening getWideningDecision(Instruction *I, ElementCount VF) const
Return the cost model decision for the given instruction I and vector width VF.
bool isScalarEpilogueAllowed() const
Returns true if a scalar epilogue is not allowed due to optsize or a loop hint annotation.
InstructionCost getWideningCost(Instruction *I, ElementCount VF)
Return the vectorization cost for the given instruction I and vector width VF.
unsigned selectInterleaveCount(ElementCount VF, InstructionCost LoopCost)
void collectInstsToScalarize(ElementCount VF)
Collects the instructions to scalarize for each predicated instruction in the loop.
LoopVectorizationLegality checks if it is legal to vectorize a loop, and to what vectorization factor...
unsigned getNumStores() const
bool hasVectorCallVariants() const
Returns true if there is at least one function call in the loop which has a vectorized variant availa...
uint64_t getMaxSafeVectorWidthInBits() const
bool isInvariantAddressOfReduction(Value *V)
Returns True if given address is invariant and is used to store recurrent expression.
bool blockNeedsPredication(BasicBlock *BB) const
Return true if the block BB needs to be predicated in order for the loop to be vectorized.
bool canVectorize(bool UseVPlanNativePath)
Returns true if it is legal to vectorize this loop.
int isConsecutivePtr(Type *AccessTy, Value *Ptr) const
Check if this pointer is consecutive when vectorizing.
bool canVectorizeFPMath(bool EnableStrictReductions)
Returns true if it is legal to vectorize the FP math operations in this loop.
bool isReductionVariable(PHINode *PN) const
Returns True if PN is a reduction variable in this loop.
bool isFixedOrderRecurrence(const PHINode *Phi) const
Returns True if Phi is a fixed-order recurrence in this loop.
const InductionDescriptor * getPointerInductionDescriptor(PHINode *Phi) const
Returns a pointer to the induction descriptor, if Phi is pointer induction.
const InductionDescriptor * getIntOrFpInductionDescriptor(PHINode *Phi) const
Returns a pointer to the induction descriptor, if Phi is an integer or floating point induction.
bool isInductionPhi(const Value *V) const
Returns True if V is a Phi node of an induction variable in this loop.
PHINode * getPrimaryInduction()
Returns the primary induction variable.
const InductionList & getInductionVars() const
Returns the induction variables found in the loop.
bool isInvariant(Value *V) const
Returns true if value V is uniform across VF lanes, when VF is provided, and otherwise if V is invari...
const ReductionList & getReductionVars() const
Returns the reduction variables found in the loop.
bool isSafeForAnyVectorWidth() const
unsigned getNumLoads() const
bool canFoldTailByMasking() const
Return true if we can vectorize this loop while folding its tail by masking.
void prepareToFoldTailByMasking()
Mark all respective loads/stores for masking.
Type * getWidestInductionType()
Returns the widest induction type.
const LoopAccessInfo * getLAI() const
bool isUniformMemOp(Instruction &I, ElementCount VF) const
A uniform memory op is a load or store which accesses the same memory location on all VF lanes,...
bool isMaskRequired(const Instruction *I) const
Returns true if vector representation of the instruction I requires mask.
const RuntimePointerChecking * getRuntimePointerChecking() const
Returns the information that we collected about runtime memory check.
Planner drives the vectorization process after having passed Legality checks.
std::optional< VectorizationFactor > plan(ElementCount UserVF, unsigned UserIC)
Plan how to best vectorize, return the best VF and its cost, or std::nullopt if vectorization and int...
VectorizationFactor selectEpilogueVectorizationFactor(const ElementCount MaxVF, unsigned IC)
VectorizationFactor planInVPlanNativePath(ElementCount UserVF)
Use the VPlan-native path to plan how to best vectorize, return the best VF and its cost.
std::pair< DenseMap< const SCEV *, Value * >, DenseMap< const RecurrenceDescriptor *, Value * > > executePlan(ElementCount VF, unsigned UF, VPlan &BestPlan, InnerLoopVectorizer &LB, DominatorTree *DT, bool IsEpilogueVectorization, const DenseMap< const SCEV *, Value * > *ExpandedSCEVs=nullptr)
Generate the IR code for the vectorized loop captured in VPlan BestPlan according to the best selecte...
void buildVPlans(ElementCount MinVF, ElementCount MaxVF)
Build VPlans for power-of-2 VF's between MinVF and MaxVF inclusive, according to the information gath...
VPlan & getBestPlanFor(ElementCount VF) const
Return the best VPlan for VF.
VPlan & getBestPlan() const
Return the most profitable plan and fix its VF to the most profitable one.
static bool getDecisionAndClampRange(const std::function< bool(ElementCount)> &Predicate, VFRange &Range)
Test a Predicate on a Range of VF's.
void printPlans(raw_ostream &O)
bool hasPlanWithVF(ElementCount VF) const
Look through the existing plans and return true if we have one with vectorization factor VF.
This holds vectorization requirements that must be verified late in the process.
Instruction * getExactFPInst()
Utility class for getting and setting loop vectorizer hints in the form of loop metadata.
bool isScalableVectorizationDisabled() const
enum ForceKind getForce() const
bool allowVectorization(Function *F, Loop *L, bool VectorizeOnlyWhenForced) const
bool allowReordering() const
When loop hints enabling vectorization are provided, the vectorizer is allowed to change the order of operations that ...
void emitRemarkWithHints() const
Dumps all the hint information.
bool isPotentiallyUnsafe() const
ElementCount getWidth() const
@ FK_Enabled
Forcing enabled.
@ FK_Undefined
Not selected.
@ FK_Disabled
Forcing disabled.
unsigned getPredicate() const
void setAlreadyVectorized()
Mark the loop L as already vectorized by setting the width to 1.
const char * vectorizeAnalysisPassName() const
If hints are provided that force vectorization, use the AlwaysPrint pass name to force the frontend t...
unsigned getInterleave() const
void prepareNoAliasMetadata()
Set up the aliasing scopes based on the memchecks.
Represents a single loop in the control flow graph.
bool hasLoopInvariantOperands(const Instruction *I) const
Return true if all the operands of the specified instruction are loop invariant.
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
MDNode * getLoopID() const
Return the llvm.loop loop id metadata node for this loop if it is present.
void replaceOperandWith(unsigned I, Metadata *New)
Replace a specific operand.
const MDOperand & getOperand(unsigned I) const
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
unsigned getNumOperands() const
Return number of MDNode operands.
static MDString * get(LLVMContext &Context, StringRef Str)
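A minimal sketch of building a self-referential llvm.loop ID from the metadata APIs above (hypothetical hint string; this mirrors the usual loop-metadata pattern):

  #include "llvm/IR/LLVMContext.h"
  #include "llvm/IR/Metadata.h"
  using namespace llvm;

  // Loop IDs are distinct nodes whose operand 0 refers back to the node
  // itself; further operands carry the hints.
  MDNode *makeLoopID(LLVMContext &Ctx) {
    Metadata *Hint =
        MDTuple::get(Ctx, {MDString::get(Ctx, "llvm.loop.example")});
    MDNode *LoopID = MDNode::getDistinct(Ctx, {nullptr, Hint});
    LoopID->replaceOperandWith(0, LoopID); // close the self-reference
    return LoopID;
  }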
This class implements a map that also provides access to all stored values in a deterministic order.
iterator find(const KeyT &Key)
Function * getFunction(StringRef Name) const
Look up the specified function in the module symbol table.
An analysis over an "inner" IR unit that provides access to an analysis manager over a "outer" IR uni...
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
void setIncomingValueForBlock(const BasicBlock *BB, Value *V)
Set every incoming value(s) for block BB to V.
Value * getIncomingValueForBlock(const BasicBlock *BB) const
static unsigned getIncomingValueNumForOperand(unsigned i)
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
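A minimal sketch of the PHINode construction entries above (hypothetical two-way merge; the caller guarantees V0/V1 have type Ty):

  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  // PHIs must stay at the front of the block, so insert at begin().
  PHINode *mergeTwo(Type *Ty, Value *V0, BasicBlock *Pred0, Value *V1,
                    BasicBlock *Pred1, BasicBlock *MergeBB) {
    PHINode *Phi = PHINode::Create(Ty, /*NumReservedValues=*/2, "merge",
                                   MergeBB->begin());
    Phi->addIncoming(V0, Pred0);
    Phi->addIncoming(V1, Pred1);
    return Phi;
  }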
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
const SCEVPredicate & getPredicate() const
const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
void preserveSet()
Mark an analysis set as preserved.
void preserve()
Mark an analysis as preserved.
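A minimal sketch of how a new-PM pass reports the sets above (hypothetical no-op pass; CFGAnalyses is the "control-flow only" set from this listing):

  #include "llvm/IR/PassManager.h"
  using namespace llvm;

  struct ExamplePass : PassInfoMixin<ExamplePass> {
    PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
      bool MadeChange = false; // ... transform F here ...
      if (!MadeChange)
        return PreservedAnalyses::all();
      PreservedAnalyses PA;
      PA.preserveSet<CFGAnalyses>(); // we did not touch control flow
      return PA;
    }
  };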
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool hasProfileSummary() const
Returns true if profile summary is available.
The RecurrenceDescriptor is used to identify recurrences variables in a loop.
static bool isFMulAddIntrinsic(Instruction *I)
Returns true if the instruction is a call to the llvm.fmuladd intrinsic.
FastMathFlags getFastMathFlags() const
Instruction * getLoopExitInstr() const
static unsigned getOpcode(RecurKind Kind)
Returns the opcode corresponding to the RecurrenceKind.
Type * getRecurrenceType() const
Returns the type of the recurrence.
const SmallPtrSet< Instruction *, 8 > & getCastInsts() const
Returns a reference to the instructions used for type-promoting the recurrence.
unsigned getMinWidthCastToRecurrenceTypeInBits() const
Returns the minimum width used by the recurrence in bits.
TrackingVH< Value > getRecurrenceStartValue() const
SmallVector< Instruction *, 4 > getReductionOpChain(PHINode *Phi, Loop *L) const
Attempts to find a chain of operations from Phi to LoopExitInst that can be treated as a set of reduc...
static bool isAnyOfRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
bool isSigned() const
Returns true if all source operands of the recurrence are SExtInsts.
RecurKind getRecurrenceKind() const
bool isOrdered() const
Expose an ordered FP reduction to the instance users.
static bool isMinMaxRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is any min/max kind.
bool Need
This flag indicates if we need to add the runtime check.
std::optional< ArrayRef< PointerDiffInfo > > getDiffChecks() const
const SmallVectorImpl< RuntimePointerCheck > & getChecks() const
Returns the checks that generateChecks created.
This class represents a constant integer value.
const APInt & getAPInt() const
Helper to remove instructions inserted during SCEV expansion, unless they are marked as used.
This class uses information about analyze scalars to rewrite expressions in canonical form.
ScalarEvolution * getSE()
bool isInsertedInstruction(Instruction *I) const
Return true if the specified instruction was inserted by the code rewriter.
Value * expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc)
Generates a code sequence that evaluates this predicate.
This class represents an assumption made using SCEV expressions which can be checked at run-time.
virtual bool isAlwaysTrue() const =0
Returns true if the predicate is always true.
This class represents an analyzed expression in the program.
bool isOne() const
Return true if the expression is a constant one.
bool isZero() const
Return true if the expression is a constant zero.
Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
The main scalar evolution driver.
const SCEV * getURemExpr(const SCEV *LHS, const SCEV *RHS)
Represents an unsigned remainder expression based on unsigned division.
const SCEV * getConstant(ConstantInt *V)
const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
unsigned getSmallConstantMaxTripCount(const Loop *L)
Returns the upper bound of the loop trip count as a normal unsigned value.
const SCEV * getTripCountFromExitCount(const SCEV *ExitCount)
A version of getTripCountFromExitCount below which always picks an evaluation type which can not resu...
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may effect Scalar...
bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
bool isKnownPredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may effect its v...
void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be...
unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
LLVMContext & getContext() const
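A minimal sketch of the ScalarEvolution queries above (hypothetical helpers; SE and L come from the usual analyses):

  #include "llvm/Analysis/LoopInfo.h"
  #include "llvm/Analysis/ScalarEvolution.h"
  using namespace llvm;

  // Exact small trip count, or 0 when it is unknown or not constant.
  unsigned exactTripCount(ScalarEvolution &SE, const Loop *L) {
    return SE.getSmallConstantTripCount(L);
  }

  // A value is invariant in L iff its SCEV expression is.
  bool invariantIn(ScalarEvolution &SE, Value *V, const Loop *L) {
    return SE.isLoopInvariant(SE.getSCEV(V), L);
  }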
This class represents the LLVM 'select' instruction.
A vector that has set insertion semantics.
ArrayRef< value_type > getArrayRef() const
size_type size() const
Determine the number of elements in the SetVector.
iterator end()
Get an iterator to the end of the SetVector.
size_type count(const key_type &key) const
Count the number of elements of a given key in the SetVector.
bool empty() const
Determine if the SetVector is empty or not.
iterator begin()
Get an iterator to the beginning of the SetVector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
value_type pop_back_val()
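A minimal sketch of the SetVector worklist idiom the entries above support (hypothetical drain loop):

  #include "llvm/ADT/SetVector.h"
  using namespace llvm;

  // insert() ignores duplicates, iteration preserves insertion order,
  // and pop_back_val() drains the worklist LIFO.
  unsigned drain(SetVector<int> &Worklist) {
    Worklist.insert(3);
    Worklist.insert(3); // duplicate: insert() returns false, size stays 1
    unsigned Processed = 0;
    while (!Worklist.empty()) {
      int Item = Worklist.pop_back_val();
      (void)Item; // process Item here
      ++Processed;
    }
    return Processed;
  }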
This class provides computation of slot numbers for LLVM Assembly writing.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
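A minimal sketch of the SmallPtrSet membership idiom above (hypothetical visited-set helper):

  #include "llvm/ADT/SmallPtrSet.h"
  #include "llvm/IR/Instruction.h"
  using namespace llvm;

  // insert() returns {iterator, inserted}; the bool is true only the
  // first time a pointer is seen, which makes one-shot visitation easy.
  bool visitOnce(SmallPtrSet<const Instruction *, 8> &Seen,
                 const Instruction *I) {
    return Seen.insert(I).second;
  }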
A SetVector that performs no allocations if smaller than a certain size.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
bool isPointerTy() const
True if this is an instance of PointerType.
static IntegerType * getInt1Ty(LLVMContext &C)
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static Type * getVoidTy(LLVMContext &C)
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isTokenTy() const
Return true if this is 'token'.
bool isVoidTy() const
Return true if this is 'void'.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
void setOperand(unsigned i, Value *Val)
Value * getOperand(unsigned i) const
static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)
Retrieve all the VFInfo instances associated to the CallInst CI.
VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph.
void appendRecipe(VPRecipeBase *Recipe)
Augment the existing recipes of a VPBasicBlock with an additional Recipe as the last recipe.
RecipeListTy::iterator iterator
Instruction iterators...
void execute(VPTransformState *State) override
The method which generates the output IR instructions that correspond to this VPBasicBlock,...
iterator begin()
Recipe iterator methods.
iterator_range< iterator > phis()
Returns an iterator range over the PHI-like recipes in the block.
iterator getFirstNonPhi()
Return the position of the first non-phi node recipe in the block.
void insert(VPRecipeBase *Recipe, iterator InsertPt)
A recipe for vectorizing a phi-node as a sequence of mask-based select instructions.
VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
VPRegionBlock * getParent()
const VPBasicBlock * getExitingBasicBlock() const
void setName(const Twine &newName)
const VPBasicBlock * getEntryBasicBlock() const
VPBlockBase * getSingleSuccessor() const
const VPBlocksTy & getSuccessors() const
static void insertBlockAfter(VPBlockBase *NewBlock, VPBlockBase *BlockPtr)
Insert disconnected VPBlockBase NewBlock after BlockPtr.
RAII object that stores the current insertion point and restores it when the object is destroyed.
VPlan-based builder utility analogous to IRBuilder.
VPValue * createOr(VPValue *LHS, VPValue *RHS, DebugLoc DL={}, const Twine &Name="")
VPBasicBlock * getInsertBlock() const
VPValue * createICmp(CmpInst::Predicate Pred, VPValue *A, VPValue *B, DebugLoc DL={}, const Twine &Name="")
Create a new ICmp VPInstruction with predicate Pred and operands A and B.
VPInstruction * createOverflowingOp(unsigned Opcode, std::initializer_list< VPValue * > Operands, VPRecipeWithIRFlags::WrapFlagsTy WrapFlags, DebugLoc DL={}, const Twine &Name="")
VPInstruction * createNaryOp(unsigned Opcode, ArrayRef< VPValue * > Operands, Instruction *Inst=nullptr, const Twine &Name="")
Create an N-ary operation with Opcode, Operands and set Inst as its underlying Instruction.
VPValue * createNot(VPValue *Operand, DebugLoc DL={}, const Twine &Name="")
VPValue * createLogicalAnd(VPValue *LHS, VPValue *RHS, DebugLoc DL={}, const Twine &Name="")
VPValue * createSelect(VPValue *Cond, VPValue *TrueVal, VPValue *FalseVal, DebugLoc DL={}, const Twine &Name="", std::optional< FastMathFlags > FMFs=std::nullopt)
void setInsertPoint(VPBasicBlock *TheBB)
This specifies that created VPInstructions should be appended to the end of the specified block.
Canonical scalar induction phi of the vector loop.
Type * getScalarType() const
Returns the scalar type of the induction.
ArrayRef< VPValue * > definedValues()
Returns an ArrayRef of the values defined by the VPDef.
VPValue * getVPSingleValue()
Returns the only VPValue defined by the VPDef.
VPValue * getVPValue(unsigned I)
Returns the VPValue with index I defined by the VPDef.
void execute(VPTransformState &State) override
Generate the transformed value of the induction at offset StartValue (1st operand) + IV (2nd operand) * StepValue (3rd/4th operand).
VPValue * getStepValue() const
VPValue * getStartValue() const
This is a concrete Recipe that models a single VPlan-level instruction.
@ ResumePhi
Creates a scalar phi in a leaf VPBB with a single predecessor in VPlan.
unsigned getOpcode() const
VPInterleaveRecipe is a recipe for transforming an interleave group of load or stores into one wide l...
VPValue * getAddr() const
Return the address accessed by this recipe.
VPValue * getMask() const
Return the mask used by this recipe.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the wide load or store, and shuffles.
ArrayRef< VPValue * > getStoredValues() const
Return the VPValues stored by this interleave group.
unsigned getNumStoreOperands() const
Returns the number of stored operands of this interleave group.
static VPLane getLastLaneForVF(const ElementCount &VF)
static VPLane getFirstLane()
VPRecipeBase is a base class modeling a sequence of one or more output IR instructions.
VPBasicBlock * getParent()
DebugLoc getDebugLoc() const
Returns the debug location of the recipe.
void insertBefore(VPRecipeBase *InsertPos)
Insert an unlinked recipe into a basic block immediately before the specified recipe.
void insertAfter(VPRecipeBase *InsertPos)
Insert an unlinked Recipe into a basic block immediately after the specified Recipe.
iplist< VPRecipeBase >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
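Because eraseFromParent returns the next iterator, recipes can be removed safely mid-traversal. A sketch, where isDead is a hypothetical predicate:

  // Delete dead recipes while walking the block; make_early_inc_range
  // (listed further below) keeps the traversal valid across erasure.
  for (VPRecipeBase &R : make_early_inc_range(*VPBB))
    if (isDead(R))
      R.eraseFromParent();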
Helper class to create VPRecipes from IR instructions.
VPValue * getVPValueOrAddLiveIn(Value *V, VPlan &Plan)
VPValue * createEdgeMask(BasicBlock *Src, BasicBlock *Dst)
A helper function that computes the predicate of the edge between SRC and DST.
VPReplicateRecipe * handleReplication(Instruction *I, VFRange &Range)
Build a VPReplicateRecipe for I.
VPValue * getBlockInMask(BasicBlock *BB) const
Returns the entry mask for the block BB.
VPValue * getEdgeMask(BasicBlock *Src, BasicBlock *Dst) const
A helper that returns the previously computed predicate of the edge between SRC and DST.
iterator_range< mapped_iterator< Use *, std::function< VPValue *(Value *)> > > mapToVPValues(User::op_range Operands)
Returns a range mapping the values of the range Operands to their corresponding VPValues.
void fixHeaderPhis()
Add the incoming values from the backedge to reduction & first-order recurrence cross-iteration phis.
VPRecipeBase * tryToCreateWidenRecipe(Instruction *Instr, ArrayRef< VPValue * > Operands, VFRange &Range, VPBasicBlock *VPBB)
Create and return a widened recipe for I if one can be created within the given VF Range.
void createHeaderMask()
Create the mask for the vector loop header block.
void createBlockInMask(BasicBlock *BB)
A helper function that computes the predicate of the block BB, assuming that the header block of the ...
VPRecipeBase * getRecipe(Instruction *I)
Return the recipe created for given ingredient.
void setFlags(Instruction *I) const
Set the IR flags for I.
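A sketch of querying the builder's cached predicates, assuming a RecipeBuilder and a block BB of the original loop; treating a null mask as an all-true predicate is an assumption here, worth checking against the class documentation:

  if (VPValue *Mask = RecipeBuilder.getBlockInMask(BB)) {
    // BB is predicated; Mask guards its recipes.
  } else {
    // No mask cached: assumed to mean BB executes unconditionally.
  }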
A recipe for handling reduction phis.
bool isInLoop() const
Returns true, if the phi is part of an in-loop reduction.
const RecurrenceDescriptor & getRecurrenceDescriptor() const
A recipe to represent inloop reduction operations, performing a reduction on a vector operand into a ...
VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks which form a Single-Entry-S...
const VPBlockBase * getEntry() const
bool isReplicator() const
An indicator whether this region is to generate multiple replicated instances of output IR correspond...
VPReplicateRecipe replicates a given instruction producing multiple scalar copies of the original sca...
void execute(VPTransformState &State) override
Generate replicas of the desired Ingredient.
bool shouldPack() const
Returns true if the recipe is used by a widened recipe via an intervening VPPredInstPHIRecipe.
VPSingleDef is a base class for recipes for modeling a sequence of one or more output IR that define ...
Instruction * getUnderlyingInstr()
Returns the underlying instruction.
This class can be used to assign names to VPValues.
An analysis for type-inference for VPValues.
Type * inferScalarType(const VPValue *V)
Infer the type of V. Returns the scalar type of V.
This class augments VPValue with operands which provide the inverse def-use edges from VPValue's user...
void setOperand(unsigned I, VPValue *New)
unsigned getNumOperands() const
VPValue * getOperand(unsigned N) const
void addOperand(VPValue *Operand)
void printAsOperand(raw_ostream &OS, VPSlotTracker &Tracker) const
void replaceAllUsesWith(VPValue *New)
user_iterator user_begin()
Value * getLiveInIRValue()
Returns the underlying IR value, if this VPValue is defined outside the scope of VPlan.
bool isLiveIn() const
Returns true if this VPValue is a live-in, i.e. defined outside the VPlan.
void replaceUsesWithIf(VPValue *New, llvm::function_ref< bool(VPUser &U, unsigned Idx)> ShouldReplace)
Go through the uses list for this VPValue and make each use point to New if the callback ShouldReplac...
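A sketch of the conditional form, assuming values OldV and NewV plus a hypothetical set Keep of users whose uses should be rewritten:

  OldV->replaceUsesWithIf(NewV, [&](VPUser &U, unsigned) {
    return Keep.contains(&U); // rewrite only uses held by tracked users
  });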
A recipe to compute the pointers for widened memory accesses of IndexTy for all parts.
A recipe for widening Call instructions.
A Recipe for widening the canonical induction variable of the vector loop.
VPWidenCastRecipe is a recipe to create vector cast instructions.
A recipe for handling GEP instructions.
A recipe for handling phi nodes of integer and floating-point inductions, producing their vector valu...
A common base class for widening memory operations.
bool Reverse
Whether the consecutive accessed addresses are in reverse order.
bool isConsecutive() const
Return whether the loaded-from / stored-to addresses are consecutive.
VPValue * getMask() const
Return the mask used by this recipe.
VPValue * getAddr() const
Return the address accessed by this recipe.
bool isReverse() const
Return whether the consecutive loaded/stored addresses are in reverse order.
A recipe for handling phis that are widened in the vector loop.
VPValue * getIncomingValue(unsigned I)
Returns the I th incoming VPValue.
VPBasicBlock * getIncomingBlock(unsigned I)
Returns the I th incoming VPBasicBlock.
bool onlyScalarsGenerated(bool IsScalable)
Returns true if only scalar values will be generated.
void execute(VPTransformState &State) override
Generate vector values for the pointer induction.
VPWidenRecipe is a recipe for producing a vector-typed copy of its ingredient.
Main class to build the VPlan H-CFG for an incoming IR.
VPlan models a candidate for vectorization, encoding various decisions taken to produce efficient outp...
void printDOT(raw_ostream &O) const
Print this VPlan in DOT format to O.
void prepareToExecute(Value *TripCount, Value *VectorTripCount, Value *CanonicalIVStartValue, VPTransformState &State)
Prepare the plan for execution, setting up the required live-in values.
VPBasicBlock * getEntry()
VPValue & getVectorTripCount()
The vector trip count.
void setName(const Twine &newName)
VPValue & getVFxUF()
Returns VF * UF of the vector loop region.
VPValue * getTripCount() const
The trip count of the original loop.
VPValue * getOrCreateBackedgeTakenCount()
The backedge taken count of the original loop.
void removeLiveOut(PHINode *PN)
iterator_range< SmallSetVector< ElementCount, 2 >::iterator > vectorFactors() const
Returns an iterator range over all VFs of the plan.
void addLiveOut(PHINode *PN, VPValue *V)
VPBasicBlock * getPreheader()
VPRegionBlock * getVectorLoopRegion()
Returns the VPRegionBlock of the vector loop.
bool hasVF(ElementCount VF)
bool hasUF(unsigned UF) const
void setVF(ElementCount VF)
InstructionCost cost(ElementCount VF, VPCostContext &Ctx)
Return the cost of this plan.
void resetTripCount(VPValue *NewTripCount)
Resets the trip count for the VPlan.
static VPlanPtr createInitialVPlan(const SCEV *TripCount, ScalarEvolution &PSE, bool RequiresScalarEpilogueCheck, bool TailFolded, Loop *TheLoop)
Create initial VPlan, having an "entry" VPBasicBlock (wrapping original scalar pre-header) which con...
VPValue * getOrAddLiveIn(Value *V)
Gets the live-in VPValue for V or adds a new live-in (if none exists yet) for V.
LLVM_DUMP_METHOD void dump() const
Dump the plan to stderr (for debugging).
bool hasScalarVFOnly() const
void execute(VPTransformState *State)
Generate the IR code for this VPlan.
VPCanonicalIVPHIRecipe * getCanonicalIV()
Returns the canonical induction recipe of the vector loop.
const MapVector< PHINode *, VPLiveOut * > & getLiveOuts() const
void print(raw_ostream &O) const
Print this VPlan to O.
VPValue * getSCEVExpansion(const SCEV *S) const
VPlan * duplicate()
Clone the current VPlan, update all VPValues of the new VPlan and cloned recipes to refer to the clon...
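A small debugging sketch over the queries above, assuming a VPlan &Plan: print the plan whenever a scalable VF is among its candidates.

  for (ElementCount VF : Plan.vectorFactors())
    if (VF.isScalable()) {
      Plan.print(dbgs()); // human-readable dump of the whole plan
      break;
    }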
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUser() const
Return true if there is exactly one user of this value.
void setName(const Twine &Name)
Change the name of the value.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
LLVMContext & getContext() const
All values hold a context through their type.
StringRef getName() const
Return a constant reference to the value's name.
VectorBuilder & setEVL(Value *NewExplicitVectorLength)
VectorBuilder & setMask(Value *NewMask)
Value * createVectorInstruction(unsigned Opcode, Type *ReturnTy, ArrayRef< Value * > VecOpArray, const Twine &Name=Twine())
Base class of all SIMD vector types.
static bool isValidElementType(Type *ElemTy)
Return true if the specified type is valid as an element type.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
Type * getElementType() const
std::pair< iterator, bool > insert(const ValueT &V)
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isNonZero() const
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr LeafTy multiplyCoefficientBy(ScalarTy RHS) const
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
constexpr bool isZero() const
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
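A minimal sketch of the ElementCount arithmetic these helpers provide; <vscale x 4> denotes a scalable count with known minimum 4:

  ElementCount VF = ElementCount::getScalable(4);   // <vscale x 4>
  assert(VF.isScalable() && VF.getKnownMinValue() == 4);
  ElementCount Half = VF.divideCoefficientBy(2);    // <vscale x 2>
  assert(ElementCount::isKnownLE(Half, VF));        // same scaling, so comparable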
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
self_iterator getIterator()
A range adaptor for a pair of iterators.
This class implements an extremely fast bulk output stream that can only output to a stream.
A raw_ostream that writes to an std::string.
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ PredicateElseScalarEpilogue
@ PredicateOrDontVectorize
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...
@ C
The default llvm calling convention, compatible with C.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
bool match(Val *V, const Pattern &P)
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
OneUse_match< T > m_OneUse(const T &SubPattern)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
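A sketch of composing these matchers to recognize a single-use widening multiply, assuming a Value *V to inspect; A and B capture the narrow operands:

  using namespace llvm::PatternMatch;
  Value *A = nullptr, *B = nullptr;
  if (match(V, m_OneUse(m_Mul(m_ZExtOrSExt(m_Value(A)),
                              m_ZExtOrSExt(m_Value(B))))))
    ; // V is a one-use mul of two zext/sext-extended values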
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
DiagnosticInfoOptimizationBase::Argument NV
NodeAddr< InstrNode * > Instr
NodeAddr< PhiNode * > Phi
const_iterator begin(StringRef path, Style style=Style::native)
Get begin iterator over path.
const_iterator end(StringRef path)
Get end iterator over path.
VPValue * getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr, ScalarEvolution &SE)
Get or create a VPValue that corresponds to the expansion of Expr.
bool isUniformAfterVectorization(VPValue *VPV)
Returns true if VPV is uniform after vectorization.
bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE, AssumptionCache *AC, MemorySSAUpdater *MSSAU, bool PreserveLCSSA)
Simplify each loop in a loop nest recursively.
void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
Value * addRuntimeChecks(Instruction *Loc, Loop *TheLoop, const SmallVectorImpl< RuntimePointerCheck > &PointerChecks, SCEVExpander &Expander, bool HoistRuntimeChecks=false)
Add code that checks at runtime if the accessed arrays in PointerChecks overlap.
void stable_sort(R &&Range)
bool RemoveRedundantDbgInstrs(BasicBlock *BB)
Try to remove redundant dbg.value instructions from given basic block.
std::optional< unsigned > getLoopEstimatedTripCount(Loop *L, unsigned *EstimatedLoopInvocationWeight=nullptr)
Returns a loop's estimated trip count based on branch weight metadata.
static void reportVectorization(OptimizationRemarkEmitter *ORE, Loop *TheLoop, VectorizationFactor VF, unsigned IC)
Report successful vectorization of the loop.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)
Returns the min/max intrinsic used when expanding a min/max reduction.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A,...
unsigned getLoadStoreAddressSpace(Value *I)
A helper function that returns the address space of the pointer operand of load or store instruction.
bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
const SCEV * createTripCountSCEV(Type *IdxTy, PredicatedScalarEvolution &PSE, Loop *OrigLoop)
std::pair< Instruction *, ElementCount > InstructionVFPair
Value * getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF)
Return the runtime value for VF.
bool formLCSSARecursively(Loop &L, const DominatorTree &DT, const LoopInfo *LI, ScalarEvolution *SE)
Put a loop nest into LCSSA form.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
std::optional< MDNode * > makeFollowupLoopID(MDNode *OrigLoopID, ArrayRef< StringRef > FollowupAttrs, const char *InheritOptionsAttrsPrefix="", bool AlwaysNew=false)
Create a new loop identifier for a loop created from a loop transformation.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Value * concatenateVectors(IRBuilderBase &Builder, ArrayRef< Value * > Vecs)
Concatenate a list of vectors.
iterator_range< df_iterator< VPBlockShallowTraversalWrapper< VPBlockBase * > > > vp_depth_first_shallow(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order.
iterator_range< df_iterator< VPBlockDeepTraversalWrapper< VPBlockBase * > > > vp_depth_first_deep(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order while traversing t...
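A sketch contrasting the two traversals, assuming a VPlan &Plan: the deep variant also descends into region blocks, so it visits every VPBasicBlock of the plan.

  unsigned NumVPBBs = 0;
  for (VPBlockBase *VPB : vp_depth_first_deep(Plan.getEntry()))
    if (isa<VPBasicBlock>(VPB)) // count only leaf blocks, not regions
      ++NumVPBBs;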
static void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I=nullptr)
Reports an informative message: print Msg for debugging purposes as well as an optimization remark.
auto map_range(ContainerTy &&C, FuncTy F)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
void collectEphemeralRecipesForVPlan(VPlan &Plan, DenseSet< VPRecipeBase * > &EphRecipes)
Constant * createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF, const InterleaveGroup< Instruction > &Group)
Create a mask that filters the members of an interleave group where there are gaps.
llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
auto reverse(ContainerTy &&C)
void setBranchWeights(Instruction &I, ArrayRef< uint32_t > Weights, bool IsExpected)
Create a new branch_weights metadata node and add or overwrite a prof metadata reference to instructi...
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
cl::opt< bool > EnableVPlanNativePath("enable-vplan-native-path", cl::Hidden, cl::desc("Enable VPlan-native vectorization path with " "support for outer loop vectorization."))
void sort(IteratorTy Start, IteratorTy End)
llvm::SmallVector< int, 16 > createReplicatedMask(unsigned ReplicationFactor, unsigned VF)
Create a mask with replicated elements.
std::unique_ptr< VPlan > VPlanPtr
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool isPointerTy(const Type *T)
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
cl::opt< bool > EnableLoopVectorization
Align getLoadStoreAlignment(Value *I)
A helper function that returns the alignment of load or store instruction.
iterator_range< filter_iterator< detail::IterOfRange< RangeT >, PredicateT > > make_filter_range(RangeT &&Range, PredicateT Pred)
Convenience function that takes a range of elements and a predicate, and return a new filter_iterator...
void llvm_unreachable_internal(const char *msg=nullptr, const char *file=nullptr, unsigned line=0)
This function calls abort(), and prints the optional message to stderr.
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
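A sketch of the trip-count arithmetic divideCeil serves, using illustrative names only:

  // Number of vector iterations needed to cover TC scalar iterations
  // at a fixed vectorization factor VF.
  uint64_t vectorIterations(uint64_t TC, uint64_t VF) {
    return divideCeil(TC, VF); // e.g. divideCeil(10, 4) == 3
  }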
Type * ToVectorTy(Type *Scalar, ElementCount EC)
A helper function for converting Scalar types to vector types.
bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
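A sketch of the shuffle masks these helpers build, for VF = 4 and two vectors:

  SmallVector<int, 16> IL = createInterleaveMask(/*VF=*/4, /*NumVecs=*/2);
  // IL == {0, 4, 1, 5, 2, 6, 3, 7}: alternates lanes of the two inputs.
  SmallVector<int, 16> St = createStrideMask(/*Start=*/0, /*Stride=*/2, /*VF=*/4);
  // St == {0, 2, 4, 6}: extracts one member from each strided group.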
RecurKind
These are the kinds of recurrences that we support.
@ Or
Bitwise or logical OR of integers.
@ FMulAdd
Sum of float products with llvm.fmuladd(a * b + sum).
void setProfileInfoAfterUnrolling(Loop *OrigLoop, Loop *UnrolledLoop, Loop *RemainderLoop, uint64_t UF)
Set weights for UnrolledLoop and RemainderLoop based on weights for OrigLoop and the following distri...
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
void reportVectorizationFailure(const StringRef DebugMsg, const StringRef OREMsg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I=nullptr)
Reports a vectorization failure: print DebugMsg for debugging purposes along with the corresponding o...
@ CM_ScalarEpilogueNotAllowedLowTripLoop
@ CM_ScalarEpilogueNotNeededUsePredicate
@ CM_ScalarEpilogueNotAllowedOptSize
@ CM_ScalarEpilogueAllowed
@ CM_ScalarEpilogueNotAllowedUsePredicate
bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Return true if the instruction does not have any effects besides calculating the result and does not ...
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
Value * createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF, int64_t Step)
Return a value for Step multiplied by VF.
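A sketch of materializing VF-dependent IR values, assuming an IRBuilder B, an integer index type IdxTy, and an unroll factor UF:

  Value *VFxUF = createStepForVF(B, IdxTy, VF, /*Step=*/UF); // VF * UF, vscale-aware
  Value *Lanes = getRuntimeVF(B, IdxTy, VF); // a constant, or vscale * MinVF when scalable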
BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="", bool Before=false)
Split the specified block at the specified instruction.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Value * addDiffRuntimeChecks(Instruction *Loc, ArrayRef< PointerDiffInfo > Checks, SCEVExpander &Expander, function_ref< Value *(IRBuilderBase &, unsigned)> GetVF, unsigned IC)
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer list are equal or the list is empty.
@ DataAndControlFlowWithoutRuntimeCheck
Use predicate to control both data and control flow, but modify the trip count so that a runtime over...
@ None
Don't use tail folding.
@ DataWithEVL
Use predicated EVL instructions for tail-folding.
@ DataWithoutLaneMask
Same as Data, but avoids using the get.active.lane.mask intrinsic to calculate the mask and instead i...
unsigned getReciprocalPredBlockProb()
A helper function that returns the reciprocal of the block probability of predicated blocks.
bool hasBranchWeightMD(const Instruction &I)
Checks if an instruction has Branch Weight Metadata.
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
Type * getLoadStoreType(Value *I)
A helper function that returns the type of a load or store instruction.
bool verifyVPlanIsValid(const VPlan &Plan)
Verify invariants for general VPlans.
MapVector< Instruction *, uint64_t > computeMinimumValueSizes(ArrayRef< BasicBlock * > Blocks, DemandedBits &DB, const TargetTransformInfo *TTI=nullptr)
Compute a map of integer instructions to their minimum legal type size.
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
cl::opt< bool > EnableLoopInterleaving
Implement std::hash so that hash_code can be used in STL containers.
This struct is a compact representation of a valid (non-zero power of two) alignment.
A special type used by analysis passes to provide an address that identifies that particular analysis...
static void collectEphemeralValues(const Loop *L, AssumptionCache *AC, SmallPtrSetImpl< const Value * > &EphValues)
Collect a loop's ephemeral values (those used only by an assume or similar intrinsics in the loop).
An information struct used to provide DenseMap with the various necessary components for a given valu...
Encapsulate information regarding vectorization of a loop and its epilogue.
BasicBlock * SCEVSafetyCheck
BasicBlock * MemSafetyCheck
BasicBlock * MainLoopIterationCountCheck
EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF, ElementCount EVF, unsigned EUF)
BasicBlock * EpilogueIterationCountCheck
A class that represents two vectorization factors (initialized with 0 by default).
static FixedScalableVFPair getNone()
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
std::optional< unsigned > MaskPos
A struct that represents some properties of the register usage of a loop.
SmallMapVector< unsigned, unsigned, 4 > MaxLocalUsers
Holds the maximum number of concurrent live intervals in the loop.
SmallMapVector< unsigned, unsigned, 4 > LoopInvariantRegs
Holds the number of loop invariant values that are used in the loop.
bool processLoop(Loop *L)
LoopAccessInfoManager * LAIs
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LoopVectorizePass(LoopVectorizeOptions Opts={})
LoopVectorizeResult runImpl(Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_, DominatorTree &DT_, BlockFrequencyInfo *BFI_, TargetLibraryInfo *TLI_, DemandedBits &DB_, AssumptionCache &AC_, LoopAccessInfoManager &LAIs_, OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_)
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
OptimizationRemarkEmitter * ORE
Storage for information about made changes.
A CRTP mix-in to automatically provide informational APIs needed for passes.
A MapVector that performs no allocations if smaller than a certain size.
Holds the VFShape for a specific scalar to vector function mapping.
std::optional< unsigned > getParamIndexForOptionalMask() const
Instruction Set Architecture.
Encapsulates information needed to describe a parameter.
A range of powers-of-2 vectorization factors with fixed start and adjustable end.
Struct to hold various analysis needed for cost computations.
LoopVectorizationCostModel & CM
bool skipCostComputation(Instruction *UI, bool IsVector) const
Return true if the cost for UI shouldn't be computed, e.g. ...
InstructionCost getLegacyCost(Instruction *UI, ElementCount VF) const
Return the cost for UI with VF using the legacy cost model as fallback until computing the cost of al...
SmallPtrSet< Instruction *, 8 > SkipCostComputation
A recipe for handling first-order recurrence phis.
VPIteration represents a single point in the iteration space of the output (vectorized and/or unrolle...
bool isFirstIteration() const
void execute(VPTransformState &State) override
Generate the wide load or gather.
VPValue * getEVL() const
Return the EVL operand.
A recipe for widening load operations, using the address to load from and an optional mask.
void execute(VPTransformState &State) override
Generate a wide load or gather.
A recipe for widening select instructions.
VPValue * getStoredValue() const
Return the value stored by this recipe.
void execute(VPTransformState &State) override
Generate the wide store or scatter.
VPValue * getEVL() const
Return the EVL operand.
A recipe for widening store operations, using the stored value, the address to store to and an option...
void execute(VPTransformState &State) override
Generate a wide store or scatter.
VPValue * getStoredValue() const
Return the value stored by this recipe.
TODO: The following VectorizationFactor was pulled out of LoopVectorizationCostModel class.
InstructionCost Cost
Cost of the loop with that width.
ElementCount MinProfitableTripCount
The minimum trip count required to make vectorization profitable, e.g. ...
ElementCount Width
Vector width with best cost.
InstructionCost ScalarCost
Cost of the scalar loop.
static VectorizationFactor Disabled()
Width 1 means no vectorization, cost 0 means uncomputed cost.
static bool HoistRuntimeChecks