#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME
170 "llvm.loop.vectorize.followup_vectorized";
172 "llvm.loop.vectorize.followup_epilogue";
STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
static cl::opt<bool> EnableEpilogueVectorization(
    "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
    cl::desc("Enable vectorization of epilogue loops."));

static cl::opt<unsigned> EpilogueVectorizationForceVF(
    "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
    cl::desc("When epilogue vectorization is enabled, and a value greater than "
             "1 is specified, forces the given VF for all applicable epilogue "
             "loops."));

static cl::opt<unsigned> EpilogueVectorizationMinVF(
    "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
    cl::desc("Only loops with vectorization factor equal to or larger than "
             "the specified value are considered for epilogue vectorization."));

static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

static cl::opt<unsigned> VectorizeMemoryCheckThreshold(
    "vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks"));

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue), cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(
        clEnumValN(PreferPredicateTy::ScalarEpilogue, "scalar-epilogue",
                   "Don't tail-predicate loops, create scalar epilogue"),
        clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                   "predicate-else-scalar-epilogue",
                   "prefer tail-folding, create scalar epilogue if tail "
                   "folding fails."),
        clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                   "predicate-dont-vectorize",
                   "prefer tail-folding, don't attempt vectorization if "
                   "tail-folding fails.")));

static cl::opt<TailFoldingStyle> ForceTailFoldingStyle(
    "force-tail-folding-style", cl::desc("Force the tail folding style"),
    cl::init(TailFoldingStyle::None),
    cl::values(
        clEnumValN(TailFoldingStyle::None, "none", "Disable tail folding"),
        clEnumValN(
            TailFoldingStyle::Data, "data",
            "Create lane mask for data only, using active.lane.mask intrinsic"),
        clEnumValN(TailFoldingStyle::DataWithoutLaneMask,
                   "data-without-lane-mask",
                   "Create lane mask with compare/stepvector"),
        clEnumValN(TailFoldingStyle::DataAndControlFlow, "data-and-control",
                   "Create lane mask using active.lane.mask intrinsic, and use "
                   "it for both data and control flow"),
        clEnumValN(
            TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck,
            "data-and-control-without-rt-check",
            "Similar to data-and-control, but remove the runtime check")));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<bool> ForceTargetSupportsScalableVectors(
    "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
    cl::desc(
        "Pretend that scalable vectors are supported, even if the target does "
        "not support them. This flag should only be used for testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<bool> PreferInLoopReductions(
    "prefer-inloop-reductions", cl::init(false), cl::Hidden,
    cl::desc("Prefer in-loop vector reductions, "
             "overriding the target's preference."));

static cl::opt<bool> ForceOrderedReductions(
    "force-ordered-reductions", cl::init(false), cl::Hidden,
    cl::desc("Enable the vectorization of loops with in-order (strict) "
             "FP reductions"));

static cl::opt<bool> PreferPredicatedReductionSelect(
    "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
    cl::desc(
        "Prefer predicating a reduction operation over an after loop select."));

cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));

cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

static cl::opt<bool> PrintVPlansInDotFormat(
    "vplan-print-in-dot-format", cl::Hidden,
    cl::desc("Use dot format instead of plain text when dumping VPlans"));

static cl::opt<cl::boolOrDefault> ForceSafeDivisor(
    "force-widen-divrem-via-safe-divisor", cl::Hidden,
    cl::desc(
        "Override cost based safe divisor widening for div/rem instructions"));

static cl::opt<bool> UseWiderVFIfCallVariantsPresent(
    "vectorizer-maximize-bandwidth-for-vector-calls", cl::init(true),
    cl::Hidden,
    cl::desc("Try wider VFs if they enable the use of vector variants"));
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}
  unsigned Factor = Vals.size();
  assert(Factor > 1 && "Tried to interleave invalid number of vectors");

  for (Value *Val : Vals)
    assert(Val->getType() == VecTy && "Tried to interleave mismatched types");

  // Scalable vectors cannot use arbitrary shufflevectors, so the
  // interleave2 intrinsic is used instead.
  if (VecTy->isScalableTy()) {
    VectorType *WideVecTy = VectorType::getDoubleElementsVectorType(VecTy);
    return Builder.CreateIntrinsic(
        WideVecTy, Intrinsic::experimental_vector_interleave2, Vals,
        /*FMFSource=*/nullptr, Name);
  }

  const unsigned NumElts = VecTy->getElementCount().getFixedValue();
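// For example, interleaving two fixed-width vectors
//   A = <a0, a1, a2, a3> and B = <b0, b1, b2, b3>
// with Factor = 2 produces <a0, b0, a1, b1, a2, b2, a3, b3>.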
class GeneratedRTChecks;

    this->MinProfitableTripCount = VecWidth;
  virtual std::pair<BasicBlock *, Value *>
  createVectorizedLoopSkeleton(const SCEV2ValueTy &ExpandedSCEVs);

                                VPValue *BlockInMask, bool NeedsMaskForGaps);

      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

      const SCEV2ValueTy &ExpandedSCEVs,
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

           "A high UF for the epilogue loop is likely not beneficial.");

                             GeneratedRTChecks &Checks)
                            EPI.MainLoopVF, EPI.MainLoopVF, EPI.MainLoopUF,
                            LVL,

      const SCEV2ValueTy &ExpandedSCEVs) final {

  virtual std::pair<BasicBlock *, Value *>

                             GeneratedRTChecks &Check)

  std::pair<BasicBlock *, Value *>

                             GeneratedRTChecks &Checks)

  std::pair<BasicBlock *, Value *>
  if (I->getDebugLoc() != Empty)
    return I->getDebugLoc();

  for (Use &Op : I->operands()) {
    if (OpInst->getDebugLoc() != Empty)
      return OpInst->getDebugLoc();
  }

  return I->getDebugLoc();
  dbgs() << "LV: " << Prefix << DebugMsg;

    CodeRegion = I->getParent();
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  return B.CreateElementCount(Ty, VF);

  assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && "Invalid loop count");
                    << "loop not vectorized: " << OREMsg);

      "Vectorizing: ", TheLoop->isInnermost() ? "innermost loop" : "outer loop",

           << "vectorized " << LoopType << "loop (vectorization width: "
           << ", interleaved count: " << ore::NV("InterleaveCount", IC) << ")";

  if (const DebugLoc LoopDbgLoc = L->getStartLoc())
    LoopDbgLoc.print(OS);

    OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
1194 "Profitable to scalarize relevant only for VF > 1.");
1197 "cost-model should not be used for outer loops (in VPlan-native path)");
1199 auto Scalars = InstsToScalarize.find(VF);
1200 assert(Scalars != InstsToScalarize.end() &&
1201 "VF not yet analyzed for scalarization profitability");
1202 return Scalars->second.contains(
I);
1209 "cost-model should not be used for outer loops (in VPlan-native path)");
1213 if (isa<PseudoProbeInst>(
I))
1219 auto UniformsPerVF = Uniforms.find(VF);
1220 assert(UniformsPerVF != Uniforms.end() &&
1221 "VF not yet analyzed for uniformity");
1222 return UniformsPerVF->second.count(
I);
1229 "cost-model should not be used for outer loops (in VPlan-native path)");
1233 auto ScalarsPerVF = Scalars.find(VF);
1234 assert(ScalarsPerVF != Scalars.end() &&
1235 "Scalar values are not calculated for VF");
1236 return ScalarsPerVF->second.count(
I);
1242 return VF.
isVector() && MinBWs.contains(
I) &&
    WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);

    for (unsigned i = 0; i < Grp->getFactor(); ++i) {
        WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
        WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);

           "cost-model should not be used for outer loops (in VPlan-native path)");

    std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
    auto Itr = WideningDecisions.find(InstOnVF);
    if (Itr == WideningDecisions.end())
    return Itr->second.first;

    std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
           "The cost is not calculated");
    return WideningDecisions[InstOnVF].second;
                               std::optional<unsigned> MaskPos,
    CallWideningDecisions[std::make_pair(CI, VF)] = {Kind, Variant, IID,
                                                     MaskPos, Cost};

    return CallWideningDecisions.at(std::make_pair(CI, VF));

    auto *Trunc = dyn_cast<TruncInst>(I);

    Value *Op = Trunc->getOperand(0);

    if (VF.isScalar() || Uniforms.contains(VF))

    collectLoopUniforms(VF);
    collectLoopScalars(VF);
    bool LI = isa<LoadInst>(V);
    bool SI = isa<StoreInst>(V);

      const RecurrenceDescriptor &RdxDesc = Reduction.second;
      return TTI.isLegalToVectorizeReduction(RdxDesc, VF);

    return ScalarCost < SafeDivisorCost;
  std::pair<InstructionCost, InstructionCost>

    auto RequiresScalarEpilogue = [this](ElementCount VF) {
    bool IsRequired = all_of(Range, RequiresScalarEpilogue);
    assert((IsRequired || none_of(Range, RequiresScalarEpilogue)) &&
           "all VFs in range must agree on whether a scalar epilogue is required");

    return IVUpdateMayOverflow ? ChosenTailFoldingStyle.first
                               : ChosenTailFoldingStyle.second;

           "Tail folding must not be selected yet.");
      ChosenTailFoldingStyle.first = ChosenTailFoldingStyle.second =

    ChosenTailFoldingStyle.first =
    ChosenTailFoldingStyle.second =
    return InLoopReductions.contains(Phi);

    WideningDecisions.clear();
    CallWideningDecisions.clear();

  unsigned NumPredStores = 0;

                                    bool FoldTailByMasking);

  ElementCount getMaximizedVFForTarget(unsigned MaxTripCount,
                                       unsigned SmallestType,
                                       unsigned WidestType,
                                       ElementCount MaxSafeVF,
                                       bool FoldTailByMasking);

  ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);

  std::optional<InstructionCost>

  PredicatedBBsAfterVectorization;

  std::pair<TailFoldingStyle, TailFoldingStyle> ChosenTailFoldingStyle =

                                   ScalarCostsTy &ScalarCosts,

                     std::pair<InstWidening, InstructionCost>>;

  DecisionList WideningDecisions;

  using CallDecisionList =

  CallDecisionList CallWideningDecisions;

        Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
class GeneratedRTChecks {
  Value *SCEVCheckCond = nullptr;

  Value *MemRuntimeCheckCond = nullptr;

  bool CostTooHigh = false;
  const bool AddBranchWeights;

  Loop *OuterLoop = nullptr;

                    bool AddBranchWeights)
      : DT(DT), LI(LI), TTI(TTI), SCEVExp(SE, DL, "scev.check"),
        MemCheckExp(SE, DL, "scev.check"), AddBranchWeights(AddBranchWeights) {}
                                  nullptr, "vector.scevcheck");

    if (RtPtrChecking.Need) {
      auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
      MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
                                 "vector.memcheck");

      auto DiffChecks = RtPtrChecking.getDiffChecks();
        Value *RuntimeVF = nullptr;
          RuntimeVF = getRuntimeVF(B, B.getIntNTy(Bits), VF);

            MemCheckBlock->getTerminator(), L, RtPtrChecking.getChecks(),

      assert(MemRuntimeCheckCond &&
             "no RT checks generated although RtPtrChecking "
             "claimed checks are required");

    if (!MemCheckBlock && !SCEVCheckBlock)
    if (SCEVCheckBlock) {
    if (MemCheckBlock) {

    if (MemCheckBlock) {
    if (SCEVCheckBlock) {

    OuterLoop = L->getParentLoop();

    if (SCEVCheckBlock || MemCheckBlock)

        if (SCEVCheckBlock->getTerminator() == &I)

    if (MemCheckBlock) {
        if (MemCheckBlock->getTerminator() == &I)
      unsigned BestTripCount = 2;

        BestTripCount = SmallTC;

        BestTripCount = *EstimatedTC;

      BestTripCount = std::max(BestTripCount, 1U);
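      // Illustrative numbers: a memory-check cost of 24 amortized over an
      // estimated trip count of 8 is reported below as "Cost reduced from
      // 24 to 3" (the amortized result is clamped to be at least 1).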
      NewMemCheckCost = std::max(*NewMemCheckCost.getValue(),
                                 (InstructionCost::CostType)1);

      if (BestTripCount > 1)
        LLVM_DEBUG(dbgs()
                   << "We expect runtime memory checks to be hoisted "
                   << "out of the outer loop. Cost reduced from "
                   << MemCheckCost << " to " << NewMemCheckCost << '\n');

      MemCheckCost = NewMemCheckCost;

    RTCheckCost += MemCheckCost;

    if (SCEVCheckBlock || MemCheckBlock)
      LLVM_DEBUG(dbgs() << "Total cost of runtime checks: " << RTCheckCost
  ~GeneratedRTChecks() {
      SCEVCleaner.markResultUsed();

    if (!MemRuntimeCheckCond)
      MemCheckCleaner.markResultUsed();

    if (MemRuntimeCheckCond) {
      auto &SE = *MemCheckExp.getSE();
          I.eraseFromParent();

    MemCheckCleaner.cleanup();
    SCEVCleaner.cleanup();

      SCEVCheckBlock->eraseFromParent();
    if (MemRuntimeCheckCond)
      MemCheckBlock->eraseFromParent();
    SCEVCheckCond = nullptr;
    if (auto *C = dyn_cast<ConstantInt>(Cond))

    SCEVCheckBlock->getTerminator()->eraseFromParent();
    SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
    Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
                                                SCEVCheckBlock);

    if (AddBranchWeights)

    return SCEVCheckBlock;

    if (!MemRuntimeCheckCond)

    MemCheckBlock->moveBefore(LoopVectorPreHeader);

    if (AddBranchWeights) {

    MemCheckBlock->getTerminator()->setDebugLoc(
        Pred->getTerminator()->getDebugLoc());

    MemRuntimeCheckCond = nullptr;
    return MemCheckBlock;
static bool useActiveLaneMask(TailFoldingStyle Style) {
  return Style == TailFoldingStyle::Data ||
         Style == TailFoldingStyle::DataAndControlFlow ||
         Style == TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck;
}

static bool useActiveLaneMaskForControlFlow(TailFoldingStyle Style) {
  return Style == TailFoldingStyle::DataAndControlFlow ||
         Style == TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck;
}
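// For reference, the intrinsic semantics these styles rely on: an
// llvm.get.active.lane.mask(%base, %tc) call yields a vector of i1 where
// lane i is true iff %base + i < %tc (unsigned); e.g. base = 6, tc = 8 and
// four lanes give <1, 1, 0, 0>.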
    LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");

    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "

  if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {

  for (Loop *InnerL : L)

          ? B.CreateSExtOrTrunc(Index, StepTy)
          : B.CreateCast(Instruction::SIToFP, Index, StepTy);
  if (CastedIndex != Index) {
    Index = CastedIndex;
    assert(X->getType() == Y->getType() && "Types don't match!");

    if (auto *CX = dyn_cast<ConstantInt>(X))

    if (auto *CY = dyn_cast<ConstantInt>(Y))

    return B.CreateAdd(X, Y);

    assert(X->getType()->getScalarType() == Y->getType() &&
           "Types don't match!");

    if (auto *CX = dyn_cast<ConstantInt>(X))

    if (auto *CY = dyn_cast<ConstantInt>(Y))

    VectorType *XVTy = dyn_cast<VectorType>(X->getType());
    if (XVTy && !isa<VectorType>(Y->getType()))
      Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
    return B.CreateMul(X, Y);
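  // These helpers feed the induction-kind switch below, which for an integer
  // induction computes StartValue + Index * Step; e.g. (illustrative numbers)
  // StartValue = 10, Step = 3, Index = 4 yields 22, while the Step == -1
  // special case is emitted as StartValue - Index.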
  switch (InductionKind) {
           "Vector indices not supported for integer inductions yet");
           "Index type does not match StartValue type");
    if (isa<ConstantInt>(Step) && cast<ConstantInt>(Step)->isMinusOne())
      return B.CreateSub(StartValue, Index);

           "Vector indices not supported for FP inductions yet");
    assert((InductionBinOp->getOpcode() == Instruction::FAdd ||
            InductionBinOp->getOpcode() == Instruction::FSub) &&
           "Original bin op should be defined for FP induction");

    return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,

  if (F.hasFnAttribute(Attribute::VScaleRange))
    return F.getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();

  return std::nullopt;
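// E.g. a function carrying the attribute vscale_range(2,2) reports a maximum
// vscale of 2, so a <vscale x 4 x i32> vector is treated as 8 lanes wide.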
static bool isIndvarOverflowCheckKnownFalse(
    const LoopVectorizationCostModel *Cost,
    ElementCount VF, std::optional<unsigned> UF = std::nullopt) {
  unsigned MaxUF = UF ? *UF : Cost->TTI.getMaxInterleaveFactor(VF);

  Type *IdxTy = Cost->Legal->getWidestInductionType();
  APInt MaxUIntTripCount = cast<IntegerType>(IdxTy)->getMask();

          Cost->PSE.getSE()->getSmallConstantMaxTripCount(Cost->TheLoop)) {
    std::optional<unsigned> MaxVScale =
      MaxVF *= *MaxVScale;

    return (MaxUIntTripCount - TC).ugt(MaxVF * MaxUF);
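// Worked example (illustrative numbers): with an i32 widest induction type,
// a known max trip count TC = 1000, VF = 8 and UF = 4, the test above is
// (2^32 - 1) - 1000 > 8 * 4, which holds, so the induction-variable overflow
// check can be elided.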
                                             VPValue *BlockInMask,
                                             bool NeedsMaskForGaps) {
  const DataLayout &DL = Instr->getModule()->getDataLayout();

  unsigned InterleaveFactor = Group->getFactor();

         "Reversed masked interleave-group not supported.");

  for (unsigned Part = 0; Part < UF; Part++) {
    if (auto *I = dyn_cast<Instruction>(AddrPart))

    bool InBounds = false;
      InBounds = gep->isInBounds();

  auto CreateGroupMask = [this, &BlockInMask, &State, &InterleaveFactor](
                             unsigned Part, Value *MaskForGaps) -> Value * {
      assert(!MaskForGaps && "Interleaved groups with gaps are not supported.");
      assert(InterleaveFactor == 2 &&
             "Unsupported deinterleave factor for scalable vectors");
      auto *BlockInMaskPart = State.get(BlockInMask, Part);
          MaskTy, Intrinsic::experimental_vector_interleave2, Ops,
          /*FMFSource=*/nullptr, "interleaved.mask");

    Value *BlockInMaskPart = State.get(BlockInMask, Part);
                                   "interleaved.mask");
  if (isa<LoadInst>(Instr)) {
    Value *MaskForGaps = nullptr;
    if (NeedsMaskForGaps) {
      assert(MaskForGaps && "Mask for Gaps is required but it is null");

    for (unsigned Part = 0; Part < UF; Part++) {
      if (BlockInMask || MaskForGaps) {
               "masked interleaved groups are not allowed.");
        Value *GroupMask = CreateGroupMask(Part, MaskForGaps);
                                     GroupMask, PoisonVec, "wide.masked.vec");

    if (VecTy->isScalableTy()) {
      assert(InterleaveFactor == 2 &&
             "Unsupported deinterleave factor for scalable vectors");

      for (unsigned Part = 0; Part < UF; ++Part) {
            Intrinsic::experimental_vector_deinterleave2, VecTy, NewLoads[Part],
            /*FMFSource=*/nullptr, "strided.vec");

        for (unsigned I = 0; I < InterleaveFactor; ++I) {
          if (Member->getType() != ScalarTy) {

          State.set(VPDefs[J], StridedVec, Part);
    for (unsigned I = 0; I < InterleaveFactor; ++I) {
      for (unsigned Part = 0; Part < UF; Part++) {
                                                NewLoads[Part], StrideMask,
                                                "strided.vec");

        if (Member->getType() != ScalarTy) {

        State.set(VPDefs[J], StridedVec, Part);

  Value *MaskForGaps =
         "masked interleaved groups are not allowed.");
         "masking gaps for scalable vectors is not yet supported.");

  for (unsigned Part = 0; Part < UF; Part++) {
    unsigned StoredIdx = 0;
    for (unsigned i = 0; i < InterleaveFactor; i++) {
             "Fail to get a member from an interleaved store group");

      Value *StoredVec = State.get(StoredValues[StoredIdx], Part);

      if (StoredVec->getType() != SubVT)

    if (BlockInMask || MaskForGaps) {
      Value *GroupMask = CreateGroupMask(Part, MaskForGaps);
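// A classic interleaved group this routine handles (factor 2, no gaps):
//
//   for (i = 0; i < n; ++i) {
//     sum += A[2 * i];     // member 0
//     sum += A[2 * i + 1]; // member 1
//   }
//
// One wide load covers both members; shuffles (or the deinterleave2
// intrinsic for scalable vectors) then split out the even and odd lanes.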
  assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");

  if (isa<NoAliasScopeDeclInst>(Instr))

  bool IsVoidRetTy = Instr->getType()->isVoidTy();

  Cloned->setName(Instr->getName() + ".cloned");

         "inferred type and type from generated instructions do not match");

  if (auto DL = Instr->getDebugLoc())

  auto InputInstance = Instance;

    Cloned->setOperand(I.index(), State.get(Operand, InputInstance));

  State.set(RepRecipe, Cloned, Instance);

  if (auto *II = dyn_cast<AssumeInst>(Cloned))

  if (IfPredicateInstr)
  if (Cost->foldTailByMasking()) {
           "VF*UF must be a power of 2 when folding tail by masking");

  auto *DstFVTy = cast<VectorType>(DstVTy);
  auto VF = DstFVTy->getElementCount();
  auto *SrcVecTy = cast<VectorType>(V->getType());
  assert(VF == SrcVecTy->getElementCount() && "Vector dimensions do not match");
  Type *SrcElemTy = SrcVecTy->getElementType();
  Type *DstElemTy = DstFVTy->getElementType();
  assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
         "Vector elements must have same size");

         "Only one type should be a pointer type");
         "Only one type should be a floating point type");

  auto CreateStep = [&]() -> Value * {

  Value *MaxUIntTripCount =
      ConstantInt::get(CountTy, cast<IntegerType>(CountTy)->getMask());
2850 "TC check is expected to dominate Bypass");
2871 if (!SCEVCheckBlock)
2877 "Cannot SCEV check stride or overflow when optimizing for size");
2892 return SCEVCheckBlock;
2911 "Cannot emit memory checks when optimizing for size, unless forced "
2917 <<
"Code-size may be reduced by not forcing "
2918 "vectorization, or by source-code modifications "
2919 "eliminating the need for runtime checks "
2920 "(e.g., adding 'restrict').";
2928 return MemCheckBlock;
2937 "multiple exit loop without required epilogue?");
2941 LI,
nullptr,
Twine(Prefix) +
"middle.block");
2944 nullptr,
Twine(Prefix) +
"scalar.ph");
2961 BrInst->
setDebugLoc(ScalarLatchTerm->getDebugLoc());
2977 std::pair<BasicBlock *, Value *> AdditionalBypass) {
2983 Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
2984 if (OrigPhi == OldInduction) {
2999 if (AdditionalBypass.first) {
3000 B.SetInsertPoint(AdditionalBypass.first,
3001 AdditionalBypass.first->getFirstInsertionPt());
3002 EndValueFromAdditionalBypass =
3005 EndValueFromAdditionalBypass->
setName(
"ind.end");
3026 if (AdditionalBypass.first)
3028 EndValueFromAdditionalBypass);
                               const SCEV2ValueTy &ExpandedSCEVs) {
  const SCEV *Step = ID.getStep();
  if (auto *C = dyn_cast<SCEVConstant>(Step))
    return C->getValue();
  if (auto *U = dyn_cast<SCEVUnknown>(Step))
    return U->getValue();
  auto I = ExpandedSCEVs.find(Step);
  assert(I != ExpandedSCEVs.end() && "SCEV must be expanded at this point");

    const SCEV2ValueTy &ExpandedSCEVs,
    std::pair<BasicBlock *, Value *> AdditionalBypass) {
  assert(((AdditionalBypass.first && AdditionalBypass.second) ||
          (!AdditionalBypass.first && !AdditionalBypass.second)) &&
         "Inconsistent information about additional bypass.");

    PHINode *OrigPhi = InductionEntry.first;

      !Cost->foldTailByMasking()) {

    B.SetCurrentDebugLocation(ScalarLatchTerm->getDebugLoc());

#ifdef EXPENSIVE_CHECKS

std::pair<BasicBlock *, Value *>
InnerLoopVectorizer::createVectorizedLoopSkeleton(
    const SCEV2ValueTy &ExpandedSCEVs) {
    assert(isa<PHINode>(UI) && "Expected LCSSA form");
    MissingVals[UI] = EndValue;

    auto *UI = cast<Instruction>(U);
    assert(isa<PHINode>(UI) && "Expected LCSSA form");

      Value *CountMinusOne = B.CreateSub(
      CountMinusOne->setName("cmo");

      assert(StepVPV && "step must have been expanded during VPlan execution");
                        : State.get(StepVPV, {0, 0});

      Escape->setName("ind.escape");
      MissingVals[UI] = Escape;

  for (auto &I : MissingVals) {
    if (PHI->getBasicBlockIndex(MiddleBlock) == -1) {
      PHI->addIncoming(I.second, MiddleBlock);
struct CSEDenseMapInfo {
    return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
           isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);

    assert(canHandle(I) && "Unknown instruction!");
                                              I->value_op_end()));

    if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
        LHS == getTombstoneKey() || RHS == getTombstoneKey())
    return LHS->isIdenticalTo(RHS);

    if (!CSEDenseMapInfo::canHandle(&In))

      In.replaceAllUsesWith(V);
      In.eraseFromParent();
    return CallWideningDecisions.at(std::make_pair(CI, VF)).Cost;

  if (auto RedCost = getReductionPatternCost(CI, VF, RetTy, CostKind))

  for (auto &ArgOp : CI->args())

    return std::min(ScalarCallCost, IntrinsicCost);

  return ScalarCallCost;

  assert(ID && "Expected intrinsic call!");

  if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
    FMF = FPMO->getFastMathFlags();

                 std::back_inserter(ParamTys),
                 [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });

                                    dyn_cast<IntrinsicInst>(CI));

  auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
  auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
  return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;

  auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
  auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
  return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
    if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))

  for (PHINode &PN : Exit->phis())

    KV.second->fixPhi(Plan, State);

  Value *RuntimeVF = nullptr;
  auto *One = ConstantInt::get(IdxTy, 1);

  auto RecurSplice = cast<VPInstruction>(*PhiR->user_begin());
         RecurSplice->getOpcode() ==
         "recurrence phi must have a single user: FirstOrderRecurrenceSplice");

  for (VPUser *U : RecurSplice->users())
    if (auto *LiveOut = dyn_cast<VPLiveOut>(U))

  if (!LiveOuts.empty()) {

  Value *ExtractForPhiUsedOutsideLoop = nullptr;

    assert(UF > 1 && "VF and UF cannot both be 1");
    ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);

    PHINode *LCSSAPhi = LiveOut->getPhi();

  Phi->setName("scalar.recur");
  auto isBlockOfUsePredicated = [&](Use &U) -> bool {
    auto *I = cast<Instruction>(U.getUser());
    if (auto *Phi = dyn_cast<PHINode>(I))
      BB = Phi->getIncomingBlock(
    return BB == PredBB;

    Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
    InstsToReanalyze.clear();

    while (!Worklist.empty()) {
      if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
          I->mayHaveSideEffects() || I->mayReadFromMemory())

      if (I->getParent() == PredBB) {
        Worklist.insert(I->op_begin(), I->op_end());

      I->moveBefore(&*PredBB->getFirstInsertionPt());
      Worklist.insert(I->op_begin(), I->op_end());

  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
      PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
  return Cost->useOrderedReductions(RdxDesc);

void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
         "This function should not be visited twice for the same VF");

  Scalars[VF].insert(Uniforms[VF].begin(), Uniforms[VF].end());
3705 "Widening decision should be ready at this moment");
3706 if (
auto *Store = dyn_cast<StoreInst>(MemAccess))
3707 if (
Ptr == Store->getValueOperand())
3710 "Ptr is neither a value or pointer operand");
3716 auto isLoopVaryingBitCastOrGEP = [&](
Value *
V) {
3717 return ((isa<BitCastInst>(V) &&
V->getType()->isPointerTy()) ||
3718 isa<GetElementPtrInst>(V)) &&
3729 if (!isLoopVaryingBitCastOrGEP(
Ptr))
3734 auto *
I = cast<Instruction>(
Ptr);
3742 return isa<LoadInst>(U) || isa<StoreInst>(U);
3746 PossibleNonScalarPtrs.
insert(
I);
3764 for (
auto &
I : *BB) {
3765 if (
auto *Load = dyn_cast<LoadInst>(&
I)) {
3766 evaluatePtrUse(Load,
Load->getPointerOperand());
3767 }
else if (
auto *Store = dyn_cast<StoreInst>(&
I)) {
3768 evaluatePtrUse(Store,
Store->getPointerOperand());
3769 evaluatePtrUse(Store,
Store->getValueOperand());
3772 for (
auto *
I : ScalarPtrs)
3773 if (!PossibleNonScalarPtrs.
count(
I)) {
3781 auto ForcedScalar = ForcedScalars.
find(VF);
3782 if (ForcedScalar != ForcedScalars.
end())
3783 for (
auto *
I : ForcedScalar->second) {
3784 LLVM_DEBUG(
dbgs() <<
"LV: Found (forced) scalar instruction: " << *
I <<
"\n");
3793 while (
Idx != Worklist.
size()) {
3795 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
3797 auto *Src = cast<Instruction>(Dst->getOperand(0));
3799 auto *J = cast<Instruction>(U);
3800 return !TheLoop->contains(J) || Worklist.count(J) ||
3801 ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
3802 isScalarUse(J, Src));
3805 LLVM_DEBUG(
dbgs() <<
"LV: Found scalar instruction: " << *Src <<
"\n");
3812 auto *Ind = Induction.first;
3813 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
3822 auto IsDirectLoadStoreFromPtrIndvar = [&](
Instruction *Indvar,
3824 return Induction.second.getKind() ==
3826 (isa<LoadInst>(
I) || isa<StoreInst>(
I)) &&
3833 auto *I = cast<Instruction>(U);
3834 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
3835 IsDirectLoadStoreFromPtrIndvar(Ind, I);
3842 auto ScalarIndUpdate =
3844 auto *I = cast<Instruction>(U);
3845 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
3846 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
3848 if (!ScalarIndUpdate)
3853 Worklist.
insert(IndUpdate);
3854 LLVM_DEBUG(
dbgs() <<
"LV: Found scalar instruction: " << *Ind <<
"\n");
3855 LLVM_DEBUG(
dbgs() <<
"LV: Found scalar instruction: " << *IndUpdate
  switch (I->getOpcode()) {
  case Instruction::Call:
    return CallWideningDecisions.at(std::make_pair(cast<CallInst>(I), VF))
  case Instruction::Load:
  case Instruction::Store: {

  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::SRem:
  case Instruction::URem: {

  switch (I->getOpcode()) {
  case Instruction::Load:
  case Instruction::Store: {
           (isa<LoadInst>(I) ||
            (isa<StoreInst>(I) &&

  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::SRem:
  case Instruction::URem:

  case Instruction::Call:
std::pair<InstructionCost, InstructionCost>
LoopVectorizationCostModel::getDivRemSpeculationCost(Instruction *I,
                                                     ElementCount VF) {
  assert(I->getOpcode() == Instruction::UDiv ||
         I->getOpcode() == Instruction::SDiv ||
         I->getOpcode() == Instruction::SRem ||
         I->getOpcode() == Instruction::URem);

    ScalarizationCost = 0;

    ScalarizationCost += getScalarizationOverhead(I, VF, CostKind);

        Instruction::Select, VecTy,

    Value *Op2 = I->getOperand(1);

        {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},

  return {ScalarizationCost, SafeDivisorCost};
4015 "Decision should not be set yet.");
4017 assert(Group &&
"Must have a group.");
4021 auto &
DL =
I->getModule()->getDataLayout();
4028 unsigned InterleaveFactor = Group->getFactor();
4029 bool ScalarNI =
DL.isNonIntegralPointerType(ScalarTy);
4030 for (
unsigned i = 0; i < InterleaveFactor; i++) {
4035 bool MemberNI =
DL.isNonIntegralPointerType(
MemberTy);
4037 if (MemberNI != ScalarNI) {
4040 }
else if (MemberNI && ScalarNI &&
4041 ScalarTy->getPointerAddressSpace() !=
4042 MemberTy->getPointerAddressSpace()) {
4052 bool PredicatedAccessRequiresMasking =
4055 bool LoadAccessWithGapsRequiresEpilogMasking =
4056 isa<LoadInst>(
I) && Group->requiresScalarEpilogue() &&
4058 bool StoreAccessWithGapsRequiresMasking =
4059 isa<StoreInst>(
I) && (Group->getNumMembers() < Group->getFactor());
4060 if (!PredicatedAccessRequiresMasking &&
4061 !LoadAccessWithGapsRequiresEpilogMasking &&
4062 !StoreAccessWithGapsRequiresMasking)
4069 "Masked interleave-groups for predicated accesses are not enabled.");
4071 if (Group->isReverse())
4083 assert((isa<LoadInst, StoreInst>(
I)) &&
"Invalid memory instruction");
4099 auto &
DL =
I->getModule()->getDataLayout();
void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
         "This function should not be visited twice for the same VF");

  Uniforms[VF].clear();

  auto isOutOfScope = [&](Value *V) -> bool {

  auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
    if (isOutOfScope(I)) {
      LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
      LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");

    addToWorklistIfAllowed(Cmp);

    if (PrevVF.isVector()) {
      auto Iter = Uniforms.find(PrevVF);
      if (Iter != Uniforms.end() && !Iter->second.contains(I))

    if (isa<LoadInst>(I))

           "Widening decision should be ready at this moment");

    if (isUniformMemOpUse(I))

    return (WideningDecision == CM_Widen ||

    if (isa<StoreInst>(I) && I->getOperand(0) == Ptr)

    for (auto &I : *BB) {
        switch (II->getIntrinsicID()) {
        case Intrinsic::sideeffect:
        case Intrinsic::experimental_noalias_scope_decl:
        case Intrinsic::assume:
        case Intrinsic::lifetime_start:
        case Intrinsic::lifetime_end:
          addToWorklistIfAllowed(&I);

      if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
        assert(isOutOfScope(EVI->getAggregateOperand()) &&
               "Expected aggregate value to be loop invariant");
        addToWorklistIfAllowed(EVI);

      if (isUniformMemOpUse(&I))
        addToWorklistIfAllowed(&I);

      if (isVectorizedMemAccessUse(&I, Ptr))

  for (auto *V : HasUniformUse) {
    if (isOutOfScope(V))
    auto *I = cast<Instruction>(V);
    auto UsersAreMemAccesses =
          return isVectorizedMemAccessUse(cast<Instruction>(U), V);
    if (UsersAreMemAccesses)
      addToWorklistIfAllowed(I);

  while (idx != Worklist.size()) {
    for (auto *OV : I->operand_values()) {
      if (isOutOfScope(OV))
      auto *OP = dyn_cast<PHINode>(OV);
      auto *OI = cast<Instruction>(OV);
        auto *J = cast<Instruction>(U);
        return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
        addToWorklistIfAllowed(OI);

    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

      auto *I = cast<Instruction>(U);
      return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
             isVectorizedMemAccessUse(I, Ind);

    auto UniformIndUpdate =
      auto *I = cast<Instruction>(U);
      return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
             isVectorizedMemAccessUse(I, IndUpdate);
    if (!UniformIndUpdate)

    addToWorklistIfAllowed(Ind);
    addToWorklistIfAllowed(IndUpdate);
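// For instance, the scalar GEP feeding a consecutive widened load is
// uniform: every lane of the vector access shares one pointer, so only the
// lane-0 copy of the address computation needs to be kept.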
4338 "runtime pointer checks needed. Enable vectorization of this "
4339 "loop with '#pragma clang loop vectorize(enable)' when "
4340 "compiling with -Os/-Oz",
4341 "CantVersionLoopWithOptForSize",
ORE,
TheLoop);
4347 "runtime SCEV checks needed. Enable vectorization of this "
4348 "loop with '#pragma clang loop vectorize(enable)' when "
4349 "compiling with -Os/-Oz",
4350 "CantVersionLoopWithOptForSize",
ORE,
TheLoop);
4357 "runtime stride == 1 checks needed. Enable vectorization of "
4358 "this loop without such check by compiling with -Os/-Oz",
4359 "CantVersionLoopWithOptForSize",
ORE,
TheLoop);
ElementCount
LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
                      "ScalableVectorizationDisabled", ORE, TheLoop);

  LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n");

      std::numeric_limits<ElementCount::ScalarTy>::max());

        "Scalable vectorization not supported for the reduction "
        "operations found in this loop.",

        "for all element types found in this loop.",

    return MaxScalableVF;

        "Max legal vector width too small, scalable vectorization "

  return MaxScalableVF;
    unsigned MaxTripCount, ElementCount UserVF, bool FoldTailByMasking) {
  unsigned SmallestType, WidestType;

  unsigned MaxSafeElements =

  auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements);

  LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
  LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF

  auto MaxSafeUserVF =
      UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;

                        << " is unsafe, clamping to max safe VF="
                        << MaxSafeFixedVF << ".\n");

             << "User-specified vectorization factor "
             << ore::NV("UserVectorizationFactor", UserVF)
             << " is unsafe, clamping to maximum safe vectorization factor "
             << ore::NV("VectorizationFactor", MaxSafeFixedVF);

    return MaxSafeFixedVF;

                        << " is ignored because scalable vectors are not "

             << "User-specified vectorization factor "
             << ore::NV("UserVectorizationFactor", UserVF)
             << " is ignored because the target does not support scalable "
                "vectors. The compiler will pick a more suitable value.";

                      << " is unsafe. Ignoring scalable UserVF.\n");

             << "User-specified vectorization factor "
             << ore::NV("UserVectorizationFactor", UserVF)
             << " is unsafe. Ignoring the hint to let the compiler pick a "
                "more suitable value.";

  LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
                    << " / " << WidestType << " bits.\n");

          getMaximizedVFForTarget(MaxTripCount, SmallestType, WidestType,
                                  MaxSafeFixedVF, FoldTailByMasking))

          getMaximizedVFForTarget(MaxTripCount, SmallestType, WidestType,
                                  MaxSafeScalableVF, FoldTailByMasking))
    if (MaxVF.isScalable()) {
      Result.ScalableVF = MaxVF;
      LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF

        "Not inserting runtime ptr check for divergent target",
        "runtime pointer checks needed. Not enabled for divergent target",
        "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
4549 "loop trip count is one, irrelevant for vectorization",
4554 switch (ScalarEpilogueStatus) {
4556 return computeFeasibleMaxVF(MaxTC, UserVF,
false);
4561 dbgs() <<
"LV: vector predicate hint/switch found.\n"
4562 <<
"LV: Not allowing scalar epilogue, creating predicated "
4563 <<
"vector loop.\n");
4570 dbgs() <<
"LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
4572 LLVM_DEBUG(
dbgs() <<
"LV: Not allowing scalar epilogue due to low trip "
4591 LLVM_DEBUG(
dbgs() <<
"LV: Cannot fold tail by masking: vectorize with a "
4592 "scalar epilogue instead.\n");
4594 return computeFeasibleMaxVF(MaxTC, UserVF,
false);
4605 "No decisions should have been taken at this point");
4615 std::optional<unsigned> MaxPowerOf2RuntimeVF =
4620 MaxPowerOf2RuntimeVF = std::max<unsigned>(
4621 *MaxPowerOf2RuntimeVF,
4624 MaxPowerOf2RuntimeVF = std::nullopt;
4627 if (MaxPowerOf2RuntimeVF && *MaxPowerOf2RuntimeVF > 0) {
4629 "MaxFixedVF must be a power of 2");
4630 unsigned MaxVFtimesIC =
4631 UserIC ? *MaxPowerOf2RuntimeVF * UserIC : *MaxPowerOf2RuntimeVF;
4635 BackedgeTakenCount, SE->
getOne(BackedgeTakenCount->
getType()));
4641 LLVM_DEBUG(
dbgs() <<
"LV: No tail will remain for any chosen VF.\n");
4657 LLVM_DEBUG(
dbgs() <<
"LV: Cannot fold tail by masking: vectorize with a "
4658 "scalar epilogue instead.\n");
4664 LLVM_DEBUG(
dbgs() <<
"LV: Can't fold tail by masking: don't vectorize\n");
4670 "Unable to calculate the loop count due to complex control flow",
4671 "unable to calculate the loop count due to complex control flow",
4677 "Cannot optimize for size and vectorize at the same time.",
4678 "cannot optimize for size and vectorize at the same time. "
4679 "Enable vectorization of this loop with '#pragma clang loop "
4680 "vectorize(enable)' when compiling with -Os/-Oz",
ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
    unsigned MaxTripCount, unsigned SmallestType, unsigned WidestType,
    ElementCount MaxSafeVF, bool FoldTailByMasking) {
  bool ComputeScalableMaxVF = MaxSafeVF.isScalable();

         "Scalable flags must match");

                               ComputeScalableMaxVF);
  MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);

                    << (MaxVectorElementCount * WidestType) << " bits.\n");

  if (!MaxVectorElementCount) {
               << (ComputeScalableMaxVF ? "scalable" : "fixed")
               << " vector registers.\n");

  unsigned WidestRegisterMinEC = MaxVectorElementCount.getKnownMinValue();
  if (MaxVectorElementCount.isScalable() &&
    WidestRegisterMinEC *= Min;

  if (MaxTripCount && MaxTripCount <= WidestRegisterMinEC &&
    LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not "
                         "exceeding the constant trip count: "
                      << ClampedUpperTripCount << "\n");
        ClampedUpperTripCount,
        FoldTailByMasking ? MaxVectorElementCount.isScalable() : false);

                                 ComputeScalableMaxVF);
    MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);

    for (int i = RUs.size() - 1; i >= 0; --i) {
      bool Selected = true;
      for (auto &pair : RUs[i].MaxLocalUsers) {
        if (pair.second > TargetNumRegisters)

                      << ") with target's minimum: " << MinVF << '\n');
static std::optional<unsigned>
  const Function *Fn = L->getHeader()->getParent();
    auto Max = Attr.getVScaleRangeMax();
    if (Max && Min == Max)

bool LoopVectorizationPlanner::isMoreProfitable(

  if (!A.Width.isScalable() && !B.Width.isScalable() && MaxTripCount) {

    auto GetCostForTC = [MaxTripCount, this](unsigned VF,
                 : VectorCost * (MaxTripCount / VF) +
                       ScalarCost * (MaxTripCount % VF);

    auto RTCostA = GetCostForTC(A.Width.getFixedValue(), CostA, A.ScalarCost);
    auto RTCostB = GetCostForTC(B.Width.getFixedValue(), CostB, B.ScalarCost);

    return RTCostA < RTCostB;
  unsigned EstimatedWidthA = A.Width.getKnownMinValue();
  unsigned EstimatedWidthB = B.Width.getKnownMinValue();
    if (A.Width.isScalable())
      EstimatedWidthA *= *VScale;
    if (B.Width.isScalable())
      EstimatedWidthB *= *VScale;

  if (A.Width.isScalable() && !B.Width.isScalable())
    return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA);

  return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA);
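// Worked example (illustrative numbers): comparing A = {Width 4, CostA 20}
// against B = {Width 8, CostB 44}, the cross-multiplied test above gives
// 20 * 8 = 160 < 44 * 4 = 176, so A is judged more profitable per lane.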
  if (InvalidCosts.empty())

  std::map<Instruction *, unsigned> Numbering;
  for (auto &Pair : InvalidCosts)
    if (!Numbering.count(Pair.first))
      Numbering[Pair.first] = I++;

    if (Numbering[A.first] != Numbering[B.first])
      return Numbering[A.first] < Numbering[B.first];
    return ECC(A.second, B.second);

  Subset = Tail.take_front(1);

    if (Subset == Tail || Tail[Subset.size()].first != I) {
      std::string OutString;
      assert(!Subset.empty() && "Unexpected empty range");
      OS << "Instruction with invalid costs prevented vectorization at VF=(";
      for (const auto &Pair : Subset)
        OS << (Pair.second == Subset.front().second ? "" : ", ") << Pair.second;
      if (auto *CI = dyn_cast<CallInst>(I))
        OS << " call to " << CI->getCalledFunction()->getName();
        OS << " " << I->getOpcodeName();
      Tail = Tail.drop_front(Subset.size());
      Subset = Tail.take_front(Subset.size() + 1);
  } while (!Tail.empty());
  LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
  assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
         "Expected Scalar VF to be a candidate");

  if (ForceVectorization && VFCandidates.size() > 1) {

  for (const auto &i : VFCandidates) {
      unsigned AssumedMinimumVscale =
          Candidate.Width.isScalable()
              ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale
              : Candidate.Width.getFixedValue();
                 << " costs: " << (Candidate.Cost / Width));
                 << AssumedMinimumVscale << ")");

    if (!C.second && !ForceVectorization) {
      LLVM_DEBUG(
          dbgs() << "LV: Not considering vector loop of width " << i
                 << " because it will not generate any vector instructions.\n");

    if (isMoreProfitable(Candidate, ScalarCost))
      ProfitableVFs.push_back(Candidate);

    if (isMoreProfitable(Candidate, ChosenFactor))
      ChosenFactor = Candidate;

        "There are conditional stores.",
        "store that is conditionally executed prevents vectorization",
        "ConditionalStore", ORE, OrigLoop);
    ChosenFactor = ScalarCost;

                 !isMoreProfitable(ChosenFactor, ScalarCost)) dbgs()
             << "LV: Vectorization seems to be not beneficial, "
             << "but was forced by a user.\n");

  return ChosenFactor;
bool LoopVectorizationPlanner::isCandidateForEpilogueVectorization(
          [&](PHINode &Phi) { return Legal->isFixedOrderRecurrence(&Phi); }))

        Entry.first->getIncomingValueForBlock(OrigLoop->getLoopLatch());
      if (!OrigLoop->contains(cast<Instruction>(U)))

    for (User *U : Entry.first->users())
      if (!OrigLoop->contains(cast<Instruction>(U)))
  unsigned Multiplier = 1;

    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n");

    LLVM_DEBUG(dbgs() << "LEV: Unable to vectorize epilogue because no "
                         "epilogue is allowed.\n");

  if (!isCandidateForEpilogueVectorization(MainLoopVF)) {
    LLVM_DEBUG(dbgs() << "LEV: Unable to vectorize epilogue because the loop "
                         "is not a supported candidate.\n");

    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n");
      return {ForcedEC, 0, 0};
    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization forced factor is not "

        dbgs() << "LEV: Epilogue vectorization skipped due to opt for size.\n");

    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "

    EstimatedRuntimeVF *= *VScale;

  const SCEV *RemainingIterations = nullptr;
  for (auto &NextVF : ProfitableVFs) {
    if ((!NextVF.Width.isScalable() && MainLoopVF.isScalable() &&

    if (!MainLoopVF.isScalable() && !NextVF.Width.isScalable()) {
      if (!RemainingIterations) {
              SE.getConstant(TCType, NextVF.Width.getKnownMinValue()),
              RemainingIterations))

    if (Result.Width.isScalar() || isMoreProfitable(NextVF, Result))

                    << Result.Width << "\n");
std::pair<unsigned, unsigned>
  unsigned MinWidth = -1U;
  unsigned MaxWidth = 8;

      MaxWidth = std::min<unsigned>(
          MaxWidth, std::min<unsigned>(

      MinWidth = std::min<unsigned>(
          MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedValue());
      MaxWidth = std::max<unsigned>(
          MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedValue());

  return {MinWidth, MaxWidth};

    for (Instruction &I : BB->instructionsWithoutDebug()) {
      if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))

      if (auto *PN = dyn_cast<PHINode>(&I)) {

      if (auto *ST = dyn_cast<StoreInst>(&I))
        T = ST->getValueOperand()->getType();

             "Expected the load/store/recurrence type to be sized");
  if (LoopCost == 0) {
    assert(LoopCost.isValid() && "Expected to have chosen a VF with valid cost");

  for (auto &pair : R.MaxLocalUsers) {
    pair.second = std::max(pair.second, 1U);

  unsigned IC = UINT_MAX;

  for (auto &pair : R.MaxLocalUsers) {
    unsigned MaxLocalUsers = pair.second;
    unsigned LoopInvariantRegs = 0;
    if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
      LoopInvariantRegs = R.LoopInvariantRegs[pair.first];

    unsigned TmpIC = llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs) /
      TmpIC = llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs - 1) /
                              std::max(1U, (MaxLocalUsers - 1)));

    IC = std::min(IC, TmpIC);
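  // Worked example (illustrative numbers): with 32 registers in a class,
  // 4 loop-invariant values and a peak of 7 simultaneously live values,
  // bit_floor((32 - 4) / 7) = bit_floor(4) = 4, so register pressure caps
  // the interleave count at 4 for that class.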
    EstimatedVF *= *VScale;
  assert(EstimatedVF >= 1 && "Estimated VF shouldn't be less than 1");

    unsigned AvailableTC =

        std::max(1u, std::min(AvailableTC / EstimatedVF, MaxInterleaveCount)));
    unsigned InterleaveCountLB = bit_floor(std::max(
        1u, std::min(AvailableTC / (EstimatedVF * 2), MaxInterleaveCount)));
    MaxInterleaveCount = InterleaveCountLB;

    if (InterleaveCountUB != InterleaveCountLB) {
      unsigned TailTripCountUB =
          (AvailableTC % (EstimatedVF * InterleaveCountUB));
      unsigned TailTripCountLB =
          (AvailableTC % (EstimatedVF * InterleaveCountLB));

      if (TailTripCountUB == TailTripCountLB)
        MaxInterleaveCount = InterleaveCountUB;

  } else if (BestKnownTC && *BestKnownTC > 0) {
            ? (*BestKnownTC) - 1

    MaxInterleaveCount = bit_floor(std::max(
        1u, std::min(AvailableTC / (EstimatedVF * 2), MaxInterleaveCount)));

  assert(MaxInterleaveCount > 0 &&
         "Maximum interleave count must be greater than 0");

  if (IC > MaxInterleaveCount)
    IC = MaxInterleaveCount;

    IC = std::max(1u, IC);

  assert(IC > 0 && "Interleave count must be greater than 0.");
  if (VF.isVector() && HasReductions) {
    LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");

  bool ScalarInterleavingRequiresPredication =
        return Legal->blockNeedsPredication(BB);
  bool ScalarInterleavingRequiresRuntimePointerCheck =

             << "LV: IC is " << IC << '\n'
             << "LV: VF is " << VF << '\n');
  const bool AggressivelyInterleaveReductions =

  if (!ScalarInterleavingRequiresRuntimePointerCheck &&
      !ScalarInterleavingRequiresPredication && LoopCost < SmallLoopCost) {

    unsigned SmallIC = std::min(IC, (unsigned)llvm::bit_floor<uint64_t>(

    unsigned StoresIC = IC / (NumStores ? NumStores : 1);
    unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);

    bool HasSelectCmpReductions =
          const RecurrenceDescriptor &RdxDesc = Reduction.second;
          return RecurrenceDescriptor::isAnyOfRecurrenceKind(
              RdxDesc.getRecurrenceKind());
    if (HasSelectCmpReductions) {
      LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");

    bool HasOrderedReductions =
          const RecurrenceDescriptor &RdxDesc = Reduction.second;
          return RdxDesc.isOrdered();
    if (HasOrderedReductions) {
      LLVM_DEBUG(
          dbgs() << "LV: Not interleaving scalar ordered reductions.\n");

      SmallIC = std::min(SmallIC, F);
      StoresIC = std::min(StoresIC, F);
      LoadsIC = std::min(LoadsIC, F);

        std::max(StoresIC, LoadsIC) > SmallIC) {
      LLVM_DEBUG(
          dbgs() << "LV: Interleaving to saturate store or load ports.\n");
      return std::max(StoresIC, LoadsIC);

  if (VF.isScalar() && AggressivelyInterleaveReductions) {
    return std::max(IC / 2, SmallIC);

  LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");

  if (AggressivelyInterleaveReductions) {
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      for (Value *U : I.operands()) {
        auto *Instr = dyn_cast<Instruction>(U);

          LoopInvariants.insert(Instr);

      EndPoint[Instr] = IdxToInstr.size();

  LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");

  const auto &TTICapture = TTI;

  for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
    InstrList &List = TransposeEnds[i];

    for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
      if (VFs[j].isScalar()) {
        for (auto *Inst : OpenIntervals) {
        for (auto *Inst : OpenIntervals) {
            RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);

        auto &Entry = MaxUsages[j][pair.first];
        Entry = std::max(Entry, pair.second);

                      << OpenIntervals.size() << '\n');

  for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
    for (auto *Inst : LoopInvariants) {
      bool IsScalar = all_of(Inst->users(), [&](User *U) {
        auto *I = cast<Instruction>(U);
        return TheLoop != LI->getLoopFor(I->getParent()) ||
               isScalarAfterVectorization(I, VFs[i]);

      Invariant[ClassID] += GetRegUsage(Inst->getType(), VF);

    dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
    dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
    for (const auto &pair : MaxUsages[i]) {
      dbgs() << "LV(REG): RegisterClass: "
    dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
    for (const auto &pair : Invariant) {
      dbgs() << "LV(REG): RegisterClass: "
bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I,
         "Expecting a scalar emulated instruction");
  return isa<LoadInst>(I) ||
         (isa<StoreInst>(I) &&

  PredicatedBBsAfterVectorization[VF].clear();

        if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I, VF) &&
            computePredInstDiscount(&I, ScalarCosts, VF) >= 0)

      PredicatedBBsAfterVectorization[VF].insert(BB);

         "Instruction marked uniform-after-vectorization will be predicated");

    if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||

    for (Use &U : I->operands())
      if (auto *J = dyn_cast<Instruction>(U.get()))

  while (!Worklist.empty()) {
    if (ScalarCosts.contains(I))

    for (Use &U : I->operands())
      if (auto *J = dyn_cast<Instruction>(U.get())) {
               "Instruction has non-scalar type");
        if (canBeScalarized(J))
        else if (needsExtract(J, VF)) {
              cast<VectorType>(ToVectorTy(J->getType(), VF)),

    Discount += VectorCost - ScalarCost;
    ScalarCosts[I] = ScalarCost;

    for (Instruction &I : BB->instructionsWithoutDebug()) {
      if (C.first.isValid() &&

      BlockCost.first += C.first;
      BlockCost.second |= C.second;
                        << " for VF " << VF << " For instruction: " << I

    Cost.first += BlockCost.first;
    Cost.second |= BlockCost.second;
                             const Loop *TheLoop) {
  auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);

  auto SE = PSE.getSE();
  unsigned NumOperands = Gep->getNumOperands();
  for (unsigned i = 1; i < NumOperands; ++i) {
    Value *Opd = Gep->getOperand(i);
        !Legal->isInductionVariable(Opd))
InstructionCost
LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
         "Scalarization cost of instruction implies vectorization.");

  if (useEmulatedMaskMemRefHack(I, VF))

InstructionCost
LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));

  assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
         "Stride should be 1 or -1 for consecutive memory access");

  bool Reverse = ConsecutiveStride < 0;

InstructionCost
LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));

  if (isa<LoadInst>(I)) {

         (isLoopInvariantStoreValue

InstructionCost
LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));

InstructionCost
LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));

  assert(Group && "Fail to get an interleaved access group.");

  unsigned InterleaveFactor = Group->getFactor();

  for (unsigned IF = 0; IF < InterleaveFactor; IF++)
    if (Group->getMember(IF))

  bool UseMaskForGaps =
      (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()));

      I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),

  if (Group->isReverse()) {
           "Reverse masked interleaved access not supported.");
    Cost += Group->getNumMembers() *
std::optional<InstructionCost>
LoopVectorizationCostModel::getReductionPatternCost(

  if (InLoopReductions.empty() || VF.isScalar() || !isa<VectorType>(Ty))
    return std::nullopt;
  auto *VectorTy = cast<VectorType>(Ty);

    return std::nullopt;

  if (!InLoopReductionImmediateChains.count(RetI))
    return std::nullopt;

  Instruction *LastChain = InLoopReductionImmediateChains.at(RetI);
  while (!isa<PHINode>(ReductionPhi))
    ReductionPhi = InLoopReductionImmediateChains.at(ReductionPhi);

  if (RedOp && RdxDesc.getOpcode() == Instruction::Add &&
    bool IsUnsigned = isa<ZExtInst>(Op0);

        RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
      return I == RetI ? RedCost : 0;

    bool IsUnsigned = isa<ZExtInst>(RedOp);
    if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
      return I == RetI ? RedCost : 0;
  } else if (RedOp && RdxDesc.getOpcode() == Instruction::Add &&
    bool IsUnsigned = isa<ZExtInst>(Op0);

    if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
      Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;

        (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost))
      return I == RetI ? RedCost : 0;

    if (RedCost.isValid() && RedCost < MulCost + BaseCost)
      return I == RetI ? RedCost : 0;

  return I == RetI ? std::optional<InstructionCost>(BaseCost) : std::nullopt;
InstructionCost
LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,

InstructionCost
LoopVectorizationCostModel::getInstructionCost(Instruction *I,

  auto ForcedScalar = ForcedScalars.find(VF);
  if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
    auto InstSet = ForcedScalar->second;
    if (InstSet.count(I))

  bool TypeNotScalarized = false;

  if (!RetTy->isVoidTy() &&

    for (auto *V : filterExtractingOperands(Ops, VF))
        filterExtractingOperands(Ops, VF), Tys, CostKind);

  auto isLegalToScalarize = [&]() {
    if (isa<LoadInst>(I))

    auto &SI = cast<StoreInst>(I);

    if (GatherScatterCost < ScalarizationCost)

  assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
         "Expected consecutive stride.");

  unsigned NumAccesses = 1;
    assert(Group && "Fail to get an interleaved access group.");
      NumAccesses = Group->getNumMembers();

    InterleaveCost = getInterleaveGroupCost(&I, VF);

          ? getGatherScatterCost(&I, VF) * NumAccesses

        getMemInstScalarizationCost(&I, VF) * NumAccesses;

  if (InterleaveCost <= GatherScatterCost &&
      InterleaveCost < ScalarizationCost) {
    Cost = InterleaveCost;
  } else if (GatherScatterCost < ScalarizationCost) {
    Cost = GatherScatterCost;

    Cost = ScalarizationCost;
6562 while (!Worklist.
empty()) {
6564 for (
auto &
Op :
I->operands())
6565 if (
auto *InstOp = dyn_cast<Instruction>(
Op))
6566 if ((InstOp->getParent() ==
I->getParent()) && !isa<PHINode>(InstOp) &&
6567 AddrDefs.
insert(InstOp).second)
6571 for (
auto *
I : AddrDefs) {
6572 if (isa<LoadInst>(
I)) {
6586 for (
unsigned I = 0;
I < Group->getFactor(); ++
I) {
6603 "Trying to set a vectorization decision for a scalar VF");
6622 for (
auto &ArgOp : CI->
args())
6627 for (
Type *ScalarTy : ScalarTys)
6633 if (
auto RedCost = getReductionPatternCost(CI, VF,
RetTy,
CostKind)) {
6636 std::nullopt, *RedCost);
6650 getScalarizationOverhead(CI, VF,
CostKind);
6656 bool UsesMask =
false;
6662 if (Info.Shape.VF != VF)
6666 if (MaskRequired && !Info.isMasked())
6670 bool ParamsOk = true;
6672 switch (Param.ParamKind) {
6691 dyn_cast<SCEVAddRecExpr>(SE->getSCEV(ScalarParam));
6693 if (!SAR || SAR->getLoop() != TheLoop) {
6699 dyn_cast<SCEVConstant>(SAR->getStepRecurrence(*SE));
6727 if (VecFunc && UsesMask && !MaskRequired)
6747 if (VectorCost <= Cost) {
6752 if (IntrinsicCost <= Cost) {
6753 Cost = IntrinsicCost;
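// Sketch (assumed example): a call site usually acquires vector variants
// through declare-simd style annotations on the callee, e.g.
//
//   #pragma omp declare simd simdlen(4) notinbranch
//   float foo(float x);
//   ...
//   for (unsigned i = 0; i < n; ++i) out[i] = foo(in[i]);
//
// The matching above accepts a variant only if Info.Shape.VF equals the VF
// being costed and, when the block is predicated, only a masked variant;
// the cheaper of the vector-call and intrinsic costs then wins over the
// scalarized call.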
6772 auto hasSingleCopyAfterVectorization = [this](Instruction *I,
6777 auto Scalarized = InstsToScalarize.find(VF);
6778 assert(Scalarized != InstsToScalarize.end() &&
6779 "VF not yet analyzed for scalarization profitability");
6780 return !Scalarized->second.count(I) &&
6782 auto *UI = cast<Instruction>(U);
6783 return !Scalarized->second.count(UI);
6786 (void)hasSingleCopyAfterVectorization;
6794 assert(I->getOpcode() == Instruction::GetElementPtr ||
6795 I->getOpcode() == Instruction::PHI ||
6796 (I->getOpcode() == Instruction::BitCast &&
6797 I->getType()->isPointerTy()) ||
6798 hasSingleCopyAfterVectorization(I, VF));
6804 switch (I->getOpcode()) {
6805 case Instruction::GetElementPtr:
6811 case Instruction::Br: {
6815 bool ScalarPredicatedBB = false;
6818 (PredicatedBBsAfterVectorization[VF].count(BI->getSuccessor(0)) ||
6819 PredicatedBBsAfterVectorization[VF].count(BI->getSuccessor(1))))
6820 ScalarPredicatedBB = true;
6822 if (ScalarPredicatedBB) {
6844 case Instruction::PHI: {
6845 auto *Phi = cast<PHINode>(I);
6852 cast<VectorType>(VectorTy), Mask, CostKind,
6860 return (Phi->getNumIncomingValues() - 1) *
6868 case Instruction::UDiv:
6869 case Instruction::SDiv:
6870 case Instruction::URem:
6871 case Instruction::SRem:
6875 ScalarCost : SafeDivisorCost;
6879 case Instruction::Add:
6880 case Instruction::FAdd:
6881 case Instruction::Sub:
6882 case Instruction::FSub:
6883 case Instruction::Mul:
6884 case Instruction::FMul:
6885 case Instruction::FDiv:
6886 case Instruction::FRem:
6887 case Instruction::Shl:
6888 case Instruction::LShr:
6889 case Instruction::AShr:
6890 case Instruction::And:
6891 case Instruction::Or:
6892 case Instruction::Xor: {
6896 if (I->getOpcode() == Instruction::Mul &&
6902 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
6907 Value *Op2 = I->getOperand(1);
6916 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6919 case Instruction::FNeg: {
6922 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6923 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6924 I->getOperand(0), I);
6926 case Instruction::Select: {
6931 const Value *Op0, *Op1;
6948 Type *CondTy = SI->getCondition()->getType();
6953 if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
6954 Pred = Cmp->getPredicate();
6958 case Instruction::ICmp:
6959 case Instruction::FCmp: {
6960 Type *ValTy = I->getOperand(0)->getType();
6961 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
6966 cast<CmpInst>(I)->getPredicate(), CostKind,
6969 case Instruction::Store:
6970 case Instruction::Load: {
6975 "CM decision should be taken at this point");
6982 return getMemoryInstructionCost(I, VF);
6984 case Instruction::BitCast:
6985 if (I->getType()->isPointerTy())
6988 case Instruction::ZExt:
6989 case Instruction::SExt:
6990 case Instruction::FPToUI:
6991 case Instruction::FPToSI:
6992 case Instruction::FPExt:
6993 case Instruction::PtrToInt:
6994 case Instruction::IntToPtr:
6995 case Instruction::SIToFP:
6996 case Instruction::UIToFP:
6997 case Instruction::Trunc:
6998 case Instruction::FPTrunc: {
7001 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
7002 "Expected a load or a store!");
7028 unsigned Opcode = I->getOpcode();
7031 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
7033 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
7034 CCH = ComputeCCH(Store);
7037 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
7038 Opcode == Instruction::FPExt) {
7039 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
7040 CCH = ComputeCCH(Load);
7047 auto *Trunc = cast<TruncInst>(I);
7049 Trunc->getSrcTy(), CCH, CostKind, Trunc);
7053 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7056 Type *SrcScalarTy = I->getOperand(0)->getType();
7065 Type *MinVecTy = VectorTy;
7066 if (Opcode == Instruction::Trunc) {
7070 } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
7080 case Instruction::Call:
7082 case Instruction::ExtractValue:
7084 case Instruction::Alloca:
7105 if ((SI = dyn_cast<StoreInst>(&I)) &&
7148 bool InLoop = !ReductionOperations.empty();
7151 InLoopReductions.insert(Phi);
7154 for (auto *I : ReductionOperations) {
7155 InLoopReductionImmediateChains[I] = LastChain;
7159 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
7160 << " reduction for phi: " << *Phi << "\n");
7168 return tryInsertInstruction(
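// Note on the "inloop"/"out of loop" debug message above (illustrative):
// an in-loop reduction folds each vector chunk into the scalar accumulator
// inside the loop body, conceptually
//
//   %r = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %chunk)
//
// once per iteration, whereas an out-of-loop reduction keeps a wide vector
// accumulator in the loop and reduces it a single time in the middle block.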
7181 unsigned WidestType;
7190 unsigned N = RegSize.getKnownMinValue() / WidestType;
7211 << "overriding computed VF.\n");
7216 LLVM_DEBUG(dbgs() << "LV: Not vectorizing. Scalable VF requested, but "
7217 << "not supported by the target.\n");
7219 "Scalable vectorization requested but not supported by the target",
7220 "the scalable user-specified vectorization width for outer-loop "
7221 "vectorization cannot be used because the target does not support "
7222 "scalable vectors.",
7223 "ScalableVFUnfeasible", ORE, OrigLoop);
7228 "VF needs to be a power of two");
7230 << "VF " << VF << " to build VPlans.\n");
7237 return {VF, 0, 0};
7241 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
7242 "VPlan-native path.\n");
7246std::optional<VectorizationFactor>
7254 return std::nullopt;
7261 << "LV: Invalidate all interleaved groups due to fold-tail by masking "
7262 "which requires masked-interleaved support.\n");
7273 if (!UserVF.isZero() && UserVFIsLegal) {
7275 "VF needs to be a power of two");
7281 buildVPlansWithVPRecipes(UserVF, UserVF);
7283 LLVM_DEBUG(dbgs() << "LV: No VPlan could be built for " << UserVF
7285 return std::nullopt;
7289 return {{UserVF, 0, 0}};
7292 "InvalidCost", ORE, OrigLoop);
7305 for (const auto &VF : VFCandidates) {
7328 return std::nullopt;
7335 [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) ==
7337 "Best VF has not a single VPlan.");
7339 for (const VPlanPtr &Plan : VPlans) {
7340 if (Plan->hasVF(VF))
7350 bool IsUnrollMetadata = false;
7351 MDNode *LoopID = L->getLoopID();
7354 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
7355 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
7357 const auto *S = dyn_cast<MDString>(MD->getOperand(0));
7359 S && S->getString().starts_with("llvm.loop.unroll.disable");
7365 if (!IsUnrollMetadata) {
7376 L->setLoopID(NewLoopID);
7390 auto *PhiR = cast<VPReductionPHIRecipe>(RedResult->getOperand(0));
7397 dyn_cast<PHINode>(PhiR->getStartValue()->getUnderlyingValue());
7413 BCBlockPhi->addIncoming(FinalValue, Incoming);
7415 BCBlockPhi->addIncoming(ResumePhi->getIncomingValueForBlock(Incoming),
7418 BCBlockPhi->addIncoming(ReductionStartValue, Incoming);
7421 auto *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
7425 int IncomingEdgeBlockIdx =
7427 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
7429 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
7430 OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
7432 OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
7434 ReductionResumeValues[&RdxDesc] = BCBlockPhi;
7437std::pair<DenseMap<const SCEV *, Value *>,
7444 "Trying to execute plan with unsupported VF");
7446 "Trying to execute plan with unsupported UF");
7448 (IsEpilogueVectorization || !ExpandedSCEVs) &&
7449 "expanded SCEVs to reuse can only be used during epilogue vectorization");
7451 if (!IsEpilogueVectorization)
7455 << ", UF=" << BestUF << '\n');
7456 BestVPlan.setName("Final VPlan");
7473 assert(IsEpilogueVectorization && "should only re-use the existing trip "
7474 "count during epilogue vectorization");
7478 Value *CanonicalIVStartValue;
7479 std::tie(State.CFG.PrevBB, CanonicalIVStartValue) =
7486 std::unique_ptr<LoopVersioning> LVer = nullptr;
7494 LVer = std::make_unique<LoopVersioning>(
7497 State.LVer = &*LVer;
7514 CanonicalIVStartValue, State);
7524 ReductionResumeValues, State, OrigLoop,
7533 std::optional<MDNode *> VectorizedLoopID =
7540 if (VectorizedLoopID)
7541 L->setLoopID(*VectorizedLoopID);
7565#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
7567 for (const auto &Plan : VPlans)
7581std::pair<BasicBlock *, Value *>
7583 const SCEV2ValueTy &ExpandedSCEVs) {
7623 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
7633 dbgs() << "intermediate fn:\n"
7641 assert(Bypass && "Expected valid bypass basic block.");
7662 TCCheckBlock->setName("vector.main.loop.iter.check");
7666 DT, LI, nullptr, "vector.ph");
7671 "TC check is expected to dominate Bypass");
7695 return TCCheckBlock;
7704std::pair<BasicBlock *, Value *>
7706 const SCEV2ValueTy &ExpandedSCEVs) {
7712 VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
7715 LI, nullptr, "vec.epilog.ph");
7717 VecEpilogueIterationCountCheck);
7722 "expected this to be saved from the previous pass.");
7740 VecEpilogueIterationCountCheck,
7764 for (PHINode &Phi : VecEpilogueIterationCountCheck->phis())
7767 for (PHINode *Phi : PhisInBlock) {
7769 Phi->replaceIncomingBlockWith(
7771 VecEpilogueIterationCountCheck);
7778 return EPI.EpilogueIterationCountCheck == IncB;
7790 Type *IdxTy = Legal->getWidestInductionType();
7794 EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
7805 {VecEpilogueIterationCountCheck,
7816 "Expected trip count to have been safed in the first pass.");
7820 "saved trip count does not dominate insertion point.");
7831 Value *CheckMinIters =
7835 "min.epilog.iters.check");
7841 unsigned EpilogueLoopStep =
7847 unsigned EstimatedSkipCount = std::min(MainLoopStep, EpilogueLoopStep);
7848 const uint32_t Weights[] = {EstimatedSkipCount,
7849 MainLoopStep - EstimatedSkipCount};
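// Worked example (assumed numbers): with a main-loop step of VF x UF = 16
// and an epilogue-loop step of 8, EstimatedSkipCount = min(16, 8) = 8 and
// the weights become {8, 16 - 8} = {8, 8}, i.e. the remaining iterations
// are treated as equally likely to skip or enter the epilogue vector loop.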
7860 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
7874 assert(!Range.isEmpty() && "Trying to test an empty VF range.");
7875 bool PredicateAtRangeStart = Predicate(Range.Start);
7878 if (Predicate(TmpVF) != PredicateAtRangeStart) {
7883 return PredicateAtRangeStart;
7893 auto MaxVFTimes2 = MaxVF * 2;
7895 VFRange SubRange = {VF, MaxVFTimes2};
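// Sketch of the VFRange idiom above (summary of the surrounding code): each
// candidate range starts at the current VF and initially extends to
// MaxVF * 2; while a plan is built, predicates clamp Range.End to the first
// TmpVF where a decision flips (the Predicate(TmpVF) != PredicateAtRangeStart
// test above), so one VPlan is only reused across VFs that agree on every
// decision, and the next range starts at the clamp point.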
7906 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
7908 if (ECEntryIt != EdgeMaskCache.end())
7909 return ECEntryIt->second;
7914 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
7915 assert(BI && "Unexpected terminator found");
7918 return EdgeMaskCache[Edge] = SrcMask;
7924 return EdgeMaskCache[Edge] = SrcMask;
7927 assert(EdgeMask && "No Edge Mask found for condition");
7943 return EdgeMaskCache[Edge] = EdgeMask;
7950 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
7952 assert(ECEntryIt != EdgeMaskCache.end() &&
7953 "looking up mask for edge which has not been created");
7954 return ECEntryIt->second;
7962 BlockMaskCache[Header] = nullptr;
7974 HeaderVPBB->insert(IV, NewInsertionPoint);
7981 BlockMaskCache[Header] = BlockMask;
7987 assert(BCEntryIt != BlockMaskCache.end() &&
7988 "Trying to access mask for block without one.");
7989 return BCEntryIt->second;
7993 assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
7994 assert(BlockMaskCache.count(BB) == 0 && "Mask for block already computed");
7996 "Loop header must have cached block mask");
8005 BlockMaskCache[BB] = EdgeMask;
8010 BlockMask = EdgeMask;
8014 BlockMask = Builder.createOr(BlockMask, EdgeMask, {});
8017 BlockMaskCache[BB] = BlockMask;
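// Summary sketch of the mask construction above (illustrative): for a block
// BB inside the loop,
//
//   BlockMask(BB)     = OR over predecessors P of EdgeMask(P -> BB)
//   EdgeMask(P -> BB) = BlockMask(P) AND (P's branch condition, negated
//                       for the false successor)
//
// with the header seeded separately: a nullptr mask means "all lanes
// active" unless tail folding installs a header mask first.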
8023 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
8024 "Must be called with either a load or store");
8030 "CM decision should be taken at this point.");
8056 auto *GEP = dyn_cast<GetElementPtrInst>(
8057 Ptr->getUnderlyingValue()->stripPointerCasts());
8064 if (LoadInst *Load = dyn_cast<LoadInst>(I))
8083 "step must be loop invariant");
8087 if (auto *TruncI = dyn_cast<TruncInst>(PhiOrTrunc)) {
8090 assert(isa<PHINode>(PhiOrTrunc) && "must be a phi node here");
8101 *PSE.getSE(), *OrigLoop, Range);
8127 auto isOptimizableIVTruncate =
8135 isOptimizableIVTruncate(I), Range)) {
8137 auto *Phi = cast<PHINode>(I->getOperand(0));
8149 unsigned NumIncoming = Phi->getNumIncomingValues();
8158 for (unsigned In = 0; In < NumIncoming; In++) {
8163 assert(In == 0 && "Both null and non-null edge masks found");
8165 "Distinct incoming values with one having a full mask");
8187 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8188 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
8189 ID == Intrinsic::pseudoprobe ||
8190 ID == Intrinsic::experimental_noalias_scope_decl))
8196 bool ShouldUseVectorIntrinsic =
8203 if (ShouldUseVectorIntrinsic)
8208 std::optional<unsigned> MaskPos;
8230 Variant = Decision.Variant;
8231 MaskPos = Decision.MaskPos;
8238 if (ShouldUseVectorCall) {
8239 if (MaskPos.has_value()) {
8254 Ops.insert(Ops.begin() + *MaskPos, Mask);
8266 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
8267 !isa<StoreInst>(I) && "Instruction should have been handled earlier");
8282 switch (I->getOpcode()) {
8285 case Instruction::SDiv:
8286 case Instruction::UDiv:
8287 case Instruction::SRem:
8288 case Instruction::URem: {
8294 VPValue *One = Plan->getVPValueOrAddLiveIn(
8295 ConstantInt::get(I->getType(), 1u, false));
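// Sketch of the safe-divisor widening prepared here (illustrative IR): a
// lane-predicated division
//
//   if (m[i]) q[i] = a[i] / b[i];
//
// is widened by substituting 1 for masked-off divisor lanes,
//
//   %safe = select <4 x i1> %mask, <4 x i32> %b, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
//   %q    = udiv <4 x i32> %a, %safe
//
// so the vector divide cannot trap on lanes whose predicate is false; the
// VPValue One above supplies that constant.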
8305 case Instruction::Add:
8306 case Instruction::And:
8307 case Instruction::AShr:
8308 case Instruction::FAdd:
8309 case Instruction::FCmp:
8310 case Instruction::FDiv:
8311 case Instruction::FMul:
8312 case Instruction::FNeg:
8313 case Instruction::FRem:
8314 case Instruction::FSub:
8315 case Instruction::ICmp:
8316 case Instruction::LShr:
8317 case Instruction::Mul:
8318 case Instruction::Or:
8319 case Instruction::Select:
8320 case Instruction::Shl:
8321 case Instruction::Sub:
8322 case Instruction::Xor:
8323 case Instruction::Freeze:
8331 auto *PN = cast<PHINode>(R->getUnderlyingValue());
8333 getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
8351 if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
8353 case Intrinsic::assume:
8354 case Intrinsic::lifetime_start:
8355 case Intrinsic::lifetime_end:
8377 VPValue *BlockInMask = nullptr;
8378 if (!IsPredicated) {
8382 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8391 IsUniform, BlockInMask);
8401 if (auto Phi = dyn_cast<PHINode>(Instr)) {
8402 if (Phi->getParent() != OrigLoop->getHeader())
8403 return tryToBlend(Phi, Operands, Plan);
8409 if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands, *Plan, Range)))
8415 "can only widen reductions and fixed-order recurrences here");
8435 auto *Inc = cast<Instruction>(
8436 Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch()));
8437 auto RecipeIter = Ingredient2Recipe.find(Inc);
8438 if (RecipeIter == Ingredient2Recipe.end())
8441 PhisToFix.push_back(PhiRecipe);
8445 if (isa<TruncInst>(Instr) &&
8446 (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
8455 if (auto *CI = dyn_cast<CallInst>(Instr))
8456 return tryToWidenCall(CI, Operands, Range, Plan);
8458 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8459 return tryToWidenMemory(Instr, Operands, Range, Plan);
8461 if (!shouldWiden(Instr, Range))
8464 if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8468 if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8473 if (auto *CI = dyn_cast<CastInst>(Instr)) {
8478 return tryToWiden(Instr, Operands, VPBB, Plan);
8481void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8485 auto MaxVFTimes2 = MaxVF * 2;
8487 VFRange SubRange = {VF, MaxVFTimes2};
8488 if (auto Plan = tryToBuildVPlanWithVPRecipes(SubRange)) {
8505 Value *StartIdx = ConstantInt::get(IdxTy, 0);
8512 Header->insert(CanonicalIVPHI, Header->begin());
8517 Instruction::Add, {CanonicalIVPHI, &Plan.getVFxUF()}, {HasNUW, false}, DL,
8519 CanonicalIVPHI->addOperand(CanonicalIVIncrement);
8538 Value *IncomingValue =
8539 ExitPhi.getIncomingValueForBlock(ExitingBB);
8546LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VFRange &Range) {
8550 VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
8570 "Unsupported interleave factor for scalable vectors");
8575 InterleaveGroups.insert(IG);
8576 for (unsigned i = 0; i < IG->getFactor(); i++)
8578 RecipeBuilder.recordRecipeOf(Member);
8597 Plan->getVectorLoopRegion()->setEntry(HeaderVPBB);
8598 Plan->getVectorLoopRegion()->setExiting(LatchVPBB);
8604 bool IVUpdateMayOverflow = false;
8625 bool NeedsBlends = BB != HeaderBB && !BB->phis().empty();
8626 return Legal->blockNeedsPredication(BB) || NeedsBlends;
8631 if (VPBB != HeaderVPBB)
8635 if (VPBB == HeaderVPBB)
8636 RecipeBuilder.createHeaderMask(*Plan);
8637 else if (NeedsMasks)
8638 RecipeBuilder.createBlockInMask(BB, *Plan);
8645 auto *Phi = dyn_cast<PHINode>(Instr);
8646 if (Phi && Phi->getParent() == HeaderBB) {
8647 Operands.push_back(Plan->getVPValueOrAddLiveIn(
8650 auto OpRange = Plan->mapToVPValues(Instr->operands());
8651 Operands = {OpRange.begin(), OpRange.end()};
8657 if ((SI = dyn_cast<StoreInst>(&I)) &&
8661 VPRecipeBase *Recipe = RecipeBuilder.tryToCreateWidenRecipe(
8662 Instr, Operands, Range, VPBB, Plan);
8664 Recipe = RecipeBuilder.handleReplication(Instr, Range, *Plan);
8666 auto *UV = Def->getUnderlyingValue();
8667 Plan->addVPValue(UV, Def);
8670 RecipeBuilder.setRecipe(Instr, Recipe);
8671 if (isa<VPHeaderPHIRecipe>(Recipe)) {
8682 "unexpected recipe needs moving");
8702 assert(isa<VPRegionBlock>(Plan->getVectorLoopRegion()) &&
8703 !Plan->getVectorLoopRegion()->getEntryBasicBlock()->empty() &&
8704 "entry block must be set to a VPRegionBlock having a non-empty entry "
8706 RecipeBuilder.fixHeaderPhis();
8714 adjustRecipesForReductions(LatchVPBB, Plan, RecipeBuilder, Range.Start);
8719 for (const auto *IG : InterleaveGroups) {
8720 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
8721 RecipeBuilder.getRecipe(IG->getInsertPos()));
8723 for (unsigned i = 0; i < IG->getFactor(); ++i)
8724 if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) {
8726 cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI));
8727 StoredValues.push_back(StoreR->getStoredValue());
8730 bool NeedsMaskForGaps =
8733 Recipe->getMask(), NeedsMaskForGaps);
8734 VPIG->insertBefore(Recipe);
8736 for (unsigned i = 0; i < IG->getFactor(); ++i)
8738 VPRecipeBase *MemberR = RecipeBuilder.getRecipe(Member);
8739 if (!Member->getType()->isVoidTy()) {
8750 Plan->setName("Initial VPlan");
8755 auto *StrideV = cast<SCEVUnknown>(Stride)->getValue();
8756 auto *ScevStride = dyn_cast<SCEVConstant>(PSE.getSCEV(StrideV));
8760 Constant *CI = ConstantInt::get(Stride->getType(), ScevStride->getAPInt());
8762 auto *ConstVPV = Plan->getVPValueOrAddLiveIn(CI);
8765 Plan->getVPValueOrAddLiveIn(StrideV)->replaceAllUsesWith(ConstVPV);
8770 Plan->disableValue2VPValue();
8785 bool WithoutRuntimeCheck =
8788 WithoutRuntimeCheck);
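// Note (illustrative): folding the tail by masking replaces the scalar
// remainder loop with a per-iteration lane mask, conceptually
//
//   %mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 %iv, i64 %n)
//
// and WithoutRuntimeCheck selects the DataAndControlFlowWithoutRuntimeCheck
// style, in which the lane mask also drives the loop exit rather than a
// separate trip-count check.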
8808 HCFGBuilder.buildHierarchicalCFG();
8816 *PSE.getSE(), *TLI);
8821 Plan->getVectorLoopRegion()->getExitingBasicBlock()->getTerminator();
8822 Term->eraseFromParent();
8842void LoopVectorizationPlanner::adjustRecipesForReductions(
8845 VPRegionBlock *VectorLoopRegion = Plan->getVectorLoopRegion();
8852 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R))
8855 bool HasIntermediateStore = false;
8860 auto *IS2 = R2->getRecurrenceDescriptor().IntermediateStore;
8861 HasIntermediateStore |= IS1 || IS2;
8882 if (HasIntermediateStore && ReductionPHIList.size() > 1)
8884 R->moveBefore(*Header, Header->getFirstNonPhi());
8887 auto *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
8888 if (!PhiR || !PhiR->isInLoop() || (MinVF.isScalar() && !PhiR->isOrdered()))
8894 "AnyOf reductions are not allowed for in-loop reductions");
8899 for (unsigned I = 0; I != Worklist.size(); ++I) {
8902 auto *UserRecipe = dyn_cast<VPSingleDefRecipe>(U);
8904 assert(isa<VPLiveOut>(U) &&
8905 "U must either be a VPSingleDef or VPLiveOut");
8908 Worklist.insert(UserRecipe);
8921 Instruction *CurrentLinkI = CurrentLink->getUnderlyingInstr();
8924 unsigned IndexOfFirstOperand;
8932 "Expected instruction to be a call to the llvm.fmuladd intrinsic");
8933 assert(((MinVF.isScalar() && isa<VPReplicateRecipe>(CurrentLink)) ||
8934 isa<VPWidenCallRecipe>(CurrentLink)) &&
8935 CurrentLink->getOperand(2) == PreviousLink &&
8936 "expected a call where the previous link is the added operand");
8944 {CurrentLink->getOperand(0), CurrentLink->getOperand(1)},
8946 LinkVPBB->insert(FMulRecipe, CurrentLink->getIterator());
8949 auto *Blend = dyn_cast<VPBlendRecipe>(CurrentLink);
8950 if (PhiR->isInLoop() && Blend) {
8951 assert(Blend->getNumIncomingValues() == 2 &&
8952 "Blend must have 2 incoming values");
8953 if (Blend->getIncomingValue(0) == PhiR)
8954 Blend->replaceAllUsesWith(Blend->getIncomingValue(1));
8956 assert(Blend->getIncomingValue(1) == PhiR &&
8957 "PhiR must be an operand of the blend");
8958 Blend->replaceAllUsesWith(Blend->getIncomingValue(0));
8964 if (isa<VPWidenRecipe>(CurrentLink)) {
8965 assert(isa<CmpInst>(CurrentLinkI) &&
8966 "need to have the compare of the select");
8969 assert(isa<VPWidenSelectRecipe>(CurrentLink) &&
8970 "must be a select recipe");
8971 IndexOfFirstOperand = 1;
8974 "Expected to replace a VPWidenSC");
8975 IndexOfFirstOperand = 0;
8980 CurrentLink->getOperand(IndexOfFirstOperand) == PreviousLink
8981 ? IndexOfFirstOperand + 1
8982 : IndexOfFirstOperand;
8983 VecOp = CurrentLink->getOperand(VecOpId);
8984 assert(VecOp != PreviousLink &&
8985 CurrentLink->getOperand(CurrentLink->getNumOperands() - 1 -
8986 (VecOpId - IndexOfFirstOperand)) ==
8988 "PreviousLink must be the operand other than VecOp");
8997 RdxDesc, CurrentLinkI, PreviousLink, VecOp, CondOp);
9003 CurrentLink->replaceAllUsesWith(RedRecipe);
9004 PreviousLink = RedRecipe;
9009 Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
9022 assert(OrigExitingVPV->getDefiningRecipe()->getParent() != LatchVPBB &&
9023 "reduction recipe must be defined before latch");
9025 std::optional<FastMathFlags> FMFs =
9032 return isa<VPInstruction>(&U) &&
9033 cast<VPInstruction>(&U)->getOpcode() ==
9048 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
9057 Trunc->insertAfter(NewExitingVPV->getDefiningRecipe());
9058 Extnd->insertAfter(Trunc);
9060 PhiR->setOperand(1, Extnd->getVPSingleValue());
9061 NewExitingVPV = Extnd;
9080 ->appendRecipe(FinalReductionResult);
9082 FinalReductionResult,
9089#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
9092 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
9093 IG->getInsertPos()->printAsOperand(O, false);
9103 for (unsigned i = 0; i < IG->getFactor(); ++i) {
9104 if (!IG->getMember(i))
9107 O << "\n" << Indent << " store ";
9109 O << " to index " << i;
9111 O << "\n" << Indent << " ";
9113 O << " = load from index " << i;
9122 "Not a pointer induction according to InductionDescriptor!");
9124 "Unexpected type.");
9127 PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0, true));
9138 "Cannot scalarize a scalable VF");
9141 for (unsigned Part = 0; Part < State.UF; ++Part) {
9145 for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
9147 PartStart, ConstantInt::get(PtrInd->getType(), Lane));
9165 Type *ScStValueType = ScalarStartValue->getType();
9170 NewPointerPhi->addIncoming(ScalarStartValue, VectorPH);
9177 Value *NumUnrolledElems =
9188 NewPointerPhi->addIncoming(InductionGEP, VectorPH);
9193 for (unsigned Part = 0; Part < State.UF; ++Part) {
9195 Value *StartOffsetScalar =
9197 Value *StartOffset =
9204 "scalar step must be the same across all parts");
9211 State.set(this, GEP, Part);
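// Illustrative expansion (assumed shapes): for a pointer induction with
// scalar step S, unroll part P, and lane L, the scalarized form computed
// above is conceptually
//
//   ptr[P][L] = Start + (P * VF + L) * S   // emitted as a GEP per lane
//
// while the vector form keeps NewPointerPhi live across the loop and steps
// it by NumUnrolledElems * S per iteration through InductionGEP.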
9216 assert(!State.Instance && "VPDerivedIVRecipe being replicated.");
9227 Kind, cast_if_present<BinaryOperator>(FPBinOp));
9228 DerivedIV->setName("offset.idx");
9229 assert(DerivedIV != CanonicalIV && "IV didn't need transforming?");
9249 for (unsigned Part = 0; Part < State.UF; ++Part) {
9275 PrevInChain = NewRed;
9282 NewRed, PrevInChain);
9283 } else if (IsOrdered)
9284 NextInChain = NewRed;
9288 State.set(this, NextInChain, Part, true);
9300 if (State.Instance->Lane.isFirstLane()) {
9314 if ((isa<LoadInst>(UI) || isa<StoreInst>(UI)) &&
9316 return Op->isDefinedOutsideVectorRegions();
9320 for (unsigned Part = 1; Part < State.UF; ++Part)
9329 for (unsigned Part = 0; Part < State.UF; ++Part)
9336 if (isa<StoreInst>(UI) &&
9347 for (unsigned Part = 0; Part < State.UF; ++Part)
9348 for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9356 LoadInst *LI = dyn_cast<LoadInst>(&Ingredient);
9357 StoreInst *SI = dyn_cast<StoreInst>(&Ingredient);
9359 assert((LI || SI) && "Invalid Load/Store instruction");
9360 assert((!SI || StoredValue) && "No stored value provided for widened store");
9361 assert((!LI || !StoredValue) && "Stored value provided for widened load");
9369 auto &Builder = State.Builder;
9371 bool isMaskRequired = getMask();
9372 if (isMaskRequired) {
9375 for (unsigned Part = 0; Part < State.UF; ++Part) {
9378 Mask = Builder.CreateVectorReverse(Mask, "reverse");
9379 BlockInMaskParts[Part] = Mask;
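// Illustrative IR (assumed VF = 4): for a loop that walks memory downwards
// ("reverse" access), the mask reversed above and the values loaded or
// stored below are all lane-reversed, conceptually
//
//   %rmask = shufflevector <4 x i1> %mask, <4 x i1> poison,
//                          <4 x i32> <i32 3, i32 2, i32 1, i32 0>
//   %wide  = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %p, i32 4,
//                          <4 x i1> %rmask, <4 x i32> poison)
//   %val   = shufflevector <4 x i32> %wide, <4 x i32> poison,
//                          <4 x i32> <i32 3, i32 2, i32 1, i32 0>
//
// matching the CreateVectorReverse calls in this recipe.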
9387 for (unsigned Part = 0; Part < State.UF; ++Part) {
9389 Value *StoredVal = State.get(StoredValue, Part);
9390 if (CreateGatherScatter) {
9391 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
9393 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
9399 StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse");
9403 auto *VecPtr = State.get(getAddr(), Part, true);
9405 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
9406 BlockInMaskParts[Part]);
9408 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
9416 assert(LI && "Must have a load instruction");
9418 for (unsigned Part = 0; Part < State.UF; ++Part) {
9420 if (CreateGatherScatter) {
9421 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
9423 NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart,
9424 nullptr, "wide.masked.gather");
9427 auto *VecPtr = State.get(getAddr(), Part, true);
9429 NewLI = Builder.CreateMaskedLoad(
9430 DataTy, VecPtr, Alignment, BlockInMaskParts[Part],
9434 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
9439 NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
9508 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
9512 Function *F = L->getHeader()->getParent();
9518 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
9523 LoopVectorizationPlanner LVP(L, LI, DT, TLI, *TTI, LVL, CM, IAI, PSE, Hints,
9543 bool AddBranchWeights =
9545 GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, TTI,
9546 F->getParent()->getDataLayout(), AddBranchWeights);
9548 VF.Width, 1, LVL, &CM, BFI, PSI, Checks);
9550 << L->getHeader()->getParent()->getName() << "\"\n");
9570 if (auto *S = dyn_cast<StoreInst>(&Inst)) {
9571 if (S->getValueOperand()->getType()->isFloatTy())
9581 while (!Worklist.empty()) {
9583 if (!L->contains(I))
9585 if (!Visited.insert(I).second)
9592 if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
9595 I->getDebugLoc(), L->getHeader())
9596 << "floating point conversion changes vector width. "
9597 << "Mixed floating point precision requires an up/down "
9598 << "cast that will negatively impact performance.";
9601 for (Use &Op : I->operands())
9602 if (auto *OpI = dyn_cast<Instruction>(Op))
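// Illustrative trigger (assumed source): the mixed-precision walk above
// warns on loops that round-trip between float widths, e.g.
//
//   for (unsigned i = 0; i < n; ++i)
//     x[i] = (float)((double)x[i] * d);   // fpext feeding the float store
//
// because widening float lanes to double halves the effective vector width.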
9609 std::optional<unsigned> VScale, Loop *L,
9622 << "LV: Interleaving only is not profitable due to runtime checks\n");
9663 unsigned AssumedMinimumVscale = 1;
9665 AssumedMinimumVscale = *VScale;
9666 IntVF *= AssumedMinimumVscale;
9668 double VecCOverVF = double(*VF.Cost.getValue()) / IntVF;
9669 double RtC = *CheckCost.getValue();
9670 double MinTC1 = RtC / (ScalarC - VecCOverVF);
9679 double MinTC2 = RtC * 10 / ScalarC;
9684 uint64_t MinTC = std::ceil(std::max(MinTC1, MinTC2));
9686 MinTC = alignTo(MinTC, IntVF);
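// Worked example (assumed costs): ScalarC = 4, *VF.Cost.getValue() = 8 at
// IntVF = 4, so VecCOverVF = 2; with a runtime-check cost RtC = 20:
//   MinTC1 = RtC / (ScalarC - VecCOverVF) = 20 / (4 - 2) = 10
//   MinTC2 = RtC * 10 / ScalarC = 200 / 4 = 50
//   MinTC  = alignTo(ceil(max(10, 50)), IntVF) = alignTo(50, 4) = 52
// i.e. at least 52 iterations are needed before the checks pay for
// themselves under the 10%-overhead bound encoded by MinTC2.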
9690 dbgs() << "LV: Minimum required TC for runtime checks to be profitable:"
9698 LLVM_DEBUG(dbgs() << "LV: Vectorization is not beneficial: expected "
9699 "trip count < minimum profitable VF ("
9710 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
9712 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
9717 "VPlan-native path is not enabled. Only process inner loops.");
9724 << L->getHeader()->getParent()->getName() << "' from "
9725 << DebugLocStr << "\n");
9730 dbgs() << "LV: Loop hints:"
9741 Function *F = L->getHeader()->getParent();
9763 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
9773 if (!L->isInnermost())
9777 assert(L->isInnermost() && "Inner loop expected.");
9799 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
9800 << "This loop is worth vectorizing only if no scalar "
9801 << "iteration overheads are incurred.");
9803 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
9816 LLVM_DEBUG(dbgs() << " But the target considers the trip count too "
9817 "small to consider vectorizing.\n");
9819 "The trip count is below the minial threshold value.",
9820 "loop trip count is too low, avoiding vectorization",
9821 "LowTripCount", ORE, L);
9830 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
9832 "Can't vectorize when the NoImplicitFloat attribute is used",
9833 "loop not vectorized due to NoImplicitFloat attribute",
9834 "NoImplicitFloat", ORE, L);
9846 "Potentially unsafe FP op prevents vectorization",
9847 "loop not vectorized due to unsafe FP support.",
9848 "UnsafeFP", ORE, L);
9853 bool AllowOrderedReductions;
9863 ExactFPMathInst->getDebugLoc(),
9864 ExactFPMathInst->getParent())
9865 << "loop not vectorized: cannot prove it is safe to reorder "
9866 "floating-point operations";
9868 LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
9869 "reorder floating-point operations\n");
9875 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
9878 LoopVectorizationPlanner LVP(L, LI, DT, TLI, *TTI, &LVL, CM, IAI, PSE, Hints,
9886 std::optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
9891 bool AddBranchWeights =
9894 F->getParent()->getDataLayout(), AddBranchWeights);
9900 unsigned SelectedIC = std::max(IC, UserIC);
9907 bool ForceVectorization =
9909 if (!ForceVectorization &&
9911 *PSE.getSE(), SEL)) {
9914 DEBUG_TYPE, "CantReorderMemOps", L->getStartLoc(),
9916 << "loop not vectorized: cannot prove it is safe to reorder "
9917 "memory operations";
9926 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
9927 bool VectorizeLoop = true, InterleaveLoop = true;
9929 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
9930 VecDiagMsg = std::make_pair(
9931 "VectorizationNotBeneficial",
9932 "the cost-model indicates that vectorization is not beneficial");
9933 VectorizeLoop = false;
9936 if (!MaybeVF && UserIC > 1) {
9939 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
9940 "interleaving should be avoided up front\n");
9941 IntDiagMsg = std::make_pair(
9942 "InterleavingAvoided",
9943 "Ignoring UserIC, because interleaving was avoided up front");
9944 InterleaveLoop = false;
9945 } else if (IC == 1 && UserIC <= 1) {
9948 IntDiagMsg = std::make_pair(
9949 "InterleavingNotBeneficial",
9950 "the cost-model indicates that interleaving is not beneficial");
9951 InterleaveLoop = false;
9953 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
9954 IntDiagMsg.second +=
9955 " and is explicitly disabled or interleave count is set to 1";
9957 } else if (IC > 1 && UserIC == 1) {
9960 dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
9961 IntDiagMsg = std::make_pair(
9962 "InterleavingBeneficialButDisabled",
9963 "the cost-model indicates that interleaving is beneficial "
9964 "but is explicitly disabled or interleave count is set to 1");
9965 InterleaveLoop = false;
9969 IC = UserIC > 0 ? UserIC : IC;
9973 if (!VectorizeLoop && !InterleaveLoop) {
9977 L->getStartLoc(), L->getHeader())
9978 << VecDiagMsg.second;
9982 L->getStartLoc(), L->getHeader())
9983 << IntDiagMsg.second;
9986 } else if (!VectorizeLoop && InterleaveLoop) {
9990 L->getStartLoc(), L->getHeader())
9991 << VecDiagMsg.second;
9993 } else if (VectorizeLoop && !InterleaveLoop) {
9995 << ") in " << DebugLocStr << '\n');
9998 L->getStartLoc(), L->getHeader())
9999 << IntDiagMsg.second;
10001 } else if (VectorizeLoop && InterleaveLoop) {
10003 << ") in " << DebugLocStr << '\n');
10007 bool DisableRuntimeUnroll = false;
10008 MDNode *OrigLoopID = L->getLoopID();
10010 using namespace ore;
10011 if (!VectorizeLoop) {
10012 assert(IC > 1 && "interleave count should not be 1 or 0");
10015 InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
10024 << "interleaved loop (interleaved count: "
10025 << NV("InterleaveCount", IC) << ")";
10040 EPI, &LVL, &CM, BFI, PSI, Checks);
10042 std::unique_ptr<VPlan> BestMainPlan(
10044 const auto &[ExpandedSCEVs, ReductionResumeValues] = LVP.executePlan(
10059 Header->setName("vec.epilog.vector.body");
10069 auto *ExpandR = cast<VPExpandSCEVRecipe>(&R);
10071 ExpandedSCEVs.find(ExpandR->getSCEV())->second);
10075 ExpandR->eraseFromParent();
10082 if (isa<VPCanonicalIVPHIRecipe>(&R))
10085 Value *ResumeV = nullptr;
10087 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) {
10088 ResumeV = ReductionResumeValues
10089 .find(&ReductionPhi->getRecurrenceDescriptor())
10097 if (auto *Ind = dyn_cast<VPWidenPointerInductionRecipe>(&R)) {
10098 IndPhi = cast<PHINode>(Ind->getUnderlyingValue());
10099 ID = &Ind->getInductionDescriptor();
10101 auto *WidenInd = cast<VPWidenIntOrFpInductionRecipe>(&R);
10102 IndPhi = WidenInd->getPHINode();
10103 ID = &WidenInd->getInductionDescriptor();
10110 assert(ResumeV && "Must have a resume value");
10112 cast<VPHeaderPHIRecipe>(&R)->setStartValue(StartVal);
10116 DT, true, &ExpandedSCEVs);
10117 ++LoopsEpilogueVectorized;
10120 DisableRuntimeUnroll = true;
10134 DisableRuntimeUnroll = true;
10144 std::optional<MDNode *> RemainderLoopID =
10147 if (RemainderLoopID) {
10148 L->setLoopID(*RemainderLoopID);
10150 if (DisableRuntimeUnroll)
10189 bool Changed = false, CFGChanged = false;
10196 for (const auto &L : *LI)
10197 Changed |= CFGChanged |=
10208 LoopsAnalyzed += Worklist.size();
10211 while (!Worklist.empty()) {
10257 runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AC, LAIs, ORE, PSI);
10258 if (!Result.MadeAnyChange)
10277 if (Result.MadeCFGChange) {
10293 OS, MapClassName2PassName);
10296 OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
10297 OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
This file implements a class to represent arbitrary precision integral constant values and operations...
This file contains the simple types necessary to represent the attributes associated with functions a...
This is the interface for LLVM's primary stateless and local alias analysis.
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static cl::opt< TargetTransformInfo::TargetCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(TargetTransformInfo::TCK_RecipThroughput), cl::values(clEnumValN(TargetTransformInfo::TCK_RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(TargetTransformInfo::TCK_Latency, "latency", "Instruction latency"), clEnumValN(TargetTransformInfo::TCK_CodeSize, "code-size", "Code size"), clEnumValN(TargetTransformInfo::TCK_SizeAndLatency, "size-latency", "Code size and latency")))
#define DEBUG_WITH_TYPE(TYPE, X)
DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.
This file defines DenseMapInfo traits for DenseMap.
This file defines the DenseMap class.
This is the interface for a simple mod/ref and alias analysis over globals.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
This header provides classes for managing per-loop analyses.
static const char * VerboseDebug
This file defines the LoopVectorizationLegality class.
This file provides a LoopVectorizationPlanner class.
static void collectSupportedLoops(Loop &L, LoopInfo *LI, OptimizationRemarkEmitter *ORE, SmallVectorImpl< Loop * > &V)
static cl::opt< unsigned > EpilogueVectorizationForceVF("epilogue-vectorization-force-VF", cl::init(1), cl::Hidden, cl::desc("When epilogue vectorization is enabled, and a value greater than " "1 is specified, forces the given VF for all applicable epilogue " "loops."))
static cl::opt< TailFoldingStyle > ForceTailFoldingStyle("force-tail-folding-style", cl::desc("Force the tail folding style"), cl::init(TailFoldingStyle::None), cl::values(clEnumValN(TailFoldingStyle::None, "none", "Disable tail folding"), clEnumValN(TailFoldingStyle::Data, "data", "Create lane mask for data only, using active.lane.mask intrinsic"), clEnumValN(TailFoldingStyle::DataWithoutLaneMask, "data-without-lane-mask", "Create lane mask with compare/stepvector"), clEnumValN(TailFoldingStyle::DataAndControlFlow, "data-and-control", "Create lane mask using active.lane.mask intrinsic, and use " "it for both data and control flow"), clEnumValN(TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck, "data-and-control-without-rt-check", "Similar to data-and-control, but remove the runtime check")))
static ElementCount determineVPlanVF(const TargetTransformInfo &TTI, LoopVectorizationCostModel &CM)
static std::optional< unsigned > getSmallBestKnownTC(ScalarEvolution &SE, Loop *L)
Returns "best known" trip count for the specified loop L as defined by the following procedure: 1) Re...
static cl::opt< unsigned > VectorizeMemoryCheckThreshold("vectorize-memory-check-threshold", cl::init(128), cl::Hidden, cl::desc("The maximum allowed number of runtime memory checks"))
static cl::opt< unsigned > TinyTripCountVectorThreshold("vectorizer-min-trip-count", cl::init(16), cl::Hidden, cl::desc("Loops with a constant trip count that is smaller than this " "value are vectorized only if no scalar iteration overheads " "are incurred."))
Loops with a known constant trip count below this number are vectorized only if no scalar iteration o...
static void createAndCollectMergePhiForReduction(VPInstruction *RedResult, DenseMap< const RecurrenceDescriptor *, Value * > &ReductionResumeValues, VPTransformState &State, Loop *OrigLoop, BasicBlock *LoopMiddleBlock)
static void debugVectorizationMessage(const StringRef Prefix, const StringRef DebugMsg, Instruction *I)
Write a DebugMsg about vectorization to the debug output stream.
static cl::opt< bool > EnableCondStoresVectorization("enable-cond-stores-vec", cl::init(true), cl::Hidden, cl::desc("Enable if predication of stores during vectorization."))
static VPWidenIntOrFpInductionRecipe * createWidenInductionRecipes(PHINode *Phi, Instruction *PhiOrTrunc, VPValue *Start, const InductionDescriptor &IndDesc, VPlan &Plan, ScalarEvolution &SE, Loop &OrigLoop, VFRange &Range)
Creates a VPWidenIntOrFpInductionRecipe for Phi.
static Value * interleaveVectors(IRBuilderBase &Builder, ArrayRef< Value * > Vals, const Twine &Name)
Return a vector containing interleaved elements from multiple smaller input vectors.
static Value * emitTransformedIndex(IRBuilderBase &B, Value *Index, Value *StartValue, Value *Step, InductionDescriptor::InductionKind InductionKind, const BinaryOperator *InductionBinOp)
Compute the transformed value of Index at offset StartValue using step StepValue.
static DebugLoc getDebugLocFromInstOrOperands(Instruction *I)
Look for a meaningful debug location on the instruction or its operands.
static void emitInvalidCostRemarks(SmallVector< InstructionVFPair > InvalidCosts, OptimizationRemarkEmitter *ORE, Loop *TheLoop)
const char LLVMLoopVectorizeFollowupAll[]
static cl::opt< bool > ForceTargetSupportsScalableVectors("force-target-supports-scalable-vectors", cl::init(false), cl::Hidden, cl::desc("Pretend that scalable vectors are supported, even if the target does " "not support them. This flag should only be used for testing."))
static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, bool HasNUW, DebugLoc DL)
static std::optional< unsigned > getVScaleForTuning(const Loop *L, const TargetTransformInfo &TTI)
Convenience function that returns the value of vscale_range iff vscale_range.min == vscale_range....
static bool useActiveLaneMaskForControlFlow(TailFoldingStyle Style)
static constexpr uint32_t MemCheckBypassWeights[]
static Type * MaybeVectorizeType(Type *Elt, ElementCount VF)
static cl::opt< unsigned > ForceTargetInstructionCost("force-target-instruction-cost", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's expected cost for " "an instruction to a single constant value. Mostly " "useful for getting consistent testing."))
std::optional< unsigned > getMaxVScale(const Function &F, const TargetTransformInfo &TTI)
static constexpr uint32_t MinItersBypassWeights[]
static cl::opt< unsigned > ForceTargetNumScalarRegs("force-target-num-scalar-regs", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's number of scalar registers."))
static cl::opt< bool > UseWiderVFIfCallVariantsPresent("vectorizer-maximize-bandwidth-for-vector-calls", cl::init(true), cl::Hidden, cl::desc("Try wider VFs if they enable the use of vector variants"))
static Type * smallestIntegerVectorType(Type *T1, Type *T2)
static cl::opt< unsigned > SmallLoopCost("small-loop-cost", cl::init(20), cl::Hidden, cl::desc("The cost of a loop that is considered 'small' by the interleaver."))
static cl::opt< unsigned > ForceTargetNumVectorRegs("force-target-num-vector-regs", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's number of vector registers."))
static bool areRuntimeChecksProfitable(GeneratedRTChecks &Checks, VectorizationFactor &VF, std::optional< unsigned > VScale, Loop *L, ScalarEvolution &SE, ScalarEpilogueLowering SEL)
static bool isExplicitVecOuterLoop(Loop *OuterLp, OptimizationRemarkEmitter *ORE)
static cl::opt< bool > EnableIndVarRegisterHeur("enable-ind-var-reg-heur", cl::init(true), cl::Hidden, cl::desc("Count the induction variable only once when interleaving"))
static cl::opt< bool > EnableEpilogueVectorization("enable-epilogue-vectorization", cl::init(true), cl::Hidden, cl::desc("Enable vectorization of epilogue loops."))
static ScalarEpilogueLowering getScalarEpilogueLowering(Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, LoopVectorizationLegality &LVL, InterleavedAccessInfo *IAI)
const char VerboseDebug[]
static cl::opt< bool > PreferPredicatedReductionSelect("prefer-predicated-reduction-select", cl::init(false), cl::Hidden, cl::desc("Prefer predicating a reduction operation over an after loop select."))
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop, Instruction *I)
Create an analysis remark that explains why vectorization failed.
static constexpr uint32_t SCEVCheckBypassWeights[]
static cl::opt< bool > PreferInLoopReductions("prefer-inloop-reductions", cl::init(false), cl::Hidden, cl::desc("Prefer in-loop vector reductions, " "overriding the targets preference."))
static cl::opt< unsigned > EpilogueVectorizationMinVF("epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden, cl::desc("Only loops with vectorization factor equal to or larger than " "the specified value are considered for epilogue vectorization."))
const char LLVMLoopVectorizeFollowupVectorized[]
static cl::opt< bool > EnableLoadStoreRuntimeInterleave("enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, cl::desc("Enable runtime interleaving until load/store ports are saturated"))
static cl::opt< bool > VPlanBuildStressTest("vplan-build-stress-test", cl::init(false), cl::Hidden, cl::desc("Build VPlan for every supported loop nest in the function and bail " "out right after the build (stress test the VPlan H-CFG construction " "in the VPlan-native vectorization path)."))
static bool hasIrregularType(Type *Ty, const DataLayout &DL)
A helper function that returns true if the given type is irregular.
static cl::opt< bool > LoopVectorizeWithBlockFrequency("loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, cl::desc("Enable the use of the block frequency analysis to access PGO " "heuristics minimizing code growth in cold regions and being more " "aggressive in hot regions."))
static std::string getDebugLocString(const Loop *L)
static Value * getExpandedStep(const InductionDescriptor &ID, const SCEV2ValueTy &ExpandedSCEVs)
Return the expanded step for ID using ExpandedSCEVs to look up SCEV expansion results.
const char LLVMLoopVectorizeFollowupEpilogue[]
static bool useActiveLaneMask(TailFoldingStyle Style)
static Type * largestIntegerVectorType(Type *T1, Type *T2)
static void addUsersInExitBlock(VPBasicBlock *HeaderVPBB, Loop *OrigLoop, VPlan &Plan)
static bool isIndvarOverflowCheckKnownFalse(const LoopVectorizationCostModel *Cost, ElementCount VF, std::optional< unsigned > UF=std::nullopt)
For the given VF and UF and maximum trip count computed for the loop, return whether the induction va...
static cl::opt< PreferPredicateTy::Option > PreferPredicateOverEpilogue("prefer-predicate-over-epilogue", cl::init(PreferPredicateTy::ScalarEpilogue), cl::Hidden, cl::desc("Tail-folding and predication preferences over creating a scalar " "epilogue loop."), cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue, "scalar-epilogue", "Don't tail-predicate loops, create scalar epilogue"), clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue, "predicate-else-scalar-epilogue", "prefer tail-folding, create scalar epilogue if tail " "folding fails."), clEnumValN(PreferPredicateTy::PredicateOrDontVectorize, "predicate-dont-vectorize", "prefers tail-folding, don't attempt vectorization if " "tail-folding fails.")))
static cl::opt< bool > EnableInterleavedMemAccesses("enable-interleaved-mem-accesses", cl::init(false), cl::Hidden, cl::desc("Enable vectorization on interleaved memory accesses in a loop"))
static cl::opt< bool > EnableMaskedInterleavedMemAccesses("enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden, cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"))
An interleave-group may need masking if it resides in a block that needs predication,...
static cl::opt< bool > ForceOrderedReductions("force-ordered-reductions", cl::init(false), cl::Hidden, cl::desc("Enable the vectorisation of loops with in-order (strict) " "FP reductions"))
static void cse(BasicBlock *BB)
Perform cse of induction variable instructions.
static unsigned getReciprocalPredBlockProb()
A helper function that returns the reciprocal of the block probability of predicated blocks.
static const SCEV * getAddressAccessSCEV(Value *Ptr, LoopVectorizationLegality *Legal, PredicatedScalarEvolution &PSE, const Loop *TheLoop)
Gets Address Access SCEV after verifying that the access pattern is loop invariant except the inducti...
static cl::opt< cl::boolOrDefault > ForceSafeDivisor("force-widen-divrem-via-safe-divisor", cl::Hidden, cl::desc("Override cost based safe divisor widening for div/rem instructions"))
static cl::opt< unsigned > ForceTargetMaxVectorInterleaveFactor("force-target-max-vector-interleave", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's max interleave factor for " "vectorized loops."))
static bool processLoopInVPlanNativePath(Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints, LoopVectorizationRequirements &Requirements)
static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI)
static cl::opt< unsigned > NumberOfStoresToPredicate("vectorize-num-stores-pred", cl::init(1), cl::Hidden, cl::desc("Max number of stores to be predicated behind an if."))
The number of stores in a loop that are allowed to need predication.
static void AddRuntimeUnrollDisableMetaData(Loop *L)
static cl::opt< unsigned > MaxNestedScalarReductionIC("max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, cl::desc("The maximum interleave count to use when interleaving a scalar " "reduction in a nested loop."))
static cl::opt< unsigned > ForceTargetMaxScalarInterleaveFactor("force-target-max-scalar-interleave", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's max interleave factor for " "scalar loops."))
static cl::opt< bool > PrintVPlansInDotFormat("vplan-print-in-dot-format", cl::Hidden, cl::desc("Use dot format instead of plain text when dumping VPlans"))
static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE)
static cl::opt< bool > MaximizeBandwidth("vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden, cl::desc("Maximize bandwidth when selecting vectorization factor which " "will be determined by the smallest type in loop."))
This file implements a map that provides insertion order iteration.
Module.h This file contains the declarations for the Module class.
This file contains the declarations for profiling metadata utility functions.
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
This defines the Use class.
This file defines the VPlanHCFGBuilder class which contains the public interface (buildHierarchicalCF...
This file declares the class VPlanVerifier, which contains utility functions to check the consistency...
This file contains the declarations of the Vectorization Plan base classes:
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
int64_t getSExtValue() const
Get sign extended value.
A container for analyses that lazily runs them and caches their results.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
A function analysis which provides an AssumptionCache.
A cache of @llvm.assume calls within a function.
void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
unsigned getVScaleRangeMin() const
Returns the minimum value for the vscale_range attribute.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
InstListType::const_iterator getFirstNonPHIIt() const
Iterator returning form of getFirstNonPHI.
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Function * getParent() const
Return the enclosing method, or null if none.
InstListType::iterator iterator
Instruction iterators...
LLVMContext & getContext() const
Get the context in which this basic block lives.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
BinaryOps getOpcode() const
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
void setCondition(Value *V)
static BranchInst * Create(BasicBlock *IfTrue, BasicBlock::iterator InsertBefore)
bool isConditional() const
BasicBlock * getSuccessor(unsigned i) const
Value * getCondition() const
Represents analyses that only rely on functions' control flow.
bool isNoBuiltin() const
Return true if the call should not be treated as a call to a builtin.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Value * getArgOperand(unsigned i) const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_UGT
unsigned greater than
@ ICMP_ULT
unsigned less than
@ ICMP_ULE
unsigned less or equal
static ConstantInt * getTrue(LLVMContext &Context)
static ConstantInt * getFalse(LLVMContext &Context)
This is an important base class in LLVM.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
An analysis that produces DemandedBits for a function.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
iterator find(const_arg_type_t< KeyT > Val)
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
const ValueT & at(const_arg_type_t< KeyT > Val) const
at - Return the entry for the specified key, or abort if no such entry exists.
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
DomTreeNodeBase * getIDom() const
Analysis pass which computes a DominatorTree.
bool verify(VerificationLevel VL=VerificationLevel::Full) const
verify - checks if the tree is correct.
void changeImmediateDominator(DomTreeNodeBase< NodeT > *N, DomTreeNodeBase< NodeT > *NewIDom)
changeImmediateDominator - This method is used to update the dominator tree information when a node's...
DomTreeNodeBase< NodeT > * addNewBlock(NodeT *BB, NodeT *DomBB)
Add a new node to the dominator tree information.
void eraseNode(NodeT *BB)
eraseNode - Removes a node from the dominator tree.
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
constexpr bool isVector() const
One or more elements.
static constexpr ElementCount getScalable(ScalarTy MinVal)
static constexpr ElementCount getFixed(ScalarTy MinVal)
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
constexpr bool isScalar() const
Exactly one element.
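ElementCount is how the vectorizer describes a VF without committing to fixed or scalable vectors. A minimal sketch using only the accessors listed here:

#include "llvm/Support/TypeSize.h"

using namespace llvm;

void elementCountDemo() {
  ElementCount Fixed4 = ElementCount::getFixed(4);   // as in <4 x Ty>
  ElementCount Scal2 = ElementCount::getScalable(2); // <vscale x 2 x Ty>
  ElementCount One = ElementCount::getFixed(1);
  (void)Fixed4.isVector();  // true for <4 x Ty>
  (void)One.isScalar();     // true: exactly one element
  (void)Scal2.isScalable(); // true: scaled by vscale at run time
  ElementCount Scal4 = Scal2.multiplyCoefficientBy(2); // <vscale x 4 x Ty>
  (void)Scal4;
}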
BasicBlock * emitMinimumVectorEpilogueIterCountCheck(BasicBlock *Bypass, BasicBlock *Insert)
Emits an iteration count bypass check after the main vector loop has finished to see if there are any iterations to execute by either the vector epilogue or the scalar epilogue.
void printDebugTracesAtEnd() override
EpilogueVectorizerEpilogueLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &Checks)
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace information is enabled.
std::pair< BasicBlock *, Value * > createEpilogueVectorizedLoopSkeleton(const SCEV2ValueTy &ExpandedSCEVs) final
Implements the interface for creating a vectorized skeleton using the epilogue loop strategy (i.e., the second pass of vplan execution).
A specialized derived class of inner loop vectorizer that performs vectorization of main loops in the...
EpilogueVectorizerMainLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
void printDebugTracesAtEnd() override
std::pair< BasicBlock *, Value * > createEpilogueVectorizedLoopSkeleton(const SCEV2ValueTy &ExpandedSCEVs) final
Implements the interface for creating a vectorized skeleton using the main loop strategy (i.e., the first pass of vplan execution).
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace information is enabled.
BasicBlock * emitIterationCountCheck(BasicBlock *Bypass, bool ForEpilogue)
Emits an iteration count bypass check once for the main loop (when ForEpilogue is false) and once for the epilogue loop (when ForEpilogue is true).
FastMathFlags getFastMathFlags() const
Convenience function for getting all the fast-math flags.
Convenience struct for specifying and reasoning about fast-math flags.
Class to represent function types.
param_iterator param_begin() const
param_iterator param_end() const
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Module * getParent()
Get the module that this global value is contained inside of...
Common base class shared among various IRBuilders.
Value * CreateNeg(Value *V, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Value * CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name="")
Return a vector value that contains V broadcasted to NumElts elements.
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
ConstantInt * getTrue()
Get the constant value for i1 true.
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
CallInst * CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Load intrinsic.
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
BasicBlock::iterator GetInsertPoint() const
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Value * CreateVectorReverse(Value *V, const Twine &Name="")
Return a vector value that contains the vector V reversed.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Value * CreateBitOrPointerCast(Value *V, Type *DestTy, const Twine &Name="")
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
InstTy * Insert(InstTy *I, const Twine &Name="") const
Insert and return the specified instruction.
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
CallInst * CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment, Value *Mask)
Create a call to Masked Store intrinsic.
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
ConstantInt * getFalse()
Get the constant value for i1 false.
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", bool IsInBounds=false)
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Value * CreateURem(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateStepVector(Type *DstType, const Twine &Name="")
Creates a vector of type DstType with the linear sequence <0, 1, ...>
Value * CreateSExtOrTrunc(Value *V, Type *DestTy, const Twine &Name="")
Create a SExt or Trunc from the integer value V to DestTy.
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.
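A hedged sketch of these IRBuilder calls in the shape of the vectorizer's widened output: broadcast a scalar, add a step vector, and store under a per-lane mask. emitWidenedAdd and its parameters are hypothetical; it assumes ScalarOp is an integer (CreateStepVector requires an integer vector type) and that Mask is a VF-wide i1 vector:

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

void emitWidenedAdd(BasicBlock *BB, Value *ScalarOp, Value *VecPtr,
                    Value *Mask, unsigned VF) {
  IRBuilder<> Builder(BB->getContext());
  Builder.SetInsertPoint(BB); // append new instructions to the end of BB
  // Broadcast the scalar operand to a VF-wide vector.
  Value *Splat = Builder.CreateVectorSplat(VF, ScalarOp, "splat");
  // <0, 1, ..., VF-1>, with the same (integer) vector type as the splat.
  Value *Step = Builder.CreateStepVector(Splat->getType(), "step");
  Value *Sum = Builder.CreateAdd(Splat, Step, "wide.add");
  // Store the result, disabling lanes where Mask is false.
  Builder.CreateMaskedStore(Sum, VecPtr, Align(16), Mask);
}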
A struct for saving information about induction variables.
BinaryOperator * getInductionBinOp() const
InductionKind getKind() const
const SCEV * getStep() const
InductionKind
This enum represents the kinds of inductions that we support.
@ IK_NoInduction
Not an induction variable.
@ IK_FpInduction
Floating point induction variable.
@ IK_PtrInduction
Pointer induction var. Step = C.
@ IK_IntInduction
Integer induction variable. Step = C.
const SmallVectorImpl< Instruction * > & getCastInsts() const
Returns a reference to the type cast instructions in the induction update chain, that are redundant when guarded with a runtime SCEV overflow check.
Value * getStartValue() const
An extension of the inner loop vectorizer that creates a skeleton for a vectorized loop that has its epilogue (residual) also vectorized.
InnerLoopAndEpilogueVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &Checks)
virtual std::pair< BasicBlock *, Value * > createEpilogueVectorizedLoopSkeleton(const SCEV2ValueTy &ExpandedSCEVs)=0
The interface for creating a vectorized skeleton using one of two different strategies, each corresponding to one execution of the vplan as described above.
std::pair< BasicBlock *, Value * > createVectorizedLoopSkeleton(const SCEV2ValueTy &ExpandedSCEVs) final
Create a new empty loop that will contain vectorized instructions later on, while the old loop will be used as the scalar remainder.
EpilogueLoopVectorizationInfo & EPI
Holds and updates state information required to vectorize the main loop and its epilogue in two separate passes.
InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, unsigned UnrollFactor, LoopVectorizationLegality *LVL, LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
InnerLoopVectorizer vectorizes loops which contain only one basic block to a specified vectorization factor (VF).
virtual void printDebugTracesAtStart()
Allow subclasses to override and print debug traces before/after vplan execution, when trace information is enabled.
PHINode * createInductionResumeValue(PHINode *OrigPhi, const InductionDescriptor &ID, Value *Step, ArrayRef< BasicBlock * > BypassBlocks, std::pair< BasicBlock *, Value * > AdditionalBypass={nullptr, nullptr})
Create a new phi node for the induction variable OrigPhi to resume iteration count in the scalar epilogue, from where the vectorized loop left off.
void scalarizeInstruction(const Instruction *Instr, VPReplicateRecipe *RepRecipe, const VPIteration &Instance, VPTransformState &State)
A helper function to scalarize a single Instruction in the innermost loop.
BasicBlock * LoopScalarBody
The scalar loop body.
Value * TripCount
Trip count of the original loop.
void sinkScalarOperands(Instruction *PredInst)
Iteratively sink the scalarized operands of a predicated instruction into the block that was created for it.
const TargetLibraryInfo * TLI
Target Library Info.
DenseMap< PHINode *, Value * > IVEndValues
ElementCount MinProfitableTripCount
Value * createBitOrPointerCast(Value *V, VectorType *DstVTy, const DataLayout &DL)
Returns a bitcasted value to the requested vector type.
const TargetTransformInfo * TTI
Target Transform Info.
Value * VectorTripCount
Trip count of the widened loop (TripCount - TripCount % (VF*UF))
bool areSafetyChecksAdded()
InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, ElementCount VecWidth, ElementCount MinProfitableTripCount, unsigned UnrollFactor, LoopVectorizationLegality *LVL, LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
BasicBlock * emitSCEVChecks(BasicBlock *Bypass)
Emit a bypass check to see if all of the SCEV assumptions we've had to make are correct.
LoopVectorizationCostModel * Cost
The profitability analysis.
SmallMapVector< const RecurrenceDescriptor *, PHINode *, 4 > ReductionResumeValues
BlockFrequencyInfo * BFI
BFI and PSI are used to check for profile guided size optimizations.
Value * getTripCount() const
Returns the original loop trip count.
BasicBlock * LoopMiddleBlock
Middle block between the vector and the scalar loop.
OptimizationRemarkEmitter * ORE
Interface to emit optimization remarks.
void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State)
Create code for the loop exit value of the reduction.
SmallVector< Instruction *, 4 > PredicatedInstructions
Store instructions that were predicated.
void createVectorLoopSkeleton(StringRef Prefix)
Emit basic blocks (prefixed with Prefix) for the iteration check, vector loop preheader, middle block and scalar preheader.
BasicBlock * completeLoopSkeleton()
Complete the loop skeleton by adding debug MDs, creating appropriate conditional branches in the middle block, preparing the builder and running the verifier.
BasicBlock * emitMemRuntimeChecks(BasicBlock *Bypass)
Emit bypass checks to check any memory assumptions we may have made.
BasicBlock * LoopScalarPreHeader
The scalar-loop preheader.
LoopVectorizationLegality * Legal
The legality analysis.
void emitIterationCountCheck(BasicBlock *Bypass)
Emit a bypass check to see if the vector trip count is zero, including if it overflows.
PredicatedScalarEvolution & PSE
A wrapper around ScalarEvolution used to add runtime SCEV checks.
void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II, Value *VectorTripCount, Value *EndValue, BasicBlock *MiddleBlock, BasicBlock *VectorHeader, VPlan &Plan, VPTransformState &State)
Set up the values of the IVs correctly when exiting the vector loop.
void createInductionResumeValues(const SCEV2ValueTy &ExpandedSCEVs, std::pair< BasicBlock *, Value * > AdditionalBypass={nullptr, nullptr})
Create new phi nodes for the induction variables to resume iteration count in the scalar epilogue, from where the vectorized loop left off.
void fixNonInductionPHIs(VPlan &Plan, VPTransformState &State)
Fix the non-induction PHIs in Plan.
DominatorTree * DT
Dominator Tree.
void setTripCount(Value *TC)
Used to set the trip count after ILV's construction and after the preheader block has been executed.
bool OptForSizeBasedOnProfile
BasicBlock * LoopVectorPreHeader
The vector-loop preheader.
bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc)
Returns true if the reordering of FP operations is not allowed, but we are able to vectorize with strict in-order reductions for the given RdxDesc.
virtual void printDebugTracesAtEnd()
AssumptionCache * AC
Assumption Cache.
Value * getOrCreateVectorTripCount(BasicBlock *InsertBlock)
Returns (and creates if needed) the trip count of the widened loop.
IRBuilder Builder
The builder that we use.
void vectorizeInterleaveGroup(const InterleaveGroup< Instruction > *Group, ArrayRef< VPValue * > VPDefs, VPTransformState &State, VPValue *Addr, ArrayRef< VPValue * > StoredValues, VPValue *BlockInMask, bool NeedsMaskForGaps)
Try to vectorize interleaved access group Group with the base address given in Addr, optionally masking the vector operations if BlockInMask is non-null.
void fixFixedOrderRecurrence(VPFirstOrderRecurrencePHIRecipe *PhiR, VPTransformState &State)
Create the exit value of first order recurrences in the middle block and update their users.
virtual std::pair< BasicBlock *, Value * > createVectorizedLoopSkeleton(const SCEV2ValueTy &ExpandedSCEVs)
Create a new empty loop that will contain vectorized instructions later on, while the old loop will be used as the scalar remainder.
unsigned UF
The vectorization unroll factor to use.
void fixVectorizedLoop(VPTransformState &State, VPlan &Plan)
Fix the vectorized code, taking care of header phi's, live-outs, and more.
BasicBlock * LoopExitBlock
The unique ExitBlock of the scalar loop if one exists.
SmallVector< BasicBlock *, 4 > LoopBypassBlocks
A list of all bypass blocks. The first block is the entry of the loop.
GeneratedRTChecks & RTChecks
Structure to hold information about generated runtime checks, responsible for cleaning the checks, if vectorization turns out unprofitable.
virtual ~InnerLoopVectorizer()=default
ElementCount VF
The vectorization SIMD factor to use.
Loop * OrigLoop
The original loop.
static InstructionCost getInvalid(CostType Val=0)
static InstructionCost getMax()
std::optional< CostType > getValue() const
This function is intended to be used as sparingly as possible, since the class provides the full range of operator support required for arithmetic and comparisons.
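InstructionCost pairs the numeric cost with a validity state, so an invalid cost poisons any arithmetic it participates in rather than being dropped. A minimal sketch:

#include <optional>
#include "llvm/Support/InstructionCost.h"

using namespace llvm;

void costDemo() {
  InstructionCost C = 4;
  C += 2; // arithmetic stays inside the class
  InstructionCost Invalid = InstructionCost::getInvalid();
  (void)(C + Invalid).isValid(); // false: invalidity propagates
  if (std::optional<InstructionCost::CostType> V = C.getValue())
    (void)*V; // escape hatch to the raw number; use sparingly
}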
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not have a module.
const BasicBlock * getParent() const
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
void replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB)
Replace specified successor OldBB to point at the provided block.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other instructions.
FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports these flags.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos lives in, right before MovePos.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
The group of interleaved loads/stores sharing the same stride and close to each other.
uint32_t getFactor() const
InstTy * getMember(uint32_t Index) const
Get the member with the given index Index.
uint32_t getIndex(const InstTy *Instr) const
Get the index for the given member.
InstTy * getInsertPos() const
void addMetadata(InstTy *NewInst) const
Add metadata (e.g., alias info) from the instructions in this group to NewInst.
Drive the analysis of interleaved memory accesses in the loop.
InterleaveGroup< Instruction > * getInterleaveGroup(const Instruction *Instr) const
Get the interleave group that Instr belongs to.
bool requiresScalarEpilogue() const
Returns true if an interleaved group that may access memory out-of-bounds requires a scalar epilogue iteration for correctness.
bool isInterleaved(Instruction *Instr) const
Check if Instr belongs to any interleave group.
bool invalidateGroups()
Invalidate groups, e.g., in case all blocks in loop will be predicated contrary to original assumption.
iterator_range< SmallPtrSetIterator< llvm::InterleaveGroup< Instruction > * > > getInterleaveGroups()
void analyzeInterleaving(bool EnableMaskedInterleavedGroup)
Analyze the interleaved accesses and collect them in interleave groups.
void invalidateGroupsRequiringScalarEpilogue()
Invalidate groups that require a scalar epilogue (due to gaps).
Interval Class - An Interval is a set of nodes defined such that every node in the interval has all of its predecessors in the interval (except the header).
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
This analysis provides dependence information for the memory accesses of a loop.
Drive the analysis of memory accesses in the loop.
const RuntimePointerChecking * getRuntimePointerChecking() const
unsigned getNumRuntimePointerChecks() const
Number of memchecks required to prove independence of otherwise may-alias pointers.
const DenseMap< Value *, const SCEV * > & getSymbolicStrides() const
If an access has a symbolic stride, this maps the pointer value to the stride symbol.
Analysis pass that exposes the LoopInfo for a function.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within this loop.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
void getExitBlocks(SmallVectorImpl< BlockT * > &ExitBlocks) const
Return all of the successor blocks of this loop.
BlockT * getHeader() const
unsigned getLoopDepth() const
Return the nesting level of this loop.
void addBasicBlockToLoop(BlockT *NewBB, LoopInfoBase< BlockT, LoopT > &LI)
This method is used by other analyses to update loop information.
iterator_range< block_iterator > blocks() const
BlockT * getLoopPreheader() const
If there is a preheader for this loop, return it.
ArrayRef< BlockT * > getBlocks() const
Get a list of the basic blocks which make up this loop.
BlockT * getExitingBlock() const
If getExitingBlocks would return exactly one block, return that block.
bool isLoopExiting(const BlockT *BB) const
True if terminator in the block can branch to another block that is outside of the current loop.
BlockT * getUniqueExitBlock() const
If getUniqueExitBlocks would return exactly one block, return that block.
Store the result of a depth first search within basic blocks contained by a single loop.
RPOIterator beginRPO() const
Reverse iterate over the cached postorder blocks.
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
RPOIterator endRPO() const
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse post-order traversal of a loop.
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
void removeBlock(BlockT *BB)
This method completely removes BB from all data structures, including all of the Loop objects it is nested in and our mapping from BasicBlocks to loops.
LoopT * getLoopFor(const BlockT *BB) const
Return the innermost loop that BB lives in.
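A sketch combining these loop queries with a LoopBlocksRPO traversal, the order in which the vectorizer's analyses generally visit blocks; visitLoop is a hypothetical helper that assumes the loop is in simplified form:

#include <cassert>
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"

using namespace llvm;

void visitLoop(Loop *L, LoopInfo *LI) {
  // Require simplified form: a preheader and a single latch.
  if (!L->isInnermost() || !L->getLoopPreheader() || !L->getLoopLatch())
    return;
  LoopBlocksRPO RPOT(L);
  RPOT.perform(LI); // run the DFS and cache the reverse post-order
  for (BasicBlock *BB : RPOT)
    assert(LI->getLoopFor(BB) == L); // innermost: every block maps to L
}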
LoopVectorizationCostModel - estimates the expected speedups due to vectorization.
SmallPtrSet< Type *, 16 > ElementTypesInLoop
All element types found in the loop.
bool interleavedAccessCanBeWidened(Instruction *I, ElementCount VF)
Returns true if I is a memory instruction in an interleaved-group of memory accesses that can be vectorized with wide vector loads/stores and shuffles.
void collectElementTypesForWidening()
Collect all element types in the loop for which widening is needed.
bool canVectorizeReductions(ElementCount VF) const
Returns true if the target machine supports all of the reduction variables found for the given VF.
bool requiresScalarEpilogue(VFRange Range) const
Returns true if we're required to use a scalar epilogue for at least the final iteration of the original loop for all VFs in Range.
bool isPredicatedInst(Instruction *I) const
Returns true if I is an instruction that needs to be predicated at runtime.
bool hasPredStores() const
void collectValuesToIgnore()
Collect values we want to ignore in the cost model.
void collectInLoopReductions()
Split reductions into those that happen in the loop, and those that happen outside.
bool isAccessInterleaved(Instruction *Instr)
Check if Instr belongs to any interleaved access group.
std::pair< unsigned, unsigned > getSmallestAndWidestTypes()
bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be uniform after vectorization.
PredicatedScalarEvolution & PSE
Predicated scalar evolution analysis.
const LoopVectorizeHints * Hints
Loop Vectorize Hint.
const TargetTransformInfo & TTI
Vector target information.
LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, LoopVectorizationLegality *Legal, const TargetTransformInfo &TTI, const TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, const Function *F, const LoopVectorizeHints *Hints, InterleavedAccessInfo &IAI)
const Function * TheFunction
LoopVectorizationLegality * Legal
Vectorization legality.
bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const
Returns true if the target machine supports masked load operation for the given DataType and kind of access to Ptr.
std::pair< InstructionCost, bool > VectorizationCostTy
The vectorization cost is a combination of the cost itself and a boolean indicating whether any of the contributing operations will actually operate on vector values after type legalization in the backend.
DemandedBits * DB
Demanded bits analysis.
const TargetLibraryInfo * TLI
Target Library Info.
bool memoryInstructionCanBeWidened(Instruction *I, ElementCount VF)
Returns true if I is a memory instruction with consecutive memory access that can be widened.
InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const
Estimate cost of an intrinsic call instruction CI if it were vectorized with factor VF.
bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be scalar after vectorization.
bool isOptimizableIVTruncate(Instruction *I, ElementCount VF)
Return True if instruction I is an optimizable truncate whose operand is an induction variable.
FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC)
Loop * TheLoop
The loop that we evaluate.
TailFoldingStyle getTailFoldingStyle(bool IVUpdateMayOverflow=true) const
Returns the TailFoldingStyle that is best for the current loop.
InterleavedAccessInfo & InterleaveInfo
The interleave access information contains groups of interleaved accesses with the same stride and close to each other.
SmallPtrSet< const Value *, 16 > ValuesToIgnore
Values to ignore in the cost model.
void setVectorizedCallDecision(ElementCount VF)
A call may be vectorized in different ways depending on whether we have vectorized variants available and whether the target supports masking.
void invalidateCostModelingDecisions()
Invalidates decisions already taken by the cost model.
bool selectUserVectorizationFactor(ElementCount UserVF)
Setup cost-based decisions for user vectorization factor.
OptimizationRemarkEmitter * ORE
Interface to emit optimization remarks.
bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const
Returns true if the target machine supports masked store operation for the given DataType and kind of access to Ptr.
LoopInfo * LI
Loop Info analysis.
bool requiresScalarEpilogue(bool IsVectorizing) const
Returns true if we're required to use a scalar epilogue for at least the final iteration of the original loop.
SmallVector< RegisterUsage, 8 > calculateRegisterUsage(ArrayRef< ElementCount > VFs)
SmallPtrSet< const Value *, 16 > VecValuesToIgnore
Values to ignore in the cost model when VF > 1.
VectorizationCostTy expectedCost(ElementCount VF, SmallVectorImpl< InstructionVFPair > *Invalid=nullptr)
Returns the expected execution cost.
bool isInLoopReduction(PHINode *Phi) const
Returns true if the Phi is part of an inloop reduction.
bool isProfitableToScalarize(Instruction *I, ElementCount VF) const
void setWideningDecision(const InterleaveGroup< Instruction > *Grp, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for interleaving group Grp and vector width VF.
const MapVector< Instruction *, uint64_t > & getMinimalBitwidths() const
CallWideningDecision getCallWideningDecision(CallInst *CI, ElementCount VF) const
bool isLegalGatherOrScatter(Value *V, ElementCount VF)
Returns true if the target machine can represent V as a masked gather or scatter operation.
bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const
bool runtimeChecksRequired()
bool foldTailByMasking() const
Returns true if all loop blocks should be masked to fold tail loop.
bool isEpilogueVectorizationProfitable(const ElementCount VF) const
Returns true if epilogue vectorization is considered profitable, and false otherwise.
void collectUniformsAndScalars(ElementCount VF)
Collect Uniform and Scalar values for the given VF.
bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const
Returns true if the instructions in this block require predication for any reason, e.g., because tail folding now requires a predicate, or because the block in the original loop was predicated.
void setCallWideningDecision(CallInst *CI, ElementCount VF, InstWidening Kind, Function *Variant, Intrinsic::ID IID, std::optional< unsigned > MaskPos, InstructionCost Cost)
AssumptionCache * AC
Assumption cache.
void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for instruction I and vector width VF.
InstWidening
Decision that was taken during cost calculation for memory instruction.
bool isScalarWithPredication(Instruction *I, ElementCount VF) const
Returns true if I is an instruction which requires predication and for which our chosen predication strategy is scalarization (i.e., we don't have an alternate strategy such as masking available).
InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF) const
Estimate cost of a call instruction CI if it were vectorized with factor VF.
bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) const
Returns true if we should use strict in-order reductions for the given RdxDesc.
std::pair< InstructionCost, InstructionCost > getDivRemSpeculationCost(Instruction *I, ElementCount VF) const
Return the costs for our two available strategies for lowering a div/rem operation which requires speculating at least one lane.
bool isDivRemScalarWithPredication(InstructionCost ScalarCost, InstructionCost SafeDivisorCost) const
Given costs for both strategies, return true if the scalar predication lowering should be used for di...
void setCostBasedWideningDecision(ElementCount VF)
Memory access instruction may be vectorized in more than one way.
InstWidening getWideningDecision(Instruction *I, ElementCount VF) const
Return the cost model decision for the given instruction I and vector width VF.
const InterleaveGroup< Instruction > * getInterleavedAccessGroup(Instruction *Instr)
Get the interleaved access group that Instr belongs to.
bool isScalarEpilogueAllowed() const
Returns true if a scalar epilogue is not allowed due to optsize or a loop hint annotation.
InstructionCost getWideningCost(Instruction *I, ElementCount VF)
Return the vectorization cost for the given instruction I and vector width VF.
unsigned selectInterleaveCount(ElementCount VF, InstructionCost LoopCost)
void setTailFoldingStyles()
Selects and saves TailFoldingStyle for two options: whether or not the IV update may overflow.
void collectInstsToScalarize(ElementCount VF)
Collects the instructions to scalarize for each predicated instruction in the loop.
LoopVectorizationLegality checks if it is legal to vectorize a loop, and to what vectorization factor.
unsigned getNumStores() const
bool hasVectorCallVariants() const
Returns true if there is at least one function call in the loop which has a vectorized variant available.
uint64_t getMaxSafeVectorWidthInBits() const
bool isInvariantAddressOfReduction(Value *V)
Returns true if the given address is invariant and is used to store a recurrent expression.
bool blockNeedsPredication(BasicBlock *BB) const
Return true if the block BB needs to be predicated in order for the loop to be vectorized.
bool canVectorize(bool UseVPlanNativePath)
Returns true if it is legal to vectorize this loop.
int isConsecutivePtr(Type *AccessTy, Value *Ptr) const
Check if this pointer is consecutive when vectorizing.
bool canVectorizeFPMath(bool EnableStrictReductions)
Returns true if it is legal to vectorize the FP math operations in this loop.
bool isReductionVariable(PHINode *PN) const
Returns True if PN is a reduction variable in this loop.
bool isFixedOrderRecurrence(const PHINode *Phi) const
Returns True if Phi is a fixed-order recurrence in this loop.
const InductionDescriptor * getPointerInductionDescriptor(PHINode *Phi) const
Returns a pointer to the induction descriptor, if Phi is pointer induction.
const InductionDescriptor * getIntOrFpInductionDescriptor(PHINode *Phi) const
Returns a pointer to the induction descriptor, if Phi is an integer or floating point induction.
bool isInductionPhi(const Value *V) const
Returns True if V is a Phi node of an induction variable in this loop.
PHINode * getPrimaryInduction()
Returns the primary induction variable.
const InductionList & getInductionVars() const
Returns the induction variables found in the loop.
bool isInvariant(Value *V) const
Returns true if value V is uniform across VF lanes, when VF is provided, and otherwise if V is invariant across all loop iterations.
const ReductionList & getReductionVars() const
Returns the reduction variables found in the loop.
bool isSafeForAnyVectorWidth() const
unsigned getNumLoads() const
Type * getWidestInductionType()
Returns the widest induction type.
const LoopAccessInfo * getLAI() const
bool prepareToFoldTailByMasking()
Return true if we can vectorize this loop while folding its tail by masking, and mark all respective loads/stores for masking.
bool isUniformMemOp(Instruction &I, ElementCount VF) const
A uniform memory op is a load or store which accesses the same memory location on all VF lanes, if VF may be more than one.
bool isMaskRequired(const Instruction *I) const
Returns true if the vector representation of instruction I requires a mask.
const RuntimePointerChecking * getRuntimePointerChecking() const
Returns the information that we collected about runtime memory check.
Planner drives the vectorization process after having passed Legality checks.
std::optional< VectorizationFactor > plan(ElementCount UserVF, unsigned UserIC)
Plan how to best vectorize, return the best VF and its cost, or std::nullopt if vectorization and interleaving should be avoided up front.
VectorizationFactor selectEpilogueVectorizationFactor(const ElementCount MaxVF, unsigned IC)
VectorizationFactor planInVPlanNativePath(ElementCount UserVF)
Use the VPlan-native path to plan how to best vectorize, return the best VF and its cost.
std::pair< DenseMap< const SCEV *, Value * >, DenseMap< const RecurrenceDescriptor *, Value * > > executePlan(ElementCount VF, unsigned UF, VPlan &BestPlan, InnerLoopVectorizer &LB, DominatorTree *DT, bool IsEpilogueVectorization, const DenseMap< const SCEV *, Value * > *ExpandedSCEVs=nullptr)
Generate the IR code for the vectorized loop captured in VPlan BestPlan according to the best selected VF and UF.
void buildVPlans(ElementCount MinVF, ElementCount MaxVF)
Build VPlans for power-of-2 VF's between MinVF and MaxVF inclusive, according to the information gathered by Legal when it checked if it is legal to vectorize the loop.
VPlan & getBestPlanFor(ElementCount VF) const
Return the best VPlan for VF.
static bool getDecisionAndClampRange(const std::function< bool(ElementCount)> &Predicate, VFRange &Range)
Test a Predicate on a Range of VF's.
void printPlans(raw_ostream &O)
bool hasPlanWithVF(ElementCount VF) const
Look through the existing plans and return true if we have one with vectorization factor VF.
This holds vectorization requirements that must be verified late in the process.
Instruction * getExactFPInst()
Utility class for getting and setting loop vectorizer hints in the form of loop metadata.
bool isScalableVectorizationDisabled() const
enum ForceKind getForce() const
bool allowVectorization(Function *F, Loop *L, bool VectorizeOnlyWhenForced) const
bool allowReordering() const
When loop hints that enable vectorization are provided, we allow the vectorizer to change the order of operations given by the scalar loop.
void emitRemarkWithHints() const
Dumps all the hint information.
bool isPotentiallyUnsafe() const
ElementCount getWidth() const
@ FK_Enabled
Forcing enabled.
@ FK_Undefined
Not selected.
@ FK_Disabled
Forcing disabled.
unsigned getPredicate() const
void setAlreadyVectorized()
Mark the loop L as already vectorized by setting the width to 1.
const char * vectorizeAnalysisPassName() const
If hints are provided that force vectorization, use the AlwaysPrint pass name to force the frontend to print the diagnostic.
unsigned getInterleave() const
void prepareNoAliasMetadata()
Set up the aliasing scopes based on the memchecks.
Represents a single loop in the control flow graph.
bool hasLoopInvariantOperands(const Instruction *I) const
Return true if all the operands of the specified instruction are loop invariant.
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
MDNode * getLoopID() const
Return the llvm.loop loop id metadata node for this loop if it is present.
void replaceOperandWith(unsigned I, Metadata *New)
Replace a specific operand.
const MDOperand & getOperand(unsigned I) const
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
unsigned getNumOperands() const
Return number of MDNode operands.
static MDString * get(LLVMContext &Context, StringRef Str)
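These metadata accessors are the machinery behind llvm.loop hints: operand 0 of the loop ID is the self-reference, and each later operand is a node whose first operand is an MDString naming the hint. A hedged sketch, where dumpLoopHints is a hypothetical helper:

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Metadata.h"

using namespace llvm;

void dumpLoopHints(const Loop &L) {
  MDNode *LoopID = L.getLoopID();
  if (!LoopID || LoopID->getNumOperands() == 0)
    return;
  // Skip operand 0, the distinct self-referencing operand.
  for (unsigned I = 1, E = LoopID->getNumOperands(); I != E; ++I)
    if (auto *Hint = dyn_cast<MDNode>(LoopID->getOperand(I)))
      if (Hint->getNumOperands() > 0)
        if (auto *Name = dyn_cast<MDString>(Hint->getOperand(0)))
          (void)Name->getString(); // e.g. "llvm.loop.vectorize.enable"
}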
This class implements a map that also provides access to all stored values in a deterministic order.
iterator find(const KeyT &Key)
Function * getFunction(StringRef Name) const
Look up the specified function in the module symbol table.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
An analysis over an "inner" IR unit that provides access to an analysis manager over an "outer" IR unit.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
void setIncomingValueForBlock(const BasicBlock *BB, Value *V)
Set every incoming value for block BB to V.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will have (use 0 if you really have no idea).
Value * getIncomingValueForBlock(const BasicBlock *BB) const
static unsigned getIncomingValueNumForOperand(unsigned i)
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
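A minimal sketch of building a resume-style phi with these APIs, feeding poison on an edge that carries no meaningful value; makeResumePhi and its parameters are hypothetical:

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

PHINode *makeResumePhi(Type *Ty, BasicBlock *Header, BasicBlock *LoopExit,
                       BasicBlock *Bypass, Value *LoopVal) {
  // Reserve space for the two incoming edges up front.
  PHINode *Phi = PHINode::Create(Ty, /*NumReservedValues=*/2, "resume",
                                 Header->begin());
  Phi->addIncoming(LoopVal, LoopExit);            // real value from the loop
  Phi->addIncoming(PoisonValue::get(Ty), Bypass); // no value on the bypass
  return Phi;
}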
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of existing predicates.
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
const SCEVPredicate & getPredicate() const
const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
void preserveSet()
Mark an analysis set as preserved.
void preserve()
Mark an analysis as preserved.
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool hasProfileSummary() const
Returns true if profile summary is available.
The RecurrenceDescriptor is used to identify recurrence variables in a loop.
static bool isFMulAddIntrinsic(Instruction *I)
Returns true if the instruction is a call to the llvm.fmuladd intrinsic.
FastMathFlags getFastMathFlags() const
Instruction * getLoopExitInstr() const
static unsigned getOpcode(RecurKind Kind)
Returns the opcode corresponding to the RecurrenceKind.
Type * getRecurrenceType() const
Returns the type of the recurrence.
const SmallPtrSet< Instruction *, 8 > & getCastInsts() const
Returns a reference to the instructions used for type-promoting the recurrence.
unsigned getMinWidthCastToRecurrenceTypeInBits() const
Returns the minimum width used by the recurrence in bits.
TrackingVH< Value > getRecurrenceStartValue() const
SmallVector< Instruction *, 4 > getReductionOpChain(PHINode *Phi, Loop *L) const
Attempts to find a chain of operations from Phi to LoopExitInst that can be treated as a set of reduction instructions for in-loop reductions.
static bool isAnyOfRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,y) is loop invariant.
bool isSigned() const
Returns true if all source operands of the recurrence are SExtInsts.
RecurKind getRecurrenceKind() const
bool isOrdered() const
Expose an ordered FP reduction to the instance users.
Value * getRecurrenceIdentity(RecurKind K, Type *Tp, FastMathFlags FMF) const
Returns identity corresponding to the RecurrenceKind.
static bool isMinMaxRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is any min/max kind.
bool Need
This flag indicates if we need to add the runtime check.
std::optional< ArrayRef< PointerDiffInfo > > getDiffChecks() const
const SmallVectorImpl< RuntimePointerCheck > & getChecks() const
Returns the checks that generateChecks created.
This class represents a constant integer value.
const APInt & getAPInt() const
Helper to remove instructions inserted during SCEV expansion, unless they are marked as used.
This class uses information about analyzed scalars to rewrite expressions in canonical form.
ScalarEvolution * getSE()
bool isInsertedInstruction(Instruction *I) const
Return true if the specified instruction was inserted by the code rewriter.
Value * expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc)
Generates a code sequence that evaluates this predicate.
This class represents an assumption made using SCEV expressions which can be checked at run-time.
virtual bool isAlwaysTrue() const =0
Returns true if the predicate is always true.
This class represents an analyzed expression in the program.
bool isOne() const
Return true if the expression is a constant one.
bool isZero() const
Return true if the expression is a constant zero.
Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
The main scalar evolution driver.
const SCEV * getURemExpr(const SCEV *LHS, const SCEV *RHS)
Represents an unsigned remainder expression based on unsigned division.
const SCEV * getConstant(ConstantInt *V)
const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
unsigned getSmallConstantMaxTripCount(const Loop *L)
Returns the upper bound of the loop trip count as a normal unsigned value.
const SCEV * getTripCountFromExitCount(const SCEV *ExitCount)
A version of getTripCountFromExitCount below which always picks an evaluation type which can not result in overflow.
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may affect ScalarEvolution's ability to compute a trip count.
bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
bool isKnownPredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS, and RHS.
void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may affect its value, or which may disconnect it from a defining scope.
void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be trivial.
unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
LLVMContext & getContext() const
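A sketch of how these ScalarEvolution queries chain when reasoning about trip counts, in the spirit of the checks the vectorizer performs; reportTripCount is hypothetical:

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"

using namespace llvm;

void reportTripCount(ScalarEvolution &SE, const Loop *L) {
  // Best case: an exact, small constant trip count (0 if unknown).
  if (unsigned TC = SE.getSmallConstantTripCount(L)) {
    (void)TC;
    return;
  }
  // Otherwise fall back to an upper bound and the symbolic form.
  (void)SE.getSmallConstantMaxTripCount(L);
  const SCEV *BTC = SE.getBackedgeTakenCount(L);
  if (!isa<SCEVCouldNotCompute>(BTC))
    (void)SE.getTripCountFromExitCount(BTC); // backedge count + 1, safely
}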
This class represents the LLVM 'select' instruction.
A vector that has set insertion semantics.
ArrayRef< value_type > getArrayRef() const
size_type size() const
Determine the number of elements in the SetVector.
iterator end()
Get an iterator to the end of the SetVector.
size_type count(const key_type &key) const
Count the number of elements of a given key in the SetVector.
bool empty() const
Determine if the SetVector is empty or not.
iterator begin()
Get an iterator to the beginning of the SetVector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
value_type pop_back_val()
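SetVector pairs set semantics with deterministic insertion order, which is why worklist-style code often prefers it over a plain set. A minimal sketch:

#include "llvm/ADT/SetVector.h"

using namespace llvm;

void setVectorDemo() {
  SetVector<int> Worklist;
  Worklist.insert(3); // true: newly inserted
  Worklist.insert(1);
  Worklist.insert(3); // false: duplicate; order is unchanged
  // Iteration visits elements in insertion order: 3, then 1.
  while (!Worklist.empty()) {
    int V = Worklist.pop_back_val(); // LIFO removal
    (void)V;
  }
}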
This class provides computation of slot numbers for LLVM Assembly writing.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across all small sizes.
bool erase(PtrType Ptr)
erase - If the set contains the specified pointer, remove it and return true, otherwise return false.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less than N).
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
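A common idiom with these containers is a SmallVector worklist deduplicated by a SmallPtrSet, as in the various collect* routines above. A hypothetical sketch:

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

// Visit Root and its transitive instruction operands exactly once.
void collectOnce(Instruction *Root) {
  SmallVector<Instruction *, 4> Worklist;
  SmallPtrSet<Instruction *, 8> Seen;
  if (Seen.insert(Root).second) // .second is false if already present
    Worklist.push_back(Root);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    for (Value *Op : I->operands())
      if (auto *OpI = dyn_cast<Instruction>(Op))
        if (Seen.insert(OpI).second)
          Worklist.push_back(OpI);
  }
}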
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
Value handle that tracks a Value across RAUW.
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
bool isPointerTy() const
True if this is an instance of PointerType.
static IntegerType * getInt1Ty(LLVMContext &C)
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isTokenTy() const
Return true if this is 'token'.
bool isVoidTy() const
Return true if this is 'void'.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
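A small sketch chaining these Type predicates, in the spirit of the element-type checks the cost model performs; isWidenableScalar and its 64-bit cutoff are illustrative assumptions, not the pass's actual policy:

#include "llvm/IR/Type.h"

using namespace llvm;

bool isWidenableScalar(Type *Ty) {
  Type *Scalar = Ty->getScalarType(); // strips a vector wrapper, if any
  if (Scalar->isIntegerTy())
    return Scalar->getIntegerBitWidth() <= 64;
  return Scalar->isFloatingPointTy() || Scalar->isPointerTy();
}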
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
void setOperand(unsigned i, Value *Val)
Value * getOperand(unsigned i) const
static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)
Retrieve all the VFInfo instances associated to the CallInst CI.
VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph.
void appendRecipe(VPRecipeBase *Recipe)
Augment the existing recipes of a VPBasicBlock with an additional Recipe as the last recipe.
void execute(VPTransformState *State) override
The method which generates the output IR instructions that correspond to this VPBasicBlock, thereby "executing" the VPlan.
iterator begin()
Recipe iterator methods.
iterator_range< iterator > phis()
Returns an iterator range over the PHI-like recipes in the block.
iterator getFirstNonPhi()
Return the position of the first non-phi node recipe in the block.
void insert(VPRecipeBase *Recipe, iterator InsertPt)
A recipe for vectorizing a phi-node as a sequence of mask-based select instructions.
VPRegionBlock * getParent()
const VPBasicBlock * getExitingBasicBlock() const
void setName(const Twine &newName)
const VPBasicBlock * getEntryBasicBlock() const
VPBlockBase * getSingleSuccessor() const
static void insertBlockAfter(VPBlockBase *NewBlock, VPBlockBase *BlockPtr)
Insert disconnected VPBlockBase NewBlock after BlockPtr.
RAII object that stores the current insertion point and restores it when the object is destroyed.
VPlan-based builder utility analogous to IRBuilder.
VPValue * createOr(VPValue *LHS, VPValue *RHS, DebugLoc DL={}, const Twine &Name="")
VPBasicBlock * getInsertBlock() const
VPValue * createICmp(CmpInst::Predicate Pred, VPValue *A, VPValue *B, DebugLoc DL={}, const Twine &Name="")
Create a new ICmp VPInstruction with predicate Pred and operands A and B.
VPInstruction * createOverflowingOp(unsigned Opcode, std::initializer_list< VPValue * > Operands, VPRecipeWithIRFlags::WrapFlagsTy WrapFlags, DebugLoc DL={}, const Twine &Name="")
VPInstruction * createNaryOp(unsigned Opcode, ArrayRef< VPValue * > Operands, Instruction *Inst=nullptr, const Twine &Name="")
Create an N-ary operation with Opcode, Operands and set Inst as its underlying Instruction.
VPValue * createNot(VPValue *Operand, DebugLoc DL={}, const Twine &Name="")
VPValue * createSelect(VPValue *Cond, VPValue *TrueVal, VPValue *FalseVal, DebugLoc DL={}, const Twine &Name="", std::optional< FastMathFlags > FMFs=std::nullopt)
void setInsertPoint(VPBasicBlock *TheBB)
This specifies that created VPInstructions should be appended to the end of the specified block.
Canonical scalar induction phi of the vector loop.
ArrayRef< VPValue * > definedValues()
Returns an ArrayRef of the values defined by the VPDef.
VPValue * getVPSingleValue()
Returns the only VPValue defined by the VPDef.
VPValue * getVPValue(unsigned I)
Returns the VPValue with index I defined by the VPDef.
void execute(VPTransformState &State) override
Generate the transformed value of the induction at offset StartValue (first operand) + IV (second operand) * StepValue (third operand).
VPCanonicalIVPHIRecipe * getCanonicalIV() const
VPValue * getStepValue() const
VPValue * getStartValue() const
This is a concrete Recipe that models a single VPlan-level instruction.
@ FirstOrderRecurrenceSplice
unsigned getOpcode() const
VPInterleaveRecipe is a recipe for transforming an interleave group of load or stores into one wide load/store and shuffles.
VPValue * getAddr() const
Return the address accessed by this recipe.
VPValue * getMask() const
Return the mask used by this recipe.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the wide load or store, and shuffles.
ArrayRef< VPValue * > getStoredValues() const
Return the VPValues stored by this interleave group.
unsigned getNumStoreOperands() const
Returns the number of stored operands of this interleave group.
static VPLane getLastLaneForVF(const ElementCount &VF)
static VPLane getFirstLane()
A value that is used outside the VPlan.
VPRecipeBase is a base class modeling a sequence of one or more output IR instructions.
VPBasicBlock * getParent()
void insertBefore(VPRecipeBase *InsertPos)
Insert an unlinked recipe into a basic block immediately before the specified recipe.
void insertAfter(VPRecipeBase *InsertPos)
Insert an unlinked Recipe into a basic block immediately after the specified Recipe.
iplist< VPRecipeBase >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Helper class to create VPRecipes from IR instructions.
void createHeaderMask(VPlan &Plan)
Create the mask for the vector loop header block.
VPValue * getBlockInMask(BasicBlock *BB) const
Returns the entry mask for the block BB.
VPValue * getEdgeMask(BasicBlock *Src, BasicBlock *Dst) const
A helper that returns the previously computed predicate of the edge between SRC and DST.
void fixHeaderPhis()
Add the incoming values from the backedge to reduction & first-order recurrence cross-iteration phis.
VPValue * createEdgeMask(BasicBlock *Src, BasicBlock *Dst, VPlan &Plan)
A helper function that computes the predicate of the edge between SRC and DST.
void createBlockInMask(BasicBlock *BB, VPlan &Plan)
A helper function that computes the predicate of the block BB, assuming that the header block of the loop is set to True or the loop mask when tail folding.
void recordRecipeOf(Instruction *I)
Mark given ingredient for recording its recipe once one is created for it.
VPRecipeBase * tryToCreateWidenRecipe(Instruction *Instr, ArrayRef< VPValue * > Operands, VFRange &Range, VPBasicBlock *VPBB, VPlanPtr &Plan)
Create and return a widened recipe for I if one can be created within the given VF Range.
VPReplicateRecipe * handleReplication(Instruction *I, VFRange &Range, VPlan &Plan)
Build a VPReplicateRecipe for I.
VPRecipeBase * getRecipe(Instruction *I)
Return the recipe created for given ingredient.
void setFlags(Instruction *I) const
Set the IR flags for I.
A recipe for handling reduction phis.
bool isInLoop() const
Returns true, if the phi is part of an in-loop reduction.
const RecurrenceDescriptor & getRecurrenceDescriptor() const
A recipe to represent inloop reduction operations, performing a reduction on a vector operand into a scalar value, and adding the result to a chain.
VPValue * getVecOp() const
The VPValue of the vector value to be reduced.
VPValue * getCondOp() const
The VPValue of the condition for the block.
VPValue * getChainOp() const
The VPValue of the scalar Chain being accumulated.
void execute(VPTransformState &State) override
Generate the reduction in the loop.
VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks which form a Single-Entry-Single-Exiting subgraph of the output IR CFG.
bool isReplicator() const
An indicator whether this region is to generate multiple replicated instances of output IR corresponding to its VPBlockBases.
VPReplicateRecipe replicates a given instruction producing multiple scalar copies of the original scalar type, one per lane, instead of producing a single copy of widened type for all lanes.
void execute(VPTransformState &State) override
Generate replicas of the desired Ingredient.
bool shouldPack() const
Returns true if the recipe is used by a widened recipe via an intervening VPPredInstPHIRecipe.
VPSingleDefRecipe is a base class for recipes for modeling a sequence of one or more output IR that define a single result VPValue.
Instruction * getUnderlyingInstr()
Returns the underlying instruction.
This class can be used to assign consecutive numbers to all VPValues in a VPlan and allows querying the numbering for printing, similar to the ModuleSlotTracker for IR values.
Type * inferScalarType(const VPValue *V)
Infer the type of V. Returns the scalar type of V.
This class augments VPValue with operands which provide the inverse def-use edges from VPValue's users to their defs.
void setOperand(unsigned I, VPValue *New)
unsigned getNumOperands() const
VPValue * getOperand(unsigned N) const
void addOperand(VPValue *Operand)
Value * getUnderlyingValue()
Return the underlying Value attached to this VPValue.
void printAsOperand(raw_ostream &OS, VPSlotTracker &Tracker) const
void replaceAllUsesWith(VPValue *New)
user_iterator user_begin()
unsigned getNumUsers() const
Value * getLiveInIRValue()
Returns the underlying IR value, if this VPValue is defined outside the scope of VPlan.
bool isLiveIn() const
Returns true if this VPValue is a live-in, i.e. defined outside the VPlan.
void replaceUsesWithIf(VPValue *New, llvm::function_ref< bool(VPUser &U, unsigned Idx)> ShouldReplace)
Go through the uses list for this VPValue and make each use point to New if the callback ShouldReplace returns true for the given use, specified by its user and operand index.
A recipe to compute the pointers for widened memory accesses of IndexTy for all parts.
A recipe for widening Call instructions.
A Recipe for widening the canonical induction variable of the vector loop.
VPWidenCastRecipe is a recipe to create vector cast instructions.
A recipe for handling GEP instructions.
A recipe for handling phi nodes of integer and floating-point inductions, producing their vector values.
A Recipe for widening load/store operations.
VPValue * getMask() const
Return the mask used by this recipe.
VPValue * getAddr() const
Return the address accessed by this recipe.
void execute(VPTransformState &State) override
Generate the wide load/store.
VPValue * getStoredValue() const
Return the value stored by this recipe.
bool isStore() const
Returns true if this recipe is a store.
bool isConsecutive() const
A recipe for handling phis that are widened in the vector loop.
VPValue * getIncomingValue(unsigned I)
Returns the I th incoming VPValue.
VPBasicBlock * getIncomingBlock(unsigned I)
Returns the I th incoming VPBasicBlock.
bool onlyScalarsGenerated(bool IsScalable)
Returns true if only scalar values will be generated.
void execute(VPTransformState &State) override
Generate vector values for the pointer induction.
VPWidenRecipe is a recipe for producing a widened (vector-typed) copy of its ingredient.
Main class to build the VPlan H-CFG for an incoming IR.
VPlan models a candidate for vectorization, encoding various decisions taken to produce efficient output IR, including which branches, basic-blocks and output IR instructions to generate.
void prepareToExecute(Value *TripCount, Value *VectorTripCount, Value *CanonicalIVStartValue, VPTransformState &State)
Prepare the plan for execution, setting up the required live-in values.
VPBasicBlock * getEntry()
VPValue & getVectorTripCount()
The vector trip count.
void setName(const Twine &newName)
VPValue & getVFxUF()
Returns VF * UF of the vector loop region.
VPValue * getTripCount() const
The trip count of the original loop.
VPValue * getOrCreateBackedgeTakenCount()
The backedge taken count of the original loop.
void removeLiveOut(PHINode *PN)
void addLiveOut(PHINode *PN, VPValue *V)
VPBasicBlock * getPreheader()
VPValue * getVPValueOrAddLiveIn(Value *V)
Gets the VPValue for V or adds a new live-in (if none exists yet) for V.
VPRegionBlock * getVectorLoopRegion()
Returns the VPRegionBlock of the vector loop.
static VPlanPtr createInitialVPlan(const SCEV *TripCount, ScalarEvolution &PSE)
Create initial VPlan skeleton, having an "entry" VPBasicBlock (wrapping the original scalar pre-header) which contains SCEV expansions that need to happen before the CFG is modified.
bool hasVF(ElementCount VF)
bool hasUF(unsigned UF) const
void resetTripCount(VPValue *NewTripCount)
Resets the trip count for the VPlan.
LLVM_DUMP_METHOD void dump() const
Dump the plan to stderr (for debugging).
iterator_range< mapped_iterator< Use *, std::function< VPValue *(Value *)> > > mapToVPValues(User::op_range Operands)
Returns a range mapping the values in the range Operands to their corresponding VPValues.
void execute(VPTransformState *State)
Generate the IR code for this VPlan.
VPCanonicalIVPHIRecipe * getCanonicalIV()
Returns the canonical induction recipe of the vector loop.
const MapVector< PHINode *, VPLiveOut * > & getLiveOuts() const
VPValue * getSCEVExpansion(const SCEV *S) const
VPlan * duplicate()
Clone the current VPlan, update all VPValues of the new VPlan and cloned recipes to refer to the clones of the original VPValues.
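A hedged sketch combining the VPlan members above: clone a candidate plan and probe which vectorization/unroll factors it admits. Plan is a hypothetical VPlan built elsewhere (e.g. via createInitialVPlan), and the name is illustrative:

#include "VPlan.h"   // in-tree private VPlan header
using namespace llvm;
static void probeClone(VPlan &Plan) {
  VPlanPtr Clone(Plan.duplicate()); // deep copy; recipes refer to cloned VPValues
  Clone->setName("EpilogueCandidate");
  if (Clone->hasVF(ElementCount::getFixed(8)) && Clone->hasUF(2)) {
    // This clone could be specialized for VF=8, UF=2.
  }
}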
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUser() const
Return true if there is exactly one user of this value.
void setName(const Twine &Name)
Change the name of the value.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
StringRef getName() const
Return a constant reference to the value's name.
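A minimal sketch of the generic llvm::Value API listed above; Old and New are hypothetical, with New expected to have the same type as Old:

#include "llvm/IR/Value.h"
#include <cassert>
using namespace llvm;
static void transferNameAndUses(Value *Old, Value *New) {
  assert(Old->getType() == New->getType() && "types must match for RAUW");
  if (!Old->getName().empty())
    New->setName(Old->getName());   // keep the descriptive name
  Old->replaceAllUsesWith(New);     // every user of Old now points at New
}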
Base class of all SIMD vector types.
static bool isValidElementType(Type *ElemTy)
Return true if the specified type is valid as an element type.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector.
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
Type * getElementType() const
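A minimal sketch building a scalable vector type <vscale x 4 x i32> with the members above (the helper name is hypothetical):

#include "llvm/IR/DerivedTypes.h"
#include <cassert>
using namespace llvm;
static VectorType *makeScalable4xI32(LLVMContext &Ctx) {
  Type *ElemTy = Type::getInt32Ty(Ctx);
  assert(VectorType::isValidElementType(ElemTy));
  auto *VTy = VectorType::get(ElemTy, ElementCount::getScalable(4));
  // VTy->getElementCount() == ElementCount::getScalable(4)
  // VTy->getElementType() == ElemTy
  return VTy;
}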
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isNonZero() const
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr LeafTy multiplyCoefficientBy(ScalarTy RHS) const
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
constexpr bool isZero() const
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the same way as for normal integer types.
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
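A hedged sketch of the FixedOrScalableQuantity queries above, with worked values in the comments:

#include "llvm/Support/TypeSize.h"
using namespace llvm;
static void elementCountDemo() {
  ElementCount VF = ElementCount::getScalable(4); // "vscale x 4" lanes
  bool S = VF.isScalable();                       // true
  unsigned Min = VF.getKnownMinValue();           // 4
  ElementCount Half = VF.divideCoefficientBy(2);  // vscale x 2
  ElementCount Dbl  = VF.multiplyCoefficientBy(2);// vscale x 8
  // Comparisons are "known" only when provable for every vscale:
  bool LT = ElementCount::isKnownLT(Half, Dbl);   // true (2 < 8, same kind)
  (void)S; (void)Min; (void)LT;
}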
An efficient, type-erasing, non-owning reference to a callable.
self_iterator getIterator()
A range adaptor for a pair of iterators.
This class implements an extremely fast bulk output stream that can only output to a stream.
A raw_ostream that writes to an std::string.
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ PredicateElseScalarEpilogue
@ PredicateOrDontVectorize
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always be performed.
@ C
The default llvm calling convention, compatible with C.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
bool match(Val *V, const Pattern &P)
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
OneUse_match< T > m_OneUse(const T &SubPattern)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
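A hedged sketch of the PatternMatch combinators above: detect a single-use widening multiply, the kind of pattern a vectorizer cost model might special-case (the helper name is hypothetical):

#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;
static bool isWideningMul(Instruction &I) {
  Value *A, *B;
  // mul of two (sign- or zero-)extended values, where the mul has one use.
  return match(&I, m_OneUse(m_Mul(m_ZExtOrSExt(m_Value(A)),
                                  m_ZExtOrSExt(m_Value(B)))));
}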
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to the ValuesClass constructor.
initializer< Ty > init(const Ty &Val)
DiagnosticInfoOptimizationBase::Argument NV
NodeAddr< InstrNode * > Instr
NodeAddr< PhiNode * > Phi
NodeAddr< DefNode * > Def
const_iterator begin(StringRef path, Style style=Style::native)
Get begin iterator over path.
const_iterator end(StringRef path)
Get end iterator over path.
VPValue * getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr, ScalarEvolution &SE)
Get or create a VPValue that corresponds to the expansion of Expr.
bool isUniformAfterVectorization(VPValue *VPV)
Returns true if VPV is uniform after vectorization.
bool onlyFirstLaneUsed(const VPValue *Def)
Returns true if only the first lane of Def is used.
bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE, AssumptionCache *AC, MemorySSAUpdater *MSSAU, bool PreserveLCSSA)
Simplify each loop in a loop nest recursively.
void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
Value * addRuntimeChecks(Instruction *Loc, Loop *TheLoop, const SmallVectorImpl< RuntimePointerCheck > &PointerChecks, SCEVExpander &Expander, bool HoistRuntimeChecks=false)
Add code that checks at runtime if the accessed arrays in PointerChecks overlap.
void stable_sort(R &&Range)
bool RemoveRedundantDbgInstrs(BasicBlock *BB)
Try to remove redundant dbg.value instructions from given basic block.
std::optional< unsigned > getLoopEstimatedTripCount(Loop *L, unsigned *EstimatedLoopInvocationWeight=nullptr)
Returns a loop's estimated trip count based on branch weight metadata.
static void reportVectorization(OptimizationRemarkEmitter *ORE, Loop *TheLoop, VectorizationFactor VF, unsigned IC)
Report successful vectorization of the loop.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator)
Returns the integer ceil(Numerator / Denominator).
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B, C, ...), where A is the 0-based index of the item and B, C, ... are the values from the input ranges.
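A minimal sketch of the range helpers above (all_of, any_of, enumerate) on a plain container; the function name is hypothetical:

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
using namespace llvm;
static void rangesDemo() {
  SmallVector<int, 4> Vals = {3, 5, 8, 13};
  bool AnyEven = any_of(Vals, [](int V) { return V % 2 == 0; }); // true
  bool AllOdd  = all_of(Vals, [](int V) { return V % 2 != 0; }); // false
  for (const auto &P : enumerate(Vals)) {
    size_t Idx = P.index(); // 0-based position
    int V = P.value();      // the element itself
    (void)Idx; (void)V;
  }
  (void)AnyEven; (void)AllOdd;
}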
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
unsigned getLoadStoreAddressSpace(Value *I)
A helper function that returns the address space of the pointer operand of load or store instruction.
bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
const SCEV * createTripCountSCEV(Type *IdxTy, PredicatedScalarEvolution &PSE, Loop *OrigLoop)
std::pair< Instruction *, ElementCount > InstructionVFPair
Value * getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF)
Return the runtime value for VF.
bool formLCSSARecursively(Loop &L, const DominatorTree &DT, const LoopInfo *LI, ScalarEvolution *SE)
Put a loop nest into LCSSA form.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
std::optional< MDNode * > makeFollowupLoopID(MDNode *OrigLoopID, ArrayRef< StringRef > FollowupAttrs, const char *InheritOptionsAttrsPrefix="", bool AlwaysNew=false)
Create a new loop identifier for a loop created from a loop transformation.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
Value * concatenateVectors(IRBuilderBase &Builder, ArrayRef< Value * > Vecs)
Concatenate a list of vectors.
iterator_range< df_iterator< VPBlockDeepTraversalWrapper< VPBlockBase * > > > vp_depth_first_deep(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order while traversing through region blocks.
Value * createMinMaxOp(IRBuilderBase &Builder, RecurKind RK, Value *Left, Value *Right)
Returns a Min/Max operation corresponding to MinMaxRecurrenceKind.
void setBranchWeights(Instruction &I, ArrayRef< uint32_t > Weights)
Create a new branch_weights metadata node and add or overwrite a prof metadata reference to instruction I.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Constant * createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF, const InterleaveGroup< Instruction > &Group)
Create a mask that filters the members of an interleave group where there are gaps.
llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
auto reverse(ContainerTy &&C)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
cl::opt< bool > EnableVPlanNativePath("enable-vplan-native-path", cl::Hidden, cl::desc("Enable VPlan-native vectorization path with " "support for outer loop vectorization."))
void sort(IteratorTy Start, IteratorTy End)
llvm::SmallVector< int, 16 > createReplicatedMask(unsigned ReplicationFactor, unsigned VF)
Create a mask with replicated elements.
std::unique_ptr< VPlan > VPlanPtr
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool isPointerTy(const Type *T)
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
cl::opt< bool > EnableLoopVectorization
Value * createOrderedReduction(IRBuilderBase &B, const RecurrenceDescriptor &Desc, Value *Src, Value *Start)
Create an ordered reduction intrinsic using the given recurrence descriptor Desc.
Align getLoadStoreAlignment(Value *I)
A helper function that returns the alignment of load or store instruction.
iterator_range< filter_iterator< detail::IterOfRange< RangeT >, PredicateT > > make_filter_range(RangeT &&Range, PredicateT Pred)
Convenience function that takes a range of elements and a predicate, and returns a new filter_iterator range.
void llvm_unreachable_internal(const char *msg=nullptr, const char *file=nullptr, unsigned line=0)
This function calls abort(), and prints the optional message to stderr.
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
Type * ToVectorTy(Type *Scalar, ElementCount EC)
A helper function for converting Scalar types to vector types.
bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
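A hedged sketch showing the concrete masks these helpers produce, following the documented mask shapes:

#include "llvm/Analysis/VectorUtils.h"
using namespace llvm;
static void maskDemo() {
  SmallVector<int, 16> Stride =
      createStrideMask(/*Start=*/0, /*Stride=*/2, /*VF=*/4);
  // Stride     == <0, 2, 4, 6>
  SmallVector<int, 16> Repl =
      createReplicatedMask(/*ReplicationFactor=*/2, /*VF=*/3);
  // Repl       == <0, 0, 1, 1, 2, 2>
  SmallVector<int, 16> Interleave =
      createInterleaveMask(/*VF=*/4, /*NumVecs=*/2);
  // Interleave == <0, 4, 1, 5, 2, 6, 3, 7>
  (void)Stride; (void)Repl; (void)Interleave;
}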
RecurKind
These are the kinds of recurrences that we support.
@ FMulAdd
Sum of float products with llvm.fmuladd(a * b + sum).
void setProfileInfoAfterUnrolling(Loop *OrigLoop, Loop *UnrolledLoop, Loop *RemainderLoop, uint64_t UF)
Set weights for UnrolledLoop and RemainderLoop based on weights for OrigLoop and the following distribution of OrigLoop iterations among UnrolledLoop and RemainderLoop; UF is the unroll factor.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
void reportVectorizationFailure(const StringRef DebugMsg, const StringRef OREMsg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I=nullptr)
Reports a vectorization failure: print DebugMsg for debugging purposes along with the corresponding optimization remark RemarkName.
@ CM_ScalarEpilogueNotAllowedLowTripLoop
@ CM_ScalarEpilogueNotNeededUsePredicate
@ CM_ScalarEpilogueNotAllowedOptSize
@ CM_ScalarEpilogueAllowed
@ CM_ScalarEpilogueNotAllowedUsePredicate
@ Invalid
Denotes invalid value.
bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Return true if the instruction does not have any effects besides calculating the result and does not have undefined behavior.
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given predicate occurs in a range.
Value * createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF, int64_t Step)
Return a value for Step multiplied by VF.
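A hedged sketch using getRuntimeVF (above) together with createStepForVF to materialize VF and VF*UF as IR values. Builder, VF and UF are hypothetical; for a fixed VF both calls fold to constants, for a scalable VF they emit vscale-based arithmetic:

#include "llvm/IR/IRBuilder.h"
using namespace llvm;
static Value *emitVFxUF(IRBuilderBase &Builder, ElementCount VF, unsigned UF) {
  Value *RuntimeVF = getRuntimeVF(Builder, Builder.getInt64Ty(), VF);
  (void)RuntimeVF; // e.g. `mul i64 vscale, 4` for a vscale x 4 VF
  return createStepForVF(Builder, Builder.getInt64Ty(), VF, /*Step=*/UF);
}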
BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="", bool Before=false)
Split the specified block at the specified instruction.
void reportVectorizationInfo(const StringRef OREMsg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I=nullptr)
Reports an informative message: print Msg for debugging purposes as well as an optimization remark.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Value * addDiffRuntimeChecks(Instruction *Loc, ArrayRef< PointerDiffInfo > Checks, SCEVExpander &Expander, function_ref< Value *(IRBuilderBase &, unsigned)> GetVF, unsigned IC)
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer list are equal or the list is empty.
@ DataAndControlFlowWithoutRuntimeCheck
Use predicate to control both data and control flow, but modify the trip count so that a runtime overflow check can be avoided.
@ None
Don't use tail folding.
bool hasBranchWeightMD(const Instruction &I)
Checks if an instruction has branch weight metadata.
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
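A minimal sketch with worked values for the arithmetic helpers above (divideCeil, alignTo, bit_floor, isPowerOf2_32); the function name is hypothetical:

#include "llvm/ADT/bit.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;
static void mathDemo() {
  uint64_t A = divideCeil(10, 4);     // 3: integer ceil(10/4)
  uint64_t B = alignTo(10, Align(8)); // 16: next multiple of 8
  unsigned C = bit_floor(10u);        // 8: largest power of 2 <= 10
  bool D = isPowerOf2_32(8);          // true
  (void)A; (void)B; (void)C; (void)D;
}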
Type * getLoadStoreType(Value *I)
A helper function that returns the type of a load or store instruction.
bool verifyVPlanIsValid(const VPlan &Plan)
Verify invariants for general VPlans.
MapVector< Instruction *, uint64_t > computeMinimumValueSizes(ArrayRef< BasicBlock * > Blocks, DemandedBits &DB, const TargetTransformInfo *TTI=nullptr)
Compute a map of integer instructions to their minimum legal type size.
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
cl::opt< bool > EnableLoopInterleaving
Value * createTargetReduction(IRBuilderBase &B, const RecurrenceDescriptor &Desc, Value *Src, PHINode *OrigPhi=nullptr)
Create a generic target reduction using a recurrence descriptor Desc. The target is queried to determine if intrinsics or shuffle sequences are required to implement the reduction.
Implement std::hash so that hash_code can be used in STL containers.
This struct is a compact representation of a valid (non-zero power of two) alignment.
A special type used by analysis passes to provide an address that identifies that particular analysis pass type.
static void collectEphemeralValues(const Loop *L, AssumptionCache *AC, SmallPtrSetImpl< const Value * > &EphValues)
Collect a loop's ephemeral values (those used only by an assume or similar intrinsics in the loop).
An information struct used to provide DenseMap with the various necessary components for a given value type T.
ElementCountComparator creates a total ordering for ElementCount for the purposes of using it in a set structure.
Encapsulate information regarding vectorization of a loop and its epilogue.
BasicBlock * SCEVSafetyCheck
BasicBlock * MemSafetyCheck
BasicBlock * MainLoopIterationCountCheck
EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF, ElementCount EVF, unsigned EUF)
BasicBlock * EpilogueIterationCountCheck
A class that represents two vectorization factors (initialized with 0 by default).
static FixedScalableVFPair getNone()
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
std::optional< unsigned > MaskPos
A struct that represents some properties of the register usage of a loop.
SmallMapVector< unsigned, unsigned, 4 > MaxLocalUsers
Holds the maximum number of concurrent live intervals in the loop.
SmallMapVector< unsigned, unsigned, 4 > LoopInvariantRegs
Holds the number of loop invariant values that are used in the loop.
bool processLoop(Loop *L)
LoopAccessInfoManager * LAIs
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LoopVectorizePass(LoopVectorizeOptions Opts={})
LoopVectorizeResult runImpl(Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_, DominatorTree &DT_, BlockFrequencyInfo *BFI_, TargetLibraryInfo *TLI_, DemandedBits &DB_, AssumptionCache &AC_, LoopAccessInfoManager &LAIs_, OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_)
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
OptimizationRemarkEmitter * ORE
Storage for information about the changes that were made.
A CRTP mix-in to automatically provide informational APIs needed for passes.
A MapVector that performs no allocations if smaller than a certain size.
Holds the VFShape for a specific scalar to vector function mapping.
std::optional< unsigned > getParamIndexForOptionalMask() const
Instruction Set Architecture.
Encapsulates information needed to describe a parameter.
A range of powers-of-2 vectorization factors with fixed start and adjustable end.
A recipe for handling first-order recurrence phis.
VPIteration represents a single point in the iteration space of the output (vectorized and/or unrolled) IR loop.
bool isFirstIteration() const
A recipe for widening select instructions.
TODO: The following VectorizationFactor was pulled out of LoopVectorizationCostModel class.
InstructionCost Cost
Cost of the loop with that width.
ElementCount MinProfitableTripCount
The minimum trip count required to make vectorization profitable, e.g. due to runtime checks.
ElementCount Width
Vector width with best cost.
InstructionCost ScalarCost
Cost of the scalar loop.
static VectorizationFactor Disabled()
Width 1 means no vectorization, cost 0 means uncomputed cost.
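A hedged sketch of how planner-style code might choose between a candidate factor and "no vectorization", using the fields above (Candidate and the helper are hypothetical):

static VectorizationFactor pick(const VectorizationFactor &Candidate) {
  if (Candidate.Cost < Candidate.ScalarCost)
    return Candidate;                     // vector Width pays for itself
  return VectorizationFactor::Disabled(); // Width 1: keep the scalar loop
}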
static bool HoistRuntimeChecks