//===-- VPlanTransforms.cpp - Utility VPlan to VPlan transforms -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements a set of utility VPlan to VPlan transformations.
///
//===----------------------------------------------------------------------===//

#include "VPlanTransforms.h"
#include "VPRecipeBuilder.h"
#include "VPlanAnalysis.h"
#include "VPlanCFG.h"
#include "VPlanDominatorTree.h"
#include "VPlanPatternMatch.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/PatternMatch.h"

using namespace llvm;

void VPlanTransforms::VPInstructionsToVPRecipes(
    VPlanPtr &Plan,
    function_ref<const InductionDescriptor *(PHINode *)>
        GetIntOrFpInductionDescriptor,
    ScalarEvolution &SE, const TargetLibraryInfo &TLI) {

  ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>> RPOT(
      Plan->getVectorLoopRegion());
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) {
    // Skip blocks outside region
    if (!VPBB->getParent())
      break;
    VPRecipeBase *Term = VPBB->getTerminator();
    auto EndIter = Term ? Term->getIterator() : VPBB->end();
    // Introduce each ingredient into VPlan.
    for (VPRecipeBase &Ingredient :
         make_early_inc_range(make_range(VPBB->begin(), EndIter))) {

      VPValue *VPV = Ingredient.getVPSingleValue();
      Instruction *Inst = cast<Instruction>(VPV->getUnderlyingValue());

      VPRecipeBase *NewRecipe = nullptr;
      if (auto *VPPhi = dyn_cast<VPWidenPHIRecipe>(&Ingredient)) {
        auto *Phi = cast<PHINode>(VPPhi->getUnderlyingValue());
        const auto *II = GetIntOrFpInductionDescriptor(Phi);
        if (!II)
          continue;

        VPValue *Start = Plan->getOrAddLiveIn(II->getStartValue());
        VPValue *Step =
            vputils::getOrCreateVPValueForSCEVExpr(*Plan, II->getStep(), SE);
        NewRecipe = new VPWidenIntOrFpInductionRecipe(Phi, Start, Step, *II);
      } else {
        assert(isa<VPInstruction>(&Ingredient) &&
               "only VPInstructions expected here");
        assert(!isa<PHINode>(Inst) && "phis should be handled above");
        // Create VPWidenMemoryRecipe for loads and stores.
        if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) {
          NewRecipe = new VPWidenLoadRecipe(
              *Load, Ingredient.getOperand(0), nullptr /*Mask*/,
              false /*Consecutive*/, false /*Reverse*/,
              Ingredient.getDebugLoc());
        } else if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) {
          NewRecipe = new VPWidenStoreRecipe(
              *Store, Ingredient.getOperand(1), Ingredient.getOperand(0),
              nullptr /*Mask*/, false /*Consecutive*/, false /*Reverse*/,
              Ingredient.getDebugLoc());
        } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
          NewRecipe = new VPWidenGEPRecipe(GEP, Ingredient.operands());
        } else if (CallInst *CI = dyn_cast<CallInst>(Inst)) {
          NewRecipe = new VPWidenCallRecipe(
              CI, Ingredient.operands(), getVectorIntrinsicIDForCall(CI, &TLI),
              CI->getDebugLoc());
        } else if (SelectInst *SI = dyn_cast<SelectInst>(Inst)) {
          NewRecipe = new VPWidenSelectRecipe(*SI, Ingredient.operands());
        } else if (auto *CI = dyn_cast<CastInst>(Inst)) {
          NewRecipe = new VPWidenCastRecipe(
              CI->getOpcode(), Ingredient.getOperand(0), CI->getType(), *CI);
        } else {
          NewRecipe = new VPWidenRecipe(*Inst, Ingredient.operands());
        }
      }

      NewRecipe->insertBefore(&Ingredient);
      if (NewRecipe->getNumDefinedValues() == 1)
        VPV->replaceAllUsesWith(NewRecipe->getVPSingleValue());
      else
        assert(NewRecipe->getNumDefinedValues() == 0 &&
               "Only recipes with zero or one defined values expected");
      Ingredient.eraseFromParent();
    }
  }
}

static bool sinkScalarOperands(VPlan &Plan) {
  auto Iter = vp_depth_first_deep(Plan.getEntry());
  bool Changed = false;
  // First, collect the operands of all recipes in replicate blocks as seeds for
  // sinking.
  SetVector<std::pair<VPBasicBlock *, VPSingleDefRecipe *>> WorkList;
  for (VPRegionBlock *VPR : VPBlockUtils::blocksOnly<VPRegionBlock>(Iter)) {
    VPBasicBlock *EntryVPBB = VPR->getEntryBasicBlock();
    if (!VPR->isReplicator() || EntryVPBB->getSuccessors().size() != 2)
      continue;
    VPBasicBlock *VPBB = dyn_cast<VPBasicBlock>(EntryVPBB->getSuccessors()[0]);
    if (!VPBB || VPBB->getSingleSuccessor() != VPR->getExitingBasicBlock())
      continue;
    for (auto &Recipe : *VPBB) {
      for (VPValue *Op : Recipe.operands())
        if (auto *Def =
                dyn_cast_or_null<VPSingleDefRecipe>(Op->getDefiningRecipe()))
          WorkList.insert(std::make_pair(VPBB, Def));
    }
  }

  bool ScalarVFOnly = Plan.hasScalarVFOnly();
  // Try to sink each replicate or scalar IV steps recipe in the worklist.
  for (unsigned I = 0; I != WorkList.size(); ++I) {
    VPBasicBlock *SinkTo;
    VPSingleDefRecipe *SinkCandidate;
    std::tie(SinkTo, SinkCandidate) = WorkList[I];
    if (SinkCandidate->getParent() == SinkTo ||
        SinkCandidate->mayHaveSideEffects() ||
        SinkCandidate->mayReadOrWriteMemory())
      continue;
    if (auto *RepR = dyn_cast<VPReplicateRecipe>(SinkCandidate)) {
      if (!ScalarVFOnly && RepR->isUniform())
        continue;
    } else if (!isa<VPScalarIVStepsRecipe>(SinkCandidate))
      continue;

    bool NeedsDuplicating = false;
    // All recipe users of the sink candidate must be in the same block SinkTo,
    // or all users outside of SinkTo must be uniform-after-vectorization
    // (i.e., only the first lane is used). In the latter case, we need to
    // duplicate SinkCandidate.
    auto CanSinkWithUser = [SinkTo, &NeedsDuplicating,
                            SinkCandidate](VPUser *U) {
      auto *UI = dyn_cast<VPRecipeBase>(U);
      if (!UI)
        return false;
      if (UI->getParent() == SinkTo)
        return true;
      NeedsDuplicating = UI->onlyFirstLaneUsed(SinkCandidate);
      // We only know how to duplicate VPReplicateRecipes for now.
      return NeedsDuplicating && isa<VPReplicateRecipe>(SinkCandidate);
    };
    if (!all_of(SinkCandidate->users(), CanSinkWithUser))
      continue;

    if (NeedsDuplicating) {
      if (ScalarVFOnly)
        continue;
      Instruction *I = SinkCandidate->getUnderlyingInstr();
      auto *Clone = new VPReplicateRecipe(I, SinkCandidate->operands(), true);
      // TODO: add ".cloned" suffix to name of Clone's VPValue.

      Clone->insertBefore(SinkCandidate);
      SinkCandidate->replaceUsesWithIf(Clone, [SinkTo](VPUser &U, unsigned) {
        return cast<VPRecipeBase>(&U)->getParent() != SinkTo;
      });
    }
    SinkCandidate->moveBefore(*SinkTo, SinkTo->getFirstNonPhi());
    for (VPValue *Op : SinkCandidate->operands())
      if (auto *Def =
              dyn_cast_or_null<VPSingleDefRecipe>(Op->getDefiningRecipe()))
        WorkList.insert(std::make_pair(SinkTo, Def));
    Changed = true;
  }
  return Changed;
}

/// If \p R is a region with a VPBranchOnMaskRecipe in the entry block, return
/// the mask.
static VPValue *getPredicatedMask(VPRegionBlock *R) {
  auto *EntryBB = dyn_cast<VPBasicBlock>(R->getEntry());
  if (!EntryBB || EntryBB->size() != 1 ||
      !isa<VPBranchOnMaskRecipe>(EntryBB->begin()))
    return nullptr;

  return cast<VPBranchOnMaskRecipe>(&*EntryBB->begin())->getOperand(0);
}

/// If \p R is a triangle region, return the 'then' block of the triangle.
static VPBasicBlock *getPredicatedThenBlock(VPRegionBlock *R) {
  auto *EntryBB = cast<VPBasicBlock>(R->getEntry());
  if (EntryBB->getNumSuccessors() != 2)
    return nullptr;

  auto *Succ0 = dyn_cast<VPBasicBlock>(EntryBB->getSuccessors()[0]);
  auto *Succ1 = dyn_cast<VPBasicBlock>(EntryBB->getSuccessors()[1]);
  if (!Succ0 || !Succ1)
    return nullptr;

  if (Succ0->getNumSuccessors() + Succ1->getNumSuccessors() != 1)
    return nullptr;
  if (Succ0->getSingleSuccessor() == Succ1)
    return Succ0;
  if (Succ1->getSingleSuccessor() == Succ0)
    return Succ1;
  return nullptr;
}

// Merge replicate regions in their successor region, if a replicate region
// is connected to a successor replicate region with the same predicate by a
// single, empty VPBasicBlock.
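// For example (illustrative sketch, not taken from a specific test): two
// replicate regions guarded by the same mask %m,
//
//   region.1 (branch-on-mask %m) -> empty.bb -> region.2 (branch-on-mask %m)
//
// are merged by moving the recipes of region.1's 'then' block into region.2's
// 'then' block, leaving a single region predicated on %m.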
static bool mergeReplicateRegionsIntoSuccessors(VPlan &Plan) {
  SetVector<VPRegionBlock *> DeletedRegions;

  // Collect replicate regions followed by an empty block, followed by another
  // replicate region with matching masks to process front. This is to avoid
  // iterator invalidation issues while merging regions.
  SmallVector<VPRegionBlock *, 8> WorkList;
  for (VPRegionBlock *Region1 : VPBlockUtils::blocksOnly<VPRegionBlock>(
           vp_depth_first_deep(Plan.getEntry()))) {
    if (!Region1->isReplicator())
      continue;
    auto *MiddleBasicBlock =
        dyn_cast_or_null<VPBasicBlock>(Region1->getSingleSuccessor());
    if (!MiddleBasicBlock || !MiddleBasicBlock->empty())
      continue;

    auto *Region2 =
        dyn_cast_or_null<VPRegionBlock>(MiddleBasicBlock->getSingleSuccessor());
    if (!Region2 || !Region2->isReplicator())
      continue;

    VPValue *Mask1 = getPredicatedMask(Region1);
    VPValue *Mask2 = getPredicatedMask(Region2);
    if (!Mask1 || Mask1 != Mask2)
      continue;

    assert(Mask1 && Mask2 && "both regions must have conditions");
    WorkList.push_back(Region1);
  }

  // Move recipes from Region1 to its successor region, if both are triangles.
  for (VPRegionBlock *Region1 : WorkList) {
    if (DeletedRegions.contains(Region1))
      continue;
    auto *MiddleBasicBlock = cast<VPBasicBlock>(Region1->getSingleSuccessor());
    auto *Region2 = cast<VPRegionBlock>(MiddleBasicBlock->getSingleSuccessor());

    VPBasicBlock *Then1 = getPredicatedThenBlock(Region1);
    VPBasicBlock *Then2 = getPredicatedThenBlock(Region2);
    if (!Then1 || !Then2)
      continue;

    // Note: No fusion-preventing memory dependencies are expected in either
    // region. Such dependencies should be rejected during earlier dependence
    // checks, which guarantee accesses can be re-ordered for vectorization.
    //
    // Move recipes to the successor region.
    for (VPRecipeBase &ToMove : make_early_inc_range(reverse(*Then1)))
      ToMove.moveBefore(*Then2, Then2->getFirstNonPhi());

    auto *Merge1 = cast<VPBasicBlock>(Then1->getSingleSuccessor());
    auto *Merge2 = cast<VPBasicBlock>(Then2->getSingleSuccessor());

    // Move VPPredInstPHIRecipes from the merge block to the successor region's
    // merge block. Update all users inside the successor region to use the
    // original values.
    for (VPRecipeBase &Phi1ToMove : make_early_inc_range(reverse(*Merge1))) {
      VPValue *PredInst1 =
          cast<VPPredInstPHIRecipe>(&Phi1ToMove)->getOperand(0);
      VPValue *Phi1ToMoveV = Phi1ToMove.getVPSingleValue();
      Phi1ToMoveV->replaceUsesWithIf(PredInst1, [Then2](VPUser &U, unsigned) {
        auto *UI = dyn_cast<VPRecipeBase>(&U);
        return UI && UI->getParent() == Then2;
      });

      Phi1ToMove.moveBefore(*Merge2, Merge2->begin());
    }

    // Finally, remove the first region.
    for (VPBlockBase *Pred : make_early_inc_range(Region1->getPredecessors())) {
      VPBlockUtils::disconnectBlocks(Pred, Region1);
      VPBlockUtils::connectBlocks(Pred, MiddleBasicBlock);
    }
    VPBlockUtils::disconnectBlocks(Region1, MiddleBasicBlock);
    DeletedRegions.insert(Region1);
  }

  for (VPRegionBlock *ToDelete : DeletedRegions)
    delete ToDelete;
  return !DeletedRegions.empty();
}

static VPRegionBlock *createReplicateRegion(VPReplicateRecipe *PredRecipe,
                                            VPlan &Plan) {
  Instruction *Instr = PredRecipe->getUnderlyingInstr();
  // Build the triangular if-then region.
  std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
  assert(Instr->getParent() && "Predicated instruction not in any basic block");
  auto *BlockInMask = PredRecipe->getMask();
  auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
  auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);

  // Replace predicated replicate recipe with a replicate recipe without a
  // mask but in the replicate region.
  auto *RecipeWithoutMask = new VPReplicateRecipe(
      PredRecipe->getUnderlyingInstr(),
      make_range(PredRecipe->op_begin(), std::prev(PredRecipe->op_end())),
      PredRecipe->isUniform());
  auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", RecipeWithoutMask);

  VPPredInstPHIRecipe *PHIRecipe = nullptr;
  if (PredRecipe->getNumUsers() != 0) {
    PHIRecipe = new VPPredInstPHIRecipe(RecipeWithoutMask);
    PredRecipe->replaceAllUsesWith(PHIRecipe);
    PHIRecipe->setOperand(0, RecipeWithoutMask);
  }
  PredRecipe->eraseFromParent();
  auto *Exiting = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
  VPRegionBlock *Region = new VPRegionBlock(Entry, Exiting, RegionName, true);

  // Note: first set Entry as region entry and then connect successors starting
  // from it in order, to propagate the "parent" of each VPBasicBlock.
  VPBlockUtils::insertTwoBlocksAfter(Pred, Exiting, Entry);
  VPBlockUtils::connectBlocks(Pred, Exiting);

  return Region;
}

static void addReplicateRegions(VPlan &Plan) {
  SmallVector<VPReplicateRecipe *> WorkList;
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
           vp_depth_first_deep(Plan.getEntry()))) {
    for (VPRecipeBase &R : *VPBB)
      if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R)) {
        if (RepR->isPredicated())
          WorkList.push_back(RepR);
      }
  }

  unsigned BBNum = 0;
  for (VPReplicateRecipe *RepR : WorkList) {
    VPBasicBlock *CurrentBlock = RepR->getParent();
    VPBasicBlock *SplitBlock = CurrentBlock->splitAt(RepR->getIterator());

    BasicBlock *OrigBB = RepR->getUnderlyingInstr()->getParent();
    SplitBlock->setName(
        OrigBB->hasName() ? OrigBB->getName() + "." + Twine(BBNum++) : "");
    // Record predicated instructions for above packing optimizations.
    VPBlockBase *Region = createReplicateRegion(RepR, Plan);
    Region->setParent(CurrentBlock->getParent());
    VPBlockUtils::disconnectBlocks(CurrentBlock, SplitBlock);
    VPBlockUtils::connectBlocks(CurrentBlock, Region);
    VPBlockUtils::connectBlocks(Region, SplitBlock);
  }
}

/// Remove redundant VPBasicBlocks by merging them into their predecessor if
/// the predecessor has a single successor.
static bool mergeBlocksIntoPredecessors(VPlan &Plan) {
  SmallVector<VPBasicBlock *> WorkList;
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
           vp_depth_first_deep(Plan.getEntry()))) {
    // Don't fold the exit block of the Plan into its single predecessor for
    // now.
    // TODO: Remove restriction once more of the skeleton is modeled in VPlan.
    if (VPBB->getNumSuccessors() == 0 && !VPBB->getParent())
      continue;
    auto *PredVPBB =
        dyn_cast_or_null<VPBasicBlock>(VPBB->getSinglePredecessor());
    if (!PredVPBB || PredVPBB->getNumSuccessors() != 1)
      continue;
    WorkList.push_back(VPBB);
  }

  for (VPBasicBlock *VPBB : WorkList) {
    VPBasicBlock *PredVPBB = cast<VPBasicBlock>(VPBB->getSinglePredecessor());
    for (VPRecipeBase &R : make_early_inc_range(*VPBB))
      R.moveBefore(*PredVPBB, PredVPBB->end());
    VPBlockUtils::disconnectBlocks(PredVPBB, VPBB);
    auto *ParentRegion = cast_or_null<VPRegionBlock>(VPBB->getParent());
    if (ParentRegion && ParentRegion->getExiting() == VPBB)
      ParentRegion->setExiting(PredVPBB);
    for (auto *Succ : to_vector(VPBB->successors())) {
      VPBlockUtils::disconnectBlocks(VPBB, Succ);
      VPBlockUtils::connectBlocks(PredVPBB, Succ);
    }
    delete VPBB;
  }
  return !WorkList.empty();
}

void VPlanTransforms::createAndOptimizeReplicateRegions(VPlan &Plan) {
  // Convert masked VPReplicateRecipes to if-then region blocks.
  addReplicateRegions(Plan);

  bool ShouldSimplify = true;
  while (ShouldSimplify) {
    ShouldSimplify = sinkScalarOperands(Plan);
    ShouldSimplify |= mergeReplicateRegionsIntoSuccessors(Plan);
    ShouldSimplify |= mergeBlocksIntoPredecessors(Plan);
  }
}

/// Remove redundant casts of inductions.
///
/// Such redundant casts are casts of induction variables that can be ignored,
/// because we already proved that the casted phi is equal to the uncasted phi
/// in the vectorized loop. There is no need to vectorize the cast - the same
/// value can be used for both the phi and casts in the vector loop.
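/// For example (illustrative): if the induction descriptor of %iv recorded the
/// cast chain
///   %trunc = trunc i64 %iv to i32
///   %ext   = sext i32 %trunc to i64
/// then %ext is known to equal %iv, so the recipe for %ext is replaced by %iv
/// itself and the remaining dead casts are cleaned up later.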
static void removeRedundantInductionCasts(VPlan &Plan) {
  for (auto &Phi : Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
    auto *IV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
    if (!IV || IV->getTruncInst())
      continue;

    // A sequence of IR Casts has potentially been recorded for IV, which
    // *must be bypassed* when the IV is vectorized, because the vectorized IV
    // will produce the desired casted value. This sequence forms a def-use
    // chain and is provided in reverse order, ending with the cast that uses
    // the IV phi. Search for the recipe of the last cast in the chain and
    // replace it with the original IV. Note that only the final cast is
    // expected to have users outside the cast-chain and the dead casts left
    // over will be cleaned up later.
    auto &Casts = IV->getInductionDescriptor().getCastInsts();
    VPValue *FindMyCast = IV;
    for (Instruction *IRCast : reverse(Casts)) {
      VPSingleDefRecipe *FoundUserCast = nullptr;
      for (auto *U : FindMyCast->users()) {
        auto *UserCast = dyn_cast<VPSingleDefRecipe>(U);
        if (UserCast && UserCast->getUnderlyingValue() == IRCast) {
          FoundUserCast = UserCast;
          break;
        }
      }
      FindMyCast = FoundUserCast;
    }
    FindMyCast->replaceAllUsesWith(IV);
  }
}

/// Try to replace VPWidenCanonicalIVRecipes with a widened canonical IV
/// recipe, if it exists.
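/// For example (illustrative): a WIDEN-CANONICAL-INDUCTION user of the
/// canonical IV is redundant when the header already contains a widened int
/// induction with start 0 and step 1, since that induction produces the same
/// per-lane values.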
static void removeRedundantCanonicalIVs(VPlan &Plan) {
  VPCanonicalIVPHIRecipe *CanonicalIV = Plan.getCanonicalIV();
  VPWidenCanonicalIVRecipe *WidenNewIV = nullptr;
  for (VPUser *U : CanonicalIV->users()) {
    WidenNewIV = dyn_cast<VPWidenCanonicalIVRecipe>(U);
    if (WidenNewIV)
      break;
  }

  if (!WidenNewIV)
    return;

  VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
  for (VPRecipeBase &Phi : HeaderVPBB->phis()) {
    auto *WidenOriginalIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);

    if (!WidenOriginalIV || !WidenOriginalIV->isCanonical())
      continue;

    // Replace WidenNewIV with WidenOriginalIV if WidenOriginalIV provides
    // everything WidenNewIV's users need. That is, WidenOriginalIV will
    // generate a vector phi or all users of WidenNewIV demand the first lane
    // only.
    if (any_of(WidenOriginalIV->users(),
               [WidenOriginalIV](VPUser *U) {
                 return !U->usesScalars(WidenOriginalIV);
               }) ||
        vputils::onlyFirstLaneUsed(WidenNewIV)) {
      WidenNewIV->replaceAllUsesWith(WidenOriginalIV);
      WidenNewIV->eraseFromParent();
      return;
    }
  }
}

/// Returns true if \p R is dead and can be removed.
static bool isDeadRecipe(VPRecipeBase &R) {
  using namespace llvm::PatternMatch;
  // Do remove conditional assume instructions as their conditions may be
  // flattened.
  auto *RepR = dyn_cast<VPReplicateRecipe>(&R);
  bool IsConditionalAssume =
      RepR && RepR->isPredicated() &&
      match(RepR->getUnderlyingInstr(), m_Intrinsic<Intrinsic::assume>());
  if (IsConditionalAssume)
    return true;

  if (R.mayHaveSideEffects())
    return false;

  // Recipe is dead if no user keeps the recipe alive.
  return all_of(R.definedValues(),
                [](VPValue *V) { return V->getNumUsers() == 0; });
}

static void removeDeadRecipes(VPlan &Plan) {
  ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>> RPOT(
      Plan.getEntry());

  for (VPBasicBlock *VPBB :
       reverse(VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT))) {
    // The recipes in the block are processed in reverse order, to catch chains
    // of dead recipes.
    for (VPRecipeBase &R : make_early_inc_range(reverse(*VPBB))) {
      if (isDeadRecipe(R))
        R.eraseFromParent();
    }
  }
}

static VPScalarIVStepsRecipe *
createScalarIVSteps(VPlan &Plan, InductionDescriptor::InductionKind Kind,
                    Instruction::BinaryOps InductionOpcode,
                    FPMathOperator *FPBinOp, ScalarEvolution &SE,
                    Instruction *TruncI, VPValue *StartV, VPValue *Step,
                    VPBasicBlock::iterator IP) {
  VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
  VPCanonicalIVPHIRecipe *CanonicalIV = Plan.getCanonicalIV();
  VPSingleDefRecipe *BaseIV = CanonicalIV;
  if (!CanonicalIV->isCanonical(Kind, StartV, Step)) {
    BaseIV = new VPDerivedIVRecipe(Kind, FPBinOp, StartV, CanonicalIV, Step);
    HeaderVPBB->insert(BaseIV, IP);
  }

  // Truncate base induction if needed.
  VPTypeAnalysis TypeInfo(Plan.getCanonicalIV()->getScalarType(),
                          SE.getContext());
  Type *ResultTy = TypeInfo.inferScalarType(BaseIV);
  if (TruncI) {
    Type *TruncTy = TruncI->getType();
    assert(ResultTy->getScalarSizeInBits() > TruncTy->getScalarSizeInBits() &&
           "Not truncating.");
    assert(ResultTy->isIntegerTy() && "Truncation requires an integer type");
    BaseIV = new VPScalarCastRecipe(Instruction::Trunc, BaseIV, TruncTy);
    HeaderVPBB->insert(BaseIV, IP);
    ResultTy = TruncTy;
  }

  // Truncate step if needed.
  Type *StepTy = TypeInfo.inferScalarType(Step);
  if (ResultTy != StepTy) {
    assert(StepTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits() &&
           "Not truncating.");
    assert(StepTy->isIntegerTy() && "Truncation requires an integer type");
    Step = new VPScalarCastRecipe(Instruction::Trunc, Step, ResultTy);
    auto *VecPreheader =
        cast<VPBasicBlock>(HeaderVPBB->getSingleHierarchicalPredecessor());
    VecPreheader->appendRecipe(Step->getDefiningRecipe());
  }

  VPScalarIVStepsRecipe *Steps = new VPScalarIVStepsRecipe(
      BaseIV, Step, InductionOpcode,
      FPBinOp ? FPBinOp->getFastMathFlags() : FastMathFlags());
  HeaderVPBB->insert(Steps, IP);
  return Steps;
}

/// Legalize VPWidenPointerInductionRecipe, by replacing it with a PtrAdd
/// (IndStart, ScalarIVSteps (0, Step)) if only its scalar values are used, as
/// VPWidenPointerInductionRecipe will generate vectors only. If some users
/// require vectors while others require scalars, the scalar uses need to
/// extract the scalars from the generated vectors (note that this is different
/// to how int/fp inductions are handled). Also optimize
/// VPWidenIntOrFpInductionRecipe, if any of its users needs scalar values, by
/// providing them scalar steps built on the canonical scalar IV and updating
/// the original IV's users. This is an optional optimization to reduce the
/// need for vector extracts.
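/// For example (illustrative): a pointer induction whose users only need
/// scalar values,
///   WIDEN-POINTER-INDUCTION %ptr.iv = phi %start, +%step
/// is replaced by
///   %steps    = SCALAR-STEPS 0, %step
///   %next.gep = ptradd %start, %steps
/// so that no vector of pointers has to be materialized.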
static void legalizeAndOptimizeInductions(VPlan &Plan, ScalarEvolution &SE) {
  VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
  bool HasOnlyVectorVFs = !Plan.hasVF(ElementCount::getFixed(1));
  VPBasicBlock::iterator InsertPt = HeaderVPBB->getFirstNonPhi();
  for (VPRecipeBase &Phi : HeaderVPBB->phis()) {
    // Replace wide pointer inductions which have only their scalars used by
    // PtrAdd(IndStart, ScalarIVSteps (0, Step)).
    if (auto *PtrIV = dyn_cast<VPWidenPointerInductionRecipe>(&Phi)) {
      if (!PtrIV->onlyScalarsGenerated(Plan.hasScalableVF()))
        continue;

      const InductionDescriptor &ID = PtrIV->getInductionDescriptor();
      VPValue *StartV =
          Plan.getOrAddLiveIn(ConstantInt::get(ID.getStep()->getType(), 0));
      VPValue *StepV = PtrIV->getOperand(1);
      VPScalarIVStepsRecipe *Steps = createScalarIVSteps(
          Plan, InductionDescriptor::IK_IntInduction, Instruction::Add, nullptr,
          SE, nullptr, StartV, StepV, InsertPt);

      auto *Recipe = new VPInstruction(VPInstruction::PtrAdd,
                                       {PtrIV->getStartValue(), Steps},
                                       PtrIV->getDebugLoc(), "next.gep");

      Recipe->insertAfter(Steps);
      PtrIV->replaceAllUsesWith(Recipe);
      continue;
    }

    // Replace widened induction with scalar steps for users that only use
    // scalars.
    auto *WideIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
    if (!WideIV)
      continue;
    if (HasOnlyVectorVFs && none_of(WideIV->users(), [WideIV](VPUser *U) {
          return U->usesScalars(WideIV);
        }))
      continue;

    const InductionDescriptor &ID = WideIV->getInductionDescriptor();
    VPValue *Steps = createScalarIVSteps(
        Plan, ID.getKind(), ID.getInductionOpcode(),
        dyn_cast_or_null<FPMathOperator>(ID.getInductionBinOp()), SE,
        WideIV->getTruncInst(), WideIV->getStartValue(), WideIV->getStepValue(),
        InsertPt);

    // Update scalar users of IV to use Step instead.
    if (!HasOnlyVectorVFs)
      WideIV->replaceAllUsesWith(Steps);
    else
      WideIV->replaceUsesWithIf(Steps, [WideIV](VPUser &U, unsigned) {
        return U.usesScalars(WideIV);
      });
  }
}

/// Remove redundant ExpandSCEVRecipes in \p Plan's entry block by replacing
/// them with already existing recipes expanding the same SCEV expression.
static void removeRedundantExpandSCEVRecipes(VPlan &Plan) {
  DenseMap<const SCEV *, VPValue *> SCEV2VPV;

  for (VPRecipeBase &R :
       make_early_inc_range(*Plan.getEntry())) {
    auto *ExpR = dyn_cast<VPExpandSCEVRecipe>(&R);
    if (!ExpR)
      continue;

    auto I = SCEV2VPV.insert({ExpR->getSCEV(), ExpR});
    if (I.second)
      continue;
    ExpR->replaceAllUsesWith(I.first->second);
    ExpR->eraseFromParent();
  }
}

static void recursivelyDeleteDeadRecipes(VPValue *V) {
  SmallVector<VPValue *> WorkList;
  SmallPtrSet<VPValue *, 8> Seen;
  WorkList.push_back(V);

  while (!WorkList.empty()) {
    VPValue *Cur = WorkList.pop_back_val();
    if (!Seen.insert(Cur).second)
      continue;
    VPRecipeBase *R = Cur->getDefiningRecipe();
    if (!R)
      continue;
    if (!isDeadRecipe(*R))
      continue;
    WorkList.append(R->op_begin(), R->op_end());
    R->eraseFromParent();
  }
}

void VPlanTransforms::optimizeForVFAndUF(VPlan &Plan, ElementCount BestVF,
                                         unsigned BestUF,
                                         PredicatedScalarEvolution &PSE) {
  assert(Plan.hasVF(BestVF) && "BestVF is not available in Plan");
  assert(Plan.hasUF(BestUF) && "BestUF is not available in Plan");
  VPBasicBlock *ExitingVPBB =
      Plan.getVectorLoopRegion()->getExitingBasicBlock();
  auto *Term = &ExitingVPBB->back();
  // Try to simplify the branch condition if TC <= VF * UF when preparing to
  // execute the plan for the main vector loop. We only do this if the
  // terminator is:
  //  1. BranchOnCount, or
  //  2. BranchOnCond where the input is Not(ActiveLaneMask).
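  // For example (illustrative): with TC = 6, VF = 4 and UF = 2, a single
  // vector iteration covers VF * UF = 8 >= 6 elements, so SCEV can prove
  // TC <= VF * UF and the conditional back-branch can be folded into an
  // unconditional exit (the branch-on-cond true created below).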
  using namespace llvm::VPlanPatternMatch;
  if (!match(Term, m_BranchOnCount(m_VPValue(), m_VPValue())) &&
      !match(Term,
             m_BranchOnCond(m_Not(m_ActiveLaneMask(m_VPValue(), m_VPValue())))))
    return;

  Type *IdxTy =
      Plan.getCanonicalIV()->getStartValue()->getLiveInIRValue()->getType();
  const SCEV *TripCount = createTripCountSCEV(IdxTy, PSE);
  ScalarEvolution &SE = *PSE.getSE();
  ElementCount NumElements = BestVF.multiplyCoefficientBy(BestUF);
  const SCEV *C = SE.getElementCount(TripCount->getType(), NumElements);
  if (TripCount->isZero() ||
      !SE.isKnownPredicate(CmpInst::ICMP_ULE, TripCount, C))
    return;

  LLVMContext &Ctx = SE.getContext();
  auto *BOC =
      new VPInstruction(VPInstruction::BranchOnCond,
                        {Plan.getOrAddLiveIn(ConstantInt::getTrue(Ctx))});

  SmallVector<VPValue *> PossiblyDead(Term->operands());
  Term->eraseFromParent();
  for (VPValue *Op : PossiblyDead)
    recursivelyDeleteDeadRecipes(Op);
  ExitingVPBB->appendRecipe(BOC);
  Plan.setVF(BestVF);
  Plan.setUF(BestUF);
  // TODO: Further simplifications are possible
  //      1. Replace inductions with constants.
  //      2. Replace vector loop region with VPBasicBlock.
}

#ifndef NDEBUG
static VPRegionBlock *GetReplicateRegion(VPRecipeBase *R) {
  auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent());
  if (Region && Region->isReplicator()) {
    assert(Region->getNumSuccessors() == 1 &&
           Region->getNumPredecessors() == 1 && "Expected SESE region!");
    assert(R->getParent()->size() == 1 &&
           "A recipe in an original replicator region must be the only "
           "recipe in its block");
    return Region;
  }
  return nullptr;
}
#endif

static bool properlyDominates(const VPRecipeBase *A, const VPRecipeBase *B,
                              VPDominatorTree &VPDT) {
  if (A == B)
    return false;

  auto LocalComesBefore = [](const VPRecipeBase *A, const VPRecipeBase *B) {
    for (auto &R : *A->getParent()) {
      if (&R == A)
        return true;
      if (&R == B)
        return false;
    }
    llvm_unreachable("recipe not found");
  };
  const VPBlockBase *ParentA = A->getParent();
  const VPBlockBase *ParentB = B->getParent();
  if (ParentA == ParentB)
    return LocalComesBefore(A, B);

  assert(!GetReplicateRegion(const_cast<VPRecipeBase *>(A)) &&
         "No replicate regions expected at this point");
  assert(!GetReplicateRegion(const_cast<VPRecipeBase *>(B)) &&
         "No replicate regions expected at this point");
  return VPDT.properlyDominates(ParentA, ParentB);
}

/// Sink users of \p FOR after the recipe defining the previous value \p
/// Previous of the recurrence. \returns true if all users of \p FOR could be
/// re-arranged as needed or false if it is not possible.
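/// For example (illustrative): given
///   %for = phi [ %init ], [ %rec ]
///   %use = add %for, 1
///   %rec = load ...
/// %use must be sunk after %rec (the previous value), so that the
/// first-order-recurrence-splice of (%for, %rec) can later be placed before
/// all of %for's users.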
static bool
sinkRecurrenceUsersAfterPrevious(VPFirstOrderRecurrencePHIRecipe *FOR,
                                 VPRecipeBase *Previous,
                                 VPDominatorTree &VPDT) {
  // Collect recipes that need sinking.
  SmallVector<VPRecipeBase *> WorkList;
  SmallPtrSet<VPRecipeBase *, 8> Seen;
  Seen.insert(Previous);
  auto TryToPushSinkCandidate = [&](VPRecipeBase *SinkCandidate) {
    // The previous value must not depend on the users of the recurrence phi.
    // In that case, FOR is not a fixed order recurrence.
    if (SinkCandidate == Previous)
      return false;

    if (isa<VPHeaderPHIRecipe>(SinkCandidate) ||
        !Seen.insert(SinkCandidate).second ||
        properlyDominates(Previous, SinkCandidate, VPDT))
      return true;

    if (SinkCandidate->mayHaveSideEffects())
      return false;

    WorkList.push_back(SinkCandidate);
    return true;
  };

  // Recursively sink users of FOR after Previous.
  WorkList.push_back(FOR);
  for (unsigned I = 0; I != WorkList.size(); ++I) {
    VPRecipeBase *Current = WorkList[I];
    assert(Current->getNumDefinedValues() == 1 &&
           "only recipes with a single defined value expected");

    for (VPUser *User : Current->getVPSingleValue()->users()) {
      if (auto *R = dyn_cast<VPRecipeBase>(User))
        if (!TryToPushSinkCandidate(R))
          return false;
    }
  }

  // Keep recipes to sink ordered by dominance so earlier instructions are
  // processed first.
  sort(WorkList, [&VPDT](const VPRecipeBase *A, const VPRecipeBase *B) {
    return properlyDominates(A, B, VPDT);
  });

  for (VPRecipeBase *SinkCandidate : WorkList) {
    if (SinkCandidate == FOR)
      continue;

    SinkCandidate->moveAfter(Previous);
    Previous = SinkCandidate;
  }
  return true;
}

bool VPlanTransforms::adjustFixedOrderRecurrences(VPlan &Plan,
                                                  VPBuilder &LoopBuilder) {
  VPDominatorTree VPDT;
  VPDT.recalculate(Plan);

  SmallVector<VPFirstOrderRecurrencePHIRecipe *> RecurrencePhis;
  for (VPRecipeBase &R :
       Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis())
    if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
      RecurrencePhis.push_back(FOR);

  VPBasicBlock *MiddleVPBB =
      cast<VPBasicBlock>(Plan.getVectorLoopRegion()->getSingleSuccessor());
  VPBuilder MiddleBuilder;
  // Set insert point so new recipes are inserted before terminator and
  // condition, if there is either the former or both.
  if (auto *Term =
          dyn_cast_or_null<VPInstruction>(MiddleVPBB->getTerminator())) {
    if (auto *Cmp = dyn_cast<VPInstruction>(Term->getOperand(0)))
      MiddleBuilder.setInsertPoint(Cmp);
    else
      MiddleBuilder.setInsertPoint(Term);
  } else
    MiddleBuilder.setInsertPoint(MiddleVPBB);

  for (VPFirstOrderRecurrencePHIRecipe *FOR : RecurrencePhis) {
    SmallPtrSet<VPFirstOrderRecurrencePHIRecipe *, 4> SeenPhis;
    VPRecipeBase *Previous = FOR->getBackedgeValue()->getDefiningRecipe();
    // Fixed-order recurrences do not contain cycles, so this loop is guaranteed
    // to terminate.
    while (auto *PrevPhi =
               dyn_cast_or_null<VPFirstOrderRecurrencePHIRecipe>(Previous)) {
      assert(PrevPhi->getParent() == FOR->getParent());
      assert(SeenPhis.insert(PrevPhi).second);
      Previous = PrevPhi->getBackedgeValue()->getDefiningRecipe();
    }

    if (!sinkRecurrenceUsersAfterPrevious(FOR, Previous, VPDT))
      return false;

    // Introduce a recipe to combine the incoming and previous values of a
    // fixed-order recurrence.
    VPBasicBlock *InsertBlock = Previous->getParent();
    if (isa<VPHeaderPHIRecipe>(Previous))
      LoopBuilder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi());
    else
      LoopBuilder.setInsertPoint(InsertBlock,
                                 std::next(Previous->getIterator()));

    auto *RecurSplice = cast<VPInstruction>(
        LoopBuilder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice,
                                 {FOR, FOR->getBackedgeValue()}));

    FOR->replaceAllUsesWith(RecurSplice);
    // Set the first operand of RecurSplice to FOR again, after replacing
    // all users.
    RecurSplice->setOperand(0, FOR);

    // This is the second phase of vectorizing first-order recurrences. An
    // overview of the transformation is described below. Suppose we have the
    // following loop with some use after the loop of the last a[i-1],
    //
    //   for (int i = 0; i < n; ++i) {
    //     t = a[i - 1];
    //     b[i] = a[i] - t;
    //   }
    //   use t;
    //
    // There is a first-order recurrence on "a". For this loop, the shorthand
    // scalar IR looks like:
    //
    //   scalar.ph:
    //     s_init = a[-1]
    //     br scalar.body
    //
    //   scalar.body:
    //     i = phi [0, scalar.ph], [i+1, scalar.body]
    //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
    //     s2 = a[i]
    //     b[i] = s2 - s1
    //     br cond, scalar.body, exit.block
    //
    //   exit.block:
    //     use = lcssa.phi [s1, scalar.body]
    //
    // In this example, s1 is a recurrence because its value depends on the
    // previous iteration. In the first phase of vectorization, we created a
    // vector phi v1 for s1. We now complete the vectorization and produce the
    // shorthand vector IR shown below (for VF = 4, UF = 1).
    //
    //   vector.ph:
    //     v_init = vector(..., ..., ..., a[-1])
    //     br vector.body
    //
    //   vector.body
    //     i = phi [0, vector.ph], [i+4, vector.body]
    //     v1 = phi [v_init, vector.ph], [v2, vector.body]
    //     v2 = a[i, i+1, i+2, i+3];
    //     v3 = vector(v1(3), v2(0, 1, 2))
    //     b[i, i+1, i+2, i+3] = v2 - v3
    //     br cond, vector.body, middle.block
    //
    //   middle.block:
    //     s_penultimate = v2(2) = v3(3)
    //     s_resume = v2(3)
    //     br cond, scalar.ph, exit.block
    //
    //   scalar.ph:
    //     s_init' = phi [s_resume, middle.block], [s_init, otherwise]
    //     br scalar.body
    //
    //   scalar.body:
    //     i = phi [0, scalar.ph], [i+1, scalar.body]
    //     s1 = phi [s_init', scalar.ph], [s2, scalar.body]
    //     s2 = a[i]
    //     b[i] = s2 - s1
    //     br cond, scalar.body, exit.block
    //
    //   exit.block:
    //     lo = lcssa.phi [s1, scalar.body], [s.penultimate, middle.block]
    //
    // After execution completes the vector loop, we extract the next value of
    // the recurrence (x) to use as the initial value in the scalar loop. This
    // is modeled by ExtractFromEnd.
    Type *IntTy = Plan.getCanonicalIV()->getScalarType();

    // Extract the penultimate value of the recurrence and update VPLiveOut
    // users of the recurrence splice. Note that the extract of the final value
    // used to resume in the scalar loop is created earlier during VPlan
    // construction.
    auto *Penultimate = cast<VPInstruction>(MiddleBuilder.createNaryOp(
        VPInstruction::ExtractFromEnd,
        {FOR->getBackedgeValue(),
         Plan.getOrAddLiveIn(ConstantInt::get(IntTy, 2))},
        {}, "vector.recur.extract.for.phi"));
    RecurSplice->replaceUsesWithIf(
        Penultimate, [](VPUser &U, unsigned) { return isa<VPLiveOut>(&U); });
  }
  return true;
}

static SmallVector<VPUser *> collectUsersRecursively(VPValue *V) {
  SetVector<VPUser *> Users(V->user_begin(), V->user_end());
  for (unsigned I = 0; I != Users.size(); ++I) {
    VPRecipeBase *Cur = dyn_cast<VPRecipeBase>(Users[I]);
    if (!Cur || isa<VPHeaderPHIRecipe>(Cur))
      continue;
    for (VPValue *V : Cur->definedValues())
      Users.insert(V->user_begin(), V->user_end());
  }
  return Users.takeVector();
}

void VPlanTransforms::clearReductionWrapFlags(VPlan &Plan) {
  for (VPRecipeBase &R :
       Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
    auto *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
    if (!PhiR)
      continue;
    const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
    RecurKind RK = RdxDesc.getRecurrenceKind();
    if (RK != RecurKind::Add && RK != RecurKind::Mul)
      continue;

    for (VPUser *U : collectUsersRecursively(PhiR))
      if (auto *RecWithFlags = dyn_cast<VPRecipeWithIRFlags>(U)) {
        RecWithFlags->dropPoisonGeneratingFlags();
      }
  }
}

/// Try to simplify recipe \p R.
static void simplifyRecipe(VPRecipeBase &R, VPTypeAnalysis &TypeInfo) {
  using namespace llvm::VPlanPatternMatch;
  // Try to remove redundant blend recipes.
  if (auto *Blend = dyn_cast<VPBlendRecipe>(&R)) {
    VPValue *Inc0 = Blend->getIncomingValue(0);
    for (unsigned I = 1; I != Blend->getNumIncomingValues(); ++I)
      if (Inc0 != Blend->getIncomingValue(I) &&
          !match(Blend->getMask(I), m_False()))
        return;
    Blend->replaceAllUsesWith(Inc0);
    Blend->eraseFromParent();
    return;
  }

  VPValue *A;
  if (match(&R, m_Trunc(m_ZExtOrSExt(m_VPValue(A))))) {
    VPValue *Trunc = R.getVPSingleValue();
    Type *TruncTy = TypeInfo.inferScalarType(Trunc);
    Type *ATy = TypeInfo.inferScalarType(A);
    if (TruncTy == ATy) {
      Trunc->replaceAllUsesWith(A);
    } else {
      // Don't replace a scalarizing recipe with a widened cast.
      if (isa<VPReplicateRecipe>(&R))
        return;
      if (ATy->getScalarSizeInBits() < TruncTy->getScalarSizeInBits()) {
        unsigned ExtOpcode = match(R.getOperand(0), m_SExt(m_VPValue()))
                                 ? Instruction::SExt
                                 : Instruction::ZExt;
        auto *VPC =
            new VPWidenCastRecipe(Instruction::CastOps(ExtOpcode), A, TruncTy);
        if (auto *UnderlyingExt = R.getOperand(0)->getUnderlyingValue()) {
          // UnderlyingExt has distinct return type, used to retain legacy cost.
          VPC->setUnderlyingValue(UnderlyingExt);
        }
        VPC->insertBefore(&R);
        Trunc->replaceAllUsesWith(VPC);
      } else if (ATy->getScalarSizeInBits() > TruncTy->getScalarSizeInBits()) {
        auto *VPC = new VPWidenCastRecipe(Instruction::Trunc, A, TruncTy);
        VPC->insertBefore(&R);
        Trunc->replaceAllUsesWith(VPC);
      }
    }
#ifndef NDEBUG
    // Verify that the cached type info for both A and its users is still
    // accurate by comparing it to freshly computed types.
    VPTypeAnalysis TypeInfo2(
        R.getParent()->getPlan()->getCanonicalIV()->getScalarType(),
        TypeInfo.getContext());
    assert(TypeInfo.inferScalarType(A) == TypeInfo2.inferScalarType(A));
    for (VPUser *U : A->users()) {
      auto *R = dyn_cast<VPRecipeBase>(U);
      if (!R)
        continue;
      for (VPValue *VPV : R->definedValues())
        assert(TypeInfo.inferScalarType(VPV) == TypeInfo2.inferScalarType(VPV));
    }
#endif
  }

  // Simplify (X && Y) || (X && !Y) -> X.
  // TODO: Split up into simpler, modular combines: (X && Y) || (X && Z) into X
  // && (Y || Z) and (X || !X) into true. This requires queuing newly created
  // recipes to be visited during simplification.
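  // (If Y is true this is (X && true) || (X && false) == X; if Y is false it
  // is (X && false) || (X && true) == X, so the result is always X.)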
  VPValue *X, *Y, *X1, *Y1;
  if (match(&R,
            m_c_BinaryOr(m_LogicalAnd(m_VPValue(X), m_VPValue(Y)),
                         m_LogicalAnd(m_VPValue(X1), m_Not(m_VPValue(Y1))))) &&
      X == X1 && Y == Y1) {
    R.getVPSingleValue()->replaceAllUsesWith(X);
    return;
  }

  if (match(&R, m_c_Mul(m_VPValue(A), m_SpecificInt(1))))
    return R.getVPSingleValue()->replaceAllUsesWith(A);
}

/// Try to simplify the recipes in \p Plan.
static void simplifyRecipes(VPlan &Plan, LLVMContext &Ctx) {
  ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>> RPOT(
      Plan.getEntry());
  VPTypeAnalysis TypeInfo(Plan.getCanonicalIV()->getScalarType(), Ctx);
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) {
    for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
      simplifyRecipe(R, TypeInfo);
    }
  }
}

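/// Illustrative example for the transformation below (not taken from the
/// source): with MinBWs[%add] = 8 for a 32-bit %add = add i32 %a, %b, the
/// operands are truncated to i8, the add is performed in i8 and its result is
/// zero-extended back to i32:
///   %a.t  = trunc i32 %a to i8
///   %b.t  = trunc i32 %b to i8
///   %add8 = add i8 %a.t, %b.t
///   %add  = zext i8 %add8 to i32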
void VPlanTransforms::truncateToMinimalBitwidths(
    VPlan &Plan, const MapVector<Instruction *, uint64_t> &MinBWs,
    LLVMContext &Ctx) {
#ifndef NDEBUG
  // Count the processed recipes and cross check the count later with MinBWs
  // size, to make sure all entries in MinBWs have been handled.
  unsigned NumProcessedRecipes = 0;
#endif
  // Keep track of created truncates, so they can be re-used. Note that we
  // cannot use RAUW after creating a new truncate, as this could make other
  // uses have different types for their operands, making them invalidly typed.
  DenseMap<VPValue *, VPWidenCastRecipe *> ProcessedTruncs;
  VPTypeAnalysis TypeInfo(Plan.getCanonicalIV()->getScalarType(), Ctx);
  VPBasicBlock *PH = Plan.getEntry();
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
           vp_depth_first_deep(Plan.getVectorLoopRegion()))) {
    for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
      if (!isa<VPWidenRecipe, VPWidenCastRecipe, VPReplicateRecipe,
               VPWidenSelectRecipe, VPWidenLoadRecipe>(&R))
        continue;

      VPValue *ResultVPV = R.getVPSingleValue();
      auto *UI = cast_or_null<Instruction>(ResultVPV->getUnderlyingValue());
      unsigned NewResSizeInBits = MinBWs.lookup(UI);
      if (!NewResSizeInBits)
        continue;

#ifndef NDEBUG
      NumProcessedRecipes++;
#endif
      // If the value wasn't vectorized, we must maintain the original scalar
      // type. Skip those here, after incrementing NumProcessedRecipes. Also
      // skip casts which do not need to be handled explicitly here, as
      // redundant casts will be removed during recipe simplification.
      if (isa<VPReplicateRecipe, VPWidenCastRecipe>(&R)) {
#ifndef NDEBUG
        // If any of the operands is a live-in and not used by VPWidenRecipe or
        // VPWidenSelectRecipe, but in MinBWs, make sure it is counted as
        // processed as well. When MinBWs is currently constructed, there is no
        // information about whether recipes are widened or replicated and in
        // case they are replicated the operands are not truncated. Counting
        // them here ensures we do not miss any recipes in MinBWs.
        // TODO: Remove once the analysis is done on VPlan.
        for (VPValue *Op : R.operands()) {
          if (!Op->isLiveIn())
            continue;
          auto *UV = dyn_cast_or_null<Instruction>(Op->getUnderlyingValue());
          if (UV && MinBWs.contains(UV) && !ProcessedTruncs.contains(Op) &&
              all_of(Op->users(), [](VPUser *U) {
                return !isa<VPWidenRecipe, VPWidenSelectRecipe>(U);
              })) {
            // Add an entry to ProcessedTruncs to avoid counting the same
            // operand multiple times.
            ProcessedTruncs[Op] = nullptr;
            NumProcessedRecipes += 1;
          }
        }
#endif
        continue;
      }

      Type *OldResTy = TypeInfo.inferScalarType(ResultVPV);
      unsigned OldResSizeInBits = OldResTy->getScalarSizeInBits();
      assert(OldResTy->isIntegerTy() && "only integer types supported");
      (void)OldResSizeInBits;

      auto *NewResTy = IntegerType::get(Ctx, NewResSizeInBits);

      // Any wrapping introduced by shrinking this operation shouldn't be
      // considered undefined behavior. So, we can't unconditionally copy
      // arithmetic wrapping flags to VPW.
      if (auto *VPW = dyn_cast<VPRecipeWithIRFlags>(&R))
        VPW->dropPoisonGeneratingFlags();

      using namespace llvm::VPlanPatternMatch;
      if (OldResSizeInBits != NewResSizeInBits &&
          !match(&R, m_Binary<Instruction::ICmp>(m_VPValue(), m_VPValue()))) {
        // Extend result to original width.
        auto *Ext =
            new VPWidenCastRecipe(Instruction::ZExt, ResultVPV, OldResTy);
        Ext->insertAfter(&R);
        ResultVPV->replaceAllUsesWith(Ext);
        Ext->setOperand(0, ResultVPV);
        assert(OldResSizeInBits > NewResSizeInBits && "Nothing to shrink?");
      } else
        assert(
            match(&R, m_Binary<Instruction::ICmp>(m_VPValue(), m_VPValue())) &&
            "Only ICmps should not need extending the result.");

      assert(!isa<VPWidenStoreRecipe>(&R) && "stores cannot be narrowed");
      if (isa<VPWidenLoadRecipe>(&R))
        continue;

      // Shrink operands by introducing truncates as needed.
      unsigned StartIdx = isa<VPWidenSelectRecipe>(&R) ? 1 : 0;
      for (unsigned Idx = StartIdx; Idx != R.getNumOperands(); ++Idx) {
        auto *Op = R.getOperand(Idx);
        unsigned OpSizeInBits =
            TypeInfo.inferScalarType(Op)->getScalarSizeInBits();
        if (OpSizeInBits == NewResSizeInBits)
          continue;
        assert(OpSizeInBits > NewResSizeInBits && "nothing to truncate");
        auto [ProcessedIter, IterIsEmpty] =
            ProcessedTruncs.insert({Op, nullptr});
        VPWidenCastRecipe *NewOp =
            IterIsEmpty
                ? new VPWidenCastRecipe(Instruction::Trunc, Op, NewResTy)
                : ProcessedIter->second;
        R.setOperand(Idx, NewOp);
        if (!IterIsEmpty)
          continue;
        ProcessedIter->second = NewOp;
        if (!Op->isLiveIn()) {
          NewOp->insertBefore(&R);
        } else {
          PH->appendRecipe(NewOp);
#ifndef NDEBUG
          auto *OpInst = dyn_cast<Instruction>(Op->getLiveInIRValue());
          bool IsContained = MinBWs.contains(OpInst);
          NumProcessedRecipes += IsContained;
#endif
        }
      }
    }
  }

  assert(MinBWs.size() == NumProcessedRecipes &&
         "some entries in MinBWs haven't been processed");
}

void VPlanTransforms::optimize(VPlan &Plan, ScalarEvolution &SE) {
  removeRedundantCanonicalIVs(Plan);
  removeRedundantInductionCasts(Plan);

  simplifyRecipes(Plan, SE.getContext());
  legalizeAndOptimizeInductions(Plan, SE);
  removeDeadRecipes(Plan);

  createAndOptimizeReplicateRegions(Plan);

  removeRedundantExpandSCEVRecipes(Plan);
  mergeBlocksIntoPredecessors(Plan);
}

// Add a VPActiveLaneMaskPHIRecipe and related recipes to \p Plan and replace
// the loop terminator with a branch-on-cond recipe with the negated
// active-lane-mask as operand. Note that this turns the loop into an
// uncountable one. Only the existing terminator is replaced, all other
// existing recipes/users remain unchanged, except for poison-generating flags
// being dropped from the canonical IV increment. Return the created
// VPActiveLaneMaskPHIRecipe.
//
// The function uses the following definitions:
//
//  %TripCount = DataAndControlFlowWithoutRuntimeCheck ?
//    calculate-trip-count-minus-VF (original TC) : original TC
//  %IncrementValue = DataAndControlFlowWithoutRuntimeCheck ?
//     CanonicalIVPhi : CanonicalIVIncrement
//  %StartV is the canonical induction start value.
//
// The function adds the following recipes:
//
// vector.ph:
//   %TripCount = calculate-trip-count-minus-VF (original TC)
//       [if DataAndControlFlowWithoutRuntimeCheck]
//   %EntryInc = canonical-iv-increment-for-part %StartV
//   %EntryALM = active-lane-mask %EntryInc, %TripCount
//
// vector.body:
//   ...
//   %P = active-lane-mask-phi [ %EntryALM, %vector.ph ], [ %ALM, %vector.body ]
//   ...
//   %InLoopInc = canonical-iv-increment-for-part %IncrementValue
//   %ALM = active-lane-mask %InLoopInc, TripCount
//   %Negated = Not %ALM
//   branch-on-cond %Negated
//
static VPActiveLaneMaskPHIRecipe *addVPLaneMaskPhiAndUpdateExitBranch(
    VPlan &Plan, bool DataAndControlFlowWithoutRuntimeCheck) {
  VPRegionBlock *TopRegion = Plan.getVectorLoopRegion();
  VPBasicBlock *EB = TopRegion->getExitingBasicBlock();
  auto *CanonicalIVPHI = Plan.getCanonicalIV();
  VPValue *StartV = CanonicalIVPHI->getStartValue();

  auto *CanonicalIVIncrement =
      cast<VPInstruction>(CanonicalIVPHI->getBackedgeValue());
  // TODO: Check if dropping the flags is needed if
  // !DataAndControlFlowWithoutRuntimeCheck.
  CanonicalIVIncrement->dropPoisonGeneratingFlags();
  DebugLoc DL = CanonicalIVIncrement->getDebugLoc();
  // We can't use StartV directly in the ActiveLaneMask VPInstruction, since
  // we have to take unrolling into account. Each part needs to start at
  //   Part * VF
  auto *VecPreheader = cast<VPBasicBlock>(TopRegion->getSinglePredecessor());
  VPBuilder Builder(VecPreheader);

  // Create the ActiveLaneMask instruction using the correct start values.
  VPValue *TC = Plan.getTripCount();

  VPValue *TripCount, *IncrementValue;
  if (!DataAndControlFlowWithoutRuntimeCheck) {
    // When the loop is guarded by a runtime overflow check for the loop
    // induction variable increment by VF, we can increment the value before
    // the get.active.lane mask and use the unmodified tripcount.
    IncrementValue = CanonicalIVIncrement;
    TripCount = TC;
  } else {
    // When avoiding a runtime check, the active.lane.mask inside the loop
    // uses a modified trip count and the induction variable increment is
    // done after the active.lane.mask intrinsic is called.
    IncrementValue = CanonicalIVPHI;
    TripCount = Builder.createNaryOp(VPInstruction::CalculateTripCountMinusVF,
                                     {TC}, DL);
  }
  auto *EntryIncrement = Builder.createOverflowingOp(
      VPInstruction::CanonicalIVIncrementForPart, {StartV}, {false, false}, DL,
      "index.part.next");

  // Create the active lane mask instruction in the VPlan preheader.
  auto *EntryALM =
      Builder.createNaryOp(VPInstruction::ActiveLaneMask, {EntryIncrement, TC},
                           DL, "active.lane.mask.entry");

  // Now create the ActiveLaneMaskPhi recipe in the main loop using the
  // preheader ActiveLaneMask instruction.
  auto LaneMaskPhi = new VPActiveLaneMaskPHIRecipe(EntryALM, DebugLoc());
  LaneMaskPhi->insertAfter(CanonicalIVPHI);

  // Create the active lane mask for the next iteration of the loop before the
  // original terminator.
  VPRecipeBase *OriginalTerminator = EB->getTerminator();
  Builder.setInsertPoint(OriginalTerminator);
  auto *InLoopIncrement =
      Builder.createOverflowingOp(VPInstruction::CanonicalIVIncrementForPart,
                                  {IncrementValue}, {false, false}, DL);
  auto *ALM = Builder.createNaryOp(VPInstruction::ActiveLaneMask,
                                   {InLoopIncrement, TripCount}, DL,
                                   "active.lane.mask.next");
  LaneMaskPhi->addOperand(ALM);

  // Replace the original terminator with BranchOnCond. We have to invert the
  // mask here because a true condition means jumping to the exit block.
  auto *NotMask = Builder.createNot(ALM, DL);
  Builder.createNaryOp(VPInstruction::BranchOnCond, {NotMask}, DL);
  OriginalTerminator->eraseFromParent();
  return LaneMaskPhi;
}

/// Collect all VPValues representing a header mask through the (ICMP_ULE,
/// WideCanonicalIV, backedge-taken-count) pattern.
/// TODO: Introduce explicit recipe for header-mask instead of searching
/// for the header-mask pattern manually.
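/// For example (illustrative), a header mask produced by tail folding looks
/// like:
///   %wide.iv = WIDEN-CANONICAL-INDUCTION %canonical.iv
///   %mask    = icmp ule %wide.iv, %backedge.taken.count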
static SmallVector<VPValue *> collectAllHeaderMasks(VPlan &Plan) {
  SmallVector<VPValue *> WideCanonicalIVs;
  auto *FoundWidenCanonicalIVUser =
      find_if(Plan.getCanonicalIV()->users(),
              [](VPUser *U) { return isa<VPWidenCanonicalIVRecipe>(U); });
  assert(count_if(Plan.getCanonicalIV()->users(),
                  [](VPUser *U) { return isa<VPWidenCanonicalIVRecipe>(U); }) <=
             1 &&
         "Must have at most one VPWideCanonicalIVRecipe");
  if (FoundWidenCanonicalIVUser != Plan.getCanonicalIV()->users().end()) {
    auto *WideCanonicalIV =
        cast<VPWidenCanonicalIVRecipe>(*FoundWidenCanonicalIVUser);
    WideCanonicalIVs.push_back(WideCanonicalIV);
  }

  // Also include VPWidenIntOrFpInductionRecipes that represent a widened
  // version of the canonical induction.
  VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
  for (VPRecipeBase &Phi : HeaderVPBB->phis()) {
    auto *WidenOriginalIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
    if (WidenOriginalIV && WidenOriginalIV->isCanonical())
      WideCanonicalIVs.push_back(WidenOriginalIV);
  }

  // Walk users of wide canonical IVs and collect all compares of the form
  // (ICMP_ULE, WideCanonicalIV, backedge-taken-count).
  SmallVector<VPValue *> HeaderMasks;
  for (auto *Wide : WideCanonicalIVs) {
    for (VPUser *U : SmallVector<VPUser *>(Wide->users())) {
      auto *HeaderMask = dyn_cast<VPInstruction>(U);
      if (!HeaderMask || !vputils::isHeaderMask(HeaderMask, Plan))
        continue;

      assert(HeaderMask->getOperand(0) == Wide &&
             "WidenCanonicalIV must be the first operand of the compare");
      HeaderMasks.push_back(HeaderMask);
    }
  }
  return HeaderMasks;
}

void VPlanTransforms::addActiveLaneMask(
    VPlan &Plan, bool UseActiveLaneMaskForControlFlow,
    bool DataAndControlFlowWithoutRuntimeCheck) {
  assert((!DataAndControlFlowWithoutRuntimeCheck ||
          UseActiveLaneMaskForControlFlow) &&
         "DataAndControlFlowWithoutRuntimeCheck implies "
         "UseActiveLaneMaskForControlFlow");

  auto FoundWidenCanonicalIVUser =
      find_if(Plan.getCanonicalIV()->users(),
              [](VPUser *U) { return isa<VPWidenCanonicalIVRecipe>(U); });
  assert(FoundWidenCanonicalIVUser &&
         "Must have widened canonical IV when tail folding!");
  auto *WideCanonicalIV =
      cast<VPWidenCanonicalIVRecipe>(*FoundWidenCanonicalIVUser);
  VPSingleDefRecipe *LaneMask;
  if (UseActiveLaneMaskForControlFlow) {
    LaneMask = addVPLaneMaskPhiAndUpdateExitBranch(
        Plan, DataAndControlFlowWithoutRuntimeCheck);
  } else {
    VPBuilder B = VPBuilder::getToInsertAfter(WideCanonicalIV);
    LaneMask = B.createNaryOp(VPInstruction::ActiveLaneMask,
                              {WideCanonicalIV, Plan.getTripCount()}, nullptr,
                              "active.lane.mask");
  }

  // Walk users of WideCanonicalIV and replace all compares of the form
  // (ICMP_ULE, WideCanonicalIV, backedge-taken-count) with an
  // active-lane-mask.
  for (VPValue *HeaderMask : collectAllHeaderMasks(Plan))
    HeaderMask->replaceAllUsesWith(LaneMask);
}

/// Add a VPEVLBasedIVPHIRecipe and related recipes to \p Plan and
/// replaces all uses except the canonical IV increment of
/// VPCanonicalIVPHIRecipe with a VPEVLBasedIVPHIRecipe. VPCanonicalIVPHIRecipe
/// is used only for loop iterations counting after this transformation.
///
/// The function uses the following definitions:
///  %StartV is the canonical induction start value.
///
/// The function adds the following recipes:
///
/// vector.ph:
/// ...
///
/// vector.body:
/// ...
/// %EVLPhi = EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI [ %StartV, %vector.ph ],
///                                               [ %NextEVLIV, %vector.body ]
/// %VPEVL = EXPLICIT-VECTOR-LENGTH %EVLPhi, original TC
/// ...
/// %NextEVLIV = add IVSize (cast i32 %VPEVL to IVSize), %EVLPhi
/// ...
///
bool VPlanTransforms::tryAddExplicitVectorLength(VPlan &Plan) {
  VPBasicBlock *Header = Plan.getVectorLoopRegion()->getEntryBasicBlock();
  // The transform updates all users of inductions to work based on EVL, instead
  // of the VF directly. At the moment, widened inductions cannot be updated, so
  // bail out if the plan contains any.
  bool ContainsWidenInductions = any_of(Header->phis(), [](VPRecipeBase &Phi) {
    return isa<VPWidenIntOrFpInductionRecipe, VPWidenPointerInductionRecipe>(
        &Phi);
  });
  // FIXME: Remove this once we can transform (select header_mask, true_value,
  // false_value) into vp.merge.
  bool ContainsOutloopReductions =
      any_of(Header->phis(), [&](VPRecipeBase &Phi) {
        auto *R = dyn_cast<VPReductionPHIRecipe>(&Phi);
        return R && !R->isInLoop();
      });
  if (ContainsWidenInductions || ContainsOutloopReductions)
    return false;

  auto *CanonicalIVPHI = Plan.getCanonicalIV();
  VPValue *StartV = CanonicalIVPHI->getStartValue();

  // Create the ExplicitVectorLengthPhi recipe in the main loop.
  auto *EVLPhi = new VPEVLBasedIVPHIRecipe(StartV, DebugLoc());
  EVLPhi->insertAfter(CanonicalIVPHI);
  auto *VPEVL = new VPInstruction(VPInstruction::ExplicitVectorLength,
                                  {EVLPhi, Plan.getTripCount()});
  VPEVL->insertBefore(*Header, Header->getFirstNonPhi());

  auto *CanonicalIVIncrement =
      cast<VPInstruction>(CanonicalIVPHI->getBackedgeValue());
  VPSingleDefRecipe *OpVPEVL = VPEVL;
  if (unsigned IVSize = CanonicalIVPHI->getScalarType()->getScalarSizeInBits();
      IVSize != 32) {
    OpVPEVL = new VPScalarCastRecipe(IVSize < 32 ? Instruction::Trunc
                                                 : Instruction::ZExt,
                                     OpVPEVL, CanonicalIVPHI->getScalarType());
    OpVPEVL->insertBefore(CanonicalIVIncrement);
  }
  auto *NextEVLIV =
      new VPInstruction(Instruction::Add, {OpVPEVL, EVLPhi},
                        {CanonicalIVIncrement->hasNoUnsignedWrap(),
                         CanonicalIVIncrement->hasNoSignedWrap()},
                        CanonicalIVIncrement->getDebugLoc(), "index.evl.next");
  NextEVLIV->insertBefore(CanonicalIVIncrement);
  EVLPhi->addOperand(NextEVLIV);

  for (VPValue *HeaderMask : collectAllHeaderMasks(Plan)) {
    for (VPUser *U : collectUsersRecursively(HeaderMask)) {
      VPRecipeBase *NewRecipe = nullptr;
      auto *CurRecipe = dyn_cast<VPRecipeBase>(U);
      if (!CurRecipe)
        continue;

      auto GetNewMask = [&](VPValue *OrigMask) -> VPValue * {
        assert(OrigMask && "Unmasked recipe when folding tail");
        return HeaderMask == OrigMask ? nullptr : OrigMask;
      };
      if (auto *MemR = dyn_cast<VPWidenMemoryRecipe>(CurRecipe)) {
        VPValue *NewMask = GetNewMask(MemR->getMask());
        if (auto *L = dyn_cast<VPWidenLoadRecipe>(MemR))
          NewRecipe = new VPWidenLoadEVLRecipe(L, VPEVL, NewMask);
        else if (auto *S = dyn_cast<VPWidenStoreRecipe>(MemR))
          NewRecipe = new VPWidenStoreEVLRecipe(S, VPEVL, NewMask);
        else
          llvm_unreachable("unsupported recipe");
      } else if (auto *RedR = dyn_cast<VPReductionRecipe>(CurRecipe)) {
        NewRecipe = new VPReductionEVLRecipe(RedR, VPEVL,
                                             GetNewMask(RedR->getCondOp()));
      }

      if (NewRecipe) {
        [[maybe_unused]] unsigned NumDefVal = NewRecipe->getNumDefinedValues();
        assert(NumDefVal == CurRecipe->getNumDefinedValues() &&
               "New recipe must define the same number of values as the "
               "original.");
        assert(
            NumDefVal <= 1 &&
            "Only supports recipes with a single definition or without users.");
        NewRecipe->insertBefore(CurRecipe);
        if (isa<VPSingleDefRecipe, VPWidenLoadEVLRecipe>(NewRecipe)) {
          VPValue *CurVPV = CurRecipe->getVPSingleValue();
          CurVPV->replaceAllUsesWith(NewRecipe->getVPSingleValue());
        }
        CurRecipe->eraseFromParent();
      }
    }
    recursivelyDeleteDeadRecipes(HeaderMask);
  }
  // Replace all uses of VPCanonicalIVPHIRecipe by
  // VPEVLBasedIVPHIRecipe except for the canonical IV increment.
  CanonicalIVPHI->replaceAllUsesWith(EVLPhi);
  CanonicalIVIncrement->setOperand(0, CanonicalIVPHI);
  // TODO: support unroll factor > 1.
  Plan.setUF(1);
  return true;
}

1524 VPlan &Plan, function_ref<bool(BasicBlock *)> BlockNeedsPredication) {
1525 // Collect recipes in the backward slice of `Root` that may generate a poison
1526 // value that is used after vectorization.
1528 auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
1530 Worklist.push_back(Root);
1531
1532 // Traverse the backward slice of Root through its use-def chain.
1533 while (!Worklist.empty()) {
1534 VPRecipeBase *CurRec = Worklist.back();
1535 Worklist.pop_back();
1536
1537 if (!Visited.insert(CurRec).second)
1538 continue;
1539
1540 // Prune search if we find another recipe generating a widen memory
1541 // instruction. Widen memory instructions involved in address computation
1542 // will lead to gather/scatter instructions, which don't need to be
1543 // handled.
1544 if (isa<VPWidenMemoryRecipe>(CurRec) || isa<VPInterleaveRecipe>(CurRec) ||
1545 isa<VPScalarIVStepsRecipe>(CurRec) || isa<VPHeaderPHIRecipe>(CurRec))
1546 continue;
1547
1548 // This recipe contributes to the address computation of a widen
1549 // load/store. If the underlying instruction has poison-generating flags,
1550 // drop them directly.
1551 if (auto *RecWithFlags = dyn_cast<VPRecipeWithIRFlags>(CurRec)) {
1552 VPValue *A, *B;
1553 using namespace llvm::VPlanPatternMatch;
1554 // Dropping disjoint from an OR may yield incorrect results, as some
1555 // analysis may have converted it to an Add implicitly (e.g. SCEV used
1556 // for dependence analysis). Instead, replace it with an equivalent Add.
1557 // This is possible as all users of the disjoint OR only access lanes
1558 // where the operands are disjoint or poison otherwise.
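 // For example: with A = 0b0100 and B = 0b0011 no bit is set in both
 // operands, so A | B == A + B == 0b0111; the Add below is created with
 // wrap flags {false, false} (neither nuw nor nsw), so it cannot itself
 // generate poison.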
1559 if (match(RecWithFlags, m_BinaryOr(m_VPValue(A), m_VPValue(B))) &&
1560 RecWithFlags->isDisjoint()) {
1561 VPBuilder Builder(RecWithFlags);
1562 VPInstruction *New = Builder.createOverflowingOp(
1563 Instruction::Add, {A, B}, {false, false},
1564 RecWithFlags->getDebugLoc());
1565 New->setUnderlyingValue(RecWithFlags->getUnderlyingValue());
1566 RecWithFlags->replaceAllUsesWith(New);
1567 RecWithFlags->eraseFromParent();
1568 CurRec = New;
1569 } else
1570 RecWithFlags->dropPoisonGeneratingFlags();
1571 } else {
1572 Instruction *Instr = dyn_cast_or_null<Instruction>(
1573 CurRec->getVPSingleValue()->getUnderlyingValue());
1574 (void)Instr;
1575 assert((!Instr || !Instr->hasPoisonGeneratingFlags()) &&
1576 "found instruction with poison generating flags not covered by "
1577 "VPRecipeWithIRFlags");
1578 }
1579
1580 // Add new definitions to the worklist.
1581 for (VPValue *operand : CurRec->operands())
1582 if (VPRecipeBase *OpDef = operand->getDefiningRecipe())
1583 Worklist.push_back(OpDef);
1584 }
1585 });
1586
1587 // Traverse all the recipes in the VPlan and collect the poison-generating
1588 // recipes in the backward slice starting at the address of a VPWidenRecipe or
1589 // VPInterleaveRecipe.
1590 auto Iter = vp_depth_first_deep(Plan.getEntry());
1591 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
1592 for (VPRecipeBase &Recipe : *VPBB) {
1593 if (auto *WidenRec = dyn_cast<VPWidenMemoryRecipe>(&Recipe)) {
1594 Instruction &UnderlyingInstr = WidenRec->getIngredient();
1595 VPRecipeBase *AddrDef = WidenRec->getAddr()->getDefiningRecipe();
1596 if (AddrDef && WidenRec->isConsecutive() &&
1597 BlockNeedsPredication(UnderlyingInstr.getParent()))
1598 collectPoisonGeneratingInstrsInBackwardSlice(AddrDef);
1599 } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
1600 VPRecipeBase *AddrDef = InterleaveRec->getAddr()->getDefiningRecipe();
1601 if (AddrDef) {
1602 // Check if any member of the interleave group needs predication.
1603 const InterleaveGroup<Instruction> *InterGroup =
1604 InterleaveRec->getInterleaveGroup();
1605 bool NeedPredication = false;
1606 for (int I = 0, NumMembers = InterGroup->getNumMembers();
1607 I < NumMembers; ++I) {
1608 Instruction *Member = InterGroup->getMember(I);
1609 if (Member)
1610 NeedPredication |= BlockNeedsPredication(Member->getParent());
1611 }
1612
1613 if (NeedPredication)
1614 collectPoisonGeneratingInstrsInBackwardSlice(AddrDef);
1615 }
1616 }
1617 }
1618 }
1619}
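
// A minimal editorial sketch (plain C++, not the VPlan API; Node and
// dropPoisonFlagsInBackwardSlice are hypothetical names) of the worklist
// walk used by dropPoisonGeneratingRecipes above: visit the recipes feeding
// an address computation transitively through their operands, prune at
// recipes that end the slice, and clear poison-generating flags
// (e.g. nuw/nsw/inbounds) on everything else.
#include <unordered_set>
#include <vector>

struct Node {
  std::vector<Node *> Operands;
  bool EndsSlice = false;   // e.g. a widened memory recipe or header phi
  bool PoisonFlags = false; // e.g. nuw/nsw/inbounds on the ingredient
};

void dropPoisonFlagsInBackwardSlice(Node *Root) {
  std::vector<Node *> Worklist{Root};
  std::unordered_set<Node *> Visited;
  while (!Worklist.empty()) {
    Node *Cur = Worklist.back();
    Worklist.pop_back();
    if (!Visited.insert(Cur).second)
      continue; // already processed via another path
    if (Cur->EndsSlice)
      continue; // prune the search here (e.g. becomes a gather/scatter)
    Cur->PoisonFlags = false; // drop flags that could produce poison
    for (Node *Op : Cur->Operands)
      Worklist.push_back(Op); // extend the backward slice
  }
}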