//===-- VPlanTransforms.cpp - Utility VPlan to VPlan transforms ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements a set of utility VPlan to VPlan transformations.
///
//===----------------------------------------------------------------------===//

#include "VPlanTransforms.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanAnalysis.h"
#include "VPlanCFG.h"
#include "VPlanDominatorTree.h"
#include "VPlanPatternMatch.h"
#include "VPlanUtils.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/PatternMatch.h"

using namespace llvm;

void VPlanTransforms::VPInstructionsToVPRecipes(
    VPlanPtr &Plan,
    function_ref<const InductionDescriptor *(PHINode *)>
        GetIntOrFpInductionDescriptor,
    ScalarEvolution &SE, const TargetLibraryInfo &TLI) {

  ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>> RPOT(
      Plan->getVectorLoopRegion());
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) {
    // Skip blocks outside region.
    if (!VPBB->getParent())
      break;
    VPRecipeBase *Term = VPBB->getTerminator();
    auto EndIter = Term ? Term->getIterator() : VPBB->end();
    // Introduce each ingredient into VPlan.
    for (VPRecipeBase &Ingredient :
         make_early_inc_range(make_range(VPBB->begin(), EndIter))) {

      VPValue *VPV = Ingredient.getVPSingleValue();
      Instruction *Inst = cast<Instruction>(VPV->getUnderlyingValue());

      VPRecipeBase *NewRecipe = nullptr;
      if (auto *VPPhi = dyn_cast<VPWidenPHIRecipe>(&Ingredient)) {
        auto *Phi = cast<PHINode>(VPPhi->getUnderlyingValue());
        const auto *II = GetIntOrFpInductionDescriptor(Phi);
        if (!II)
          continue;

        VPValue *Start = Plan->getOrAddLiveIn(II->getStartValue());
        VPValue *Step =
            vputils::getOrCreateVPValueForSCEVExpr(*Plan, II->getStep(), SE);
        NewRecipe = new VPWidenIntOrFpInductionRecipe(
            Phi, Start, Step, &Plan->getVF(), *II, Ingredient.getDebugLoc());
      } else {
        assert(isa<VPInstruction>(&Ingredient) &&
               "only VPInstructions expected here");
        assert(!isa<PHINode>(Inst) && "phis should be handled above");
        // Create VPWidenMemoryRecipe for loads and stores.
        if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) {
          NewRecipe = new VPWidenLoadRecipe(
              *Load, Ingredient.getOperand(0), nullptr /*Mask*/,
              false /*Consecutive*/, false /*Reverse*/,
              Ingredient.getDebugLoc());
        } else if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) {
          NewRecipe = new VPWidenStoreRecipe(
              *Store, Ingredient.getOperand(1), Ingredient.getOperand(0),
              nullptr /*Mask*/, false /*Consecutive*/, false /*Reverse*/,
              Ingredient.getDebugLoc());
        } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
          NewRecipe = new VPWidenGEPRecipe(GEP, Ingredient.operands());
        } else if (CallInst *CI = dyn_cast<CallInst>(Inst)) {
          NewRecipe = new VPWidenIntrinsicRecipe(
              *CI, getVectorIntrinsicIDForCall(CI, &TLI),
              {Ingredient.op_begin(), Ingredient.op_end() - 1}, CI->getType(),
              CI->getDebugLoc());
        } else if (SelectInst *SI = dyn_cast<SelectInst>(Inst)) {
          NewRecipe = new VPWidenSelectRecipe(*SI, Ingredient.operands());
        } else if (auto *CI = dyn_cast<CastInst>(Inst)) {
          NewRecipe = new VPWidenCastRecipe(
              CI->getOpcode(), Ingredient.getOperand(0), CI->getType(), *CI);
        } else {
          NewRecipe = new VPWidenRecipe(*Inst, Ingredient.operands());
        }
      }

      NewRecipe->insertBefore(&Ingredient);
      if (NewRecipe->getNumDefinedValues() == 1)
        VPV->replaceAllUsesWith(NewRecipe->getVPSingleValue());
      else
        assert(NewRecipe->getNumDefinedValues() == 0 &&
               "Only recipes with zero or one defined values expected");
      Ingredient.eraseFromParent();
    }
  }
}

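/// Sink scalar operands of recipes in replicate regions closer to their use,
/// so they are only computed when the predicated path actually executes.
/// E.g., roughly (illustrative VPlan notation, names made up):
///
///   vector.body:
///     %steps = SCALAR-STEPS ...     ; only used inside pred.store.if
///     ...
///   pred.store.if:
///     REPLICATE store %v, %addr     ; uses %steps
///
/// becomes
///
///   pred.store.if:
///     %steps = SCALAR-STEPS ...     ; sunk next to its single user
///     REPLICATE store %v, %addr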
static bool sinkScalarOperands(VPlan &Plan) {
  auto Iter = vp_depth_first_deep(Plan.getEntry());
  bool Changed = false;
  // First, collect the operands of all recipes in replicate blocks as seeds
  // for sinking.
  SetVector<std::pair<VPBasicBlock *, VPSingleDefRecipe *>> WorkList;
  for (VPRegionBlock *VPR : VPBlockUtils::blocksOnly<VPRegionBlock>(Iter)) {
    VPBasicBlock *EntryVPBB = VPR->getEntryBasicBlock();
    if (!VPR->isReplicator() || EntryVPBB->getSuccessors().size() != 2)
      continue;
    VPBasicBlock *VPBB = dyn_cast<VPBasicBlock>(EntryVPBB->getSuccessors()[0]);
    if (!VPBB || VPBB->getSingleSuccessor() != VPR->getExitingBasicBlock())
      continue;
    for (auto &Recipe : *VPBB) {
      for (VPValue *Op : Recipe.operands())
        if (auto *Def =
                dyn_cast_or_null<VPSingleDefRecipe>(Op->getDefiningRecipe()))
          WorkList.insert(std::make_pair(VPBB, Def));
    }
  }

  bool ScalarVFOnly = Plan.hasScalarVFOnly();
  // Try to sink each replicate or scalar IV steps recipe in the worklist.
  for (unsigned I = 0; I != WorkList.size(); ++I) {
    VPBasicBlock *SinkTo;
    VPSingleDefRecipe *SinkCandidate;
    std::tie(SinkTo, SinkCandidate) = WorkList[I];
    if (SinkCandidate->getParent() == SinkTo ||
        SinkCandidate->mayHaveSideEffects() ||
        SinkCandidate->mayReadOrWriteMemory())
      continue;
    if (auto *RepR = dyn_cast<VPReplicateRecipe>(SinkCandidate)) {
      if (!ScalarVFOnly && RepR->isUniform())
        continue;
    } else if (!isa<VPScalarIVStepsRecipe>(SinkCandidate))
      continue;

    bool NeedsDuplicating = false;
    // All recipe users of the sink candidate must be in the same block SinkTo,
    // or all users outside of SinkTo must be uniform-after-vectorization
    // (i.e., only the first lane is used). In the latter case, we need to
    // duplicate SinkCandidate.
    auto CanSinkWithUser = [SinkTo, &NeedsDuplicating,
                            SinkCandidate](VPUser *U) {
      auto *UI = cast<VPRecipeBase>(U);
      if (UI->getParent() == SinkTo)
        return true;
      NeedsDuplicating = UI->onlyFirstLaneUsed(SinkCandidate);
      // We only know how to duplicate VPReplicateRecipes for now.
      return NeedsDuplicating && isa<VPReplicateRecipe>(SinkCandidate);
    };
    if (!all_of(SinkCandidate->users(), CanSinkWithUser))
      continue;

    if (NeedsDuplicating) {
      if (ScalarVFOnly)
        continue;
      Instruction *I = SinkCandidate->getUnderlyingInstr();
      auto *Clone = new VPReplicateRecipe(I, SinkCandidate->operands(), true);
      // TODO: add ".cloned" suffix to name of Clone's VPValue.

      Clone->insertBefore(SinkCandidate);
      SinkCandidate->replaceUsesWithIf(Clone, [SinkTo](VPUser &U, unsigned) {
        return cast<VPRecipeBase>(&U)->getParent() != SinkTo;
      });
    }
    SinkCandidate->moveBefore(*SinkTo, SinkTo->getFirstNonPhi());
    for (VPValue *Op : SinkCandidate->operands())
      if (auto *Def =
              dyn_cast_or_null<VPSingleDefRecipe>(Op->getDefiningRecipe()))
        WorkList.insert(std::make_pair(SinkTo, Def));
    Changed = true;
  }
  return Changed;
}

/// If \p R is a region with a VPBranchOnMaskRecipe in the entry block, return
/// the mask.
static VPValue *getPredicatedMask(VPRegionBlock *R) {
  auto *EntryBB = dyn_cast<VPBasicBlock>(R->getEntry());
  if (!EntryBB || EntryBB->size() != 1 ||
      !isa<VPBranchOnMaskRecipe>(EntryBB->begin()))
    return nullptr;

  return cast<VPBranchOnMaskRecipe>(&*EntryBB->begin())->getOperand(0);
}

/// If \p R is a triangle region, return the 'then' block of the triangle.
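/// Schematically (illustrative):
///
///   entry
///    |  \
///    |  then
///    |  /
///   exiting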
static VPBasicBlock *getPredicatedThenBlock(VPRegionBlock *R) {
  auto *EntryBB = cast<VPBasicBlock>(R->getEntry());
  if (EntryBB->getNumSuccessors() != 2)
    return nullptr;

  auto *Succ0 = dyn_cast<VPBasicBlock>(EntryBB->getSuccessors()[0]);
  auto *Succ1 = dyn_cast<VPBasicBlock>(EntryBB->getSuccessors()[1]);
  if (!Succ0 || !Succ1)
    return nullptr;

  if (Succ0->getNumSuccessors() + Succ1->getNumSuccessors() != 1)
    return nullptr;
  if (Succ0->getSingleSuccessor() == Succ1)
    return Succ0;
  if (Succ1->getSingleSuccessor() == Succ0)
    return Succ1;
  return nullptr;
}

// Merge replicate regions into their successor region, if a replicate region
// is connected to a successor replicate region with the same predicate by a
// single, empty VPBasicBlock.
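//
// Schematically (illustrative): given two replicate regions guarded by the
// same %mask,
//
//   region1 (entry1 -> then1 -> merge1) -> empty VPBB
//     -> region2 (entry2 -> then2 -> merge2)
//
// the recipes of then1 and merge1 are moved into then2 and merge2, and
// region1 is disconnected from the CFG.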
static bool mergeReplicateRegionsIntoSuccessors(VPlan &Plan) {
  SmallPtrSet<VPRegionBlock *, 4> TransformedRegions;

  // Collect replicate regions followed by an empty block, followed by another
  // replicate region with matching masks, to process up front. This avoids
  // iterator invalidation issues while merging regions.
  SmallVector<VPRegionBlock *, 8> WorkList;
  for (VPRegionBlock *Region1 : VPBlockUtils::blocksOnly<VPRegionBlock>(
           vp_depth_first_deep(Plan.getEntry()))) {
    if (!Region1->isReplicator())
      continue;
    auto *MiddleBasicBlock =
        dyn_cast_or_null<VPBasicBlock>(Region1->getSingleSuccessor());
    if (!MiddleBasicBlock || !MiddleBasicBlock->empty())
      continue;

    auto *Region2 =
        dyn_cast_or_null<VPRegionBlock>(MiddleBasicBlock->getSingleSuccessor());
    if (!Region2 || !Region2->isReplicator())
      continue;

    VPValue *Mask1 = getPredicatedMask(Region1);
    VPValue *Mask2 = getPredicatedMask(Region2);
    if (!Mask1 || Mask1 != Mask2)
      continue;

    assert(Mask1 && Mask2 && "both regions must have conditions");
    WorkList.push_back(Region1);
  }

  // Move recipes from Region1 to its successor region, if both are triangles.
  for (VPRegionBlock *Region1 : WorkList) {
    if (TransformedRegions.contains(Region1))
      continue;
    auto *MiddleBasicBlock = cast<VPBasicBlock>(Region1->getSingleSuccessor());
    auto *Region2 = cast<VPRegionBlock>(MiddleBasicBlock->getSingleSuccessor());

    VPBasicBlock *Then1 = getPredicatedThenBlock(Region1);
    VPBasicBlock *Then2 = getPredicatedThenBlock(Region2);
    if (!Then1 || !Then2)
      continue;

    // Note: No fusion-preventing memory dependencies are expected in either
    // region. Such dependencies should be rejected during earlier dependence
    // checks, which guarantee accesses can be re-ordered for vectorization.
    //
    // Move recipes to the successor region.
    for (VPRecipeBase &ToMove : make_early_inc_range(reverse(*Then1)))
      ToMove.moveBefore(*Then2, Then2->getFirstNonPhi());

    auto *Merge1 = cast<VPBasicBlock>(Then1->getSingleSuccessor());
    auto *Merge2 = cast<VPBasicBlock>(Then2->getSingleSuccessor());

    // Move VPPredInstPHIRecipes from the merge block to the successor region's
    // merge block. Update all users inside the successor region to use the
    // original values.
    for (VPRecipeBase &Phi1ToMove : make_early_inc_range(reverse(*Merge1))) {
      VPValue *PredInst1 =
          cast<VPPredInstPHIRecipe>(&Phi1ToMove)->getOperand(0);
      VPValue *Phi1ToMoveV = Phi1ToMove.getVPSingleValue();
      Phi1ToMoveV->replaceUsesWithIf(PredInst1, [Then2](VPUser &U, unsigned) {
        return cast<VPRecipeBase>(&U)->getParent() == Then2;
      });

      // Remove phi recipes that are unused after merging the regions.
      if (Phi1ToMove.getVPSingleValue()->getNumUsers() == 0) {
        Phi1ToMove.eraseFromParent();
        continue;
      }
      Phi1ToMove.moveBefore(*Merge2, Merge2->begin());
    }

    // Finally, remove the first region.
    for (VPBlockBase *Pred : make_early_inc_range(Region1->getPredecessors())) {
      VPBlockUtils::disconnectBlocks(Pred, Region1);
      VPBlockUtils::connectBlocks(Pred, MiddleBasicBlock);
    }
    VPBlockUtils::disconnectBlocks(Region1, MiddleBasicBlock);
    TransformedRegions.insert(Region1);
  }

  return !TransformedRegions.empty();
}

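// Create a replicate region for \p PredRecipe, a predicated
// VPReplicateRecipe. The result looks roughly like (illustrative, for a
// predicated load):
//
//   pred.load.entry:    BRANCH-ON-MASK %mask
//   pred.load.if:       REPLICATE %x = load %addr   ; mask dropped
//   pred.load.continue: PHI-PREDICATED-INSTRUCTION %p = [%x]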
static VPRegionBlock *createReplicateRegion(VPReplicateRecipe *PredRecipe,
                                            VPlan &Plan) {
  Instruction *Instr = PredRecipe->getUnderlyingInstr();
  // Build the triangular if-then region.
  std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
  assert(Instr->getParent() && "Predicated instruction not in any basic block");
  auto *BlockInMask = PredRecipe->getMask();
  auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
  auto *Entry =
      Plan.createVPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);

  // Replace the predicated replicate recipe with a replicate recipe without a
  // mask but in the replicate region.
  auto *RecipeWithoutMask = new VPReplicateRecipe(
      PredRecipe->getUnderlyingInstr(),
      make_range(PredRecipe->op_begin(), std::prev(PredRecipe->op_end())),
      PredRecipe->isUniform());
  auto *Pred =
      Plan.createVPBasicBlock(Twine(RegionName) + ".if", RecipeWithoutMask);

  VPPredInstPHIRecipe *PHIRecipe = nullptr;
  if (PredRecipe->getNumUsers() != 0) {
    PHIRecipe = new VPPredInstPHIRecipe(RecipeWithoutMask,
                                        RecipeWithoutMask->getDebugLoc());
    PredRecipe->replaceAllUsesWith(PHIRecipe);
    PHIRecipe->setOperand(0, RecipeWithoutMask);
  }
  PredRecipe->eraseFromParent();
  auto *Exiting =
      Plan.createVPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
  VPRegionBlock *Region =
      Plan.createVPRegionBlock(Entry, Exiting, RegionName, true);

  // Note: first set Entry as region entry and then connect successors starting
  // from it in order, to propagate the "parent" of each VPBasicBlock.
  VPBlockUtils::insertTwoBlocksAfter(Pred, Exiting, Entry);
  VPBlockUtils::connectBlocks(Pred, Exiting);

  return Region;
}

static void addReplicateRegions(VPlan &Plan) {
  SmallVector<VPReplicateRecipe *> WorkList;
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
           vp_depth_first_deep(Plan.getEntry()))) {
    for (VPRecipeBase &R : *VPBB)
      if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R)) {
        if (RepR->isPredicated())
          WorkList.push_back(RepR);
      }
  }

  unsigned BBNum = 0;
  for (VPReplicateRecipe *RepR : WorkList) {
    VPBasicBlock *CurrentBlock = RepR->getParent();
    VPBasicBlock *SplitBlock = CurrentBlock->splitAt(RepR->getIterator());

    BasicBlock *OrigBB = RepR->getUnderlyingInstr()->getParent();
    SplitBlock->setName(
        OrigBB->hasName() ? OrigBB->getName() + "." + Twine(BBNum++) : "");
    // Record predicated instructions for above packing optimizations.
    VPRegionBlock *Region = createReplicateRegion(RepR, Plan);
    Region->setParent(CurrentBlock->getParent());
    VPBlockUtils::insertOnEdge(CurrentBlock, SplitBlock, Region);
  }
}

/// Remove redundant VPBasicBlocks by merging them into their predecessor if
/// the predecessor has a single successor.
static bool mergeBlocksIntoPredecessors(VPlan &Plan) {
  SmallVector<VPBasicBlock *> WorkList;
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
           vp_depth_first_deep(Plan.getEntry()))) {
    // Don't fold the blocks in the skeleton of the Plan into their single
    // predecessors for now.
    // TODO: Remove restriction once more of the skeleton is modeled in VPlan.
    if (!VPBB->getParent())
      continue;
    auto *PredVPBB =
        dyn_cast_or_null<VPBasicBlock>(VPBB->getSinglePredecessor());
    if (!PredVPBB || PredVPBB->getNumSuccessors() != 1 ||
        isa<VPIRBasicBlock>(PredVPBB))
      continue;
    WorkList.push_back(VPBB);
  }

  for (VPBasicBlock *VPBB : WorkList) {
    VPBasicBlock *PredVPBB = cast<VPBasicBlock>(VPBB->getSinglePredecessor());
    for (VPRecipeBase &R : make_early_inc_range(*VPBB))
      R.moveBefore(*PredVPBB, PredVPBB->end());
    VPBlockUtils::disconnectBlocks(PredVPBB, VPBB);
    auto *ParentRegion = cast_or_null<VPRegionBlock>(VPBB->getParent());
    if (ParentRegion && ParentRegion->getExiting() == VPBB)
      ParentRegion->setExiting(PredVPBB);
    for (auto *Succ : to_vector(VPBB->successors())) {
      VPBlockUtils::disconnectBlocks(VPBB, Succ);
      VPBlockUtils::connectBlocks(PredVPBB, Succ);
    }
    // VPBB is now dead and will be cleaned up when the plan gets destroyed.
  }
  return !WorkList.empty();
}

void VPlanTransforms::createAndOptimizeReplicateRegions(VPlan &Plan) {
  // Convert masked VPReplicateRecipes to if-then region blocks.
  addReplicateRegions(Plan);

  bool ShouldSimplify = true;
  while (ShouldSimplify) {
    ShouldSimplify = sinkScalarOperands(Plan);
    ShouldSimplify |= mergeReplicateRegionsIntoSuccessors(Plan);
    ShouldSimplify |= mergeBlocksIntoPredecessors(Plan);
  }
}

/// Remove redundant casts of inductions.
///
/// Such redundant casts are casts of induction variables that can be ignored,
/// because we already proved that the casted phi is equal to the uncasted phi
/// in the vectorized loop. There is no need to vectorize the cast - the same
/// value can be used for both the phi and casts in the vector loop.
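///
/// E.g. (illustrative): if the induction descriptor records a cast chain
///   %iv = phi i32 ...
///   %c  = sext i32 %iv to i64   ; proven equal to the widened induction
/// then users of the recipe for %c are redirected to the
/// VPWidenIntOrFpInductionRecipe itself, and the dead cast recipes are
/// cleaned up later.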
static void removeRedundantInductionCasts(VPlan &Plan) {
  for (auto &Phi : Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
    auto *IV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
    if (!IV || IV->getTruncInst())
      continue;

    // A sequence of IR Casts has potentially been recorded for IV, which
    // *must be bypassed* when the IV is vectorized, because the vectorized IV
    // will produce the desired casted value. This sequence forms a def-use
    // chain and is provided in reverse order, ending with the cast that uses
    // the IV phi. Search for the recipe of the last cast in the chain and
    // replace it with the original IV. Note that only the final cast is
    // expected to have users outside the cast-chain and the dead casts left
    // over will be cleaned up later.
    auto &Casts = IV->getInductionDescriptor().getCastInsts();
    VPValue *FindMyCast = IV;
    for (Instruction *IRCast : reverse(Casts)) {
      VPSingleDefRecipe *FoundUserCast = nullptr;
      for (auto *U : FindMyCast->users()) {
        auto *UserCast = dyn_cast<VPSingleDefRecipe>(U);
        if (UserCast && UserCast->getUnderlyingValue() == IRCast) {
          FoundUserCast = UserCast;
          break;
        }
      }
      FindMyCast = FoundUserCast;
    }
    FindMyCast->replaceAllUsesWith(IV);
  }
}

/// Try to replace VPWidenCanonicalIVRecipes with a widened canonical IV
/// recipe, if it exists.
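///
/// E.g. (illustrative): if the header already has
///   WIDEN-INDUCTION %iv = phi 0, 1   ; a canonical, widened induction
/// then a separate WIDEN-CANONICAL-INDUCTION of the canonical IV is
/// redundant and its users can be redirected to %iv.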
static void removeRedundantCanonicalIVs(VPlan &Plan) {
  VPCanonicalIVPHIRecipe *CanonicalIV = Plan.getCanonicalIV();
  VPWidenCanonicalIVRecipe *WidenNewIV = nullptr;
  for (VPUser *U : CanonicalIV->users()) {
    WidenNewIV = dyn_cast<VPWidenCanonicalIVRecipe>(U);
    if (WidenNewIV)
      break;
  }

  if (!WidenNewIV)
    return;

  VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
  for (VPRecipeBase &Phi : HeaderVPBB->phis()) {
    auto *WidenOriginalIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);

    if (!WidenOriginalIV || !WidenOriginalIV->isCanonical())
      continue;

    // Replace WidenNewIV with WidenOriginalIV if WidenOriginalIV provides
    // everything WidenNewIV's users need. That is, WidenOriginalIV will
    // generate a vector phi or all users of WidenNewIV demand the first lane
    // only.
    if (any_of(WidenOriginalIV->users(),
               [WidenOriginalIV](VPUser *U) {
                 return !U->usesScalars(WidenOriginalIV);
               }) ||
        vputils::onlyFirstLaneUsed(WidenNewIV)) {
      WidenNewIV->replaceAllUsesWith(WidenOriginalIV);
      WidenNewIV->eraseFromParent();
      return;
    }
  }
}

/// Returns true if \p R is dead and can be removed.
static bool isDeadRecipe(VPRecipeBase &R) {
  using namespace llvm::PatternMatch;
  // Do remove conditional assume instructions as their conditions may be
  // flattened.
  auto *RepR = dyn_cast<VPReplicateRecipe>(&R);
  bool IsConditionalAssume =
      RepR && RepR->isPredicated() &&
      match(RepR->getUnderlyingInstr(), m_Intrinsic<Intrinsic::assume>());
  if (IsConditionalAssume)
    return true;

  if (R.mayHaveSideEffects())
    return false;

  // Recipe is dead if no user keeps the recipe alive.
  return all_of(R.definedValues(),
                [](VPValue *V) { return V->getNumUsers() == 0; });
}

void VPlanTransforms::removeDeadRecipes(VPlan &Plan) {
  ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>> RPOT(
      Plan.getEntry());

  for (VPBasicBlock *VPBB :
       reverse(VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT))) {
    // The recipes in the block are processed in reverse order, to catch chains
    // of dead recipes.
    for (VPRecipeBase &R : make_early_inc_range(reverse(*VPBB))) {
      if (isDeadRecipe(R))
        R.eraseFromParent();
    }
  }
}

static VPScalarIVStepsRecipe *
createScalarIVSteps(VPlan &Plan, InductionDescriptor::InductionKind Kind,
                    Instruction::BinaryOps InductionOpcode,
                    FPMathOperator *FPBinOp, Instruction *TruncI,
                    VPValue *StartV, VPValue *Step, DebugLoc DL,
                    VPBuilder &Builder) {
  VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
  VPCanonicalIVPHIRecipe *CanonicalIV = Plan.getCanonicalIV();
  VPSingleDefRecipe *BaseIV = Builder.createDerivedIV(
      Kind, FPBinOp, StartV, CanonicalIV, Step, "offset.idx");

  // Truncate base induction if needed.
  Type *CanonicalIVType = CanonicalIV->getScalarType();
  VPTypeAnalysis TypeInfo(CanonicalIVType);
  Type *ResultTy = TypeInfo.inferScalarType(BaseIV);
  if (TruncI) {
    Type *TruncTy = TruncI->getType();
    assert(ResultTy->getScalarSizeInBits() > TruncTy->getScalarSizeInBits() &&
           "Not truncating.");
    assert(ResultTy->isIntegerTy() && "Truncation requires an integer type");
    BaseIV = Builder.createScalarCast(Instruction::Trunc, BaseIV, TruncTy, DL);
    ResultTy = TruncTy;
  }

  // Truncate step if needed.
  Type *StepTy = TypeInfo.inferScalarType(Step);
  if (ResultTy != StepTy) {
    assert(StepTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits() &&
           "Not truncating.");
    assert(StepTy->isIntegerTy() && "Truncation requires an integer type");
    auto *VecPreheader =
        cast<VPBasicBlock>(HeaderVPBB->getSingleHierarchicalPredecessor());
    VPBuilder::InsertPointGuard Guard(Builder);
    Builder.setInsertPoint(VecPreheader);
    Step = Builder.createScalarCast(Instruction::Trunc, Step, ResultTy, DL);
  }
  return Builder.createScalarIVSteps(InductionOpcode, FPBinOp, BaseIV, Step);
}

/// Legalize VPWidenPointerInductionRecipe, by replacing it with a PtrAdd
/// (IndStart, ScalarIVSteps (0, Step)) if only its scalar values are used, as
/// VPWidenPointerInductionRecipe will generate vectors only. If some users
/// require vectors while others require scalars, the scalar uses need to
/// extract the scalars from the generated vectors (note that this is different
/// from how int/fp inductions are handled). Also optimize
/// VPWidenIntOrFpInductionRecipe, if any of its users needs scalar values, by
/// providing them scalar steps built on the canonical scalar IV, and update
/// the original IV's users. This is an optional optimization to reduce the
/// need for vector extracts.
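///
/// E.g. (illustrative): a pointer induction with only scalar users,
///   WIDEN-POINTER-INDUCTION %ptr.iv = phi %base, %step
/// becomes
///   %steps    = SCALAR-STEPS 0, %step
///   %next.gep = ptradd %base, %steps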
static void legalizeAndOptimizeInductions(VPlan &Plan) {
  VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
  bool HasOnlyVectorVFs = !Plan.hasVF(ElementCount::getFixed(1));
  VPBuilder Builder(HeaderVPBB, HeaderVPBB->getFirstNonPhi());
  for (VPRecipeBase &Phi : HeaderVPBB->phis()) {
    // Replace wide pointer inductions which have only their scalars used by
    // PtrAdd(IndStart, ScalarIVSteps (0, Step)).
    if (auto *PtrIV = dyn_cast<VPWidenPointerInductionRecipe>(&Phi)) {
      if (!PtrIV->onlyScalarsGenerated(Plan.hasScalableVF()))
        continue;

      const InductionDescriptor &ID = PtrIV->getInductionDescriptor();
      VPValue *StartV =
          Plan.getOrAddLiveIn(ConstantInt::get(ID.getStep()->getType(), 0));
      VPValue *StepV = PtrIV->getOperand(1);
      VPScalarIVStepsRecipe *Steps = createScalarIVSteps(
          Plan, InductionDescriptor::IK_IntInduction, Instruction::Add, nullptr,
          nullptr, StartV, StepV, PtrIV->getDebugLoc(), Builder);

      VPValue *PtrAdd = Builder.createPtrAdd(PtrIV->getStartValue(), Steps,
                                             PtrIV->getDebugLoc(), "next.gep");

      PtrIV->replaceAllUsesWith(PtrAdd);
      continue;
    }

    // Replace widened induction with scalar steps for users that only use
    // scalars.
    auto *WideIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
    if (!WideIV)
      continue;
    if (HasOnlyVectorVFs && none_of(WideIV->users(), [WideIV](VPUser *U) {
          return U->usesScalars(WideIV);
        }))
      continue;

    const InductionDescriptor &ID = WideIV->getInductionDescriptor();
    VPScalarIVStepsRecipe *Steps = createScalarIVSteps(
        Plan, ID.getKind(), ID.getInductionOpcode(),
        dyn_cast_or_null<FPMathOperator>(ID.getInductionBinOp()),
        WideIV->getTruncInst(), WideIV->getStartValue(), WideIV->getStepValue(),
        WideIV->getDebugLoc(), Builder);

    // Update scalar users of IV to use Steps instead.
    if (!HasOnlyVectorVFs)
      WideIV->replaceAllUsesWith(Steps);
    else
      WideIV->replaceUsesWithIf(Steps, [WideIV](VPUser &U, unsigned) {
        return U.usesScalars(WideIV);
      });
  }
}

/// Remove redundant ExpandSCEVRecipes in \p Plan's entry block by replacing
/// them with already existing recipes expanding the same SCEV expression.
static void removeRedundantExpandSCEVRecipes(VPlan &Plan) {
  DenseMap<const SCEV *, VPValue *> SCEV2VPV;

  for (VPRecipeBase &R :
       make_early_inc_range(*Plan.getEntry())) {
    auto *ExpR = dyn_cast<VPExpandSCEVRecipe>(&R);
    if (!ExpR)
      continue;

    auto I = SCEV2VPV.insert({ExpR->getSCEV(), ExpR});
    if (I.second)
      continue;
    ExpR->replaceAllUsesWith(I.first->second);
    ExpR->eraseFromParent();
  }
}

static void recursivelyDeleteDeadRecipes(VPValue *V) {
  SmallVector<VPValue *> WorkList;
  SmallPtrSet<VPValue *, 8> Seen;
  WorkList.push_back(V);

  while (!WorkList.empty()) {
    VPValue *Cur = WorkList.pop_back_val();
    if (!Seen.insert(Cur).second)
      continue;
    VPRecipeBase *R = Cur->getDefiningRecipe();
    if (!R)
      continue;
    if (!isDeadRecipe(*R))
      continue;
    WorkList.append(R->op_begin(), R->op_end());
    R->eraseFromParent();
  }
}

/// Try to simplify recipe \p R.
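/// Simplifications performed below include, e.g.:
///   trunc (zext A) -> A (or a single cast of A),
///   (X && Y) || (X && !Y) -> X,
///   A * 1 -> A, !!A -> A,
///   derived-iv (0 + A * 1) -> A,
/// as well as removing or normalizing redundant blend recipes.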
static void simplifyRecipe(VPRecipeBase &R, VPTypeAnalysis &TypeInfo) {
  using namespace llvm::VPlanPatternMatch;

  if (auto *Blend = dyn_cast<VPBlendRecipe>(&R)) {
    // Try to remove redundant blend recipes.
    SmallPtrSet<VPValue *, 4> UniqueValues;
    if (Blend->isNormalized() || !match(Blend->getMask(0), m_False()))
      UniqueValues.insert(Blend->getIncomingValue(0));
    for (unsigned I = 1; I != Blend->getNumIncomingValues(); ++I)
      if (!match(Blend->getMask(I), m_False()))
        UniqueValues.insert(Blend->getIncomingValue(I));

    if (UniqueValues.size() == 1) {
      Blend->replaceAllUsesWith(*UniqueValues.begin());
      Blend->eraseFromParent();
      return;
    }

    if (Blend->isNormalized())
      return;

    // Normalize the blend so its first incoming value is used as the initial
    // value, with the others blended into it.

    unsigned StartIndex = 0;
    for (unsigned I = 0; I != Blend->getNumIncomingValues(); ++I) {
      // If a value's mask is used only by the blend then it can be dead-coded.
      // TODO: Find the most expensive mask that can be dead-coded, or a mask
      // that's used by multiple blends where it can be removed from them all.
      VPValue *Mask = Blend->getMask(I);
      if (Mask->getNumUsers() == 1 && !match(Mask, m_False())) {
        StartIndex = I;
        break;
      }
    }

    SmallVector<VPValue *, 4> OperandsWithMask;
    OperandsWithMask.push_back(Blend->getIncomingValue(StartIndex));

    for (unsigned I = 0; I != Blend->getNumIncomingValues(); ++I) {
      if (I == StartIndex)
        continue;
      OperandsWithMask.push_back(Blend->getIncomingValue(I));
      OperandsWithMask.push_back(Blend->getMask(I));
    }

    auto *NewBlend = new VPBlendRecipe(
        cast<PHINode>(Blend->getUnderlyingValue()), OperandsWithMask);
    NewBlend->insertBefore(&R);

    VPValue *DeadMask = Blend->getMask(StartIndex);
    Blend->replaceAllUsesWith(NewBlend);
    Blend->eraseFromParent();
    recursivelyDeleteDeadRecipes(DeadMask);
    return;
  }

  VPValue *A;
  if (match(&R, m_Trunc(m_ZExtOrSExt(m_VPValue(A))))) {
    VPValue *Trunc = R.getVPSingleValue();
    Type *TruncTy = TypeInfo.inferScalarType(Trunc);
    Type *ATy = TypeInfo.inferScalarType(A);
    if (TruncTy == ATy) {
      Trunc->replaceAllUsesWith(A);
    } else {
      // Don't replace a scalarizing recipe with a widened cast.
      if (isa<VPReplicateRecipe>(&R))
        return;
      if (ATy->getScalarSizeInBits() < TruncTy->getScalarSizeInBits()) {

        unsigned ExtOpcode = match(R.getOperand(0), m_SExt(m_VPValue()))
                                 ? Instruction::SExt
                                 : Instruction::ZExt;
        auto *VPC =
            new VPWidenCastRecipe(Instruction::CastOps(ExtOpcode), A, TruncTy);
        if (auto *UnderlyingExt = R.getOperand(0)->getUnderlyingValue()) {
          // UnderlyingExt has distinct return type, used to retain legacy cost.
          VPC->setUnderlyingValue(UnderlyingExt);
        }
        VPC->insertBefore(&R);
        Trunc->replaceAllUsesWith(VPC);
      } else if (ATy->getScalarSizeInBits() > TruncTy->getScalarSizeInBits()) {
        auto *VPC = new VPWidenCastRecipe(Instruction::Trunc, A, TruncTy);
        VPC->insertBefore(&R);
        Trunc->replaceAllUsesWith(VPC);
      }
    }
#ifndef NDEBUG
    // Verify that the cached type info for both A and its users is still
    // accurate by comparing it to freshly computed types.
    VPTypeAnalysis TypeInfo2(
        R.getParent()->getPlan()->getCanonicalIV()->getScalarType());
    assert(TypeInfo.inferScalarType(A) == TypeInfo2.inferScalarType(A));
    for (VPUser *U : A->users()) {
      auto *R = cast<VPRecipeBase>(U);
      for (VPValue *VPV : R->definedValues())
        assert(TypeInfo.inferScalarType(VPV) == TypeInfo2.inferScalarType(VPV));
    }
#endif
  }

  // Simplify (X && Y) || (X && !Y) -> X.
  // TODO: Split up into simpler, modular combines: (X && Y) || (X && Z) into X
  // && (Y || Z) and (X || !X) into true. This requires queuing newly created
  // recipes to be visited during simplification.
  VPValue *X, *Y, *X1, *Y1;
  if (match(&R,
            m_c_BinaryOr(m_LogicalAnd(m_VPValue(X), m_VPValue(Y)),
                         m_LogicalAnd(m_VPValue(X1), m_Not(m_VPValue(Y1))))) &&
      X == X1 && Y == Y1) {
    R.getVPSingleValue()->replaceAllUsesWith(X);
    R.eraseFromParent();
    return;
  }

  if (match(&R, m_c_Mul(m_VPValue(A), m_SpecificInt(1))))
    return R.getVPSingleValue()->replaceAllUsesWith(A);

  if (match(&R, m_Not(m_Not(m_VPValue(A)))))
    return R.getVPSingleValue()->replaceAllUsesWith(A);

  // Remove redundant DerivedIVs, that is 0 + A * 1 -> A and 0 + 0 * x -> 0.
  if ((match(&R,
             m_DerivedIV(m_SpecificInt(0), m_VPValue(A), m_SpecificInt(1))) ||
       match(&R,
             m_DerivedIV(m_SpecificInt(0), m_SpecificInt(0), m_VPValue()))) &&
      TypeInfo.inferScalarType(R.getOperand(1)) ==
          TypeInfo.inferScalarType(R.getVPSingleValue()))
    return R.getVPSingleValue()->replaceAllUsesWith(R.getOperand(1));
}

/// Try to simplify the recipes in \p Plan. Use \p CanonicalIVTy as type for
/// all un-typed live-ins in VPTypeAnalysis.
static void simplifyRecipes(VPlan &Plan, Type *CanonicalIVTy) {
  ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>> RPOT(
      Plan.getEntry());
  VPTypeAnalysis TypeInfo(CanonicalIVTy);
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) {
    for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
      simplifyRecipe(R, TypeInfo);
    }
  }
}

void VPlanTransforms::optimizeForVFAndUF(VPlan &Plan, ElementCount BestVF,
                                         unsigned BestUF,
                                         PredicatedScalarEvolution &PSE) {
  assert(Plan.hasVF(BestVF) && "BestVF is not available in Plan");
  assert(Plan.hasUF(BestUF) && "BestUF is not available in Plan");
  VPRegionBlock *VectorRegion = Plan.getVectorLoopRegion();
  VPBasicBlock *ExitingVPBB = VectorRegion->getExitingBasicBlock();
  auto *Term = &ExitingVPBB->back();
  // Try to simplify the branch condition if TC <= VF * UF when preparing to
  // execute the plan for the main vector loop. We only do this if the
  // terminator is:
  //  1. BranchOnCount, or
  //  2. BranchOnCond where the input is Not(ActiveLaneMask).
  using namespace llvm::VPlanPatternMatch;
  if (!match(Term, m_BranchOnCount(m_VPValue(), m_VPValue())) &&
      !match(Term,
             m_BranchOnCond(m_Not(m_ActiveLaneMask(m_VPValue(), m_VPValue())))))
    return;

  ScalarEvolution &SE = *PSE.getSE();
  const SCEV *TripCount =
      vputils::getSCEVExprForVPValue(Plan.getTripCount(), SE);
  assert(!isa<SCEVCouldNotCompute>(TripCount) &&
         "Trip count SCEV must be computable");
  ElementCount NumElements = BestVF.multiplyCoefficientBy(BestUF);
  const SCEV *C = SE.getElementCount(TripCount->getType(), NumElements);
  if (TripCount->isZero() ||
      !SE.isKnownPredicate(CmpInst::ICMP_ULE, TripCount, C))
    return;

  // The vector loop region only executes once. If possible, completely remove
  // the region, otherwise replace the terminator controlling the latch with
  // (BranchOnCond true).
  auto *Header = cast<VPBasicBlock>(VectorRegion->getEntry());
  auto *CanIVTy = Plan.getCanonicalIV()->getScalarType();
  if (all_of(
          Header->phis(),
          IsaPred<VPCanonicalIVPHIRecipe, VPFirstOrderRecurrencePHIRecipe>)) {
    for (VPRecipeBase &HeaderR : make_early_inc_range(Header->phis())) {
      auto *HeaderPhiR = cast<VPHeaderPHIRecipe>(&HeaderR);
      HeaderPhiR->replaceAllUsesWith(HeaderPhiR->getStartValue());
      HeaderPhiR->eraseFromParent();
    }

    VPBlockBase *Preheader = VectorRegion->getSinglePredecessor();
    VPBlockBase *Exit = VectorRegion->getSingleSuccessor();
    VPBlockUtils::disconnectBlocks(Preheader, VectorRegion);
    VPBlockUtils::disconnectBlocks(VectorRegion, Exit);

    for (VPBlockBase *B : vp_depth_first_shallow(VectorRegion->getEntry()))
      B->setParent(nullptr);

    VPBlockUtils::connectBlocks(Preheader, Header);
    VPBlockUtils::connectBlocks(ExitingVPBB, Exit);
    simplifyRecipes(Plan, CanIVTy);
  } else {
    // The vector region contains header phis for which we cannot remove the
    // loop region yet.
    LLVMContext &Ctx = SE.getContext();
    auto *BOC = new VPInstruction(
        VPInstruction::BranchOnCond,
        {Plan.getOrAddLiveIn(ConstantInt::getTrue(Ctx))}, Term->getDebugLoc());
    ExitingVPBB->appendRecipe(BOC);
  }

  Term->eraseFromParent();
  VPlanTransforms::removeDeadRecipes(Plan);

  Plan.setVF(BestVF);
  Plan.setUF(BestUF);
  // TODO: Further simplifications are possible
  //      1. Replace inductions with constants.
  //      2. Replace vector loop region with VPBasicBlock.
}

/// Sink users of \p FOR after the recipe defining the previous value \p
/// Previous of the recurrence. \returns true if all users of \p FOR could be
/// re-arranged as needed or false if it is not possible.
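///
/// E.g. (illustrative), for
///   for (int i = 1; i < n; i++)
///     b[i] = a[i] - a[i - 1];
/// the recurrence phi yields a[i - 1] and \p Previous is the load of a[i];
/// the subtract (a user of the phi) is sunk after the load so a splice of
/// the two values can be created between them.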
static bool
sinkRecurrenceUsersAfterPrevious(VPFirstOrderRecurrencePHIRecipe *FOR,
                                 VPRecipeBase *Previous,
                                 VPDominatorTree &VPDT) {
  // Collect recipes that need sinking.
  SmallVector<VPRecipeBase *> WorkList;
  SmallPtrSet<VPRecipeBase *, 8> Seen;
  Seen.insert(Previous);
  auto TryToPushSinkCandidate = [&](VPRecipeBase *SinkCandidate) {
    // The previous value must not depend on the users of the recurrence phi.
    // In that case, FOR is not a fixed order recurrence.
    if (SinkCandidate == Previous)
      return false;

    if (isa<VPHeaderPHIRecipe>(SinkCandidate) ||
        !Seen.insert(SinkCandidate).second ||
        VPDT.properlyDominates(Previous, SinkCandidate))
      return true;

    if (SinkCandidate->mayHaveSideEffects())
      return false;

    WorkList.push_back(SinkCandidate);
    return true;
  };

  // Recursively sink users of FOR after Previous.
  WorkList.push_back(FOR);
  for (unsigned I = 0; I != WorkList.size(); ++I) {
    VPRecipeBase *Current = WorkList[I];
    assert(Current->getNumDefinedValues() == 1 &&
           "only recipes with a single defined value expected");

    for (VPUser *User : Current->getVPSingleValue()->users()) {
      if (!TryToPushSinkCandidate(cast<VPRecipeBase>(User)))
        return false;
    }
  }

  // Keep recipes to sink ordered by dominance so earlier instructions are
  // processed first.
  sort(WorkList, [&VPDT](const VPRecipeBase *A, const VPRecipeBase *B) {
    return VPDT.properlyDominates(A, B);
  });

  for (VPRecipeBase *SinkCandidate : WorkList) {
    if (SinkCandidate == FOR)
      continue;

    SinkCandidate->moveAfter(Previous);
    Previous = SinkCandidate;
  }
  return true;
}

/// Try to hoist \p Previous and its operands before all users of \p FOR.
static bool hoistPreviousBeforeFORUsers(VPFirstOrderRecurrencePHIRecipe *FOR,
                                        VPRecipeBase *Previous,
                                        VPDominatorTree &VPDT) {
  if (Previous->mayHaveSideEffects() || Previous->mayReadFromMemory())
    return false;

  // Collect recipes that need hoisting.
  SmallVector<VPRecipeBase *> HoistCandidates;
  SmallPtrSet<VPRecipeBase *, 8> Visited;
  VPRecipeBase *HoistPoint = nullptr;
  // Find the closest hoist point by looking at all users of FOR and selecting
  // the recipe dominating all other users.
  for (VPUser *U : FOR->users()) {
    auto *R = cast<VPRecipeBase>(U);
    if (!HoistPoint || VPDT.properlyDominates(R, HoistPoint))
      HoistPoint = R;
  }
  assert(all_of(FOR->users(),
                [&VPDT, HoistPoint](VPUser *U) {
                  auto *R = cast<VPRecipeBase>(U);
                  return HoistPoint == R ||
                         VPDT.properlyDominates(HoistPoint, R);
                }) &&
         "HoistPoint must dominate all users of FOR");

  auto NeedsHoisting = [HoistPoint, &VPDT,
                        &Visited](VPValue *HoistCandidateV) -> VPRecipeBase * {
    VPRecipeBase *HoistCandidate = HoistCandidateV->getDefiningRecipe();
    if (!HoistCandidate)
      return nullptr;
    VPRegionBlock *EnclosingLoopRegion =
        HoistCandidate->getParent()->getEnclosingLoopRegion();
    assert((!HoistCandidate->getParent()->getParent() ||
            HoistCandidate->getParent()->getParent() == EnclosingLoopRegion) &&
           "CFG in VPlan should still be flat, without replicate regions");
    // Hoist candidate was already visited, no need to hoist.
    if (!Visited.insert(HoistCandidate).second)
      return nullptr;

    // Candidate is outside the loop region or a header phi, and so dominates
    // FOR users without hoisting.
    if (!EnclosingLoopRegion || isa<VPHeaderPHIRecipe>(HoistCandidate))
      return nullptr;

    // If we reached a recipe that dominates HoistPoint, we don't need to
    // hoist the recipe.
    if (VPDT.properlyDominates(HoistCandidate, HoistPoint))
      return nullptr;
    return HoistCandidate;
  };
  auto CanHoist = [&](VPRecipeBase *HoistCandidate) {
    // Avoid hoisting candidates with side-effects, as we do not yet analyze
    // associated dependencies.
    return !HoistCandidate->mayHaveSideEffects();
  };

  if (!NeedsHoisting(Previous->getVPSingleValue()))
    return true;

  // Recursively try to hoist Previous and its operands before all users of
  // FOR.
  HoistCandidates.push_back(Previous);

  for (unsigned I = 0; I != HoistCandidates.size(); ++I) {
    VPRecipeBase *Current = HoistCandidates[I];
    assert(Current->getNumDefinedValues() == 1 &&
           "only recipes with a single defined value expected");
    if (!CanHoist(Current))
      return false;

    for (VPValue *Op : Current->operands()) {
      // If we reach FOR, it means the original Previous depends on some other
      // recurrence that in turn depends on FOR. If that is the case, we would
      // also need to hoist recipes involving the other FOR, which may break
      // dependencies.
      if (Op == FOR)
        return false;

      if (auto *R = NeedsHoisting(Op))
        HoistCandidates.push_back(R);
    }
  }

  // Order recipes to hoist by dominance so earlier instructions are processed
  // first.
  sort(HoistCandidates, [&VPDT](const VPRecipeBase *A, const VPRecipeBase *B) {
    return VPDT.properlyDominates(A, B);
  });

  for (VPRecipeBase *HoistCandidate : HoistCandidates) {
    HoistCandidate->moveBefore(*HoistPoint->getParent(),
                               HoistPoint->getIterator());
  }

  return true;
}

bool VPlanTransforms::adjustFixedOrderRecurrences(VPlan &Plan,
                                                  VPBuilder &LoopBuilder) {
  VPDominatorTree VPDT;
  VPDT.recalculate(Plan);

  SmallVector<VPFirstOrderRecurrencePHIRecipe *> RecurrencePhis;
  for (VPRecipeBase &R :
       Plan.getVectorLoopRegion()->getEntry()->phis())
    if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
      RecurrencePhis.push_back(FOR);

  for (VPFirstOrderRecurrencePHIRecipe *FOR : RecurrencePhis) {
    SmallPtrSet<VPFirstOrderRecurrencePHIRecipe *, 4> SeenPhis;
    VPRecipeBase *Previous = FOR->getBackedgeValue()->getDefiningRecipe();
    // Fixed-order recurrences do not contain cycles, so this loop is
    // guaranteed to terminate.
    while (auto *PrevPhi =
               dyn_cast_or_null<VPFirstOrderRecurrencePHIRecipe>(Previous)) {
      assert(PrevPhi->getParent() == FOR->getParent());
      assert(SeenPhis.insert(PrevPhi).second);
      Previous = PrevPhi->getBackedgeValue()->getDefiningRecipe();
    }

    if (!sinkRecurrenceUsersAfterPrevious(FOR, Previous, VPDT) &&
        !hoistPreviousBeforeFORUsers(FOR, Previous, VPDT))
      return false;

    // Introduce a recipe to combine the incoming and previous values of a
    // fixed-order recurrence.
    VPBasicBlock *InsertBlock = Previous->getParent();
    if (isa<VPHeaderPHIRecipe>(Previous))
      LoopBuilder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi());
    else
      LoopBuilder.setInsertPoint(InsertBlock,
                                 std::next(Previous->getIterator()));

    auto *RecurSplice = cast<VPInstruction>(
        LoopBuilder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice,
                                 {FOR, FOR->getBackedgeValue()}));

    FOR->replaceAllUsesWith(RecurSplice);
    // Set the first operand of RecurSplice to FOR again, after replacing
    // all users.
    RecurSplice->setOperand(0, FOR);
  }
  return true;
}

static SmallVector<VPUser *> collectUsersRecursively(VPValue *V) {
  SetVector<VPUser *> Users(V->user_begin(), V->user_end());
  for (unsigned I = 0; I != Users.size(); ++I) {
    VPRecipeBase *Cur = cast<VPRecipeBase>(Users[I]);
    if (isa<VPHeaderPHIRecipe>(Cur))
      continue;
    for (VPValue *V : Cur->definedValues())
      Users.insert(V->user_begin(), V->user_end());
  }
  return Users.takeVector();
}

void VPlanTransforms::clearReductionWrapFlags(VPlan &Plan) {
  for (VPRecipeBase &R :
       Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
    auto *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
    if (!PhiR)
      continue;
    const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
    RecurKind RK = RdxDesc.getRecurrenceKind();
    if (RK != RecurKind::Add && RK != RecurKind::Mul)
      continue;

    for (VPUser *U : collectUsersRecursively(PhiR))
      if (auto *RecWithFlags = dyn_cast<VPRecipeWithIRFlags>(U)) {
        RecWithFlags->dropPoisonGeneratingFlags();
      }
  }
}

/// Move loop-invariant recipes out of the vector loop region in \p Plan.
static void licm(VPlan &Plan) {
  VPBasicBlock *Preheader = Plan.getVectorPreheader();

  // Return true if we do not know how to (mechanically) hoist a given recipe
  // out of a loop region. Does not address legality concerns such as aliasing
  // or speculation safety.
  auto CannotHoistRecipe = [](VPRecipeBase &R) {
    // Allocas cannot be hoisted.
    auto *RepR = dyn_cast<VPReplicateRecipe>(&R);
    return RepR && RepR->getOpcode() == Instruction::Alloca;
  };

  // Hoist any loop invariant recipes from the vector loop region to the
  // preheader. Perform a shallow traversal of the vector loop region, to
  // exclude recipes in replicate regions.
  VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion();
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
           vp_depth_first_shallow(LoopRegion->getEntry()))) {
    for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
      if (CannotHoistRecipe(R))
        continue;
      // TODO: Relax checks in the future, e.g. we could also hoist reads, if
      // their memory location is not modified in the vector loop.
      if (R.mayHaveSideEffects() || R.mayReadFromMemory() || R.isPhi() ||
          any_of(R.operands(), [](VPValue *Op) {
            return !Op->isDefinedOutsideLoopRegions();
          }))
        continue;
      R.moveBefore(*Preheader, Preheader->end());
    }
  }
}

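// Truncate recipes to the minimal bitwidths computed by the vectorizer's
// minimum-bitwidth analysis (MinBWs). E.g. (illustrative): an i32 add whose
// result is known to need only 8 bits becomes, per lane,
//   %a8 = trunc i32 %a to i8
//   %b8 = trunc i32 %b to i8
//   %r8 = add i8 %a8, %b8
//   %r  = zext i8 %r8 to i32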
void VPlanTransforms::truncateToMinimalBitwidths(
    VPlan &Plan, const MapVector<Instruction *, uint64_t> &MinBWs) {
#ifndef NDEBUG
  // Count the processed recipes and cross check the count later with MinBWs
  // size, to make sure all entries in MinBWs have been handled.
  unsigned NumProcessedRecipes = 0;
#endif
  // Keep track of created truncates, so they can be re-used. Note that we
  // cannot use RAUW after creating a new truncate, as this could make other
  // uses have different types for their operands, making them invalidly
  // typed.
  DenseMap<VPValue *, VPWidenCastRecipe *> ProcessedTruncs;
  Type *CanonicalIVType = Plan.getCanonicalIV()->getScalarType();
  VPTypeAnalysis TypeInfo(CanonicalIVType);
  VPBasicBlock *PH = Plan.getVectorPreheader();
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
           vp_depth_first_deep(Plan.getVectorLoopRegion()))) {
    for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
      if (!isa<VPWidenRecipe, VPWidenCastRecipe, VPReplicateRecipe,
               VPWidenSelectRecipe, VPWidenLoadRecipe>(&R))
        continue;

      VPValue *ResultVPV = R.getVPSingleValue();
      auto *UI = cast_or_null<Instruction>(ResultVPV->getUnderlyingValue());
      unsigned NewResSizeInBits = MinBWs.lookup(UI);
      if (!NewResSizeInBits)
        continue;

#ifndef NDEBUG
      NumProcessedRecipes++;
#endif
      // If the value wasn't vectorized, we must maintain the original scalar
      // type. Skip those here, after incrementing NumProcessedRecipes. Also
      // skip casts which do not need to be handled explicitly here, as
      // redundant casts will be removed during recipe simplification.
      if (isa<VPReplicateRecipe, VPWidenCastRecipe>(&R)) {
#ifndef NDEBUG
        // If any of the operands is a live-in and not used by VPWidenRecipe or
        // VPWidenSelectRecipe, but is in MinBWs, make sure it is counted as
        // processed as well. When MinBWs is currently constructed, there is no
        // information about whether recipes are widened or replicated and in
        // case they are replicated the operands are not truncated. Counting
        // them here ensures we do not miss any recipes in MinBWs.
        // TODO: Remove once the analysis is done on VPlan.
        for (VPValue *Op : R.operands()) {
          if (!Op->isLiveIn())
            continue;
          auto *UV = dyn_cast_or_null<Instruction>(Op->getUnderlyingValue());
          if (UV && MinBWs.contains(UV) && !ProcessedTruncs.contains(Op) &&
              none_of(Op->users(),
                      IsaPred<VPWidenRecipe, VPWidenSelectRecipe>)) {
            // Add an entry to ProcessedTruncs to avoid counting the same
            // operand multiple times.
            ProcessedTruncs[Op] = nullptr;
            NumProcessedRecipes += 1;
          }
        }
#endif
        continue;
      }

      Type *OldResTy = TypeInfo.inferScalarType(ResultVPV);
      unsigned OldResSizeInBits = OldResTy->getScalarSizeInBits();
      assert(OldResTy->isIntegerTy() && "only integer types supported");
      (void)OldResSizeInBits;

      LLVMContext &Ctx = CanonicalIVType->getContext();
      auto *NewResTy = IntegerType::get(Ctx, NewResSizeInBits);

      // Any wrapping introduced by shrinking this operation shouldn't be
      // considered undefined behavior. So, we can't unconditionally copy
      // arithmetic wrapping flags to VPW.
      if (auto *VPW = dyn_cast<VPRecipeWithIRFlags>(&R))
        VPW->dropPoisonGeneratingFlags();

      using namespace llvm::VPlanPatternMatch;
      if (OldResSizeInBits != NewResSizeInBits &&
          !match(&R, m_Binary<Instruction::ICmp>(m_VPValue(), m_VPValue()))) {
        // Extend result to original width.
        auto *Ext =
            new VPWidenCastRecipe(Instruction::ZExt, ResultVPV, OldResTy);
        Ext->insertAfter(&R);
        ResultVPV->replaceAllUsesWith(Ext);
        Ext->setOperand(0, ResultVPV);
        assert(OldResSizeInBits > NewResSizeInBits && "Nothing to shrink?");
      } else {
        assert(
            match(&R, m_Binary<Instruction::ICmp>(m_VPValue(), m_VPValue())) &&
            "Only ICmps should not need extending the result.");
      }

      assert(!isa<VPWidenStoreRecipe>(&R) && "stores cannot be narrowed");
      if (isa<VPWidenLoadRecipe>(&R))
        continue;

      // Shrink operands by introducing truncates as needed.
      unsigned StartIdx = isa<VPWidenSelectRecipe>(&R) ? 1 : 0;
      for (unsigned Idx = StartIdx; Idx != R.getNumOperands(); ++Idx) {
        auto *Op = R.getOperand(Idx);
        unsigned OpSizeInBits =
            TypeInfo.inferScalarType(Op)->getScalarSizeInBits();
        if (OpSizeInBits == NewResSizeInBits)
          continue;
        assert(OpSizeInBits > NewResSizeInBits && "nothing to truncate");
        auto [ProcessedIter, IterIsEmpty] =
            ProcessedTruncs.insert({Op, nullptr});
        VPWidenCastRecipe *NewOp =
            IterIsEmpty
                ? new VPWidenCastRecipe(Instruction::Trunc, Op, NewResTy)
                : ProcessedIter->second;
        R.setOperand(Idx, NewOp);
        if (!IterIsEmpty)
          continue;
        ProcessedIter->second = NewOp;
        if (!Op->isLiveIn()) {
          NewOp->insertBefore(&R);
        } else {
          PH->appendRecipe(NewOp);
#ifndef NDEBUG
          auto *OpInst = dyn_cast<Instruction>(Op->getLiveInIRValue());
          bool IsContained = MinBWs.contains(OpInst);
          NumProcessedRecipes += IsContained;
#endif
        }
      }

    }
  }

  assert(MinBWs.size() == NumProcessedRecipes &&
         "some entries in MinBWs haven't been processed");
}

void VPlanTransforms::optimize(VPlan &Plan) {
  removeRedundantCanonicalIVs(Plan);
  removeRedundantInductionCasts(Plan);

  simplifyRecipes(Plan, Plan.getCanonicalIV()->getScalarType());
  legalizeAndOptimizeInductions(Plan);
  removeRedundantExpandSCEVRecipes(Plan);
  simplifyRecipes(Plan, Plan.getCanonicalIV()->getScalarType());
  removeDeadRecipes(Plan);

  createAndOptimizeReplicateRegions(Plan);
  mergeBlocksIntoPredecessors(Plan);
  licm(Plan);
}

// Add a VPActiveLaneMaskPHIRecipe and related recipes to \p Plan and replace
// the loop terminator with a branch-on-cond recipe with the negated
// active-lane-mask as operand. Note that this turns the loop into an
// uncountable one. Only the existing terminator is replaced; all other
// existing recipes/users remain unchanged, except for poison-generating flags
// being dropped from the canonical IV increment. Return the created
// VPActiveLaneMaskPHIRecipe.
//
// The function uses the following definitions:
//
//  %TripCount = DataAndControlFlowWithoutRuntimeCheck ?
//    calculate-trip-count-minus-VF (original TC) : original TC
//  %IncrementValue = DataAndControlFlowWithoutRuntimeCheck ?
//     CanonicalIVPhi : CanonicalIVIncrement
//  %StartV is the canonical induction start value.
//
// The function adds the following recipes:
//
// vector.ph:
//   %TripCount = calculate-trip-count-minus-VF (original TC)
//       [if DataAndControlFlowWithoutRuntimeCheck]
//   %EntryInc = canonical-iv-increment-for-part %StartV
//   %EntryALM = active-lane-mask %EntryInc, %TripCount
//
// vector.body:
//   ...
//   %P = active-lane-mask-phi [ %EntryALM, %vector.ph ],
//                             [ %ALM, %vector.body ]
//   ...
//   %InLoopInc = canonical-iv-increment-for-part %IncrementValue
//   %ALM = active-lane-mask %InLoopInc, TripCount
//   %Negated = Not %ALM
//   branch-on-cond %Negated
//
static VPActiveLaneMaskPHIRecipe *addVPLaneMaskPhiAndUpdateExitBranch(
    VPlan &Plan, bool DataAndControlFlowWithoutRuntimeCheck) {
  VPRegionBlock *TopRegion = Plan.getVectorLoopRegion();
  VPBasicBlock *EB = TopRegion->getExitingBasicBlock();
  auto *CanonicalIVPHI = Plan.getCanonicalIV();
  VPValue *StartV = CanonicalIVPHI->getStartValue();

  auto *CanonicalIVIncrement =
      cast<VPInstruction>(CanonicalIVPHI->getBackedgeValue());
  // TODO: Check if dropping the flags is needed if
  // !DataAndControlFlowWithoutRuntimeCheck.
  CanonicalIVIncrement->dropPoisonGeneratingFlags();
  DebugLoc DL = CanonicalIVIncrement->getDebugLoc();
  // We can't use StartV directly in the ActiveLaneMask VPInstruction, since
  // we have to take unrolling into account. Each part needs to start at
  //   Part * VF
  auto *VecPreheader = Plan.getVectorPreheader();
  VPBuilder Builder(VecPreheader);

  // Create the ActiveLaneMask instruction using the correct start values.
  VPValue *TC = Plan.getTripCount();

  VPValue *TripCount, *IncrementValue;
  if (!DataAndControlFlowWithoutRuntimeCheck) {
    // When the loop is guarded by a runtime overflow check for the loop
    // induction variable increment by VF, we can increment the value before
    // the get.active.lane.mask and use the unmodified tripcount.
    IncrementValue = CanonicalIVIncrement;
    TripCount = TC;
  } else {
    // When avoiding a runtime check, the active.lane.mask inside the loop
    // uses a modified trip count and the induction variable increment is
    // done after the active.lane.mask intrinsic is called.
    IncrementValue = CanonicalIVPHI;
    TripCount = Builder.createNaryOp(VPInstruction::CalculateTripCountMinusVF,
                                     {TC}, DL);
  }
  auto *EntryIncrement = Builder.createOverflowingOp(
      VPInstruction::CanonicalIVIncrementForPart, {StartV}, {false, false}, DL,
      "index.part.next");

  // Create the active lane mask instruction in the VPlan preheader.
  auto *EntryALM =
      Builder.createNaryOp(VPInstruction::ActiveLaneMask, {EntryIncrement, TC},
                           DL, "active.lane.mask.entry");

  // Now create the ActiveLaneMaskPhi recipe in the main loop using the
  // preheader ActiveLaneMask instruction.
  auto *LaneMaskPhi = new VPActiveLaneMaskPHIRecipe(EntryALM, DebugLoc());
  LaneMaskPhi->insertAfter(CanonicalIVPHI);

  // Create the active lane mask for the next iteration of the loop before the
  // original terminator.
  VPRecipeBase *OriginalTerminator = EB->getTerminator();
  Builder.setInsertPoint(OriginalTerminator);
  auto *InLoopIncrement =
      Builder.createOverflowingOp(VPInstruction::CanonicalIVIncrementForPart,
                                  {IncrementValue}, {false, false}, DL);
  auto *ALM = Builder.createNaryOp(VPInstruction::ActiveLaneMask,
                                   {InLoopIncrement, TripCount}, DL,
                                   "active.lane.mask.next");
  LaneMaskPhi->addOperand(ALM);

  // Replace the original terminator with BranchOnCond. We have to invert the
  // mask here because a true condition means jumping to the exit block.
  auto *NotMask = Builder.createNot(ALM, DL);
  Builder.createNaryOp(VPInstruction::BranchOnCond, {NotMask}, DL);
  OriginalTerminator->eraseFromParent();
  return LaneMaskPhi;
}

/// Collect all VPValues representing a header mask through the (ICMP_ULE,
/// WideCanonicalIV, backedge-taken-count) pattern.
/// TODO: Introduce an explicit recipe for the header mask instead of
/// searching for the header-mask pattern manually.
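///
/// I.e. (illustrative), masks of the shape:
///   %wide.iv = WIDEN-CANONICAL-INDUCTION %canonical.iv
///   %mask    = icmp ule %wide.iv, %backedge.taken.count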
static SmallVector<VPValue *> collectAllHeaderMasks(VPlan &Plan) {
  SmallVector<VPValue *> WideCanonicalIVs;
  auto *FoundWidenCanonicalIVUser =
      find_if(Plan.getCanonicalIV()->users(),
              [](VPUser *U) { return isa<VPWidenCanonicalIVRecipe>(U); });
  assert(count_if(Plan.getCanonicalIV()->users(),
                  [](VPUser *U) { return isa<VPWidenCanonicalIVRecipe>(U); }) <=
             1 &&
         "Must have at most one VPWideCanonicalIVRecipe");
  if (FoundWidenCanonicalIVUser != Plan.getCanonicalIV()->users().end()) {
    auto *WideCanonicalIV =
        cast<VPWidenCanonicalIVRecipe>(*FoundWidenCanonicalIVUser);
    WideCanonicalIVs.push_back(WideCanonicalIV);
  }

  // Also include VPWidenIntOrFpInductionRecipes that represent a widened
  // version of the canonical induction.
  VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
  for (VPRecipeBase &Phi : HeaderVPBB->phis()) {
    auto *WidenOriginalIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
    if (WidenOriginalIV && WidenOriginalIV->isCanonical())
      WideCanonicalIVs.push_back(WidenOriginalIV);
  }

  // Walk users of wide canonical IVs and collect all compares of the form
  // (ICMP_ULE, WideCanonicalIV, backedge-taken-count).
  SmallVector<VPValue *> HeaderMasks;
  for (auto *Wide : WideCanonicalIVs) {
    for (VPUser *U : SmallVector<VPUser *>(Wide->users())) {
      auto *HeaderMask = dyn_cast<VPInstruction>(U);
      if (!HeaderMask || !vputils::isHeaderMask(HeaderMask, Plan))
        continue;

      assert(HeaderMask->getOperand(0) == Wide &&
             "WidenCanonicalIV must be the first operand of the compare");
      HeaderMasks.push_back(HeaderMask);
    }
  }
  return HeaderMasks;
}

void VPlanTransforms::addActiveLaneMask(
    VPlan &Plan, bool UseActiveLaneMaskForControlFlow,
    bool DataAndControlFlowWithoutRuntimeCheck) {
  assert((!DataAndControlFlowWithoutRuntimeCheck ||
          UseActiveLaneMaskForControlFlow) &&
         "DataAndControlFlowWithoutRuntimeCheck implies "
         "UseActiveLaneMaskForControlFlow");

  auto *FoundWidenCanonicalIVUser =
      find_if(Plan.getCanonicalIV()->users(),
              [](VPUser *U) { return isa<VPWidenCanonicalIVRecipe>(U); });
  assert(FoundWidenCanonicalIVUser &&
         "Must have widened canonical IV when tail folding!");
  auto *WideCanonicalIV =
      cast<VPWidenCanonicalIVRecipe>(*FoundWidenCanonicalIVUser);
  VPSingleDefRecipe *LaneMask;
  if (UseActiveLaneMaskForControlFlow) {
    LaneMask = addVPLaneMaskPhiAndUpdateExitBranch(
        Plan, DataAndControlFlowWithoutRuntimeCheck);
  } else {
    VPBuilder B = VPBuilder::getToInsertAfter(WideCanonicalIV);
    LaneMask = B.createNaryOp(VPInstruction::ActiveLaneMask,
                              {WideCanonicalIV, Plan.getTripCount()}, nullptr,
                              "active.lane.mask");
  }

  // Walk users of WideCanonicalIV and replace all compares of the form
  // (ICMP_ULE, WideCanonicalIV, backedge-taken-count) with an
  // active-lane-mask.
  for (VPValue *HeaderMask : collectAllHeaderMasks(Plan))
    HeaderMask->replaceAllUsesWith(LaneMask);
}

/// Try to convert \p CurRecipe to a corresponding EVL-based recipe. Returns
/// nullptr if no EVL-based recipe could be created.
/// \p HeaderMask  Header Mask.
/// \p CurRecipe   Recipe to be transformed.
/// \p TypeInfo    VPlan-based type analysis.
/// \p AllOneMask  The vector mask parameter of vector-predication intrinsics.
/// \p EVL         The explicit vector length parameter of vector-predication
///                intrinsics.
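///
/// E.g. (illustrative), a widened load masked by the header mask,
///   WIDEN %l = load %addr, mask: %header.mask
/// becomes
///   WIDEN %l = vp.load %addr, mask: all-true, EVL: %evl
/// since the EVL itself restricts execution to the lanes the header mask
/// would cover.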
1492 VPRecipeBase &CurRecipe,
1493 VPTypeAnalysis &TypeInfo,
1494 VPValue &AllOneMask, VPValue &EVL) {
1495 using namespace llvm::VPlanPatternMatch;
1496 auto GetNewMask = [&](VPValue *OrigMask) -> VPValue * {
1497 assert(OrigMask && "Unmasked recipe when folding tail");
1498 return HeaderMask == OrigMask ? nullptr : OrigMask;
1499 };
1500
1503 VPValue *NewMask = GetNewMask(L->getMask());
1504 return new VPWidenLoadEVLRecipe(*L, EVL, NewMask);
1505 })
1506 .Case<VPWidenStoreRecipe>([&](VPWidenStoreRecipe *S) {
1507 VPValue *NewMask = GetNewMask(S->getMask());
1508 return new VPWidenStoreEVLRecipe(*S, EVL, NewMask);
1509 })
1510 .Case<VPWidenRecipe>([&](VPWidenRecipe *W) -> VPRecipeBase * {
1511 unsigned Opcode = W->getOpcode();
1512 if (!Instruction::isBinaryOp(Opcode) && !Instruction::isUnaryOp(Opcode))
1513 return nullptr;
1514 return new VPWidenEVLRecipe(*W, EVL);
1515 })
1516 .Case<VPReductionRecipe>([&](VPReductionRecipe *Red) {
1517 VPValue *NewMask = GetNewMask(Red->getCondOp());
1518 return new VPReductionEVLRecipe(*Red, EVL, NewMask);
1519 })
1520 .Case<VPWidenIntrinsicRecipe, VPWidenCastRecipe>(
1521 [&](auto *CR) -> VPRecipeBase * {
1522 Intrinsic::ID VPID;
1523 if (auto *CallR = dyn_cast<VPWidenIntrinsicRecipe>(CR)) {
1524 VPID =
1525 VPIntrinsic::getForIntrinsic(CallR->getVectorIntrinsicID());
1526 } else {
1527 auto *CastR = cast<VPWidenCastRecipe>(CR);
1528 VPID = VPIntrinsic::getForOpcode(CastR->getOpcode());
1529 }
1530
1531 // Not all intrinsics have a corresponding VP intrinsic.
1532 if (VPID == Intrinsic::not_intrinsic)
1533 return nullptr;
1536 "Expected VP intrinsic to have mask and EVL");
1537
1538 SmallVector<VPValue *> Ops(CR->operands());
1539 Ops.push_back(&AllOneMask);
1540 Ops.push_back(&EVL);
1541 return new VPWidenIntrinsicRecipe(
1542 VPID, Ops, TypeInfo.inferScalarType(CR), CR->getDebugLoc());
1543 })
1544 .Case<VPWidenSelectRecipe>([&](VPWidenSelectRecipe *Sel) {
1545 SmallVector<VPValue *> Ops(Sel->operands());
1546 Ops.push_back(&EVL);
1547 return new VPWidenIntrinsicRecipe(Intrinsic::vp_select, Ops,
1548 TypeInfo.inferScalarType(Sel),
1549 Sel->getDebugLoc());
1550 })
1551 .Case<VPInstruction>([&](VPInstruction *VPI) -> VPRecipeBase * {
1552 VPValue *LHS, *RHS;
1553 // Transform select with a header mask condition
1554 // select(header_mask, LHS, RHS)
1555 // into vector predication merge.
1556 // vp.merge(all-true, LHS, RHS, EVL)
1557 if (!match(VPI, m_Select(m_Specific(HeaderMask), m_VPValue(LHS),
1558 m_VPValue(RHS))))
1559 return nullptr;
1560 // Use all true as the condition because this transformation is
1561 // limited to selects whose condition is a header mask.
1562 return new VPWidenIntrinsicRecipe(
1563 Intrinsic::vp_merge, {&AllOneMask, LHS, RHS, &EVL},
1564 TypeInfo.inferScalarType(LHS), VPI->getDebugLoc());
1565 })
1566 .Default([&](VPRecipeBase *R) { return nullptr; });
1567}
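
// Sketch of the vp.merge semantics the select rewrite above relies on: lane I
// takes the true operand iff I < EVL and the mask is set; all other lanes take
// the false operand. With an all-true mask, a select on the header mask thus
// becomes "first EVL lanes from LHS, remaining lanes from RHS". Plain C++,
// illustrative only.
#include <cstdint>
#include <vector>

std::vector<int> vpMergeModel(const std::vector<bool> &Mask,
                              const std::vector<int> &OnTrue,
                              const std::vector<int> &OnFalse, uint32_t EVL) {
  std::vector<int> Result(OnTrue.size());
  for (uint32_t I = 0; I < Result.size(); ++I)
    Result[I] = (I < EVL && Mask[I]) ? OnTrue[I] : OnFalse[I];
  return Result;
}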
1568
1569/// Replace recipes with their EVL variants.
1570static void transformRecipestoEVLRecipes(VPlan &Plan, VPValue &EVL) {
1571 Type *CanonicalIVType = Plan.getCanonicalIV()->getScalarType();
1572 VPTypeAnalysis TypeInfo(CanonicalIVType);
1573 LLVMContext &Ctx = CanonicalIVType->getContext();
1574 VPValue *AllOneMask = Plan.getOrAddLiveIn(ConstantInt::getTrue(Ctx));
1575
1576 for (VPUser *U : Plan.getVF().users()) {
1577 if (auto *R = dyn_cast<VPReverseVectorPointerRecipe>(U))
1578 R->setOperand(1, &EVL);
1579 }
1580
1581 SmallVector<VPRecipeBase *> ToErase;
1582
1583 for (VPValue *HeaderMask : collectAllHeaderMasks(Plan)) {
1584 for (VPUser *U : collectUsersRecursively(HeaderMask)) {
1585 auto *CurRecipe = cast<VPRecipeBase>(U);
1586 VPRecipeBase *EVLRecipe =
1587 createEVLRecipe(HeaderMask, *CurRecipe, TypeInfo, *AllOneMask, EVL);
1588 if (!EVLRecipe)
1589 continue;
1590
1591 [[maybe_unused]] unsigned NumDefVal = EVLRecipe->getNumDefinedValues();
1592 assert(NumDefVal == CurRecipe->getNumDefinedValues() &&
1593 "New recipe must define the same number of values as the "
1594 "original.");
1595 assert(
1596 NumDefVal <= 1 &&
1597 "Only supports recipes with a single definition or without users.");
1598 EVLRecipe->insertBefore(CurRecipe);
1599 if (isa<VPSingleDefRecipe, VPWidenLoadEVLRecipe>(EVLRecipe)) {
1600 VPValue *CurVPV = CurRecipe->getVPSingleValue();
1601 CurVPV->replaceAllUsesWith(EVLRecipe->getVPSingleValue());
1602 }
1603 // Defer erasing recipes till the end so that we don't invalidate the
1604 // VPTypeAnalysis cache.
1605 ToErase.push_back(CurRecipe);
1606 }
1607 }
1608
1609 for (VPRecipeBase *R : reverse(ToErase)) {
1610 SmallVector<VPValue *> PossiblyDead(R->operands());
1611 R->eraseFromParent();
1612 for (VPValue *Op : PossiblyDead)
1613 recursivelyDeleteDeadRecipes(Op);
1614 }
1615}
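
// Illustrative before/after of the rewrite above for a load masked only by
// the header mask (sketch syntax, not actual VPlan printing):
//
//   before: WIDEN %v = load %addr, mask = %header_mask
//   after:  WIDEN %v = vp.load %addr, mask = all-true, evl = %evl
//
// GetNewMask drops the mask entirely in this case, because the EVL operand
// already limits which lanes are active.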
1616
1617/// Add a VPEVLBasedIVPHIRecipe and related recipes to \p Plan and
1618/// replace all uses of VPCanonicalIVPHIRecipe, except the canonical IV
1619/// increment, with a VPEVLBasedIVPHIRecipe. After this transformation,
1620/// VPCanonicalIVPHIRecipe is used only for counting loop iterations.
1621///
1622/// The function uses the following definitions:
1623/// %StartV is the canonical induction start value.
1624///
1625/// The function adds the following recipes:
1626///
1627/// vector.ph:
1628/// ...
1629///
1630/// vector.body:
1631/// ...
1632/// %EVLPhi = EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI [ %StartV, %vector.ph ],
1633/// [ %NextEVLIV, %vector.body ]
1634/// %AVL = sub original TC, %EVLPhi
1635/// %VPEVL = EXPLICIT-VECTOR-LENGTH %AVL
1636/// ...
1637/// %NextEVLIV = add IVSize (cast i32 %VPEVL to IVSize), %EVLPhi
1638/// ...
1639///
1640/// If MaxSafeElements is provided, the function adds the following recipes:
1641/// vector.ph:
1642/// ...
1643///
1644/// vector.body:
1645/// ...
1646/// %EVLPhi = EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI [ %StartV, %vector.ph ],
1647/// [ %NextEVLIV, %vector.body ]
1648/// %AVL = sub original TC, %EVLPhi
1649/// %cmp = cmp ult %AVL, MaxSafeElements
1650/// %SAFE_AVL = select %cmp, %AVL, MaxSafeElements
1651/// %VPEVL = EXPLICIT-VECTOR-LENGTH %SAFE_AVL
1652/// ...
1653/// %NextEVLIV = add IVSize (cast i32 %VPEVL to IVSize), %EVLPhi
1654/// ...
1655///
1656bool VPlanTransforms::tryAddExplicitVectorLength(
1657 VPlan &Plan, const std::optional<unsigned> &MaxSafeElements) {
1658 VPBasicBlock *Header = Plan.getVectorLoopRegion()->getEntryBasicBlock();
1659 // The transform updates all users of inductions to work based on EVL, instead
1660 // of the VF directly. At the moment, widened inductions cannot be updated, so
1661 // bail out if the plan contains any.
1662 bool ContainsWidenInductions = any_of(
1663 Header->phis(),
1664 IsaPred<VPWidenIntOrFpInductionRecipe, VPWidenPointerInductionRecipe>);
1665 if (ContainsWidenInductions)
1666 return false;
1667
1668 auto *CanonicalIVPHI = Plan.getCanonicalIV();
1669 VPValue *StartV = CanonicalIVPHI->getStartValue();
1670
1671 // Create the ExplicitVectorLengthPhi recipe in the main loop.
1672 auto *EVLPhi = new VPEVLBasedIVPHIRecipe(StartV, DebugLoc());
1673 EVLPhi->insertAfter(CanonicalIVPHI);
1674 VPBuilder Builder(Header, Header->getFirstNonPhi());
1675 // Compute original TC - IV as the AVL (application vector length).
1676 VPValue *AVL = Builder.createNaryOp(
1677 Instruction::Sub, {Plan.getTripCount(), EVLPhi}, DebugLoc(), "avl");
1678 if (MaxSafeElements) {
1679 // Clamp the AVL to MaxSafeElements to stay within the maximum safe distance.
1680 VPValue *AVLSafe = Plan.getOrAddLiveIn(
1681 ConstantInt::get(CanonicalIVPHI->getScalarType(), *MaxSafeElements));
1682 VPValue *Cmp = Builder.createICmp(ICmpInst::ICMP_ULT, AVL, AVLSafe);
1683 AVL = Builder.createSelect(Cmp, AVL, AVLSafe, DebugLoc(), "safe_avl");
1684 }
1685 auto *VPEVL = Builder.createNaryOp(VPInstruction::ExplicitVectorLength, AVL,
1686 DebugLoc());
1687
1688 auto *CanonicalIVIncrement =
1689 cast<VPInstruction>(CanonicalIVPHI->getBackedgeValue());
1690 VPSingleDefRecipe *OpVPEVL = VPEVL;
1691 if (unsigned IVSize = CanonicalIVPHI->getScalarType()->getScalarSizeInBits();
1692 IVSize != 32) {
1693 OpVPEVL = new VPScalarCastRecipe(
1694 IVSize < 32 ? Instruction::Trunc : Instruction::ZExt, OpVPEVL,
1695 CanonicalIVPHI->getScalarType(), CanonicalIVIncrement->getDebugLoc());
1696 OpVPEVL->insertBefore(CanonicalIVIncrement);
1697 }
1698 auto *NextEVLIV =
1699 new VPInstruction(Instruction::Add, {OpVPEVL, EVLPhi},
1700 {CanonicalIVIncrement->hasNoUnsignedWrap(),
1701 CanonicalIVIncrement->hasNoSignedWrap()},
1702 CanonicalIVIncrement->getDebugLoc(), "index.evl.next");
1703 NextEVLIV->insertBefore(CanonicalIVIncrement);
1704 EVLPhi->addOperand(NextEVLIV);
1705
1706 transformRecipestoEVLRecipes(Plan, *VPEVL);
1707
1708 // Replace all uses of VPCanonicalIVPHIRecipe by
1709 // VPEVLBasedIVPHIRecipe except for the canonical IV increment.
1710 CanonicalIVPHI->replaceAllUsesWith(EVLPhi);
1711 CanonicalIVIncrement->setOperand(0, CanonicalIVPHI);
1712 // TODO: support unroll factor > 1.
1713 Plan.setUF(1);
1714 return true;
1715}
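
// A runnable scalar model of the control flow the recipes above create.
// computeEVL stands in for the target's EXPLICIT-VECTOR-LENGTH operation
// (e.g. RISC-V vsetvli), which may return any non-zero value that is at most
// both arguments; all names are illustrative, not LLVM API. Assumes VF and
// MaxSafeElements are at least 1.
#include <algorithm>
#include <cstdint>

static uint64_t computeEVL(uint64_t AVL, uint64_t VF) {
  return std::min(AVL, VF); // one simple, legal choice for the sketch
}

void evlLoopModel(uint64_t TripCount, uint64_t VF, uint64_t MaxSafeElements) {
  for (uint64_t EVLPhi = 0; EVLPhi < TripCount; /* stepped below */) {
    uint64_t AVL = TripCount - EVLPhi;    // %AVL = sub original TC, %EVLPhi
    AVL = std::min(AVL, MaxSafeElements); // %cmp / %SAFE_AVL select
    uint64_t EVL = computeEVL(AVL, VF);   // %VPEVL = EXPLICIT-VECTOR-LENGTH
    // ... process lanes [EVLPhi, EVLPhi + EVL) ...
    EVLPhi += EVL;                        // %NextEVLIV = add %VPEVL, %EVLPhi
  }
}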
1716
1717void VPlanTransforms::dropPoisonGeneratingRecipes(
1718 VPlan &Plan, function_ref<bool(BasicBlock *)> BlockNeedsPredication) {
1719 // Collect recipes in the backward slice of `Root` that may generate a poison
1720 // value that is used after vectorization.
1721 SmallPtrSet<VPRecipeBase *, 16> Visited;
1722 auto CollectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
1723 SmallVector<VPRecipeBase *, 16> Worklist;
1724 Worklist.push_back(Root);
1725
1726 // Traverse the backward slice of Root through its use-def chain.
1727 while (!Worklist.empty()) {
1728 VPRecipeBase *CurRec = Worklist.pop_back_val();
1729
1730 if (!Visited.insert(CurRec).second)
1731 continue;
1732
1733 // Prune search if we find another recipe generating a widen memory
1734 // instruction. Widen memory instructions involved in address computation
1735 // will lead to gather/scatter instructions, which don't need to be
1736 // handled.
1737 if (isa<VPWidenMemoryRecipe, VPInterleaveRecipe, VPScalarIVStepsRecipe,
1738 VPHeaderPHIRecipe>(CurRec))
1739 continue;
1740
1741 // This recipe contributes to the address computation of a widen
1742 // load/store. If the underlying instruction has poison-generating flags,
1743 // drop them directly.
1744 if (auto *RecWithFlags = dyn_cast<VPRecipeWithIRFlags>(CurRec)) {
1745 VPValue *A, *B;
1746 using namespace llvm::VPlanPatternMatch;
1747 // Dropping disjoint from an OR may yield incorrect results, as some
1748 // analysis may have converted it to an Add implicitly (e.g. SCEV used
1749 // for dependence analysis). Instead, replace it with an equivalent Add.
1750 // This is possible as all users of the disjoint OR only access lanes
1751 // where the operands are disjoint or poison otherwise.
1752 if (match(RecWithFlags, m_BinaryOr(m_VPValue(A), m_VPValue(B))) &&
1753 RecWithFlags->isDisjoint()) {
1754 VPBuilder Builder(RecWithFlags);
1755 VPInstruction *New = Builder.createOverflowingOp(
1756 Instruction::Add, {A, B}, {false, false},
1757 RecWithFlags->getDebugLoc());
1758 New->setUnderlyingValue(RecWithFlags->getUnderlyingValue());
1759 RecWithFlags->replaceAllUsesWith(New);
1760 RecWithFlags->eraseFromParent();
1761 CurRec = New;
1762 } else
1763 RecWithFlags->dropPoisonGeneratingFlags();
1764 } else {
1765 Instruction *Instr = dyn_cast_or_null<Instruction>(
1766 CurRec->getVPSingleValue()->getUnderlyingValue());
1767 (void)Instr;
1768 assert((!Instr || !Instr->hasPoisonGeneratingFlags()) &&
1769 "found instruction with poison generating flags not covered by "
1770 "VPRecipeWithIRFlags");
1771 }
1772
1773 // Add new definitions to the worklist.
1774 for (VPValue *Operand : CurRec->operands())
1775 if (VPRecipeBase *OpDef = Operand->getDefiningRecipe())
1776 Worklist.push_back(OpDef);
1777 }
1778 });
1779
1780 // Traverse all the recipes in the VPlan and collect the poison-generating
1781 // recipes in the backward slice starting at the address of a VPWidenRecipe or
1782 // VPInterleaveRecipe.
1783 auto Iter = vp_depth_first_deep(Plan.getEntry());
1784 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
1785 for (VPRecipeBase &Recipe : *VPBB) {
1786 if (auto *WidenRec = dyn_cast<VPWidenMemoryRecipe>(&Recipe)) {
1787 Instruction &UnderlyingInstr = WidenRec->getIngredient();
1788 VPRecipeBase *AddrDef = WidenRec->getAddr()->getDefiningRecipe();
1789 if (AddrDef && WidenRec->isConsecutive() &&
1790 BlockNeedsPredication(UnderlyingInstr.getParent()))
1791 CollectPoisonGeneratingInstrsInBackwardSlice(AddrDef);
1792 } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
1793 VPRecipeBase *AddrDef = InterleaveRec->getAddr()->getDefiningRecipe();
1794 if (AddrDef) {
1795 // Check if any member of the interleave group needs predication.
1796 const InterleaveGroup<Instruction> *InterGroup =
1797 InterleaveRec->getInterleaveGroup();
1798 bool NeedPredication = false;
1799 for (int I = 0, NumMembers = InterGroup->getNumMembers();
1800 I < NumMembers; ++I) {
1801 Instruction *Member = InterGroup->getMember(I);
1802 if (Member)
1803 NeedPredication |= BlockNeedsPredication(Member->getParent());
1804 }
1805
1806 if (NeedPredication)
1807 CollectPoisonGeneratingInstrsInBackwardSlice(AddrDef);
1808 }
1809 }
1810 }
1811 }
1812}
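
// A small check of the identity the disjoint-OR replacement above relies on:
// when two operands share no set bits, OR and ADD agree because no carries
// can be produced. Plain C++, illustrative only.
#include <cassert>
#include <cstdint>

void disjointOrMatchesAdd(uint32_t A, uint32_t B) {
  if ((A & B) == 0)           // "disjoint": no bit set in both operands
    assert((A | B) == A + B); // carry-free, so the two operations coincide
}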
1813
1814void VPlanTransforms::createInterleaveGroups(
1815 VPlan &Plan,
1816 const SmallPtrSetImpl<const InterleaveGroup<Instruction> *>
1817 &InterleaveGroups,
1818 VPRecipeBuilder &RecipeBuilder, bool ScalarEpilogueAllowed) {
1819 if (InterleaveGroups.empty())
1820 return;
1821
1822 // Interleave memory: for each Interleave Group we marked earlier as relevant
1823 // for this VPlan, replace the Recipes widening its memory instructions with a
1824 // single VPInterleaveRecipe at its insertion point.
1825 VPDominatorTree VPDT;
1826 VPDT.recalculate(Plan);
1827 for (const auto *IG : InterleaveGroups) {
1828 SmallVector<VPValue *, 4> StoredValues;
1829 for (unsigned i = 0; i < IG->getFactor(); ++i)
1830 if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) {
1831 auto *StoreR = cast<VPWidenStoreRecipe>(RecipeBuilder.getRecipe(SI));
1832 StoredValues.push_back(StoreR->getStoredValue());
1833 }
1834
1835 bool NeedsMaskForGaps =
1836 IG->requiresScalarEpilogue() && !ScalarEpilogueAllowed;
1837
1838 Instruction *IRInsertPos = IG->getInsertPos();
1839 auto *InsertPos =
1840 cast<VPWidenMemoryRecipe>(RecipeBuilder.getRecipe(IRInsertPos));
1841
1842 // Get or create the start address for the interleave group.
1843 auto *Start =
1844 cast<VPWidenMemoryRecipe>(RecipeBuilder.getRecipe(IG->getMember(0)));
1845 VPValue *Addr = Start->getAddr();
1846 VPRecipeBase *AddrDef = Addr->getDefiningRecipe();
1847 if (AddrDef && !VPDT.properlyDominates(AddrDef, InsertPos)) {
1848 // TODO: Hoist Addr's defining recipe (and any operands as needed) to
1849 // InsertPos or sink loads above zero members to join it.
1850 bool InBounds = false;
1851 if (auto *Gep = dyn_cast<GetElementPtrInst>(
1852 getLoadStorePointerOperand(IRInsertPos)->stripPointerCasts()))
1853 InBounds = Gep->isInBounds();
1854
1855 // We cannot re-use the address of member zero because it does not
1856 // dominate the insert position. Instead, use the address of the insert
1857 // position and create a PtrAdd adjusting it to the address of member
1858 // zero.
1859 assert(IG->getIndex(IRInsertPos) != 0 &&
1860 "index of insert position shouldn't be zero");
1861 auto &DL = IRInsertPos->getDataLayout();
1862 APInt Offset(32,
1863 DL.getTypeAllocSize(getLoadStoreType(IRInsertPos)) *
1864 IG->getIndex(IRInsertPos),
1865 /*IsSigned=*/true);
1866 VPValue *OffsetVPV = Plan.getOrAddLiveIn(
1867 ConstantInt::get(IRInsertPos->getParent()->getContext(), -Offset));
1868 VPBuilder B(InsertPos);
1869 Addr = InBounds ? B.createInBoundsPtrAdd(InsertPos->getAddr(), OffsetVPV)
1870 : B.createPtrAdd(InsertPos->getAddr(), OffsetVPV);
1871 }
1872 auto *VPIG = new VPInterleaveRecipe(IG, Addr, StoredValues,
1873 InsertPos->getMask(), NeedsMaskForGaps);
1874 VPIG->insertBefore(InsertPos);
1875
1876 unsigned J = 0;
1877 for (unsigned i = 0; i < IG->getFactor(); ++i)
1878 if (Instruction *Member = IG->getMember(i)) {
1879 VPRecipeBase *MemberR = RecipeBuilder.getRecipe(Member);
1880 if (!Member->getType()->isVoidTy()) {
1881 VPValue *OriginalV = MemberR->getVPSingleValue();
1882 OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
1883 J++;
1884 }
1885 MemberR->eraseFromParent();
1886 }
1887 }
1888}
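
// Sketch of the address adjustment above: when the chosen insert position is
// member K of the group and each member occupies AllocSize bytes, the address
// of member zero lies K * AllocSize bytes before the insert position's
// address, hence the negated Offset fed to the PtrAdd. Illustrative names
// only.
#include <cstdint>

const char *memberZeroAddress(const char *InsertPosAddr, uint64_t AllocSize,
                              uint64_t IndexOfInsertPos) {
  return InsertPosAddr - AllocSize * IndexOfInsertPos;
}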
1889
1890void VPlanTransforms::convertToConcreteRecipes(VPlan &Plan) {
1891 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
1892 vp_depth_first_deep(Plan.getEntry()))) {
1893 for (VPRecipeBase &R : make_early_inc_range(VPBB->phis())) {
1894 if (!isa<VPCanonicalIVPHIRecipe, VPEVLBasedIVPHIRecipe>(&R))
1895 continue;
1896 auto *PhiR = cast<VPHeaderPHIRecipe>(&R);
1897 StringRef Name =
1898 isa<VPCanonicalIVPHIRecipe>(PhiR) ? "index" : "evl.based.iv";
1899 auto *ScalarR =
1900 new VPScalarPHIRecipe(PhiR->getStartValue(), PhiR->getBackedgeValue(),
1901 PhiR->getDebugLoc(), Name);
1902 ScalarR->insertBefore(PhiR);
1903 PhiR->replaceAllUsesWith(ScalarR);
1904 PhiR->eraseFromParent();
1905 }
1906 }
1907}
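
// Sketch of the effect above: the abstract canonical-IV and EVL-based IV phis
// are rewritten into plain scalar phis (named "index" and "evl.based.iv"),
// keeping their start value, backedge value and debug location, so codegen
// needs no special handling for them.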
1908
1909void VPlanTransforms::handleUncountableEarlyExit(
1910 VPlan &Plan, ScalarEvolution &SE, Loop *OrigLoop,
1911 BasicBlock *UncountableExitingBlock, VPRecipeBuilder &RecipeBuilder) {
1912 VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion();
1913 auto *LatchVPBB = cast<VPBasicBlock>(LoopRegion->getExiting());
1914 VPBuilder Builder(LatchVPBB->getTerminator());
1915 auto *MiddleVPBB = Plan.getMiddleBlock();
1916 VPValue *IsEarlyExitTaken = nullptr;
1917
1918 // Process the uncountable exiting block. Update IsEarlyExitTaken, which
1919 // tracks if the uncountable early exit has been taken. Also split the middle
1920 // block and have it conditionally branch to the early exit block if
1921 // EarlyExitTaken.
1922 auto *EarlyExitingBranch =
1923 cast<BranchInst>(UncountableExitingBlock->getTerminator());
1924 BasicBlock *TrueSucc = EarlyExitingBranch->getSuccessor(0);
1925 BasicBlock *FalseSucc = EarlyExitingBranch->getSuccessor(1);
1926
1927 // The early exit block may or may not be the same as the "countable" exit
1928 // block. Create a new VPIRBB for the early exit block in case it is distinct
1929 // from the countable exit block.
1930 // TODO: Introduce both exit blocks during VPlan skeleton construction.
1931 VPIRBasicBlock *VPEarlyExitBlock;
1932 if (OrigLoop->getUniqueExitBlock()) {
1933 VPEarlyExitBlock = cast<VPIRBasicBlock>(MiddleVPBB->getSuccessors()[0]);
1934 } else {
1935 VPEarlyExitBlock = Plan.createVPIRBasicBlock(
1936 !OrigLoop->contains(TrueSucc) ? TrueSucc : FalseSucc);
1937 }
1938
1939 VPValue *EarlyExitNotTakenCond = RecipeBuilder.getBlockInMask(
1940 OrigLoop->contains(TrueSucc) ? TrueSucc : FalseSucc);
1941 auto *EarlyExitTakenCond = Builder.createNot(EarlyExitNotTakenCond);
1942 IsEarlyExitTaken =
1943 Builder.createNaryOp(VPInstruction::AnyOf, {EarlyExitTakenCond});
1944
1945 VPBasicBlock *NewMiddle = Plan.createVPBasicBlock("middle.split");
1946 VPBlockUtils::insertOnEdge(LoopRegion, MiddleVPBB, NewMiddle);
1947 VPBlockUtils::connectBlocks(NewMiddle, VPEarlyExitBlock);
1948 NewMiddle->swapSuccessors();
1949
1950 VPBuilder MiddleBuilder(NewMiddle);
1951 MiddleBuilder.createNaryOp(VPInstruction::BranchOnCond, {IsEarlyExitTaken});
1952
1953 // Replace the condition controlling the non-early exit from the vector loop
1954 // with one exiting if either the original condition of the vector latch is
1955 // true or the early exit has been taken.
1956 auto *LatchExitingBranch = cast<VPInstruction>(LatchVPBB->getTerminator());
1957 assert(LatchExitingBranch->getOpcode() == VPInstruction::BranchOnCount &&
1958 "Unexpected terminator");
1959 auto *IsLatchExitTaken =
1960 Builder.createICmp(CmpInst::ICMP_EQ, LatchExitingBranch->getOperand(0),
1961 LatchExitingBranch->getOperand(1));
1962 auto *AnyExitTaken = Builder.createNaryOp(
1963 Instruction::Or, {IsEarlyExitTaken, IsLatchExitTaken});
1964 Builder.createNaryOp(VPInstruction::BranchOnCond, AnyExitTaken);
1965 LatchExitingBranch->eraseFromParent();
1966}