//===-- VPlanUnroll.cpp - VPlan unroller ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements explicit unrolling for VPlans.
///
//===----------------------------------------------------------------------===//

#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanAnalysis.h"
#include "VPlanCFG.h"
#include "VPlanHelpers.h"
#include "VPlanPatternMatch.h"
#include "VPlanTransforms.h"
#include "VPlanUtils.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/IR/Intrinsics.h"

using namespace llvm;
using namespace llvm::VPlanPatternMatch;

namespace {

/// Helper to hold state needed for unrolling. It holds the Plan to unroll by
/// UF. It also holds copies of VPValues across UF-1 unroll parts to facilitate
/// the unrolling transformation, where the original VPValues are retained for
/// part zero.
class UnrollState {
  /// Plan to unroll.
  VPlan &Plan;
  /// Unroll factor to unroll by.
  const unsigned UF;
  /// Analysis for types.
  VPTypeAnalysis TypeInfo;

  /// Unrolling may create recipes that should not be unrolled themselves.
  /// Those are tracked in ToSkip.
  SmallPtrSet<VPRecipeBase *, 8> ToSkip;

  // Associate with each VPValue of part 0 its unrolled instances of parts 1,
  // ..., UF-1.
  DenseMap<VPValue *, SmallVector<VPValue *>> VPV2Parts;
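  // E.g., with UF = 3, a VPValue %v defined by a recipe in part 0 maps to
  // {%v.1, %v.2}, its copies for parts 1 and 2 (names illustrative).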

  /// Unroll replicate region \p VPR by cloning the region UF - 1 times.
  void unrollReplicateRegionByUF(VPRegionBlock *VPR);

  /// Add a start index operand to \p Steps for \p Part.
  void addStartIndexForScalarSteps(VPScalarIVStepsRecipe *Steps, unsigned Part);

  /// Unroll recipe \p R by cloning it UF - 1 times, unless it is uniform across
  /// all parts.
  void unrollRecipeByUF(VPRecipeBase &R);

  /// Unroll header phi recipe \p R. How exactly the recipe gets unrolled
  /// depends on the concrete header phi. Inserts newly created recipes at \p
  /// InsertPtForPhi.
  void unrollHeaderPHIByUF(VPHeaderPHIRecipe *R,
                           VPBasicBlock::iterator InsertPtForPhi);

  /// Unroll a widen induction recipe \p IV. This introduces recipes to compute
  /// the induction steps for each part.
  void unrollWidenInductionByUF(VPWidenInductionRecipe *IV,
                                VPBasicBlock::iterator InsertPtForPhi);

  VPValue *getConstantInt(unsigned Part) {
    Type *CanIVIntTy = Plan.getVectorLoopRegion()->getCanonicalIVType();
    return Plan.getConstantInt(CanIVIntTy, Part);
  }

public:
  UnrollState(VPlan &Plan, unsigned UF) : Plan(Plan), UF(UF), TypeInfo(Plan) {}

  void unrollBlock(VPBlockBase *VPB);

  VPValue *getValueForPart(VPValue *V, unsigned Part) {
    if (Part == 0 || isa<VPIRValue, VPSymbolicValue>(V))
      return V;
    assert((VPV2Parts.contains(V) && VPV2Parts[V].size() >= Part) &&
           "accessed value does not exist");
    return VPV2Parts[V][Part - 1];
  }

  /// Given a single original recipe \p OrigR (of part zero), and its copy \p
  /// CopyR for part \p Part, map every VPValue defined by \p OrigR to its
  /// corresponding VPValue defined by \p CopyR.
  void addRecipeForPart(VPRecipeBase *OrigR, VPRecipeBase *CopyR,
                        unsigned Part) {
    for (const auto &[Idx, VPV] : enumerate(OrigR->definedValues())) {
      const auto &[V, _] = VPV2Parts.try_emplace(VPV);
      assert(V->second.size() == Part - 1 && "earlier parts not set");
      V->second.push_back(CopyR->getVPValue(Idx));
    }
  }

  /// Given a uniform recipe \p R, add it for all parts.
  void addUniformForAllParts(VPSingleDefRecipe *R) {
    const auto &[V, Inserted] = VPV2Parts.try_emplace(R);
    assert(Inserted && "uniform value already added");
    for (unsigned Part = 0; Part != UF; ++Part)
      V->second.push_back(R);
  }

  bool contains(VPValue *VPV) const { return VPV2Parts.contains(VPV); }

  /// Update \p R's operand at \p OpIdx with its corresponding VPValue for part
  /// \p Part.
  void remapOperand(VPRecipeBase *R, unsigned OpIdx, unsigned Part) {
    auto *Op = R->getOperand(OpIdx);
    R->setOperand(OpIdx, getValueForPart(Op, Part));
  }

  /// Update \p R's operands with their corresponding VPValues for part
  /// \p Part.
  void remapOperands(VPRecipeBase *R, unsigned Part) {
    for (const auto &[OpIdx, Op] : enumerate(R->operands()))
      R->setOperand(OpIdx, getValueForPart(Op, Part));
  }
};
} // namespace

void UnrollState::addStartIndexForScalarSteps(VPScalarIVStepsRecipe *Steps,
                                              unsigned Part) {
  if (Part == 0)
    return;

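  // For part P > 0 the start index is P * VF scalar steps (just VF for part
  // 1), truncated or sign-extended to the IV's step type below, and converted
  // via SIToFP for floating-point IVs.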
  VPBuilder Builder(Steps);
  Type *BaseIVTy = TypeInfo.inferScalarType(Steps->getOperand(0));
  Type *IntStepTy =
      IntegerType::get(BaseIVTy->getContext(), BaseIVTy->getScalarSizeInBits());
  VPValue *StartIndex = Steps->getVFValue();
  if (Part > 1) {
    StartIndex = Builder.createOverflowingOp(
        Instruction::Mul,
        {StartIndex,
         Plan.getConstantInt(TypeInfo.inferScalarType(StartIndex), Part)});
  }
  StartIndex = Builder.createScalarSExtOrTrunc(
      StartIndex, IntStepTy, TypeInfo.inferScalarType(StartIndex),
      Steps->getDebugLoc());

  if (BaseIVTy->isFloatingPointTy())
    StartIndex = Builder.createScalarCast(Instruction::SIToFP, StartIndex,
                                          BaseIVTy, Steps->getDebugLoc());

  Steps->addOperand(StartIndex);
}

void UnrollState::unrollReplicateRegionByUF(VPRegionBlock *VPR) {
  VPBlockBase *InsertPt = VPR->getSingleSuccessor();
  for (unsigned Part = 1; Part != UF; ++Part) {
    auto *Copy = VPR->clone();
    VPBlockUtils::insertBlockBefore(Copy, InsertPt);

    auto PartI = vp_depth_first_shallow(Copy->getEntry());
    auto Part0 = vp_depth_first_shallow(VPR->getEntry());
    for (const auto &[PartIVPBB, Part0VPBB] :
         zip(VPBlockUtils::blocksOnly<VPBasicBlock>(PartI),
             VPBlockUtils::blocksOnly<VPBasicBlock>(Part0))) {
      for (const auto &[PartIR, Part0R] : zip(*PartIVPBB, *Part0VPBB)) {
        remapOperands(&PartIR, Part);
        if (auto *Steps = dyn_cast<VPScalarIVStepsRecipe>(&PartIR))
          addStartIndexForScalarSteps(Steps, Part);

        addRecipeForPart(&Part0R, &PartIR, Part);
      }
    }
  }
}

void UnrollState::unrollWidenInductionByUF(
    VPWidenInductionRecipe *IV, VPBasicBlock::iterator InsertPtForPhi) {
  VPBasicBlock *PH = cast<VPBasicBlock>(
      IV->getParent()->getEnclosingLoopRegion()->getSinglePredecessor());
  Type *IVTy = TypeInfo.inferScalarType(IV);
  auto &ID = IV->getInductionDescriptor();
  VPIRFlags Flags;
  if (isa_and_present<FPMathOperator>(ID.getInductionBinOp()))
    Flags = ID.getInductionBinOp()->getFastMathFlags();

  VPValue *ScalarStep = IV->getStepValue();
  VPBuilder Builder(PH);
  Type *VectorStepTy =
      IVTy->isPointerTy() ? TypeInfo.inferScalarType(ScalarStep) : IVTy;
  VPInstruction *VectorStep = Builder.createNaryOp(
      VPInstruction::WideIVStep, {&Plan.getVF(), ScalarStep}, VectorStepTy,
      Flags, IV->getDebugLoc());

  ToSkip.insert(VectorStep);

  // Now create recipes to compute the induction steps for parts 1 .. UF - 1.
  // Part 0 remains the header phi. Parts > 0 are computed by adding Step to
  // the previous part. The header phi recipe will get 2 new operands: the step
  // value for a single part and the last part, used to compute the backedge
  // value during VPWidenInductionRecipe::execute.
  // %Part.0 = VPWidenInductionRecipe %Start, %ScalarStep, %VectorStep, %Part.3
  // %Part.1 = %Part.0 + %VectorStep
  // %Part.2 = %Part.1 + %VectorStep
  // %Part.3 = %Part.2 + %VectorStep
  //
  // The newly added recipes are added to ToSkip to avoid interleaving them
  // again.
  VPValue *Prev = IV;
  Builder.setInsertPoint(IV->getParent(), InsertPtForPhi);
  unsigned AddOpc;
  if (IVTy->isPointerTy())
    AddOpc = VPInstruction::WidePtrAdd;
  else if (IVTy->isFloatingPointTy())
    AddOpc = ID.getInductionOpcode();
  else
    AddOpc = Instruction::Add;
  for (unsigned Part = 1; Part != UF; ++Part) {
    std::string Name =
        Part > 1 ? "step.add." + std::to_string(Part) : "step.add";

    VPInstruction *Add = Builder.createNaryOp(AddOpc,
                                              {
                                                  Prev,
                                                  VectorStep,
                                              },
                                              Flags, IV->getDebugLoc(), Name);
    ToSkip.insert(Add);
    addRecipeForPart(IV, Add, Part);
    Prev = Add;
  }
  IV->addOperand(VectorStep);
  IV->addOperand(Prev);
}

void UnrollState::unrollHeaderPHIByUF(VPHeaderPHIRecipe *R,
                                      VPBasicBlock::iterator InsertPtForPhi) {
  // First-order recurrences pass a single vector or scalar through their
  // header phis, irrespective of interleaving.
  if (isa<VPFirstOrderRecurrencePHIRecipe>(R))
    return;

  // Generate step vectors for each unrolled part.
  if (auto *IV = dyn_cast<VPWidenInductionRecipe>(R)) {
    unrollWidenInductionByUF(IV, InsertPtForPhi);
    return;
  }

  auto *RdxPhi = dyn_cast<VPReductionPHIRecipe>(R);
  if (RdxPhi && RdxPhi->isOrdered())
    return;

  auto InsertPt = std::next(R->getIterator());
  for (unsigned Part = 1; Part != UF; ++Part) {
    VPRecipeBase *Copy = R->clone();
    Copy->insertBefore(*R->getParent(), InsertPt);
    addRecipeForPart(R, Copy, Part);
    if (RdxPhi) {
      // If the start value is a ReductionStartVector, use the identity value
      // (second operand) for unrolled parts. If the scaling factor is > 1,
      // create a new ReductionStartVector with the scale factor and both
      // operands set to the identity value.
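      // The unrolled parts thus start from the reduction's identity value
      // (e.g., 0 for integer add), so combining all parts still yields the
      // original reduction result.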
      if (auto *VPI = dyn_cast<VPInstruction>(RdxPhi->getStartValue())) {
        assert(VPI->getOpcode() == VPInstruction::ReductionStartVector &&
               "unexpected start VPInstruction");
        if (Part != 1)
          continue;
        VPValue *StartV;
        if (match(VPI->getOperand(2), m_One())) {
          StartV = VPI->getOperand(1);
        } else {
          auto *C = VPI->clone();
          C->setOperand(0, C->getOperand(1));
          C->insertAfter(VPI);
          StartV = C;
        }
        for (unsigned Part = 1; Part != UF; ++Part)
          VPV2Parts[VPI][Part - 1] = StartV;
      }
      Copy->addOperand(getConstantInt(Part));
    } else {
      assert(isa<VPActiveLaneMaskPHIRecipe>(R) &&
             "unexpected header phi recipe not needing unrolled part");
    }
  }
}

/// Handle non-header-phi recipes.
void UnrollState::unrollRecipeByUF(VPRecipeBase &R) {
  if (match(&R, m_CombineOr(m_BranchOnCond(), m_BranchOnCount())))
    return;

  if (auto *VPI = dyn_cast<VPInstruction>(&R)) {
    if (vputils::onlyFirstPartUsed(VPI)) {
      addUniformForAllParts(VPI);
      return;
    }
  }
  if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R)) {
    if (isa<StoreInst>(RepR->getUnderlyingValue()) &&
        RepR->getOperand(1)->isDefinedOutsideLoopRegions()) {
      // Stores to an invariant address only need to store the last part.
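      // E.g., with UF = 4 the single store is remapped to use the part-3
      // values; stores for earlier parts would be overwritten anyway.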
      remapOperands(&R, UF - 1);
      return;
    }
    if (match(RepR,
              m_Intrinsic<Intrinsic::experimental_noalias_scope_decl>())) {
      addUniformForAllParts(RepR);
      return;
    }
  }

  // Unroll non-uniform recipes.
  auto InsertPt = std::next(R.getIterator());
  VPBasicBlock &VPBB = *R.getParent();
  for (unsigned Part = 1; Part != UF; ++Part) {
    VPRecipeBase *Copy = R.clone();
    Copy->insertBefore(VPBB, InsertPt);
    addRecipeForPart(&R, Copy, Part);

    VPValue *Op;
    if (match(Copy, m_VPInstruction<VPInstruction::FirstOrderRecurrenceSplice>(
                        m_VPValue(), m_VPValue(Op)))) {
      Copy->setOperand(0, getValueForPart(Op, Part - 1));
      Copy->setOperand(1, getValueForPart(Op, Part));
      continue;
    }
    if (auto *VPR = dyn_cast<VPVectorPointerRecipe>(&R)) {
      VPBuilder Builder(VPR);
      const DataLayout &DL =
          Plan.getScalarHeader()->getIRBasicBlock()->getDataLayout();
      Type *IndexTy = DL.getIndexType(TypeInfo.inferScalarType(VPR));
      Type *VFTy = TypeInfo.inferScalarType(&Plan.getVF());
      VPValue *VF = Builder.createScalarZExtOrTrunc(
          &Plan.getVF(), IndexTy, VFTy, DebugLoc::getUnknown());
      // VFxUF does not wrap, so VF * Part also cannot wrap.
      VPValue *VFxPart = Builder.createOverflowingOp(
          Instruction::Mul, {VF, Plan.getConstantInt(IndexTy, Part)},
          {true, true});
      Copy->setOperand(0, VPR->getOperand(0));
      Copy->addOperand(VFxPart);
      continue;
    }
    if (auto *Red = dyn_cast<VPReductionRecipe>(&R)) {
      auto *Phi = dyn_cast<VPReductionPHIRecipe>(R.getOperand(0));
      if (Phi && Phi->isOrdered()) {
        auto &Parts = VPV2Parts[Phi];
        if (Part == 1) {
          Parts.clear();
          Parts.push_back(Red);
        }
        Parts.push_back(Copy->getVPSingleValue());
        Phi->setOperand(1, Copy->getVPSingleValue());
      }
    }
    remapOperands(Copy, Part);

    if (auto *ScalarIVSteps = dyn_cast<VPScalarIVStepsRecipe>(Copy))
      addStartIndexForScalarSteps(ScalarIVSteps, Part);

    // Add operand indicating the part to generate code for, to recipes still
    // requiring it.
    if (isa<VPWidenCanonicalIVRecipe>(Copy) ||
        match(Copy,
              m_VPInstruction<VPInstruction::CanonicalIVIncrementForPart>()))
      Copy->addOperand(getConstantInt(Part));

    if (isa<VPVectorEndPointerRecipe>(R))
      Copy->setOperand(0, R.getOperand(0));
  }
}

void UnrollState::unrollBlock(VPBlockBase *VPB) {
  auto *VPR = dyn_cast<VPRegionBlock>(VPB);
  if (VPR) {
    if (VPR->isReplicator())
      return unrollReplicateRegionByUF(VPR);

    // Traverse blocks in region in RPO to ensure defs are visited before uses
    // across blocks.
    ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>>
        RPOT(VPR->getEntry());
    for (VPBlockBase *VPB : RPOT)
      unrollBlock(VPB);
    return;
  }

  // VPB is a VPBasicBlock; unroll it, i.e., unroll its recipes.
  auto *VPBB = cast<VPBasicBlock>(VPB);
  auto InsertPtForPhi = VPBB->getFirstNonPhi();
  for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
    if (ToSkip.contains(&R) || isa<VPIRInstruction>(&R))
      continue;

    // Add all VPValues for all parts to AnyOf, FirstActiveLane, LastActiveLane
    // and Compute*Result, which combine all parts to compute the final value.
    VPValue *Op1;
    if (match(&R, m_VPInstruction<VPInstruction::AnyOf>(m_VPValue(Op1))) ||
        match(&R, m_FirstActiveLane(m_VPValue(Op1))) ||
        match(&R, m_LastActiveLane(m_VPValue(Op1))) ||
        match(&R,
              m_ComputeAnyOfResult(m_VPValue(), m_VPValue(), m_VPValue(Op1))) ||
        match(&R, m_ComputeReductionResult(m_VPValue(Op1)))) {
      addUniformForAllParts(cast<VPInstruction>(&R));
      for (unsigned Part = 1; Part != UF; ++Part)
        R.addOperand(getValueForPart(Op1, Part));
      continue;
    }
    VPValue *Op0;
    if (match(&R, m_ExtractLane(m_VPValue(Op0), m_VPValue(Op1)))) {
      addUniformForAllParts(cast<VPInstruction>(&R));
      for (unsigned Part = 1; Part != UF; ++Part)
        R.addOperand(getValueForPart(Op1, Part));
      continue;
    }

    if (Plan.hasScalarVFOnly()) {
      if (match(&R, m_ExtractLastPart(m_VPValue(Op0))) ||
          match(&R, m_ExtractPenultimateElement(m_VPValue(Op0)))) {
        auto *I = cast<VPInstruction>(&R);
        bool IsPenultimatePart =
            I->getOpcode() == VPInstruction::ExtractPenultimateElement;
        unsigned PartIdx = IsPenultimatePart ? UF - 2 : UF - 1;
        // For scalar VF, directly use the scalar part value.
        I->replaceAllUsesWith(getValueForPart(Op0, PartIdx));
        continue;
      }
    }
    // For vector VF, the penultimate element is always extracted from the
    // last part.
    if (match(&R, m_ExtractLastLaneOfLastPart(m_VPValue(Op0))) ||
        match(&R, m_ExtractPenultimateElement(m_VPValue(Op0)))) {
      addUniformForAllParts(cast<VPSingleDefRecipe>(&R));
      R.setOperand(0, getValueForPart(Op0, UF - 1));
      continue;
    }

    auto *SingleDef = dyn_cast<VPSingleDefRecipe>(&R);
    if (SingleDef && vputils::isUniformAcrossVFsAndUFs(SingleDef)) {
      addUniformForAllParts(SingleDef);
      continue;
    }

    if (auto *H = dyn_cast<VPHeaderPHIRecipe>(&R)) {
      unrollHeaderPHIByUF(H, InsertPtForPhi);
      continue;
    }

    unrollRecipeByUF(R);
  }
}

void VPlanTransforms::unrollByUF(VPlan &Plan, unsigned UF) {
  assert(UF > 0 && "Unroll factor must be positive");
  Plan.setUF(UF);
  llvm::scope_exit Cleanup([&Plan]() {
    auto Iter = vp_depth_first_deep(Plan.getEntry());
    // Remove recipes that are redundant after unrolling.
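    // E.g., with UF = 1 no part operand was added, so an increment-for-part
    // with a single operand is just its input and can be folded away.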
    for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
      for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
        auto *VPI = dyn_cast<VPInstruction>(&R);
        if (VPI &&
            VPI->getOpcode() == VPInstruction::CanonicalIVIncrementForPart &&
            VPI->getNumOperands() == 1) {
          VPI->replaceAllUsesWith(VPI->getOperand(0));
          VPI->eraseFromParent();
        }
      }
    }
  });
  if (UF == 1) {
    return;
  }

  UnrollState Unroller(Plan, UF);

  // Iterate over all blocks in the plan starting from Entry, and unroll
  // recipes inside them. This includes the vector preheader and middle blocks,
  // which may set up or post-process per-part values.
  ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>> RPOT(
      Plan.getEntry());
  for (VPBlockBase *VPB : RPOT)
    Unroller.unrollBlock(VPB);

  unsigned Part = 1;
  // Remap operands of cloned header phis to update backedge values. The header
  // phis cloned during unrolling are just after the header phi for part 0.
  // Reset Part to 1 when reaching the first (part 0) recipe of a block.
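  // E.g., with UF = 2 each header phi is immediately followed by its part-1
  // clone, whose backedge operand must be remapped to the part-1 value of the
  // incoming value.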
  for (VPRecipeBase &H :
       Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
    // The second operand of fixed-order recurrence phis, feeding the spliced
    // value across the backedge, needs to remap to the last part of the
    // spliced value.
    if (isa<VPFirstOrderRecurrencePHIRecipe>(&H)) {
      Unroller.remapOperand(&H, 1, UF - 1);
      continue;
    }
    if (Unroller.contains(H.getVPSingleValue())) {
      Part = 1;
      continue;
    }
    Unroller.remapOperands(&H, Part);
    Part++;
  }

  VPlanTransforms::removeDeadRecipes(Plan);
}

/// Create a single-scalar clone of \p DefR (must be a VPReplicateRecipe or
/// VPInstruction) for lane \p Lane. Use \p Def2LaneDefs to look up scalar
/// definitions for operands of \p DefR.
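/// E.g., for lane 2 a vector operand %a is accessed via an
/// 'extractelement %a, 2', unless per-lane definitions for %a exist in
/// \p Def2LaneDefs or %a is a BuildVector, whose lane-2 operand can be used
/// directly (operand names illustrative).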
static VPValue *
cloneForLane(VPlan &Plan, VPBuilder &Builder, Type *IdxTy,
             VPSingleDefRecipe *DefR, VPLane Lane,
             const DenseMap<VPValue *, SmallVector<VPValue *>> &Def2LaneDefs) {
  VPValue *Op;
  if (match(DefR, m_VPInstruction<VPInstruction::Unpack>(m_VPValue(Op)))) {
    auto LaneDefs = Def2LaneDefs.find(Op);
    if (LaneDefs != Def2LaneDefs.end())
      return LaneDefs->second[Lane.getKnownLane()];

    VPValue *Idx = Plan.getConstantInt(IdxTy, Lane.getKnownLane());
    return Builder.createNaryOp(Instruction::ExtractElement, {Op, Idx});
  }

  // Collect the operands at Lane, creating extracts as needed.
  SmallVector<VPValue *> NewOps;
  for (VPValue *Op : DefR->operands()) {
    // If Op is a definition that has been unrolled, directly use the clone for
    // the corresponding lane.
    auto LaneDefs = Def2LaneDefs.find(Op);
    if (LaneDefs != Def2LaneDefs.end()) {
      NewOps.push_back(LaneDefs->second[Lane.getKnownLane()]);
      continue;
    }
    if (Lane.getKind() == VPLane::Kind::ScalableLast) {
      // Look through mandatory Unpack.
      [[maybe_unused]] bool Matched =
          match(Op, m_VPInstruction<VPInstruction::Unpack>(m_VPValue(Op)));
      assert(Matched && "original op must have been Unpack");
      auto *ExtractPart =
          Builder.createNaryOp(VPInstruction::ExtractLastPart, {Op});
      NewOps.push_back(
          Builder.createNaryOp(VPInstruction::ExtractLastLane, {ExtractPart}));
      continue;
    }
    if (vputils::isSingleScalar(Op)) {
      NewOps.push_back(Op);
      continue;
    }

    // Look through buildvector to avoid unnecessary extracts.
    if (match(Op, m_BuildVector())) {
      NewOps.push_back(
          cast<VPInstruction>(Op)->getOperand(Lane.getKnownLane()));
      continue;
    }
    VPValue *Idx = Plan.getConstantInt(IdxTy, Lane.getKnownLane());
    VPValue *Ext = Builder.createNaryOp(Instruction::ExtractElement, {Op, Idx});
    NewOps.push_back(Ext);
  }

  VPSingleDefRecipe *New;
  if (auto *RepR = dyn_cast<VPReplicateRecipe>(DefR)) {
    // TODO: have cloning of replicate recipes also provide the desired result
    // coupled with setting its operands to NewOps (deriving IsSingleScalar and
    // Mask from the operands?)
    New = new VPReplicateRecipe(RepR->getUnderlyingInstr(), NewOps,
                                /*IsSingleScalar=*/true, /*Mask=*/nullptr,
                                *RepR, *RepR, RepR->getDebugLoc());
  } else {
    assert(isa<VPInstruction>(DefR) &&
           "DefR must be a VPReplicateRecipe or VPInstruction");
    New = DefR->clone();
    for (const auto &[Idx, Op] : enumerate(NewOps)) {
      New->setOperand(Idx, Op);
    }
  }
  New->insertBefore(DefR);
  return New;
}

void VPlanTransforms::replicateByVF(VPlan &Plan, ElementCount VF) {
  Type *IdxTy = IntegerType::get(
      Plan.getScalarHeader()->getIRBasicBlock()->getContext(), 32);

  // Visit all VPBBs outside the loop region and directly inside the top-level
  // loop region.
  auto VPBBsOutsideLoopRegion = VPBlockUtils::blocksOnly<VPBasicBlock>(
      vp_depth_first_shallow(Plan.getEntry()));
  auto VPBBsInsideLoopRegion = VPBlockUtils::blocksOnly<VPBasicBlock>(
      vp_depth_first_shallow(Plan.getVectorLoopRegion()->getEntry()));
  auto VPBBsToUnroll =
      concat<VPBasicBlock *>(VPBBsOutsideLoopRegion, VPBBsInsideLoopRegion);
  // A mapping of current VPValue definitions to collections of new VPValues
  // defined per lane. Serves to hook-up potential users of current VPValue
  // definitions that are replicated-per-VF later.
  DenseMap<VPValue *, SmallVector<VPValue *>> Def2LaneDefs;
  // The removal of current recipes being replaced by new ones needs to be
  // delayed until after Def2LaneDefs is no longer in use.
  SmallVector<VPRecipeBase *> ToRemove;
  for (VPBasicBlock *VPBB : VPBBsToUnroll) {
    for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
      if (!isa<VPReplicateRecipe, VPInstruction>(&R) ||
          (isa<VPReplicateRecipe>(&R) &&
           cast<VPReplicateRecipe>(&R)->isSingleScalar()) ||
          (isa<VPInstruction>(&R) &&
           !cast<VPInstruction>(&R)->doesGeneratePerAllLanes() &&
           cast<VPInstruction>(&R)->getOpcode() != VPInstruction::Unpack))
        continue;

      auto *DefR = cast<VPSingleDefRecipe>(&R);
      VPBuilder Builder(DefR);
      if (DefR->getNumUsers() == 0) {
        // Create single-scalar version of DefR for all lanes.
        for (unsigned I = 0; I != VF.getKnownMinValue(); ++I)
          cloneForLane(Plan, Builder, IdxTy, DefR, VPLane(I), Def2LaneDefs);
        DefR->eraseFromParent();
        continue;
      }
      // Create single-scalar version of DefR for all lanes.
      SmallVector<VPValue *> LaneDefs;
      for (unsigned I = 0; I != VF.getKnownMinValue(); ++I)
        LaneDefs.push_back(
            cloneForLane(Plan, Builder, IdxTy, DefR, VPLane(I), Def2LaneDefs));

      Def2LaneDefs[DefR] = LaneDefs;
      // Users that only demand the first lane can use the definition for lane
      // 0.
      DefR->replaceUsesWithIf(LaneDefs[0], [DefR](VPUser &U, unsigned) {
        return U.usesFirstLaneOnly(DefR);
      });

      // Update each build vector user that currently has DefR as its only
      // operand, to have all LaneDefs as its operands.
      for (VPUser *U : to_vector(DefR->users())) {
        auto *VPI = dyn_cast<VPInstruction>(U);
        if (!VPI || (VPI->getOpcode() != VPInstruction::BuildVector &&
                     VPI->getOpcode() != VPInstruction::BuildStructVector))
          continue;
        assert(VPI->getNumOperands() == 1 &&
               "Build(Struct)Vector must have a single operand before "
               "replicating by VF");
        VPI->setOperand(0, LaneDefs[0]);
        for (VPValue *LaneDef : drop_begin(LaneDefs))
          VPI->addOperand(LaneDef);
      }
      ToRemove.push_back(DefR);
    }
  }
  for (auto *R : reverse(ToRemove))
    R->eraseFromParent();
}