//===-- VPlanUnroll.cpp - VPlan unroller ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements explicit unrolling for VPlans.
///
//===----------------------------------------------------------------------===//

#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanAnalysis.h"
#include "VPlanCFG.h"
#include "VPlanHelpers.h"
#include "VPlanPatternMatch.h"
#include "VPlanTransforms.h"
#include "VPlanUtils.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/IR/Intrinsics.h"

using namespace llvm;
using namespace llvm::VPlanPatternMatch;

namespace {

/// Helper to hold state needed for unrolling. It holds the Plan to unroll by
/// UF. It also holds copies of VPValues across UF-1 unroll parts to facilitate
/// the unrolling transformation, where the original VPValues are retained for
/// part zero.
class UnrollState {
  /// Plan to unroll.
  VPlan &Plan;
  /// Unroll factor to unroll by.
  const unsigned UF;
  /// Analysis for types.
  VPTypeAnalysis TypeInfo;

  /// Unrolling may create recipes that should not be unrolled themselves.
  /// Those are tracked in ToSkip.
  SmallPtrSet<VPRecipeBase *, 8> ToSkip;

  // Associate with each VPValue of part 0 its unrolled instances of parts 1,
  // ..., UF-1.
  DenseMap<VPValue *, SmallVector<VPValue *>> VPV2Parts;
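  // For example, with UF = 4, VPV2Parts[V] holds the clones {V.1, V.2, V.3}
  // for parts 1-3 of a part-0 value V; part 0 is represented by V itself.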

  /// Unroll replicate region \p VPR by cloning the region UF - 1 times.
  void unrollReplicateRegionByUF(VPRegionBlock *VPR);

  /// Unroll recipe \p R by cloning it UF - 1 times, unless it is uniform across
  /// all parts.
  void unrollRecipeByUF(VPRecipeBase &R);

  /// Unroll header phi recipe \p R. How exactly the recipe gets unrolled
  /// depends on the concrete header phi. Inserts newly created recipes at \p
  /// InsertPtForPhi.
  void unrollHeaderPHIByUF(VPHeaderPHIRecipe *R,
                           VPBasicBlock::iterator InsertPtForPhi);

  /// Unroll a widen induction recipe \p IV. This introduces recipes to compute
  /// the induction steps for each part.
  void unrollWidenInductionByUF(VPWidenInductionRecipe *IV,
                                VPBasicBlock::iterator InsertPtForPhi);

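  /// Return a VPValue wrapping \p Part as a constant of the canonical IV's
  /// integer type.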
  VPValue *getConstantInt(unsigned Part) {
    Type *CanIVIntTy = Plan.getVectorLoopRegion()->getCanonicalIVType();
    return Plan.getConstantInt(CanIVIntTy, Part);
  }

public:
  UnrollState(VPlan &Plan, unsigned UF) : Plan(Plan), UF(UF), TypeInfo(Plan) {}

  void unrollBlock(VPBlockBase *VPB);

  VPValue *getValueForPart(VPValue *V, unsigned Part) {
    if (Part == 0 || V->isLiveIn())
      return V;
    assert((VPV2Parts.contains(V) && VPV2Parts[V].size() >= Part) &&
           "accessed value does not exist");
    return VPV2Parts[V][Part - 1];
  }

  /// Given a single original recipe \p OrigR (of part zero), and its copy \p
  /// CopyR for part \p Part, map every VPValue defined by \p OrigR to its
  /// corresponding VPValue defined by \p CopyR.
  void addRecipeForPart(VPRecipeBase *OrigR, VPRecipeBase *CopyR,
                        unsigned Part) {
    for (const auto &[Idx, VPV] : enumerate(OrigR->definedValues())) {
      const auto &[V, _] = VPV2Parts.try_emplace(VPV);
      assert(V->second.size() == Part - 1 && "earlier parts not set");
      V->second.push_back(CopyR->getVPValue(Idx));
    }
  }

  /// Given a uniform recipe \p R, add it for all parts.
  void addUniformForAllParts(VPSingleDefRecipe *R) {
    const auto &[V, Inserted] = VPV2Parts.try_emplace(R);
    assert(Inserted && "uniform value already added");
    for (unsigned Part = 0; Part != UF; ++Part)
      V->second.push_back(R);
  }

  bool contains(VPValue *VPV) const { return VPV2Parts.contains(VPV); }

  /// Update \p R's operand at \p OpIdx with its corresponding VPValue for part
  /// \p Part.
  void remapOperand(VPRecipeBase *R, unsigned OpIdx, unsigned Part) {
    auto *Op = R->getOperand(OpIdx);
    R->setOperand(OpIdx, getValueForPart(Op, Part));
  }

  /// Update \p R's operands with their corresponding VPValues for part \p
  /// Part.
  void remapOperands(VPRecipeBase *R, unsigned Part) {
    for (const auto &[OpIdx, Op] : enumerate(R->operands()))
      R->setOperand(OpIdx, getValueForPart(Op, Part));
  }
};
} // namespace

void UnrollState::unrollReplicateRegionByUF(VPRegionBlock *VPR) {
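  // The UF-1 region copies are inserted in order before the region's single
  // successor, e.g. for UF = 3: VPR (part 0) -> copy.1 -> copy.2 -> successor.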
  VPBlockBase *InsertPt = VPR->getSingleSuccessor();
  for (unsigned Part = 1; Part != UF; ++Part) {
    auto *Copy = VPR->clone();
    VPBlockUtils::insertBlockBefore(Copy, InsertPt);

    auto PartI = vp_depth_first_shallow(Copy->getEntry());
    auto Part0 = vp_depth_first_shallow(VPR->getEntry());
    for (const auto &[PartIVPBB, Part0VPBB] :
         zip(VPBlockUtils::blocksOnly<VPBasicBlock>(PartI),
             VPBlockUtils::blocksOnly<VPBasicBlock>(Part0))) {
      for (const auto &[PartIR, Part0R] : zip(*PartIVPBB, *Part0VPBB)) {
        remapOperands(&PartIR, Part);
        if (auto *ScalarIVSteps = dyn_cast<VPScalarIVStepsRecipe>(&PartIR)) {
          ScalarIVSteps->addOperand(getConstantInt(Part));
        }

        addRecipeForPart(&Part0R, &PartIR, Part);
      }
    }
  }
}

void UnrollState::unrollWidenInductionByUF(
    VPWidenInductionRecipe *IV, VPBasicBlock::iterator InsertPtForPhi) {
  VPBasicBlock *PH = cast<VPBasicBlock>(
      IV->getParent()->getEnclosingLoopRegion()->getSinglePredecessor());
  Type *IVTy = TypeInfo.inferScalarType(IV);
  auto &ID = IV->getInductionDescriptor();
  VPIRFlags Flags;
  if (isa_and_present<FPMathOperator>(ID.getInductionBinOp()))
    Flags = ID.getInductionBinOp()->getFastMathFlags();

  VPValue *ScalarStep = IV->getStepValue();
  VPBuilder Builder(PH);
  Type *VectorStepTy =
      IVTy->isPointerTy() ? TypeInfo.inferScalarType(ScalarStep) : IVTy;
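  // WideIVStep computes the step for an entire part, i.e. the scalar step
  // scaled by the (runtime) VF.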
  VPInstruction *VectorStep = Builder.createNaryOp(
      VPInstruction::WideIVStep, {&Plan.getVF(), ScalarStep}, VectorStepTy,
      Flags, IV->getDebugLoc());

  ToSkip.insert(VectorStep);

  // Now create recipes to compute the induction steps for part 1 .. UF. Part 0
  // remains the header phi. Parts > 0 are computed by adding Step to the
  // previous part. The header phi recipe will get 2 new operands: the step
  // value for a single part and the last part, used to compute the backedge
  // value during VPWidenInductionRecipe::execute.
  // %Part.0 = VPWidenInductionRecipe %Start, %ScalarStep, %VectorStep, %Part.3
  // %Part.1 = %Part.0 + %VectorStep
  // %Part.2 = %Part.1 + %VectorStep
  // %Part.3 = %Part.2 + %VectorStep
  //
  // The newly added recipes are added to ToSkip to avoid interleaving them
  // again.
  VPValue *Prev = IV;
  Builder.setInsertPoint(IV->getParent(), InsertPtForPhi);
  unsigned AddOpc;
  if (IVTy->isPointerTy())
    AddOpc = VPInstruction::WidePtrAdd;
  else if (IVTy->isFloatingPointTy())
    AddOpc = ID.getInductionOpcode();
  else
    AddOpc = Instruction::Add;
  for (unsigned Part = 1; Part != UF; ++Part) {
    std::string Name =
        Part > 1 ? "step.add." + std::to_string(Part) : "step.add";

    VPInstruction *Add = Builder.createNaryOp(AddOpc,
                                              {
                                                  Prev,
                                                  VectorStep,
                                              },
                                              Flags, IV->getDebugLoc(), Name);
    ToSkip.insert(Add);
    addRecipeForPart(IV, Add, Part);
    Prev = Add;
  }
  IV->addOperand(VectorStep);
  IV->addOperand(Prev);
}

void UnrollState::unrollHeaderPHIByUF(VPHeaderPHIRecipe *R,
                                      VPBasicBlock::iterator InsertPtForPhi) {
  // First-order recurrences pass a single vector or scalar through their header
  // phis, irrespective of interleaving.
  if (isa<VPFirstOrderRecurrencePHIRecipe>(R))
    return;

  // Generate step vectors for each unrolled part.
  if (auto *IV = dyn_cast<VPWidenInductionRecipe>(R)) {
    unrollWidenInductionByUF(IV, InsertPtForPhi);
    return;
  }

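  // Ordered reductions are strictly in-order and keep a single phi across all
  // parts; the chained per-part reduction values are wired up in
  // unrollRecipeByUF instead.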
  auto *RdxPhi = dyn_cast<VPReductionPHIRecipe>(R);
  if (RdxPhi && RdxPhi->isOrdered())
    return;

  auto InsertPt = std::next(R->getIterator());
  for (unsigned Part = 1; Part != UF; ++Part) {
    VPRecipeBase *Copy = R->clone();
    Copy->insertBefore(*R->getParent(), InsertPt);
    addRecipeForPart(R, Copy, Part);
    if (RdxPhi) {
      // If the start value is a ReductionStartVector, use the identity value
      // (second operand) for unrolled parts. If the scaling factor is > 1,
      // create a new ReductionStartVector with the scale factor and both
      // operands set to the identity value.
      if (auto *VPI = dyn_cast<VPInstruction>(RdxPhi->getStartValue())) {
        assert(VPI->getOpcode() == VPInstruction::ReductionStartVector &&
               "unexpected start VPInstruction");
        if (Part != 1)
          continue;
        VPValue *StartV;
        if (match(VPI->getOperand(2), m_One())) {
          StartV = VPI->getOperand(1);
        } else {
          auto *C = VPI->clone();
          C->setOperand(0, C->getOperand(1));
          C->insertAfter(VPI);
          StartV = C;
        }
        for (unsigned Part = 1; Part != UF; ++Part)
          VPV2Parts[VPI][Part - 1] = StartV;
      }
      Copy->addOperand(getConstantInt(Part));
    } else {
      assert(isa<VPActiveLaneMaskPHIRecipe>(R) &&
             "unexpected header phi recipe not needing unrolled part");
    }
  }
}

/// Handle non-header-phi recipes.
void UnrollState::unrollRecipeByUF(VPRecipeBase &R) {
  if (match(&R, m_CombineOr(m_BranchOnCond(), m_BranchOnCount())))
    return;

  if (auto *VPI = dyn_cast<VPInstruction>(&R)) {
    if (vputils::onlyFirstPartUsed(VPI)) {
      addUniformForAllParts(VPI);
      return;
    }
  }
  if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R)) {
    if (isa<StoreInst>(RepR->getUnderlyingValue()) &&
        RepR->getOperand(1)->isDefinedOutsideLoopRegions()) {
      // Stores to an invariant address only need to store the last part.
      remapOperands(&R, UF - 1);
      return;
    }
    if (auto *II = dyn_cast<IntrinsicInst>(RepR->getUnderlyingValue())) {
      if (II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl) {
        addUniformForAllParts(RepR);
        return;
      }
    }
  }

  // Unroll non-uniform recipes.
  auto InsertPt = std::next(R.getIterator());
  VPBasicBlock &VPBB = *R.getParent();
  for (unsigned Part = 1; Part != UF; ++Part) {
    VPRecipeBase *Copy = R.clone();
    Copy->insertBefore(VPBB, InsertPt);
    addRecipeForPart(&R, Copy, Part);

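    // A FirstOrderRecurrenceSplice for part P combines the recurrence's value
    // of part P-1 (operand 0) with its value of part P (operand 1), so its two
    // operands map to different parts.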
    VPValue *Op;
    if (match(&R, m_VPInstruction<VPInstruction::FirstOrderRecurrenceSplice>(
                      m_VPValue(), m_VPValue(Op)))) {
      Copy->setOperand(0, getValueForPart(Op, Part - 1));
      Copy->setOperand(1, getValueForPart(Op, Part));
      continue;
    }
    if (auto *Red = dyn_cast<VPReductionRecipe>(&R)) {
      auto *Phi = dyn_cast<VPReductionPHIRecipe>(R.getOperand(0));
      if (Phi && Phi->isOrdered()) {
        auto &Parts = VPV2Parts[Phi];
        if (Part == 1) {
          Parts.clear();
          Parts.push_back(Red);
        }
        Parts.push_back(Copy->getVPSingleValue());
        Phi->setOperand(1, Copy->getVPSingleValue());
      }
    }
    remapOperands(Copy, Part);

    // Add operand indicating the part to generate code for, to recipes still
    // requiring it.
    if (isa<VPScalarIVStepsRecipe, VPWidenCanonicalIVRecipe,
            VPVectorPointerRecipe, VPVectorEndPointerRecipe>(Copy) ||
        match(Copy,
              m_VPInstruction<VPInstruction::CanonicalIVIncrementForPart>(
                  m_VPValue())))
      Copy->addOperand(getConstantInt(Part));

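    // Vector (end) pointer recipes compute all per-part addresses from the
    // part-0 base pointer, so keep operand 0 pointing at the original base.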
    if (isa<VPVectorPointerRecipe, VPVectorEndPointerRecipe>(R))
      Copy->setOperand(0, R.getOperand(0));
  }
}

void UnrollState::unrollBlock(VPBlockBase *VPB) {
  auto *VPR = dyn_cast<VPRegionBlock>(VPB);
  if (VPR) {
    if (VPR->isReplicator())
      return unrollReplicateRegionByUF(VPR);

    // Traverse blocks in region in RPO to ensure defs are visited before uses
    // across blocks.
    ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>>
        RPOT(VPR->getEntry());
    for (VPBlockBase *VPB : RPOT)
      unrollBlock(VPB);
    return;
  }

  // VPB is a VPBasicBlock; unroll it, i.e., unroll its recipes.
  auto *VPBB = cast<VPBasicBlock>(VPB);
  auto InsertPtForPhi = VPBB->getFirstNonPhi();
  for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
    if (ToSkip.contains(&R) || isa<VPIRInstruction>(&R))
      continue;

    // Add all VPValues for all parts to AnyOf, FirstActiveLaneMask and
    // Compute*Result which combine all parts to compute the final value.
    VPValue *Op1;
    if (match(&R, m_VPInstruction<VPInstruction::AnyOf>(m_VPValue(Op1))) ||
        match(&R, m_FirstActiveLane(m_VPValue(Op1))) ||
        match(&R, m_VPInstruction<VPInstruction::ComputeAnyOfResult>(
                      m_VPValue(), m_VPValue(), m_VPValue(Op1))) ||
        match(&R, m_VPInstruction<VPInstruction::ComputeReductionResult>(
                      m_VPValue(), m_VPValue(Op1))) ||
        match(&R, m_VPInstruction<VPInstruction::ComputeFindIVResult>(
                      m_VPValue(), m_VPValue(), m_VPValue(), m_VPValue(Op1)))) {
      addUniformForAllParts(cast<VPInstruction>(&R));
      for (unsigned Part = 1; Part != UF; ++Part)
        R.addOperand(getValueForPart(Op1, Part));
      continue;
    }
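    // ExtractLane extracts a scalar from a lane across all parts; keep a
    // single instance and append the values of the remaining parts as extra
    // operands.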
    VPValue *Op0;
    if (match(&R, m_VPInstruction<VPInstruction::ExtractLane>(
                      m_VPValue(Op0), m_VPValue(Op1)))) {
      addUniformForAllParts(cast<VPInstruction>(&R));
      for (unsigned Part = 1; Part != UF; ++Part)
        R.addOperand(getValueForPart(Op1, Part));
      continue;
    }
    if (match(&R, m_ExtractLastElement(m_VPValue(Op0))) ||
        match(&R, m_VPInstruction<VPInstruction::ExtractPenultimateElement>(
                      m_VPValue(Op0)))) {
      addUniformForAllParts(cast<VPSingleDefRecipe>(&R));
      if (Plan.hasScalarVFOnly()) {
        auto *I = cast<VPInstruction>(&R);
        // Extracting from end with VF = 1 implies retrieving the last or
        // penultimate scalar part (UF-1 or UF-2).
        unsigned Offset =
            I->getOpcode() == VPInstruction::ExtractLastElement ? 1 : 2;
        I->replaceAllUsesWith(getValueForPart(Op0, UF - Offset));
        R.eraseFromParent();
      } else {
        // Otherwise we extract from the last part.
        remapOperands(&R, UF - 1);
      }
      continue;
    }

    auto *SingleDef = dyn_cast<VPSingleDefRecipe>(&R);
    if (SingleDef && vputils::isUniformAcrossVFsAndUFs(SingleDef)) {
      addUniformForAllParts(SingleDef);
      continue;
    }

    if (auto *H = dyn_cast<VPHeaderPHIRecipe>(&R)) {
      unrollHeaderPHIByUF(H, InsertPtForPhi);
      continue;
    }

    unrollRecipeByUF(R);
  }
}

void VPlanTransforms::unrollByUF(VPlan &Plan, unsigned UF) {
  assert(UF > 0 && "Unroll factor must be positive");
  Plan.setUF(UF);
  auto Cleanup = make_scope_exit([&Plan]() {
    auto Iter = vp_depth_first_deep(Plan.getEntry());
    // Remove recipes that are redundant after unrolling.
    for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
      for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
        auto *VPI = dyn_cast<VPInstruction>(&R);
        if (VPI &&
            VPI->getOpcode() == VPInstruction::CanonicalIVIncrementForPart &&
            VPI->getNumOperands() == 1) {
          VPI->replaceAllUsesWith(VPI->getOperand(0));
          VPI->eraseFromParent();
        }
      }
    }
  });
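  // Note that the scope_exit cleanup above also runs for UF == 1, folding away
  // CanonicalIVIncrementForPart instructions that carry no part operand.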
  if (UF == 1) {
    return;
  }

  UnrollState Unroller(Plan, UF);

  // Iterate over all blocks in the plan starting from Entry, and unroll
  // recipes inside them. This includes the vector preheader and middle blocks,
  // which may set up or post-process per-part values.
  ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>> RPOT(
      Plan.getEntry());
  for (VPBlockBase *VPB : RPOT)
    Unroller.unrollBlock(VPB);

  unsigned Part = 1;
  // Remap operands of cloned header phis to update backedge values. The header
  // phis cloned during unrolling are just after the header phi for part 0.
  // Reset Part to 1 when reaching the first (part 0) recipe of a block.
  for (VPRecipeBase &H :
       Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
    // The second operand of Fixed Order Recurrence phi's, feeding the spliced
    // value across the backedge, needs to remap to the last part of the spliced
    // value.
    if (isa<VPFirstOrderRecurrencePHIRecipe>(&H)) {
      Unroller.remapOperand(&H, 1, UF - 1);
      continue;
    }
    if (Unroller.contains(H.getVPSingleValue())) {
      Part = 1;
      continue;
    }
    Unroller.remapOperands(&H, Part);
    Part++;
  }

  VPlanTransforms::removeDeadRecipes(Plan);
}

/// Create a single-scalar clone of \p DefR (must be a VPReplicateRecipe or
/// VPInstruction) for lane \p Lane. Use \p Def2LaneDefs to look up scalar
/// definitions for operands of \p DefR.
static VPValue *
cloneForLane(VPlan &Plan, VPBuilder &Builder, Type *IdxTy,
             VPRecipeWithIRFlags *DefR, VPLane Lane,
             const DenseMap<VPValue *, SmallVector<VPValue *>> &Def2LaneDefs) {
  VPValue *Op;
  if (match(DefR, m_VPInstruction<VPInstruction::Unpack>(m_VPValue(Op)))) {
    auto LaneDefs = Def2LaneDefs.find(Op);
    if (LaneDefs != Def2LaneDefs.end())
      return LaneDefs->second[Lane.getKnownLane()];

    VPValue *Idx = Plan.getConstantInt(IdxTy, Lane.getKnownLane());
    return Builder.createNaryOp(Instruction::ExtractElement, {Op, Idx});
  }

  // Collect the operands at Lane, creating extracts as needed.
  SmallVector<VPValue *> NewOps;
  for (VPValue *Op : DefR->operands()) {
    // If Op is a definition that has been unrolled, directly use the clone for
    // the corresponding lane.
    auto LaneDefs = Def2LaneDefs.find(Op);
    if (LaneDefs != Def2LaneDefs.end()) {
      NewOps.push_back(LaneDefs->second[Lane.getKnownLane()]);
      continue;
    }
    if (Lane.getKind() == VPLane::Kind::ScalableLast) {
      // Look through mandatory Unpack.
      [[maybe_unused]] bool Matched =
          match(Op, m_VPInstruction<VPInstruction::Unpack>(m_VPValue(Op)));
      assert(Matched && "original op must have been Unpack");
      NewOps.push_back(
          Builder.createNaryOp(VPInstruction::ExtractLastElement, {Op}));
      continue;
    }
    if (vputils::isSingleScalar(Op)) {
      NewOps.push_back(Op);
      continue;
    }

    // Look through buildvector to avoid unnecessary extracts.
    if (match(Op, m_BuildVector())) {
      NewOps.push_back(
          cast<VPInstruction>(Op)->getOperand(Lane.getKnownLane()));
      continue;
    }
    VPValue *Idx = Plan.getConstantInt(IdxTy, Lane.getKnownLane());
    VPValue *Ext = Builder.createNaryOp(Instruction::ExtractElement, {Op, Idx});
    NewOps.push_back(Ext);
  }

  VPRecipeWithIRFlags *New;
  if (auto *RepR = dyn_cast<VPReplicateRecipe>(DefR)) {
    // TODO: have cloning of replicate recipes also provide the desired result
    // coupled with setting its operands to NewOps (deriving IsSingleScalar and
    // Mask from the operands?)
    New =
        new VPReplicateRecipe(RepR->getUnderlyingInstr(), NewOps,
                              /*IsSingleScalar=*/true, /*Mask=*/nullptr, *RepR);
  } else {
    assert(isa<VPInstruction>(DefR) &&
           "DefR must be a VPReplicateRecipe or VPInstruction");
    New = DefR->clone();
    for (const auto &[Idx, Op] : enumerate(NewOps)) {
      New->setOperand(Idx, Op);
    }
  }
  New->transferFlags(*DefR);
  New->insertBefore(DefR);
  return New;
}

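// For example (roughly), with VF = 2 a replicating
//   %r = REPLICATE call @f(%v)
// outside a replicate region becomes two single-scalar clones
//   %r.0 = call @f(extractelement %v, 0)
//   %r.1 = call @f(extractelement %v, 1)
// with wide users rewired via BuildVector(%r.0, %r.1).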
void VPlanTransforms::replicateByVF(VPlan &Plan, ElementCount VF) {
  Type *IdxTy = IntegerType::get(
      Plan.getScalarHeader()->getIRBasicBlock()->getContext(), 32);

  // Visit all VPBBs outside the loop region and directly inside the top-level
  // loop region.
  auto VPBBsOutsideLoopRegion = VPBlockUtils::blocksOnly<VPBasicBlock>(
      vp_depth_first_shallow(Plan.getEntry()));
  auto VPBBsInsideLoopRegion = VPBlockUtils::blocksOnly<VPBasicBlock>(
      vp_depth_first_shallow(Plan.getVectorLoopRegion()->getEntry()));
  auto VPBBsToUnroll =
      concat<VPBasicBlock *>(VPBBsOutsideLoopRegion, VPBBsInsideLoopRegion);
  // A mapping of current VPValue definitions to collections of new VPValues
  // defined per lane. Serves to hook-up potential users of current VPValue
  // definition that are replicated-per-VF later.
  DenseMap<VPValue *, SmallVector<VPValue *>> Def2LaneDefs;
  // The removal of current recipes being replaced by new ones needs to be
  // delayed after Def2LaneDefs is no longer in use.
  SmallVector<VPRecipeBase *> ToRemove;
  for (VPBasicBlock *VPBB : VPBBsToUnroll) {
    for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
      if (!isa<VPReplicateRecipe, VPInstruction>(&R) ||
          (isa<VPReplicateRecipe>(&R) &&
           cast<VPReplicateRecipe>(&R)->isSingleScalar()) ||
          (isa<VPInstruction>(&R) &&
           !cast<VPInstruction>(&R)->doesGeneratePerAllLanes() &&
           cast<VPInstruction>(&R)->getOpcode() != VPInstruction::Unpack))
        continue;

      auto *DefR = cast<VPRecipeWithIRFlags>(&R);
      VPBuilder Builder(DefR);
      if (DefR->getNumUsers() == 0) {
        // Create single-scalar version of DefR for all lanes.
        for (unsigned I = 0; I != VF.getKnownMinValue(); ++I)
          cloneForLane(Plan, Builder, IdxTy, DefR, VPLane(I), Def2LaneDefs);
        DefR->eraseFromParent();
        continue;
      }
      /// Create single-scalar version of DefR for all lanes.
      SmallVector<VPValue *> LaneDefs;
      for (unsigned I = 0; I != VF.getKnownMinValue(); ++I)
        LaneDefs.push_back(
            cloneForLane(Plan, Builder, IdxTy, DefR, VPLane(I), Def2LaneDefs));

      Def2LaneDefs[DefR] = LaneDefs;
      /// Users that only demand the first lane can use the definition for lane
      /// 0.
      DefR->replaceUsesWithIf(LaneDefs[0], [DefR](VPUser &U, unsigned) {
        return U.onlyFirstLaneUsed(DefR);
      });

      // Update each build vector user that currently has DefR as its only
      // operand, to have all LaneDefs as its operands.
      for (VPUser *U : to_vector(DefR->users())) {
        auto *VPI = dyn_cast<VPInstruction>(U);
        if (!VPI || (VPI->getOpcode() != VPInstruction::BuildVector &&
                     VPI->getOpcode() != VPInstruction::BuildStructVector))
          continue;
        assert(VPI->getNumOperands() == 1 &&
               "Build(Struct)Vector must have a single operand before "
               "replicating by VF");
        VPI->setOperand(0, LaneDefs[0]);
        for (VPValue *LaneDef : drop_begin(LaneDefs))
          VPI->addOperand(LaneDef);
      }
      ToRemove.push_back(DefR);
    }
  }
  for (auto *R : reverse(ToRemove))
    R->eraseFromParent();
}