//===- VPlanRecipes.cpp - Implementations for VPlan recipes ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file contains implementations for different VPlan recipes.
///
//===----------------------------------------------------------------------===//

13
14#include "VPlan.h"
15#include "VPlanAnalysis.h"
16#include "llvm/ADT/STLExtras.h"
18#include "llvm/ADT/Twine.h"
20#include "llvm/IR/BasicBlock.h"
21#include "llvm/IR/IRBuilder.h"
22#include "llvm/IR/Instruction.h"
24#include "llvm/IR/Type.h"
25#include "llvm/IR/Value.h"
28#include "llvm/Support/Debug.h"
33#include <cassert>
34
35using namespace llvm;
36
38
39namespace llvm {
41}
43
44#define LV_NAME "loop-vectorize"
45#define DEBUG_TYPE LV_NAME
46
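// Conservative query used by VPlan transforms: stores, interleave groups with
// store members, and calls that are not read-only may write to memory; any
// recipe kind not listed below is conservatively assumed to write.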
bool VPRecipeBase::mayWriteToMemory() const {
  switch (getVPDefID()) {
  case VPInterleaveSC:
    return cast<VPInterleaveRecipe>(this)->getNumStoreOperands() > 0;
  case VPWidenStoreEVLSC:
  case VPWidenStoreSC:
    return true;
  case VPReplicateSC:
    return cast<Instruction>(getVPSingleValue()->getUnderlyingValue())
        ->mayWriteToMemory();
  case VPWidenCallSC:
    return !cast<VPWidenCallRecipe>(this)
                ->getCalledScalarFunction()
                ->onlyReadsMemory();
  case VPBranchOnMaskSC:
  case VPScalarIVStepsSC:
  case VPPredInstPHISC:
    return false;
  case VPBlendSC:
  case VPReductionEVLSC:
  case VPReductionSC:
  case VPWidenCanonicalIVSC:
  case VPWidenCastSC:
  case VPWidenGEPSC:
  case VPWidenIntOrFpInductionSC:
  case VPWidenLoadEVLSC:
  case VPWidenLoadSC:
  case VPWidenPHISC:
  case VPWidenSC:
  case VPWidenSelectSC: {
    const Instruction *I =
        dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
    (void)I;
    assert((!I || !I->mayWriteToMemory()) &&
           "underlying instruction may write to memory");
    return false;
  }
  default:
    return true;
  }
}

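// Counterpart of mayWriteToMemory: loads, memory-reading replicated
// instructions and calls that are not write-only may read from memory;
// unlisted recipe kinds conservatively return true.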
bool VPRecipeBase::mayReadFromMemory() const {
  switch (getVPDefID()) {
  case VPWidenLoadEVLSC:
  case VPWidenLoadSC:
    return true;
  case VPReplicateSC:
    return cast<Instruction>(getVPSingleValue()->getUnderlyingValue())
        ->mayReadFromMemory();
  case VPWidenCallSC:
    return !cast<VPWidenCallRecipe>(this)
                ->getCalledScalarFunction()
                ->onlyWritesMemory();
  case VPBranchOnMaskSC:
  case VPPredInstPHISC:
  case VPScalarIVStepsSC:
  case VPWidenStoreEVLSC:
  case VPWidenStoreSC:
    return false;
  case VPBlendSC:
  case VPReductionEVLSC:
  case VPReductionSC:
  case VPWidenCanonicalIVSC:
  case VPWidenCastSC:
  case VPWidenGEPSC:
  case VPWidenIntOrFpInductionSC:
  case VPWidenPHISC:
  case VPWidenSC:
  case VPWidenSelectSC: {
    const Instruction *I =
        dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
    (void)I;
    assert((!I || !I->mayReadFromMemory()) &&
           "underlying instruction may read from memory");
    return false;
  }
  default:
    return true;
  }
}

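// A recipe has side effects if it may write to memory, may throw or may not
// return; unlisted recipe kinds are conservatively assumed to have side
// effects.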
bool VPRecipeBase::mayHaveSideEffects() const {
  switch (getVPDefID()) {
  case VPDerivedIVSC:
  case VPPredInstPHISC:
  case VPScalarCastSC:
    return false;
  case VPInstructionSC:
    switch (cast<VPInstruction>(this)->getOpcode()) {
    case Instruction::Or:
    case Instruction::ICmp:
    case Instruction::Select:
    case VPInstruction::Not:
    case VPInstruction::CalculateTripCountMinusVF:
    case VPInstruction::CanonicalIVIncrementForPart:
    case VPInstruction::ExtractFromEnd:
    case VPInstruction::FirstOrderRecurrenceSplice:
    case VPInstruction::LogicalAnd:
    case VPInstruction::PtrAdd:
      return false;
    default:
      return true;
    }
  case VPWidenCallSC: {
    Function *Fn = cast<VPWidenCallRecipe>(this)->getCalledScalarFunction();
    return mayWriteToMemory() || !Fn->doesNotThrow() || !Fn->willReturn();
  }
  case VPBlendSC:
  case VPReductionEVLSC:
  case VPReductionSC:
  case VPScalarIVStepsSC:
  case VPWidenCanonicalIVSC:
  case VPWidenCastSC:
  case VPWidenGEPSC:
  case VPWidenIntOrFpInductionSC:
  case VPWidenPHISC:
  case VPWidenPointerInductionSC:
  case VPWidenSC:
  case VPWidenSelectSC: {
    const Instruction *I =
        dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
    (void)I;
    assert((!I || !I->mayHaveSideEffects()) &&
           "underlying instruction has side-effects");
    return false;
  }
  case VPInterleaveSC:
    return mayWriteToMemory();
  case VPWidenLoadEVLSC:
  case VPWidenLoadSC:
  case VPWidenStoreEVLSC:
  case VPWidenStoreSC:
    assert(
        cast<VPWidenMemoryRecipe>(this)->getIngredient().mayHaveSideEffects() ==
            mayWriteToMemory() &&
        "mayHaveSideEffects result for ingredient differs from this "
        "implementation");
    return mayWriteToMemory();
  case VPReplicateSC: {
    auto *R = cast<VPReplicateRecipe>(this);
    return R->getUnderlyingInstr()->mayHaveSideEffects();
  }
  default:
    return true;
  }
}

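// Set the incoming value of the wrapped live-out phi: uniform exit values are
// taken from the first lane, other values from the last lane of the final
// unrolled part.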
void VPLiveOut::fixPhi(VPlan &Plan, VPTransformState &State) {
  VPValue *ExitValue = getOperand(0);
  auto Lane = vputils::isUniformAfterVectorization(ExitValue)
                  ? VPLane::getFirstLane()
                  : VPLane::getLastLaneForVF(State.VF);
  VPBasicBlock *MiddleVPBB =
      cast<VPBasicBlock>(Plan.getVectorLoopRegion()->getSingleSuccessor());
  VPRecipeBase *ExitingRecipe = ExitValue->getDefiningRecipe();
  auto *ExitingVPBB = ExitingRecipe ? ExitingRecipe->getParent() : nullptr;
  // Values leaving the vector loop reach live out phis in the exiting block
  // via middle block.
  auto *PredVPBB = !ExitingVPBB || ExitingVPBB->getEnclosingLoopRegion()
                       ? MiddleVPBB
                       : ExitingVPBB;
  BasicBlock *PredBB = State.CFG.VPBB2IRBB[PredVPBB];
  // Set insertion point in PredBB in case an extract needs to be generated.
  // TODO: Model extracts explicitly.
  State.Builder.SetInsertPoint(PredBB, PredBB->getFirstNonPHIIt());
  Value *V = State.get(ExitValue, VPIteration(State.UF - 1, Lane));
  if (Phi->getBasicBlockIndex(PredBB) != -1)
    Phi->setIncomingValueForBlock(PredBB, V);
  else
    Phi->addIncoming(V, PredBB);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPLiveOut::print(raw_ostream &O, VPSlotTracker &SlotTracker) const {
  O << "Live-out ";
  getPhi()->printAsOperand(O);
  O << " = ";
  getOperand(0)->printAsOperand(O, SlotTracker);
  O << "\n";
}
#endif

void VPRecipeBase::insertBefore(VPRecipeBase *InsertPos) {
  assert(!Parent && "Recipe already in some VPBasicBlock");
  assert(InsertPos->getParent() &&
         "Insertion position not in any VPBasicBlock");
  InsertPos->getParent()->insert(this, InsertPos->getIterator());
}

void VPRecipeBase::insertBefore(VPBasicBlock &BB,
                                iplist<VPRecipeBase>::iterator I) {
  assert(!Parent && "Recipe already in some VPBasicBlock");
  assert(I == BB.end() || I->getParent() == &BB);
  BB.insert(this, I);
}

void VPRecipeBase::insertAfter(VPRecipeBase *InsertPos) {
  assert(!Parent && "Recipe already in some VPBasicBlock");
  assert(InsertPos->getParent() &&
         "Insertion position not in any VPBasicBlock");
  InsertPos->getParent()->insert(this, std::next(InsertPos->getIterator()));
}

void VPRecipeBase::removeFromParent() {
  assert(getParent() && "Recipe not in any VPBasicBlock");
  getParent()->getRecipeList().remove(getIterator());
  Parent = nullptr;
}

iplist<VPRecipeBase>::iterator VPRecipeBase::eraseFromParent() {
  assert(getParent() && "Recipe not in any VPBasicBlock");
  return getParent()->getRecipeList().erase(getIterator());
}

void VPRecipeBase::moveAfter(VPRecipeBase *InsertPos) {
  removeFromParent();
  insertAfter(InsertPos);
}

void VPRecipeBase::moveBefore(VPBasicBlock &BB,
                              iplist<VPRecipeBase>::iterator I) {
  removeFromParent();
  insertBefore(BB, I);
}

/// Return the underlying instruction to be used for computing \p R's cost via
/// the legacy cost model. Return nullptr if there's no suitable instruction.
static Instruction *getInstructionForCost(const VPRecipeBase *R) {
  if (auto *S = dyn_cast<VPSingleDefRecipe>(R))
    return dyn_cast_or_null<Instruction>(S->getUnderlyingValue());
  if (auto *IG = dyn_cast<VPInterleaveRecipe>(R))
    return IG->getInsertPos();
  if (auto *WidenMem = dyn_cast<VPWidenMemoryRecipe>(R))
    return &WidenMem->getIngredient();
  return nullptr;
}

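// Returns the cost of the recipe for VF, skipping recipes already costed via
// their underlying instruction and honoring the force-target-instruction-cost
// override when set.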
InstructionCost VPRecipeBase::cost(ElementCount VF, VPCostContext &Ctx) {
  auto *UI = getInstructionForCost(this);
  if (UI && Ctx.skipCostComputation(UI, VF.isVector()))
    return 0;

  InstructionCost RecipeCost = computeCost(VF, Ctx);
  if (UI && ForceTargetInstructionCost.getNumOccurrences() > 0 &&
      RecipeCost.isValid())
    RecipeCost = InstructionCost(ForceTargetInstructionCost);

  LLVM_DEBUG({
    dbgs() << "Cost of " << RecipeCost << " for VF " << VF << ": ";
    dump();
  });
  return RecipeCost;
}

InstructionCost VPRecipeBase::computeCost(ElementCount VF,
                                          VPCostContext &Ctx) const {
  // Compute the cost for the recipe falling back to the legacy cost model
  // using the underlying instruction. If there is no underlying instruction,
  // returns 0.
  Instruction *UI = getInstructionForCost(this);
  if (UI && isa<VPReplicateRecipe>(this)) {
    // VPReplicateRecipe may be cloned as part of an existing VPlan-to-VPlan
    // transform, avoid computing their cost multiple times for now.
    Ctx.SkipCostComputation.insert(UI);
  }
  return UI ? Ctx.getLegacyCost(UI, VF) : 0;
}

FastMathFlags VPRecipeWithIRFlags::getFastMathFlags() const {
  assert(OpType == OperationType::FPMathOp &&
         "recipe doesn't have fast math flags");
  FastMathFlags Res;
  Res.setAllowReassoc(FMFs.AllowReassoc);
  Res.setNoNaNs(FMFs.NoNaNs);
  Res.setNoInfs(FMFs.NoInfs);
  Res.setNoSignedZeros(FMFs.NoSignedZeros);
  Res.setAllowReciprocal(FMFs.AllowReciprocal);
  Res.setAllowContract(FMFs.AllowContract);
  Res.setApproxFunc(FMFs.ApproxFunc);
  return Res;
}

VPInstruction::VPInstruction(unsigned Opcode, CmpInst::Predicate Pred,
                             VPValue *A, VPValue *B, DebugLoc DL,
                             const Twine &Name)
    : VPRecipeWithIRFlags(VPDef::VPInstructionSC, ArrayRef<VPValue *>({A, B}),
                          Pred, DL),
      Opcode(Opcode), Name(Name.str()) {
  assert(Opcode == Instruction::ICmp &&
         "only ICmp predicates supported at the moment");
}

VPInstruction::VPInstruction(unsigned Opcode,
                             std::initializer_list<VPValue *> Operands,
                             FastMathFlags FMFs, DebugLoc DL, const Twine &Name)
    : VPRecipeWithIRFlags(VPDef::VPInstructionSC, Operands, FMFs, DL),
      Opcode(Opcode), Name(Name.str()) {
  // Make sure the VPInstruction is a floating-point operation.
  assert(isFPMathOp() && "this op can't take fast-math flags");
}

bool VPInstruction::doesGeneratePerAllLanes() const {
  return Opcode == VPInstruction::PtrAdd && !vputils::onlyFirstLaneUsed(this);
}

bool VPInstruction::canGenerateScalarForFirstLane() const {
  if (Instruction::isBinaryOp(getOpcode()))
    return true;
  if (isSingleScalar() || isVectorToScalar())
    return true;
  switch (Opcode) {
  case Instruction::ICmp:
  case VPInstruction::BranchOnCond:
  case VPInstruction::BranchOnCount:
  case VPInstruction::CalculateTripCountMinusVF:
  case VPInstruction::CanonicalIVIncrementForPart:
  case VPInstruction::PtrAdd:
  case VPInstruction::ExplicitVectorLength:
    return true;
  default:
    return false;
  }
}

Value *VPInstruction::generatePerLane(VPTransformState &State,
                                      const VPIteration &Lane) {
  IRBuilderBase &Builder = State.Builder;

  assert(getOpcode() == VPInstruction::PtrAdd &&
         "only PtrAdd opcodes are supported for now");
  return Builder.CreatePtrAdd(State.get(getOperand(0), Lane),
                              State.get(getOperand(1), Lane), Name);
}

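// Generate the value for unroll part Part. Generic binary opcodes are handled
// up front; each VPlan-specific opcode has a dedicated case below.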
Value *VPInstruction::generatePerPart(VPTransformState &State, unsigned Part) {
  IRBuilderBase &Builder = State.Builder;

  if (Instruction::isBinaryOp(getOpcode())) {
    bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
    Value *A = State.get(getOperand(0), Part, OnlyFirstLaneUsed);
    Value *B = State.get(getOperand(1), Part, OnlyFirstLaneUsed);
    auto *Res =
        Builder.CreateBinOp((Instruction::BinaryOps)getOpcode(), A, B, Name);
    if (auto *I = dyn_cast<Instruction>(Res))
      setFlags(I);
    return Res;
  }

  switch (getOpcode()) {
  case VPInstruction::Not: {
    Value *A = State.get(getOperand(0), Part);
    return Builder.CreateNot(A, Name);
  }
  case Instruction::ICmp: {
    bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
    Value *A = State.get(getOperand(0), Part, OnlyFirstLaneUsed);
    Value *B = State.get(getOperand(1), Part, OnlyFirstLaneUsed);
    return Builder.CreateCmp(getPredicate(), A, B, Name);
  }
  case Instruction::Select: {
    Value *Cond = State.get(getOperand(0), Part);
    Value *Op1 = State.get(getOperand(1), Part);
    Value *Op2 = State.get(getOperand(2), Part);
    return Builder.CreateSelect(Cond, Op1, Op2, Name);
  }
  case VPInstruction::ActiveLaneMask: {
    // Get first lane of vector induction variable.
    Value *VIVElem0 = State.get(getOperand(0), VPIteration(Part, 0));
    // Get the original loop tripcount.
    Value *ScalarTC = State.get(getOperand(1), VPIteration(Part, 0));

    // If this part of the active lane mask is scalar, generate the CMP
    // directly to avoid unnecessary extracts.
    if (State.VF.isScalar())
      return Builder.CreateCmp(CmpInst::Predicate::ICMP_ULT, VIVElem0, ScalarTC,
                               Name);

    auto *Int1Ty = Type::getInt1Ty(Builder.getContext());
    auto *PredTy = VectorType::get(Int1Ty, State.VF);
    return Builder.CreateIntrinsic(Intrinsic::get_active_lane_mask,
                                   {PredTy, ScalarTC->getType()},
                                   {VIVElem0, ScalarTC}, nullptr, Name);
  }
  case VPInstruction::FirstOrderRecurrenceSplice: {
    // Generate code to combine the previous and current values in vector v3.
    //
    //   vector.ph:
    //     v_init = vector(..., ..., ..., a[-1])
    //     br vector.body
    //
    //   vector.body
    //     i = phi [0, vector.ph], [i+4, vector.body]
    //     v1 = phi [v_init, vector.ph], [v2, vector.body]
    //     v2 = a[i, i+1, i+2, i+3];
    //     v3 = vector(v1(3), v2(0, 1, 2))

    // For the first part, use the recurrence phi (v1), otherwise v2.
    auto *V1 = State.get(getOperand(0), 0);
    Value *PartMinus1 = Part == 0 ? V1 : State.get(getOperand(1), Part - 1);
    if (!PartMinus1->getType()->isVectorTy())
      return PartMinus1;
    Value *V2 = State.get(getOperand(1), Part);
    return Builder.CreateVectorSplice(PartMinus1, V2, -1, Name);
  }
  case VPInstruction::CalculateTripCountMinusVF: {
    if (Part != 0)
      return State.get(this, 0, /*IsScalar*/ true);

    Value *ScalarTC = State.get(getOperand(0), {0, 0});
    Value *Step =
        createStepForVF(Builder, ScalarTC->getType(), State.VF, State.UF);
    Value *Sub = Builder.CreateSub(ScalarTC, Step);
    Value *Cmp = Builder.CreateICmp(CmpInst::Predicate::ICMP_UGT, ScalarTC, Step);
    Value *Zero = ConstantInt::get(ScalarTC->getType(), 0);
    return Builder.CreateSelect(Cmp, Sub, Zero);
  }
  case VPInstruction::ExplicitVectorLength: {
    // Compute EVL
    auto GetEVL = [=](VPTransformState &State, Value *AVL) {
      assert(AVL->getType()->isIntegerTy() &&
             "Requested vector length should be an integer.");

      // TODO: Add support for MaxSafeDist for correct loop emission.
      assert(State.VF.isScalable() && "Expected scalable vector factor.");
      Value *VFArg = State.Builder.getInt32(State.VF.getKnownMinValue());

      Value *EVL = State.Builder.CreateIntrinsic(
          State.Builder.getInt32Ty(), Intrinsic::experimental_get_vector_length,
          {AVL, VFArg, State.Builder.getTrue()});
      return EVL;
    };
    // TODO: Restructure this code with an explicit remainder loop, vsetvli can
    // be outside of the main loop.
    assert(Part == 0 && "No unrolling expected for predicated vectorization.");
    // Compute VTC - IV as the AVL (requested vector length).
    Value *Index = State.get(getOperand(0), VPIteration(0, 0));
    Value *TripCount = State.get(getOperand(1), VPIteration(0, 0));
    Value *AVL = State.Builder.CreateSub(TripCount, Index);
    Value *EVL = GetEVL(State, AVL);
    return EVL;
  }
  case VPInstruction::CanonicalIVIncrementForPart: {
    auto *IV = State.get(getOperand(0), VPIteration(0, 0));
    if (Part == 0)
      return IV;

    // The canonical IV is incremented by the vectorization factor (num of SIMD
    // elements) times the unroll part.
    Value *Step = createStepForVF(Builder, IV->getType(), State.VF, Part);
    return Builder.CreateAdd(IV, Step, Name, hasNoUnsignedWrap(),
                             hasNoSignedWrap());
  }
  case VPInstruction::BranchOnCond: {
    if (Part != 0)
      return nullptr;

    Value *Cond = State.get(getOperand(0), VPIteration(Part, 0));
    // Replace the temporary unreachable terminator with a new conditional
    // branch, hooking it up to backward destination for exiting blocks now and
    // to forward destination(s) later when they are created.
    BranchInst *CondBr =
        Builder.CreateCondBr(Cond, Builder.GetInsertBlock(), nullptr);
    CondBr->setSuccessor(0, nullptr);
    Builder.GetInsertBlock()->getTerminator()->eraseFromParent();

    if (!getParent()->isExiting())
      return CondBr;

    VPRegionBlock *ParentRegion = getParent()->getParent();
    VPBasicBlock *Header = ParentRegion->getEntryBasicBlock();
    CondBr->setSuccessor(1, State.CFG.VPBB2IRBB[Header]);
    return CondBr;
  }
  case VPInstruction::BranchOnCount: {
    if (Part != 0)
      return nullptr;
    // First create the compare.
    Value *IV = State.get(getOperand(0), Part, /*IsScalar*/ true);
    Value *TC = State.get(getOperand(1), Part, /*IsScalar*/ true);
    Value *Cond = Builder.CreateICmpEQ(IV, TC);

    // Now create the branch.
    auto *Plan = getParent()->getPlan();
    VPRegionBlock *TopRegion = Plan->getVectorLoopRegion();
    VPBasicBlock *Header = TopRegion->getEntry()->getEntryBasicBlock();

    // Replace the temporary unreachable terminator with a new conditional
    // branch, hooking it up to backward destination (the header) now and to the
    // forward destination (the exit/middle block) later when it is created.
    // Note that CreateCondBr expects a valid BB as first argument, so we need
    // to set it to nullptr later.
    BranchInst *CondBr = Builder.CreateCondBr(Cond, Builder.GetInsertBlock(),
                                              State.CFG.VPBB2IRBB[Header]);
    CondBr->setSuccessor(0, nullptr);
    Builder.GetInsertBlock()->getTerminator()->eraseFromParent();
    return CondBr;
  }
  case VPInstruction::ComputeReductionResult: {
    if (Part != 0)
      return State.get(this, 0, /*IsScalar*/ true);

    // FIXME: The cross-recipe dependency on VPReductionPHIRecipe is temporary
    // and will be removed by breaking up the recipe further.
    auto *PhiR = cast<VPReductionPHIRecipe>(getOperand(0));
    auto *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
    // Get its reduction variable descriptor.
    const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();

    RecurKind RK = RdxDesc.getRecurrenceKind();

    VPValue *LoopExitingDef = getOperand(1);
    Type *PhiTy = OrigPhi->getType();
    VectorParts RdxParts(State.UF);
    for (unsigned Part = 0; Part < State.UF; ++Part)
      RdxParts[Part] = State.get(LoopExitingDef, Part, PhiR->isInLoop());

    // If the vector reduction can be performed in a smaller type, we truncate
    // then extend the loop exit value to enable InstCombine to evaluate the
    // entire expression in the smaller type.
    // TODO: Handle this in truncateToMinBW.
    if (State.VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
      Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), State.VF);
      for (unsigned Part = 0; Part < State.UF; ++Part)
        RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
    }
    // Reduce all of the unrolled parts into a single vector.
    Value *ReducedPartRdx = RdxParts[0];
    unsigned Op = RecurrenceDescriptor::getOpcode(RK);
    if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RK))
      Op = Instruction::Or;

    if (PhiR->isOrdered()) {
      ReducedPartRdx = RdxParts[State.UF - 1];
    } else {
      // Floating-point operations should have some FMF to enable the reduction.
      IRBuilderBase::FastMathFlagGuard FMFG(Builder);
      Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
      for (unsigned Part = 1; Part < State.UF; ++Part) {
        Value *RdxPart = RdxParts[Part];
        if (Op != Instruction::ICmp && Op != Instruction::FCmp)
          ReducedPartRdx = Builder.CreateBinOp(
              (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
        else
          ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
      }
    }

    // Create the reduction after the loop. Note that inloop reductions create
    // the target reduction in the loop using a Reduction recipe.
    if ((State.VF.isVector() ||
         RecurrenceDescriptor::isAnyOfRecurrenceKind(RK)) &&
        !PhiR->isInLoop()) {
      ReducedPartRdx =
          createTargetReduction(Builder, RdxDesc, ReducedPartRdx, OrigPhi);
      // If the reduction can be performed in a smaller type, we need to extend
      // the reduction to the wider type before we branch to the original loop.
      if (PhiTy != RdxDesc.getRecurrenceType())
        ReducedPartRdx = RdxDesc.isSigned()
                             ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
                             : Builder.CreateZExt(ReducedPartRdx, PhiTy);
    }

    // If there were stores of the reduction value to a uniform memory address
    // inside the loop, create the final store here.
    if (StoreInst *SI = RdxDesc.IntermediateStore) {
      auto *NewSI = Builder.CreateAlignedStore(
          ReducedPartRdx, SI->getPointerOperand(), SI->getAlign());
      propagateMetadata(NewSI, SI);
    }

    return ReducedPartRdx;
  }
  case VPInstruction::ExtractFromEnd: {
    if (Part != 0)
      return State.get(this, 0, /*IsScalar*/ true);

    auto *CI = cast<ConstantInt>(getOperand(1)->getLiveInIRValue());
    unsigned Offset = CI->getZExtValue();
    assert(Offset > 0 && "Offset from end must be positive");
    Value *Res;
    if (State.VF.isVector()) {
      assert(Offset <= State.VF.getKnownMinValue() &&
             "invalid offset to extract from");
      // Extract lane VF - Offset from the operand.
      Res = State.get(
          getOperand(0),
          VPIteration(State.UF - 1, VPLane::getLaneFromEnd(State.VF, Offset)));
    } else {
      assert(Offset <= State.UF && "invalid offset to extract from");
      // When loop is unrolled without vectorizing, retrieve UF - Offset.
      Res = State.get(getOperand(0), State.UF - Offset);
    }
    if (isa<ExtractElementInst>(Res))
      Res->setName(Name);
    return Res;
  }
  case VPInstruction::LogicalAnd: {
    Value *A = State.get(getOperand(0), Part);
    Value *B = State.get(getOperand(1), Part);
    return Builder.CreateLogicalAnd(A, B, Name);
  }
  case VPInstruction::PtrAdd: {
    assert(vputils::onlyFirstLaneUsed(this) &&
           "can only generate first lane for PtrAdd");
    Value *Ptr = State.get(getOperand(0), Part, /* IsScalar */ true);
    Value *Addend = State.get(getOperand(1), Part, /* IsScalar */ true);
    return Builder.CreatePtrAdd(Ptr, Addend, Name);
  }
  case VPInstruction::ResumePhi: {
    if (Part != 0)
      return State.get(this, 0, /*IsScalar*/ true);
    Value *IncomingFromVPlanPred =
        State.get(getOperand(0), Part, /* IsScalar */ true);
    Value *IncomingFromOtherPreds =
        State.get(getOperand(1), Part, /* IsScalar */ true);
    auto *NewPhi =
        Builder.CreatePHI(IncomingFromOtherPreds->getType(), 2, Name);
    BasicBlock *VPlanPred =
        State.CFG
            .VPBB2IRBB[cast<VPBasicBlock>(getParent()->getSinglePredecessor())];
    NewPhi->addIncoming(IncomingFromVPlanPred, VPlanPred);
    for (auto *OtherPred : predecessors(Builder.GetInsertBlock())) {
      assert(OtherPred != VPlanPred &&
             "VPlan predecessors should not be connected yet");
      NewPhi->addIncoming(IncomingFromOtherPreds, OtherPred);
    }
    return NewPhi;
  }

  default:
    llvm_unreachable("Unsupported opcode for instruction");
  }
}

bool VPInstruction::isVectorToScalar() const {
  return getOpcode() == VPInstruction::ExtractFromEnd ||
         getOpcode() == VPInstruction::ComputeReductionResult;
}

bool VPInstruction::isSingleScalar() const {
  return getOpcode() == VPInstruction::ResumePhi;
}

#if !defined(NDEBUG)
bool VPInstruction::isFPMathOp() const {
  // Inspired by FPMathOperator::classof. Notable differences are that we don't
  // support Call, PHI and Select opcodes here yet.
  return Opcode == Instruction::FAdd || Opcode == Instruction::FMul ||
         Opcode == Instruction::FNeg || Opcode == Instruction::FSub ||
         Opcode == Instruction::FDiv || Opcode == Instruction::FRem ||
         Opcode == Instruction::FCmp || Opcode == Instruction::Select;
}
#endif

void VPInstruction::execute(VPTransformState &State) {
  assert(!State.Instance && "VPInstruction executing an Instance");
  IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
  assert((hasFastMathFlags() == isFPMathOp() ||
          getOpcode() == Instruction::Select) &&
         "Recipe not a FPMathOp but has fast-math flags?");
  if (hasFastMathFlags())
    State.Builder.setFastMathFlags(getFastMathFlags());
  State.setDebugLocFrom(getDebugLoc());
  bool GeneratesPerFirstLaneOnly = canGenerateScalarForFirstLane() &&
                                   (vputils::onlyFirstLaneUsed(this) ||
                                    isVectorToScalar() || isSingleScalar());
  bool GeneratesPerAllLanes = doesGeneratePerAllLanes();
  bool OnlyFirstPartUsed = vputils::onlyFirstPartUsed(this);
  for (unsigned Part = 0; Part < State.UF; ++Part) {
    if (GeneratesPerAllLanes) {
      for (unsigned Lane = 0, NumLanes = State.VF.getKnownMinValue();
           Lane != NumLanes; ++Lane) {
        Value *GeneratedValue = generatePerLane(State, VPIteration(Part, Lane));
        assert(GeneratedValue && "generatePerLane must produce a value");
        State.set(this, GeneratedValue, VPIteration(Part, Lane));
      }
      continue;
    }

    if (Part != 0 && OnlyFirstPartUsed && hasResult()) {
      Value *Part0 = State.get(this, 0, /*IsScalar*/ GeneratesPerFirstLaneOnly);
      State.set(this, Part0, Part,
                /*IsScalar*/ GeneratesPerFirstLaneOnly);
      continue;
    }

    Value *GeneratedValue = generatePerPart(State, Part);
    if (!hasResult())
      continue;
    assert(GeneratedValue && "generatePerPart must produce a value");
    assert((GeneratedValue->getType()->isVectorTy() ==
                !GeneratesPerFirstLaneOnly ||
            State.VF.isScalar()) &&
           "scalar value but not only first lane defined");
    State.set(this, GeneratedValue, Part,
              /*IsScalar*/ GeneratesPerFirstLaneOnly);
  }
}

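// Demanded-lane/part queries used to avoid generating wide values when a
// VPInstruction only feeds users of the first lane or first part.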
bool VPInstruction::onlyFirstLaneUsed(const VPValue *Op) const {
  assert(is_contained(operands(), Op) && "Op must be an operand of the recipe");
  if (Instruction::isBinaryOp(getOpcode()))
    return vputils::onlyFirstLaneUsed(this);

  switch (getOpcode()) {
  default:
    return false;
  case Instruction::ICmp:
  case VPInstruction::PtrAdd:
    // TODO: Cover additional opcodes.
    return vputils::onlyFirstLaneUsed(this);
  case VPInstruction::ActiveLaneMask:
  case VPInstruction::ExplicitVectorLength:
  case VPInstruction::CalculateTripCountMinusVF:
  case VPInstruction::CanonicalIVIncrementForPart:
  case VPInstruction::BranchOnCount:
  case VPInstruction::BranchOnCond:
  case VPInstruction::ResumePhi:
    return true;
  };
  llvm_unreachable("switch should return");
}

bool VPInstruction::onlyFirstPartUsed(const VPValue *Op) const {
  assert(is_contained(operands(), Op) && "Op must be an operand of the recipe");
  if (Instruction::isBinaryOp(getOpcode()))
    return vputils::onlyFirstPartUsed(this);

  switch (getOpcode()) {
  default:
    return false;
  case Instruction::ICmp:
  case Instruction::Select:
    return vputils::onlyFirstPartUsed(this);
  case VPInstruction::BranchOnCount:
  case VPInstruction::BranchOnCond:
  case VPInstruction::CanonicalIVIncrementForPart:
    return true;
  };
  llvm_unreachable("switch should return");
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPInstruction::dump() const {
  VPSlotTracker SlotTracker(getParent()->getPlan());
  print(dbgs(), "", SlotTracker);
}

void VPInstruction::print(raw_ostream &O, const Twine &Indent,
                          VPSlotTracker &SlotTracker) const {
  O << Indent << "EMIT ";

  if (hasResult()) {
    printAsOperand(O, SlotTracker);
    O << " = ";
  }

  switch (getOpcode()) {
  case VPInstruction::Not:
    O << "not";
    break;
  case VPInstruction::SLPLoad:
    O << "combined load";
    break;
  case VPInstruction::SLPStore:
    O << "combined store";
    break;
  case VPInstruction::ActiveLaneMask:
    O << "active lane mask";
    break;
  case VPInstruction::ResumePhi:
    O << "resume-phi";
    break;
  case VPInstruction::ExplicitVectorLength:
    O << "EXPLICIT-VECTOR-LENGTH";
    break;
  case VPInstruction::FirstOrderRecurrenceSplice:
    O << "first-order splice";
    break;
  case VPInstruction::BranchOnCond:
    O << "branch-on-cond";
    break;
  case VPInstruction::CalculateTripCountMinusVF:
    O << "TC > VF ? TC - VF : 0";
    break;
  case VPInstruction::CanonicalIVIncrementForPart:
    O << "VF * Part +";
    break;
  case VPInstruction::BranchOnCount:
    O << "branch-on-count";
    break;
  case VPInstruction::ExtractFromEnd:
    O << "extract-from-end";
    break;
  case VPInstruction::ComputeReductionResult:
    O << "compute-reduction-result";
    break;
  case VPInstruction::LogicalAnd:
    O << "logical-and";
    break;
  case VPInstruction::PtrAdd:
    O << "ptradd";
    break;
  default:
    O << Instruction::getOpcodeName(getOpcode());
  }

  printFlags(O);
  printOperands(O, SlotTracker);

  if (auto DL = getDebugLoc()) {
    O << ", !dbg ";
    DL.print(O);
  }
}
#endif

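// Widen a call: one vector call per unroll part, using either a vector
// intrinsic or an available vector function variant; scalar parameters are
// passed through unwidened.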
void VPWidenCallRecipe::execute(VPTransformState &State) {
  assert(State.VF.isVector() && "not widening");
  Function *CalledScalarFn = getCalledScalarFunction();
  assert(!isDbgInfoIntrinsic(CalledScalarFn->getIntrinsicID()) &&
         "DbgInfoIntrinsic should have been dropped during VPlan construction");
  State.setDebugLocFrom(getDebugLoc());

  bool UseIntrinsic = VectorIntrinsicID != Intrinsic::not_intrinsic;
  FunctionType *VFTy = nullptr;
  if (Variant)
    VFTy = Variant->getFunctionType();
  for (unsigned Part = 0; Part < State.UF; ++Part) {
    SmallVector<Type *, 2> TysForDecl;
    // Add return type if intrinsic is overloaded on it.
    if (UseIntrinsic &&
        isVectorIntrinsicWithOverloadTypeAtArg(VectorIntrinsicID, -1))
      TysForDecl.push_back(VectorType::get(
          CalledScalarFn->getReturnType()->getScalarType(), State.VF));
    SmallVector<Value *, 4> Args;
    for (const auto &I : enumerate(arg_operands())) {
      // Some intrinsics have a scalar argument - don't replace it with a
      // vector.
      Value *Arg;
      if (UseIntrinsic &&
          isVectorIntrinsicWithScalarOpAtArg(VectorIntrinsicID, I.index()))
        Arg = State.get(I.value(), VPIteration(0, 0));
      // Some vectorized function variants may also take a scalar argument,
      // e.g. linear parameters for pointers. This needs to be the scalar value
      // from the start of the respective part when interleaving.
      else if (VFTy && !VFTy->getParamType(I.index())->isVectorTy())
        Arg = State.get(I.value(), VPIteration(Part, 0));
      else
        Arg = State.get(I.value(), Part);
      if (UseIntrinsic &&
          isVectorIntrinsicWithOverloadTypeAtArg(VectorIntrinsicID, I.index()))
        TysForDecl.push_back(Arg->getType());
      Args.push_back(Arg);
    }

    Function *VectorF;
    if (UseIntrinsic) {
      // Use vector version of the intrinsic.
      Module *M = State.Builder.GetInsertBlock()->getModule();
      VectorF = Intrinsic::getDeclaration(M, VectorIntrinsicID, TysForDecl);
      assert(VectorF && "Can't retrieve vector intrinsic.");
    } else {
#ifndef NDEBUG
      assert(Variant != nullptr && "Can't create vector function.");
#endif
      VectorF = Variant;
    }

    auto *CI = cast_or_null<CallInst>(getUnderlyingInstr());
    SmallVector<OperandBundleDef, 1> OpBundles;
    if (CI)
      CI->getOperandBundlesAsDefs(OpBundles);

    CallInst *V = State.Builder.CreateCall(VectorF, Args, OpBundles);

    if (isa<FPMathOperator>(V))
      V->copyFastMathFlags(CI);

    if (!V->getType()->isVoidTy())
      State.set(this, V, Part);
    State.addMetadata(V, CI);
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPWidenCallRecipe::print(raw_ostream &O, const Twine &Indent,
                              VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN-CALL ";

  Function *CalledFn = getCalledScalarFunction();
  if (CalledFn->getReturnType()->isVoidTy())
    O << "void ";
  else {
    printAsOperand(O, SlotTracker);
    O << " = ";
  }

  O << "call @" << CalledFn->getName() << "(";
  interleaveComma(arg_operands(), O, [&O, &SlotTracker](VPValue *Op) {
    Op->printAsOperand(O, SlotTracker);
  });
  O << ")";

  if (VectorIntrinsicID)
    O << " (using vector intrinsic)";
  else {
    O << " (using library function";
    if (Variant->hasName())
      O << ": " << Variant->getName();
    O << ")";
  }
}

void VPWidenSelectRecipe::print(raw_ostream &O, const Twine &Indent,
                                VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN-SELECT ";
  printAsOperand(O, SlotTracker);
  O << " = select ";
  getOperand(0)->printAsOperand(O, SlotTracker);
  O << ", ";
  getOperand(1)->printAsOperand(O, SlotTracker);
  O << ", ";
  getOperand(2)->printAsOperand(O, SlotTracker);
  O << (isInvariantCond() ? " (condition is loop invariant)" : "");
}
#endif

void VPWidenSelectRecipe::execute(VPTransformState &State) {
  State.setDebugLocFrom(getDebugLoc());

  // The condition can be loop invariant but still defined inside the
  // loop. This means that we can't just use the original 'cond' value.
  // We have to take the 'vectorized' value and pick the first lane.
  // Instcombine will make this a no-op.
  auto *InvarCond =
      isInvariantCond() ? State.get(getCond(), VPIteration(0, 0)) : nullptr;

  for (unsigned Part = 0; Part < State.UF; ++Part) {
    Value *Cond = InvarCond ? InvarCond : State.get(getCond(), Part);
    Value *Op0 = State.get(getOperand(1), Part);
    Value *Op1 = State.get(getOperand(2), Part);
    Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1);
    State.set(this, Sel, Part);
    State.addMetadata(Sel, dyn_cast_or_null<Instruction>(getUnderlyingValue()));
  }
}

VPRecipeWithIRFlags::FastMathFlagsTy::FastMathFlagsTy(
    const FastMathFlags &FMF) {
  AllowReassoc = FMF.allowReassoc();
  NoNaNs = FMF.noNaNs();
  NoInfs = FMF.noInfs();
  NoSignedZeros = FMF.noSignedZeros();
  AllowReciprocal = FMF.allowReciprocal();
  AllowContract = FMF.allowContract();
  ApproxFunc = FMF.approxFunc();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPRecipeWithIRFlags::printFlags(raw_ostream &O) const {
  switch (OpType) {
  case OperationType::Cmp:
    O << " " << CmpInst::getPredicateName(getPredicate());
    break;
  case OperationType::DisjointOp:
    if (DisjointFlags.IsDisjoint)
      O << " disjoint";
    break;
  case OperationType::PossiblyExactOp:
    if (ExactFlags.IsExact)
      O << " exact";
    break;
  case OperationType::OverflowingBinOp:
    if (WrapFlags.HasNUW)
      O << " nuw";
    if (WrapFlags.HasNSW)
      O << " nsw";
    break;
  case OperationType::FPMathOp:
    getFastMathFlags().print(O);
    break;
  case OperationType::GEPOp:
    if (GEPFlags.IsInBounds)
      O << " inbounds";
    break;
  case OperationType::NonNegOp:
    if (NonNegFlags.NonNeg)
      O << " nneg";
    break;
  case OperationType::Other:
    break;
  }
  if (getNumOperands() > 0)
    O << " ";
}
#endif

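// Widen a single instruction opcode: unary/binary operators, freeze and
// compares are emitted as vector instructions per unroll part, with IR flags
// and metadata propagated from the original instruction.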
void VPWidenRecipe::execute(VPTransformState &State) {
  State.setDebugLocFrom(getDebugLoc());
  auto &Builder = State.Builder;
  switch (Opcode) {
  case Instruction::Call:
  case Instruction::Br:
  case Instruction::PHI:
  case Instruction::GetElementPtr:
  case Instruction::Select:
    llvm_unreachable("This instruction is handled by a different recipe.");
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::SRem:
  case Instruction::URem:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::FNeg:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Just widen unops and binops.
    for (unsigned Part = 0; Part < State.UF; ++Part) {
      SmallVector<Value *, 2> Ops;
      for (VPValue *VPOp : operands())
        Ops.push_back(State.get(VPOp, Part));

      Value *V = Builder.CreateNAryOp(Opcode, Ops);

      if (auto *VecOp = dyn_cast<Instruction>(V))
        setFlags(VecOp);

      // Use this vector value for all users of the original instruction.
      State.set(this, V, Part);
      State.addMetadata(V, dyn_cast_or_null<Instruction>(getUnderlyingValue()));
    }

    break;
  }
  case Instruction::Freeze: {
    for (unsigned Part = 0; Part < State.UF; ++Part) {
      Value *Op = State.get(getOperand(0), Part);

      Value *Freeze = Builder.CreateFreeze(Op);
      State.set(this, Freeze, Part);
    }
    break;
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    // Widen compares. Generate vector compares.
    bool FCmp = Opcode == Instruction::FCmp;
    for (unsigned Part = 0; Part < State.UF; ++Part) {
      Value *A = State.get(getOperand(0), Part);
      Value *B = State.get(getOperand(1), Part);
      Value *C = nullptr;
      if (FCmp) {
        // Propagate fast math flags.
        IRBuilder<>::FastMathFlagGuard FMFG(Builder);
        if (auto *I = dyn_cast_or_null<Instruction>(getUnderlyingValue()))
          Builder.setFastMathFlags(I->getFastMathFlags());
        C = Builder.CreateFCmp(getPredicate(), A, B);
      } else {
        C = Builder.CreateICmp(getPredicate(), A, B);
      }
      State.set(this, C, Part);
      State.addMetadata(C, dyn_cast_or_null<Instruction>(getUnderlyingValue()));
    }

    break;
  }
  default:
    // This instruction is not vectorized by simple widening.
    LLVM_DEBUG(dbgs() << "LV: Found an unhandled opcode : "
                      << Instruction::getOpcodeName(Opcode));
    llvm_unreachable("Unhandled instruction!");
  } // end of switch.

#if !defined(NDEBUG)
  // Verify that VPlan type inference results agree with the type of the
  // generated values.
  for (unsigned Part = 0; Part < State.UF; ++Part) {
    assert(VectorType::get(State.TypeAnalysis.inferScalarType(this),
                           State.VF) == State.get(this, Part)->getType() &&
           "inferred type and type from generated instructions do not match");
  }
#endif
}

InstructionCost VPWidenRecipe::computeCost(ElementCount VF,
                                           VPCostContext &Ctx) const {
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  switch (Opcode) {
  case Instruction::FNeg: {
    Type *VectorTy =
        ToVectorTy(Ctx.Types.inferScalarType(this->getVPSingleValue()), VF);
    return Ctx.TTI.getArithmeticInstrCost(
        Opcode, VectorTy, CostKind,
        {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
        {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None});
  }

  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::SRem:
  case Instruction::URem:
    // More complex computation, let the legacy cost-model handle this for now.
    return Ctx.getLegacyCost(cast<Instruction>(getUnderlyingValue()), VF);
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    VPValue *RHS = getOperand(1);
    // Certain instructions can be cheaper to vectorize if they have a constant
    // second vector operand. One example of this are shifts on x86.
    TargetTransformInfo::OperandValueInfo RHSInfo = {
        TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None};
    if (RHS->isLiveIn())
      RHSInfo = Ctx.TTI.getOperandInfo(RHS->getLiveInIRValue());

    if (RHSInfo.Kind == TargetTransformInfo::OK_AnyValue &&
        getOperand(1)->isDefinedOutsideVectorRegions())
      RHSInfo.Kind = TargetTransformInfo::OK_UniformValue;
    Type *VectorTy =
        ToVectorTy(Ctx.Types.inferScalarType(this->getVPSingleValue()), VF);
    Instruction *CtxI = dyn_cast_or_null<Instruction>(getUnderlyingValue());

    SmallVector<const Value *, 4> Operands;
    if (CtxI)
      Operands.append(CtxI->value_op_begin(), CtxI->value_op_end());
    return Ctx.TTI.getArithmeticInstrCost(
        Opcode, VectorTy, CostKind,
        {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
        RHSInfo, Operands, CtxI, &Ctx.TLI);
  }
  case Instruction::Freeze: {
    // This opcode is unknown. Assume that it is the same as 'mul'.
    Type *VectorTy =
        ToVectorTy(Ctx.Types.inferScalarType(this->getVPSingleValue()), VF);
    return Ctx.TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    Instruction *CtxI = dyn_cast_or_null<Instruction>(getUnderlyingValue());
    Type *VectorTy = ToVectorTy(Ctx.Types.inferScalarType(getOperand(0)), VF);
    return Ctx.TTI.getCmpSelInstrCost(Opcode, VectorTy, nullptr, getPredicate(),
                                      CostKind, CtxI);
  }
  default:
    llvm_unreachable("Unsupported opcode for instruction");
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPWidenRecipe::print(raw_ostream &O, const Twine &Indent,
                          VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN ";
  printAsOperand(O, SlotTracker);
  O << " = " << Instruction::getOpcodeName(Opcode);
  printFlags(O);
  printOperands(O, SlotTracker);
}
#endif

void VPWidenCastRecipe::execute(VPTransformState &State) {
  State.setDebugLocFrom(getDebugLoc());
  auto &Builder = State.Builder;
  /// Vectorize casts.
  assert(State.VF.isVector() && "Not vectorizing?");
  Type *DestTy = VectorType::get(getResultType(), State.VF);
  VPValue *Op = getOperand(0);
  for (unsigned Part = 0; Part < State.UF; ++Part) {
    if (Part > 0 && Op->isLiveIn()) {
      // FIXME: Remove once explicit unrolling is implemented using VPlan.
      State.set(this, State.get(this, 0), Part);
      continue;
    }
    Value *A = State.get(Op, Part);
    Value *Cast = Builder.CreateCast(Instruction::CastOps(Opcode), A, DestTy);
    State.set(this, Cast, Part);
    State.addMetadata(Cast, cast_or_null<Instruction>(getUnderlyingValue()));
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPWidenCastRecipe::print(raw_ostream &O, const Twine &Indent,
                              VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN-CAST ";
  printAsOperand(O, SlotTracker);
  O << " = " << Instruction::getOpcodeName(Opcode) << " ";
  printFlags(O);
  printOperands(O, SlotTracker);
  O << " to " << *getResultType();
}
#endif

/// This function adds
/// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
/// to each vector element of Val. The sequence starts at StartIndex.
/// \p Opcode is relevant for FP induction variable.
static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step,
                            Instruction::BinaryOps BinOp, ElementCount VF,
                            IRBuilderBase &Builder) {
  assert(VF.isVector() && "only vector VFs are supported");

  // Create and check the types.
  auto *ValVTy = cast<VectorType>(Val->getType());
  ElementCount VLen = ValVTy->getElementCount();

  Type *STy = Val->getType()->getScalarType();
  assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
         "Induction Step must be an integer or FP");
  assert(Step->getType() == STy && "Step has wrong type");

  SmallVector<Constant *, 8> Indices;

  // Create a vector of consecutive numbers from zero to VF.
  VectorType *InitVecValVTy = ValVTy;
  if (STy->isFloatingPointTy()) {
    Type *InitVecValSTy =
        IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
    InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
  }
  Value *InitVec = Builder.CreateStepVector(InitVecValVTy);

  // Splat the StartIdx
  Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx);

  if (STy->isIntegerTy()) {
    InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
    Step = Builder.CreateVectorSplat(VLen, Step);
    assert(Step->getType() == Val->getType() && "Invalid step vec");
    // FIXME: The newly created binary instructions should contain nsw/nuw
    // flags, which can be found from the original scalar operations.
    Step = Builder.CreateMul(InitVec, Step);
    return Builder.CreateAdd(Val, Step, "induction");
  }

  // Floating point induction.
  assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
         "Binary Opcode should be specified for FP induction");
  InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
  InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat);

  Step = Builder.CreateVectorSplat(VLen, Step);
  Value *MulOp = Builder.CreateFMul(InitVec, Step);
  return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
}

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Value *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

static Value *getRuntimeVFAsFloat(IRBuilderBase &B, Type *FTy,
                                  ElementCount VF) {
  assert(FTy->isFloatingPointTy() && "Expected floating point type!");
  Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
  Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
  return B.CreateUIToFP(RuntimeVF, FTy);
}

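// Expand a widened induction: build the stepped start vector in the
// preheader, then a vector phi in the loop that is advanced by one "step.add"
// per unroll part.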
void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Int or FP induction being replicated.");

  Value *Start = getStartValue()->getLiveInIRValue();
  const InductionDescriptor &ID = getInductionDescriptor();
  TruncInst *Trunc = getTruncInst();
  IRBuilderBase &Builder = State.Builder;
  assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
  assert(State.VF.isVector() && "must have vector VF");

  // The value from the original loop to which we are mapping the new induction
  // variable.
  Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;

  // Fast-math-flags propagate from the original induction instruction.
  IRBuilder<>::FastMathFlagGuard FMFG(Builder);
  if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
    Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());

  // Now do the actual transformations, and start with fetching the step value.
  Value *Step = State.get(getStepValue(), VPIteration(0, 0));

  assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
         "Expected either an induction phi-node or a truncate of it!");

  // Construct the initial value of the vector IV in the vector loop preheader
  auto CurrIP = Builder.saveIP();
  BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
  Builder.SetInsertPoint(VectorPH->getTerminator());
  if (isa<TruncInst>(EntryVal)) {
    assert(Start->getType()->isIntegerTy() &&
           "Truncation requires an integer type");
    auto *TruncType = cast<IntegerType>(EntryVal->getType());
    Step = Builder.CreateTrunc(Step, TruncType);
    Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
  }

  Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0);
  Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start);
  Value *SteppedStart = getStepVector(
      SplatStart, Zero, Step, ID.getInductionOpcode(), State.VF, State.Builder);

  // We create vector phi nodes for both integer and floating-point induction
  // variables. Here, we determine the kind of arithmetic we will perform.
  Instruction::BinaryOps AddOp;
  Instruction::BinaryOps MulOp;
  if (Step->getType()->isIntegerTy()) {
    AddOp = Instruction::Add;
    MulOp = Instruction::Mul;
  } else {
    AddOp = ID.getInductionOpcode();
    MulOp = Instruction::FMul;
  }

  // Multiply the vectorization factor by the step using integer or
  // floating-point arithmetic as appropriate.
  Type *StepType = Step->getType();
  Value *RuntimeVF;
  if (Step->getType()->isFloatingPointTy())
    RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, State.VF);
  else
    RuntimeVF = getRuntimeVF(Builder, StepType, State.VF);
  Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF);

  // Create a vector splat to use in the induction update.
  //
  // FIXME: If the step is non-constant, we create the vector splat with
  // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
  // handle a constant vector splat.
  Value *SplatVF = isa<Constant>(Mul)
                       ? ConstantVector::getSplat(State.VF, cast<Constant>(Mul))
                       : Builder.CreateVectorSplat(State.VF, Mul);
  Builder.restoreIP(CurrIP);

  // We may need to add the step a number of times, depending on the unroll
  // factor. The last of those goes into the PHI.
  PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind");
  VecInd->insertBefore(State.CFG.PrevBB->getFirstInsertionPt());
  VecInd->setDebugLoc(EntryVal->getDebugLoc());
  Instruction *LastInduction = VecInd;
  for (unsigned Part = 0; Part < State.UF; ++Part) {
    State.set(this, LastInduction, Part);

    if (isa<TruncInst>(EntryVal))
      State.addMetadata(LastInduction, EntryVal);

    LastInduction = cast<Instruction>(
        Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"));
    LastInduction->setDebugLoc(EntryVal->getDebugLoc());
  }

  LastInduction->setName("vec.ind.next");
  VecInd->addIncoming(SteppedStart, VectorPH);
  // Add induction update using an incorrect block temporarily. The phi node
  // will be fixed after VPlan execution. Note that at this point the latch
  // block cannot be used, as it does not exist yet.
  // TODO: Model increment value in VPlan, by turning the recipe into a
  // multi-def and a subclass of VPHeaderPHIRecipe.
  VecInd->addIncoming(LastInduction, VectorPH);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPWidenIntOrFpInductionRecipe::print(raw_ostream &O, const Twine &Indent,
                                          VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN-INDUCTION";
  if (getTruncInst()) {
    O << "\\l\"";
    O << " +\n" << Indent << "\" " << VPlanIngredient(IV) << "\\l\"";
    O << " +\n" << Indent << "\" ";
    getVPValue(0)->printAsOperand(O, SlotTracker);
  } else
    O << " " << VPlanIngredient(IV);

  O << ", ";
  getStepValue()->printAsOperand(O, SlotTracker);
}
#endif

bool VPWidenIntOrFpInductionRecipe::isCanonical() const {
  // The step may be defined by a recipe in the preheader (e.g. if it requires
  // SCEV expansion), but for the canonical induction the step is required to
  // be 1, which is represented as live-in.
  if (getStepValue()->getDefiningRecipe())
    return false;
  auto *StepC = dyn_cast<ConstantInt>(getStepValue()->getLiveInIRValue());
  auto *StartC = dyn_cast<ConstantInt>(getStartValue()->getLiveInIRValue());
  auto *CanIV = cast<VPCanonicalIVPHIRecipe>(&*getParent()->begin());
  return StartC && StartC->isZero() && StepC && StepC->isOne() &&
         getScalarType() == CanIV->getScalarType();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPDerivedIVRecipe::print(raw_ostream &O, const Twine &Indent,
                              VPSlotTracker &SlotTracker) const {
  O << Indent;
  printAsOperand(O, SlotTracker);
  O << Indent << "= DERIVED-IV ";
  getStartValue()->printAsOperand(O, SlotTracker);
  O << " + ";
  getOperand(1)->printAsOperand(O, SlotTracker);
  O << " * ";
  getStepValue()->printAsOperand(O, SlotTracker);
}
#endif

void VPScalarIVStepsRecipe::execute(VPTransformState &State) {
  // Fast-math-flags propagate from the original induction instruction.
  IRBuilder<>::FastMathFlagGuard FMFG(State.Builder);
  if (hasFastMathFlags())
    State.Builder.setFastMathFlags(getFastMathFlags());

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step.

  Value *BaseIV = State.get(getOperand(0), VPIteration(0, 0));
  Value *Step = State.get(getStepValue(), VPIteration(0, 0));
  IRBuilderBase &Builder = State.Builder;

  // Ensure step has the same type as that of scalar IV.
  Type *BaseIVTy = BaseIV->getType()->getScalarType();
  assert(BaseIVTy == Step->getType() && "Types of BaseIV and Step must match!");

  // We build scalar steps for both integer and floating-point induction
  // variables. Here, we determine the kind of arithmetic we will perform.
  Instruction::BinaryOps AddOp;
  Instruction::BinaryOps MulOp;
  if (BaseIVTy->isIntegerTy()) {
    AddOp = Instruction::Add;
    MulOp = Instruction::Mul;
  } else {
    AddOp = InductionOpcode;
    MulOp = Instruction::FMul;
  }

  // Determine the number of scalars we need to generate for each unroll
  // iteration.
  bool FirstLaneOnly = vputils::onlyFirstLaneUsed(this);
  // Compute the scalar steps and save the results in State.
  Type *IntStepTy =
      IntegerType::get(BaseIVTy->getContext(), BaseIVTy->getScalarSizeInBits());
  Type *VecIVTy = nullptr;
  Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
  if (!FirstLaneOnly && State.VF.isScalable()) {
    VecIVTy = VectorType::get(BaseIVTy, State.VF);
    UnitStepVec =
        Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF));
    SplatStep = Builder.CreateVectorSplat(State.VF, Step);
    SplatIV = Builder.CreateVectorSplat(State.VF, BaseIV);
  }

  unsigned StartPart = 0;
  unsigned EndPart = State.UF;
  unsigned StartLane = 0;
  unsigned EndLane = FirstLaneOnly ? 1 : State.VF.getKnownMinValue();
  if (State.Instance) {
    StartPart = State.Instance->Part;
    EndPart = StartPart + 1;
    StartLane = State.Instance->Lane.getKnownLane();
    EndLane = StartLane + 1;
  }
  for (unsigned Part = StartPart; Part < EndPart; ++Part) {
    Value *StartIdx0 = createStepForVF(Builder, IntStepTy, State.VF, Part);

    if (!FirstLaneOnly && State.VF.isScalable()) {
      auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0);
      auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
      if (BaseIVTy->isFloatingPointTy())
        InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
      auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
      auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
      State.set(this, Add, Part);
      // It's useful to record the lane values too for the known minimum number
      // of elements so we do those below. This improves the code quality when
      // trying to extract the first element, for example.
    }

    if (BaseIVTy->isFloatingPointTy())
      StartIdx0 = Builder.CreateSIToFP(StartIdx0, BaseIVTy);

    for (unsigned Lane = StartLane; Lane < EndLane; ++Lane) {
      Value *StartIdx = Builder.CreateBinOp(
          AddOp, StartIdx0, getSignedIntOrFpConstant(BaseIVTy, Lane));
      // The step returned by `createStepForVF` is a runtime-evaluated value
      // when VF is scalable. Otherwise, it should be folded into a Constant.
      assert((State.VF.isScalable() || isa<Constant>(StartIdx)) &&
             "Expected StartIdx to be folded to a constant when VF is not "
             "scalable");
      auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
      auto *Add = Builder.CreateBinOp(AddOp, BaseIV, Mul);
      State.set(this, Add, VPIteration(Part, Lane));
    }
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPScalarIVStepsRecipe::print(raw_ostream &O, const Twine &Indent,
                                  VPSlotTracker &SlotTracker) const {
  O << Indent;
  printAsOperand(O, SlotTracker);
  O << " = SCALAR-STEPS ";
  printOperands(O, SlotTracker);
}
#endif

void VPWidenGEPRecipe::execute(VPTransformState &State) {
  assert(State.VF.isVector() && "not widening");
  auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr());
  // Construct a vector GEP by widening the operands of the scalar GEP as
  // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
  // results in a vector of pointers when at least one operand of the GEP
  // is vector-typed. Thus, to keep the representation compact, we only use
  // vector-typed operands for loop-varying values.

  if (areAllOperandsInvariant()) {
    // If we are vectorizing, but the GEP has only loop-invariant operands,
    // the GEP we build (by only using vector-typed operands for
    // loop-varying values) would be a scalar pointer. Thus, to ensure we
    // produce a vector of pointers, we need to either arbitrarily pick an
    // operand to broadcast, or broadcast a clone of the original GEP.
    // Here, we broadcast a clone of the original.
    //
    // TODO: If at some point we decide to scalarize instructions having
    // loop-invariant operands, this special case will no longer be
    // required. We would add the scalarization decision to
    // collectLoopScalars() and teach getVectorValue() to broadcast
    // the lane-zero scalar value.
    SmallVector<Value *> Ops;
    for (unsigned I = 0, E = getNumOperands(); I != E; I++)
      Ops.push_back(State.get(getOperand(I), VPIteration(0, 0)));

    auto *NewGEP =
        State.Builder.CreateGEP(GEP->getSourceElementType(), Ops[0],
                                ArrayRef(Ops).drop_front(), "", isInBounds());
    for (unsigned Part = 0; Part < State.UF; ++Part) {
      Value *EntryPart = State.Builder.CreateVectorSplat(State.VF, NewGEP);
      State.set(this, EntryPart, Part);
      State.addMetadata(EntryPart, GEP);
    }
  } else {
    // If the GEP has at least one loop-varying operand, we are sure to
    // produce a vector of pointers. But if we are only unrolling, we want
    // to produce a scalar GEP for each unroll part. Thus, the GEP we
    // produce with the code below will be scalar (if VF == 1) or vector
    // (otherwise). Note that for the unroll-only case, we still maintain
    // values in the vector mapping with initVector, as we do for other
    // instructions.
    for (unsigned Part = 0; Part < State.UF; ++Part) {
      // The pointer operand of the new GEP. If it's loop-invariant, we
      // won't broadcast it.
      auto *Ptr = isPointerLoopInvariant()
                      ? State.get(getOperand(0), VPIteration(0, 0))
                      : State.get(getOperand(0), Part);

      // Collect all the indices for the new GEP. If any index is
      // loop-invariant, we won't broadcast it.
      SmallVector<Value *, 4> Indices;
      for (unsigned I = 1, E = getNumOperands(); I < E; I++) {
        VPValue *Operand = getOperand(I);
        if (isIndexLoopInvariant(I - 1))
          Indices.push_back(State.get(Operand, VPIteration(0, 0)));
        else
          Indices.push_back(State.get(Operand, Part));
      }

      // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
      // but it should be a vector, otherwise.
      auto *NewGEP = State.Builder.CreateGEP(GEP->getSourceElementType(), Ptr,
                                             Indices, "", isInBounds());
      assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
             "NewGEP is not a pointer vector");
      State.set(this, NewGEP, Part);
      State.addMetadata(NewGEP, GEP);
    }
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPWidenGEPRecipe::print(raw_ostream &O, const Twine &Indent,
                             VPSlotTracker &SlotTracker) const {
  O << Indent << "WIDEN-GEP ";
  O << (isPointerLoopInvariant() ? "Inv" : "Var");
  for (size_t I = 0; I < getNumOperands() - 1; ++I)
    O << "[" << (isIndexLoopInvariant(I) ? "Inv" : "Var") << "]";

  O << " ";
  printAsOperand(O, SlotTracker);
  O << " = getelementptr";
  printFlags(O);
  printOperands(O, SlotTracker);
}
#endif

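// Compute the per-part pointer of a consecutive memory access; for reverse
// accesses the pointer is moved back so the wide access starts at the last
// vector element.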
void VPVectorPointerRecipe::execute(VPTransformState &State) {
  auto &Builder = State.Builder;
  State.setDebugLocFrom(getDebugLoc());
  for (unsigned Part = 0; Part < State.UF; ++Part) {
    // Calculate the pointer for the specific unroll-part.
    Value *PartPtr = nullptr;
    // Use i32 for the gep index type when the value is constant,
    // or query DataLayout for a more suitable index type otherwise.
    const DataLayout &DL = Builder.GetInsertBlock()->getDataLayout();
    Type *IndexTy = State.VF.isScalable() && (IsReverse || Part > 0)
                        ? DL.getIndexType(IndexedTy->getPointerTo())
                        : Builder.getInt32Ty();
    Value *Ptr = State.get(getOperand(0), VPIteration(0, 0));
    bool InBounds = isInBounds();
    if (IsReverse) {
      // If the address is consecutive but reversed, then the
      // wide store needs to start at the last vector element.
      // RunTimeVF = VScale * VF.getKnownMinValue()
      // For fixed-width VScale is 1, then RunTimeVF = VF.getKnownMinValue()
      Value *RunTimeVF = getRuntimeVF(Builder, IndexTy, State.VF);
      // NumElt = -Part * RunTimeVF
      Value *NumElt = Builder.CreateMul(
          ConstantInt::get(IndexTy, -(int64_t)Part), RunTimeVF);
      // LastLane = 1 - RunTimeVF
      Value *LastLane =
          Builder.CreateSub(ConstantInt::get(IndexTy, 1), RunTimeVF);
      PartPtr = Builder.CreateGEP(IndexedTy, Ptr, NumElt, "", InBounds);
      PartPtr = Builder.CreateGEP(IndexedTy, PartPtr, LastLane, "", InBounds);
    } else {
      Value *Increment = createStepForVF(Builder, IndexTy, State.VF, Part);
      PartPtr = Builder.CreateGEP(IndexedTy, Ptr, Increment, "", InBounds);
    }

    State.set(this, PartPtr, Part, /*IsScalar*/ true);
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPVectorPointerRecipe::print(raw_ostream &O, const Twine &Indent,
                                  VPSlotTracker &SlotTracker) const {
  O << Indent;
  printAsOperand(O, SlotTracker);
  O << " = vector-pointer ";
  if (IsReverse)
    O << "(reverse) ";

  printOperands(O, SlotTracker);
}
#endif

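// Lower a blend of incoming values into a chain of selects driven by the
// incoming masks (the mask of the first incoming value is never used).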
void VPBlendRecipe::execute(VPTransformState &State) {
  State.setDebugLocFrom(getDebugLoc());
  // We know that all PHIs in non-header blocks are converted into
  // selects, so we don't have to worry about the insertion order and we
  // can just use the builder.
  // At this point we generate the predication tree. There may be
  // duplications since this is a simple recursive scan, but future
  // optimizations will clean it up.

  unsigned NumIncoming = getNumIncomingValues();

  // Generate a sequence of selects of the form:
  // SELECT(Mask3, In3,
  //        SELECT(Mask2, In2,
  //               SELECT(Mask1, In1,
  //                      In0)))
  // Note that Mask0 is never used: lanes for which no path reaches this phi and
  // are essentially undef are taken from In0.
  VectorParts Entry(State.UF);
  bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
  for (unsigned In = 0; In < NumIncoming; ++In) {
    for (unsigned Part = 0; Part < State.UF; ++Part) {
      // We might have single edge PHIs (blocks) - use an identity
      // 'select' for the first PHI operand.
      Value *In0 = State.get(getIncomingValue(In), Part, OnlyFirstLaneUsed);
      if (In == 0)
        Entry[Part] = In0; // Initialize with the first incoming value.
      else {
        // Select between the current value and the previous incoming edge
        // based on the incoming mask.
        Value *Cond = State.get(getMask(In), Part, OnlyFirstLaneUsed);
        Entry[Part] =
            State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
      }
    }
  }

  for (unsigned Part = 0; Part < State.UF; ++Part)
    State.set(this, Entry[Part], Part, OnlyFirstLaneUsed);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPBlendRecipe::print(raw_ostream &O, const Twine &Indent,
                          VPSlotTracker &SlotTracker) const {
  O << Indent << "BLEND ";
  printAsOperand(O, SlotTracker);
  O << " =";
  if (getNumIncomingValues() == 1) {
    // Not a User of any mask: not really blending, this is a
    // single-predecessor phi.
    O << " ";
    getIncomingValue(0)->printAsOperand(O, SlotTracker);
  } else {
    for (unsigned I = 0, E = getNumIncomingValues(); I < E; ++I) {
      O << " ";
      getIncomingValue(I)->printAsOperand(O, SlotTracker);
      if (I == 0)
        continue;
      O << "/";
      getMask(I)->printAsOperand(O, SlotTracker);
    }
  }
}
#endif

1776 void VPReductionRecipe::execute(VPTransformState &State) {
1777   assert(!State.Instance && "Reduction being replicated.");
1778 Value *PrevInChain = State.get(getChainOp(), 0, /*IsScalar*/ true);
1779 RecurKind Kind = RdxDesc.getRecurrenceKind();
1780   // Propagate the fast-math flags carried by the underlying instruction.
1781   IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
1782   State.Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
1783 for (unsigned Part = 0; Part < State.UF; ++Part) {
1784 Value *NewVecOp = State.get(getVecOp(), Part);
1785 if (VPValue *Cond = getCondOp()) {
1786 Value *NewCond = State.get(Cond, Part, State.VF.isScalar());
1787 VectorType *VecTy = dyn_cast<VectorType>(NewVecOp->getType());
1788 Type *ElementTy = VecTy ? VecTy->getElementType() : NewVecOp->getType();
1789 Value *Iden = RdxDesc.getRecurrenceIdentity(Kind, ElementTy,
1790 RdxDesc.getFastMathFlags());
1791 if (State.VF.isVector()) {
1792 Iden = State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden);
1793 }
1794
1795 Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, Iden);
1796 NewVecOp = Select;
1797 }
1798 Value *NewRed;
1799 Value *NextInChain;
1800 if (IsOrdered) {
1801 if (State.VF.isVector())
1802 NewRed = createOrderedReduction(State.Builder, RdxDesc, NewVecOp,
1803 PrevInChain);
1804 else
1805 NewRed = State.Builder.CreateBinOp(
1806 (Instruction::BinaryOps)RdxDesc.getOpcode(Kind), PrevInChain,
1807 NewVecOp);
1808 PrevInChain = NewRed;
1809 } else {
1810 PrevInChain = State.get(getChainOp(), Part, /*IsScalar*/ true);
1811 NewRed = createTargetReduction(State.Builder, RdxDesc, NewVecOp);
1812 }
1813     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
1814       NextInChain = createMinMaxOp(State.Builder, RdxDesc.getRecurrenceKind(),
1815 NewRed, PrevInChain);
1816 } else if (IsOrdered)
1817 NextInChain = NewRed;
1818 else
1819 NextInChain = State.Builder.CreateBinOp(
1820 (Instruction::BinaryOps)RdxDesc.getOpcode(Kind), NewRed, PrevInChain);
1821 State.set(this, NextInChain, Part, /*IsScalar*/ true);
1822 }
1823}
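// Sketch of the conditional path above (illustration only): for an fadd
// reduction guarded by %cond with VF = 4, NewVecOp becomes
//   %sel = select <4 x i1> %cond, <4 x float> %vec, <4 x float> splat(-0.0)
// so masked-off lanes contribute the identity and drop out of the reduction.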
1824
1825 void VPReductionEVLRecipe::execute(VPTransformState &State) {
1826   assert(!State.Instance && "Reduction being replicated.");
1827 assert(State.UF == 1 &&
1828 "Expected only UF == 1 when vectorizing with explicit vector length.");
1829
1830 auto &Builder = State.Builder;
1831 // Propagate the fast-math flags carried by the underlying instruction.
1832 IRBuilderBase::FastMathFlagGuard FMFGuard(Builder);
1833   const RecurrenceDescriptor &RdxDesc = getRecurrenceDescriptor();
1834   Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
1835
1836 RecurKind Kind = RdxDesc.getRecurrenceKind();
1837 Value *Prev = State.get(getChainOp(), 0, /*IsScalar*/ true);
1838 Value *VecOp = State.get(getVecOp(), 0);
1839 Value *EVL = State.get(getEVL(), VPIteration(0, 0));
1840
1841 VectorBuilder VBuilder(Builder);
1842 VBuilder.setEVL(EVL);
1843 Value *Mask;
1844 // TODO: move the all-true mask generation into VectorBuilder.
1845 if (VPValue *CondOp = getCondOp())
1846 Mask = State.get(CondOp, 0);
1847 else
1848 Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
1849 VBuilder.setMask(Mask);
1850
1851 Value *NewRed;
1852 if (isOrdered()) {
1853 NewRed = createOrderedReduction(VBuilder, RdxDesc, VecOp, Prev);
1854 } else {
1855     NewRed = createSimpleTargetReduction(VBuilder, VecOp, RdxDesc);
1856     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
1857       NewRed = createMinMaxOp(Builder, Kind, NewRed, Prev);
1858 else
1859 NewRed = Builder.CreateBinOp(
1860 (Instruction::BinaryOps)RdxDesc.getOpcode(Kind), NewRed, Prev);
1861 }
1862 State.set(this, NewRed, 0, /*IsScalar*/ true);
1863}
1864
1865 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1866 void VPReductionRecipe::print(raw_ostream &O, const Twine &Indent,
1867                               VPSlotTracker &SlotTracker) const {
1868   O << Indent << "REDUCE ";
1869   printAsOperand(O, SlotTracker);
1870   O << " = ";
1871   getChainOp()->printAsOperand(O, SlotTracker);
1872   O << " +";
1873   if (isa<FPMathOperator>(getUnderlyingInstr()))
1874     O << getUnderlyingInstr()->getFastMathFlags();
1875   O << " reduce." << Instruction::getOpcodeName(RdxDesc.getOpcode()) << " (";
1876   getVecOp()->printAsOperand(O, SlotTracker);
1877   if (isConditional()) {
1878     O << ", ";
1879     getCondOp()->printAsOperand(O, SlotTracker);
1880   }
1881 O << ")";
1882 if (RdxDesc.IntermediateStore)
1883 O << " (with final reduction value stored in invariant address sank "
1884 "outside of loop)";
1885}
1886 
1887 void VPReductionEVLRecipe::print(raw_ostream &O, const Twine &Indent,
1888                                  VPSlotTracker &SlotTracker) const {
1889   const RecurrenceDescriptor &RdxDesc = getRecurrenceDescriptor();
1890   O << Indent << "REDUCE ";
1891   printAsOperand(O, SlotTracker);
1892   O << " = ";
1893   getChainOp()->printAsOperand(O, SlotTracker);
1894   O << " +";
1895   if (isa<FPMathOperator>(getUnderlyingInstr()))
1896     O << getUnderlyingInstr()->getFastMathFlags();
1897   O << " vp.reduce." << Instruction::getOpcodeName(RdxDesc.getOpcode()) << " (";
1898   getVecOp()->printAsOperand(O, SlotTracker);
1899   O << ", ";
1900   getEVL()->printAsOperand(O, SlotTracker);
1901   if (isConditional()) {
1902     O << ", ";
1903     getCondOp()->printAsOperand(O, SlotTracker);
1904   }
1905 O << ")";
1906 if (RdxDesc.IntermediateStore)
1907 O << " (with final reduction value stored in invariant address sank "
1908 "outside of loop)";
1909}
1910#endif
1911
1912 bool VPReplicateRecipe::shouldPack() const {
1913   // Find if the recipe is used by a widened recipe via an intervening
1914 // VPPredInstPHIRecipe. In this case, also pack the scalar values in a vector.
1915 return any_of(users(), [](const VPUser *U) {
1916 if (auto *PredR = dyn_cast<VPPredInstPHIRecipe>(U))
1917 return any_of(PredR->users(), [PredR](const VPUser *U) {
1918 return !U->usesScalars(PredR);
1919 });
1920 return false;
1921 });
1922}
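// Illustration (hypothetical scenario): a predicated, replicated scalar
// load whose result reaches a widened add only through a
// VPPredInstPHIRecipe makes shouldPack() return true; the per-lane scalars
// are then re-packed into a vector, shown as "(S->V)" in the dump below.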
1923
1924 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1925 void VPReplicateRecipe::print(raw_ostream &O, const Twine &Indent,
1926                               VPSlotTracker &SlotTracker) const {
1927   O << Indent << (IsUniform ? "CLONE " : "REPLICATE ");
1928 
1929   if (!getUnderlyingInstr()->getType()->isVoidTy()) {
1930     printAsOperand(O, SlotTracker);
1931     O << " = ";
1932   }
1933   if (auto *CB = dyn_cast<CallBase>(getUnderlyingInstr())) {
1934     O << "call";
1935     printFlags(O);
1936     O << "@" << CB->getCalledFunction()->getName() << "(";
1937     interleaveComma(make_range(op_begin(), op_begin() + (getNumOperands() - 1)),
1938                     O, [&O, &SlotTracker](VPValue *Op) {
1939                       Op->printAsOperand(O, SlotTracker);
1940                     });
1941     O << ")";
1942   } else {
1943     O << Instruction::getOpcodeName(getUnderlyingInstr()->getOpcode());
1944     printFlags(O);
1945     printOperands(O, SlotTracker);
1946   }
1947
1948 if (shouldPack())
1949 O << " (S->V)";
1950}
1951#endif
1952
1953/// Checks if \p C is uniform across all VFs and UFs. It is considered as such
1954/// if it is either defined outside the vector region or its operand is known to
1955/// be uniform across all VFs and UFs (e.g. VPDerivedIV or VPCanonicalIVPHI).
1956/// TODO: Uniformity should be associated with a VPValue and there should be a
1957/// generic way to check.
1958 static bool isUniformAcrossVFsAndUFs(VPScalarCastRecipe *C) {
1959   return C->isDefinedOutsideVectorRegions() ||
1960 isa<VPDerivedIVRecipe>(C->getOperand(0)) ||
1961 isa<VPCanonicalIVPHIRecipe>(C->getOperand(0));
1962}
1963
1964 Value *VPScalarCastRecipe ::generate(VPTransformState &State, unsigned Part) {
1965   assert(vputils::onlyFirstLaneUsed(this) &&
1966          "Codegen only implemented for first lane.");
1967 switch (Opcode) {
1968 case Instruction::SExt:
1969 case Instruction::ZExt:
1970 case Instruction::Trunc: {
1971 // Note: SExt/ZExt not used yet.
1972 Value *Op = State.get(getOperand(0), VPIteration(Part, 0));
1973 return State.Builder.CreateCast(Instruction::CastOps(Opcode), Op, ResultTy);
1974 }
1975 default:
1976 llvm_unreachable("opcode not implemented yet");
1977 }
1978}
1979
1980void VPScalarCastRecipe ::execute(VPTransformState &State) {
1981 bool IsUniformAcrossVFsAndUFs = isUniformAcrossVFsAndUFs(this);
1982 for (unsigned Part = 0; Part != State.UF; ++Part) {
1983 Value *Res;
1984 // Only generate a single instance, if the recipe is uniform across UFs and
1985 // VFs.
1986 if (Part > 0 && IsUniformAcrossVFsAndUFs)
1987 Res = State.get(this, VPIteration(0, 0));
1988 else
1989 Res = generate(State, Part);
1990 State.set(this, Res, VPIteration(Part, 0));
1991 }
1992}
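// Example (illustration, assuming UF = 2 and a trunc that is uniform
// across VFs and UFs): part 0 emits the only cast instruction; part 1
// reuses it via State.get(this, VPIteration(0, 0)) instead of emitting a
// second, identical cast.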
1993
1994#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1995void VPScalarCastRecipe ::print(raw_ostream &O, const Twine &Indent,
1996 VPSlotTracker &SlotTracker) const {
1997 O << Indent << "SCALAR-CAST ";
1998 printAsOperand(O, SlotTracker);
1999 O << " = " << Instruction::getOpcodeName(Opcode) << " ";
2000 printOperands(O, SlotTracker);
2001 O << " to " << *ResultTy;
2002}
2003#endif
2004
2005 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
2006   assert(State.Instance && "Branch on Mask works only on single instance.");
2007
2008 unsigned Part = State.Instance->Part;
2009 unsigned Lane = State.Instance->Lane.getKnownLane();
2010
2011 Value *ConditionBit = nullptr;
2012 VPValue *BlockInMask = getMask();
2013 if (BlockInMask) {
2014 ConditionBit = State.get(BlockInMask, Part);
2015 if (ConditionBit->getType()->isVectorTy())
2016 ConditionBit = State.Builder.CreateExtractElement(
2017 ConditionBit, State.Builder.getInt32(Lane));
2018 } else // Block in mask is all-one.
2019 ConditionBit = State.Builder.getTrue();
2020
2021 // Replace the temporary unreachable terminator with a new conditional branch,
2022 // whose two destinations will be set later when they are created.
2023 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
2024 assert(isa<UnreachableInst>(CurrentTerminator) &&
2025 "Expected to replace unreachable terminator with conditional branch.");
2026 auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
2027 CondBr->setSuccessor(0, nullptr);
2028 ReplaceInstWithInst(CurrentTerminator, CondBr);
2029}
2030
2031 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
2032   assert(State.Instance && "Predicated instruction PHI works per instance.");
2033 Instruction *ScalarPredInst =
2034 cast<Instruction>(State.get(getOperand(0), *State.Instance));
2035 BasicBlock *PredicatedBB = ScalarPredInst->getParent();
2036 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
2037 assert(PredicatingBB && "Predicated block has no single predecessor.");
2038 assert(isa<VPReplicateRecipe>(getOperand(0)) &&
2039 "operand must be VPReplicateRecipe");
2040
2041 // By current pack/unpack logic we need to generate only a single phi node: if
2042 // a vector value for the predicated instruction exists at this point it means
2043 // the instruction has vector users only, and a phi for the vector value is
2044 // needed. In this case the recipe of the predicated instruction is marked to
2045 // also do that packing, thereby "hoisting" the insert-element sequence.
2046 // Otherwise, a phi node for the scalar value is needed.
2047 unsigned Part = State.Instance->Part;
2048 if (State.hasVectorValue(getOperand(0), Part)) {
2049 Value *VectorValue = State.get(getOperand(0), Part);
2050 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
2051 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
2052 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
2053 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
2054 if (State.hasVectorValue(this, Part))
2055 State.reset(this, VPhi, Part);
2056 else
2057 State.set(this, VPhi, Part);
2058 // NOTE: Currently we need to update the value of the operand, so the next
2059 // predicated iteration inserts its generated value in the correct vector.
2060 State.reset(getOperand(0), VPhi, Part);
2061 } else {
2062 Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
2063 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
2064 Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
2065 PredicatingBB);
2066 Phi->addIncoming(ScalarPredInst, PredicatedBB);
2067 if (State.hasScalarValue(this, *State.Instance))
2068 State.reset(this, Phi, *State.Instance);
2069 else
2070 State.set(this, Phi, *State.Instance);
2071 // NOTE: Currently we need to update the value of the operand, so the next
2072 // predicated iteration inserts its generated value in the correct vector.
2073 State.reset(getOperand(0), Phi, *State.Instance);
2074 }
2075}
2076
2077 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2078 void VPPredInstPHIRecipe::print(raw_ostream &O, const Twine &Indent,
2079                                 VPSlotTracker &SlotTracker) const {
2080   O << Indent << "PHI-PREDICATED-INSTRUCTION ";
2081   printAsOperand(O, SlotTracker);
2082   O << " = ";
2083   printOperands(O, SlotTracker);
2084 }
2085#endif
2086
2087 void VPWidenLoadRecipe::execute(VPTransformState &State) {
2088   auto *LI = cast<LoadInst>(&Ingredient);
2089
2090 Type *ScalarDataTy = getLoadStoreType(&Ingredient);
2091 auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
2092 const Align Alignment = getLoadStoreAlignment(&Ingredient);
2093 bool CreateGather = !isConsecutive();
2094
2095   auto &Builder = State.Builder;
2096   State.setDebugLocFrom(getDebugLoc());
2097   for (unsigned Part = 0; Part < State.UF; ++Part) {
2098 Value *NewLI;
2099 Value *Mask = nullptr;
2100 if (auto *VPMask = getMask()) {
2101 // Mask reversal is only needed for non-all-one (null) masks, as reverse
2102 // of a null all-one mask is a null mask.
2103 Mask = State.get(VPMask, Part);
2104 if (isReverse())
2105 Mask = Builder.CreateVectorReverse(Mask, "reverse");
2106 }
2107
2108 Value *Addr = State.get(getAddr(), Part, /*IsScalar*/ !CreateGather);
2109 if (CreateGather) {
2110 NewLI = Builder.CreateMaskedGather(DataTy, Addr, Alignment, Mask, nullptr,
2111 "wide.masked.gather");
2112 } else if (Mask) {
2113 NewLI = Builder.CreateMaskedLoad(DataTy, Addr, Alignment, Mask,
2114 PoisonValue::get(DataTy),
2115 "wide.masked.load");
2116 } else {
2117 NewLI = Builder.CreateAlignedLoad(DataTy, Addr, Alignment, "wide.load");
2118 }
2119 // Add metadata to the load, but setVectorValue to the reverse shuffle.
2120 State.addMetadata(NewLI, LI);
2121 if (Reverse)
2122 NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
2123 State.set(this, NewLI, Part);
2124 }
2125}
2126
2127 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2128 void VPWidenLoadRecipe::print(raw_ostream &O, const Twine &Indent,
2129                               VPSlotTracker &SlotTracker) const {
2130   O << Indent << "WIDEN ";
2131   printAsOperand(O, SlotTracker);
2132   O << " = load ";
2133   printOperands(O, SlotTracker);
2134 }
2135 
2136 void VPWidenLoadEVLRecipe::print(raw_ostream &O, const Twine &Indent,
2137                                  VPSlotTracker &SlotTracker) const {
2138   O << Indent << "WIDEN ";
2139   printAsOperand(O, SlotTracker);
2140   O << " = vp.load ";
2141   printOperands(O, SlotTracker);
2142 }
2143#endif
2144
2145 void VPWidenStoreRecipe::execute(VPTransformState &State) {
2146   auto *SI = cast<StoreInst>(&Ingredient);
2147
2148 VPValue *StoredVPValue = getStoredValue();
2149 bool CreateScatter = !isConsecutive();
2150 const Align Alignment = getLoadStoreAlignment(&Ingredient);
2151
2152   auto &Builder = State.Builder;
2153   State.setDebugLocFrom(getDebugLoc());
2154 
2155 for (unsigned Part = 0; Part < State.UF; ++Part) {
2156 Instruction *NewSI = nullptr;
2157 Value *Mask = nullptr;
2158 if (auto *VPMask = getMask()) {
2159 // Mask reversal is only needed for non-all-one (null) masks, as reverse
2160 // of a null all-one mask is a null mask.
2161 Mask = State.get(VPMask, Part);
2162 if (isReverse())
2163 Mask = Builder.CreateVectorReverse(Mask, "reverse");
2164 }
2165
2166 Value *StoredVal = State.get(StoredVPValue, Part);
2167 if (isReverse()) {
2168 // If we store to reverse consecutive memory locations, then we need
2169 // to reverse the order of elements in the stored value.
2170 StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse");
2171 // We don't want to update the value in the map as it might be used in
2172 // another expression. So don't call resetVectorValue(StoredVal).
2173 }
2174 Value *Addr = State.get(getAddr(), Part, /*IsScalar*/ !CreateScatter);
2175 if (CreateScatter)
2176 NewSI = Builder.CreateMaskedScatter(StoredVal, Addr, Alignment, Mask);
2177 else if (Mask)
2178 NewSI = Builder.CreateMaskedStore(StoredVal, Addr, Alignment, Mask);
2179 else
2180 NewSI = Builder.CreateAlignedStore(StoredVal, Addr, Alignment);
2181 State.addMetadata(NewSI, SI);
2182 }
2183}
2184
2185 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2186 void VPWidenStoreRecipe::print(raw_ostream &O, const Twine &Indent,
2187                                VPSlotTracker &SlotTracker) const {
2188   O << Indent << "WIDEN store ";
2189   printOperands(O, SlotTracker);
2190 }
2191 
2192 void VPWidenStoreEVLRecipe::print(raw_ostream &O, const Twine &Indent,
2193                                   VPSlotTracker &SlotTracker) const {
2194   O << Indent << "WIDEN vp.store ";
2195   printOperands(O, SlotTracker);
2196 }
2197#endif
2198
2199 static Value *createBitOrPointerCast(IRBuilderBase &Builder, Value *V,
2200                                      VectorType *DstVTy, const DataLayout &DL) {
2201 // Verify that V is a vector type with same number of elements as DstVTy.
2202 auto VF = DstVTy->getElementCount();
2203 auto *SrcVecTy = cast<VectorType>(V->getType());
2204 assert(VF == SrcVecTy->getElementCount() && "Vector dimensions do not match");
2205 Type *SrcElemTy = SrcVecTy->getElementType();
2206 Type *DstElemTy = DstVTy->getElementType();
2207 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
2208 "Vector elements must have same size");
2209
2210 // Do a direct cast if element types are castable.
2211 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
2212 return Builder.CreateBitOrPointerCast(V, DstVTy);
2213 }
2214 // V cannot be directly casted to desired vector type.
2215 // May happen when V is a floating point vector but DstVTy is a vector of
2216 // pointers or vice-versa. Handle this using a two-step bitcast using an
2217 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float.
2218 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
2219 "Only one type should be a pointer type");
2220 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
2221 "Only one type should be a floating point type");
2222 Type *IntTy =
2223 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
2224 auto *VecIntTy = VectorType::get(IntTy, VF);
2225 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
2226 return Builder.CreateBitOrPointerCast(CastVal, DstVTy);
2227}
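// Example of the two-step fallback (illustration, assuming 64-bit
// pointers): <4 x double> cannot be bitcast directly to <4 x ptr>, so the
// value is routed through <4 x i64>: a bitcast to <4 x i64> followed by an
// inttoptr to <4 x ptr>, with the element size preserved at every step.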
2228
2229/// Return a vector containing interleaved elements from multiple
2230/// smaller input vectors.
2231 static Value *interleaveVectors(IRBuilderBase &Builder, ArrayRef<Value *> Vals,
2232                                 const Twine &Name) {
2233 unsigned Factor = Vals.size();
2234 assert(Factor > 1 && "Tried to interleave invalid number of vectors");
2235
2236 VectorType *VecTy = cast<VectorType>(Vals[0]->getType());
2237#ifndef NDEBUG
2238 for (Value *Val : Vals)
2239 assert(Val->getType() == VecTy && "Tried to interleave mismatched types");
2240#endif
2241
2242 // Scalable vectors cannot use arbitrary shufflevectors (only splats), so
2243 // must use intrinsics to interleave.
2244   if (VecTy->isScalableTy()) {
2245     VectorType *WideVecTy = VectorType::getDoubleElementsVectorType(VecTy);
2246     return Builder.CreateIntrinsic(WideVecTy, Intrinsic::vector_interleave2,
2247 Vals,
2248 /*FMFSource=*/nullptr, Name);
2249 }
2250
2251 // Fixed length. Start by concatenating all vectors into a wide vector.
2252 Value *WideVec = concatenateVectors(Builder, Vals);
2253
2254 // Interleave the elements into the wide vector.
2255 const unsigned NumElts = VecTy->getElementCount().getFixedValue();
2256 return Builder.CreateShuffleVector(
2257 WideVec, createInterleaveMask(NumElts, Factor), Name);
2258}
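// Fixed-length example (illustration): interleaving two <4 x i32> inputs A
// and B concatenates them into <8 x i32> and applies the shuffle mask
// <0, 4, 1, 5, 2, 6, 3, 7> produced by createInterleaveMask(4, 2), giving
// <A0, B0, A1, B1, A2, B2, A3, B3>.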
2259
2260// Try to vectorize the interleave group that \p Instr belongs to.
2261//
2262// E.g. Translate following interleaved load group (factor = 3):
2263// for (i = 0; i < N; i+=3) {
2264// R = Pic[i]; // Member of index 0
2265// G = Pic[i+1]; // Member of index 1
2266// B = Pic[i+2]; // Member of index 2
2267// ... // do something to R, G, B
2268// }
2269// To:
2270// %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B
2271// %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements
2272// %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements
2273// %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements
2274//
2275// Or translate following interleaved store group (factor = 3):
2276// for (i = 0; i < N; i+=3) {
2277// ... do something to R, G, B
2278// Pic[i] = R; // Member of index 0
2279// Pic[i+1] = G; // Member of index 1
2280// Pic[i+2] = B; // Member of index 2
2281// }
2282// To:
2283// %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2284// %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2285// %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2286// <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements
2287// store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B
2288 void VPInterleaveRecipe::execute(VPTransformState &State) {
2289   assert(!State.Instance && "Interleave group being replicated.");
2290 const InterleaveGroup<Instruction> *Group = IG;
2291 Instruction *Instr = Group->getInsertPos();
2292
2293 // Prepare for the vector type of the interleaved load/store.
2294 Type *ScalarTy = getLoadStoreType(Instr);
2295 unsigned InterleaveFactor = Group->getFactor();
2296 auto *VecTy = VectorType::get(ScalarTy, State.VF * InterleaveFactor);
2297
2298 // Prepare for the new pointers.
2299 SmallVector<Value *, 2> AddrParts;
2300 unsigned Index = Group->getIndex(Instr);
2301
2302 // TODO: extend the masked interleaved-group support to reversed access.
2303 VPValue *BlockInMask = getMask();
2304 assert((!BlockInMask || !Group->isReverse()) &&
2305 "Reversed masked interleave-group not supported.");
2306
2307 Value *Idx;
2308 // If the group is reverse, adjust the index to refer to the last vector lane
2309 // instead of the first. We adjust the index from the first vector lane,
2310 // rather than directly getting the pointer for lane VF - 1, because the
2311 // pointer operand of the interleaved access is supposed to be uniform. For
2312 // uniform instructions, we're only required to generate a value for the
2313 // first vector lane in each unroll iteration.
2314 if (Group->isReverse()) {
2315 Value *RuntimeVF =
2316 getRuntimeVF(State.Builder, State.Builder.getInt32Ty(), State.VF);
2317 Idx = State.Builder.CreateSub(RuntimeVF, State.Builder.getInt32(1));
2318 Idx = State.Builder.CreateMul(Idx,
2319 State.Builder.getInt32(Group->getFactor()));
2320 Idx = State.Builder.CreateAdd(Idx, State.Builder.getInt32(Index));
2321 Idx = State.Builder.CreateNeg(Idx);
2322 } else
2323 Idx = State.Builder.getInt32(-Index);
2324
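// Worked example (illustration): with VF = 4, Factor = 3 and the insert
// position at member Index = 1, the reverse case yields
// Idx = -(((4 - 1) * 3) + 1) = -10, rewinding the uniform pointer to
// member 0 of the last tuple touched by this vector iteration; the forward
// case simply uses -Index = -1.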
2325 VPValue *Addr = getAddr();
2326 for (unsigned Part = 0; Part < State.UF; Part++) {
2327 Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
2328 if (auto *I = dyn_cast<Instruction>(AddrPart))
2329 State.setDebugLocFrom(I->getDebugLoc());
2330
2331     // Note that the current instruction could be at any member index; the
2332     // address needs to be adjusted back to the member of index 0.
2333 //
2334 // E.g. a = A[i+1]; // Member of index 1 (Current instruction)
2335 // b = A[i]; // Member of index 0
2336 // Current pointer is pointed to A[i+1], adjust it to A[i].
2337 //
2338 // E.g. A[i+1] = a; // Member of index 1
2339 // A[i] = b; // Member of index 0
2340 // A[i+2] = c; // Member of index 2 (Current instruction)
2341 // Current pointer is pointed to A[i+2], adjust it to A[i].
2342
2343 bool InBounds = false;
2344 if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2345 InBounds = gep->isInBounds();
2346 AddrPart = State.Builder.CreateGEP(ScalarTy, AddrPart, Idx, "", InBounds);
2347 AddrParts.push_back(AddrPart);
2348 }
2349
2350 State.setDebugLocFrom(Instr->getDebugLoc());
2351 Value *PoisonVec = PoisonValue::get(VecTy);
2352
2353 auto CreateGroupMask = [&BlockInMask, &State, &InterleaveFactor](
2354 unsigned Part, Value *MaskForGaps) -> Value * {
2355 if (State.VF.isScalable()) {
2356 assert(!MaskForGaps && "Interleaved groups with gaps are not supported.");
2357 assert(InterleaveFactor == 2 &&
2358 "Unsupported deinterleave factor for scalable vectors");
2359 auto *BlockInMaskPart = State.get(BlockInMask, Part);
2360 SmallVector<Value *, 2> Ops = {BlockInMaskPart, BlockInMaskPart};
2361 auto *MaskTy = VectorType::get(State.Builder.getInt1Ty(),
2362 State.VF.getKnownMinValue() * 2, true);
2363 return State.Builder.CreateIntrinsic(
2364 MaskTy, Intrinsic::vector_interleave2, Ops,
2365 /*FMFSource=*/nullptr, "interleaved.mask");
2366 }
2367
2368 if (!BlockInMask)
2369 return MaskForGaps;
2370
2371 Value *BlockInMaskPart = State.get(BlockInMask, Part);
2372 Value *ShuffledMask = State.Builder.CreateShuffleVector(
2373 BlockInMaskPart,
2374 createReplicatedMask(InterleaveFactor, State.VF.getKnownMinValue()),
2375 "interleaved.mask");
2376 return MaskForGaps ? State.Builder.CreateBinOp(Instruction::And,
2377 ShuffledMask, MaskForGaps)
2378 : ShuffledMask;
2379 };
2380
2381 const DataLayout &DL = Instr->getDataLayout();
2382 // Vectorize the interleaved load group.
2383 if (isa<LoadInst>(Instr)) {
2384 Value *MaskForGaps = nullptr;
2385 if (NeedsMaskForGaps) {
2386 MaskForGaps = createBitMaskForGaps(State.Builder,
2387 State.VF.getKnownMinValue(), *Group);
2388 assert(MaskForGaps && "Mask for Gaps is required but it is null");
2389 }
2390
2391 // For each unroll part, create a wide load for the group.
2392 SmallVector<Value *, 2> NewLoads;
2393 for (unsigned Part = 0; Part < State.UF; Part++) {
2394 Instruction *NewLoad;
2395 if (BlockInMask || MaskForGaps) {
2396 Value *GroupMask = CreateGroupMask(Part, MaskForGaps);
2397 NewLoad = State.Builder.CreateMaskedLoad(VecTy, AddrParts[Part],
2398 Group->getAlign(), GroupMask,
2399 PoisonVec, "wide.masked.vec");
2400 } else
2401 NewLoad = State.Builder.CreateAlignedLoad(
2402 VecTy, AddrParts[Part], Group->getAlign(), "wide.vec");
2403 Group->addMetadata(NewLoad);
2404 NewLoads.push_back(NewLoad);
2405 }
2406
2407     ArrayRef<VPValue *> VPDefs = definedValues();
2408     const DataLayout &DL = State.CFG.PrevBB->getDataLayout();
2409 if (VecTy->isScalableTy()) {
2410 assert(InterleaveFactor == 2 &&
2411 "Unsupported deinterleave factor for scalable vectors");
2412
2413 for (unsigned Part = 0; Part < State.UF; ++Part) {
2414 // Scalable vectors cannot use arbitrary shufflevectors (only splats),
2415 // so must use intrinsics to deinterleave.
2416 Value *DI = State.Builder.CreateIntrinsic(
2417 Intrinsic::vector_deinterleave2, VecTy, NewLoads[Part],
2418 /*FMFSource=*/nullptr, "strided.vec");
2419 unsigned J = 0;
2420 for (unsigned I = 0; I < InterleaveFactor; ++I) {
2421 Instruction *Member = Group->getMember(I);
2422
2423 if (!Member)
2424 continue;
2425
2426 Value *StridedVec = State.Builder.CreateExtractValue(DI, I);
2427 // If this member has different type, cast the result type.
2428 if (Member->getType() != ScalarTy) {
2429 VectorType *OtherVTy = VectorType::get(Member->getType(), State.VF);
2430 StridedVec =
2431 createBitOrPointerCast(State.Builder, StridedVec, OtherVTy, DL);
2432 }
2433
2434 if (Group->isReverse())
2435 StridedVec =
2436 State.Builder.CreateVectorReverse(StridedVec, "reverse");
2437
2438 State.set(VPDefs[J], StridedVec, Part);
2439 ++J;
2440 }
2441 }
2442
2443 return;
2444 }
2445
2446 // For each member in the group, shuffle out the appropriate data from the
2447 // wide loads.
2448 unsigned J = 0;
2449 for (unsigned I = 0; I < InterleaveFactor; ++I) {
2450 Instruction *Member = Group->getMember(I);
2451
2452 // Skip the gaps in the group.
2453 if (!Member)
2454 continue;
2455
2456 auto StrideMask =
2457 createStrideMask(I, InterleaveFactor, State.VF.getKnownMinValue());
2458 for (unsigned Part = 0; Part < State.UF; Part++) {
2459 Value *StridedVec = State.Builder.CreateShuffleVector(
2460 NewLoads[Part], StrideMask, "strided.vec");
2461
2462 // If this member has different type, cast the result type.
2463 if (Member->getType() != ScalarTy) {
2464 assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
2465 VectorType *OtherVTy = VectorType::get(Member->getType(), State.VF);
2466 StridedVec =
2467 createBitOrPointerCast(State.Builder, StridedVec, OtherVTy, DL);
2468 }
2469
2470 if (Group->isReverse())
2471 StridedVec = State.Builder.CreateVectorReverse(StridedVec, "reverse");
2472
2473 State.set(VPDefs[J], StridedVec, Part);
2474 }
2475 ++J;
2476 }
2477 return;
2478 }
2479
2480 // The sub vector type for current instruction.
2481 auto *SubVT = VectorType::get(ScalarTy, State.VF);
2482
2483 // Vectorize the interleaved store group.
2484 Value *MaskForGaps =
2485 createBitMaskForGaps(State.Builder, State.VF.getKnownMinValue(), *Group);
2486 assert((!MaskForGaps || !State.VF.isScalable()) &&
2487 "masking gaps for scalable vectors is not yet supported.");
2488 ArrayRef<VPValue *> StoredValues = getStoredValues();
2489 for (unsigned Part = 0; Part < State.UF; Part++) {
2490 // Collect the stored vector from each member.
2491 SmallVector<Value *, 4> StoredVecs;
2492 unsigned StoredIdx = 0;
2493 for (unsigned i = 0; i < InterleaveFactor; i++) {
2494 assert((Group->getMember(i) || MaskForGaps) &&
2495 "Fail to get a member from an interleaved store group");
2496 Instruction *Member = Group->getMember(i);
2497
2498 // Skip the gaps in the group.
2499 if (!Member) {
2500 Value *Undef = PoisonValue::get(SubVT);
2501 StoredVecs.push_back(Undef);
2502 continue;
2503 }
2504
2505 Value *StoredVec = State.get(StoredValues[StoredIdx], Part);
2506 ++StoredIdx;
2507
2508 if (Group->isReverse())
2509 StoredVec = State.Builder.CreateVectorReverse(StoredVec, "reverse");
2510
2511 // If this member has different type, cast it to a unified type.
2512
2513 if (StoredVec->getType() != SubVT)
2514 StoredVec = createBitOrPointerCast(State.Builder, StoredVec, SubVT, DL);
2515
2516 StoredVecs.push_back(StoredVec);
2517 }
2518
2519 // Interleave all the smaller vectors into one wider vector.
2520 Value *IVec =
2521 interleaveVectors(State.Builder, StoredVecs, "interleaved.vec");
2522 Instruction *NewStoreInstr;
2523 if (BlockInMask || MaskForGaps) {
2524 Value *GroupMask = CreateGroupMask(Part, MaskForGaps);
2525 NewStoreInstr = State.Builder.CreateMaskedStore(
2526 IVec, AddrParts[Part], Group->getAlign(), GroupMask);
2527 } else
2528 NewStoreInstr = State.Builder.CreateAlignedStore(IVec, AddrParts[Part],
2529 Group->getAlign());
2530
2531 Group->addMetadata(NewStoreInstr);
2532 }
2533}
2534
2535#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2536 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
2537                                VPSlotTracker &SlotTracker) const {
2538 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
2539 IG->getInsertPos()->printAsOperand(O, false);
2540   O << ", ";
2541   getAddr()->printAsOperand(O, SlotTracker);
2542   VPValue *Mask = getMask();
2543 if (Mask) {
2544 O << ", ";
2545 Mask->printAsOperand(O, SlotTracker);
2546 }
2547
2548 unsigned OpIdx = 0;
2549 for (unsigned i = 0; i < IG->getFactor(); ++i) {
2550 if (!IG->getMember(i))
2551 continue;
2552 if (getNumStoreOperands() > 0) {
2553 O << "\n" << Indent << " store ";
2554 getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker);
2555 O << " to index " << i;
2556 } else {
2557       O << "\n" << Indent << "  ";
2558       getVPValue(OpIdx)->printAsOperand(O, SlotTracker);
2559       O << " = load from index " << i;
2560 }
2561 ++OpIdx;
2562 }
2563}
2564#endif
2565
2566 void VPCanonicalIVPHIRecipe::execute(VPTransformState &State) {
2567   Value *Start = getStartValue()->getLiveInIRValue();
2568 PHINode *EntryPart = PHINode::Create(Start->getType(), 2, "index");
2569 EntryPart->insertBefore(State.CFG.PrevBB->getFirstInsertionPt());
2570
2571 BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
2572 EntryPart->addIncoming(Start, VectorPH);
2573 EntryPart->setDebugLoc(getDebugLoc());
2574 for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part)
2575 State.set(this, EntryPart, Part, /*IsScalar*/ true);
2576}
2577
2578 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2579 void VPCanonicalIVPHIRecipe::print(raw_ostream &O, const Twine &Indent,
2580                                    VPSlotTracker &SlotTracker) const {
2581   O << Indent << "EMIT ";
2582   printAsOperand(O, SlotTracker);
2583   O << " = CANONICAL-INDUCTION ";
2584   printOperands(O, SlotTracker);
2585 }
2586#endif
2587
2588 bool VPCanonicalIVPHIRecipe::isCanonical(
2589     InductionDescriptor::InductionKind Kind, VPValue *Start,
2590     VPValue *Step) const {
2591   // Must be an integer induction.
2592   if (Kind != InductionDescriptor::IK_IntInduction)
2593     return false;
2594 // Start must match the start value of this canonical induction.
2595 if (Start != getStartValue())
2596 return false;
2597
2598 // If the step is defined by a recipe, it is not a ConstantInt.
2599 if (Step->getDefiningRecipe())
2600 return false;
2601
2602 ConstantInt *StepC = dyn_cast<ConstantInt>(Step->getLiveInIRValue());
2603 return StepC && StepC->isOne();
2604}
2605
2606 bool VPWidenPointerInductionRecipe::onlyScalarsGenerated(bool IsScalable) {
2607   return IsScalarAfterVectorization &&
2608 (!IsScalable || vputils::onlyFirstLaneUsed(this));
2609}
2610
2611 void VPWidenPointerInductionRecipe::execute(VPTransformState &State) {
2612   assert(IndDesc.getKind() == InductionDescriptor::IK_PtrInduction &&
2613          "Not a pointer induction according to InductionDescriptor!");
2614   assert(cast<PHINode>(getUnderlyingInstr())->getType()->isPointerTy() &&
2615          "Unexpected type.");
2616   assert(!onlyScalarsGenerated(State.VF.isScalable()) &&
2617          "Recipe should have been replaced");
2618
2619 auto *IVR = getParent()->getPlan()->getCanonicalIV();
2620 PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0, /*IsScalar*/ true));
2621 Type *PhiType = IndDesc.getStep()->getType();
2622
2623 // Build a pointer phi
2624 Value *ScalarStartValue = getStartValue()->getLiveInIRValue();
2625 Type *ScStValueType = ScalarStartValue->getType();
2626 PHINode *NewPointerPhi = PHINode::Create(ScStValueType, 2, "pointer.phi",
2627 CanonicalIV->getIterator());
2628
2629 BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
2630 NewPointerPhi->addIncoming(ScalarStartValue, VectorPH);
2631
2632 // A pointer induction, performed by using a gep
2633 BasicBlock::iterator InductionLoc = State.Builder.GetInsertPoint();
2634
2635 Value *ScalarStepValue = State.get(getOperand(1), VPIteration(0, 0));
2636 Value *RuntimeVF = getRuntimeVF(State.Builder, PhiType, State.VF);
2637 Value *NumUnrolledElems =
2638 State.Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF));
2639 Value *InductionGEP = GetElementPtrInst::Create(
2640 State.Builder.getInt8Ty(), NewPointerPhi,
2641 State.Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind",
2642 InductionLoc);
2643 // Add induction update using an incorrect block temporarily. The phi node
2644 // will be fixed after VPlan execution. Note that at this point the latch
2645 // block cannot be used, as it does not exist yet.
2646 // TODO: Model increment value in VPlan, by turning the recipe into a
2647 // multi-def and a subclass of VPHeaderPHIRecipe.
2648 NewPointerPhi->addIncoming(InductionGEP, VectorPH);
2649
2650 // Create UF many actual address geps that use the pointer
2651 // phi as base and a vectorized version of the step value
2652 // (<step*0, ..., step*N>) as offset.
2653 for (unsigned Part = 0; Part < State.UF; ++Part) {
2654 Type *VecPhiType = VectorType::get(PhiType, State.VF);
2655 Value *StartOffsetScalar =
2656 State.Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part));
2657 Value *StartOffset =
2658 State.Builder.CreateVectorSplat(State.VF, StartOffsetScalar);
2659 // Create a vector of consecutive numbers from zero to VF.
2660 StartOffset = State.Builder.CreateAdd(
2661 StartOffset, State.Builder.CreateStepVector(VecPhiType));
2662
2663 assert(ScalarStepValue == State.get(getOperand(1), VPIteration(Part, 0)) &&
2664 "scalar step must be the same across all parts");
2665 Value *GEP = State.Builder.CreateGEP(
2666 State.Builder.getInt8Ty(), NewPointerPhi,
2667 State.Builder.CreateMul(
2668 StartOffset,
2669 State.Builder.CreateVectorSplat(State.VF, ScalarStepValue),
2670 "vector.gep"));
2671 State.set(this, GEP, Part);
2672 }
2673}
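// Resulting shape (illustration, fixed VF = 4, UF = 2, byte step S): part 0
// gets the vector GEP <P, P+S, P+2S, P+3S> and part 1 gets <P+4S .. P+7S>,
// while ptr.ind advances the phi by RuntimeVF * UF * S = 8 * S bytes per
// vector-loop iteration (the i8 GEPs treat the step as a byte offset).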
2674
2675 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2676 void VPWidenPointerInductionRecipe::print(raw_ostream &O, const Twine &Indent,
2677                                           VPSlotTracker &SlotTracker) const {
2678   O << Indent << "EMIT ";
2679   printAsOperand(O, SlotTracker);
2680   O << " = WIDEN-POINTER-INDUCTION ";
2681   getStartValue()->printAsOperand(O, SlotTracker);
2682   O << ", " << *IndDesc.getStep();
2683}
2684#endif
2685
2686 void VPExpandSCEVRecipe::execute(VPTransformState &State) {
2687   assert(!State.Instance && "cannot be used in per-lane");
2688 const DataLayout &DL = State.CFG.PrevBB->getDataLayout();
2689 SCEVExpander Exp(SE, DL, "induction");
2690
2691 Value *Res = Exp.expandCodeFor(Expr, Expr->getType(),
2692 &*State.Builder.GetInsertPoint());
2693 assert(!State.ExpandedSCEVs.contains(Expr) &&
2694 "Same SCEV expanded multiple times");
2695 State.ExpandedSCEVs[Expr] = Res;
2696 for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part)
2697 State.set(this, Res, {Part, 0});
2698}
2699
2700 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2701 void VPExpandSCEVRecipe::print(raw_ostream &O, const Twine &Indent,
2702                                VPSlotTracker &SlotTracker) const {
2703   O << Indent << "EMIT ";
2704   getVPSingleValue()->printAsOperand(O, SlotTracker);
2705   O << " = EXPAND SCEV " << *Expr;
2706 }
2707#endif
2708
2709 void VPWidenCanonicalIVRecipe::execute(VPTransformState &State) {
2710   Value *CanonicalIV = State.get(getOperand(0), 0, /*IsScalar*/ true);
2711 Type *STy = CanonicalIV->getType();
2712 IRBuilder<> Builder(State.CFG.PrevBB->getTerminator());
2713 ElementCount VF = State.VF;
2714 Value *VStart = VF.isScalar()
2715 ? CanonicalIV
2716 : Builder.CreateVectorSplat(VF, CanonicalIV, "broadcast");
2717 for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part) {
2718 Value *VStep = createStepForVF(Builder, STy, VF, Part);
2719 if (VF.isVector()) {
2720 VStep = Builder.CreateVectorSplat(VF, VStep);
2721 VStep =
2722 Builder.CreateAdd(VStep, Builder.CreateStepVector(VStep->getType()));
2723 }
2724 Value *CanonicalVectorIV = Builder.CreateAdd(VStart, VStep, "vec.iv");
2725 State.set(this, CanonicalVectorIV, Part);
2726 }
2727}
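// Concrete shape (illustration, i64 IV, fixed VF = 4, UF = 2):
//   part 0: vec.iv = <iv, iv, iv, iv> + <0, 1, 2, 3>
//   part 1: vec.iv = <iv, iv, iv, iv> + <4, 5, 6, 7>
// i.e. the broadcast start plus createStepForVF(Part) splatted and offset
// by a step vector.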
2728
2729 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2730 void VPWidenCanonicalIVRecipe::print(raw_ostream &O, const Twine &Indent,
2731                                      VPSlotTracker &SlotTracker) const {
2732   O << Indent << "EMIT ";
2733   printAsOperand(O, SlotTracker);
2734   O << " = WIDEN-CANONICAL-INDUCTION ";
2735   printOperands(O, SlotTracker);
2736 }
2737#endif
2738
2739 void VPFirstOrderRecurrencePHIRecipe::execute(VPTransformState &State) {
2740   auto &Builder = State.Builder;
2741 // Create a vector from the initial value.
2742 auto *VectorInit = getStartValue()->getLiveInIRValue();
2743
2744 Type *VecTy = State.VF.isScalar()
2745 ? VectorInit->getType()
2746 : VectorType::get(VectorInit->getType(), State.VF);
2747
2748 BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
2749 if (State.VF.isVector()) {
2750 auto *IdxTy = Builder.getInt32Ty();
2751 auto *One = ConstantInt::get(IdxTy, 1);
2752 IRBuilder<>::InsertPointGuard Guard(Builder);
2753 Builder.SetInsertPoint(VectorPH->getTerminator());
2754 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, State.VF);
2755 auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
2756 VectorInit = Builder.CreateInsertElement(
2757 PoisonValue::get(VecTy), VectorInit, LastIdx, "vector.recur.init");
2758 }
2759
2760 // Create a phi node for the new recurrence.
2761 PHINode *EntryPart = PHINode::Create(VecTy, 2, "vector.recur");
2762 EntryPart->insertBefore(State.CFG.PrevBB->getFirstInsertionPt());
2763 EntryPart->addIncoming(VectorInit, VectorPH);
2764 State.set(this, EntryPart, 0);
2765}
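// Initial value shape (illustration, fixed VF = 4): vector.recur.init is
// <poison, poison, poison, %init>, i.e. the scalar start value sits in lane
// LastIdx = RuntimeVF - 1; the poison lanes are only ever consumed through
// the first-order recurrence splice with the previous iteration's vector.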
2766
2767 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2768 void VPFirstOrderRecurrencePHIRecipe::print(raw_ostream &O, const Twine &Indent,
2769                                             VPSlotTracker &SlotTracker) const {
2770   O << Indent << "FIRST-ORDER-RECURRENCE-PHI ";
2771   printAsOperand(O, SlotTracker);
2772   O << " = phi ";
2773   printOperands(O, SlotTracker);
2774 }
2775#endif
2776
2777 void VPReductionPHIRecipe::execute(VPTransformState &State) {
2778   auto &Builder = State.Builder;
2779
2780 // Reductions do not have to start at zero. They can start with
2781 // any loop invariant values.
2782 VPValue *StartVPV = getStartValue();
2783 Value *StartV = StartVPV->getLiveInIRValue();
2784
2785 // In order to support recurrences we need to be able to vectorize Phi nodes.
2786 // Phi nodes have cycles, so we need to vectorize them in two stages. This is
2787 // stage #1: We create a new vector PHI node with no incoming edges. We'll use
2788 // this value when we vectorize all of the instructions that use the PHI.
2789 bool ScalarPHI = State.VF.isScalar() || IsInLoop;
2790 Type *VecTy = ScalarPHI ? StartV->getType()
2791 : VectorType::get(StartV->getType(), State.VF);
2792
2793 BasicBlock *HeaderBB = State.CFG.PrevBB;
2794 assert(State.CurrentVectorLoop->getHeader() == HeaderBB &&
2795 "recipe must be in the vector loop header");
2796 unsigned LastPartForNewPhi = isOrdered() ? 1 : State.UF;
2797 for (unsigned Part = 0; Part < LastPartForNewPhi; ++Part) {
2798 Instruction *EntryPart = PHINode::Create(VecTy, 2, "vec.phi");
2799 EntryPart->insertBefore(HeaderBB->getFirstInsertionPt());
2800 State.set(this, EntryPart, Part, IsInLoop);
2801 }
2802
2803 BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
2804
2805 Value *Iden = nullptr;
2806   RecurKind RK = RdxDesc.getRecurrenceKind();
2807   if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK) ||
2808       RecurrenceDescriptor::isAnyOfRecurrenceKind(RK)) {
2809     // MinMax and AnyOf reductions have the start value as their identity.
2810 if (ScalarPHI) {
2811 Iden = StartV;
2812 } else {
2813 IRBuilderBase::InsertPointGuard IPBuilder(Builder);
2814 Builder.SetInsertPoint(VectorPH->getTerminator());
2815 StartV = Iden =
2816 Builder.CreateVectorSplat(State.VF, StartV, "minmax.ident");
2817 }
2818 } else {
2819 Iden = RdxDesc.getRecurrenceIdentity(RK, VecTy->getScalarType(),
2820 RdxDesc.getFastMathFlags());
2821
2822 if (!ScalarPHI) {
2823 Iden = Builder.CreateVectorSplat(State.VF, Iden);
2824 IRBuilderBase::InsertPointGuard IPBuilder(Builder);
2825 Builder.SetInsertPoint(VectorPH->getTerminator());
2826 Constant *Zero = Builder.getInt32(0);
2827 StartV = Builder.CreateInsertElement(Iden, StartV, Zero);
2828 }
2829 }
2830
2831 for (unsigned Part = 0; Part < LastPartForNewPhi; ++Part) {
2832 Value *EntryPart = State.get(this, Part, IsInLoop);
2833 // Make sure to add the reduction start value only to the
2834 // first unroll part.
2835 Value *StartVal = (Part == 0) ? StartV : Iden;
2836 cast<PHINode>(EntryPart)->addIncoming(StartVal, VectorPH);
2837 }
2838}
2839
2840 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2841 void VPReductionPHIRecipe::print(raw_ostream &O, const Twine &Indent,
2842                                  VPSlotTracker &SlotTracker) const {
2843   O << Indent << "WIDEN-REDUCTION-PHI ";
2844 
2845   printAsOperand(O, SlotTracker);
2846   O << " = phi ";
2847   printOperands(O, SlotTracker);
2848 }
2849#endif
2850
2851 void VPWidenPHIRecipe::execute(VPTransformState &State) {
2852   assert(EnableVPlanNativePath &&
2853          "Non-native vplans are not expected to have VPWidenPHIRecipes.");
2854
2855 Value *Op0 = State.get(getOperand(0), 0);
2856 Type *VecTy = Op0->getType();
2857 Value *VecPhi = State.Builder.CreatePHI(VecTy, 2, "vec.phi");
2858 State.set(this, VecPhi, 0);
2859}
2860
2861 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2862 void VPWidenPHIRecipe::print(raw_ostream &O, const Twine &Indent,
2863                              VPSlotTracker &SlotTracker) const {
2864 O << Indent << "WIDEN-PHI ";
2865
2866 auto *OriginalPhi = cast<PHINode>(getUnderlyingValue());
2867 // Unless all incoming values are modeled in VPlan print the original PHI
2868 // directly.
2869 // TODO: Remove once all VPWidenPHIRecipe instances keep all relevant incoming
2870 // values as VPValues.
2871 if (getNumOperands() != OriginalPhi->getNumOperands()) {
2872 O << VPlanIngredient(OriginalPhi);
2873 return;
2874 }
2875
2876   printAsOperand(O, SlotTracker);
2877   O << " = phi ";
2878   printOperands(O, SlotTracker);
2879 }
2880#endif
2881
2882// TODO: It would be good to use the existing VPWidenPHIRecipe instead and
2883// remove VPActiveLaneMaskPHIRecipe.
2884 void VPActiveLaneMaskPHIRecipe::execute(VPTransformState &State) {
2885   BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
2886 for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part) {
2887 Value *StartMask = State.get(getOperand(0), Part);
2888 PHINode *EntryPart =
2889 State.Builder.CreatePHI(StartMask->getType(), 2, "active.lane.mask");
2890 EntryPart->addIncoming(StartMask, VectorPH);
2891 EntryPart->setDebugLoc(getDebugLoc());
2892 State.set(this, EntryPart, Part);
2893 }
2894}
2895
2896 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2897 void VPActiveLaneMaskPHIRecipe::print(raw_ostream &O, const Twine &Indent,
2898                                       VPSlotTracker &SlotTracker) const {
2899   O << Indent << "ACTIVE-LANE-MASK-PHI ";
2900 
2901   printAsOperand(O, SlotTracker);
2902   O << " = phi ";
2903   printOperands(O, SlotTracker);
2904 }
2905#endif
2906
2907 void VPEVLBasedIVPHIRecipe::execute(VPTransformState &State) {
2908   BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
2909 assert(State.UF == 1 && "Expected unroll factor 1 for VP vectorization.");
2910 Value *Start = State.get(getOperand(0), VPIteration(0, 0));
2911 PHINode *EntryPart =
2912 State.Builder.CreatePHI(Start->getType(), 2, "evl.based.iv");
2913 EntryPart->addIncoming(Start, VectorPH);
2914 EntryPart->setDebugLoc(getDebugLoc());
2915 State.set(this, EntryPart, 0, /*IsScalar=*/true);
2916}
2917
2918 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2919 void VPEVLBasedIVPHIRecipe::print(raw_ostream &O, const Twine &Indent,
2920                                   VPSlotTracker &SlotTracker) const {
2921   O << Indent << "EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI ";
2922 
2923   printAsOperand(O, SlotTracker);
2924   O << " = phi ";
2925   printOperands(O, SlotTracker);
2926 }
2927#endif
amdgpu AMDGPU Register Bank Select
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static cl::opt< TargetTransformInfo::TargetCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(TargetTransformInfo::TCK_RecipThroughput), cl::values(clEnumValN(TargetTransformInfo::TCK_RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(TargetTransformInfo::TCK_Latency, "latency", "Instruction latency"), clEnumValN(TargetTransformInfo::TCK_CodeSize, "code-size", "Code size"), clEnumValN(TargetTransformInfo::TCK_SizeAndLatency, "size-latency", "Code size and latency")))
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
uint64_t Addr
std::string Name
Hexagon Common GEP
cl::opt< unsigned > ForceTargetInstructionCost("force-target-instruction-cost", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's expected cost for " "an instruction to a single constant value. Mostly " "useful for getting consistent testing."))
#define I(x, y, z)
Definition: MD5.cpp:58
mir Rename Register Operands
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first found DebugLoc that has a DILocation, given a range of instructions.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
separate const offset from gep
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
static Value * interleaveVectors(IRBuilderBase &Builder, ArrayRef< Value * > Vals, const Twine &Name)
Return a vector containing interleaved elements from multiple smaller input vectors.
static Value * getStepVector(Value *Val, Value *StartIdx, Value *Step, Instruction::BinaryOps BinOp, ElementCount VF, IRBuilderBase &Builder)
This function adds (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step,...
static Value * createBitOrPointerCast(IRBuilderBase &Builder, Value *V, VectorType *DstVTy, const DataLayout &DL)
cl::opt< unsigned > ForceTargetInstructionCost
static bool isUniformAcrossVFsAndUFs(VPScalarCastRecipe *C)
Checks if C is uniform across all VFs and UFs.
static Instruction * getInstructionForCost(const VPRecipeBase *R)
Return the underlying instruction to be used for computing R's cost via the legacy cost model.
static Constant * getSignedIntOrFpConstant(Type *Ty, int64_t C)
A helper function that returns an integer or floating-point constant with value C.
static Value * getRuntimeVFAsFloat(IRBuilderBase &B, Type *FTy, ElementCount VF)
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition: VPlanSLP.cpp:191
This file contains the declarations of the Vectorization Plan base classes:
Value * RHS
static const uint32_t IV[8]
Definition: blake3_impl.h:78
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:416
InstListType::const_iterator getFirstNonPHIIt() const
Iterator returning form of getFirstNonPHI.
Definition: BasicBlock.cpp:374
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
Definition: BasicBlock.cpp:459
const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
Definition: BasicBlock.cpp:296
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:177
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:239
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Definition: BasicBlock.cpp:292
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
void setSuccessor(unsigned idx, BasicBlock *NewSucc)
This class represents a function call, abstracting a target machine's calling convention.
static bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:757
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:780
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:782
static StringRef getPredicateName(Predicate P)
This is the shared class of boolean and integer constants.
Definition: Constants.h:81
bool isOne() const
This is just a convenience method to make client code smaller for a common case.
Definition: Constants.h:212
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
Definition: Constants.h:124
static Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
Definition: Constants.cpp:1450
This is an important base class in LLVM.
Definition: Constant.h:42
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
A debug info location.
Definition: DebugLoc.h:33
constexpr bool isVector() const
One or more elements.
Definition: TypeSize.h:326
constexpr bool isScalar() const
Exactly one element.
Definition: TypeSize.h:322
Convenience struct for specifying and reasoning about fast-math flags.
Definition: FMF.h:20
void setAllowContract(bool B=true)
Definition: FMF.h:91
bool noSignedZeros() const
Definition: FMF.h:68
bool noInfs() const
Definition: FMF.h:67
void setAllowReciprocal(bool B=true)
Definition: FMF.h:88
bool allowReciprocal() const
Definition: FMF.h:69
void print(raw_ostream &O) const
Print fast-math flags to O.
Definition: Operator.cpp:260
void setNoSignedZeros(bool B=true)
Definition: FMF.h:85
bool allowReassoc() const
Flag queries.
Definition: FMF.h:65
bool approxFunc() const
Definition: FMF.h:71
void setNoNaNs(bool B=true)
Definition: FMF.h:79
void setAllowReassoc(bool B=true)
Flag setters.
Definition: FMF.h:76
bool noNaNs() const
Definition: FMF.h:66
void setApproxFunc(bool B=true)
Definition: FMF.h:94
void setNoInfs(bool B=true)
Definition: FMF.h:82
bool allowContract() const
Definition: FMF.h:70
Class to represent function types.
Definition: DerivedTypes.h:103
Type * getParamType(unsigned i) const
Parameter type accessors.
Definition: DerivedTypes.h:135
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition: Function.h:214
bool willReturn() const
Determine if the function will return.
Definition: Function.h:660
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
Definition: Function.h:249
bool doesNotThrow() const
Determine if the function cannot unwind.
Definition: Function.h:593
Type * getReturnType() const
Returns the type of the ret val.
Definition: Function.h:219
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Definition: Instructions.h:938
Common base class shared among various IRBuilders.
Definition: IRBuilder.h:91
Value * CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2381
Value * CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx, const Twine &Name="")
Definition: IRBuilder.h:2492
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
Definition: IRBuilder.h:508
Value * CreateSIToFP(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2114
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
Definition: IRBuilder.h:2480
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Definition: IRBuilder.h:1824
Value * CreateFAdd(Value *L, Value *R, const Twine &Name="", MDNode *FPMD=nullptr)
Definition: IRBuilder.h:1550
Value * CreateVectorSplice(Value *V1, Value *V2, int64_t Imm, const Twine &Name="")
Return a vector splice intrinsic if using scalable vectors, otherwise return a shufflevector.
Definition: IRBuilder.cpp:1166
Value * CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name="")
Return a vector value that contains.
Definition: IRBuilder.cpp:1193
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
Definition: IRBuilder.h:2536
ConstantInt * getTrue()
Get the constant value for i1 true.
Definition: IRBuilder.h:463
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Definition: IRBuilder.cpp:933
CallInst * CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Load intrinsic.
Definition: IRBuilder.cpp:579
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Definition: IRBuilder.cpp:1091
BasicBlock::iterator GetInsertPoint() const
Definition: IRBuilder.h:172
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2053
Value * CreateFreeze(Value *V, const Twine &Name="")
Definition: IRBuilder.h:2555
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition: IRBuilder.h:523
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:1996
Value * CreateUIToFP(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition: IRBuilder.h:2101
BasicBlock * GetInsertBlock() const
Definition: IRBuilder.h:171
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Definition: IRBuilder.h:308
Value * CreateVectorReverse(Value *V, const Twine &Name="")
Return a vector value that contains the vector V reversed.
Definition: IRBuilder.cpp:1151
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:1883
Value * CreateNeg(Value *V, const Twine &Name="", bool HasNSW=false)
Definition: IRBuilder.h:1738
InsertPoint saveIP() const
Returns the current insert point.
Definition: IRBuilder.h:274
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition: IRBuilder.h:483
Value * CreateBitOrPointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2225
Value * CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2386
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Definition: IRBuilder.h:2417
Value * CreateNot(Value *V, const Twine &Name="")
Definition: IRBuilder.h:1766
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2261
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1361
BranchInst * CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)
Create a conditional 'br Cond, TrueDest, FalseDest' instruction.
Definition: IRBuilder.h:1137
Value * CreateNAryOp(unsigned Opc, ArrayRef< Value * > Ops, const Twine &Name="", MDNode *FPMathTag=nullptr)
Create either a UnaryOperator or BinaryOperator depending on Opc.
Definition: IRBuilder.cpp:1006
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition: IRBuilder.h:2041
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
Definition: IRBuilder.h:2514
LLVMContext & getContext() const
Definition: IRBuilder.h:173
CallInst * CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment, Value *Mask)
Create a call to Masked Store intrinsic.
Definition: IRBuilder.cpp:599
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1344
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Definition: IRBuilder.h:2027
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:1683
Value * CreateLogicalAnd(Value *Cond1, Value *Cond2, const Twine &Name="")
Definition: IRBuilder.h:1693
Value * CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2181
void restoreIP(InsertPoint IP)
Sets the current insert point to a previously-saved location.
Definition: IRBuilder.h:286
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:177
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
Definition: IRBuilder.h:1843
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2432
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2371
Value * CreateFMul(Value *L, Value *R, const Twine &Name="", MDNode *FPMD=nullptr)
Definition: IRBuilder.h:1604
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Definition: IRBuilder.h:513
Value * CreateStepVector(Type *DstType, const Twine &Name="")
Creates a vector of type DstType with the linear sequence <0, 1, ...>
Definition: IRBuilder.cpp:110
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1378
CallInst * CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment, Value *Mask=nullptr)
Create a call to Masked Scatter intrinsic.
Definition: IRBuilder.cpp:662
CallInst * CreateMaskedGather(Type *Ty, Value *Ptrs, Align Alignment, Value *Mask=nullptr, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Gather intrinsic.
Definition: IRBuilder.cpp:631
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2686
A struct for saving information about induction variables.
InductionKind getKind() const
const SCEV * getStep() const
InductionKind
This enum represents the kinds of inductions that we support.
@ IK_PtrInduction
Pointer induction var. Step = C.
@ IK_IntInduction
Integer induction variable. Step = C.
This instruction inserts a single (scalar) element into a VectorType value.
VectorType * getType() const
Overload to return most specific vector type.
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
Definition: Instruction.cpp:97
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:466
bool isBinaryOp() const
Definition: Instruction.h:279
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:92
FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports these flags.
const char * getOpcodeName() const
Definition: Instruction.h:276
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:463
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:266
The group of interleaved loads/stores sharing the same stride and close to each other.
Definition: VectorUtils.h:468
uint32_t getFactor() const
Definition: VectorUtils.h:484
InstTy * getMember(uint32_t Index) const
Get the member with the given index Index.
Definition: VectorUtils.h:538
uint32_t getIndex(const InstTy *Instr) const
Get the index for the given member.
Definition: VectorUtils.h:545
bool isReverse() const
Definition: VectorUtils.h:483
InstTy * getInsertPos() const
Definition: VectorUtils.h:554
void addMetadata(InstTy *NewInst) const
Add metadata (e.g. alias info) from the instruction group to the new member.
Align getAlign() const
Definition: VectorUtils.h:485
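The InterleaveGroup accessors above are usually used together when forming the wide access; a minimal sketch, assuming a group that may contain gaps (visitMembers is hypothetical):

  #include "llvm/Analysis/VectorUtils.h"
  using namespace llvm;

  // Sketch: walk the members of an interleave group. Groups may have
  // gaps, so getMember() can return null for some indices.
  void visitMembers(const InterleaveGroup<Instruction> &Group) {
    for (unsigned I = 0; I < Group.getFactor(); ++I)
      if (Instruction *Member = Group.getMember(I)) {
        unsigned Idx = Group.getIndex(Member); // == I for present members
        (void)Idx;
      }
  }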
BlockT * getHeader() const
void print(raw_ostream &OS, const SlotIndexes *=nullptr, bool IsStandalone=true) const
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
void setIncomingValueForBlock(const BasicBlock *BB, Value *V)
Set every incoming value(s) for block BB to V.
int getBasicBlockIndex(const BasicBlock *BB) const
Return the first index of the specified basic block in the value list for this PHI.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will have.
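A minimal sketch of the PHINode creation pattern these entries describe; all blocks and values are assumed to be supplied by the caller:

  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  // Sketch: build a loop-carried phi and wire both incoming edges.
  PHINode *makeLoopPhi(Type *Ty, BasicBlock *Header, BasicBlock *Preheader,
                       BasicBlock *Latch, Value *Start, Value *Next) {
    PHINode *Phi = PHINode::Create(Ty, /*NumReservedValues=*/2, "iv",
                                   Header->begin()); // phis lead the block
    Phi->addIncoming(Start, Preheader);
    Phi->addIncoming(Next, Latch);
    return Phi;
  }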
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
Definition: Constants.cpp:1852
The RecurrenceDescriptor is used to identify recurrence variables in a loop.
Definition: IVDescriptors.h:70
FastMathFlags getFastMathFlags() const
static unsigned getOpcode(RecurKind Kind)
Returns the opcode corresponding to the RecurrenceKind.
unsigned getOpcode() const
Type * getRecurrenceType() const
Returns the type of the recurrence.
static bool isAnyOfRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,y) is loop invariant.
bool isSigned() const
Returns true if all source operands of the recurrence are SExtInsts.
RecurKind getRecurrenceKind() const
Value * getRecurrenceIdentity(RecurKind K, Type *Tp, FastMathFlags FMF) const
Returns identity corresponding to the RecurrenceKind.
StoreInst * IntermediateStore
Reductions may store temporary or final result to an invariant address.
static bool isMinMaxRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is any min/max kind.
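A sketch of how a recipe might query the descriptor for the value seeding a reduction phi; reductionIdentity is a hypothetical helper, and the assertion encodes the caveat for min/max and any-of kinds, which are seeded from the initial scalar value instead:

  #include "llvm/Analysis/IVDescriptors.h"
  #include <cassert>
  using namespace llvm;

  // Sketch: fetch the neutral element for a plain arithmetic reduction.
  Value *reductionIdentity(const RecurrenceDescriptor &RdxDesc) {
    RecurKind Kind = RdxDesc.getRecurrenceKind();
    assert(!RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind) &&
           !RecurrenceDescriptor::isAnyOfRecurrenceKind(Kind) &&
           "no arithmetic identity for min/max or any-of reductions");
    return RdxDesc.getRecurrenceIdentity(Kind, RdxDesc.getRecurrenceType(),
                                         RdxDesc.getFastMathFlags());
  }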
This class uses scalar evolution analysis information to rewrite expressions in canonical form.
Type * getType() const
Return the LLVM type of this SCEV expression.
This class provides computation of slot numbers for LLVM Assembly writing.
Definition: AsmWriter.cpp:697
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
An instruction for storing to memory.
Definition: Instructions.h:290
InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, TTI::OperandValueInfo Opd1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Opd2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args=std::nullopt, const Instruction *CxtI=nullptr, const TargetLibraryInfo *TLibInfo=nullptr) const
This is an approximation of reciprocal throughput of a math/logic op.
static OperandValueInfo getOperandInfo(const Value *V)
Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
TargetCostKind
The kind of cost model.
@ TCK_RecipThroughput
Reciprocal throughput.
InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, const Instruction *I=nullptr) const
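A sketch of a cost query combining the two TTI hooks above; the choice of opcodes and predicate is illustrative only:

  #include "llvm/Analysis/TargetTransformInfo.h"
  #include "llvm/IR/InstrTypes.h"
  using namespace llvm;

  // Sketch: cost a widened add plus a widened compare under the default
  // reciprocal-throughput cost kind. VecTy is the widened operand type.
  InstructionCost addAndCmpCost(const TargetTransformInfo &TTI, Type *VecTy) {
    auto CostKind = TargetTransformInfo::TCK_RecipThroughput;
    InstructionCost Cost =
        TTI.getArithmeticInstrCost(Instruction::Add, VecTy, CostKind);
    Cost += TTI.getCmpSelInstrCost(Instruction::ICmp, VecTy,
                                   CmpInst::makeCmpResultType(VecTy),
                                   CmpInst::ICMP_EQ, CostKind);
    return Cost;
  }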
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values.
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:261
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:251
static IntegerType * getInt1Ty(LLVMContext &C)
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:128
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition: Type.h:184
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:224
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:139
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:343
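A tiny sketch showing why getScalarType() is convenient: one query covers scalar and vector shapes alike (elementBits is hypothetical):

  #include "llvm/IR/Type.h"
  using namespace llvm;

  // Sketch: getScalarType() folds the vector case, so this yields the
  // element width for both scalar and vector types.
  unsigned elementBits(Type *Ty) {
    Type *EltTy = Ty->getScalarType(); // Ty itself if not a vector
    return EltTy->isIntegerTy() || EltTy->isFloatingPointTy()
               ? Ty->getScalarSizeInBits()
               : 0; // pointers etc. have no fixed primitive size here
  }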
value_op_iterator value_op_end()
Definition: User.h:263
Value * getOperand(unsigned i) const
Definition: User.h:169
value_op_iterator value_op_begin()
Definition: User.h:260
void execute(VPTransformState &State) override
Generate the active lane mask phi of the vector loop.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph.
Definition: VPlan.h:2986
RecipeListTy & getRecipeList()
Returns a reference to the list of recipes.
Definition: VPlan.h:3036
iterator end()
Definition: VPlan.h:3020
VPRegionBlock * getEnclosingLoopRegion()
Definition: VPlan.cpp:580
void insert(VPRecipeBase *Recipe, iterator InsertPt)
Definition: VPlan.h:3049
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getIncomingValue(unsigned Idx) const
Return incoming value number Idx.
Definition: VPlan.h:2064
VPValue * getMask(unsigned Idx) const
Return mask number Idx.
Definition: VPlan.h:2069
unsigned getNumIncomingValues() const
Return the number of incoming values, taking into account that the first incoming value has no mask.
Definition: VPlan.h:2061
void execute(VPTransformState &State) override
Generate the phi/select nodes.
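These accessors describe a blend's shape; the select-chain lowering they imply looks roughly like the sketch below (lowerBlend and its parallel Vals/Masks arrays are illustrative, not the recipe's actual code):

  #include "llvm/ADT/ArrayRef.h"
  #include "llvm/IR/IRBuilder.h"
  using namespace llvm;

  // Sketch: fold masked incoming values into a select chain. Vals[0]
  // is the unmasked first incoming value; Masks[I-1] guards Vals[I].
  Value *lowerBlend(IRBuilderBase &Builder, ArrayRef<Value *> Vals,
                    ArrayRef<Value *> Masks) {
    Value *Result = Vals[0];
    for (unsigned I = 1; I < Vals.size(); ++I)
      Result = Builder.CreateSelect(Masks[I - 1], Vals[I], Result);
    return Result;
  }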
VPRegionBlock * getParent()
Definition: VPlan.h:509
VPlan * getPlan()
Definition: VPlan.cpp:155
const VPBasicBlock * getEntryBasicBlock() const
Definition: VPlan.cpp:160
VPBlockBase * getSingleSuccessor() const
Definition: VPlan.h:544
VPValue * getMask() const
Return the mask used by this recipe.
Definition: VPlan.h:2409
void execute(VPTransformState &State) override
Generate the extraction of the appropriate bit from the block mask and the conditional branch.
void execute(VPTransformState &State) override
Generate the canonical scalar induction phi of the vector loop.
bool isCanonical(InductionDescriptor::InductionKind Kind, VPValue *Start, VPValue *Step) const
Check if the induction described by Kind, Start and Step is canonical, i.e.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
This class augments a recipe with a set of VPValues defined by the recipe.
Definition: VPlanValue.h:307
void dump() const
Dump the VPDef to stderr (for debugging).
Definition: VPlan.cpp:116
ArrayRef< VPValue * > definedValues()
Returns an ArrayRef of the values defined by the VPDef.
Definition: VPlanValue.h:418
VPValue * getVPSingleValue()
Returns the only VPValue defined by the VPDef.
Definition: VPlanValue.h:396
VPValue * getVPValue(unsigned I)
Returns the VPValue with index I defined by the VPDef.
Definition: VPlanValue.h:408
unsigned getVPDefID() const
Definition: VPlanValue.h:428
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getStepValue() const
Definition: VPlan.h:2924
VPValue * getStartValue() const
Definition: VPlan.h:2923
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the phi used to correctly track the EVL-based IV across iterations.
void execute(VPTransformState &State) override
Generate a canonical vector induction variable of the vector loop.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getStartValue()
Returns the start value of the phi, if one is set.
Definition: VPlan.h:1760
bool hasResult() const
Definition: VPlan.h:1369
LLVM_DUMP_METHOD void dump() const
Print the VPInstruction to dbgs() (for debugging).
unsigned getOpcode() const
Definition: VPlan.h:1345
bool onlyFirstPartUsed(const VPValue *Op) const override
Returns true if the recipe only uses the first part of operand Op.
bool isVectorToScalar() const
Returns true if this VPInstruction produces a scalar value from a vector, e.g. by performing a reduction or extracting a lane.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the VPInstruction to O.
bool onlyFirstLaneUsed(const VPValue *Op) const override
Returns true if the recipe only uses the first lane of operand Op.
bool isSingleScalar() const
Returns true if this VPInstruction's operands are single scalars and the result is also a single scalar.
@ ResumePhi
Creates a scalar phi in a leaf VPBB with a single predecessor in VPlan.
Definition: VPlan.h:1251
@ FirstOrderRecurrenceSplice
Definition: VPlan.h:1239
@ CanonicalIVIncrementForPart
Definition: VPlan.h:1254
@ CalculateTripCountMinusVF
Definition: VPlan.h:1252
void execute(VPTransformState &State) override
Generate the instruction.
VPValue * getAddr() const
Return the address accessed by this recipe.
Definition: VPlan.h:2139
VPValue * getMask() const
Return the mask used by this recipe.
Definition: VPlan.h:2145
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the wide load or store, and shuffles.
ArrayRef< VPValue * > getStoredValues() const
Return the VPValues stored by this interleave group.
Definition: VPlan.h:2152
unsigned getNumStoreOperands() const
Returns the number of stored operands of this interleave group.
Definition: VPlan.h:2172
static VPLane getLastLaneForVF(const ElementCount &VF)
Definition: VPlan.h:196
static VPLane getLaneFromEnd(const ElementCount &VF, unsigned Offset)
Definition: VPlan.h:182
static VPLane getFirstLane()
Definition: VPlan.h:180
void print(raw_ostream &O, VPSlotTracker &SlotTracker) const
Print the VPLiveOut to O.
PHINode * getPhi() const
Definition: VPlan.h:728
void fixPhi(VPlan &Plan, VPTransformState &State)
Fix the wrapped phi node.
void execute(VPTransformState &State) override
Generates phi nodes for live-outs as needed to retain SSA form.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPRecipeBase is a base class modeling a sequence of one or more output IR instructions.
Definition: VPlan.h:766
bool mayReadFromMemory() const
Returns true if the recipe may read from memory.
bool mayHaveSideEffects() const
Returns true if the recipe may have side-effects.
bool mayWriteToMemory() const
Returns true if the recipe may write to memory.
virtual InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const
Compute the cost of this recipe either using a recipe's specialized implementation or using the legacy cost model.
VPBasicBlock * getParent()
Definition: VPlan.h:791
DebugLoc getDebugLoc() const
Returns the debug location of the recipe.
Definition: VPlan.h:862
void moveBefore(VPBasicBlock &BB, iplist< VPRecipeBase >::iterator I)
Unlink this recipe and insert into BB before I.
void insertBefore(VPRecipeBase *InsertPos)
Insert an unlinked recipe into a basic block immediately before the specified recipe.
void insertAfter(VPRecipeBase *InsertPos)
Insert an unlinked Recipe into a basic block immediately after the specified Recipe.
iplist< VPRecipeBase >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
InstructionCost cost(ElementCount VF, VPCostContext &Ctx)
Return the cost of this recipe, taking into account if the cost computation should be skipped and the ForceTargetInstructionCost flag.
void removeFromParent()
This method unlinks 'this' from the containing basic block, but does not delete it.
void moveAfter(VPRecipeBase *MovePos)
Unlink this recipe from its current VPBasicBlock and insert it into the VPBasicBlock that MovePos lives in, right after MovePos.
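A sketch of the unlink/insert/erase pattern these methods support, e.g. when one single-def recipe replaces another; replaceSingleDefRecipe is hypothetical and assumes New starts out unlinked:

  #include "VPlan.h"
  using namespace llvm;

  // Sketch: both recipes are assumed to define exactly one VPValue.
  static void replaceSingleDefRecipe(VPRecipeBase &Old, VPRecipeBase &New) {
    New.insertBefore(&Old); // link New right before Old
    Old.getVPSingleValue()->replaceAllUsesWith(New.getVPSingleValue());
    Old.eraseFromParent();  // unlink Old and delete it
  }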
Class to record the LLVM IR flags carried by a recipe.
Definition: VPlan.h:968
ExactFlagsTy ExactFlags
Definition: VPlan.h:1024
FastMathFlagsTy FMFs
Definition: VPlan.h:1027
NonNegFlagsTy NonNegFlags
Definition: VPlan.h:1026
void setFlags(Instruction *I) const
Set the IR flags for I.
Definition: VPlan.h:1153
bool isInBounds() const
Definition: VPlan.h:1195
bool hasFastMathFlags() const
Returns true if the recipe has fast-math flags.
Definition: VPlan.h:1202
DisjointFlagsTy DisjointFlags
Definition: VPlan.h:1023
WrapFlagsTy WrapFlags
Definition: VPlan.h:1022
bool hasNoUnsignedWrap() const
Definition: VPlan.h:1206
void printFlags(raw_ostream &O) const
CmpInst::Predicate getPredicate() const
Definition: VPlan.h:1189
bool hasNoSignedWrap() const
Definition: VPlan.h:1212
FastMathFlags getFastMathFlags() const
void execute(VPTransformState &State) override
Generate the reduction in the loop.
VPValue * getEVL() const
The VPValue of the explicit vector length.
Definition: VPlan.h:2290
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
bool isOrdered() const
Returns true if the phi is part of an ordered reduction.
Definition: VPlan.h:2033
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the phi/select nodes.
bool isConditional() const
Return true if the in-loop reduction is conditional.
Definition: VPlan.h:2248
VPValue * getVecOp() const
The VPValue of the vector value to be reduced.
Definition: VPlan.h:2252
const RecurrenceDescriptor & getRecurrenceDescriptor() const
Return the recurrence descriptor for the in-loop reduction.
Definition: VPlan.h:2242
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getCondOp() const
The VPValue of the condition for the block.
Definition: VPlan.h:2254
bool isOrdered() const
Return true if the in-loop reduction is ordered.
Definition: VPlan.h:2246
VPValue * getChainOp() const
The VPValue of the scalar Chain being accumulated.
Definition: VPlan.h:2250
void execute(VPTransformState &State) override
Generate the reduction in the loop.
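A sketch combining the reduction queries above with the LoopUtils helpers listed further below; finalizeReduction is hypothetical, with Ordered mirroring isOrdered():

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/Transforms/Utils/LoopUtils.h"
  using namespace llvm;

  // Sketch: final reduction of the vector partial results at the loop
  // exit. Ordered (strict FP) reductions chain from Start; unordered
  // ones reduce the whole vector using a target-selected sequence.
  Value *finalizeReduction(IRBuilderBase &Builder,
                           const RecurrenceDescriptor &Desc, Value *Src,
                           Value *Start, bool Ordered) {
    if (Ordered)
      return createOrderedReduction(Builder, Desc, Src, Start);
    return createTargetReduction(Builder, Desc, Src);
  }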
VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks which form a Single-Entry-Single-Exiting subgraph of the output IR CFG.
Definition: VPlan.h:3164
const VPBlockBase * getEntry() const
Definition: VPlan.h:3203
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
unsigned getOpcode() const
Definition: VPlan.h:2373
bool shouldPack() const
Returns true if the recipe is used by a widened recipe via an intervening VPPredInstPHIRecipe.
VPScalarCastRecipe is a recipe to create scalar cast instructions.
Definition: VPlan.h:1495
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPValue * getStepValue() const
Definition: VPlan.h:2973
void execute(VPTransformState &State) override
Generate the scalarized versions of the phi node as needed by their users.
Instruction * getUnderlyingInstr()
Returns the underlying instruction.
Definition: VPlan.h:959
This class can be used to assign names to VPValues.
Definition: VPlanValue.h:449
Type * inferScalarType(const VPValue *V)
Infer the type of V. Returns the scalar type of V.
This class augments VPValue with operands which provide the inverse def-use edges from VPValue's user...
Definition: VPlanValue.h:202
void printOperands(raw_ostream &O, VPSlotTracker &SlotTracker) const
Print the operands to O.
Definition: VPlan.cpp:1462
operand_range operands()
Definition: VPlanValue.h:272
unsigned getNumOperands() const
Definition: VPlanValue.h:251
operand_iterator op_begin()
Definition: VPlanValue.h:268
VPValue * getOperand(unsigned N) const
Definition: VPlanValue.h:252
VPRecipeBase * getDefiningRecipe()
Returns the recipe defining this VPValue or nullptr if it is not defined by a recipe, i.e. if it is a live-in.
Definition: VPlan.cpp:125
void printAsOperand(raw_ostream &OS, VPSlotTracker &Tracker) const
Definition: VPlan.cpp:1458
friend class VPInstruction
Definition: VPlanValue.h:47
Value * getUnderlyingValue() const
Return the underlying Value attached to this VPValue.
Definition: VPlanValue.h:77
Value * getLiveInIRValue()
Returns the underlying IR value, if this VPValue is defined outside the scope of VPlan.
Definition: VPlanValue.h:172
user_range users()
Definition: VPlanValue.h:132
bool isDefinedOutsideVectorRegions() const
Returns true if the VPValue is defined outside any vector regions, i.e.
Definition: VPlanValue.h:186
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
Function * getCalledScalarFunction() const
Definition: VPlan.h:1568
void execute(VPTransformState &State) override
Produce a widened version of the call instruction.
operand_range arg_operands()
Definition: VPlan.h:1572
void execute(VPTransformState &State) override
Generate a canonical vector induction variable of the vector loop, with start = {<Part*VF,...
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
Type * getResultType() const
Returns the result type of the cast.
Definition: VPlan.h:1491
void execute(VPTransformState &State) override
Produce widened copies of the cast.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the gep nodes.
TruncInst * getTruncInst()
Returns the first defined value as a TruncInst if it is one, or nullptr otherwise.
Definition: VPlan.h:1844
void execute(VPTransformState &State) override
Generate the vectorized and scalarized versions of the phi node as needed by their users.
VPValue * getStepValue()
Returns the step value of the induction.
Definition: VPlan.h:1839
Type * getScalarType() const
Returns the scalar type of the induction.
Definition: VPlan.h:1858
bool isCanonical() const
Returns true if the induction is canonical, i.e.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
const InductionDescriptor & getInductionDescriptor() const
Returns the induction descriptor for the recipe.
Definition: VPlan.h:1850
bool Reverse
Whether the consecutive accessed addresses are in reverse order.
Definition: VPlan.h:2469
bool isConsecutive() const
Return whether the loaded-from / stored-to addresses are consecutive.
Definition: VPlan.h:2508
Instruction & Ingredient
Definition: VPlan.h:2463
VPValue * getMask() const
Return the mask used by this recipe.
Definition: VPlan.h:2522
VPValue * getAddr() const
Return the address accessed by this recipe.
Definition: VPlan.h:2515
bool isReverse() const
Return whether the consecutive loaded/stored addresses are in reverse order.
Definition: VPlan.h:2512
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate the phi/select nodes.
bool onlyScalarsGenerated(bool IsScalable)
Returns true if only scalar values will be generated.
void execute(VPTransformState &State) override
Generate vector values for the pointer induction.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
InstructionCost computeCost(ElementCount VF, VPCostContext &Ctx) const override
Return the cost of this VPWidenRecipe.
void execute(VPTransformState &State) override
Produce a widened instruction using the opcode and operands of the recipe, processing State.VF elements.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPlan models a candidate for vectorization, encoding various decisions taken to produce efficient output IR.
Definition: VPlan.h:3268
VPRegionBlock * getVectorLoopRegion()
Returns the VPRegionBlock of the vector loop.
Definition: VPlan.h:3470
VPCanonicalIVPHIRecipe * getCanonicalIV()
Returns the canonical induction recipe of the vector loop.
Definition: VPlan.h:3478
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:377
void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
Definition: AsmWriter.cpp:5106
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition: Value.cpp:694
bool hasName() const
Definition: Value.h:261
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
VectorBuilder & setEVL(Value *NewExplicitVectorLength)
Definition: VectorBuilder.h:82
VectorBuilder & setMask(Value *NewMask)
Definition: VectorBuilder.h:78
Base class of all SIMD vector types.
Definition: DerivedTypes.h:403
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector.
Definition: DerivedTypes.h:641
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
Definition: Type.cpp:664
static VectorType * getDoubleElementsVectorType(VectorType *VTy)
This static method returns a VectorType with twice as many elements as the input type and the same element type.
Definition: DerivedTypes.h:517
Type * getElementType() const
Definition: DerivedTypes.h:436
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
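A minimal sketch of constructing widened types from these entries; ElementCount carries the scalable bit, so one call site covers fixed and scalable vectors (widen is hypothetical):

  #include "llvm/IR/DerivedTypes.h"
  using namespace llvm;

  // Sketch: the same call produces <4 x i32> or <vscale x 4 x i32>
  // depending on Scalable.
  VectorType *widen(Type *Scalar, unsigned MinLanes, bool Scalable) {
    ElementCount EC = ElementCount::get(MinLanes, Scalable);
    return VectorType::get(Scalar, EC);
  }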
const ParentTy * getParent() const
Definition: ilist_node.h:32
self_iterator getIterator()
Definition: ilist_node.h:132
iterator erase(iterator where)
Definition: ilist.h:204
pointer remove(iterator &IT)
Definition: ilist.h:188
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1539
bool isUniformAfterVectorization(const VPValue *VPV)
Returns true if VPV is uniform after vectorization.
Definition: VPlan.h:3818
bool onlyFirstPartUsed(const VPValue *Def)
Returns true if only the first part of Def is used.
Definition: VPlan.cpp:1610
bool onlyFirstLaneUsed(const VPValue *Def)
Returns true if only the first lane of Def is used.
Definition: VPlan.cpp:1605
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
@ Offset
Definition: DWP.cpp:480
Value * createSimpleTargetReduction(IRBuilderBase &B, Value *Src, RecurKind RdxKind)
Create a target reduction of the given vector.
Definition: LoopUtils.cpp:1210
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition: STLExtras.h:2431
bool isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, int OpdIdx)
Identifies if the vector form of the intrinsic is overloaded on the type of the operand at index OpdIdx.
Value * getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF)
Return the runtime value for VF.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void interleaveComma(const Container &c, StreamT &os, UnaryFunctor each_fn)
Definition: STLExtras.h:2190
Value * concatenateVectors(IRBuilderBase &Builder, ArrayRef< Value * > Vecs)
Concatenate a list of vectors.
Align getLoadStoreAlignment(const Value *I)
A helper function that returns the alignment of load or store instruction.
Instruction * propagateMetadata(Instruction *I, ArrayRef< Value * > VL)
Specifically, let Kinds = [MD_tbaa, MD_alias_scope, MD_noalias, MD_fpmath, MD_nontemporal,...
Value * createMinMaxOp(IRBuilderBase &Builder, RecurKind RK, Value *Left, Value *Right)
Returns a Min/Max operation corresponding to MinMaxRecurrenceKind.
Definition: LoopUtils.cpp:1075
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
Constant * createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF, const InterleaveGroup< Instruction > &Group)
Create a mask that filters the members of an interleave group where there are gaps.
llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
cl::opt< bool > EnableVPlanNativePath("enable-vplan-native-path", cl::Hidden, cl::desc("Enable VPlan-native vectorization path with " "support for outer loop vectorization."))
Definition: VPlan.cpp:56
static bool isDbgInfoIntrinsic(Intrinsic::ID ID)
Check if ID corresponds to a debug info intrinsic.
llvm::SmallVector< int, 16 > createReplicatedMask(unsigned ReplicationFactor, unsigned VF)
Create a mask with replicated elements.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool isPointerTy(const Type *T)
Definition: SPIRVUtils.h:120
Value * createOrderedReduction(IRBuilderBase &B, const RecurrenceDescriptor &Desc, Value *Src, Value *Start)
Create an ordered reduction intrinsic using the given recurrence descriptor Desc.
Definition: LoopUtils.cpp:1281
Type * ToVectorTy(Type *Scalar, ElementCount EC)
A helper function for converting Scalar types to vector types.
Definition: VectorUtils.h:133
llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
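A sketch pairing the mask helper with IRBuilder's shuffle; interleaveTwo is hypothetical and assumes two fixed-width vectors of VF elements each:

  #include "llvm/Analysis/VectorUtils.h"
  #include "llvm/IR/IRBuilder.h"
  using namespace llvm;

  // Sketch: interleave two VF-element vectors into <a0,b0,a1,b1,...>
  // using the mask helper above plus a single shuffle.
  Value *interleaveTwo(IRBuilderBase &Builder, Value *A, Value *B,
                       unsigned VF) {
    SmallVector<int, 16> Mask = createInterleaveMask(VF, /*NumVecs=*/2);
    return Builder.CreateShuffleVector(A, B, Mask, "interleaved");
  }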
RecurKind
These are the kinds of recurrences that we support.
Definition: IVDescriptors.h:33
@ Mul
Product of integers.
@ Add
Sum of integers.
Value * createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF, int64_t Step)
Return a value for Step multiplied by VF.
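These helpers overlap: with Step == 1, createStepForVF materializes the same value as getRuntimeVF. A sketch under that assumption (the declarations are taken to come from the vectorizer's local VPlan.h):

  #include "VPlan.h"
  using namespace llvm;

  // Sketch: for scalable VFs both calls materialize vscale * MinLanes;
  // for fixed VFs they fold to a constant.
  void stepExample(IRBuilderBase &Builder, Type *IVTy, ElementCount VF) {
    Value *OneStep = createStepForVF(Builder, IVTy, VF, /*Step=*/1);
    Value *RTVF = getRuntimeVF(Builder, IVTy, VF); // same value as OneStep
    (void)OneStep;
    (void)RTVF;
  }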
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1886
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
bool isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx)
Identifies if the vector form of the intrinsic has a scalar operand.
Value * createTargetReduction(IRBuilderBase &B, const RecurrenceDescriptor &Desc, Value *Src, PHINode *OrigPhi=nullptr)
Create a generic target reduction using a recurrence descriptor Desc. The target is queried to determine if intrinsics or shuffle sequences are required to implement the reduction.
Definition: LoopUtils.cpp:1265
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Struct to hold various analysis needed for cost computations.
Definition: VPlan.h:737
bool skipCostComputation(Instruction *UI, bool IsVector) const
Return true if the cost for UI shouldn't be computed, e.g. because it has already been pre-computed.
InstructionCost getLegacyCost(Instruction *UI, ElementCount VF) const
Return the cost for UI with VF using the legacy cost model as fallback until computing the cost of al...
VPTypeAnalysis Types
Definition: VPlan.h:740
const TargetLibraryInfo & TLI
Definition: VPlan.h:739
const TargetTransformInfo & TTI
Definition: VPlan.h:738
SmallPtrSet< Instruction *, 8 > SkipCostComputation
Definition: VPlan.h:743
void execute(VPTransformState &State) override
Generate the phi nodes.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
VPIteration represents a single point in the iteration space of the output (vectorized and/or unrolled) IR loop.
Definition: VPlan.h:238
BasicBlock * PrevBB
The previous IR BasicBlock created or used.
Definition: VPlan.h:384
SmallDenseMap< VPBasicBlock *, BasicBlock * > VPBB2IRBB
A mapping of each VPBasicBlock to the corresponding BasicBlock.
Definition: VPlan.h:392
BasicBlock * getPreheaderBBFor(VPRecipeBase *R)
Returns the BasicBlock* mapped to the pre-header of the loop region containing R.
Definition: VPlan.cpp:361
VPTransformState holds information passed down when "executing" a VPlan, needed for generating the output IR.
Definition: VPlan.h:255
Value * get(VPValue *Def, unsigned Part, bool IsScalar=false)
Get the generated vector Value for a given VPValue Def and a given Part if IsScalar is false,...
Definition: VPlan.cpp:259
DenseMap< const SCEV *, Value * > ExpandedSCEVs
Map SCEVs to their expanded values.
Definition: VPlan.h:429
VPTypeAnalysis TypeAnalysis
VPlan-based type analysis.
Definition: VPlan.h:432
void addMetadata(Value *To, Instruction *From)
Add metadata from one instruction to another.
Definition: VPlan.cpp:374
void reset(VPValue *Def, Value *V, unsigned Part)
Reset an existing vector value for Def and a given Part.
Definition: VPlan.h:322
struct llvm::VPTransformState::CFGState CFG
void set(VPValue *Def, Value *V, unsigned Part, bool IsScalar=false)
Set the generated vector Value for a given VPValue and a given Part, if IsScalar is false.
Definition: VPlan.h:307
std::optional< VPIteration > Instance
Hold the indices to generate specific scalar instructions.
Definition: VPlan.h:267
IRBuilderBase & Builder
Hold a reference to the IRBuilder used to generate output IR code.
Definition: VPlan.h:409
bool hasScalarValue(VPValue *Def, VPIteration Instance)
Definition: VPlan.h:295
bool hasVectorValue(VPValue *Def, unsigned Part)
Definition: VPlan.h:289
ElementCount VF
The chosen Vectorization and Unroll Factors of the loop being vectorized.
Definition: VPlan.h:261
Loop * CurrentVectorLoop
The loop object for the current parent region, or nullptr.
Definition: VPlan.h:418
void setDebugLocFrom(DebugLoc DL)
Set the debug location in the builder using the debug location DL.
Definition: VPlan.cpp:385
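A schematic of the per-part execute() pattern that VPTransformState supports: read operands with get(), emit IR through Builder, publish results with set(). The recipe-like function below is a hypothetical stand-in, not code from this file, and assumes a single-def recipe with two operands:

  #include "VPlan.h"
  using namespace llvm;

  // Sketch: widen a binary op once per unroll part.
  static void executeWidenedBinOp(VPRecipeBase &R, unsigned Opcode,
                                  VPTransformState &State) {
    State.setDebugLocFrom(R.getDebugLoc());
    for (unsigned Part = 0; Part < State.UF; ++Part) {
      Value *A = State.get(R.getOperand(0), Part);
      Value *B = State.get(R.getOperand(1), Part);
      Value *V = State.Builder.CreateBinOp(
          static_cast<Instruction::BinaryOps>(Opcode), A, B);
      State.set(R.getVPSingleValue(), V, Part); // record the wide result
    }
  }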
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate a wide load or gather.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
bool isInvariantCond() const
Definition: VPlan.h:1615
VPValue * getCond() const
Definition: VPlan.h:1611
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Produce a widened version of the select instruction.
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.
void execute(VPTransformState &State) override
Generate a wide store or scatter.
VPValue * getStoredValue() const
Return the value stored by this recipe.
Definition: VPlan.h:2628
void print(raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const override
Print the recipe.