LLVM 22.0.0git
LoopVectorize.cpp
Go to the documentation of this file.
1//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10// and generates target-independent LLVM-IR.
11// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12// of instructions in order to estimate the profitability of vectorization.
13//
14// The loop vectorizer combines consecutive loop iterations into a single
15// 'wide' iteration. After this transformation the index is incremented
16// by the SIMD vector width, and not by one.
17//
18// This pass has three parts:
19// 1. The main loop pass that drives the different parts.
20// 2. LoopVectorizationLegality - A unit that checks for the legality
21// of the vectorization.
22// 3. InnerLoopVectorizer - A unit that performs the actual
23// widening of instructions.
24// 4. LoopVectorizationCostModel - A unit that checks for the profitability
25// of vectorization. It decides on the optimal vector width, which
26// can be one, if vectorization is not profitable.
27//
28// There is a development effort going on to migrate loop vectorizer to the
29// VPlan infrastructure and to introduce outer loop vectorization support (see
30// docs/VectorizationPlan.rst and
31// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32// purpose, we temporarily introduced the VPlan-native vectorization path: an
33// alternative vectorization path that is natively implemented on top of the
34// VPlan infrastructure. See EnableVPlanNativePath for enabling.
35//
36//===----------------------------------------------------------------------===//
37//
38// The reduction-variable vectorization is based on the paper:
39// D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40//
41// Variable uniformity checks are inspired by:
42// Karrenberg, R. and Hack, S. Whole Function Vectorization.
43//
44// The interleaved access vectorization is based on the paper:
45// Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
46// Data for SIMD
47//
48// Other ideas/concepts are from:
49// A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50//
51// S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
52// Vectorizing Compilers.
53//
54//===----------------------------------------------------------------------===//
55
58#include "VPRecipeBuilder.h"
59#include "VPlan.h"
60#include "VPlanAnalysis.h"
61#include "VPlanCFG.h"
62#include "VPlanHelpers.h"
63#include "VPlanPatternMatch.h"
64#include "VPlanTransforms.h"
65#include "VPlanUtils.h"
66#include "VPlanVerifier.h"
67#include "llvm/ADT/APInt.h"
68#include "llvm/ADT/ArrayRef.h"
69#include "llvm/ADT/DenseMap.h"
71#include "llvm/ADT/Hashing.h"
72#include "llvm/ADT/MapVector.h"
73#include "llvm/ADT/STLExtras.h"
76#include "llvm/ADT/Statistic.h"
77#include "llvm/ADT/StringRef.h"
78#include "llvm/ADT/Twine.h"
79#include "llvm/ADT/TypeSwitch.h"
84#include "llvm/Analysis/CFG.h"
101#include "llvm/IR/Attributes.h"
102#include "llvm/IR/BasicBlock.h"
103#include "llvm/IR/CFG.h"
104#include "llvm/IR/Constant.h"
105#include "llvm/IR/Constants.h"
106#include "llvm/IR/DataLayout.h"
107#include "llvm/IR/DebugInfo.h"
108#include "llvm/IR/DebugLoc.h"
109#include "llvm/IR/DerivedTypes.h"
111#include "llvm/IR/Dominators.h"
112#include "llvm/IR/Function.h"
113#include "llvm/IR/IRBuilder.h"
114#include "llvm/IR/InstrTypes.h"
115#include "llvm/IR/Instruction.h"
116#include "llvm/IR/Instructions.h"
118#include "llvm/IR/Intrinsics.h"
119#include "llvm/IR/MDBuilder.h"
120#include "llvm/IR/Metadata.h"
121#include "llvm/IR/Module.h"
122#include "llvm/IR/Operator.h"
123#include "llvm/IR/PatternMatch.h"
125#include "llvm/IR/Type.h"
126#include "llvm/IR/Use.h"
127#include "llvm/IR/User.h"
128#include "llvm/IR/Value.h"
129#include "llvm/IR/Verifier.h"
130#include "llvm/Support/Casting.h"
132#include "llvm/Support/Debug.h"
147#include <algorithm>
148#include <cassert>
149#include <cstdint>
150#include <functional>
151#include <iterator>
152#include <limits>
153#include <memory>
154#include <string>
155#include <tuple>
156#include <utility>
157
158using namespace llvm;
159using namespace SCEVPatternMatch;
160
161#define LV_NAME "loop-vectorize"
162#define DEBUG_TYPE LV_NAME
163
164#ifndef NDEBUG
165const char VerboseDebug[] = DEBUG_TYPE "-verbose";
166#endif
167
168STATISTIC(LoopsVectorized, "Number of loops vectorized");
169STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
170STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
171STATISTIC(LoopsEarlyExitVectorized, "Number of early exit loops vectorized");
172
174 "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
175 cl::desc("Enable vectorization of epilogue loops."));
176
178 "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
179 cl::desc("When epilogue vectorization is enabled, and a value greater than "
180 "1 is specified, forces the given VF for all applicable epilogue "
181 "loops."));
182
184 "epilogue-vectorization-minimum-VF", cl::Hidden,
185 cl::desc("Only loops with vectorization factor equal to or larger than "
186 "the specified value are considered for epilogue vectorization."));
187
188/// Loops with a known constant trip count below this number are vectorized only
189/// if no scalar iteration overheads are incurred.
191 "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
192 cl::desc("Loops with a constant trip count that is smaller than this "
193 "value are vectorized only if no scalar iteration overheads "
194 "are incurred."));
195
197 "vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
198 cl::desc("The maximum allowed number of runtime memory checks"));
199
200// Option prefer-predicate-over-epilogue indicates that an epilogue is undesired,
201// that predication is preferred, and this lists all options. I.e., the
202// vectorizer will try to fold the tail-loop (epilogue) into the vector body
203// and predicate the instructions accordingly. If tail-folding fails, there are
204// different fallback strategies depending on these values:
211} // namespace PreferPredicateTy
212
214 "prefer-predicate-over-epilogue",
217 cl::desc("Tail-folding and predication preferences over creating a scalar "
218 "epilogue loop."),
220 "scalar-epilogue",
221 "Don't tail-predicate loops, create scalar epilogue"),
223 "predicate-else-scalar-epilogue",
224 "prefer tail-folding, create scalar epilogue if tail "
225 "folding fails."),
227 "predicate-dont-vectorize",
228 "prefers tail-folding, don't attempt vectorization if "
229 "tail-folding fails.")));
230
232 "force-tail-folding-style", cl::desc("Force the tail folding style"),
235 clEnumValN(TailFoldingStyle::None, "none", "Disable tail folding"),
238 "Create lane mask for data only, using active.lane.mask intrinsic"),
240 "data-without-lane-mask",
241 "Create lane mask with compare/stepvector"),
243 "Create lane mask using active.lane.mask intrinsic, and use "
244 "it for both data and control flow"),
246 "data-and-control-without-rt-check",
247 "Similar to data-and-control, but remove the runtime check"),
249 "Use predicated EVL instructions for tail folding. If EVL "
250 "is unsupported, fallback to data-without-lane-mask.")));
251
253 "enable-wide-lane-mask", cl::init(false), cl::Hidden,
254 cl::desc("Enable use of wide lane masks when used for control flow in "
255 "tail-folded loops"));
256
258 "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
259 cl::desc("Maximize bandwidth when selecting vectorization factor which "
260 "will be determined by the smallest type in loop."));
261
263 "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
264 cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
265
266/// An interleave-group may need masking if it resides in a block that needs
267/// predication, or in order to mask away gaps.
269 "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
270 cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
271
273 "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
274 cl::desc("A flag that overrides the target's number of scalar registers."));
275
277 "force-target-num-vector-regs", cl::init(0), cl::Hidden,
278 cl::desc("A flag that overrides the target's number of vector registers."));
279
281 "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
282 cl::desc("A flag that overrides the target's max interleave factor for "
283 "scalar loops."));
284
286 "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
287 cl::desc("A flag that overrides the target's max interleave factor for "
288 "vectorized loops."));
289
291 "force-target-instruction-cost", cl::init(0), cl::Hidden,
292 cl::desc("A flag that overrides the target's expected cost for "
293 "an instruction to a single constant value. Mostly "
294 "useful for getting consistent testing."));
295
297 "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
298 cl::desc(
299 "Pretend that scalable vectors are supported, even if the target does "
300 "not support them. This flag should only be used for testing."));
301
303 "small-loop-cost", cl::init(20), cl::Hidden,
304 cl::desc(
305 "The cost of a loop that is considered 'small' by the interleaver."));
306
308 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
309 cl::desc("Enable the use of the block frequency analysis to access PGO "
310 "heuristics minimizing code growth in cold regions and being more "
311 "aggressive in hot regions."));
312
313// Runtime interleave loops for load/store throughput.
315 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
316 cl::desc(
317 "Enable runtime interleaving until load/store ports are saturated"));
318
319/// The number of stores in a loop that are allowed to need predication.
321 "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
322 cl::desc("Max number of stores to be predicated behind an if."));
323
325 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
326 cl::desc("Count the induction variable only once when interleaving"));
327
329 "enable-cond-stores-vec", cl::init(true), cl::Hidden,
330 cl::desc("Enable if predication of stores during vectorization."));
331
333 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
334 cl::desc("The maximum interleave count to use when interleaving a scalar "
335 "reduction in a nested loop."));
336
337static cl::opt<bool>
338 PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
340 cl::desc("Prefer in-loop vector reductions, "
341 "overriding the targets preference."));
342
344 "force-ordered-reductions", cl::init(false), cl::Hidden,
345 cl::desc("Enable the vectorisation of loops with in-order (strict) "
346 "FP reductions"));
347
349 "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
350 cl::desc(
351 "Prefer predicating a reduction operation over an after loop select."));
352
354 "enable-vplan-native-path", cl::Hidden,
355 cl::desc("Enable VPlan-native vectorization path with "
356 "support for outer loop vectorization."));
357
359 llvm::VerifyEachVPlan("vplan-verify-each",
360#ifdef EXPENSIVE_CHECKS
361 cl::init(true),
362#else
363 cl::init(false),
364#endif
366 cl::desc("Verfiy VPlans after VPlan transforms."));
367
368// This flag enables the stress testing of the VPlan H-CFG construction in the
369// VPlan-native vectorization path. It must be used in conjuction with
370// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
371// verification of the H-CFGs built.
373 "vplan-build-stress-test", cl::init(false), cl::Hidden,
374 cl::desc(
375 "Build VPlan for every supported loop nest in the function and bail "
376 "out right after the build (stress test the VPlan H-CFG construction "
377 "in the VPlan-native vectorization path)."));
378
380 "interleave-loops", cl::init(true), cl::Hidden,
381 cl::desc("Enable loop interleaving in Loop vectorization passes"));
383 "vectorize-loops", cl::init(true), cl::Hidden,
384 cl::desc("Run the Loop vectorization passes"));
385
387 "force-widen-divrem-via-safe-divisor", cl::Hidden,
388 cl::desc(
389 "Override cost based safe divisor widening for div/rem instructions"));
390
392 "vectorizer-maximize-bandwidth-for-vector-calls", cl::init(true),
394 cl::desc("Try wider VFs if they enable the use of vector variants"));
395
397 "enable-early-exit-vectorization", cl::init(true), cl::Hidden,
398 cl::desc(
399 "Enable vectorization of early exit loops with uncountable exits."));
400
402 "vectorizer-consider-reg-pressure", cl::init(false), cl::Hidden,
403 cl::desc("Discard VFs if their register pressure is too high."));
404
405// Likelyhood of bypassing the vectorized loop because there are zero trips left
406// after prolog. See `emitIterationCountCheck`.
407static constexpr uint32_t MinItersBypassWeights[] = {1, 127};
408
409/// A helper function that returns true if the given type is irregular. The
410/// type is irregular if its allocated size doesn't equal the store size of an
411/// element of the corresponding vector type.
412static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
413 // Determine if an array of N elements of type Ty is "bitcast compatible"
414 // with a <N x Ty> vector.
415 // This is only true if there is no padding between the array elements.
416 return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
417}
418
419/// A version of ScalarEvolution::getSmallConstantTripCount that returns an
420/// ElementCount to include loops whose trip count is a function of vscale.
422 const Loop *L) {
423 if (unsigned ExpectedTC = SE->getSmallConstantTripCount(L))
424 return ElementCount::getFixed(ExpectedTC);
425
426 const SCEV *BTC = SE->getBackedgeTakenCount(L);
428 return ElementCount::getFixed(0);
429
430 const SCEV *ExitCount = SE->getTripCountFromExitCount(BTC, BTC->getType(), L);
431 if (isa<SCEVVScale>(ExitCount))
433
434 const APInt *Scale;
435 if (match(ExitCount, m_scev_Mul(m_scev_APInt(Scale), m_SCEVVScale())))
436 if (cast<SCEVMulExpr>(ExitCount)->hasNoUnsignedWrap())
437 if (Scale->getActiveBits() <= 32)
439
440 return ElementCount::getFixed(0);
441}
442
443/// Returns "best known" trip count, which is either a valid positive trip count
444/// or std::nullopt when an estimate cannot be made (including when the trip
445/// count would overflow), for the specified loop \p L as defined by the
446/// following procedure:
447/// 1) Returns exact trip count if it is known.
448/// 2) Returns expected trip count according to profile data if any.
449/// 3) Returns upper bound estimate if known, and if \p CanUseConstantMax.
450/// 4) Returns std::nullopt if all of the above failed.
451static std::optional<ElementCount>
453 bool CanUseConstantMax = true) {
454 // Check if exact trip count is known.
455 if (auto ExpectedTC = getSmallConstantTripCount(PSE.getSE(), L))
456 return ExpectedTC;
457
458 // Check if there is an expected trip count available from profile data.
460 if (auto EstimatedTC = getLoopEstimatedTripCount(L))
461 return ElementCount::getFixed(*EstimatedTC);
462
463 if (!CanUseConstantMax)
464 return std::nullopt;
465
466 // Check if upper bound estimate is known.
467 if (unsigned ExpectedTC = PSE.getSmallConstantMaxTripCount())
468 return ElementCount::getFixed(ExpectedTC);
469
470 return std::nullopt;
471}
472
473namespace {
474// Forward declare GeneratedRTChecks.
475class GeneratedRTChecks;
476
477using SCEV2ValueTy = DenseMap<const SCEV *, Value *>;
478} // namespace
479
480namespace llvm {
481
483
484/// InnerLoopVectorizer vectorizes loops which contain only one basic
485/// block to a specified vectorization factor (VF).
486/// This class performs the widening of scalars into vectors, or multiple
487/// scalars. This class also implements the following features:
488/// * It inserts an epilogue loop for handling loops that don't have iteration
489/// counts that are known to be a multiple of the vectorization factor.
490/// * It handles the code generation for reduction variables.
491/// * Scalarization (implementation using scalars) of un-vectorizable
492/// instructions.
493/// InnerLoopVectorizer does not perform any vectorization-legality
494/// checks, and relies on the caller to check for the different legality
495/// aspects. The InnerLoopVectorizer relies on the
496/// LoopVectorizationLegality class to provide information about the induction
497/// and reduction variables that were found to a given vectorization factor.
499public:
503 ElementCount VecWidth, unsigned UnrollFactor,
505 GeneratedRTChecks &RTChecks, VPlan &Plan)
506 : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TTI(TTI), AC(AC),
507 VF(VecWidth), UF(UnrollFactor), Builder(PSE.getSE()->getContext()),
510 Plan.getVectorLoopRegion()->getSinglePredecessor())) {}
511
512 virtual ~InnerLoopVectorizer() = default;
513
514 /// Creates a basic block for the scalar preheader. Both
515 /// EpilogueVectorizerMainLoop and EpilogueVectorizerEpilogueLoop overwrite
516 /// the method to create additional blocks and checks needed for epilogue
517 /// vectorization.
519
520 /// Fix the vectorized code, taking care of header phi's, and more.
522
523 /// Fix the non-induction PHIs in \p Plan.
525
526 /// Returns the original loop trip count.
527 Value *getTripCount() const { return TripCount; }
528
529 /// Used to set the trip count after ILV's construction and after the
530 /// preheader block has been executed. Note that this always holds the trip
531 /// count of the original loop for both main loop and epilogue vectorization.
532 void setTripCount(Value *TC) { TripCount = TC; }
533
534protected:
536
537 /// Create and return a new IR basic block for the scalar preheader whose name
538 /// is prefixed with \p Prefix.
540
541 /// Allow subclasses to override and print debug traces before/after vplan
542 /// execution, when trace information is requested.
543 virtual void printDebugTracesAtStart() {}
544 virtual void printDebugTracesAtEnd() {}
545
546 /// The original loop.
548
549 /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
550 /// dynamic knowledge to simplify SCEV expressions and converts them to a
551 /// more usable form.
553
554 /// Loop Info.
556
557 /// Dominator Tree.
559
560 /// Target Transform Info.
562
563 /// Assumption Cache.
565
566 /// The vectorization SIMD factor to use. Each vector will have this many
567 /// vector elements.
569
570 /// The vectorization unroll factor to use. Each scalar is vectorized to this
571 /// many different vector instructions.
572 unsigned UF;
573
574 /// The builder that we use
576
577 // --- Vectorization state ---
578
579 /// Trip count of the original loop.
580 Value *TripCount = nullptr;
581
582 /// The profitablity analysis.
584
585 /// Structure to hold information about generated runtime checks, responsible
586 /// for cleaning the checks, if vectorization turns out unprofitable.
587 GeneratedRTChecks &RTChecks;
588
590
591 /// The vector preheader block of \p Plan, used as target for check blocks
592 /// introduced during skeleton creation.
594};
595
596/// Encapsulate information regarding vectorization of a loop and its epilogue.
597/// This information is meant to be updated and used across two stages of
598/// epilogue vectorization.
601 unsigned MainLoopUF = 0;
603 unsigned EpilogueUF = 0;
606 Value *TripCount = nullptr;
609
611 ElementCount EVF, unsigned EUF,
613 : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF),
615 assert(EUF == 1 &&
616 "A high UF for the epilogue loop is likely not beneficial.");
617 }
618};
619
620/// An extension of the inner loop vectorizer that creates a skeleton for a
621/// vectorized loop that has its epilogue (residual) also vectorized.
622/// The idea is to run the vplan on a given loop twice, firstly to setup the
623/// skeleton and vectorize the main loop, and secondly to complete the skeleton
624/// from the first step and vectorize the epilogue. This is achieved by
625/// deriving two concrete strategy classes from this base class and invoking
626/// them in succession from the loop vectorizer planner.
628public:
638
639 /// Holds and updates state information required to vectorize the main loop
640 /// and its epilogue in two separate passes. This setup helps us avoid
641 /// regenerating and recomputing runtime safety checks. It also helps us to
642 /// shorten the iteration-count-check path length for the cases where the
643 /// iteration count of the loop is so small that the main vector loop is
644 /// completely skipped.
646
647protected:
649};
650
651/// A specialized derived class of inner loop vectorizer that performs
652/// vectorization of *main* loops in the process of vectorizing loops and their
653/// epilogues.
655public:
666 /// Implements the interface for creating a vectorized skeleton using the
667 /// *main loop* strategy (i.e., the first pass of VPlan execution).
669
670protected:
671 /// Introduces a new VPIRBasicBlock for \p CheckIRBB to Plan between the
672 /// vector preheader and its predecessor, also connecting the new block to the
673 /// scalar preheader.
674 void introduceCheckBlockInVPlan(BasicBlock *CheckIRBB);
675
676 // Create a check to see if the main vector loop should be executed
678 unsigned UF) const;
679
680 /// Emits an iteration count bypass check once for the main loop (when \p
681 /// ForEpilogue is false) and once for the epilogue loop (when \p
682 /// ForEpilogue is true).
684 bool ForEpilogue);
685 void printDebugTracesAtStart() override;
686 void printDebugTracesAtEnd() override;
687};
688
689// A specialized derived class of inner loop vectorizer that performs
690// vectorization of *epilogue* loops in the process of vectorizing loops and
691// their epilogues.
693public:
700 GeneratedRTChecks &Checks, VPlan &Plan)
702 Checks, Plan, EPI.EpilogueVF,
703 EPI.EpilogueVF, EPI.EpilogueUF) {}
704 /// Implements the interface for creating a vectorized skeleton using the
705 /// *epilogue loop* strategy (i.e., the second pass of VPlan execution).
707
708protected:
709 void printDebugTracesAtStart() override;
710 void printDebugTracesAtEnd() override;
711};
712} // end namespace llvm
713
714/// Look for a meaningful debug location on the instruction or its operands.
716 if (!I)
717 return DebugLoc::getUnknown();
718
720 if (I->getDebugLoc() != Empty)
721 return I->getDebugLoc();
722
723 for (Use &Op : I->operands()) {
724 if (Instruction *OpInst = dyn_cast<Instruction>(Op))
725 if (OpInst->getDebugLoc() != Empty)
726 return OpInst->getDebugLoc();
727 }
728
729 return I->getDebugLoc();
730}
731
732/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
733/// is passed, the message relates to that particular instruction.
734#ifndef NDEBUG
735static void debugVectorizationMessage(const StringRef Prefix,
736 const StringRef DebugMsg,
737 Instruction *I) {
738 dbgs() << "LV: " << Prefix << DebugMsg;
739 if (I != nullptr)
740 dbgs() << " " << *I;
741 else
742 dbgs() << '.';
743 dbgs() << '\n';
744}
745#endif
746
747/// Create an analysis remark that explains why vectorization failed
748///
749/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
750/// RemarkName is the identifier for the remark. If \p I is passed it is an
751/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
752/// the location of the remark. If \p DL is passed, use it as debug location for
753/// the remark. \return the remark object that can be streamed to.
754static OptimizationRemarkAnalysis
755createLVAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop,
756 Instruction *I, DebugLoc DL = {}) {
757 BasicBlock *CodeRegion = I ? I->getParent() : TheLoop->getHeader();
758 // If debug location is attached to the instruction, use it. Otherwise if DL
759 // was not provided, use the loop's.
760 if (I && I->getDebugLoc())
761 DL = I->getDebugLoc();
762 else if (!DL)
763 DL = TheLoop->getStartLoc();
764
765 return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
766}
767
768namespace llvm {
769
770/// Return a value for Step multiplied by VF.
772 int64_t Step) {
773 assert(Ty->isIntegerTy() && "Expected an integer step");
774 ElementCount VFxStep = VF.multiplyCoefficientBy(Step);
775 assert(isPowerOf2_64(VF.getKnownMinValue()) && "must pass power-of-2 VF");
776 if (VF.isScalable() && isPowerOf2_64(Step)) {
777 return B.CreateShl(
778 B.CreateVScale(Ty),
779 ConstantInt::get(Ty, Log2_64(VFxStep.getKnownMinValue())), "", true);
780 }
781 return B.CreateElementCount(Ty, VFxStep);
782}
783
784/// Return the runtime value for VF.
786 return B.CreateElementCount(Ty, VF);
787}
788
790 const StringRef OREMsg, const StringRef ORETag,
791 OptimizationRemarkEmitter *ORE, Loop *TheLoop,
792 Instruction *I) {
793 LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
794 LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
795 ORE->emit(
796 createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
797 << "loop not vectorized: " << OREMsg);
798}
799
800/// Reports an informative message: print \p Msg for debugging purposes as well
801/// as an optimization remark. Uses either \p I as location of the remark, or
802/// otherwise \p TheLoop. If \p DL is passed, use it as debug location for the
803/// remark. If \p DL is passed, use it as debug location for the remark.
804static void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
806 Loop *TheLoop, Instruction *I = nullptr,
807 DebugLoc DL = {}) {
809 LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
810 ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop,
811 I, DL)
812 << Msg);
813}
814
815/// Report successful vectorization of the loop. In case an outer loop is
816/// vectorized, prepend "outer" to the vectorization remark.
818 VectorizationFactor VF, unsigned IC) {
820 "Vectorizing: ", TheLoop->isInnermost() ? "innermost loop" : "outer loop",
821 nullptr));
822 StringRef LoopType = TheLoop->isInnermost() ? "" : "outer ";
823 ORE->emit([&]() {
824 return OptimizationRemark(LV_NAME, "Vectorized", TheLoop->getStartLoc(),
825 TheLoop->getHeader())
826 << "vectorized " << LoopType << "loop (vectorization width: "
827 << ore::NV("VectorizationFactor", VF.Width)
828 << ", interleaved count: " << ore::NV("InterleaveCount", IC) << ")";
829 });
830}
831
832} // end namespace llvm
833
834namespace llvm {
835
836// Loop vectorization cost-model hints how the scalar epilogue loop should be
837// lowered.
839
840 // The default: allowing scalar epilogues.
842
843 // Vectorization with OptForSize: don't allow epilogues.
845
846 // A special case of vectorisation with OptForSize: loops with a very small
847 // trip count are considered for vectorization under OptForSize, thereby
848 // making sure the cost of their loop body is dominant, free of runtime
849 // guards and scalar iteration overheads.
851
852 // Loop hint predicate indicating an epilogue is undesired.
854
855 // Directive indicating we must either tail fold or not vectorize
857};
858
859/// LoopVectorizationCostModel - estimates the expected speedups due to
860/// vectorization.
861/// In many cases vectorization is not profitable. This can happen because of
862/// a number of reasons. In this class we mainly attempt to predict the
863/// expected speedup/slowdowns due to the supported instruction set. We use the
864/// TargetTransformInfo to query the different backends for the cost of
865/// different operations.
868
869public:
879 : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
880 TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
882 if (TTI.supportsScalableVectors() || ForceTargetSupportsScalableVectors)
883 initializeVScaleForTuning();
885 }
886
887 /// \return An upper bound for the vectorization factors (both fixed and
888 /// scalable). If the factors are 0, vectorization and interleaving should be
889 /// avoided up front.
890 FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);
891
892 /// \return True if runtime checks are required for vectorization, and false
893 /// otherwise.
894 bool runtimeChecksRequired();
895
896 /// Setup cost-based decisions for user vectorization factor.
897 /// \return true if the UserVF is a feasible VF to be chosen.
900 return expectedCost(UserVF).isValid();
901 }
902
903 /// \return True if maximizing vector bandwidth is enabled by the target or
904 /// user options, for the given register kind.
905 bool useMaxBandwidth(TargetTransformInfo::RegisterKind RegKind);
906
907 /// \return True if register pressure should be considered for the given VF.
908 bool shouldConsiderRegPressureForVF(ElementCount VF);
909
910 /// \return The size (in bits) of the smallest and widest types in the code
911 /// that needs to be vectorized. We ignore values that remain scalar such as
912 /// 64 bit loop indices.
913 std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
914
915 /// Memory access instruction may be vectorized in more than one way.
916 /// Form of instruction after vectorization depends on cost.
917 /// This function takes cost-based decisions for Load/Store instructions
918 /// and collects them in a map. This decisions map is used for building
919 /// the lists of loop-uniform and loop-scalar instructions.
920 /// The calculated cost is saved with widening decision in order to
921 /// avoid redundant calculations.
922 void setCostBasedWideningDecision(ElementCount VF);
923
924 /// A call may be vectorized in different ways depending on whether we have
925 /// vectorized variants available and whether the target supports masking.
926 /// This function analyzes all calls in the function at the supplied VF,
927 /// makes a decision based on the costs of available options, and stores that
928 /// decision in a map for use in planning and plan execution.
929 void setVectorizedCallDecision(ElementCount VF);
930
931 /// Collect values we want to ignore in the cost model.
932 void collectValuesToIgnore();
933
934 /// Collect all element types in the loop for which widening is needed.
935 void collectElementTypesForWidening();
936
937 /// Split reductions into those that happen in the loop, and those that happen
938 /// outside. In loop reductions are collected into InLoopReductions.
939 void collectInLoopReductions();
940
941 /// Returns true if we should use strict in-order reductions for the given
942 /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
943 /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
944 /// of FP operations.
945 bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) const {
946 return !Hints->allowReordering() && RdxDesc.isOrdered();
947 }
948
949 /// \returns The smallest bitwidth each instruction can be represented with.
950 /// The vector equivalents of these instructions should be truncated to this
951 /// type.
953 return MinBWs;
954 }
955
956 /// \returns True if it is more profitable to scalarize instruction \p I for
957 /// vectorization factor \p VF.
959 assert(VF.isVector() &&
960 "Profitable to scalarize relevant only for VF > 1.");
961 assert(
962 TheLoop->isInnermost() &&
963 "cost-model should not be used for outer loops (in VPlan-native path)");
964
965 auto Scalars = InstsToScalarize.find(VF);
966 assert(Scalars != InstsToScalarize.end() &&
967 "VF not yet analyzed for scalarization profitability");
968 return Scalars->second.contains(I);
969 }
970
971 /// Returns true if \p I is known to be uniform after vectorization.
973 assert(
974 TheLoop->isInnermost() &&
975 "cost-model should not be used for outer loops (in VPlan-native path)");
976 // Pseudo probe needs to be duplicated for each unrolled iteration and
977 // vector lane so that profiled loop trip count can be accurately
978 // accumulated instead of being under counted.
980 return false;
981
982 if (VF.isScalar())
983 return true;
984
985 auto UniformsPerVF = Uniforms.find(VF);
986 assert(UniformsPerVF != Uniforms.end() &&
987 "VF not yet analyzed for uniformity");
988 return UniformsPerVF->second.count(I);
989 }
990
991 /// Returns true if \p I is known to be scalar after vectorization.
993 assert(
994 TheLoop->isInnermost() &&
995 "cost-model should not be used for outer loops (in VPlan-native path)");
996 if (VF.isScalar())
997 return true;
998
999 auto ScalarsPerVF = Scalars.find(VF);
1000 assert(ScalarsPerVF != Scalars.end() &&
1001 "Scalar values are not calculated for VF");
1002 return ScalarsPerVF->second.count(I);
1003 }
1004
1005 /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1006 /// for vectorization factor \p VF.
1008 // Truncs must truncate at most to their destination type.
1009 if (isa_and_nonnull<TruncInst>(I) && MinBWs.contains(I) &&
1010 I->getType()->getScalarSizeInBits() < MinBWs.lookup(I))
1011 return false;
1012 return VF.isVector() && MinBWs.contains(I) &&
1013 !isProfitableToScalarize(I, VF) &&
1015 }
1016
1017 /// Decision that was taken during cost calculation for memory instruction.
1020 CM_Widen, // For consecutive accesses with stride +1.
1021 CM_Widen_Reverse, // For consecutive accesses with stride -1.
1027 };
1028
1029 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1030 /// instruction \p I and vector width \p VF.
1033 assert(VF.isVector() && "Expected VF >=2");
1034 WideningDecisions[{I, VF}] = {W, Cost};
1035 }
1036
1037 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1038 /// interleaving group \p Grp and vector width \p VF.
1042 assert(VF.isVector() && "Expected VF >=2");
1043 /// Broadcast this decicion to all instructions inside the group.
1044 /// When interleaving, the cost will only be assigned one instruction, the
1045 /// insert position. For other cases, add the appropriate fraction of the
1046 /// total cost to each instruction. This ensures accurate costs are used,
1047 /// even if the insert position instruction is not used.
1048 InstructionCost InsertPosCost = Cost;
1049 InstructionCost OtherMemberCost = 0;
1050 if (W != CM_Interleave)
1051 OtherMemberCost = InsertPosCost = Cost / Grp->getNumMembers();
1052 ;
1053 for (unsigned Idx = 0; Idx < Grp->getFactor(); ++Idx) {
1054 if (auto *I = Grp->getMember(Idx)) {
1055 if (Grp->getInsertPos() == I)
1056 WideningDecisions[{I, VF}] = {W, InsertPosCost};
1057 else
1058 WideningDecisions[{I, VF}] = {W, OtherMemberCost};
1059 }
1060 }
1061 }
1062
1063 /// Return the cost model decision for the given instruction \p I and vector
1064 /// width \p VF. Return CM_Unknown if this instruction did not pass
1065 /// through the cost modeling.
1067 assert(VF.isVector() && "Expected VF to be a vector VF");
1068 assert(
1069 TheLoop->isInnermost() &&
1070 "cost-model should not be used for outer loops (in VPlan-native path)");
1071
1072 std::pair<Instruction *, ElementCount> InstOnVF(I, VF);
1073 auto Itr = WideningDecisions.find(InstOnVF);
1074 if (Itr == WideningDecisions.end())
1075 return CM_Unknown;
1076 return Itr->second.first;
1077 }
1078
1079 /// Return the vectorization cost for the given instruction \p I and vector
1080 /// width \p VF.
1082 assert(VF.isVector() && "Expected VF >=2");
1083 std::pair<Instruction *, ElementCount> InstOnVF(I, VF);
1084 assert(WideningDecisions.contains(InstOnVF) &&
1085 "The cost is not calculated");
1086 return WideningDecisions[InstOnVF].second;
1087 }
1088
1096
1098 Function *Variant, Intrinsic::ID IID,
1099 std::optional<unsigned> MaskPos,
1101 assert(!VF.isScalar() && "Expected vector VF");
1102 CallWideningDecisions[{CI, VF}] = {Kind, Variant, IID, MaskPos, Cost};
1103 }
1104
1106 ElementCount VF) const {
1107 assert(!VF.isScalar() && "Expected vector VF");
1108 auto I = CallWideningDecisions.find({CI, VF});
1109 if (I == CallWideningDecisions.end())
1110 return {CM_Unknown, nullptr, Intrinsic::not_intrinsic, std::nullopt, 0};
1111 return I->second;
1112 }
1113
1114 /// Return True if instruction \p I is an optimizable truncate whose operand
1115 /// is an induction variable. Such a truncate will be removed by adding a new
1116 /// induction variable with the destination type.
1118 // If the instruction is not a truncate, return false.
1119 auto *Trunc = dyn_cast<TruncInst>(I);
1120 if (!Trunc)
1121 return false;
1122
1123 // Get the source and destination types of the truncate.
1124 Type *SrcTy = toVectorTy(Trunc->getSrcTy(), VF);
1125 Type *DestTy = toVectorTy(Trunc->getDestTy(), VF);
1126
1127 // If the truncate is free for the given types, return false. Replacing a
1128 // free truncate with an induction variable would add an induction variable
1129 // update instruction to each iteration of the loop. We exclude from this
1130 // check the primary induction variable since it will need an update
1131 // instruction regardless.
1132 Value *Op = Trunc->getOperand(0);
1133 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1134 return false;
1135
1136 // If the truncated value is not an induction variable, return false.
1137 return Legal->isInductionPhi(Op);
1138 }
1139
1140 /// Collects the instructions to scalarize for each predicated instruction in
1141 /// the loop.
1142 void collectInstsToScalarize(ElementCount VF);
1143
1144 /// Collect values that will not be widened, including Uniforms, Scalars, and
1145 /// Instructions to Scalarize for the given \p VF.
1146 /// The sets depend on CM decision for Load/Store instructions
1147 /// that may be vectorized as interleave, gather-scatter or scalarized.
1148 /// Also make a decision on what to do about call instructions in the loop
1149 /// at that VF -- scalarize, call a known vector routine, or call a
1150 /// vector intrinsic.
1152 // Do the analysis once.
1153 if (VF.isScalar() || Uniforms.contains(VF))
1154 return;
1156 collectLoopUniforms(VF);
1158 collectLoopScalars(VF);
1160 }
1161
1162 /// Returns true if the target machine supports masked store operation
1163 /// for the given \p DataType and kind of access to \p Ptr.
1164 bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment,
1165 unsigned AddressSpace) const {
1166 return Legal->isConsecutivePtr(DataType, Ptr) &&
1167 TTI.isLegalMaskedStore(DataType, Alignment, AddressSpace);
1168 }
1169
1170 /// Returns true if the target machine supports masked load operation
1171 /// for the given \p DataType and kind of access to \p Ptr.
1172 bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment,
1173 unsigned AddressSpace) const {
1174 return Legal->isConsecutivePtr(DataType, Ptr) &&
1175 TTI.isLegalMaskedLoad(DataType, Alignment, AddressSpace);
1176 }
1177
1178 /// Returns true if the target machine can represent \p V as a masked gather
1179 /// or scatter operation.
1181 bool LI = isa<LoadInst>(V);
1182 bool SI = isa<StoreInst>(V);
1183 if (!LI && !SI)
1184 return false;
1185 auto *Ty = getLoadStoreType(V);
1187 if (VF.isVector())
1188 Ty = VectorType::get(Ty, VF);
1189 return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
1190 (SI && TTI.isLegalMaskedScatter(Ty, Align));
1191 }
1192
1193 /// Returns true if the target machine supports all of the reduction
1194 /// variables found for the given VF.
1196 return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1197 const RecurrenceDescriptor &RdxDesc = Reduction.second;
1198 return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1199 }));
1200 }
1201
1202 /// Given costs for both strategies, return true if the scalar predication
1203 /// lowering should be used for div/rem. This incorporates an override
1204 /// option so it is not simply a cost comparison.
1206 InstructionCost SafeDivisorCost) const {
1207 switch (ForceSafeDivisor) {
1208 case cl::BOU_UNSET:
1209 return ScalarCost < SafeDivisorCost;
1210 case cl::BOU_TRUE:
1211 return false;
1212 case cl::BOU_FALSE:
1213 return true;
1214 }
1215 llvm_unreachable("impossible case value");
1216 }
1217
1218 /// Returns true if \p I is an instruction which requires predication and
1219 /// for which our chosen predication strategy is scalarization (i.e. we
1220 /// don't have an alternate strategy such as masking available).
1221 /// \p VF is the vectorization factor that will be used to vectorize \p I.
1222 bool isScalarWithPredication(Instruction *I, ElementCount VF) const;
1223
1224 /// Returns true if \p I is an instruction that needs to be predicated
1225 /// at runtime. The result is independent of the predication mechanism.
1226 /// Superset of instructions that return true for isScalarWithPredication.
1227 bool isPredicatedInst(Instruction *I) const;
1228
1229 /// A helper function that returns how much we should divide the cost of a
1230 /// predicated block by. Typically this is the reciprocal of the block
1231 /// probability, i.e. if we return X we are assuming the predicated block will
1232 /// execute once for every X iterations of the loop header so the block should
1233 /// only contribute 1/X of its cost to the total cost calculation, but when
1234 /// optimizing for code size it will just be 1 as code size costs don't depend
1235 /// on execution probabilities.
1236 ///
1237 /// TODO: We should use actual block probability here, if available.
1238 /// Currently, we always assume predicated blocks have a 50% chance of
1239 /// executing, apart from blocks that are only predicated due to tail folding.
1240 inline unsigned
1242 BasicBlock *BB) const {
1243 // If a block wasn't originally predicated but was predicated due to
1244 // e.g. tail folding, don't divide the cost. Tail folded loops may still be
1245 // predicated in the final vector loop iteration, but for most loops that
1246 // don't have low trip counts we can expect their probability to be close to
1247 // zero.
1248 if (!Legal->blockNeedsPredication(BB))
1249 return 1;
1250 return CostKind == TTI::TCK_CodeSize ? 1 : 2;
1251 }
1252
1253 /// Return the costs for our two available strategies for lowering a
1254 /// div/rem operation which requires speculating at least one lane.
1255 /// First result is for scalarization (will be invalid for scalable
1256 /// vectors); second is for the safe-divisor strategy.
1257 std::pair<InstructionCost, InstructionCost>
1258 getDivRemSpeculationCost(Instruction *I,
1259 ElementCount VF) const;
1260
1261 /// Returns true if \p I is a memory instruction with consecutive memory
1262 /// access that can be widened.
1263 bool memoryInstructionCanBeWidened(Instruction *I, ElementCount VF);
1264
1265 /// Returns true if \p I is a memory instruction in an interleaved-group
1266 /// of memory accesses that can be vectorized with wide vector loads/stores
1267 /// and shuffles.
1268 bool interleavedAccessCanBeWidened(Instruction *I, ElementCount VF) const;
1269
1270 /// Check if \p Instr belongs to any interleaved access group.
1272 return InterleaveInfo.isInterleaved(Instr);
1273 }
1274
1275 /// Get the interleaved access group that \p Instr belongs to.
1278 return InterleaveInfo.getInterleaveGroup(Instr);
1279 }
1280
1281 /// Returns true if we're required to use a scalar epilogue for at least
1282 /// the final iteration of the original loop.
1283 bool requiresScalarEpilogue(bool IsVectorizing) const {
1284 if (!isScalarEpilogueAllowed()) {
1285 LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue\n");
1286 return false;
1287 }
1288 // If we might exit from anywhere but the latch and early exit vectorization
1289 // is disabled, we must run the exiting iteration in scalar form.
1290 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch() &&
1291 !(EnableEarlyExitVectorization && Legal->hasUncountableEarlyExit())) {
1292 LLVM_DEBUG(dbgs() << "LV: Loop requires scalar epilogue: not exiting "
1293 "from latch block\n");
1294 return true;
1295 }
1296 if (IsVectorizing && InterleaveInfo.requiresScalarEpilogue()) {
1297 LLVM_DEBUG(dbgs() << "LV: Loop requires scalar epilogue: "
1298 "interleaved group requires scalar epilogue\n");
1299 return true;
1300 }
1301 LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue\n");
1302 return false;
1303 }
1304
1305 /// Returns true if a scalar epilogue is not allowed due to optsize or a
1306 /// loop hint annotation.
1308 return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1309 }
1310
1311 /// Returns true if tail-folding is preferred over a scalar epilogue.
1313 return ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate ||
1314 ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate;
1315 }
1316
1317 /// Returns the TailFoldingStyle that is best for the current loop.
1318 TailFoldingStyle getTailFoldingStyle(bool IVUpdateMayOverflow = true) const {
1319 if (!ChosenTailFoldingStyle)
1321 return IVUpdateMayOverflow ? ChosenTailFoldingStyle->first
1322 : ChosenTailFoldingStyle->second;
1323 }
1324
1325 /// Selects and saves TailFoldingStyle for 2 options - if IV update may
1326 /// overflow or not.
1327 /// \param IsScalableVF true if scalable vector factors enabled.
1328 /// \param UserIC User specific interleave count.
1329 void setTailFoldingStyles(bool IsScalableVF, unsigned UserIC) {
1330 assert(!ChosenTailFoldingStyle && "Tail folding must not be selected yet.");
1331 if (!Legal->canFoldTailByMasking()) {
1332 ChosenTailFoldingStyle = {TailFoldingStyle::None, TailFoldingStyle::None};
1333 return;
1334 }
1335
1336 // Default to TTI preference, but allow command line override.
1337 ChosenTailFoldingStyle = {
1338 TTI.getPreferredTailFoldingStyle(/*IVUpdateMayOverflow=*/true),
1339 TTI.getPreferredTailFoldingStyle(/*IVUpdateMayOverflow=*/false)};
1340 if (ForceTailFoldingStyle.getNumOccurrences())
1341 ChosenTailFoldingStyle = {ForceTailFoldingStyle.getValue(),
1342 ForceTailFoldingStyle.getValue()};
1343
1344 if (ChosenTailFoldingStyle->first != TailFoldingStyle::DataWithEVL &&
1345 ChosenTailFoldingStyle->second != TailFoldingStyle::DataWithEVL)
1346 return;
1347 // Override EVL styles if needed.
1348 // FIXME: Investigate opportunity for fixed vector factor.
1349 bool EVLIsLegal = UserIC <= 1 && IsScalableVF &&
1350 TTI.hasActiveVectorLength() && !EnableVPlanNativePath;
1351 if (EVLIsLegal)
1352 return;
1353 // If for some reason EVL mode is unsupported, fallback to a scalar epilogue
1354 // if it's allowed, or DataWithoutLaneMask otherwise.
1355 if (ScalarEpilogueStatus == CM_ScalarEpilogueAllowed ||
1356 ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate)
1357 ChosenTailFoldingStyle = {TailFoldingStyle::None, TailFoldingStyle::None};
1358 else
1359 ChosenTailFoldingStyle = {TailFoldingStyle::DataWithoutLaneMask,
1361
1362 LLVM_DEBUG(
1363 dbgs() << "LV: Preference for VP intrinsics indicated. Will "
1364 "not try to generate VP Intrinsics "
1365 << (UserIC > 1
1366 ? "since interleave count specified is greater than 1.\n"
1367 : "due to non-interleaving reasons.\n"));
1368 }
1369
1370 /// Returns true if all loop blocks should be masked to fold tail loop.
1371 bool foldTailByMasking() const {
1372 // TODO: check if it is possible to check for None style independent of
1373 // IVUpdateMayOverflow flag in getTailFoldingStyle.
1375 }
1376
1377 /// Returns true if the use of wide lane masks is requested and the loop is
1378 /// using tail-folding with a lane mask for control flow.
1387
1388 /// Return maximum safe number of elements to be processed per vector
1389 /// iteration, which do not prevent store-load forwarding and are safe with
1390 /// regard to the memory dependencies. Required for EVL-based VPlans to
1391 /// correctly calculate AVL (application vector length) as min(remaining AVL,
1392 /// MaxSafeElements).
1393 /// TODO: need to consider adjusting cost model to use this value as a
1394 /// vectorization factor for EVL-based vectorization.
1395 std::optional<unsigned> getMaxSafeElements() const { return MaxSafeElements; }
1396
1397 /// Returns true if the instructions in this block requires predication
1398 /// for any reason, e.g. because tail folding now requires a predicate
1399 /// or because the block in the original loop was predicated.
1401 return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1402 }
1403
1404 /// Returns true if VP intrinsics with explicit vector length support should
1405 /// be generated in the tail folded loop.
1409
1410 /// Returns true if the Phi is part of an inloop reduction.
1411 bool isInLoopReduction(PHINode *Phi) const {
1412 return InLoopReductions.contains(Phi);
1413 }
1414
1415 /// Returns true if the predicated reduction select should be used to set the
1416 /// incoming value for the reduction phi.
1418 // Force to use predicated reduction select since the EVL of the
1419 // second-to-last iteration might not be VF*UF.
1420 if (foldTailWithEVL())
1421 return true;
1423 TTI.preferPredicatedReductionSelect();
1424 }
1425
1426 /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1427 /// with factor VF. Return the cost of the instruction, including
1428 /// scalarization overhead if it's needed.
1429 InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1430
1431 /// Estimate cost of a call instruction CI if it were vectorized with factor
1432 /// VF. Return the cost of the instruction, including scalarization overhead
1433 /// if it's needed.
1434 InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF) const;
1435
1436 /// Invalidates decisions already taken by the cost model.
1438 WideningDecisions.clear();
1439 CallWideningDecisions.clear();
1440 Uniforms.clear();
1441 Scalars.clear();
1442 }
1443
1444 /// Returns the expected execution cost. The unit of the cost does
1445 /// not matter because we use the 'cost' units to compare different
1446 /// vector widths. The cost that is returned is *not* normalized by
1447 /// the factor width.
1448 InstructionCost expectedCost(ElementCount VF);
1449
1450 bool hasPredStores() const { return NumPredStores > 0; }
1451
1452 /// Returns true if epilogue vectorization is considered profitable, and
1453 /// false otherwise.
1454 /// \p VF is the vectorization factor chosen for the original loop.
1455 /// \p Multiplier is an aditional scaling factor applied to VF before
1456 /// comparing to EpilogueVectorizationMinVF.
1457 bool isEpilogueVectorizationProfitable(const ElementCount VF,
1458 const unsigned IC) const;
1459
1460 /// Returns the execution time cost of an instruction for a given vector
1461 /// width. Vector width of one means scalar.
1462 InstructionCost getInstructionCost(Instruction *I, ElementCount VF);
1463
1464 /// Return the cost of instructions in an inloop reduction pattern, if I is
1465 /// part of that pattern.
1466 std::optional<InstructionCost> getReductionPatternCost(Instruction *I,
1467 ElementCount VF,
1468 Type *VectorTy) const;
1469
1470 /// Returns true if \p Op should be considered invariant and if it is
1471 /// trivially hoistable.
1472 bool shouldConsiderInvariant(Value *Op);
1473
1474 /// Return the value of vscale used for tuning the cost model.
1475 std::optional<unsigned> getVScaleForTuning() const { return VScaleForTuning; }
1476
1477private:
1478 unsigned NumPredStores = 0;
1479
1480 /// Used to store the value of vscale used for tuning the cost model. It is
1481 /// initialized during object construction.
1482 std::optional<unsigned> VScaleForTuning;
1483
1484 /// Initializes the value of vscale used for tuning the cost model. If
1485 /// vscale_range.min == vscale_range.max then return vscale_range.max, else
1486 /// return the value returned by the corresponding TTI method.
1487 void initializeVScaleForTuning() {
1488 const Function *Fn = TheLoop->getHeader()->getParent();
1489 if (Fn->hasFnAttribute(Attribute::VScaleRange)) {
1490 auto Attr = Fn->getFnAttribute(Attribute::VScaleRange);
1491 auto Min = Attr.getVScaleRangeMin();
1492 auto Max = Attr.getVScaleRangeMax();
1493 if (Max && Min == Max) {
1494 VScaleForTuning = Max;
1495 return;
1496 }
1497 }
1498
1499 VScaleForTuning = TTI.getVScaleForTuning();
1500 }
1501
1502 /// \return An upper bound for the vectorization factors for both
1503 /// fixed and scalable vectorization, where the minimum-known number of
1504 /// elements is a power-of-2 larger than zero. If scalable vectorization is
1505 /// disabled or unsupported, then the scalable part will be equal to
1506 /// ElementCount::getScalable(0).
1507 FixedScalableVFPair computeFeasibleMaxVF(unsigned MaxTripCount,
1508 ElementCount UserVF,
1509 bool FoldTailByMasking);
1510
1511 /// If \p VF > MaxTripcount, clamps it to the next lower VF that is <=
1512 /// MaxTripCount.
1513 ElementCount clampVFByMaxTripCount(ElementCount VF, unsigned MaxTripCount,
1514 bool FoldTailByMasking) const;
1515
1516 /// \return the maximized element count based on the targets vector
1517 /// registers and the loop trip-count, but limited to a maximum safe VF.
1518 /// This is a helper function of computeFeasibleMaxVF.
1519 ElementCount getMaximizedVFForTarget(unsigned MaxTripCount,
1520 unsigned SmallestType,
1521 unsigned WidestType,
1522 ElementCount MaxSafeVF,
1523 bool FoldTailByMasking);
1524
1525 /// Checks if scalable vectorization is supported and enabled. Caches the
1526 /// result to avoid repeated debug dumps for repeated queries.
1527 bool isScalableVectorizationAllowed();
1528
1529 /// \return the maximum legal scalable VF, based on the safe max number
1530 /// of elements.
1531 ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1532
1533 /// Calculate vectorization cost of memory instruction \p I.
1534 InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1535
1536 /// The cost computation for scalarized memory instruction.
1537 InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1538
1539 /// The cost computation for interleaving group of memory instructions.
1540 InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1541
1542 /// The cost computation for Gather/Scatter instruction.
1543 InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1544
1545 /// The cost computation for widening instruction \p I with consecutive
1546 /// memory access.
1547 InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1548
1549 /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1550 /// Load: scalar load + broadcast.
1551 /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1552 /// element)
1553 InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1554
1555 /// Estimate the overhead of scalarizing an instruction. This is a
1556 /// convenience wrapper for the type-based getScalarizationOverhead API.
1558 ElementCount VF) const;
1559
1560 /// Returns true if an artificially high cost for emulated masked memrefs
1561 /// should be used.
1562 bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);
1563
1564 /// Map of scalar integer values to the smallest bitwidth they can be legally
1565 /// represented as. The vector equivalents of these values should be truncated
1566 /// to this type.
1567 MapVector<Instruction *, uint64_t> MinBWs;
1568
1569 /// A type representing the costs for instructions if they were to be
1570 /// scalarized rather than vectorized. The entries are Instruction-Cost
1571 /// pairs.
1572 using ScalarCostsTy = MapVector<Instruction *, InstructionCost>;
1573
1574 /// A set containing all BasicBlocks that are known to present after
1575 /// vectorization as a predicated block.
1576 DenseMap<ElementCount, SmallPtrSet<BasicBlock *, 4>>
1577 PredicatedBBsAfterVectorization;
1578
1579 /// Records whether it is allowed to have the original scalar loop execute at
1580 /// least once. This may be needed as a fallback loop in case runtime
1581 /// aliasing/dependence checks fail, or to handle the tail/remainder
1582 /// iterations when the trip count is unknown or doesn't divide by the VF,
1583 /// or as a peel-loop to handle gaps in interleave-groups.
1584 /// Under optsize and when the trip count is very small we don't allow any
1585 /// iterations to execute in the scalar loop.
1586 ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1587
1588 /// Control finally chosen tail folding style. The first element is used if
1589 /// the IV update may overflow, the second element - if it does not.
1590 std::optional<std::pair<TailFoldingStyle, TailFoldingStyle>>
1591 ChosenTailFoldingStyle;
1592
1593 /// true if scalable vectorization is supported and enabled.
1594 std::optional<bool> IsScalableVectorizationAllowed;
1595
1596 /// Maximum safe number of elements to be processed per vector iteration,
1597 /// which do not prevent store-load forwarding and are safe with regard to the
1598 /// memory dependencies. Required for EVL-based veectorization, where this
1599 /// value is used as the upper bound of the safe AVL.
1600 std::optional<unsigned> MaxSafeElements;
1601
1602 /// A map holding scalar costs for different vectorization factors. The
1603 /// presence of a cost for an instruction in the mapping indicates that the
1604 /// instruction will be scalarized when vectorizing with the associated
1605 /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1606 MapVector<ElementCount, ScalarCostsTy> InstsToScalarize;
1607
1608 /// Holds the instructions known to be uniform after vectorization.
1609 /// The data is collected per VF.
1610 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1611
1612 /// Holds the instructions known to be scalar after vectorization.
1613 /// The data is collected per VF.
1614 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1615
1616 /// Holds the instructions (address computations) that are forced to be
1617 /// scalarized.
1618 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1619
1620 /// PHINodes of the reductions that should be expanded in-loop.
1621 SmallPtrSet<PHINode *, 4> InLoopReductions;
1622
1623 /// A Map of inloop reduction operations and their immediate chain operand.
1624 /// FIXME: This can be removed once reductions can be costed correctly in
1625 /// VPlan. This was added to allow quick lookup of the inloop operations.
1626 DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1627
1628 /// Returns the expected difference in cost from scalarizing the expression
1629 /// feeding a predicated instruction \p PredInst. The instructions to
1630 /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1631 /// non-negative return value implies the expression will be scalarized.
1632 /// Currently, only single-use chains are considered for scalarization.
1633 InstructionCost computePredInstDiscount(Instruction *PredInst,
1634 ScalarCostsTy &ScalarCosts,
1635 ElementCount VF);
1636
1637 /// Collect the instructions that are uniform after vectorization. An
1638 /// instruction is uniform if we represent it with a single scalar value in
1639 /// the vectorized loop corresponding to each vector iteration. Examples of
1640 /// uniform instructions include pointer operands of consecutive or
1641 /// interleaved memory accesses. Note that although uniformity implies an
1642 /// instruction will be scalar, the reverse is not true. In general, a
1643 /// scalarized instruction will be represented by VF scalar values in the
1644 /// vectorized loop, each corresponding to an iteration of the original
1645 /// scalar loop.
1646 void collectLoopUniforms(ElementCount VF);
1647
1648 /// Collect the instructions that are scalar after vectorization. An
1649 /// instruction is scalar if it is known to be uniform or will be scalarized
1650 /// during vectorization. collectLoopScalars should only add non-uniform nodes
1651 /// to the list if they are used by a load/store instruction that is marked as
1652 /// CM_Scalarize. Non-uniform scalarized instructions will be represented by
1653 /// VF values in the vectorized loop, each corresponding to an iteration of
1654 /// the original scalar loop.
1655 void collectLoopScalars(ElementCount VF);
1656
1657 /// Keeps cost model vectorization decision and cost for instructions.
1658 /// Right now it is used for memory instructions only.
1659 using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1660 std::pair<InstWidening, InstructionCost>>;
1661
1662 DecisionList WideningDecisions;
1663
1664 using CallDecisionList =
1665 DenseMap<std::pair<CallInst *, ElementCount>, CallWideningDecision>;
1666
1667 CallDecisionList CallWideningDecisions;
1668
1669 /// Returns true if \p V is expected to be vectorized and it needs to be
1670 /// extracted.
1671 bool needsExtract(Value *V, ElementCount VF) const {
1673 if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1674 TheLoop->isLoopInvariant(I) ||
1675 getWideningDecision(I, VF) == CM_Scalarize ||
1676 (isa<CallInst>(I) &&
1677 getCallWideningDecision(cast<CallInst>(I), VF).Kind == CM_Scalarize))
1678 return false;
1679
1680 // Assume we can vectorize V (and hence we need extraction) if the
1681 // scalars are not computed yet. This can happen, because it is called
1682 // via getScalarizationOverhead from setCostBasedWideningDecision, before
1683 // the scalars are collected. That should be a safe assumption in most
1684 // cases, because we check if the operands have vectorizable types
1685 // beforehand in LoopVectorizationLegality.
1686 return !Scalars.contains(VF) || !isScalarAfterVectorization(I, VF);
1687 };
1688
1689 /// Returns a range containing only operands needing to be extracted.
1690 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1691 ElementCount VF) const {
1692
1693 SmallPtrSet<const Value *, 4> UniqueOperands;
1695 for (Value *Op : Ops) {
1696 if (isa<Constant>(Op) || !UniqueOperands.insert(Op).second ||
1697 !needsExtract(Op, VF))
1698 continue;
1699 Res.push_back(Op);
1700 }
1701 return Res;
1702 }
1703
1704public:
1705 /// The loop that we evaluate.
1707
1708 /// Predicated scalar evolution analysis.
1710
1711 /// Loop Info analysis.
1713
1714 /// Vectorization legality.
1716
1717 /// Vector target information.
1719
1720 /// Target Library Info.
1722
1723 /// Demanded bits analysis.
1725
1726 /// Assumption cache.
1728
1729 /// Interface to emit optimization remarks.
1731
1733
1734 /// Loop Vectorize Hint.
1736
1737 /// The interleave access information contains groups of interleaved accesses
1738 /// with the same stride and close to each other.
1740
1741 /// Values to ignore in the cost model.
1743
1744 /// Values to ignore in the cost model when VF > 1.
1746
1747 /// All element types found in the loop.
1749
1750 /// The kind of cost that we are calculating
1752
1753 /// Whether this loop should be optimized for size based on function attribute
1754 /// or profile information.
1756
1757 /// The highest VF possible for this loop, without using MaxBandwidth.
1759};
1760} // end namespace llvm
1761
1762namespace {
1763/// Helper struct to manage generating runtime checks for vectorization.
1764///
1765/// The runtime checks are created up-front in temporary blocks to allow better
1766/// estimating the cost and un-linked from the existing IR. After deciding to
1767/// vectorize, the checks are moved back. If deciding not to vectorize, the
1768/// temporary blocks are completely removed.
1769class GeneratedRTChecks {
1770 /// Basic block which contains the generated SCEV checks, if any.
1771 BasicBlock *SCEVCheckBlock = nullptr;
1772
1773 /// The value representing the result of the generated SCEV checks. If it is
1774 /// nullptr no SCEV checks have been generated.
1775 Value *SCEVCheckCond = nullptr;
1776
1777 /// Basic block which contains the generated memory runtime checks, if any.
1778 BasicBlock *MemCheckBlock = nullptr;
1779
1780 /// The value representing the result of the generated memory runtime checks.
1781 /// If it is nullptr no memory runtime checks have been generated.
1782 Value *MemRuntimeCheckCond = nullptr;
1783
1784 DominatorTree *DT;
1785 LoopInfo *LI;
1787
1788 SCEVExpander SCEVExp;
1789 SCEVExpander MemCheckExp;
1790
1791 bool CostTooHigh = false;
1792
1793 Loop *OuterLoop = nullptr;
1794
1796
1797 /// The kind of cost that we are calculating
1799
1800public:
1801 GeneratedRTChecks(PredicatedScalarEvolution &PSE, DominatorTree *DT,
1804 : DT(DT), LI(LI), TTI(TTI),
1805 SCEVExp(*PSE.getSE(), DL, "scev.check", /*PreserveLCSSA=*/false),
1806 MemCheckExp(*PSE.getSE(), DL, "scev.check", /*PreserveLCSSA=*/false),
1807 PSE(PSE), CostKind(CostKind) {}
1808
1809 /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
1810 /// accurately estimate the cost of the runtime checks. The blocks are
1811 /// un-linked from the IR and are added back during vector code generation. If
1812 /// there is no vector code generation, the check blocks are removed
1813 /// completely.
1814 void create(Loop *L, const LoopAccessInfo &LAI,
1815 const SCEVPredicate &UnionPred, ElementCount VF, unsigned IC) {
1816
1817 // Hard cutoff to limit compile-time increase in case a very large number of
1818 // runtime checks needs to be generated.
1819 // TODO: Skip cutoff if the loop is guaranteed to execute, e.g. due to
1820 // profile info.
1821 CostTooHigh =
1823 if (CostTooHigh)
1824 return;
1825
1826 BasicBlock *LoopHeader = L->getHeader();
1827 BasicBlock *Preheader = L->getLoopPreheader();
1828
1829 // Use SplitBlock to create blocks for SCEV & memory runtime checks to
1830 // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
1831 // may be used by SCEVExpander. The blocks will be un-linked from their
1832 // predecessors and removed from LI & DT at the end of the function.
1833 if (!UnionPred.isAlwaysTrue()) {
1834 SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
1835 nullptr, "vector.scevcheck");
1836
1837 SCEVCheckCond = SCEVExp.expandCodeForPredicate(
1838 &UnionPred, SCEVCheckBlock->getTerminator());
1839 if (isa<Constant>(SCEVCheckCond)) {
1840 // Clean up directly after expanding the predicate to a constant, to
1841 // avoid further expansions re-using anything left over from SCEVExp.
1842 SCEVExpanderCleaner SCEVCleaner(SCEVExp);
1843 SCEVCleaner.cleanup();
1844 }
1845 }
1846
1847 const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
1848 if (RtPtrChecking.Need) {
1849 auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
1850 MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
1851 "vector.memcheck");
1852
1853 auto DiffChecks = RtPtrChecking.getDiffChecks();
1854 if (DiffChecks) {
1855 Value *RuntimeVF = nullptr;
1856 MemRuntimeCheckCond = addDiffRuntimeChecks(
1857 MemCheckBlock->getTerminator(), *DiffChecks, MemCheckExp,
1858 [VF, &RuntimeVF](IRBuilderBase &B, unsigned Bits) {
1859 if (!RuntimeVF)
1860 RuntimeVF = getRuntimeVF(B, B.getIntNTy(Bits), VF);
1861 return RuntimeVF;
1862 },
1863 IC);
1864 } else {
1865 MemRuntimeCheckCond = addRuntimeChecks(
1866 MemCheckBlock->getTerminator(), L, RtPtrChecking.getChecks(),
1868 }
1869 assert(MemRuntimeCheckCond &&
1870 "no RT checks generated although RtPtrChecking "
1871 "claimed checks are required");
1872 }
1873
1874 SCEVExp.eraseDeadInstructions(SCEVCheckCond);
1875
1876 if (!MemCheckBlock && !SCEVCheckBlock)
1877 return;
1878
1879 // Unhook the temporary block with the checks, update various places
1880 // accordingly.
1881 if (SCEVCheckBlock)
1882 SCEVCheckBlock->replaceAllUsesWith(Preheader);
1883 if (MemCheckBlock)
1884 MemCheckBlock->replaceAllUsesWith(Preheader);
1885
1886 if (SCEVCheckBlock) {
1887 SCEVCheckBlock->getTerminator()->moveBefore(
1888 Preheader->getTerminator()->getIterator());
1889 auto *UI = new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
1890 UI->setDebugLoc(DebugLoc::getTemporary());
1891 Preheader->getTerminator()->eraseFromParent();
1892 }
1893 if (MemCheckBlock) {
1894 MemCheckBlock->getTerminator()->moveBefore(
1895 Preheader->getTerminator()->getIterator());
1896 auto *UI = new UnreachableInst(Preheader->getContext(), MemCheckBlock);
1897 UI->setDebugLoc(DebugLoc::getTemporary());
1898 Preheader->getTerminator()->eraseFromParent();
1899 }
1900
1901 DT->changeImmediateDominator(LoopHeader, Preheader);
1902 if (MemCheckBlock) {
1903 DT->eraseNode(MemCheckBlock);
1904 LI->removeBlock(MemCheckBlock);
1905 }
1906 if (SCEVCheckBlock) {
1907 DT->eraseNode(SCEVCheckBlock);
1908 LI->removeBlock(SCEVCheckBlock);
1909 }
1910
1911 // Outer loop is used as part of the later cost calculations.
1912 OuterLoop = L->getParentLoop();
1913 }
1914
1916 if (SCEVCheckBlock || MemCheckBlock)
1917 LLVM_DEBUG(dbgs() << "Calculating cost of runtime checks:\n");
1918
1919 if (CostTooHigh) {
1921 Cost.setInvalid();
1922 LLVM_DEBUG(dbgs() << " number of checks exceeded threshold\n");
1923 return Cost;
1924 }
1925
1926 InstructionCost RTCheckCost = 0;
1927 if (SCEVCheckBlock)
1928 for (Instruction &I : *SCEVCheckBlock) {
1929 if (SCEVCheckBlock->getTerminator() == &I)
1930 continue;
1932 LLVM_DEBUG(dbgs() << " " << C << " for " << I << "\n");
1933 RTCheckCost += C;
1934 }
1935 if (MemCheckBlock) {
1936 InstructionCost MemCheckCost = 0;
1937 for (Instruction &I : *MemCheckBlock) {
1938 if (MemCheckBlock->getTerminator() == &I)
1939 continue;
1941 LLVM_DEBUG(dbgs() << " " << C << " for " << I << "\n");
1942 MemCheckCost += C;
1943 }
1944
1945 // If the runtime memory checks are being created inside an outer loop
1946 // we should find out if these checks are outer loop invariant. If so,
1947 // the checks will likely be hoisted out and so the effective cost will
1948 // reduce according to the outer loop trip count.
1949 if (OuterLoop) {
1950 ScalarEvolution *SE = MemCheckExp.getSE();
1951 // TODO: If profitable, we could refine this further by analysing every
1952 // individual memory check, since there could be a mixture of loop
1953 // variant and invariant checks that mean the final condition is
1954 // variant.
1955 const SCEV *Cond = SE->getSCEV(MemRuntimeCheckCond);
1956 if (SE->isLoopInvariant(Cond, OuterLoop)) {
1957 // It seems reasonable to assume that we can reduce the effective
1958 // cost of the checks even when we know nothing about the trip
1959 // count. Assume that the outer loop executes at least twice.
1960 unsigned BestTripCount = 2;
1961
1962 // Get the best known TC estimate.
1963 if (auto EstimatedTC = getSmallBestKnownTC(
1964 PSE, OuterLoop, /* CanUseConstantMax = */ false))
1965 if (EstimatedTC->isFixed())
1966 BestTripCount = EstimatedTC->getFixedValue();
1967
1968 InstructionCost NewMemCheckCost = MemCheckCost / BestTripCount;
1969
1970 // Let's ensure the cost is always at least 1.
1971 NewMemCheckCost = std::max(NewMemCheckCost.getValue(),
1972 (InstructionCost::CostType)1);
1973
1974 if (BestTripCount > 1)
1976 << "We expect runtime memory checks to be hoisted "
1977 << "out of the outer loop. Cost reduced from "
1978 << MemCheckCost << " to " << NewMemCheckCost << '\n');
1979
1980 MemCheckCost = NewMemCheckCost;
1981 }
1982 }
1983
1984 RTCheckCost += MemCheckCost;
1985 }
1986
1987 if (SCEVCheckBlock || MemCheckBlock)
1988 LLVM_DEBUG(dbgs() << "Total cost of runtime checks: " << RTCheckCost
1989 << "\n");
1990
1991 return RTCheckCost;
1992 }
1993
1994 /// Remove the created SCEV & memory runtime check blocks & instructions, if
1995 /// unused.
1996 ~GeneratedRTChecks() {
1997 SCEVExpanderCleaner SCEVCleaner(SCEVExp);
1998 SCEVExpanderCleaner MemCheckCleaner(MemCheckExp);
1999 bool SCEVChecksUsed = !SCEVCheckBlock || !pred_empty(SCEVCheckBlock);
2000 bool MemChecksUsed = !MemCheckBlock || !pred_empty(MemCheckBlock);
2001 if (SCEVChecksUsed)
2002 SCEVCleaner.markResultUsed();
2003
2004 if (MemChecksUsed) {
2005 MemCheckCleaner.markResultUsed();
2006 } else {
2007 auto &SE = *MemCheckExp.getSE();
2008 // Memory runtime check generation creates compares that use expanded
2009 // values. Remove them before running the SCEVExpanderCleaners.
2010 for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
2011 if (MemCheckExp.isInsertedInstruction(&I))
2012 continue;
2013 SE.forgetValue(&I);
2014 I.eraseFromParent();
2015 }
2016 }
2017 MemCheckCleaner.cleanup();
2018 SCEVCleaner.cleanup();
2019
2020 if (!SCEVChecksUsed)
2021 SCEVCheckBlock->eraseFromParent();
2022 if (!MemChecksUsed)
2023 MemCheckBlock->eraseFromParent();
2024 }
2025
2026 /// Retrieves the SCEVCheckCond and SCEVCheckBlock that were generated as IR
2027 /// outside VPlan.
2028 std::pair<Value *, BasicBlock *> getSCEVChecks() const {
2029 using namespace llvm::PatternMatch;
2030 if (!SCEVCheckCond || match(SCEVCheckCond, m_ZeroInt()))
2031 return {nullptr, nullptr};
2032
2033 return {SCEVCheckCond, SCEVCheckBlock};
2034 }
2035
2036 /// Retrieves the MemCheckCond and MemCheckBlock that were generated as IR
2037 /// outside VPlan.
2038 std::pair<Value *, BasicBlock *> getMemRuntimeChecks() const {
2039 using namespace llvm::PatternMatch;
2040 if (MemRuntimeCheckCond && match(MemRuntimeCheckCond, m_ZeroInt()))
2041 return {nullptr, nullptr};
2042 return {MemRuntimeCheckCond, MemCheckBlock};
2043 }
2044
2045 /// Return true if any runtime checks have been added
2046 bool hasChecks() const {
2047 return getSCEVChecks().first || getMemRuntimeChecks().first;
2048 }
2049};
2050} // namespace
2051
2057
2062
2063// Return true if \p OuterLp is an outer loop annotated with hints for explicit
2064// vectorization. The loop needs to be annotated with #pragma omp simd
2065// simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
2066// vector length information is not provided, vectorization is not considered
2067// explicit. Interleave hints are not allowed either. These limitations will be
2068// relaxed in the future.
2069// Please, note that we are currently forced to abuse the pragma 'clang
2070// vectorize' semantics. This pragma provides *auto-vectorization hints*
2071// (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2072// provides *explicit vectorization hints* (LV can bypass legal checks and
2073// assume that vectorization is legal). However, both hints are implemented
2074// using the same metadata (llvm.loop.vectorize, processed by
2075// LoopVectorizeHints). This will be fixed in the future when the native IR
2076// representation for pragma 'omp simd' is introduced.
2077static bool isExplicitVecOuterLoop(Loop *OuterLp,
2079 assert(!OuterLp->isInnermost() && "This is not an outer loop");
2080 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2081
2082 // Only outer loops with an explicit vectorization hint are supported.
2083 // Unannotated outer loops are ignored.
2085 return false;
2086
2087 Function *Fn = OuterLp->getHeader()->getParent();
2088 if (!Hints.allowVectorization(Fn, OuterLp,
2089 true /*VectorizeOnlyWhenForced*/)) {
2090 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
2091 return false;
2092 }
2093
2094 if (Hints.getInterleave() > 1) {
2095 // TODO: Interleave support is future work.
2096 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
2097 "outer loops.\n");
2098 Hints.emitRemarkWithHints();
2099 return false;
2100 }
2101
2102 return true;
2103}
2104
2108 // Collect inner loops and outer loops without irreducible control flow. For
2109 // now, only collect outer loops that have explicit vectorization hints. If we
2110 // are stress testing the VPlan H-CFG construction, we collect the outermost
2111 // loop of every loop nest.
2112 if (L.isInnermost() || VPlanBuildStressTest ||
2114 LoopBlocksRPO RPOT(&L);
2115 RPOT.perform(LI);
2117 V.push_back(&L);
2118 // TODO: Collect inner loops inside marked outer loops in case
2119 // vectorization fails for the outer loop. Do not invoke
2120 // 'containsIrreducibleCFG' again for inner loops when the outer loop is
2121 // already known to be reducible. We can use an inherited attribute for
2122 // that.
2123 return;
2124 }
2125 }
2126 for (Loop *InnerL : L)
2127 collectSupportedLoops(*InnerL, LI, ORE, V);
2128}
2129
2130//===----------------------------------------------------------------------===//
2131// Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2132// LoopVectorizationCostModel and LoopVectorizationPlanner.
2133//===----------------------------------------------------------------------===//
2134
2135/// Compute the transformed value of Index at offset StartValue using step
2136/// StepValue.
2137/// For integer induction, returns StartValue + Index * StepValue.
2138/// For pointer induction, returns StartValue[Index * StepValue].
2139/// FIXME: The newly created binary instructions should contain nsw/nuw
2140/// flags, which can be found from the original scalar operations.
2141static Value *
2143 Value *Step,
2145 const BinaryOperator *InductionBinOp) {
2146 using namespace llvm::PatternMatch;
2147 Type *StepTy = Step->getType();
2148 Value *CastedIndex = StepTy->isIntegerTy()
2149 ? B.CreateSExtOrTrunc(Index, StepTy)
2150 : B.CreateCast(Instruction::SIToFP, Index, StepTy);
2151 if (CastedIndex != Index) {
2152 CastedIndex->setName(CastedIndex->getName() + ".cast");
2153 Index = CastedIndex;
2154 }
2155
2156 // Note: the IR at this point is broken. We cannot use SE to create any new
2157 // SCEV and then expand it, hoping that SCEV's simplification will give us
2158 // a more optimal code. Unfortunately, attempt of doing so on invalid IR may
2159 // lead to various SCEV crashes. So all we can do is to use builder and rely
2160 // on InstCombine for future simplifications. Here we handle some trivial
2161 // cases only.
2162 auto CreateAdd = [&B](Value *X, Value *Y) {
2163 assert(X->getType() == Y->getType() && "Types don't match!");
2164 if (match(X, m_ZeroInt()))
2165 return Y;
2166 if (match(Y, m_ZeroInt()))
2167 return X;
2168 return B.CreateAdd(X, Y);
2169 };
2170
2171 // We allow X to be a vector type, in which case Y will potentially be
2172 // splatted into a vector with the same element count.
2173 auto CreateMul = [&B](Value *X, Value *Y) {
2174 assert(X->getType()->getScalarType() == Y->getType() &&
2175 "Types don't match!");
2176 if (match(X, m_One()))
2177 return Y;
2178 if (match(Y, m_One()))
2179 return X;
2180 VectorType *XVTy = dyn_cast<VectorType>(X->getType());
2181 if (XVTy && !isa<VectorType>(Y->getType()))
2182 Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
2183 return B.CreateMul(X, Y);
2184 };
2185
2186 switch (InductionKind) {
2188 assert(!isa<VectorType>(Index->getType()) &&
2189 "Vector indices not supported for integer inductions yet");
2190 assert(Index->getType() == StartValue->getType() &&
2191 "Index type does not match StartValue type");
2192 if (isa<ConstantInt>(Step) && cast<ConstantInt>(Step)->isMinusOne())
2193 return B.CreateSub(StartValue, Index);
2194 auto *Offset = CreateMul(Index, Step);
2195 return CreateAdd(StartValue, Offset);
2196 }
2198 return B.CreatePtrAdd(StartValue, CreateMul(Index, Step));
2200 assert(!isa<VectorType>(Index->getType()) &&
2201 "Vector indices not supported for FP inductions yet");
2202 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
2203 assert(InductionBinOp &&
2204 (InductionBinOp->getOpcode() == Instruction::FAdd ||
2205 InductionBinOp->getOpcode() == Instruction::FSub) &&
2206 "Original bin op should be defined for FP induction");
2207
2208 Value *MulExp = B.CreateFMul(Step, Index);
2209 return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
2210 "induction");
2211 }
2213 return nullptr;
2214 }
2215 llvm_unreachable("invalid enum");
2216}
2217
2218static std::optional<unsigned> getMaxVScale(const Function &F,
2219 const TargetTransformInfo &TTI) {
2220 if (std::optional<unsigned> MaxVScale = TTI.getMaxVScale())
2221 return MaxVScale;
2222
2223 if (F.hasFnAttribute(Attribute::VScaleRange))
2224 return F.getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();
2225
2226 return std::nullopt;
2227}
2228
2229/// For the given VF and UF and maximum trip count computed for the loop, return
2230/// whether the induction variable might overflow in the vectorized loop. If not,
2231/// then we know a runtime overflow check always evaluates to false and can be
2232/// removed.
2234 const LoopVectorizationCostModel *Cost,
2235 ElementCount VF, std::optional<unsigned> UF = std::nullopt) {
2236 // Always be conservative if we don't know the exact unroll factor.
2237 unsigned MaxUF = UF ? *UF : Cost->TTI.getMaxInterleaveFactor(VF);
2238
2239 IntegerType *IdxTy = Cost->Legal->getWidestInductionType();
2240 APInt MaxUIntTripCount = IdxTy->getMask();
2241
2242 // We know the runtime overflow check is known false iff the (max) trip-count
2243 // is known and (max) trip-count + (VF * UF) does not overflow in the type of
2244 // the vector loop induction variable.
2245 if (unsigned TC = Cost->PSE.getSmallConstantMaxTripCount()) {
2246 uint64_t MaxVF = VF.getKnownMinValue();
2247 if (VF.isScalable()) {
2248 std::optional<unsigned> MaxVScale =
2249 getMaxVScale(*Cost->TheFunction, Cost->TTI);
2250 if (!MaxVScale)
2251 return false;
2252 MaxVF *= *MaxVScale;
2253 }
2254
2255 return (MaxUIntTripCount - TC).ugt(MaxVF * MaxUF);
2256 }
2257
2258 return false;
2259}
2260
2261// Return whether we allow using masked interleave-groups (for dealing with
2262// strided loads/stores that reside in predicated blocks, or for dealing
2263// with gaps).
2265 // If an override option has been passed in for interleaved accesses, use it.
2266 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2268
2269 return TTI.enableMaskedInterleavedAccessVectorization();
2270}
2271
2273 BasicBlock *CheckIRBB) {
2274 // Note: The block with the minimum trip-count check is already connected
2275 // during earlier VPlan construction.
2276 VPBlockBase *ScalarPH = Plan.getScalarPreheader();
2277 VPBlockBase *PreVectorPH = VectorPHVPBB->getSinglePredecessor();
2278 assert(PreVectorPH->getNumSuccessors() == 2 && "Expected 2 successors");
2279 assert(PreVectorPH->getSuccessors()[0] == ScalarPH && "Unexpected successor");
2280 VPIRBasicBlock *CheckVPIRBB = Plan.createVPIRBasicBlock(CheckIRBB);
2281 VPBlockUtils::insertOnEdge(PreVectorPH, VectorPHVPBB, CheckVPIRBB);
2282 PreVectorPH = CheckVPIRBB;
2283 VPBlockUtils::connectBlocks(PreVectorPH, ScalarPH);
2284 PreVectorPH->swapSuccessors();
2285
2286 // We just connected a new block to the scalar preheader. Update all
2287 // VPPhis by adding an incoming value for it, replicating the last value.
2288 unsigned NumPredecessors = ScalarPH->getNumPredecessors();
2289 for (VPRecipeBase &R : cast<VPBasicBlock>(ScalarPH)->phis()) {
2290 assert(isa<VPPhi>(&R) && "Phi expected to be VPPhi");
2291 assert(cast<VPPhi>(&R)->getNumIncoming() == NumPredecessors - 1 &&
2292 "must have incoming values for all operands");
2293 R.addOperand(R.getOperand(NumPredecessors - 2));
2294 }
2295}
2296
2298 BasicBlock *VectorPH, ElementCount VF, unsigned UF) const {
2299 // Generate code to check if the loop's trip count is less than VF * UF, or
2300 // equal to it in case a scalar epilogue is required; this implies that the
2301 // vector trip count is zero. This check also covers the case where adding one
2302 // to the backedge-taken count overflowed leading to an incorrect trip count
2303 // of zero. In this case we will also jump to the scalar loop.
2304 auto P = Cost->requiresScalarEpilogue(VF.isVector()) ? ICmpInst::ICMP_ULE
2306
2307 // Reuse existing vector loop preheader for TC checks.
2308 // Note that new preheader block is generated for vector loop.
2309 BasicBlock *const TCCheckBlock = VectorPH;
2311 TCCheckBlock->getContext(),
2312 InstSimplifyFolder(TCCheckBlock->getDataLayout()));
2313 Builder.SetInsertPoint(TCCheckBlock->getTerminator());
2314
2315 // If tail is to be folded, vector loop takes care of all iterations.
2317 Type *CountTy = Count->getType();
2318 Value *CheckMinIters = Builder.getFalse();
2319 auto CreateStep = [&]() -> Value * {
2320 // Create step with max(MinProTripCount, UF * VF).
2321 if (UF * VF.getKnownMinValue() >= MinProfitableTripCount.getKnownMinValue())
2322 return createStepForVF(Builder, CountTy, VF, UF);
2323
2324 Value *MinProfTC =
2325 Builder.CreateElementCount(CountTy, MinProfitableTripCount);
2326 if (!VF.isScalable())
2327 return MinProfTC;
2328 return Builder.CreateBinaryIntrinsic(
2329 Intrinsic::umax, MinProfTC, createStepForVF(Builder, CountTy, VF, UF));
2330 };
2331
2332 TailFoldingStyle Style = Cost->getTailFoldingStyle();
2333 if (Style == TailFoldingStyle::None) {
2334 Value *Step = CreateStep();
2335 ScalarEvolution &SE = *PSE.getSE();
2336 // TODO: Emit unconditional branch to vector preheader instead of
2337 // conditional branch with known condition.
2338 const SCEV *TripCountSCEV = SE.applyLoopGuards(SE.getSCEV(Count), OrigLoop);
2339 // Check if the trip count is < the step.
2340 if (SE.isKnownPredicate(P, TripCountSCEV, SE.getSCEV(Step))) {
2341 // TODO: Ensure step is at most the trip count when determining max VF and
2342 // UF, w/o tail folding.
2343 CheckMinIters = Builder.getTrue();
2345 TripCountSCEV, SE.getSCEV(Step))) {
2346 // Generate the minimum iteration check only if we cannot prove the
2347 // check is known to be true, or known to be false.
2348 CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
2349 } // else step known to be < trip count, use CheckMinIters preset to false.
2350 } else if (VF.isScalable() && !TTI->isVScaleKnownToBeAPowerOfTwo() &&
2353 // vscale is not necessarily a power-of-2, which means we cannot guarantee
2354 // an overflow to zero when updating induction variables and so an
2355 // additional overflow check is required before entering the vector loop.
2356
2357 // Get the maximum unsigned value for the type.
2358 Value *MaxUIntTripCount =
2359 ConstantInt::get(CountTy, cast<IntegerType>(CountTy)->getMask());
2360 Value *LHS = Builder.CreateSub(MaxUIntTripCount, Count);
2361
2362 // Don't execute the vector loop if (UMax - n) < (VF * UF).
2363 CheckMinIters = Builder.CreateICmp(ICmpInst::ICMP_ULT, LHS, CreateStep());
2364 }
2365 return CheckMinIters;
2366}
2367
2368/// Replace \p VPBB with a VPIRBasicBlock wrapping \p IRBB. All recipes from \p
2369/// VPBB are moved to the end of the newly created VPIRBasicBlock. All
2370/// predecessors and successors of VPBB, if any, are rewired to the new
2371/// VPIRBasicBlock. If \p VPBB may be unreachable, \p Plan must be passed.
2373 BasicBlock *IRBB,
2374 VPlan *Plan = nullptr) {
2375 if (!Plan)
2376 Plan = VPBB->getPlan();
2377 VPIRBasicBlock *IRVPBB = Plan->createVPIRBasicBlock(IRBB);
2378 auto IP = IRVPBB->begin();
2379 for (auto &R : make_early_inc_range(VPBB->phis()))
2380 R.moveBefore(*IRVPBB, IP);
2381
2382 for (auto &R :
2384 R.moveBefore(*IRVPBB, IRVPBB->end());
2385
2386 VPBlockUtils::reassociateBlocks(VPBB, IRVPBB);
2387 // VPBB is now dead and will be cleaned up when the plan gets destroyed.
2388 return IRVPBB;
2389}
2390
2392 BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
2393 assert(VectorPH && "Invalid loop structure");
2394 assert((OrigLoop->getUniqueLatchExitBlock() ||
2395 Cost->requiresScalarEpilogue(VF.isVector())) &&
2396 "loops not exiting via the latch without required epilogue?");
2397
2398 // NOTE: The Plan's scalar preheader VPBB isn't replaced with a VPIRBasicBlock
2399 // wrapping the newly created scalar preheader here at the moment, because the
2400 // Plan's scalar preheader may be unreachable at this point. Instead it is
2401 // replaced in executePlan.
2402 return SplitBlock(VectorPH, VectorPH->getTerminator(), DT, LI, nullptr,
2403 Twine(Prefix) + "scalar.ph");
2404}
2405
2406/// Return the expanded step for \p ID using \p ExpandedSCEVs to look up SCEV
2407/// expansion results.
2409 const SCEV2ValueTy &ExpandedSCEVs) {
2410 const SCEV *Step = ID.getStep();
2411 if (auto *C = dyn_cast<SCEVConstant>(Step))
2412 return C->getValue();
2413 if (auto *U = dyn_cast<SCEVUnknown>(Step))
2414 return U->getValue();
2415 Value *V = ExpandedSCEVs.lookup(Step);
2416 assert(V && "SCEV must be expanded at this point");
2417 return V;
2418}
2419
2420/// Knowing that loop \p L executes a single vector iteration, add instructions
2421/// that will get simplified and thus should not have any cost to \p
2422/// InstsToIgnore.
2425 SmallPtrSetImpl<Instruction *> &InstsToIgnore) {
2426 auto *Cmp = L->getLatchCmpInst();
2427 if (Cmp)
2428 InstsToIgnore.insert(Cmp);
2429 for (const auto &KV : IL) {
2430 // Extract the key by hand so that it can be used in the lambda below. Note
2431 // that captured structured bindings are a C++20 extension.
2432 const PHINode *IV = KV.first;
2433
2434 // Get next iteration value of the induction variable.
2435 Instruction *IVInst =
2436 cast<Instruction>(IV->getIncomingValueForBlock(L->getLoopLatch()));
2437 if (all_of(IVInst->users(),
2438 [&](const User *U) { return U == IV || U == Cmp; }))
2439 InstsToIgnore.insert(IVInst);
2440 }
2441}
2442
2444 // Create a new IR basic block for the scalar preheader.
2445 BasicBlock *ScalarPH = createScalarPreheader("");
2446 return ScalarPH->getSinglePredecessor();
2447}
2448
2449namespace {
2450
2451struct CSEDenseMapInfo {
2452 static bool canHandle(const Instruction *I) {
2455 }
2456
2457 static inline Instruction *getEmptyKey() {
2459 }
2460
2461 static inline Instruction *getTombstoneKey() {
2462 return DenseMapInfo<Instruction *>::getTombstoneKey();
2463 }
2464
2465 static unsigned getHashValue(const Instruction *I) {
2466 assert(canHandle(I) && "Unknown instruction!");
2467 return hash_combine(I->getOpcode(),
2468 hash_combine_range(I->operand_values()));
2469 }
2470
2471 static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
2472 if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
2473 LHS == getTombstoneKey() || RHS == getTombstoneKey())
2474 return LHS == RHS;
2475 return LHS->isIdenticalTo(RHS);
2476 }
2477};
2478
2479} // end anonymous namespace
2480
2481/// FIXME: This legacy common-subexpression-elimination routine is scheduled for
2482/// removal, in favor of the VPlan-based one.
2483static void legacyCSE(BasicBlock *BB) {
2484 // Perform simple cse.
2486 for (Instruction &In : llvm::make_early_inc_range(*BB)) {
2487 if (!CSEDenseMapInfo::canHandle(&In))
2488 continue;
2489
2490 // Check if we can replace this instruction with any of the
2491 // visited instructions.
2492 if (Instruction *V = CSEMap.lookup(&In)) {
2493 In.replaceAllUsesWith(V);
2494 In.eraseFromParent();
2495 continue;
2496 }
2497
2498 CSEMap[&In] = &In;
2499 }
2500}
2501
2502/// This function attempts to return a value that represents the ElementCount
2503/// at runtime. For fixed-width VFs we know this precisely at compile
2504/// time, but for scalable VFs we calculate it based on an estimate of the
2505/// vscale value.
2507 std::optional<unsigned> VScale) {
2508 unsigned EstimatedVF = VF.getKnownMinValue();
2509 if (VF.isScalable())
2510 if (VScale)
2511 EstimatedVF *= *VScale;
2512 assert(EstimatedVF >= 1 && "Estimated VF shouldn't be less than 1");
2513 return EstimatedVF;
2514}
2515
2518 ElementCount VF) const {
2519 // We only need to calculate a cost if the VF is scalar; for actual vectors
2520 // we should already have a pre-calculated cost at each VF.
2521 if (!VF.isScalar())
2522 return getCallWideningDecision(CI, VF).Cost;
2523
2524 Type *RetTy = CI->getType();
2526 if (auto RedCost = getReductionPatternCost(CI, VF, RetTy))
2527 return *RedCost;
2528
2530 for (auto &ArgOp : CI->args())
2531 Tys.push_back(ArgOp->getType());
2532
2533 InstructionCost ScalarCallCost =
2534 TTI.getCallInstrCost(CI->getCalledFunction(), RetTy, Tys, CostKind);
2535
2536 // If this is an intrinsic we may have a lower cost for it.
2539 return std::min(ScalarCallCost, IntrinsicCost);
2540 }
2541 return ScalarCallCost;
2542}
2543
2545 if (VF.isScalar() || !canVectorizeTy(Ty))
2546 return Ty;
2547 return toVectorizedTy(Ty, VF);
2548}
2549
2552 ElementCount VF) const {
2554 assert(ID && "Expected intrinsic call!");
2555 Type *RetTy = maybeVectorizeType(CI->getType(), VF);
2556 FastMathFlags FMF;
2557 if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
2558 FMF = FPMO->getFastMathFlags();
2559
2562 SmallVector<Type *> ParamTys;
2563 std::transform(FTy->param_begin(), FTy->param_end(),
2564 std::back_inserter(ParamTys),
2565 [&](Type *Ty) { return maybeVectorizeType(Ty, VF); });
2566
2567 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
2570 return TTI.getIntrinsicInstrCost(CostAttrs, CostKind);
2571}
2572
2574 // Fix widened non-induction PHIs by setting up the PHI operands.
2575 fixNonInductionPHIs(State);
2576
2577 // Don't apply optimizations below when no (vector) loop remains, as they all
2578 // require one at the moment.
2579 VPBasicBlock *HeaderVPBB =
2580 vputils::getFirstLoopHeader(*State.Plan, State.VPDT);
2581 if (!HeaderVPBB)
2582 return;
2583
2584 BasicBlock *HeaderBB = State.CFG.VPBB2IRBB[HeaderVPBB];
2585
2586 // Remove redundant induction instructions.
2587 legacyCSE(HeaderBB);
2588}
2589
2591 auto Iter = vp_depth_first_shallow(Plan.getEntry());
2593 for (VPRecipeBase &P : VPBB->phis()) {
2595 if (!VPPhi)
2596 continue;
2597 PHINode *NewPhi = cast<PHINode>(State.get(VPPhi));
2598 // Make sure the builder has a valid insert point.
2599 Builder.SetInsertPoint(NewPhi);
2600 for (const auto &[Inc, VPBB] : VPPhi->incoming_values_and_blocks())
2601 NewPhi->addIncoming(State.get(Inc), State.CFG.VPBB2IRBB[VPBB]);
2602 }
2603 }
2604}
2605
2606void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
2607 // We should not collect Scalars more than once per VF. Right now, this
2608 // function is called from collectUniformsAndScalars(), which already does
2609 // this check. Collecting Scalars for VF=1 does not make any sense.
2610 assert(VF.isVector() && !Scalars.contains(VF) &&
2611 "This function should not be visited twice for the same VF");
2612
2613 // This avoids any chances of creating a REPLICATE recipe during planning
2614 // since that would result in generation of scalarized code during execution,
2615 // which is not supported for scalable vectors.
2616 if (VF.isScalable()) {
2617 Scalars[VF].insert_range(Uniforms[VF]);
2618 return;
2619 }
2620
2622
2623 // These sets are used to seed the analysis with pointers used by memory
2624 // accesses that will remain scalar.
2626 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
2627 auto *Latch = TheLoop->getLoopLatch();
2628
2629 // A helper that returns true if the use of Ptr by MemAccess will be scalar.
2630 // The pointer operands of loads and stores will be scalar as long as the
2631 // memory access is not a gather or scatter operation. The value operand of a
2632 // store will remain scalar if the store is scalarized.
2633 auto IsScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
2634 InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
2635 assert(WideningDecision != CM_Unknown &&
2636 "Widening decision should be ready at this moment");
2637 if (auto *Store = dyn_cast<StoreInst>(MemAccess))
2638 if (Ptr == Store->getValueOperand())
2639 return WideningDecision == CM_Scalarize;
2640 assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
2641 "Ptr is neither a value or pointer operand");
2642 return WideningDecision != CM_GatherScatter;
2643 };
2644
2645 // A helper that returns true if the given value is a getelementptr
2646 // instruction contained in the loop.
2647 auto IsLoopVaryingGEP = [&](Value *V) {
2648 return isa<GetElementPtrInst>(V) && !TheLoop->isLoopInvariant(V);
2649 };
2650
2651 // A helper that evaluates a memory access's use of a pointer. If the use will
2652 // be a scalar use and the pointer is only used by memory accesses, we place
2653 // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
2654 // PossibleNonScalarPtrs.
2655 auto EvaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
2656 // We only care about bitcast and getelementptr instructions contained in
2657 // the loop.
2658 if (!IsLoopVaryingGEP(Ptr))
2659 return;
2660
2661 // If the pointer has already been identified as scalar (e.g., if it was
2662 // also identified as uniform), there's nothing to do.
2663 auto *I = cast<Instruction>(Ptr);
2664 if (Worklist.count(I))
2665 return;
2666
2667 // If the use of the pointer will be a scalar use, and all users of the
2668 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
2669 // place the pointer in PossibleNonScalarPtrs.
2670 if (IsScalarUse(MemAccess, Ptr) &&
2672 ScalarPtrs.insert(I);
2673 else
2674 PossibleNonScalarPtrs.insert(I);
2675 };
2676
2677 // We seed the scalars analysis with three classes of instructions: (1)
2678 // instructions marked uniform-after-vectorization and (2) bitcast,
2679 // getelementptr and (pointer) phi instructions used by memory accesses
2680 // requiring a scalar use.
2681 //
2682 // (1) Add to the worklist all instructions that have been identified as
2683 // uniform-after-vectorization.
2684 Worklist.insert_range(Uniforms[VF]);
2685
2686 // (2) Add to the worklist all bitcast and getelementptr instructions used by
2687 // memory accesses requiring a scalar use. The pointer operands of loads and
2688 // stores will be scalar unless the operation is a gather or scatter.
2689 // The value operand of a store will remain scalar if the store is scalarized.
2690 for (auto *BB : TheLoop->blocks())
2691 for (auto &I : *BB) {
2692 if (auto *Load = dyn_cast<LoadInst>(&I)) {
2693 EvaluatePtrUse(Load, Load->getPointerOperand());
2694 } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
2695 EvaluatePtrUse(Store, Store->getPointerOperand());
2696 EvaluatePtrUse(Store, Store->getValueOperand());
2697 }
2698 }
2699 for (auto *I : ScalarPtrs)
2700 if (!PossibleNonScalarPtrs.count(I)) {
2701 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
2702 Worklist.insert(I);
2703 }
2704
2705 // Insert the forced scalars.
2706 // FIXME: Currently VPWidenPHIRecipe() often creates a dead vector
2707 // induction variable when the PHI user is scalarized.
2708 auto ForcedScalar = ForcedScalars.find(VF);
2709 if (ForcedScalar != ForcedScalars.end())
2710 for (auto *I : ForcedScalar->second) {
2711 LLVM_DEBUG(dbgs() << "LV: Found (forced) scalar instruction: " << *I << "\n");
2712 Worklist.insert(I);
2713 }
2714
2715 // Expand the worklist by looking through any bitcasts and getelementptr
2716 // instructions we've already identified as scalar. This is similar to the
2717 // expansion step in collectLoopUniforms(); however, here we're only
2718 // expanding to include additional bitcasts and getelementptr instructions.
2719 unsigned Idx = 0;
2720 while (Idx != Worklist.size()) {
2721 Instruction *Dst = Worklist[Idx++];
2722 if (!IsLoopVaryingGEP(Dst->getOperand(0)))
2723 continue;
2724 auto *Src = cast<Instruction>(Dst->getOperand(0));
2725 if (llvm::all_of(Src->users(), [&](User *U) -> bool {
2726 auto *J = cast<Instruction>(U);
2727 return !TheLoop->contains(J) || Worklist.count(J) ||
2728 ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
2729 IsScalarUse(J, Src));
2730 })) {
2731 Worklist.insert(Src);
2732 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
2733 }
2734 }
2735
2736 // An induction variable will remain scalar if all users of the induction
2737 // variable and induction variable update remain scalar.
2738 for (const auto &Induction : Legal->getInductionVars()) {
2739 auto *Ind = Induction.first;
2740 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
2741
2742 // If tail-folding is applied, the primary induction variable will be used
2743 // to feed a vector compare.
2744 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
2745 continue;
2746
2747 // Returns true if \p Indvar is a pointer induction that is used directly by
2748 // load/store instruction \p I.
2749 auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar,
2750 Instruction *I) {
2751 return Induction.second.getKind() ==
2754 Indvar == getLoadStorePointerOperand(I) && IsScalarUse(I, Indvar);
2755 };
2756
2757 // Determine if all users of the induction variable are scalar after
2758 // vectorization.
2759 bool ScalarInd = all_of(Ind->users(), [&](User *U) -> bool {
2760 auto *I = cast<Instruction>(U);
2761 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
2762 IsDirectLoadStoreFromPtrIndvar(Ind, I);
2763 });
2764 if (!ScalarInd)
2765 continue;
2766
2767 // If the induction variable update is a fixed-order recurrence, neither the
2768 // induction variable or its update should be marked scalar after
2769 // vectorization.
2770 auto *IndUpdatePhi = dyn_cast<PHINode>(IndUpdate);
2771 if (IndUpdatePhi && Legal->isFixedOrderRecurrence(IndUpdatePhi))
2772 continue;
2773
2774 // Determine if all users of the induction variable update instruction are
2775 // scalar after vectorization.
2776 bool ScalarIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
2777 auto *I = cast<Instruction>(U);
2778 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
2779 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
2780 });
2781 if (!ScalarIndUpdate)
2782 continue;
2783
2784 // The induction variable and its update instruction will remain scalar.
2785 Worklist.insert(Ind);
2786 Worklist.insert(IndUpdate);
2787 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
2788 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
2789 << "\n");
2790 }
2791
2792 Scalars[VF].insert_range(Worklist);
2793}
2794
2796 Instruction *I, ElementCount VF) const {
2797 if (!isPredicatedInst(I))
2798 return false;
2799
2800 // Do we have a non-scalar lowering for this predicated
2801 // instruction? No - it is scalar with predication.
2802 switch(I->getOpcode()) {
2803 default:
2804 return true;
2805 case Instruction::Call:
2806 if (VF.isScalar())
2807 return true;
2809 case Instruction::Load:
2810 case Instruction::Store: {
2811 auto *Ptr = getLoadStorePointerOperand(I);
2812 auto *Ty = getLoadStoreType(I);
2813 unsigned AS = getLoadStoreAddressSpace(I);
2814 Type *VTy = Ty;
2815 if (VF.isVector())
2816 VTy = VectorType::get(Ty, VF);
2817 const Align Alignment = getLoadStoreAlignment(I);
2818 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment, AS) ||
2819 TTI.isLegalMaskedGather(VTy, Alignment))
2820 : !(isLegalMaskedStore(Ty, Ptr, Alignment, AS) ||
2821 TTI.isLegalMaskedScatter(VTy, Alignment));
2822 }
2823 case Instruction::UDiv:
2824 case Instruction::SDiv:
2825 case Instruction::SRem:
2826 case Instruction::URem: {
2827 // We have the option to use the safe-divisor idiom to avoid predication.
2828 // The cost based decision here will always select safe-divisor for
2829 // scalable vectors as scalarization isn't legal.
2830 const auto [ScalarCost, SafeDivisorCost] = getDivRemSpeculationCost(I, VF);
2831 return isDivRemScalarWithPredication(ScalarCost, SafeDivisorCost);
2832 }
2833 }
2834}
2835
2836// TODO: Fold into LoopVectorizationLegality::isMaskRequired.
2838 // TODO: We can use the loop-preheader as context point here and get
2839 // context sensitive reasoning for isSafeToSpeculativelyExecute.
2841 (isa<LoadInst, StoreInst, CallInst>(I) && !Legal->isMaskRequired(I)) ||
2843 return false;
2844
2845 // If the instruction was executed conditionally in the original scalar loop,
2846 // predication is needed with a mask whose lanes are all possibly inactive.
2847 if (Legal->blockNeedsPredication(I->getParent()))
2848 return true;
2849
2850 // If we're not folding the tail by masking, predication is unnecessary.
2851 if (!foldTailByMasking())
2852 return false;
2853
2854 // All that remain are instructions with side-effects originally executed in
2855 // the loop unconditionally, but now execute under a tail-fold mask (only)
2856 // having at least one active lane (the first). If the side-effects of the
2857 // instruction are invariant, executing it w/o (the tail-folding) mask is safe
2858 // - it will cause the same side-effects as when masked.
2859 switch(I->getOpcode()) {
2860 default:
2862 "instruction should have been considered by earlier checks");
2863 case Instruction::Call:
2864 // Side-effects of a Call are assumed to be non-invariant, needing a
2865 // (fold-tail) mask.
2866 assert(Legal->isMaskRequired(I) &&
2867 "should have returned earlier for calls not needing a mask");
2868 return true;
2869 case Instruction::Load:
2870 // If the address is loop invariant no predication is needed.
2871 return !Legal->isInvariant(getLoadStorePointerOperand(I));
2872 case Instruction::Store: {
2873 // For stores, we need to prove both speculation safety (which follows from
2874 // the same argument as loads), but also must prove the value being stored
2875 // is correct. The easiest form of the later is to require that all values
2876 // stored are the same.
2877 return !(Legal->isInvariant(getLoadStorePointerOperand(I)) &&
2878 TheLoop->isLoopInvariant(cast<StoreInst>(I)->getValueOperand()));
2879 }
2880 case Instruction::UDiv:
2881 case Instruction::SDiv:
2882 case Instruction::SRem:
2883 case Instruction::URem:
2884 // If the divisor is loop-invariant no predication is needed.
2885 return !Legal->isInvariant(I->getOperand(1));
2886 }
2887}
2888
2889std::pair<InstructionCost, InstructionCost>
2891 ElementCount VF) const {
2892 assert(I->getOpcode() == Instruction::UDiv ||
2893 I->getOpcode() == Instruction::SDiv ||
2894 I->getOpcode() == Instruction::SRem ||
2895 I->getOpcode() == Instruction::URem);
2897
2898 // Scalarization isn't legal for scalable vector types
2899 InstructionCost ScalarizationCost = InstructionCost::getInvalid();
2900 if (!VF.isScalable()) {
2901 // Get the scalarization cost and scale this amount by the probability of
2902 // executing the predicated block. If the instruction is not predicated,
2903 // we fall through to the next case.
2904 ScalarizationCost = 0;
2905
2906 // These instructions have a non-void type, so account for the phi nodes
2907 // that we will create. This cost is likely to be zero. The phi node
2908 // cost, if any, should be scaled by the block probability because it
2909 // models a copy at the end of each predicated block.
2910 ScalarizationCost +=
2911 VF.getFixedValue() * TTI.getCFInstrCost(Instruction::PHI, CostKind);
2912
2913 // The cost of the non-predicated instruction.
2914 ScalarizationCost +=
2915 VF.getFixedValue() *
2916 TTI.getArithmeticInstrCost(I->getOpcode(), I->getType(), CostKind);
2917
2918 // The cost of insertelement and extractelement instructions needed for
2919 // scalarization.
2920 ScalarizationCost += getScalarizationOverhead(I, VF);
2921
2922 // Scale the cost by the probability of executing the predicated blocks.
2923 // This assumes the predicated block for each vector lane is equally
2924 // likely.
2925 ScalarizationCost =
2926 ScalarizationCost / getPredBlockCostDivisor(CostKind, I->getParent());
2927 }
2928
2929 InstructionCost SafeDivisorCost = 0;
2930 auto *VecTy = toVectorTy(I->getType(), VF);
2931 // The cost of the select guard to ensure all lanes are well defined
2932 // after we speculate above any internal control flow.
2933 SafeDivisorCost +=
2934 TTI.getCmpSelInstrCost(Instruction::Select, VecTy,
2935 toVectorTy(Type::getInt1Ty(I->getContext()), VF),
2937
2938 SmallVector<const Value *, 4> Operands(I->operand_values());
2939 SafeDivisorCost += TTI.getArithmeticInstrCost(
2940 I->getOpcode(), VecTy, CostKind,
2941 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
2942 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
2943 Operands, I);
2944 return {ScalarizationCost, SafeDivisorCost};
2945}
2946
2948 Instruction *I, ElementCount VF) const {
2949 assert(isAccessInterleaved(I) && "Expecting interleaved access.");
2951 "Decision should not be set yet.");
2952 auto *Group = getInterleavedAccessGroup(I);
2953 assert(Group && "Must have a group.");
2954 unsigned InterleaveFactor = Group->getFactor();
2955
2956 // If the instruction's allocated size doesn't equal its type size, it
2957 // requires padding and will be scalarized.
2958 auto &DL = I->getDataLayout();
2959 auto *ScalarTy = getLoadStoreType(I);
2960 if (hasIrregularType(ScalarTy, DL))
2961 return false;
2962
2963 // For scalable vectors, the interleave factors must be <= 8 since we require
2964 // the (de)interleaveN intrinsics instead of shufflevectors.
2965 if (VF.isScalable() && InterleaveFactor > 8)
2966 return false;
2967
2968 // If the group involves a non-integral pointer, we may not be able to
2969 // losslessly cast all values to a common type.
2970 bool ScalarNI = DL.isNonIntegralPointerType(ScalarTy);
2971 for (unsigned Idx = 0; Idx < InterleaveFactor; Idx++) {
2972 Instruction *Member = Group->getMember(Idx);
2973 if (!Member)
2974 continue;
2975 auto *MemberTy = getLoadStoreType(Member);
2976 bool MemberNI = DL.isNonIntegralPointerType(MemberTy);
2977 // Don't coerce non-integral pointers to integers or vice versa.
2978 if (MemberNI != ScalarNI)
2979 // TODO: Consider adding special nullptr value case here
2980 return false;
2981 if (MemberNI && ScalarNI &&
2982 ScalarTy->getPointerAddressSpace() !=
2983 MemberTy->getPointerAddressSpace())
2984 return false;
2985 }
2986
2987 // Check if masking is required.
2988 // A Group may need masking for one of two reasons: it resides in a block that
2989 // needs predication, or it was decided to use masking to deal with gaps
2990 // (either a gap at the end of a load-access that may result in a speculative
2991 // load, or any gaps in a store-access).
2992 bool PredicatedAccessRequiresMasking =
2993 blockNeedsPredicationForAnyReason(I->getParent()) &&
2994 Legal->isMaskRequired(I);
2995 bool LoadAccessWithGapsRequiresEpilogMasking =
2996 isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
2998 bool StoreAccessWithGapsRequiresMasking =
2999 isa<StoreInst>(I) && !Group->isFull();
3000 if (!PredicatedAccessRequiresMasking &&
3001 !LoadAccessWithGapsRequiresEpilogMasking &&
3002 !StoreAccessWithGapsRequiresMasking)
3003 return true;
3004
3005 // If masked interleaving is required, we expect that the user/target had
3006 // enabled it, because otherwise it either wouldn't have been created or
3007 // it should have been invalidated by the CostModel.
3009 "Masked interleave-groups for predicated accesses are not enabled.");
3010
3011 if (Group->isReverse())
3012 return false;
3013
3014 // TODO: Support interleaved access that requires a gap mask for scalable VFs.
3015 bool NeedsMaskForGaps = LoadAccessWithGapsRequiresEpilogMasking ||
3016 StoreAccessWithGapsRequiresMasking;
3017 if (VF.isScalable() && NeedsMaskForGaps)
3018 return false;
3019
3020 auto *Ty = getLoadStoreType(I);
3021 const Align Alignment = getLoadStoreAlignment(I);
3022 unsigned AS = getLoadStoreAddressSpace(I);
3023 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment, AS)
3024 : TTI.isLegalMaskedStore(Ty, Alignment, AS);
3025}
3026
3028 Instruction *I, ElementCount VF) {
3029 // Get and ensure we have a valid memory instruction.
3030 assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction");
3031
3032 auto *Ptr = getLoadStorePointerOperand(I);
3033 auto *ScalarTy = getLoadStoreType(I);
3034
3035 // In order to be widened, the pointer should be consecutive, first of all.
3036 if (!Legal->isConsecutivePtr(ScalarTy, Ptr))
3037 return false;
3038
3039 // If the instruction is a store located in a predicated block, it will be
3040 // scalarized.
3041 if (isScalarWithPredication(I, VF))
3042 return false;
3043
3044 // If the instruction's allocated size doesn't equal it's type size, it
3045 // requires padding and will be scalarized.
3046 auto &DL = I->getDataLayout();
3047 if (hasIrregularType(ScalarTy, DL))
3048 return false;
3049
3050 return true;
3051}
3052
3053void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
3054 // We should not collect Uniforms more than once per VF. Right now,
3055 // this function is called from collectUniformsAndScalars(), which
3056 // already does this check. Collecting Uniforms for VF=1 does not make any
3057 // sense.
3058
3059 assert(VF.isVector() && !Uniforms.contains(VF) &&
3060 "This function should not be visited twice for the same VF");
3061
3062 // Visit the list of Uniforms. If we find no uniform value, we won't
3063 // analyze again. Uniforms.count(VF) will return 1.
3064 Uniforms[VF].clear();
3065
3066 // Now we know that the loop is vectorizable!
3067 // Collect instructions inside the loop that will remain uniform after
3068 // vectorization.
3069
3070 // Global values, params and instructions outside of current loop are out of
3071 // scope.
3072 auto IsOutOfScope = [&](Value *V) -> bool {
3074 return (!I || !TheLoop->contains(I));
3075 };
3076
3077 // Worklist containing uniform instructions demanding lane 0.
3078 SetVector<Instruction *> Worklist;
3079
3080 // Add uniform instructions demanding lane 0 to the worklist. Instructions
3081 // that require predication must not be considered uniform after
3082 // vectorization, because that would create an erroneous replicating region
3083 // where only a single instance out of VF should be formed.
3084 auto AddToWorklistIfAllowed = [&](Instruction *I) -> void {
3085 if (IsOutOfScope(I)) {
3086 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
3087 << *I << "\n");
3088 return;
3089 }
3090 if (isPredicatedInst(I)) {
3091 LLVM_DEBUG(
3092 dbgs() << "LV: Found not uniform due to requiring predication: " << *I
3093 << "\n");
3094 return;
3095 }
3096 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
3097 Worklist.insert(I);
3098 };
3099
3100 // Start with the conditional branches exiting the loop. If the branch
3101 // condition is an instruction contained in the loop that is only used by the
3102 // branch, it is uniform. Note conditions from uncountable early exits are not
3103 // uniform.
3105 TheLoop->getExitingBlocks(Exiting);
3106 for (BasicBlock *E : Exiting) {
3107 if (Legal->hasUncountableEarlyExit() && TheLoop->getLoopLatch() != E)
3108 continue;
3109 auto *Cmp = dyn_cast<Instruction>(E->getTerminator()->getOperand(0));
3110 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
3111 AddToWorklistIfAllowed(Cmp);
3112 }
3113
3114 auto PrevVF = VF.divideCoefficientBy(2);
3115 // Return true if all lanes perform the same memory operation, and we can
3116 // thus choose to execute only one.
3117 auto IsUniformMemOpUse = [&](Instruction *I) {
3118 // If the value was already known to not be uniform for the previous
3119 // (smaller VF), it cannot be uniform for the larger VF.
3120 if (PrevVF.isVector()) {
3121 auto Iter = Uniforms.find(PrevVF);
3122 if (Iter != Uniforms.end() && !Iter->second.contains(I))
3123 return false;
3124 }
3125 if (!Legal->isUniformMemOp(*I, VF))
3126 return false;
3127 if (isa<LoadInst>(I))
3128 // Loading the same address always produces the same result - at least
3129 // assuming aliasing and ordering which have already been checked.
3130 return true;
3131 // Storing the same value on every iteration.
3132 return TheLoop->isLoopInvariant(cast<StoreInst>(I)->getValueOperand());
3133 };
3134
3135 auto IsUniformDecision = [&](Instruction *I, ElementCount VF) {
3136 InstWidening WideningDecision = getWideningDecision(I, VF);
3137 assert(WideningDecision != CM_Unknown &&
3138 "Widening decision should be ready at this moment");
3139
3140 if (IsUniformMemOpUse(I))
3141 return true;
3142
3143 return (WideningDecision == CM_Widen ||
3144 WideningDecision == CM_Widen_Reverse ||
3145 WideningDecision == CM_Interleave);
3146 };
3147
3148 // Returns true if Ptr is the pointer operand of a memory access instruction
3149 // I, I is known to not require scalarization, and the pointer is not also
3150 // stored.
3151 auto IsVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
3152 if (isa<StoreInst>(I) && I->getOperand(0) == Ptr)
3153 return false;
3154 return getLoadStorePointerOperand(I) == Ptr &&
3155 (IsUniformDecision(I, VF) || Legal->isInvariant(Ptr));
3156 };
3157
3158 // Holds a list of values which are known to have at least one uniform use.
3159 // Note that there may be other uses which aren't uniform. A "uniform use"
3160 // here is something which only demands lane 0 of the unrolled iterations;
3161 // it does not imply that all lanes produce the same value (e.g. this is not
3162 // the usual meaning of uniform)
3163 SetVector<Value *> HasUniformUse;
3164
3165 // Scan the loop for instructions which are either a) known to have only
3166 // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
3167 for (auto *BB : TheLoop->blocks())
3168 for (auto &I : *BB) {
3169 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
3170 switch (II->getIntrinsicID()) {
3171 case Intrinsic::sideeffect:
3172 case Intrinsic::experimental_noalias_scope_decl:
3173 case Intrinsic::assume:
3174 case Intrinsic::lifetime_start:
3175 case Intrinsic::lifetime_end:
3176 if (TheLoop->hasLoopInvariantOperands(&I))
3177 AddToWorklistIfAllowed(&I);
3178 break;
3179 default:
3180 break;
3181 }
3182 }
3183
3184 if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
3185 if (IsOutOfScope(EVI->getAggregateOperand())) {
3186 AddToWorklistIfAllowed(EVI);
3187 continue;
3188 }
3189 // Only ExtractValue instructions where the aggregate value comes from a
3190 // call are allowed to be non-uniform.
3191 assert(isa<CallInst>(EVI->getAggregateOperand()) &&
3192 "Expected aggregate value to be call return value");
3193 }
3194
3195 // If there's no pointer operand, there's nothing to do.
3196 auto *Ptr = getLoadStorePointerOperand(&I);
3197 if (!Ptr)
3198 continue;
3199
3200 // If the pointer can be proven to be uniform, always add it to the
3201 // worklist.
3202 if (isa<Instruction>(Ptr) && Legal->isUniform(Ptr, VF))
3203 AddToWorklistIfAllowed(cast<Instruction>(Ptr));
3204
3205 if (IsUniformMemOpUse(&I))
3206 AddToWorklistIfAllowed(&I);
3207
3208 if (IsVectorizedMemAccessUse(&I, Ptr))
3209 HasUniformUse.insert(Ptr);
3210 }
3211
3212 // Add to the worklist any operands which have *only* uniform (e.g. lane 0
3213 // demanding) users. Since loops are assumed to be in LCSSA form, this
3214 // disallows uses outside the loop as well.
3215 for (auto *V : HasUniformUse) {
3216 if (IsOutOfScope(V))
3217 continue;
3218 auto *I = cast<Instruction>(V);
3219 bool UsersAreMemAccesses = all_of(I->users(), [&](User *U) -> bool {
3220 auto *UI = cast<Instruction>(U);
3221 return TheLoop->contains(UI) && IsVectorizedMemAccessUse(UI, V);
3222 });
3223 if (UsersAreMemAccesses)
3224 AddToWorklistIfAllowed(I);
3225 }
3226
3227 // Expand Worklist in topological order: whenever a new instruction
3228 // is added , its users should be already inside Worklist. It ensures
3229 // a uniform instruction will only be used by uniform instructions.
3230 unsigned Idx = 0;
3231 while (Idx != Worklist.size()) {
3232 Instruction *I = Worklist[Idx++];
3233
3234 for (auto *OV : I->operand_values()) {
3235 // isOutOfScope operands cannot be uniform instructions.
3236 if (IsOutOfScope(OV))
3237 continue;
3238 // First order recurrence Phi's should typically be considered
3239 // non-uniform.
3240 auto *OP = dyn_cast<PHINode>(OV);
3241 if (OP && Legal->isFixedOrderRecurrence(OP))
3242 continue;
3243 // If all the users of the operand are uniform, then add the
3244 // operand into the uniform worklist.
3245 auto *OI = cast<Instruction>(OV);
3246 if (llvm::all_of(OI->users(), [&](User *U) -> bool {
3247 auto *J = cast<Instruction>(U);
3248 return Worklist.count(J) || IsVectorizedMemAccessUse(J, OI);
3249 }))
3250 AddToWorklistIfAllowed(OI);
3251 }
3252 }
3253
3254 // For an instruction to be added into Worklist above, all its users inside
3255 // the loop should also be in Worklist. However, this condition cannot be
3256 // true for phi nodes that form a cyclic dependence. We must process phi
3257 // nodes separately. An induction variable will remain uniform if all users
3258 // of the induction variable and induction variable update remain uniform.
3259 // The code below handles both pointer and non-pointer induction variables.
3260 BasicBlock *Latch = TheLoop->getLoopLatch();
3261 for (const auto &Induction : Legal->getInductionVars()) {
3262 auto *Ind = Induction.first;
3263 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
3264
3265 // Determine if all users of the induction variable are uniform after
3266 // vectorization.
3267 bool UniformInd = all_of(Ind->users(), [&](User *U) -> bool {
3268 auto *I = cast<Instruction>(U);
3269 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
3270 IsVectorizedMemAccessUse(I, Ind);
3271 });
3272 if (!UniformInd)
3273 continue;
3274
3275 // Determine if all users of the induction variable update instruction are
3276 // uniform after vectorization.
3277 bool UniformIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
3278 auto *I = cast<Instruction>(U);
3279 return I == Ind || Worklist.count(I) ||
3280 IsVectorizedMemAccessUse(I, IndUpdate);
3281 });
3282 if (!UniformIndUpdate)
3283 continue;
3284
3285 // The induction variable and its update instruction will remain uniform.
3286 AddToWorklistIfAllowed(Ind);
3287 AddToWorklistIfAllowed(IndUpdate);
3288 }
3289
3290 Uniforms[VF].insert_range(Worklist);
3291}
3292
3294 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
3295
3296 if (Legal->getRuntimePointerChecking()->Need) {
3297 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
3298 "runtime pointer checks needed. Enable vectorization of this "
3299 "loop with '#pragma clang loop vectorize(enable)' when "
3300 "compiling with -Os/-Oz",
3301 "CantVersionLoopWithOptForSize", ORE, TheLoop);
3302 return true;
3303 }
3304
3305 if (!PSE.getPredicate().isAlwaysTrue()) {
3306 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
3307 "runtime SCEV checks needed. Enable vectorization of this "
3308 "loop with '#pragma clang loop vectorize(enable)' when "
3309 "compiling with -Os/-Oz",
3310 "CantVersionLoopWithOptForSize", ORE, TheLoop);
3311 return true;
3312 }
3313
3314 // FIXME: Avoid specializing for stride==1 instead of bailing out.
3315 if (!Legal->getLAI()->getSymbolicStrides().empty()) {
3316 reportVectorizationFailure("Runtime stride check for small trip count",
3317 "runtime stride == 1 checks needed. Enable vectorization of "
3318 "this loop without such check by compiling with -Os/-Oz",
3319 "CantVersionLoopWithOptForSize", ORE, TheLoop);
3320 return true;
3321 }
3322
3323 return false;
3324}
3325
3326bool LoopVectorizationCostModel::isScalableVectorizationAllowed() {
3327 if (IsScalableVectorizationAllowed)
3328 return *IsScalableVectorizationAllowed;
3329
3330 IsScalableVectorizationAllowed = false;
3331 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors)
3332 return false;
3333
3334 if (Hints->isScalableVectorizationDisabled()) {
3335 reportVectorizationInfo("Scalable vectorization is explicitly disabled",
3336 "ScalableVectorizationDisabled", ORE, TheLoop);
3337 return false;
3338 }
3339
3340 LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n");
3341
3342 auto MaxScalableVF = ElementCount::getScalable(
3343 std::numeric_limits<ElementCount::ScalarTy>::max());
3344
3345 // Test that the loop-vectorizer can legalize all operations for this MaxVF.
3346 // FIXME: While for scalable vectors this is currently sufficient, this should
3347 // be replaced by a more detailed mechanism that filters out specific VFs,
3348 // instead of invalidating vectorization for a whole set of VFs based on the
3349 // MaxVF.
3350
3351 // Disable scalable vectorization if the loop contains unsupported reductions.
3352 if (!canVectorizeReductions(MaxScalableVF)) {
3354 "Scalable vectorization not supported for the reduction "
3355 "operations found in this loop.",
3356 "ScalableVFUnfeasible", ORE, TheLoop);
3357 return false;
3358 }
3359
3360 // Disable scalable vectorization if the loop contains any instructions
3361 // with element types not supported for scalable vectors.
3362 if (any_of(ElementTypesInLoop, [&](Type *Ty) {
3363 return !Ty->isVoidTy() &&
3365 })) {
3366 reportVectorizationInfo("Scalable vectorization is not supported "
3367 "for all element types found in this loop.",
3368 "ScalableVFUnfeasible", ORE, TheLoop);
3369 return false;
3370 }
3371
3372 if (!Legal->isSafeForAnyVectorWidth() && !getMaxVScale(*TheFunction, TTI)) {
3373 reportVectorizationInfo("The target does not provide maximum vscale value "
3374 "for safe distance analysis.",
3375 "ScalableVFUnfeasible", ORE, TheLoop);
3376 return false;
3377 }
3378
3379 IsScalableVectorizationAllowed = true;
3380 return true;
3381}
3382
3383ElementCount
3384LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
3385 if (!isScalableVectorizationAllowed())
3386 return ElementCount::getScalable(0);
3387
3388 auto MaxScalableVF = ElementCount::getScalable(
3389 std::numeric_limits<ElementCount::ScalarTy>::max());
3390 if (Legal->isSafeForAnyVectorWidth())
3391 return MaxScalableVF;
3392
3393 std::optional<unsigned> MaxVScale = getMaxVScale(*TheFunction, TTI);
3394 // Limit MaxScalableVF by the maximum safe dependence distance.
3395 MaxScalableVF = ElementCount::getScalable(MaxSafeElements / *MaxVScale);
3396
3397 if (!MaxScalableVF)
3399 "Max legal vector width too small, scalable vectorization "
3400 "unfeasible.",
3401 "ScalableVFUnfeasible", ORE, TheLoop);
3402
3403 return MaxScalableVF;
3404}
3405
3406FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF(
3407 unsigned MaxTripCount, ElementCount UserVF, bool FoldTailByMasking) {
3408 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
3409 unsigned SmallestType, WidestType;
3410 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
3411
3412 // Get the maximum safe dependence distance in bits computed by LAA.
3413 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
3414 // the memory accesses that is most restrictive (involved in the smallest
3415 // dependence distance).
3416 unsigned MaxSafeElementsPowerOf2 =
3417 bit_floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);
3418 if (!Legal->isSafeForAnyStoreLoadForwardDistances()) {
3419 unsigned SLDist = Legal->getMaxStoreLoadForwardSafeDistanceInBits();
3420 MaxSafeElementsPowerOf2 =
3421 std::min(MaxSafeElementsPowerOf2, SLDist / WidestType);
3422 }
3423 auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElementsPowerOf2);
3424 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElementsPowerOf2);
3425
3426 if (!Legal->isSafeForAnyVectorWidth())
3427 this->MaxSafeElements = MaxSafeElementsPowerOf2;
3428
3429 LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
3430 << ".\n");
3431 LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
3432 << ".\n");
3433
3434 // First analyze the UserVF, fall back if the UserVF should be ignored.
3435 if (UserVF) {
3436 auto MaxSafeUserVF =
3437 UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
3438
3439 if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) {
3440 // If `VF=vscale x N` is safe, then so is `VF=N`
3441 if (UserVF.isScalable())
3442 return FixedScalableVFPair(
3443 ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF);
3444
3445 return UserVF;
3446 }
3447
3448 assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
3449
3450 // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
3451 // is better to ignore the hint and let the compiler choose a suitable VF.
3452 if (!UserVF.isScalable()) {
3453 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
3454 << " is unsafe, clamping to max safe VF="
3455 << MaxSafeFixedVF << ".\n");
3456 ORE->emit([&]() {
3457 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
3458 TheLoop->getStartLoc(),
3459 TheLoop->getHeader())
3460 << "User-specified vectorization factor "
3461 << ore::NV("UserVectorizationFactor", UserVF)
3462 << " is unsafe, clamping to maximum safe vectorization factor "
3463 << ore::NV("VectorizationFactor", MaxSafeFixedVF);
3464 });
3465 return MaxSafeFixedVF;
3466 }
3467
3469 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
3470 << " is ignored because scalable vectors are not "
3471 "available.\n");
3472 ORE->emit([&]() {
3473 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
3474 TheLoop->getStartLoc(),
3475 TheLoop->getHeader())
3476 << "User-specified vectorization factor "
3477 << ore::NV("UserVectorizationFactor", UserVF)
3478 << " is ignored because the target does not support scalable "
3479 "vectors. The compiler will pick a more suitable value.";
3480 });
3481 } else {
3482 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
3483 << " is unsafe. Ignoring scalable UserVF.\n");
3484 ORE->emit([&]() {
3485 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
3486 TheLoop->getStartLoc(),
3487 TheLoop->getHeader())
3488 << "User-specified vectorization factor "
3489 << ore::NV("UserVectorizationFactor", UserVF)
3490 << " is unsafe. Ignoring the hint to let the compiler pick a "
3491 "more suitable value.";
3492 });
3493 }
3494 }
3495
3496 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
3497 << " / " << WidestType << " bits.\n");
3498
3499 FixedScalableVFPair Result(ElementCount::getFixed(1),
3501 if (auto MaxVF =
3502 getMaximizedVFForTarget(MaxTripCount, SmallestType, WidestType,
3503 MaxSafeFixedVF, FoldTailByMasking))
3504 Result.FixedVF = MaxVF;
3505
3506 if (auto MaxVF =
3507 getMaximizedVFForTarget(MaxTripCount, SmallestType, WidestType,
3508 MaxSafeScalableVF, FoldTailByMasking))
3509 if (MaxVF.isScalable()) {
3510 Result.ScalableVF = MaxVF;
3511 LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
3512 << "\n");
3513 }
3514
3515 return Result;
3516}
3517
3518FixedScalableVFPair
3520 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
3521 // TODO: It may be useful to do since it's still likely to be dynamically
3522 // uniform if the target can skip.
3524 "Not inserting runtime ptr check for divergent target",
3525 "runtime pointer checks needed. Not enabled for divergent target",
3526 "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
3528 }
3529
3530 ScalarEvolution *SE = PSE.getSE();
3532 unsigned MaxTC = PSE.getSmallConstantMaxTripCount();
3533 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
3534 if (TC != ElementCount::getFixed(MaxTC))
3535 LLVM_DEBUG(dbgs() << "LV: Found maximum trip count: " << MaxTC << '\n');
3536 if (TC.isScalar()) {
3537 reportVectorizationFailure("Single iteration (non) loop",
3538 "loop trip count is one, irrelevant for vectorization",
3539 "SingleIterationLoop", ORE, TheLoop);
3541 }
3542
3543 // If BTC matches the widest induction type and is -1 then the trip count
3544 // computation will wrap to 0 and the vector trip count will be 0. Do not try
3545 // to vectorize.
3546 const SCEV *BTC = SE->getBackedgeTakenCount(TheLoop);
3547 if (!isa<SCEVCouldNotCompute>(BTC) &&
3548 BTC->getType()->getScalarSizeInBits() >=
3549 Legal->getWidestInductionType()->getScalarSizeInBits() &&
3551 SE->getMinusOne(BTC->getType()))) {
3553 "Trip count computation wrapped",
3554 "backedge-taken count is -1, loop trip count wrapped to 0",
3555 "TripCountWrapped", ORE, TheLoop);
3557 }
3558
3559 switch (ScalarEpilogueStatus) {
3561 return computeFeasibleMaxVF(MaxTC, UserVF, false);
3563 [[fallthrough]];
3565 LLVM_DEBUG(
3566 dbgs() << "LV: vector predicate hint/switch found.\n"
3567 << "LV: Not allowing scalar epilogue, creating predicated "
3568 << "vector loop.\n");
3569 break;
3571 // fallthrough as a special case of OptForSize
3573 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
3574 LLVM_DEBUG(
3575 dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
3576 else
3577 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
3578 << "count.\n");
3579
3580 // Bail if runtime checks are required, which are not good when optimising
3581 // for size.
3584
3585 break;
3586 }
3587
3588 // Now try the tail folding
3589
3590 // Invalidate interleave groups that require an epilogue if we can't mask
3591 // the interleave-group.
3593 assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
3594 "No decisions should have been taken at this point");
3595 // Note: There is no need to invalidate any cost modeling decisions here, as
3596 // none were taken so far.
3597 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
3598 }
3599
3600 FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(MaxTC, UserVF, true);
3601
3602 // Avoid tail folding if the trip count is known to be a multiple of any VF
3603 // we choose.
3604 std::optional<unsigned> MaxPowerOf2RuntimeVF =
3605 MaxFactors.FixedVF.getFixedValue();
3606 if (MaxFactors.ScalableVF) {
3607 std::optional<unsigned> MaxVScale = getMaxVScale(*TheFunction, TTI);
3608 if (MaxVScale && TTI.isVScaleKnownToBeAPowerOfTwo()) {
3609 MaxPowerOf2RuntimeVF = std::max<unsigned>(
3610 *MaxPowerOf2RuntimeVF,
3611 *MaxVScale * MaxFactors.ScalableVF.getKnownMinValue());
3612 } else
3613 MaxPowerOf2RuntimeVF = std::nullopt; // Stick with tail-folding for now.
3614 }
3615
3616 auto NoScalarEpilogueNeeded = [this, &UserIC](unsigned MaxVF) {
3617 // Return false if the loop is neither a single-latch-exit loop nor an
3618 // early-exit loop as tail-folding is not supported in that case.
3619 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch() &&
3620 !Legal->hasUncountableEarlyExit())
3621 return false;
3622 unsigned MaxVFtimesIC = UserIC ? MaxVF * UserIC : MaxVF;
3623 ScalarEvolution *SE = PSE.getSE();
3624 // Calling getSymbolicMaxBackedgeTakenCount enables support for loops
3625 // with uncountable exits. For countable loops, the symbolic maximum must
3626 // remain identical to the known back-edge taken count.
3627 const SCEV *BackedgeTakenCount = PSE.getSymbolicMaxBackedgeTakenCount();
3628 assert((Legal->hasUncountableEarlyExit() ||
3629 BackedgeTakenCount == PSE.getBackedgeTakenCount()) &&
3630 "Invalid loop count");
3631 const SCEV *ExitCount = SE->getAddExpr(
3632 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
3633 const SCEV *Rem = SE->getURemExpr(
3634 SE->applyLoopGuards(ExitCount, TheLoop),
3635 SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
3636 return Rem->isZero();
3637 };
3638
3639 if (MaxPowerOf2RuntimeVF > 0u) {
3640 assert((UserVF.isNonZero() || isPowerOf2_32(*MaxPowerOf2RuntimeVF)) &&
3641 "MaxFixedVF must be a power of 2");
3642 if (NoScalarEpilogueNeeded(*MaxPowerOf2RuntimeVF)) {
3643 // Accept MaxFixedVF if we do not have a tail.
3644 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
3645 return MaxFactors;
3646 }
3647 }
3648
3649 auto ExpectedTC = getSmallBestKnownTC(PSE, TheLoop);
3650 if (ExpectedTC && ExpectedTC->isFixed() &&
3651 ExpectedTC->getFixedValue() <=
3652 TTI.getMinTripCountTailFoldingThreshold()) {
3653 if (MaxPowerOf2RuntimeVF > 0u) {
3654 // If we have a low-trip-count, and the fixed-width VF is known to divide
3655 // the trip count but the scalable factor does not, use the fixed-width
3656 // factor in preference to allow the generation of a non-predicated loop.
3657 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedLowTripLoop &&
3658 NoScalarEpilogueNeeded(MaxFactors.FixedVF.getFixedValue())) {
3659 LLVM_DEBUG(dbgs() << "LV: Picking a fixed-width so that no tail will "
3660 "remain for any chosen VF.\n");
3661 MaxFactors.ScalableVF = ElementCount::getScalable(0);
3662 return MaxFactors;
3663 }
3664 }
3665
3667 "The trip count is below the minial threshold value.",
3668 "loop trip count is too low, avoiding vectorization", "LowTripCount",
3669 ORE, TheLoop);
3671 }
3672
3673 // If we don't know the precise trip count, or if the trip count that we
3674 // found modulo the vectorization factor is not zero, try to fold the tail
3675 // by masking.
3676 // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
3677 bool ContainsScalableVF = MaxFactors.ScalableVF.isNonZero();
3678 setTailFoldingStyles(ContainsScalableVF, UserIC);
3679 if (foldTailByMasking()) {
3681 LLVM_DEBUG(
3682 dbgs()
3683 << "LV: tail is folded with EVL, forcing unroll factor to be 1. Will "
3684 "try to generate VP Intrinsics with scalable vector "
3685 "factors only.\n");
3686 // Tail folded loop using VP intrinsics restricts the VF to be scalable
3687 // for now.
3688 // TODO: extend it for fixed vectors, if required.
3689 assert(ContainsScalableVF && "Expected scalable vector factor.");
3690
3691 MaxFactors.FixedVF = ElementCount::getFixed(1);
3692 }
3693 return MaxFactors;
3694 }
3695
3696 // If there was a tail-folding hint/switch, but we can't fold the tail by
3697 // masking, fallback to a vectorization with a scalar epilogue.
3698 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
3699 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
3700 "scalar epilogue instead.\n");
3701 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
3702 return MaxFactors;
3703 }
3704
3705 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
3706 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
3708 }
3709
3710 if (TC.isZero()) {
3712 "unable to calculate the loop count due to complex control flow",
3713 "UnknownLoopCountComplexCFG", ORE, TheLoop);
3715 }
3716
3718 "Cannot optimize for size and vectorize at the same time.",
3719 "cannot optimize for size and vectorize at the same time. "
3720 "Enable vectorization of this loop with '#pragma clang loop "
3721 "vectorize(enable)' when compiling with -Os/-Oz",
3722 "NoTailLoopWithOptForSize", ORE, TheLoop);
3724}
3725
3727 ElementCount VF) {
3728 if (ConsiderRegPressure.getNumOccurrences())
3729 return ConsiderRegPressure;
3730
3731 // TODO: We should eventually consider register pressure for all targets. The
3732 // TTI hook is temporary whilst target-specific issues are being fixed.
3733 if (TTI.shouldConsiderVectorizationRegPressure())
3734 return true;
3735
3736 if (!useMaxBandwidth(VF.isScalable()
3739 return false;
3740 // Only calculate register pressure for VFs enabled by MaxBandwidth.
3742 VF, VF.isScalable() ? MaxPermissibleVFWithoutMaxBW.ScalableVF
3744}
3745
3748 return MaximizeBandwidth || (MaximizeBandwidth.getNumOccurrences() == 0 &&
3749 (TTI.shouldMaximizeVectorBandwidth(RegKind) ||
3751 Legal->hasVectorCallVariants())));
3752}
3753
3754ElementCount LoopVectorizationCostModel::clampVFByMaxTripCount(
3755 ElementCount VF, unsigned MaxTripCount, bool FoldTailByMasking) const {
3756 unsigned EstimatedVF = VF.getKnownMinValue();
3757 if (VF.isScalable() && TheFunction->hasFnAttribute(Attribute::VScaleRange)) {
3758 auto Attr = TheFunction->getFnAttribute(Attribute::VScaleRange);
3759 auto Min = Attr.getVScaleRangeMin();
3760 EstimatedVF *= Min;
3761 }
3762
3763 // When a scalar epilogue is required, at least one iteration of the scalar
3764 // loop has to execute. Adjust MaxTripCount accordingly to avoid picking a
3765 // max VF that results in a dead vector loop.
3766 if (MaxTripCount > 0 && requiresScalarEpilogue(true))
3767 MaxTripCount -= 1;
3768
3769 if (MaxTripCount && MaxTripCount <= EstimatedVF &&
3770 (!FoldTailByMasking || isPowerOf2_32(MaxTripCount))) {
3771 // If upper bound loop trip count (TC) is known at compile time there is no
3772 // point in choosing VF greater than TC (as done in the loop below). Select
3773 // maximum power of two which doesn't exceed TC. If VF is
3774 // scalable, we only fall back on a fixed VF when the TC is less than or
3775 // equal to the known number of lanes.
3776 auto ClampedUpperTripCount = llvm::bit_floor(MaxTripCount);
3777 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not "
3778 "exceeding the constant trip count: "
3779 << ClampedUpperTripCount << "\n");
3780 return ElementCount::get(ClampedUpperTripCount,
3781 FoldTailByMasking ? VF.isScalable() : false);
3782 }
3783 return VF;
3784}
3785
3786ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
3787 unsigned MaxTripCount, unsigned SmallestType, unsigned WidestType,
3788 ElementCount MaxSafeVF, bool FoldTailByMasking) {
3789 bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
3790 const TypeSize WidestRegister = TTI.getRegisterBitWidth(
3791 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
3793
3794 // Convenience function to return the minimum of two ElementCounts.
3795 auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
3796 assert((LHS.isScalable() == RHS.isScalable()) &&
3797 "Scalable flags must match");
3798 return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
3799 };
3800
3801 // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
3802 // Note that both WidestRegister and WidestType may not be a powers of 2.
3803 auto MaxVectorElementCount = ElementCount::get(
3804 llvm::bit_floor(WidestRegister.getKnownMinValue() / WidestType),
3805 ComputeScalableMaxVF);
3806 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
3807 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
3808 << (MaxVectorElementCount * WidestType) << " bits.\n");
3809
3810 if (!MaxVectorElementCount) {
3811 LLVM_DEBUG(dbgs() << "LV: The target has no "
3812 << (ComputeScalableMaxVF ? "scalable" : "fixed")
3813 << " vector registers.\n");
3814 return ElementCount::getFixed(1);
3815 }
3816
3817 ElementCount MaxVF = clampVFByMaxTripCount(MaxVectorElementCount,
3818 MaxTripCount, FoldTailByMasking);
3819 // If the MaxVF was already clamped, there's no point in trying to pick a
3820 // larger one.
3821 if (MaxVF != MaxVectorElementCount)
3822 return MaxVF;
3823
3825 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
3827
3828 if (MaxVF.isScalable())
3829 MaxPermissibleVFWithoutMaxBW.ScalableVF = MaxVF;
3830 else
3831 MaxPermissibleVFWithoutMaxBW.FixedVF = MaxVF;
3832
3833 if (useMaxBandwidth(RegKind)) {
3834 auto MaxVectorElementCountMaxBW = ElementCount::get(
3835 llvm::bit_floor(WidestRegister.getKnownMinValue() / SmallestType),
3836 ComputeScalableMaxVF);
3837 MaxVF = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
3838
3839 if (ElementCount MinVF =
3840 TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
3841 if (ElementCount::isKnownLT(MaxVF, MinVF)) {
3842 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
3843 << ") with target's minimum: " << MinVF << '\n');
3844 MaxVF = MinVF;
3845 }
3846 }
3847
3848 MaxVF = clampVFByMaxTripCount(MaxVF, MaxTripCount, FoldTailByMasking);
3849
3850 if (MaxVectorElementCount != MaxVF) {
3851 // Invalidate any widening decisions we might have made, in case the loop
3852 // requires prediction (decided later), but we have already made some
3853 // load/store widening decisions.
3854 invalidateCostModelingDecisions();
3855 }
3856 }
3857 return MaxVF;
3858}
3859
3860bool LoopVectorizationPlanner::isMoreProfitable(const VectorizationFactor &A,
3861 const VectorizationFactor &B,
3862 const unsigned MaxTripCount,
3863 bool HasTail,
3864 bool IsEpilogue) const {
3865 InstructionCost CostA = A.Cost;
3866 InstructionCost CostB = B.Cost;
3867
3868 // Improve estimate for the vector width if it is scalable.
3869 unsigned EstimatedWidthA = A.Width.getKnownMinValue();
3870 unsigned EstimatedWidthB = B.Width.getKnownMinValue();
3871 if (std::optional<unsigned> VScale = CM.getVScaleForTuning()) {
3872 if (A.Width.isScalable())
3873 EstimatedWidthA *= *VScale;
3874 if (B.Width.isScalable())
3875 EstimatedWidthB *= *VScale;
3876 }
3877
3878 // When optimizing for size choose whichever is smallest, which will be the
3879 // one with the smallest cost for the whole loop. On a tie pick the larger
3880 // vector width, on the assumption that throughput will be greater.
3881 if (CM.CostKind == TTI::TCK_CodeSize)
3882 return CostA < CostB ||
3883 (CostA == CostB && EstimatedWidthA > EstimatedWidthB);
3884
3885 // Assume vscale may be larger than 1 (or the value being tuned for),
3886 // so that scalable vectorization is slightly favorable over fixed-width
3887 // vectorization.
3888 bool PreferScalable = !TTI.preferFixedOverScalableIfEqualCost(IsEpilogue) &&
3889 A.Width.isScalable() && !B.Width.isScalable();
3890
3891 auto CmpFn = [PreferScalable](const InstructionCost &LHS,
3892 const InstructionCost &RHS) {
3893 return PreferScalable ? LHS <= RHS : LHS < RHS;
3894 };
3895
3896 // To avoid the need for FP division:
3897 // (CostA / EstimatedWidthA) < (CostB / EstimatedWidthB)
3898 // <=> (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA)
3899 if (!MaxTripCount)
3900 return CmpFn(CostA * EstimatedWidthB, CostB * EstimatedWidthA);
3901
3902 auto GetCostForTC = [MaxTripCount, HasTail](unsigned VF,
3903 InstructionCost VectorCost,
3904 InstructionCost ScalarCost) {
3905 // If the trip count is a known (possibly small) constant, the trip count
3906 // will be rounded up to an integer number of iterations under
3907 // FoldTailByMasking. The total cost in that case will be
3908 // VecCost*ceil(TripCount/VF). When not folding the tail, the total
3909 // cost will be VecCost*floor(TC/VF) + ScalarCost*(TC%VF). There will be
3910 // some extra overheads, but for the purpose of comparing the costs of
3911 // different VFs we can use this to compare the total loop-body cost
3912 // expected after vectorization.
3913 if (HasTail)
3914 return VectorCost * (MaxTripCount / VF) +
3915 ScalarCost * (MaxTripCount % VF);
3916 return VectorCost * divideCeil(MaxTripCount, VF);
3917 };
3918
3919 auto RTCostA = GetCostForTC(EstimatedWidthA, CostA, A.ScalarCost);
3920 auto RTCostB = GetCostForTC(EstimatedWidthB, CostB, B.ScalarCost);
3921 return CmpFn(RTCostA, RTCostB);
3922}
3923
3924bool LoopVectorizationPlanner::isMoreProfitable(const VectorizationFactor &A,
3925 const VectorizationFactor &B,
3926 bool HasTail,
3927 bool IsEpilogue) const {
3928 const unsigned MaxTripCount = PSE.getSmallConstantMaxTripCount();
3929 return LoopVectorizationPlanner::isMoreProfitable(A, B, MaxTripCount, HasTail,
3930 IsEpilogue);
3931}
3932
3935 using RecipeVFPair = std::pair<VPRecipeBase *, ElementCount>;
3936 SmallVector<RecipeVFPair> InvalidCosts;
3937 for (const auto &Plan : VPlans) {
3938 for (ElementCount VF : Plan->vectorFactors()) {
3939 // The VPlan-based cost model is designed for computing vector cost.
3940 // Querying VPlan-based cost model with a scarlar VF will cause some
3941 // errors because we expect the VF is vector for most of the widen
3942 // recipes.
3943 if (VF.isScalar())
3944 continue;
3945
3946 VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind,
3947 *CM.PSE.getSE(), OrigLoop);
3948 precomputeCosts(*Plan, VF, CostCtx);
3949 auto Iter = vp_depth_first_deep(Plan->getVectorLoopRegion()->getEntry());
3951 for (auto &R : *VPBB) {
3952 if (!R.cost(VF, CostCtx).isValid())
3953 InvalidCosts.emplace_back(&R, VF);
3954 }
3955 }
3956 }
3957 }
3958 if (InvalidCosts.empty())
3959 return;
3960
3961 // Emit a report of VFs with invalid costs in the loop.
3962
3963 // Group the remarks per recipe, keeping the recipe order from InvalidCosts.
3965 unsigned I = 0;
3966 for (auto &Pair : InvalidCosts)
3967 if (Numbering.try_emplace(Pair.first, I).second)
3968 ++I;
3969
3970 // Sort the list, first on recipe(number) then on VF.
3971 sort(InvalidCosts, [&Numbering](RecipeVFPair &A, RecipeVFPair &B) {
3972 unsigned NA = Numbering[A.first];
3973 unsigned NB = Numbering[B.first];
3974 if (NA != NB)
3975 return NA < NB;
3976 return ElementCount::isKnownLT(A.second, B.second);
3977 });
3978
3979 // For a list of ordered recipe-VF pairs:
3980 // [(load, VF1), (load, VF2), (store, VF1)]
3981 // group the recipes together to emit separate remarks for:
3982 // load (VF1, VF2)
3983 // store (VF1)
3984 auto Tail = ArrayRef<RecipeVFPair>(InvalidCosts);
3985 auto Subset = ArrayRef<RecipeVFPair>();
3986 do {
3987 if (Subset.empty())
3988 Subset = Tail.take_front(1);
3989
3990 VPRecipeBase *R = Subset.front().first;
3991
3992 unsigned Opcode =
3995 [](const auto *R) { return Instruction::PHI; })
3996 .Case<VPWidenSelectRecipe>(
3997 [](const auto *R) { return Instruction::Select; })
3998 .Case<VPWidenStoreRecipe>(
3999 [](const auto *R) { return Instruction::Store; })
4000 .Case<VPWidenLoadRecipe>(
4001 [](const auto *R) { return Instruction::Load; })
4002 .Case<VPWidenCallRecipe, VPWidenIntrinsicRecipe>(
4003 [](const auto *R) { return Instruction::Call; })
4006 [](const auto *R) { return R->getOpcode(); })
4007 .Case<VPInterleaveRecipe>([](const VPInterleaveRecipe *R) {
4008 return R->getStoredValues().empty() ? Instruction::Load
4009 : Instruction::Store;
4010 })
4011 .Case<VPReductionRecipe>([](const auto *R) {
4012 return RecurrenceDescriptor::getOpcode(R->getRecurrenceKind());
4013 });
4014
4015 // If the next recipe is different, or if there are no other pairs,
4016 // emit a remark for the collated subset. e.g.
4017 // [(load, VF1), (load, VF2))]
4018 // to emit:
4019 // remark: invalid costs for 'load' at VF=(VF1, VF2)
4020 if (Subset == Tail || Tail[Subset.size()].first != R) {
4021 std::string OutString;
4022 raw_string_ostream OS(OutString);
4023 assert(!Subset.empty() && "Unexpected empty range");
4024 OS << "Recipe with invalid costs prevented vectorization at VF=(";
4025 for (const auto &Pair : Subset)
4026 OS << (Pair.second == Subset.front().second ? "" : ", ") << Pair.second;
4027 OS << "):";
4028 if (Opcode == Instruction::Call) {
4029 StringRef Name = "";
4030 if (auto *Int = dyn_cast<VPWidenIntrinsicRecipe>(R)) {
4031 Name = Int->getIntrinsicName();
4032 } else {
4033 auto *WidenCall = dyn_cast<VPWidenCallRecipe>(R);
4034 Function *CalledFn =
4035 WidenCall ? WidenCall->getCalledScalarFunction()
4036 : cast<Function>(R->getOperand(R->getNumOperands() - 1)
4037 ->getLiveInIRValue());
4038 Name = CalledFn->getName();
4039 }
4040 OS << " call to " << Name;
4041 } else
4042 OS << " " << Instruction::getOpcodeName(Opcode);
4043 reportVectorizationInfo(OutString, "InvalidCost", ORE, OrigLoop, nullptr,
4044 R->getDebugLoc());
4045 Tail = Tail.drop_front(Subset.size());
4046 Subset = {};
4047 } else
4048 // Grow the subset by one element
4049 Subset = Tail.take_front(Subset.size() + 1);
4050 } while (!Tail.empty());
4051}
4052
4053/// Check if any recipe of \p Plan will generate a vector value, which will be
4054/// assigned a vector register.
4056 const TargetTransformInfo &TTI) {
4057 assert(VF.isVector() && "Checking a scalar VF?");
4058 VPTypeAnalysis TypeInfo(Plan);
4059 DenseSet<VPRecipeBase *> EphemeralRecipes;
4060 collectEphemeralRecipesForVPlan(Plan, EphemeralRecipes);
4061 // Set of already visited types.
4062 DenseSet<Type *> Visited;
4065 for (VPRecipeBase &R : *VPBB) {
4066 if (EphemeralRecipes.contains(&R))
4067 continue;
4068 // Continue early if the recipe is considered to not produce a vector
4069 // result. Note that this includes VPInstruction where some opcodes may
4070 // produce a vector, to preserve existing behavior as VPInstructions model
4071 // aspects not directly mapped to existing IR instructions.
4072 switch (R.getVPDefID()) {
4073 case VPDef::VPDerivedIVSC:
4074 case VPDef::VPScalarIVStepsSC:
4075 case VPDef::VPReplicateSC:
4076 case VPDef::VPInstructionSC:
4077 case VPDef::VPCanonicalIVPHISC:
4078 case VPDef::VPVectorPointerSC:
4079 case VPDef::VPVectorEndPointerSC:
4080 case VPDef::VPExpandSCEVSC:
4081 case VPDef::VPEVLBasedIVPHISC:
4082 case VPDef::VPPredInstPHISC:
4083 case VPDef::VPBranchOnMaskSC:
4084 continue;
4085 case VPDef::VPReductionSC:
4086 case VPDef::VPActiveLaneMaskPHISC:
4087 case VPDef::VPWidenCallSC:
4088 case VPDef::VPWidenCanonicalIVSC:
4089 case VPDef::VPWidenCastSC:
4090 case VPDef::VPWidenGEPSC:
4091 case VPDef::VPWidenIntrinsicSC:
4092 case VPDef::VPWidenSC:
4093 case VPDef::VPWidenSelectSC:
4094 case VPDef::VPBlendSC:
4095 case VPDef::VPFirstOrderRecurrencePHISC:
4096 case VPDef::VPHistogramSC:
4097 case VPDef::VPWidenPHISC:
4098 case VPDef::VPWidenIntOrFpInductionSC:
4099 case VPDef::VPWidenPointerInductionSC:
4100 case VPDef::VPReductionPHISC:
4101 case VPDef::VPInterleaveEVLSC:
4102 case VPDef::VPInterleaveSC:
4103 case VPDef::VPWidenLoadEVLSC:
4104 case VPDef::VPWidenLoadSC:
4105 case VPDef::VPWidenStoreEVLSC:
4106 case VPDef::VPWidenStoreSC:
4107 break;
4108 default:
4109 llvm_unreachable("unhandled recipe");
4110 }
4111
4112 auto WillGenerateTargetVectors = [&TTI, VF](Type *VectorTy) {
4113 unsigned NumLegalParts = TTI.getNumberOfParts(VectorTy);
4114 if (!NumLegalParts)
4115 return false;
4116 if (VF.isScalable()) {
4117 // <vscale x 1 x iN> is assumed to be profitable over iN because
4118 // scalable registers are a distinct register class from scalar
4119 // ones. If we ever find a target which wants to lower scalable
4120 // vectors back to scalars, we'll need to update this code to
4121 // explicitly ask TTI about the register class uses for each part.
4122 return NumLegalParts <= VF.getKnownMinValue();
4123 }
4124 // Two or more elements that share a register - are vectorized.
4125 return NumLegalParts < VF.getFixedValue();
4126 };
4127
4128 // If no def nor is a store, e.g., branches, continue - no value to check.
4129 if (R.getNumDefinedValues() == 0 &&
4131 continue;
4132 // For multi-def recipes, currently only interleaved loads, suffice to
4133 // check first def only.
4134 // For stores check their stored value; for interleaved stores suffice
4135 // the check first stored value only. In all cases this is the second
4136 // operand.
4137 VPValue *ToCheck =
4138 R.getNumDefinedValues() >= 1 ? R.getVPValue(0) : R.getOperand(1);
4139 Type *ScalarTy = TypeInfo.inferScalarType(ToCheck);
4140 if (!Visited.insert({ScalarTy}).second)
4141 continue;
4142 Type *WideTy = toVectorizedTy(ScalarTy, VF);
4143 if (any_of(getContainedTypes(WideTy), WillGenerateTargetVectors))
4144 return true;
4145 }
4146 }
4147
4148 return false;
4149}
4150
4151static bool hasReplicatorRegion(VPlan &Plan) {
4153 Plan.getVectorLoopRegion()->getEntry())),
4154 [](auto *VPRB) { return VPRB->isReplicator(); });
4155}
4156
4157#ifndef NDEBUG
4158VectorizationFactor LoopVectorizationPlanner::selectVectorizationFactor() {
4159 InstructionCost ExpectedCost = CM.expectedCost(ElementCount::getFixed(1));
4160 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
4161 assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
4162 assert(
4163 any_of(VPlans,
4164 [](std::unique_ptr<VPlan> &P) { return P->hasScalarVFOnly(); }) &&
4165 "Expected Scalar VF to be a candidate");
4166
4167 const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost,
4168 ExpectedCost);
4169 VectorizationFactor ChosenFactor = ScalarCost;
4170
4171 bool ForceVectorization = Hints.getForce() == LoopVectorizeHints::FK_Enabled;
4172 if (ForceVectorization &&
4173 (VPlans.size() > 1 || !VPlans[0]->hasScalarVFOnly())) {
4174 // Ignore scalar width, because the user explicitly wants vectorization.
4175 // Initialize cost to max so that VF = 2 is, at least, chosen during cost
4176 // evaluation.
4177 ChosenFactor.Cost = InstructionCost::getMax();
4178 }
4179
4180 for (auto &P : VPlans) {
4181 ArrayRef<ElementCount> VFs(P->vectorFactors().begin(),
4182 P->vectorFactors().end());
4183
4185 if (any_of(VFs, [this](ElementCount VF) {
4186 return CM.shouldConsiderRegPressureForVF(VF);
4187 }))
4188 RUs = calculateRegisterUsageForPlan(*P, VFs, TTI, CM.ValuesToIgnore);
4189
4190 for (unsigned I = 0; I < VFs.size(); I++) {
4191 ElementCount VF = VFs[I];
4192 // The cost for scalar VF=1 is already calculated, so ignore it.
4193 if (VF.isScalar())
4194 continue;
4195
4196 /// If the register pressure needs to be considered for VF,
4197 /// don't consider the VF as valid if it exceeds the number
4198 /// of registers for the target.
4199 if (CM.shouldConsiderRegPressureForVF(VF) &&
4200 RUs[I].exceedsMaxNumRegs(TTI, ForceTargetNumVectorRegs))
4201 continue;
4202
4203 InstructionCost C = CM.expectedCost(VF);
4204
4205 // Add on other costs that are modelled in VPlan, but not in the legacy
4206 // cost model.
4207 VPCostContext CostCtx(CM.TTI, *CM.TLI, *P, CM, CM.CostKind,
4208 *CM.PSE.getSE(), OrigLoop);
4209 VPRegionBlock *VectorRegion = P->getVectorLoopRegion();
4210 assert(VectorRegion && "Expected to have a vector region!");
4211 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
4212 vp_depth_first_shallow(VectorRegion->getEntry()))) {
4213 for (VPRecipeBase &R : *VPBB) {
4214 auto *VPI = dyn_cast<VPInstruction>(&R);
4215 if (!VPI)
4216 continue;
4217 switch (VPI->getOpcode()) {
4218 // Selects are only modelled in the legacy cost model for safe
4219 // divisors.
4220 case Instruction::Select: {
4221 if (auto *WR =
4222 dyn_cast_or_null<VPWidenRecipe>(VPI->getSingleUser())) {
4223 switch (WR->getOpcode()) {
4224 case Instruction::UDiv:
4225 case Instruction::SDiv:
4226 case Instruction::URem:
4227 case Instruction::SRem:
4228 continue;
4229 default:
4230 break;
4231 }
4232 }
4233 C += VPI->cost(VF, CostCtx);
4234 break;
4235 }
4237 unsigned Multiplier =
4238 cast<ConstantInt>(VPI->getOperand(2)->getLiveInIRValue())
4239 ->getZExtValue();
4240 C += VPI->cost(VF * Multiplier, CostCtx);
4241 break;
4242 }
4244 C += VPI->cost(VF, CostCtx);
4245 break;
4246 default:
4247 break;
4248 }
4249 }
4250 }
4251
4252 VectorizationFactor Candidate(VF, C, ScalarCost.ScalarCost);
4253 unsigned Width =
4254 estimateElementCount(Candidate.Width, CM.getVScaleForTuning());
4255 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << VF
4256 << " costs: " << (Candidate.Cost / Width));
4257 if (VF.isScalable())
4258 LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of "
4259 << CM.getVScaleForTuning().value_or(1) << ")");
4260 LLVM_DEBUG(dbgs() << ".\n");
4261
4262 if (!ForceVectorization && !willGenerateVectors(*P, VF, TTI)) {
4263 LLVM_DEBUG(
4264 dbgs()
4265 << "LV: Not considering vector loop of width " << VF
4266 << " because it will not generate any vector instructions.\n");
4267 continue;
4268 }
4269
4270 if (CM.OptForSize && !ForceVectorization && hasReplicatorRegion(*P)) {
4271 LLVM_DEBUG(
4272 dbgs()
4273 << "LV: Not considering vector loop of width " << VF
4274 << " because it would cause replicated blocks to be generated,"
4275 << " which isn't allowed when optimizing for size.\n");
4276 continue;
4277 }
4278
4279 if (isMoreProfitable(Candidate, ChosenFactor, P->hasScalarTail()))
4280 ChosenFactor = Candidate;
4281 }
4282 }
4283
4284 if (!EnableCondStoresVectorization && CM.hasPredStores()) {
4286 "There are conditional stores.",
4287 "store that is conditionally executed prevents vectorization",
4288 "ConditionalStore", ORE, OrigLoop);
4289 ChosenFactor = ScalarCost;
4290 }
4291
4292 LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
4293 !isMoreProfitable(ChosenFactor, ScalarCost,
4294 !CM.foldTailByMasking())) dbgs()
4295 << "LV: Vectorization seems to be not beneficial, "
4296 << "but was forced by a user.\n");
4297 return ChosenFactor;
4298}
4299#endif
4300
4301bool LoopVectorizationPlanner::isCandidateForEpilogueVectorization(
4302 ElementCount VF) const {
4303 // Cross iteration phis such as fixed-order recurrences and FMaxNum/FMinNum
4304 // reductions need special handling and are currently unsupported.
4305 if (any_of(OrigLoop->getHeader()->phis(), [&](PHINode &Phi) {
4306 if (!Legal->isReductionVariable(&Phi))
4307 return Legal->isFixedOrderRecurrence(&Phi);
4308 return RecurrenceDescriptor::isFPMinMaxNumRecurrenceKind(
4309 Legal->getRecurrenceDescriptor(&Phi).getRecurrenceKind());
4310 }))
4311 return false;
4312
4313 // Phis with uses outside of the loop require special handling and are
4314 // currently unsupported.
4315 for (const auto &Entry : Legal->getInductionVars()) {
4316 // Look for uses of the value of the induction at the last iteration.
4317 Value *PostInc =
4318 Entry.first->getIncomingValueForBlock(OrigLoop->getLoopLatch());
4319 for (User *U : PostInc->users())
4320 if (!OrigLoop->contains(cast<Instruction>(U)))
4321 return false;
4322 // Look for uses of penultimate value of the induction.
4323 for (User *U : Entry.first->users())
4324 if (!OrigLoop->contains(cast<Instruction>(U)))
4325 return false;
4326 }
4327
4328 // Epilogue vectorization code has not been auditted to ensure it handles
4329 // non-latch exits properly. It may be fine, but it needs auditted and
4330 // tested.
4331 // TODO: Add support for loops with an early exit.
4332 if (OrigLoop->getExitingBlock() != OrigLoop->getLoopLatch())
4333 return false;
4334
4335 return true;
4336}
4337
4339 const ElementCount VF, const unsigned IC) const {
4340 // FIXME: We need a much better cost-model to take different parameters such
4341 // as register pressure, code size increase and cost of extra branches into
4342 // account. For now we apply a very crude heuristic and only consider loops
4343 // with vectorization factors larger than a certain value.
4344
4345 // Allow the target to opt out entirely.
4346 if (!TTI.preferEpilogueVectorization())
4347 return false;
4348
4349 // We also consider epilogue vectorization unprofitable for targets that don't
4350 // consider interleaving beneficial (eg. MVE).
4351 if (TTI.getMaxInterleaveFactor(VF) <= 1)
4352 return false;
4353
4354 unsigned MinVFThreshold = EpilogueVectorizationMinVF.getNumOccurrences() > 0
4356 : TTI.getEpilogueVectorizationMinVF();
4357 return estimateElementCount(VF * IC, VScaleForTuning) >= MinVFThreshold;
4358}
4359
4361 const ElementCount MainLoopVF, unsigned IC) {
4364 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n");
4365 return Result;
4366 }
4367
4368 if (!CM.isScalarEpilogueAllowed()) {
4369 LLVM_DEBUG(dbgs() << "LEV: Unable to vectorize epilogue because no "
4370 "epilogue is allowed.\n");
4371 return Result;
4372 }
4373
4374 // Not really a cost consideration, but check for unsupported cases here to
4375 // simplify the logic.
4376 if (!isCandidateForEpilogueVectorization(MainLoopVF)) {
4377 LLVM_DEBUG(dbgs() << "LEV: Unable to vectorize epilogue because the loop "
4378 "is not a supported candidate.\n");
4379 return Result;
4380 }
4381
4383 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n");
4385 if (hasPlanWithVF(ForcedEC))
4386 return {ForcedEC, 0, 0};
4387
4388 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization forced factor is not "
4389 "viable.\n");
4390 return Result;
4391 }
4392
4393 if (OrigLoop->getHeader()->getParent()->hasOptSize()) {
4394 LLVM_DEBUG(
4395 dbgs() << "LEV: Epilogue vectorization skipped due to opt for size.\n");
4396 return Result;
4397 }
4398
4399 if (!CM.isEpilogueVectorizationProfitable(MainLoopVF, IC)) {
4400 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
4401 "this loop\n");
4402 return Result;
4403 }
4404
4405 // If MainLoopVF = vscale x 2, and vscale is expected to be 4, then we know
4406 // the main loop handles 8 lanes per iteration. We could still benefit from
4407 // vectorizing the epilogue loop with VF=4.
4408 ElementCount EstimatedRuntimeVF = ElementCount::getFixed(
4409 estimateElementCount(MainLoopVF, CM.getVScaleForTuning()));
4410
4411 ScalarEvolution &SE = *PSE.getSE();
4412 Type *TCType = Legal->getWidestInductionType();
4413 const SCEV *RemainingIterations = nullptr;
4414 unsigned MaxTripCount = 0;
4415 const SCEV *TC =
4416 vputils::getSCEVExprForVPValue(getPlanFor(MainLoopVF).getTripCount(), SE);
4417 assert(!isa<SCEVCouldNotCompute>(TC) && "Trip count SCEV must be computable");
4418 const SCEV *KnownMinTC;
4419 bool ScalableTC = match(TC, m_scev_c_Mul(m_SCEV(KnownMinTC), m_SCEVVScale()));
4420 bool ScalableRemIter = false;
4421 // Use versions of TC and VF in which both are either scalable or fixed.
4422 if (ScalableTC == MainLoopVF.isScalable()) {
4423 ScalableRemIter = ScalableTC;
4424 RemainingIterations =
4425 SE.getURemExpr(TC, SE.getElementCount(TCType, MainLoopVF * IC));
4426 } else if (ScalableTC) {
4427 const SCEV *EstimatedTC = SE.getMulExpr(
4428 KnownMinTC,
4429 SE.getConstant(TCType, CM.getVScaleForTuning().value_or(1)));
4430 RemainingIterations = SE.getURemExpr(
4431 EstimatedTC, SE.getElementCount(TCType, MainLoopVF * IC));
4432 } else
4433 RemainingIterations =
4434 SE.getURemExpr(TC, SE.getElementCount(TCType, EstimatedRuntimeVF * IC));
4435
4436 // No iterations left to process in the epilogue.
4437 if (RemainingIterations->isZero())
4438 return Result;
4439
4440 if (MainLoopVF.isFixed()) {
4441 MaxTripCount = MainLoopVF.getFixedValue() * IC - 1;
4442 if (SE.isKnownPredicate(CmpInst::ICMP_ULT, RemainingIterations,
4443 SE.getConstant(TCType, MaxTripCount))) {
4444 MaxTripCount = SE.getUnsignedRangeMax(RemainingIterations).getZExtValue();
4445 }
4446 LLVM_DEBUG(dbgs() << "LEV: Maximum Trip Count for Epilogue: "
4447 << MaxTripCount << "\n");
4448 }
4449
4450 auto SkipVF = [&](const SCEV *VF, const SCEV *RemIter) -> bool {
4451 return SE.isKnownPredicate(CmpInst::ICMP_UGT, VF, RemIter);
4452 };
4453 for (auto &NextVF : ProfitableVFs) {
4454 // Skip candidate VFs without a corresponding VPlan.
4455 if (!hasPlanWithVF(NextVF.Width))
4456 continue;
4457
4458 // Skip candidate VFs with widths >= the (estimated) runtime VF (scalable
4459 // vectors) or > the VF of the main loop (fixed vectors).
4460 if ((!NextVF.Width.isScalable() && MainLoopVF.isScalable() &&
4461 ElementCount::isKnownGE(NextVF.Width, EstimatedRuntimeVF)) ||
4462 (NextVF.Width.isScalable() &&
4463 ElementCount::isKnownGE(NextVF.Width, MainLoopVF)) ||
4464 (!NextVF.Width.isScalable() && !MainLoopVF.isScalable() &&
4465 ElementCount::isKnownGT(NextVF.Width, MainLoopVF)))
4466 continue;
4467
4468 // If NextVF is greater than the number of remaining iterations, the
4469 // epilogue loop would be dead. Skip such factors.
4470 // TODO: We should also consider comparing against a scalable
4471 // RemainingIterations when SCEV be able to evaluate non-canonical
4472 // vscale-based expressions.
4473 if (!ScalableRemIter) {
4474 // Handle the case where NextVF and RemainingIterations are in different
4475 // numerical spaces.
4476 ElementCount EC = NextVF.Width;
4477 if (NextVF.Width.isScalable())
4479 estimateElementCount(NextVF.Width, CM.getVScaleForTuning()));
4480 if (SkipVF(SE.getElementCount(TCType, EC), RemainingIterations))
4481 continue;
4482 }
4483
4484 if (Result.Width.isScalar() ||
4485 isMoreProfitable(NextVF, Result, MaxTripCount, !CM.foldTailByMasking(),
4486 /*IsEpilogue*/ true))
4487 Result = NextVF;
4488 }
4489
4490 if (Result != VectorizationFactor::Disabled())
4491 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
4492 << Result.Width << "\n");
4493 return Result;
4494}
4495
4496std::pair<unsigned, unsigned>
4498 unsigned MinWidth = -1U;
4499 unsigned MaxWidth = 8;
4500 const DataLayout &DL = TheFunction->getDataLayout();
4501 // For in-loop reductions, no element types are added to ElementTypesInLoop
4502 // if there are no loads/stores in the loop. In this case, check through the
4503 // reduction variables to determine the maximum width.
4504 if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) {
4505 for (const auto &PhiDescriptorPair : Legal->getReductionVars()) {
4506 const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second;
4507 // When finding the min width used by the recurrence we need to account
4508 // for casts on the input operands of the recurrence.
4509 MinWidth = std::min(
4510 MinWidth,
4511 std::min(RdxDesc.getMinWidthCastToRecurrenceTypeInBits(),
4513 MaxWidth = std::max(MaxWidth,
4515 }
4516 } else {
4517 for (Type *T : ElementTypesInLoop) {
4518 MinWidth = std::min<unsigned>(
4519 MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedValue());
4520 MaxWidth = std::max<unsigned>(
4521 MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedValue());
4522 }
4523 }
4524 return {MinWidth, MaxWidth};
4525}
4526
4528 ElementTypesInLoop.clear();
4529 // For each block.
4530 for (BasicBlock *BB : TheLoop->blocks()) {
4531 // For each instruction in the loop.
4532 for (Instruction &I : BB->instructionsWithoutDebug()) {
4533 Type *T = I.getType();
4534
4535 // Skip ignored values.
4536 if (ValuesToIgnore.count(&I))
4537 continue;
4538
4539 // Only examine Loads, Stores and PHINodes.
4540 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
4541 continue;
4542
4543 // Examine PHI nodes that are reduction variables. Update the type to
4544 // account for the recurrence type.
4545 if (auto *PN = dyn_cast<PHINode>(&I)) {
4546 if (!Legal->isReductionVariable(PN))
4547 continue;
4548 const RecurrenceDescriptor &RdxDesc =
4549 Legal->getRecurrenceDescriptor(PN);
4551 TTI.preferInLoopReduction(RdxDesc.getRecurrenceKind(),
4552 RdxDesc.getRecurrenceType()))
4553 continue;
4554 T = RdxDesc.getRecurrenceType();
4555 }
4556
4557 // Examine the stored values.
4558 if (auto *ST = dyn_cast<StoreInst>(&I))
4559 T = ST->getValueOperand()->getType();
4560
4561 assert(T->isSized() &&
4562 "Expected the load/store/recurrence type to be sized");
4563
4564 ElementTypesInLoop.insert(T);
4565 }
4566 }
4567}
4568
4569unsigned
4571 InstructionCost LoopCost) {
4572 // -- The interleave heuristics --
4573 // We interleave the loop in order to expose ILP and reduce the loop overhead.
4574 // There are many micro-architectural considerations that we can't predict
4575 // at this level. For example, frontend pressure (on decode or fetch) due to
4576 // code size, or the number and capabilities of the execution ports.
4577 //
4578 // We use the following heuristics to select the interleave count:
4579 // 1. If the code has reductions, then we interleave to break the cross
4580 // iteration dependency.
4581 // 2. If the loop is really small, then we interleave to reduce the loop
4582 // overhead.
4583 // 3. We don't interleave if we think that we will spill registers to memory
4584 // due to the increased register pressure.
4585
4586 // Only interleave tail-folded loops if wide lane masks are requested, as the
4587 // overhead of multiple instructions to calculate the predicate is likely
4588 // not beneficial. If a scalar epilogue is not allowed for any other reason,
4589 // do not interleave.
4590 if (!CM.isScalarEpilogueAllowed() &&
4591 !(CM.preferPredicatedLoop() && CM.useWideActiveLaneMask()))
4592 return 1;
4593
4596 LLVM_DEBUG(dbgs() << "LV: Preference for VP intrinsics indicated. "
4597 "Unroll factor forced to be 1.\n");
4598 return 1;
4599 }
4600
4601 // We used the distance for the interleave count.
4602 if (!Legal->isSafeForAnyVectorWidth())
4603 return 1;
4604
4605 // We don't attempt to perform interleaving for loops with uncountable early
4606 // exits because the VPInstruction::AnyOf code cannot currently handle
4607 // multiple parts.
4608 if (Plan.hasEarlyExit())
4609 return 1;
4610
4611 const bool HasReductions =
4614
4615 // If we did not calculate the cost for VF (because the user selected the VF)
4616 // then we calculate the cost of VF here.
4617 if (LoopCost == 0) {
4618 if (VF.isScalar())
4619 LoopCost = CM.expectedCost(VF);
4620 else
4621 LoopCost = cost(Plan, VF);
4622 assert(LoopCost.isValid() && "Expected to have chosen a VF with valid cost");
4623
4624 // Loop body is free and there is no need for interleaving.
4625 if (LoopCost == 0)
4626 return 1;
4627 }
4628
4629 VPRegisterUsage R =
4630 calculateRegisterUsageForPlan(Plan, {VF}, TTI, CM.ValuesToIgnore)[0];
4631 // We divide by these constants so assume that we have at least one
4632 // instruction that uses at least one register.
4633 for (auto &Pair : R.MaxLocalUsers) {
4634 Pair.second = std::max(Pair.second, 1U);
4635 }
4636
4637 // We calculate the interleave count using the following formula.
4638 // Subtract the number of loop invariants from the number of available
4639 // registers. These registers are used by all of the interleaved instances.
4640 // Next, divide the remaining registers by the number of registers that is
4641 // required by the loop, in order to estimate how many parallel instances
4642 // fit without causing spills. All of this is rounded down if necessary to be
4643 // a power of two. We want power of two interleave count to simplify any
4644 // addressing operations or alignment considerations.
4645 // We also want power of two interleave counts to ensure that the induction
4646 // variable of the vector loop wraps to zero, when tail is folded by masking;
4647 // this currently happens when OptForSize, in which case IC is set to 1 above.
4648 unsigned IC = UINT_MAX;
4649
4650 for (const auto &Pair : R.MaxLocalUsers) {
4651 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(Pair.first);
4652 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
4653 << " registers of "
4654 << TTI.getRegisterClassName(Pair.first)
4655 << " register class\n");
4656 if (VF.isScalar()) {
4657 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
4658 TargetNumRegisters = ForceTargetNumScalarRegs;
4659 } else {
4660 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
4661 TargetNumRegisters = ForceTargetNumVectorRegs;
4662 }
4663 unsigned MaxLocalUsers = Pair.second;
4664 unsigned LoopInvariantRegs = 0;
4665 if (R.LoopInvariantRegs.contains(Pair.first))
4666 LoopInvariantRegs = R.LoopInvariantRegs[Pair.first];
4667
4668 unsigned TmpIC = llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs) /
4669 MaxLocalUsers);
4670 // Don't count the induction variable as interleaved.
4672 TmpIC = llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs - 1) /
4673 std::max(1U, (MaxLocalUsers - 1)));
4674 }
4675
4676 IC = std::min(IC, TmpIC);
4677 }
4678
4679 // Clamp the interleave ranges to reasonable counts.
4680 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
4681
4682 // Check if the user has overridden the max.
4683 if (VF.isScalar()) {
4684 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
4685 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
4686 } else {
4687 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
4688 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
4689 }
4690
4691 // Try to get the exact trip count, or an estimate based on profiling data or
4692 // ConstantMax from PSE, failing that.
4693 auto BestKnownTC = getSmallBestKnownTC(PSE, OrigLoop);
4694
4695 // For fixed length VFs treat a scalable trip count as unknown.
4696 if (BestKnownTC && (BestKnownTC->isFixed() || VF.isScalable())) {
4697 // Re-evaluate trip counts and VFs to be in the same numerical space.
4698 unsigned AvailableTC =
4699 estimateElementCount(*BestKnownTC, CM.getVScaleForTuning());
4700 unsigned EstimatedVF = estimateElementCount(VF, CM.getVScaleForTuning());
4701
4702 // At least one iteration must be scalar when this constraint holds. So the
4703 // maximum available iterations for interleaving is one less.
4704 if (CM.requiresScalarEpilogue(VF.isVector()))
4705 --AvailableTC;
4706
4707 unsigned InterleaveCountLB = bit_floor(std::max(
4708 1u, std::min(AvailableTC / (EstimatedVF * 2), MaxInterleaveCount)));
4709
4710 if (getSmallConstantTripCount(PSE.getSE(), OrigLoop).isNonZero()) {
4711 // If the best known trip count is exact, we select between two
4712 // prospective ICs, where
4713 //
4714 // 1) the aggressive IC is capped by the trip count divided by VF
4715 // 2) the conservative IC is capped by the trip count divided by (VF * 2)
4716 //
4717 // The final IC is selected in a way that the epilogue loop trip count is
4718 // minimized while maximizing the IC itself, so that we either run the
4719 // vector loop at least once if it generates a small epilogue loop, or
4720 // else we run the vector loop at least twice.
4721
4722 unsigned InterleaveCountUB = bit_floor(std::max(
4723 1u, std::min(AvailableTC / EstimatedVF, MaxInterleaveCount)));
4724 MaxInterleaveCount = InterleaveCountLB;
4725
4726 if (InterleaveCountUB != InterleaveCountLB) {
4727 unsigned TailTripCountUB =
4728 (AvailableTC % (EstimatedVF * InterleaveCountUB));
4729 unsigned TailTripCountLB =
4730 (AvailableTC % (EstimatedVF * InterleaveCountLB));
4731 // If both produce same scalar tail, maximize the IC to do the same work
4732 // in fewer vector loop iterations
4733 if (TailTripCountUB == TailTripCountLB)
4734 MaxInterleaveCount = InterleaveCountUB;
4735 }
4736 } else {
4737 // If trip count is an estimated compile time constant, limit the
4738 // IC to be capped by the trip count divided by VF * 2, such that the
4739 // vector loop runs at least twice to make interleaving seem profitable
4740 // when there is an epilogue loop present. Since exact Trip count is not
4741 // known we choose to be conservative in our IC estimate.
4742 MaxInterleaveCount = InterleaveCountLB;
4743 }
4744 }
4745
4746 assert(MaxInterleaveCount > 0 &&
4747 "Maximum interleave count must be greater than 0");
4748
4749 // Clamp the calculated IC to be between the 1 and the max interleave count
4750 // that the target and trip count allows.
4751 if (IC > MaxInterleaveCount)
4752 IC = MaxInterleaveCount;
4753 else
4754 // Make sure IC is greater than 0.
4755 IC = std::max(1u, IC);
4756
4757 assert(IC > 0 && "Interleave count must be greater than 0.");
4758
4759 // Interleave if we vectorized this loop and there is a reduction that could
4760 // benefit from interleaving.
4761 if (VF.isVector() && HasReductions) {
4762 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
4763 return IC;
4764 }
4765
4766 // For any scalar loop that either requires runtime checks or predication we
4767 // are better off leaving this to the unroller. Note that if we've already
4768 // vectorized the loop we will have done the runtime check and so interleaving
4769 // won't require further checks.
4770 bool ScalarInterleavingRequiresPredication =
4771 (VF.isScalar() && any_of(OrigLoop->blocks(), [this](BasicBlock *BB) {
4772 return Legal->blockNeedsPredication(BB);
4773 }));
4774 bool ScalarInterleavingRequiresRuntimePointerCheck =
4775 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
4776
4777 // We want to interleave small loops in order to reduce the loop overhead and
4778 // potentially expose ILP opportunities.
4779 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
4780 << "LV: IC is " << IC << '\n'
4781 << "LV: VF is " << VF << '\n');
4782 const bool AggressivelyInterleaveReductions =
4783 TTI.enableAggressiveInterleaving(HasReductions);
4784 if (!ScalarInterleavingRequiresRuntimePointerCheck &&
4785 !ScalarInterleavingRequiresPredication && LoopCost < SmallLoopCost) {
4786 // We assume that the cost overhead is 1 and we use the cost model
4787 // to estimate the cost of the loop and interleave until the cost of the
4788 // loop overhead is about 5% of the cost of the loop.
4789 unsigned SmallIC = std::min(IC, (unsigned)llvm::bit_floor<uint64_t>(
4790 SmallLoopCost / LoopCost.getValue()));
4791
4792 // Interleave until store/load ports (estimated by max interleave count) are
4793 // saturated.
4794 unsigned NumStores = 0;
4795 unsigned NumLoads = 0;
4798 for (VPRecipeBase &R : *VPBB) {
4800 NumLoads++;
4801 continue;
4802 }
4804 NumStores++;
4805 continue;
4806 }
4807
4808 if (auto *InterleaveR = dyn_cast<VPInterleaveRecipe>(&R)) {
4809 if (unsigned StoreOps = InterleaveR->getNumStoreOperands())
4810 NumStores += StoreOps;
4811 else
4812 NumLoads += InterleaveR->getNumDefinedValues();
4813 continue;
4814 }
4815 if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R)) {
4816 NumLoads += isa<LoadInst>(RepR->getUnderlyingInstr());
4817 NumStores += isa<StoreInst>(RepR->getUnderlyingInstr());
4818 continue;
4819 }
4820 if (isa<VPHistogramRecipe>(&R)) {
4821 NumLoads++;
4822 NumStores++;
4823 continue;
4824 }
4825 }
4826 }
4827 unsigned StoresIC = IC / (NumStores ? NumStores : 1);
4828 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
4829
4830 // There is little point in interleaving for reductions containing selects
4831 // and compares when VF=1 since it may just create more overhead than it's
4832 // worth for loops with small trip counts. This is because we still have to
4833 // do the final reduction after the loop.
4834 bool HasSelectCmpReductions =
4835 HasReductions &&
4837 [](VPRecipeBase &R) {
4838 auto *RedR = dyn_cast<VPReductionPHIRecipe>(&R);
4839 return RedR && (RecurrenceDescriptor::isAnyOfRecurrenceKind(
4840 RedR->getRecurrenceKind()) ||
4841 RecurrenceDescriptor::isFindIVRecurrenceKind(
4842 RedR->getRecurrenceKind()));
4843 });
4844 if (HasSelectCmpReductions) {
4845 LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");
4846 return 1;
4847 }
4848
4849 // If we have a scalar reduction (vector reductions are already dealt with
4850 // by this point), we can increase the critical path length if the loop
4851 // we're interleaving is inside another loop. For tree-wise reductions
4852 // set the limit to 2, and for ordered reductions it's best to disable
4853 // interleaving entirely.
4854 if (HasReductions && OrigLoop->getLoopDepth() > 1) {
4855 bool HasOrderedReductions =
4857 [](VPRecipeBase &R) {
4858 auto *RedR = dyn_cast<VPReductionPHIRecipe>(&R);
4859
4860 return RedR && RedR->isOrdered();
4861 });
4862 if (HasOrderedReductions) {
4863 LLVM_DEBUG(
4864 dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
4865 return 1;
4866 }
4867
4868 unsigned F = MaxNestedScalarReductionIC;
4869 SmallIC = std::min(SmallIC, F);
4870 StoresIC = std::min(StoresIC, F);
4871 LoadsIC = std::min(LoadsIC, F);
4872 }
4873
4875 std::max(StoresIC, LoadsIC) > SmallIC) {
4876 LLVM_DEBUG(
4877 dbgs() << "LV: Interleaving to saturate store or load ports.\n");
4878 return std::max(StoresIC, LoadsIC);
4879 }
4880
4881 // If there are scalar reductions and TTI has enabled aggressive
4882 // interleaving for reductions, we will interleave to expose ILP.
4883 if (VF.isScalar() && AggressivelyInterleaveReductions) {
4884 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
4885 // Interleave no less than SmallIC but not as aggressive as the normal IC
4886 // to satisfy the rare situation when resources are too limited.
4887 return std::max(IC / 2, SmallIC);
4888 }
4889
4890 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
4891 return SmallIC;
4892 }
4893
4894 // Interleave if this is a large loop (small loops are already dealt with by
4895 // this point) that could benefit from interleaving.
4896 if (AggressivelyInterleaveReductions) {
4897 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
4898 return IC;
4899 }
4900
4901 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
4902 return 1;
4903}
4904
4905bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I,
4906 ElementCount VF) {
4907 // TODO: Cost model for emulated masked load/store is completely
4908 // broken. This hack guides the cost model to use an artificially
4909 // high enough value to practically disable vectorization with such
4910 // operations, except where previously deployed legality hack allowed
4911 // using very low cost values. This is to avoid regressions coming simply
4912 // from moving "masked load/store" check from legality to cost model.
4913 // Masked Load/Gather emulation was previously never allowed.
4914 // Limited number of Masked Store/Scatter emulation was allowed.
4915 assert((isPredicatedInst(I)) &&
4916 "Expecting a scalar emulated instruction");
4917 return isa<LoadInst>(I) ||
4918 (isa<StoreInst>(I) &&
4919 NumPredStores > NumberOfStoresToPredicate);
4920}
4921
4923 assert(VF.isVector() && "Expected VF >= 2");
4924
4925 // If we've already collected the instructions to scalarize or the predicated
4926 // BBs after vectorization, there's nothing to do. Collection may already have
4927 // occurred if we have a user-selected VF and are now computing the expected
4928 // cost for interleaving.
4929 if (InstsToScalarize.contains(VF) ||
4930 PredicatedBBsAfterVectorization.contains(VF))
4931 return;
4932
4933 // Initialize a mapping for VF in InstsToScalalarize. If we find that it's
4934 // not profitable to scalarize any instructions, the presence of VF in the
4935 // map will indicate that we've analyzed it already.
4936 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
4937
4938 // Find all the instructions that are scalar with predication in the loop and
4939 // determine if it would be better to not if-convert the blocks they are in.
4940 // If so, we also record the instructions to scalarize.
4941 for (BasicBlock *BB : TheLoop->blocks()) {
4943 continue;
4944 for (Instruction &I : *BB)
4945 if (isScalarWithPredication(&I, VF)) {
4946 ScalarCostsTy ScalarCosts;
4947 // Do not apply discount logic for:
4948 // 1. Scalars after vectorization, as there will only be a single copy
4949 // of the instruction.
4950 // 2. Scalable VF, as that would lead to invalid scalarization costs.
4951 // 3. Emulated masked memrefs, if a hacked cost is needed.
4952 if (!isScalarAfterVectorization(&I, VF) && !VF.isScalable() &&
4953 !useEmulatedMaskMemRefHack(&I, VF) &&
4954 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) {
4955 for (const auto &[I, IC] : ScalarCosts)
4956 ScalarCostsVF.insert({I, IC});
4957 // Check if we decided to scalarize a call. If so, update the widening
4958 // decision of the call to CM_Scalarize with the computed scalar cost.
4959 for (const auto &[I, Cost] : ScalarCosts) {
4960 auto *CI = dyn_cast<CallInst>(I);
4961 if (!CI || !CallWideningDecisions.contains({CI, VF}))
4962 continue;
4963 CallWideningDecisions[{CI, VF}].Kind = CM_Scalarize;
4964 CallWideningDecisions[{CI, VF}].Cost = Cost;
4965 }
4966 }
4967 // Remember that BB will remain after vectorization.
4968 PredicatedBBsAfterVectorization[VF].insert(BB);
4969 for (auto *Pred : predecessors(BB)) {
4970 if (Pred->getSingleSuccessor() == BB)
4971 PredicatedBBsAfterVectorization[VF].insert(Pred);
4972 }
4973 }
4974 }
4975}
4976
4977InstructionCost LoopVectorizationCostModel::computePredInstDiscount(
4978 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
4979 assert(!isUniformAfterVectorization(PredInst, VF) &&
4980 "Instruction marked uniform-after-vectorization will be predicated");
4981
4982 // Initialize the discount to zero, meaning that the scalar version and the
4983 // vector version cost the same.
4984 InstructionCost Discount = 0;
4985
4986 // Holds instructions to analyze. The instructions we visit are mapped in
4987 // ScalarCosts. Those instructions are the ones that would be scalarized if
4988 // we find that the scalar version costs less.
4990
4991 // Returns true if the given instruction can be scalarized.
4992 auto CanBeScalarized = [&](Instruction *I) -> bool {
4993 // We only attempt to scalarize instructions forming a single-use chain
4994 // from the original predicated block that would otherwise be vectorized.
4995 // Although not strictly necessary, we give up on instructions we know will
4996 // already be scalar to avoid traversing chains that are unlikely to be
4997 // beneficial.
4998 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
4999 isScalarAfterVectorization(I, VF))
5000 return false;
5001
5002 // If the instruction is scalar with predication, it will be analyzed
5003 // separately. We ignore it within the context of PredInst.
5004 if (isScalarWithPredication(I, VF))
5005 return false;
5006
5007 // If any of the instruction's operands are uniform after vectorization,
5008 // the instruction cannot be scalarized. This prevents, for example, a
5009 // masked load from being scalarized.
5010 //
5011 // We assume we will only emit a value for lane zero of an instruction
5012 // marked uniform after vectorization, rather than VF identical values.
5013 // Thus, if we scalarize an instruction that uses a uniform, we would
5014 // create uses of values corresponding to the lanes we aren't emitting code
5015 // for. This behavior can be changed by allowing getScalarValue to clone
5016 // the lane zero values for uniforms rather than asserting.
5017 for (Use &U : I->operands())
5018 if (auto *J = dyn_cast<Instruction>(U.get()))
5019 if (isUniformAfterVectorization(J, VF))
5020 return false;
5021
5022 // Otherwise, we can scalarize the instruction.
5023 return true;
5024 };
5025
5026 // Compute the expected cost discount from scalarizing the entire expression
5027 // feeding the predicated instruction. We currently only consider expressions
5028 // that are single-use instruction chains.
5029 Worklist.push_back(PredInst);
5030 while (!Worklist.empty()) {
5031 Instruction *I = Worklist.pop_back_val();
5032
5033 // If we've already analyzed the instruction, there's nothing to do.
5034 if (ScalarCosts.contains(I))
5035 continue;
5036
5037 // Cannot scalarize fixed-order recurrence phis at the moment.
5038 if (isa<PHINode>(I) && Legal->isFixedOrderRecurrence(cast<PHINode>(I)))
5039 continue;
5040
5041 // Compute the cost of the vector instruction. Note that this cost already
5042 // includes the scalarization overhead of the predicated instruction.
5043 InstructionCost VectorCost = getInstructionCost(I, VF);
5044
5045 // Compute the cost of the scalarized instruction. This cost is the cost of
5046 // the instruction as if it wasn't if-converted and instead remained in the
5047 // predicated block. We will scale this cost by block probability after
5048 // computing the scalarization overhead.
5049 InstructionCost ScalarCost =
5050 VF.getFixedValue() * getInstructionCost(I, ElementCount::getFixed(1));
5051
5052 // Compute the scalarization overhead of needed insertelement instructions
5053 // and phi nodes.
5054 if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) {
5055 Type *WideTy = toVectorizedTy(I->getType(), VF);
5056 for (Type *VectorTy : getContainedTypes(WideTy)) {
5057 ScalarCost += TTI.getScalarizationOverhead(
5059 /*Insert=*/true,
5060 /*Extract=*/false, CostKind);
5061 }
5062 ScalarCost +=
5063 VF.getFixedValue() * TTI.getCFInstrCost(Instruction::PHI, CostKind);
5064 }
5065
5066 // Compute the scalarization overhead of needed extractelement
5067 // instructions. For each of the instruction's operands, if the operand can
5068 // be scalarized, add it to the worklist; otherwise, account for the
5069 // overhead.
5070 for (Use &U : I->operands())
5071 if (auto *J = dyn_cast<Instruction>(U.get())) {
5072 assert(canVectorizeTy(J->getType()) &&
5073 "Instruction has non-scalar type");
5074 if (CanBeScalarized(J))
5075 Worklist.push_back(J);
5076 else if (needsExtract(J, VF)) {
5077 Type *WideTy = toVectorizedTy(J->getType(), VF);
5078 for (Type *VectorTy : getContainedTypes(WideTy)) {
5079 ScalarCost += TTI.getScalarizationOverhead(
5080 cast<VectorType>(VectorTy),
5081 APInt::getAllOnes(VF.getFixedValue()), /*Insert*/ false,
5082 /*Extract*/ true, CostKind);
5083 }
5084 }
5085 }
5086
5087 // Scale the total scalar cost by block probability.
5088 ScalarCost /= getPredBlockCostDivisor(CostKind, I->getParent());
5089
5090 // Compute the discount. A non-negative discount means the vector version
5091 // of the instruction costs more, and scalarizing would be beneficial.
5092 Discount += VectorCost - ScalarCost;
5093 ScalarCosts[I] = ScalarCost;
5094 }
5095
5096 return Discount;
5097}
5098
5101
5102 // If the vector loop gets executed exactly once with the given VF, ignore the
5103 // costs of comparison and induction instructions, as they'll get simplified
5104 // away.
5105 SmallPtrSet<Instruction *, 2> ValuesToIgnoreForVF;
5106 auto TC = getSmallConstantTripCount(PSE.getSE(), TheLoop);
5107 if (TC == VF && !foldTailByMasking())
5109 ValuesToIgnoreForVF);
5110
5111 // For each block.
5112 for (BasicBlock *BB : TheLoop->blocks()) {
5113 InstructionCost BlockCost;
5114
5115 // For each instruction in the old loop.
5116 for (Instruction &I : BB->instructionsWithoutDebug()) {
5117 // Skip ignored values.
5118 if (ValuesToIgnore.count(&I) || ValuesToIgnoreForVF.count(&I) ||
5119 (VF.isVector() && VecValuesToIgnore.count(&I)))
5120 continue;
5121
5123
5124 // Check if we should override the cost.
5125 if (C.isValid() && ForceTargetInstructionCost.getNumOccurrences() > 0)
5127
5128 BlockCost += C;
5129 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C << " for VF "
5130 << VF << " For instruction: " << I << '\n');
5131 }
5132
5133 // If we are vectorizing a predicated block, it will have been
5134 // if-converted. This means that the block's instructions (aside from
5135 // stores and instructions that may divide by zero) will now be
5136 // unconditionally executed. For the scalar case, we may not always execute
5137 // the predicated block, if it is an if-else block. Thus, scale the block's
5138 // cost by the probability of executing it.
5139 // getPredBlockCostDivisor will return 1 for blocks that are only predicated
5140 // by the header mask when folding the tail.
5141 if (VF.isScalar())
5142 BlockCost /= getPredBlockCostDivisor(CostKind, BB);
5143
5144 Cost += BlockCost;
5145 }
5146
5147 return Cost;
5148}
5149
5150/// Gets Address Access SCEV after verifying that the access pattern
5151/// is loop invariant except the induction variable dependence.
5152///
5153/// This SCEV can be sent to the Target in order to estimate the address
5154/// calculation cost.
5156 Value *Ptr,
5159 const Loop *TheLoop) {
5160
5161 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
5162 if (!Gep)
5163 return nullptr;
5164
5165 // We are looking for a gep with all loop invariant indices except for one
5166 // which should be an induction variable.
5167 auto *SE = PSE.getSE();
5168 unsigned NumOperands = Gep->getNumOperands();
5169 for (unsigned Idx = 1; Idx < NumOperands; ++Idx) {
5170 Value *Opd = Gep->getOperand(Idx);
5171 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
5172 !Legal->isInductionVariable(Opd))
5173 return nullptr;
5174 }
5175
5176 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV.
5177 return PSE.getSCEV(Ptr);
5178}
5179
5181LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
5182 ElementCount VF) {
5183 assert(VF.isVector() &&
5184 "Scalarization cost of instruction implies vectorization.");
5185 if (VF.isScalable())
5186 return InstructionCost::getInvalid();
5187
5188 Type *ValTy = getLoadStoreType(I);
5189 auto *SE = PSE.getSE();
5190
5191 unsigned AS = getLoadStoreAddressSpace(I);
5193 Type *PtrTy = toVectorTy(Ptr->getType(), VF);
5194 // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
5195 // that it is being called from this specific place.
5196
5197 // Figure out whether the access is strided and get the stride value
5198 // if it's known in compile time
5199 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
5200
5201 // Get the cost of the scalar memory instruction and address computation.
5203 PtrTy, SE, PtrSCEV, CostKind);
5204
5205 // Don't pass *I here, since it is scalar but will actually be part of a
5206 // vectorized loop where the user of it is a vectorized instruction.
5207 const Align Alignment = getLoadStoreAlignment(I);
5208 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
5209 Cost += VF.getFixedValue() *
5210 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
5211 AS, CostKind, OpInfo);
5212
5213 // Get the overhead of the extractelement and insertelement instructions
5214 // we might create due to scalarization.
5216
5217 // If we have a predicated load/store, it will need extra i1 extracts and
5218 // conditional branches, but may not be executed for each vector lane. Scale
5219 // the cost by the probability of executing the predicated block.
5220 if (isPredicatedInst(I)) {
5221 Cost /= getPredBlockCostDivisor(CostKind, I->getParent());
5222
5223 // Add the cost of an i1 extract and a branch
5224 auto *VecI1Ty =
5225 VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
5227 VecI1Ty, APInt::getAllOnes(VF.getFixedValue()),
5228 /*Insert=*/false, /*Extract=*/true, CostKind);
5229 Cost += TTI.getCFInstrCost(Instruction::Br, CostKind);
5230
5231 if (useEmulatedMaskMemRefHack(I, VF))
5232 // Artificially setting to a high enough value to practically disable
5233 // vectorization with such operations.
5234 Cost = 3000000;
5235 }
5236
5237 return Cost;
5238}
5239
5241LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
5242 ElementCount VF) {
5243 Type *ValTy = getLoadStoreType(I);
5244 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5246 unsigned AS = getLoadStoreAddressSpace(I);
5247 int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
5248
5249 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5250 "Stride should be 1 or -1 for consecutive memory access");
5251 const Align Alignment = getLoadStoreAlignment(I);
5253 if (Legal->isMaskRequired(I)) {
5254 unsigned IID = I->getOpcode() == Instruction::Load
5255 ? Intrinsic::masked_load
5256 : Intrinsic::masked_store;
5258 MemIntrinsicCostAttributes(IID, VectorTy, Alignment, AS), CostKind);
5259 } else {
5260 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
5261 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
5262 CostKind, OpInfo, I);
5263 }
5264
5265 bool Reverse = ConsecutiveStride < 0;
5266 if (Reverse)
5268 VectorTy, {}, CostKind, 0);
5269 return Cost;
5270}
5271
5273LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
5274 ElementCount VF) {
5275 assert(Legal->isUniformMemOp(*I, VF));
5276
5277 Type *ValTy = getLoadStoreType(I);
5279 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5280 const Align Alignment = getLoadStoreAlignment(I);
5281 unsigned AS = getLoadStoreAddressSpace(I);
5282 if (isa<LoadInst>(I)) {
5283 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5284 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
5285 CostKind) +
5287 VectorTy, {}, CostKind);
5288 }
5289 StoreInst *SI = cast<StoreInst>(I);
5290
5291 bool IsLoopInvariantStoreValue = Legal->isInvariant(SI->getValueOperand());
5292 // TODO: We have existing tests that request the cost of extracting element
5293 // VF.getKnownMinValue() - 1 from a scalable vector. This does not represent
5294 // the actual generated code, which involves extracting the last element of
5295 // a scalable vector where the lane to extract is unknown at compile time.
5297 TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5298 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, CostKind);
5299 if (!IsLoopInvariantStoreValue)
5300 Cost += TTI.getIndexedVectorInstrCostFromEnd(Instruction::ExtractElement,
5301 VectorTy, CostKind, 0);
5302 return Cost;
5303}
5304
5306LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
5307 ElementCount VF) {
5308 Type *ValTy = getLoadStoreType(I);
5309 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5310 const Align Alignment = getLoadStoreAlignment(I);
5312 Type *PtrTy = Ptr->getType();
5313
5314 if (!Legal->isUniform(Ptr, VF))
5315 PtrTy = toVectorTy(PtrTy, VF);
5316
5317 unsigned IID = I->getOpcode() == Instruction::Load
5318 ? Intrinsic::masked_gather
5319 : Intrinsic::masked_scatter;
5320 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5322 MemIntrinsicCostAttributes(IID, VectorTy, Ptr,
5323 Legal->isMaskRequired(I), Alignment, I),
5324 CostKind);
5325}
5326
5328LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
5329 ElementCount VF) {
5330 const auto *Group = getInterleavedAccessGroup(I);
5331 assert(Group && "Fail to get an interleaved access group.");
5332
5333 Instruction *InsertPos = Group->getInsertPos();
5334 Type *ValTy = getLoadStoreType(InsertPos);
5335 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5336 unsigned AS = getLoadStoreAddressSpace(InsertPos);
5337
5338 unsigned InterleaveFactor = Group->getFactor();
5339 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
5340
5341 // Holds the indices of existing members in the interleaved group.
5342 SmallVector<unsigned, 4> Indices;
5343 for (unsigned IF = 0; IF < InterleaveFactor; IF++)
5344 if (Group->getMember(IF))
5345 Indices.push_back(IF);
5346
5347 // Calculate the cost of the whole interleaved group.
5348 bool UseMaskForGaps =
5349 (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
5350 (isa<StoreInst>(I) && !Group->isFull());
5352 InsertPos->getOpcode(), WideVecTy, Group->getFactor(), Indices,
5353 Group->getAlign(), AS, CostKind, Legal->isMaskRequired(I),
5354 UseMaskForGaps);
5355
5356 if (Group->isReverse()) {
5357 // TODO: Add support for reversed masked interleaved access.
5358 assert(!Legal->isMaskRequired(I) &&
5359 "Reverse masked interleaved access not supported.");
5360 Cost += Group->getNumMembers() *
5362 VectorTy, {}, CostKind, 0);
5363 }
5364 return Cost;
5365}
5366
5367std::optional<InstructionCost>
5369 ElementCount VF,
5370 Type *Ty) const {
5371 using namespace llvm::PatternMatch;
5372 // Early exit for no inloop reductions
5373 if (InLoopReductions.empty() || VF.isScalar() || !isa<VectorType>(Ty))
5374 return std::nullopt;
5375 auto *VectorTy = cast<VectorType>(Ty);
5376
5377 // We are looking for a pattern of, and finding the minimal acceptable cost:
5378 // reduce(mul(ext(A), ext(B))) or
5379 // reduce(mul(A, B)) or
5380 // reduce(ext(A)) or
5381 // reduce(A).
5382 // The basic idea is that we walk down the tree to do that, finding the root
5383 // reduction instruction in InLoopReductionImmediateChains. From there we find
5384 // the pattern of mul/ext and test the cost of the entire pattern vs the cost
5385 // of the components. If the reduction cost is lower then we return it for the
5386 // reduction instruction and 0 for the other instructions in the pattern. If
5387 // it is not we return an invalid cost specifying the orignal cost method
5388 // should be used.
5389 Instruction *RetI = I;
5390 if (match(RetI, m_ZExtOrSExt(m_Value()))) {
5391 if (!RetI->hasOneUser())
5392 return std::nullopt;
5393 RetI = RetI->user_back();
5394 }
5395
5396 if (match(RetI, m_OneUse(m_Mul(m_Value(), m_Value()))) &&
5397 RetI->user_back()->getOpcode() == Instruction::Add) {
5398 RetI = RetI->user_back();
5399 }
5400
5401 // Test if the found instruction is a reduction, and if not return an invalid
5402 // cost specifying the parent to use the original cost modelling.
5403 Instruction *LastChain = InLoopReductionImmediateChains.lookup(RetI);
5404 if (!LastChain)
5405 return std::nullopt;
5406
5407 // Find the reduction this chain is a part of and calculate the basic cost of
5408 // the reduction on its own.
5409 Instruction *ReductionPhi = LastChain;
5410 while (!isa<PHINode>(ReductionPhi))
5411 ReductionPhi = InLoopReductionImmediateChains.at(ReductionPhi);
5412
5413 const RecurrenceDescriptor &RdxDesc =
5414 Legal->getRecurrenceDescriptor(cast<PHINode>(ReductionPhi));
5415
5416 InstructionCost BaseCost;
5417 RecurKind RK = RdxDesc.getRecurrenceKind();
5420 BaseCost = TTI.getMinMaxReductionCost(MinMaxID, VectorTy,
5421 RdxDesc.getFastMathFlags(), CostKind);
5422 } else {
5423 BaseCost = TTI.getArithmeticReductionCost(
5424 RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);
5425 }
5426
5427 // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
5428 // normal fmul instruction to the cost of the fadd reduction.
5429 if (RK == RecurKind::FMulAdd)
5430 BaseCost +=
5431 TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind);
5432
5433 // If we're using ordered reductions then we can just return the base cost
5434 // here, since getArithmeticReductionCost calculates the full ordered
5435 // reduction cost when FP reassociation is not allowed.
5436 if (useOrderedReductions(RdxDesc))
5437 return BaseCost;
5438
5439 // Get the operand that was not the reduction chain and match it to one of the
5440 // patterns, returning the better cost if it is found.
5441 Instruction *RedOp = RetI->getOperand(1) == LastChain
5444
5445 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
5446
5447 Instruction *Op0, *Op1;
5448 if (RedOp && RdxDesc.getOpcode() == Instruction::Add &&
5449 match(RedOp,
5451 match(Op0, m_ZExtOrSExt(m_Value())) &&
5452 Op0->getOpcode() == Op1->getOpcode() &&
5453 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
5454 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
5455 (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {
5456
5457 // Matched reduce.add(ext(mul(ext(A), ext(B)))
5458 // Note that the extend opcodes need to all match, or if A==B they will have
5459 // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
5460 // which is equally fine.
5461 bool IsUnsigned = isa<ZExtInst>(Op0);
5462 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
5463 auto *MulType = VectorType::get(Op0->getType(), VectorTy);
5464
5465 InstructionCost ExtCost =
5466 TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
5468 InstructionCost MulCost =
5469 TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind);
5470 InstructionCost Ext2Cost =
5471 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType,
5473
5474 InstructionCost RedCost = TTI.getMulAccReductionCost(
5475 IsUnsigned, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), ExtType,
5476 CostKind);
5477
5478 if (RedCost.isValid() &&
5479 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
5480 return I == RetI ? RedCost : 0;
5481 } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) &&
5482 !TheLoop->isLoopInvariant(RedOp)) {
5483 // Matched reduce(ext(A))
5484 bool IsUnsigned = isa<ZExtInst>(RedOp);
5485 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
5486 InstructionCost RedCost = TTI.getExtendedReductionCost(
5487 RdxDesc.getOpcode(), IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
5488 RdxDesc.getFastMathFlags(), CostKind);
5489
5490 InstructionCost ExtCost =
5491 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
5493 if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
5494 return I == RetI ? RedCost : 0;
5495 } else if (RedOp && RdxDesc.getOpcode() == Instruction::Add &&
5496 match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) {
5497 if (match(Op0, m_ZExtOrSExt(m_Value())) &&
5498 Op0->getOpcode() == Op1->getOpcode() &&
5499 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
5500 bool IsUnsigned = isa<ZExtInst>(Op0);
5501 Type *Op0Ty = Op0->getOperand(0)->getType();
5502 Type *Op1Ty = Op1->getOperand(0)->getType();
5503 Type *LargestOpTy =
5504 Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty
5505 : Op0Ty;
5506 auto *ExtType = VectorType::get(LargestOpTy, VectorTy);
5507
5508 // Matched reduce.add(mul(ext(A), ext(B))), where the two ext may be of
5509 // different sizes. We take the largest type as the ext to reduce, and add
5510 // the remaining cost as, for example reduce(mul(ext(ext(A)), ext(B))).
5511 InstructionCost ExtCost0 = TTI.getCastInstrCost(
5512 Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy),
5514 InstructionCost ExtCost1 = TTI.getCastInstrCost(
5515 Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy),
5517 InstructionCost MulCost =
5518 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
5519
5520 InstructionCost RedCost = TTI.getMulAccReductionCost(
5521 IsUnsigned, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), ExtType,
5522 CostKind);
5523 InstructionCost ExtraExtCost = 0;
5524 if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
5525 Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;
5526 ExtraExtCost = TTI.getCastInstrCost(
5527 ExtraExtOp->getOpcode(), ExtType,
5528 VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy),
5530 }
5531
5532 if (RedCost.isValid() &&
5533 (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost))
5534 return I == RetI ? RedCost : 0;
5535 } else if (!match(I, m_ZExtOrSExt(m_Value()))) {
5536 // Matched reduce.add(mul())
5537 InstructionCost MulCost =
5538 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
5539
5540 InstructionCost RedCost = TTI.getMulAccReductionCost(
5541 true, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), VectorTy,
5542 CostKind);
5543
5544 if (RedCost.isValid() && RedCost < MulCost + BaseCost)
5545 return I == RetI ? RedCost : 0;
5546 }
5547 }
5548
5549 return I == RetI ? std::optional<InstructionCost>(BaseCost) : std::nullopt;
5550}
5551
5553LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
5554 ElementCount VF) {
5555 // Calculate scalar cost only. Vectorization cost should be ready at this
5556 // moment.
5557 if (VF.isScalar()) {
5558 Type *ValTy = getLoadStoreType(I);
5560 const Align Alignment = getLoadStoreAlignment(I);
5561 unsigned AS = getLoadStoreAddressSpace(I);
5562
5563 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
5564 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5565 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, CostKind,
5566 OpInfo, I);
5567 }
5568 return getWideningCost(I, VF);
5569}
5570
5572LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
5573 ElementCount VF) const {
5574
5575 // There is no mechanism yet to create a scalable scalarization loop,
5576 // so this is currently Invalid.
5577 if (VF.isScalable())
5578 return InstructionCost::getInvalid();
5579
5580 if (VF.isScalar())
5581 return 0;
5582
5584 Type *RetTy = toVectorizedTy(I->getType(), VF);
5585 if (!RetTy->isVoidTy() &&
5587
5588 for (Type *VectorTy : getContainedTypes(RetTy)) {
5591 /*Insert=*/true,
5592 /*Extract=*/false, CostKind);
5593 }
5594 }
5595
5596 // Some targets keep addresses scalar.
5598 return Cost;
5599
5600 // Some targets support efficient element stores.
5602 return Cost;
5603
5604 // Collect operands to consider.
5605 CallInst *CI = dyn_cast<CallInst>(I);
5606 Instruction::op_range Ops = CI ? CI->args() : I->operands();
5607
5608 // Skip operands that do not require extraction/scalarization and do not incur
5609 // any overhead.
5611 for (auto *V : filterExtractingOperands(Ops, VF))
5612 Tys.push_back(maybeVectorizeType(V->getType(), VF));
5614}
5615
5617 if (VF.isScalar())
5618 return;
5619 NumPredStores = 0;
5620 for (BasicBlock *BB : TheLoop->blocks()) {
5621 // For each instruction in the old loop.
5622 for (Instruction &I : *BB) {
5624 if (!Ptr)
5625 continue;
5626
5627 // TODO: We should generate better code and update the cost model for
5628 // predicated uniform stores. Today they are treated as any other
5629 // predicated store (see added test cases in
5630 // invariant-store-vectorization.ll).
5632 NumPredStores++;
5633
5634 if (Legal->isUniformMemOp(I, VF)) {
5635 auto IsLegalToScalarize = [&]() {
5636 if (!VF.isScalable())
5637 // Scalarization of fixed length vectors "just works".
5638 return true;
5639
5640 // We have dedicated lowering for unpredicated uniform loads and
5641 // stores. Note that even with tail folding we know that at least
5642 // one lane is active (i.e. generalized predication is not possible
5643 // here), and the logic below depends on this fact.
5644 if (!foldTailByMasking())
5645 return true;
5646
5647 // For scalable vectors, a uniform memop load is always
5648 // uniform-by-parts and we know how to scalarize that.
5649 if (isa<LoadInst>(I))
5650 return true;
5651
5652 // A uniform store isn't neccessarily uniform-by-part
5653 // and we can't assume scalarization.
5654 auto &SI = cast<StoreInst>(I);
5655 return TheLoop->isLoopInvariant(SI.getValueOperand());
5656 };
5657
5658 const InstructionCost GatherScatterCost =
5660 getGatherScatterCost(&I, VF) : InstructionCost::getInvalid();
5661
5662 // Load: Scalar load + broadcast
5663 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
5664 // FIXME: This cost is a significant under-estimate for tail folded
5665 // memory ops.
5666 const InstructionCost ScalarizationCost =
5667 IsLegalToScalarize() ? getUniformMemOpCost(&I, VF)
5669
5670 // Choose better solution for the current VF, Note that Invalid
5671 // costs compare as maximumal large. If both are invalid, we get
5672 // scalable invalid which signals a failure and a vectorization abort.
5673 if (GatherScatterCost < ScalarizationCost)
5674 setWideningDecision(&I, VF, CM_GatherScatter, GatherScatterCost);
5675 else
5676 setWideningDecision(&I, VF, CM_Scalarize, ScalarizationCost);
5677 continue;
5678 }
5679
5680 // We assume that widening is the best solution when possible.
5681 if (memoryInstructionCanBeWidened(&I, VF)) {
5682 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
5683 int ConsecutiveStride = Legal->isConsecutivePtr(
5685 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5686 "Expected consecutive stride.");
5687 InstWidening Decision =
5688 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
5689 setWideningDecision(&I, VF, Decision, Cost);
5690 continue;
5691 }
5692
5693 // Choose between Interleaving, Gather/Scatter or Scalarization.
5695 unsigned NumAccesses = 1;
5696 if (isAccessInterleaved(&I)) {
5697 const auto *Group = getInterleavedAccessGroup(&I);
5698 assert(Group && "Fail to get an interleaved access group.");
5699
5700 // Make one decision for the whole group.
5701 if (getWideningDecision(&I, VF) != CM_Unknown)
5702 continue;
5703
5704 NumAccesses = Group->getNumMembers();
5706 InterleaveCost = getInterleaveGroupCost(&I, VF);
5707 }
5708
5709 InstructionCost GatherScatterCost =
5711 ? getGatherScatterCost(&I, VF) * NumAccesses
5713
5714 InstructionCost ScalarizationCost =
5715 getMemInstScalarizationCost(&I, VF) * NumAccesses;
5716
5717 // Choose better solution for the current VF,
5718 // write down this decision and use it during vectorization.
5720 InstWidening Decision;
5721 if (InterleaveCost <= GatherScatterCost &&
5722 InterleaveCost < ScalarizationCost) {
5723 Decision = CM_Interleave;
5724 Cost = InterleaveCost;
5725 } else if (GatherScatterCost < ScalarizationCost) {
5726 Decision = CM_GatherScatter;
5727 Cost = GatherScatterCost;
5728 } else {
5729 Decision = CM_Scalarize;
5730 Cost = ScalarizationCost;
5731 }
5732 // If the instructions belongs to an interleave group, the whole group
5733 // receives the same decision. The whole group receives the cost, but
5734 // the cost will actually be assigned to one instruction.
5735 if (const auto *Group = getInterleavedAccessGroup(&I)) {
5736 if (Decision == CM_Scalarize) {
5737 for (unsigned Idx = 0; Idx < Group->getFactor(); ++Idx) {
5738 if (auto *I = Group->getMember(Idx)) {
5739 setWideningDecision(I, VF, Decision,
5740 getMemInstScalarizationCost(I, VF));
5741 }
5742 }
5743 } else {
5744 setWideningDecision(Group, VF, Decision, Cost);
5745 }
5746 } else
5747 setWideningDecision(&I, VF, Decision, Cost);
5748 }
5749 }
5750
5751 // Make sure that any load of address and any other address computation
5752 // remains scalar unless there is gather/scatter support. This avoids
5753 // inevitable extracts into address registers, and also has the benefit of
5754 // activating LSR more, since that pass can't optimize vectorized
5755 // addresses.
5756 if (TTI.prefersVectorizedAddressing())
5757 return;
5758
5759 // Start with all scalar pointer uses.
5761 for (BasicBlock *BB : TheLoop->blocks())
5762 for (Instruction &I : *BB) {
5763 Instruction *PtrDef =
5765 if (PtrDef && TheLoop->contains(PtrDef) &&
5767 AddrDefs.insert(PtrDef);
5768 }
5769
5770 // Add all instructions used to generate the addresses.
5772 append_range(Worklist, AddrDefs);
5773 while (!Worklist.empty()) {
5774 Instruction *I = Worklist.pop_back_val();
5775 for (auto &Op : I->operands())
5776 if (auto *InstOp = dyn_cast<Instruction>(Op))
5777 if (TheLoop->contains(InstOp) && !isa<PHINode>(InstOp) &&
5778 AddrDefs.insert(InstOp).second)
5779 Worklist.push_back(InstOp);
5780 }
5781
5782 auto UpdateMemOpUserCost = [this, VF](LoadInst *LI) {
5783 // If there are direct memory op users of the newly scalarized load,
5784 // their cost may have changed because there's no scalarization
5785 // overhead for the operand. Update it.
5786 for (User *U : LI->users()) {
5788 continue;
5790 continue;
5793 getMemInstScalarizationCost(cast<Instruction>(U), VF));
5794 }
5795 };
5796 for (auto *I : AddrDefs) {
5797 if (isa<LoadInst>(I)) {
5798 // Setting the desired widening decision should ideally be handled in
5799 // by cost functions, but since this involves the task of finding out
5800 // if the loaded register is involved in an address computation, it is
5801 // instead changed here when we know this is the case.
5802 InstWidening Decision = getWideningDecision(I, VF);
5803 if (Decision == CM_Widen || Decision == CM_Widen_Reverse ||
5804 (!isPredicatedInst(I) && !Legal->isUniformMemOp(*I, VF) &&
5805 Decision == CM_Scalarize)) {
5806 // Scalarize a widened load of address or update the cost of a scalar
5807 // load of an address.
5809 I, VF, CM_Scalarize,
5810 (VF.getKnownMinValue() *
5811 getMemoryInstructionCost(I, ElementCount::getFixed(1))));
5812 UpdateMemOpUserCost(cast<LoadInst>(I));
5813 } else if (const auto *Group = getInterleavedAccessGroup(I)) {
5814 // Scalarize all members of this interleaved group when any member
5815 // is used as an address. The address-used load skips scalarization
5816 // overhead, other members include it.
5817 for (unsigned Idx = 0; Idx < Group->getFactor(); ++Idx) {
5818 if (Instruction *Member = Group->getMember(Idx)) {
5820 AddrDefs.contains(Member)
5821 ? (VF.getKnownMinValue() *
5822 getMemoryInstructionCost(Member,
5824 : getMemInstScalarizationCost(Member, VF);
5826 UpdateMemOpUserCost(cast<LoadInst>(Member));
5827 }
5828 }
5829 }
5830 } else {
5831 // Cannot scalarize fixed-order recurrence phis at the moment.
5832 if (isa<PHINode>(I) && Legal->isFixedOrderRecurrence(cast<PHINode>(I)))
5833 continue;
5834
5835 // Make sure I gets scalarized and a cost estimate without
5836 // scalarization overhead.
5837 ForcedScalars[VF].insert(I);
5838 }
5839 }
5840}
5841
5843 assert(!VF.isScalar() &&
5844 "Trying to set a vectorization decision for a scalar VF");
5845
5846 auto ForcedScalar = ForcedScalars.find(VF);
5847 for (BasicBlock *BB : TheLoop->blocks()) {
5848 // For each instruction in the old loop.
5849 for (Instruction &I : *BB) {
5851
5852 if (!CI)
5853 continue;
5854
5858 Function *ScalarFunc = CI->getCalledFunction();
5859 Type *ScalarRetTy = CI->getType();
5860 SmallVector<Type *, 4> Tys, ScalarTys;
5861 for (auto &ArgOp : CI->args())
5862 ScalarTys.push_back(ArgOp->getType());
5863
5864 // Estimate cost of scalarized vector call. The source operands are
5865 // assumed to be vectors, so we need to extract individual elements from
5866 // there, execute VF scalar calls, and then gather the result into the
5867 // vector return value.
5868 if (VF.isFixed()) {
5869 InstructionCost ScalarCallCost =
5870 TTI.getCallInstrCost(ScalarFunc, ScalarRetTy, ScalarTys, CostKind);
5871
5872 // Compute costs of unpacking argument values for the scalar calls and
5873 // packing the return values to a vector.
5874 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
5875 ScalarCost = ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
5876 } else {
5877 // There is no point attempting to calculate the scalar cost for a
5878 // scalable VF as we know it will be Invalid.
5880 "Unexpected valid cost for scalarizing scalable vectors");
5881 ScalarCost = InstructionCost::getInvalid();
5882 }
5883
5884 // Honor ForcedScalars and UniformAfterVectorization decisions.
5885 // TODO: For calls, it might still be more profitable to widen. Use
5886 // VPlan-based cost model to compare different options.
5887 if (VF.isVector() && ((ForcedScalar != ForcedScalars.end() &&
5888 ForcedScalar->second.contains(CI)) ||
5889 isUniformAfterVectorization(CI, VF))) {
5890 setCallWideningDecision(CI, VF, CM_Scalarize, nullptr,
5891 Intrinsic::not_intrinsic, std::nullopt,
5892 ScalarCost);
5893 continue;
5894 }
5895
5896 bool MaskRequired = Legal->isMaskRequired(CI);
5897 // Compute corresponding vector type for return value and arguments.
5898 Type *RetTy = toVectorizedTy(ScalarRetTy, VF);
5899 for (Type *ScalarTy : ScalarTys)
5900 Tys.push_back(toVectorizedTy(ScalarTy, VF));
5901
5902 // An in-loop reduction using an fmuladd intrinsic is a special case;
5903 // we don't want the normal cost for that intrinsic.
5905 if (auto RedCost = getReductionPatternCost(CI, VF, RetTy)) {
5908 std::nullopt, *RedCost);
5909 continue;
5910 }
5911
5912 // Find the cost of vectorizing the call, if we can find a suitable
5913 // vector variant of the function.
5914 VFInfo FuncInfo;
5915 Function *VecFunc = nullptr;
5916 // Search through any available variants for one we can use at this VF.
5917 for (VFInfo &Info : VFDatabase::getMappings(*CI)) {
5918 // Must match requested VF.
5919 if (Info.Shape.VF != VF)
5920 continue;
5921
5922 // Must take a mask argument if one is required
5923 if (MaskRequired && !Info.isMasked())
5924 continue;
5925
5926 // Check that all parameter kinds are supported
5927 bool ParamsOk = true;
5928 for (VFParameter Param : Info.Shape.Parameters) {
5929 switch (Param.ParamKind) {
5931 break;
5933 Value *ScalarParam = CI->getArgOperand(Param.ParamPos);
5934 // Make sure the scalar parameter in the loop is invariant.
5935 if (!PSE.getSE()->isLoopInvariant(PSE.getSCEV(ScalarParam),
5936 TheLoop))
5937 ParamsOk = false;
5938 break;
5939 }
5941 Value *ScalarParam = CI->getArgOperand(Param.ParamPos);
5942 // Find the stride for the scalar parameter in this loop and see if
5943 // it matches the stride for the variant.
5944 // TODO: do we need to figure out the cost of an extract to get the
5945 // first lane? Or do we hope that it will be folded away?
5946 ScalarEvolution *SE = PSE.getSE();
5947 if (!match(SE->getSCEV(ScalarParam),
5949 m_SCEV(), m_scev_SpecificSInt(Param.LinearStepOrPos),
5951 ParamsOk = false;
5952 break;
5953 }
5955 break;
5956 default:
5957 ParamsOk = false;
5958 break;
5959 }
5960 }
5961
5962 if (!ParamsOk)
5963 continue;
5964
5965 // Found a suitable candidate, stop here.
5966 VecFunc = CI->getModule()->getFunction(Info.VectorName);
5967 FuncInfo = Info;
5968 break;
5969 }
5970
5971 if (TLI && VecFunc && !CI->isNoBuiltin())
5972 VectorCost = TTI.getCallInstrCost(nullptr, RetTy, Tys, CostKind);
5973
5974 // Find the cost of an intrinsic; some targets may have instructions that
5975 // perform the operation without needing an actual call.
5977 if (IID != Intrinsic::not_intrinsic)
5979
5980 InstructionCost Cost = ScalarCost;
5981 InstWidening Decision = CM_Scalarize;
5982
5983 if (VectorCost <= Cost) {
5984 Cost = VectorCost;
5985 Decision = CM_VectorCall;
5986 }
5987
5988 if (IntrinsicCost <= Cost) {
5990 Decision = CM_IntrinsicCall;
5991 }
5992
5993 setCallWideningDecision(CI, VF, Decision, VecFunc, IID,
5995 }
5996 }
5997}
5998
6000 if (!Legal->isInvariant(Op))
6001 return false;
6002 // Consider Op invariant, if it or its operands aren't predicated
6003 // instruction in the loop. In that case, it is not trivially hoistable.
6004 auto *OpI = dyn_cast<Instruction>(Op);
6005 return !OpI || !TheLoop->contains(OpI) ||
6006 (!isPredicatedInst(OpI) &&
6007 (!isa<PHINode>(OpI) || OpI->getParent() != TheLoop->getHeader()) &&
6008 all_of(OpI->operands(),
6009 [this](Value *Op) { return shouldConsiderInvariant(Op); }));
6010}
6011
6014 ElementCount VF) {
6015 // If we know that this instruction will remain uniform, check the cost of
6016 // the scalar version.
6018 VF = ElementCount::getFixed(1);
6019
6020 if (VF.isVector() && isProfitableToScalarize(I, VF))
6021 return InstsToScalarize[VF][I];
6022
6023 // Forced scalars do not have any scalarization overhead.
6024 auto ForcedScalar = ForcedScalars.find(VF);
6025 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
6026 auto InstSet = ForcedScalar->second;
6027 if (InstSet.count(I))
6029 VF.getKnownMinValue();
6030 }
6031
6032 Type *RetTy = I->getType();
6034 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
6035 auto *SE = PSE.getSE();
6036
6037 Type *VectorTy;
6038 if (isScalarAfterVectorization(I, VF)) {
6039 [[maybe_unused]] auto HasSingleCopyAfterVectorization =
6040 [this](Instruction *I, ElementCount VF) -> bool {
6041 if (VF.isScalar())
6042 return true;
6043
6044 auto Scalarized = InstsToScalarize.find(VF);
6045 assert(Scalarized != InstsToScalarize.end() &&
6046 "VF not yet analyzed for scalarization profitability");
6047 return !Scalarized->second.count(I) &&
6048 llvm::all_of(I->users(), [&](User *U) {
6049 auto *UI = cast<Instruction>(U);
6050 return !Scalarized->second.count(UI);
6051 });
6052 };
6053
6054 // With the exception of GEPs and PHIs, after scalarization there should
6055 // only be one copy of the instruction generated in the loop. This is
6056 // because the VF is either 1, or any instructions that need scalarizing
6057 // have already been dealt with by the time we get here. As a result,
6058 // it means we don't have to multiply the instruction cost by VF.
6059 assert(I->getOpcode() == Instruction::GetElementPtr ||
6060 I->getOpcode() == Instruction::PHI ||
6061 (I->getOpcode() == Instruction::BitCast &&
6062 I->getType()->isPointerTy()) ||
6063 HasSingleCopyAfterVectorization(I, VF));
6064 VectorTy = RetTy;
6065 } else
6066 VectorTy = toVectorizedTy(RetTy, VF);
6067
6068 if (VF.isVector() && VectorTy->isVectorTy() &&
6069 !TTI.getNumberOfParts(VectorTy))
6071
6072 // TODO: We need to estimate the cost of intrinsic calls.
6073 switch (I->getOpcode()) {
6074 case Instruction::GetElementPtr:
6075 // We mark this instruction as zero-cost because the cost of GEPs in
6076 // vectorized code depends on whether the corresponding memory instruction
6077 // is scalarized or not. Therefore, we handle GEPs with the memory
6078 // instruction cost.
6079 return 0;
6080 case Instruction::Br: {
6081 // In cases of scalarized and predicated instructions, there will be VF
6082 // predicated blocks in the vectorized loop. Each branch around these
6083 // blocks requires also an extract of its vector compare i1 element.
6084 // Note that the conditional branch from the loop latch will be replaced by
6085 // a single branch controlling the loop, so there is no extra overhead from
6086 // scalarization.
6087 bool ScalarPredicatedBB = false;
6089 if (VF.isVector() && BI->isConditional() &&
6090 (PredicatedBBsAfterVectorization[VF].count(BI->getSuccessor(0)) ||
6091 PredicatedBBsAfterVectorization[VF].count(BI->getSuccessor(1))) &&
6092 BI->getParent() != TheLoop->getLoopLatch())
6093 ScalarPredicatedBB = true;
6094
6095 if (ScalarPredicatedBB) {
6096 // Not possible to scalarize scalable vector with predicated instructions.
6097 if (VF.isScalable())
6099 // Return cost for branches around scalarized and predicated blocks.
6100 auto *VecI1Ty =
6102 return (
6103 TTI.getScalarizationOverhead(
6104 VecI1Ty, APInt::getAllOnes(VF.getFixedValue()),
6105 /*Insert*/ false, /*Extract*/ true, CostKind) +
6106 (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
6107 }
6108
6109 if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
6110 // The back-edge branch will remain, as will all scalar branches.
6111 return TTI.getCFInstrCost(Instruction::Br, CostKind);
6112
6113 // This branch will be eliminated by if-conversion.
6114 return 0;
6115 // Note: We currently assume zero cost for an unconditional branch inside
6116 // a predicated block since it will become a fall-through, although we
6117 // may decide in the future to call TTI for all branches.
6118 }
6119 case Instruction::Switch: {
6120 if (VF.isScalar())
6121 return TTI.getCFInstrCost(Instruction::Switch, CostKind);
6122 auto *Switch = cast<SwitchInst>(I);
6123 return Switch->getNumCases() *
6124 TTI.getCmpSelInstrCost(
6125 Instruction::ICmp,
6126 toVectorTy(Switch->getCondition()->getType(), VF),
6127 toVectorTy(Type::getInt1Ty(I->getContext()), VF),
6129 }
6130 case Instruction::PHI: {
6131 auto *Phi = cast<PHINode>(I);
6132
6133 // First-order recurrences are replaced by vector shuffles inside the loop.
6134 if (VF.isVector() && Legal->isFixedOrderRecurrence(Phi)) {
6136 std::iota(Mask.begin(), Mask.end(), VF.getKnownMinValue() - 1);
6137 return TTI.getShuffleCost(TargetTransformInfo::SK_Splice,
6138 cast<VectorType>(VectorTy),
6139 cast<VectorType>(VectorTy), Mask, CostKind,
6140 VF.getKnownMinValue() - 1);
6141 }
6142
6143 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
6144 // converted into select instructions. We require N - 1 selects per phi
6145 // node, where N is the number of incoming values.
6146 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) {
6147 Type *ResultTy = Phi->getType();
6148
6149 // All instructions in an Any-of reduction chain are narrowed to bool.
6150 // Check if that is the case for this phi node.
6151 auto *HeaderUser = cast_if_present<PHINode>(
6152 find_singleton<User>(Phi->users(), [this](User *U, bool) -> User * {
6153 auto *Phi = dyn_cast<PHINode>(U);
6154 if (Phi && Phi->getParent() == TheLoop->getHeader())
6155 return Phi;
6156 return nullptr;
6157 }));
6158 if (HeaderUser) {
6159 auto &ReductionVars = Legal->getReductionVars();
6160 auto Iter = ReductionVars.find(HeaderUser);
6161 if (Iter != ReductionVars.end() &&
6163 Iter->second.getRecurrenceKind()))
6164 ResultTy = Type::getInt1Ty(Phi->getContext());
6165 }
6166 return (Phi->getNumIncomingValues() - 1) *
6167 TTI.getCmpSelInstrCost(
6168 Instruction::Select, toVectorTy(ResultTy, VF),
6169 toVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
6171 }
6172
6173 // When tail folding with EVL, if the phi is part of an out of loop
6174 // reduction then it will be transformed into a wide vp_merge.
6175 if (VF.isVector() && foldTailWithEVL() &&
6176 Legal->getReductionVars().contains(Phi) && !isInLoopReduction(Phi)) {
6178 Intrinsic::vp_merge, toVectorTy(Phi->getType(), VF),
6179 {toVectorTy(Type::getInt1Ty(Phi->getContext()), VF)});
6180 return TTI.getIntrinsicInstrCost(ICA, CostKind);
6181 }
6182
6183 return TTI.getCFInstrCost(Instruction::PHI, CostKind);
6184 }
6185 case Instruction::UDiv:
6186 case Instruction::SDiv:
6187 case Instruction::URem:
6188 case Instruction::SRem:
6189 if (VF.isVector() && isPredicatedInst(I)) {
6190 const auto [ScalarCost, SafeDivisorCost] = getDivRemSpeculationCost(I, VF);
6191 return isDivRemScalarWithPredication(ScalarCost, SafeDivisorCost) ?
6192 ScalarCost : SafeDivisorCost;
6193 }
6194 // We've proven all lanes safe to speculate, fall through.
6195 [[fallthrough]];
6196 case Instruction::Add:
6197 case Instruction::Sub: {
6198 auto Info = Legal->getHistogramInfo(I);
6199 if (Info && VF.isVector()) {
6200 const HistogramInfo *HGram = Info.value();
6201 // Assume that a non-constant update value (or a constant != 1) requires
6202 // a multiply, and add that into the cost.
6204 ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1));
6205 if (!RHS || RHS->getZExtValue() != 1)
6206 MulCost =
6207 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
6208
6209 // Find the cost of the histogram operation itself.
6210 Type *PtrTy = VectorType::get(HGram->Load->getPointerOperandType(), VF);
6211 Type *ScalarTy = I->getType();
6212 Type *MaskTy = VectorType::get(Type::getInt1Ty(I->getContext()), VF);
6213 IntrinsicCostAttributes ICA(Intrinsic::experimental_vector_histogram_add,
6214 Type::getVoidTy(I->getContext()),
6215 {PtrTy, ScalarTy, MaskTy});
6216
6217 // Add the costs together with the add/sub operation.
6218 return TTI.getIntrinsicInstrCost(ICA, CostKind) + MulCost +
6219 TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, CostKind);
6220 }
6221 [[fallthrough]];
6222 }
6223 case Instruction::FAdd:
6224 case Instruction::FSub:
6225 case Instruction::Mul:
6226 case Instruction::FMul:
6227 case Instruction::FDiv:
6228 case Instruction::FRem:
6229 case Instruction::Shl:
6230 case Instruction::LShr:
6231 case Instruction::AShr:
6232 case Instruction::And:
6233 case Instruction::Or:
6234 case Instruction::Xor: {
6235 // If we're speculating on the stride being 1, the multiplication may
6236 // fold away. We can generalize this for all operations using the notion
6237 // of neutral elements. (TODO)
6238 if (I->getOpcode() == Instruction::Mul &&
6239 ((TheLoop->isLoopInvariant(I->getOperand(0)) &&
6240 PSE.getSCEV(I->getOperand(0))->isOne()) ||
6241 (TheLoop->isLoopInvariant(I->getOperand(1)) &&
6242 PSE.getSCEV(I->getOperand(1))->isOne())))
6243 return 0;
6244
6245 // Detect reduction patterns
6246 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy))
6247 return *RedCost;
6248
6249 // Certain instructions can be cheaper to vectorize if they have a constant
6250 // second vector operand. One example of this are shifts on x86.
6251 Value *Op2 = I->getOperand(1);
6252 if (!isa<Constant>(Op2) && TheLoop->isLoopInvariant(Op2) &&
6253 PSE.getSE()->isSCEVable(Op2->getType()) &&
6254 isa<SCEVConstant>(PSE.getSCEV(Op2))) {
6255 Op2 = cast<SCEVConstant>(PSE.getSCEV(Op2))->getValue();
6256 }
6257 auto Op2Info = TTI.getOperandInfo(Op2);
6258 if (Op2Info.Kind == TargetTransformInfo::OK_AnyValue &&
6261
6262 SmallVector<const Value *, 4> Operands(I->operand_values());
6263 return TTI.getArithmeticInstrCost(
6264 I->getOpcode(), VectorTy, CostKind,
6265 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6266 Op2Info, Operands, I, TLI);
6267 }
6268 case Instruction::FNeg: {
6269 return TTI.getArithmeticInstrCost(
6270 I->getOpcode(), VectorTy, CostKind,
6271 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6272 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6273 I->getOperand(0), I);
6274 }
6275 case Instruction::Select: {
6277 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
6278 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
6279
6280 const Value *Op0, *Op1;
6281 using namespace llvm::PatternMatch;
6282 if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
6283 match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
6284 // select x, y, false --> x & y
6285 // select x, true, y --> x | y
6286 const auto [Op1VK, Op1VP] = TTI::getOperandInfo(Op0);
6287 const auto [Op2VK, Op2VP] = TTI::getOperandInfo(Op1);
6288 assert(Op0->getType()->getScalarSizeInBits() == 1 &&
6289 Op1->getType()->getScalarSizeInBits() == 1);
6290
6291 return TTI.getArithmeticInstrCost(
6292 match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And,
6293 VectorTy, CostKind, {Op1VK, Op1VP}, {Op2VK, Op2VP}, {Op0, Op1}, I);
6294 }
6295
6296 Type *CondTy = SI->getCondition()->getType();
6297 if (!ScalarCond)
6298 CondTy = VectorType::get(CondTy, VF);
6299
6301 if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
6302 Pred = Cmp->getPredicate();
6303 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred,
6304 CostKind, {TTI::OK_AnyValue, TTI::OP_None},
6305 {TTI::OK_AnyValue, TTI::OP_None}, I);
6306 }
6307 case Instruction::ICmp:
6308 case Instruction::FCmp: {
6309 Type *ValTy = I->getOperand(0)->getType();
6310
6312 [[maybe_unused]] Instruction *Op0AsInstruction =
6313 dyn_cast<Instruction>(I->getOperand(0));
6314 assert((!canTruncateToMinimalBitwidth(Op0AsInstruction, VF) ||
6315 MinBWs[I] == MinBWs[Op0AsInstruction]) &&
6316 "if both the operand and the compare are marked for "
6317 "truncation, they must have the same bitwidth");
6318 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[I]);
6319 }
6320
6321 VectorTy = toVectorTy(ValTy, VF);
6322 return TTI.getCmpSelInstrCost(
6323 I->getOpcode(), VectorTy, CmpInst::makeCmpResultType(VectorTy),
6324 cast<CmpInst>(I)->getPredicate(), CostKind,
6325 {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None}, I);
6326 }
6327 case Instruction::Store:
6328 case Instruction::Load: {
6329 ElementCount Width = VF;
6330 if (Width.isVector()) {
6331 InstWidening Decision = getWideningDecision(I, Width);
6332 assert(Decision != CM_Unknown &&
6333 "CM decision should be taken at this point");
6336 if (Decision == CM_Scalarize)
6337 Width = ElementCount::getFixed(1);
6338 }
6339 VectorTy = toVectorTy(getLoadStoreType(I), Width);
6340 return getMemoryInstructionCost(I, VF);
6341 }
6342 case Instruction::BitCast:
6343 if (I->getType()->isPointerTy())
6344 return 0;
6345 [[fallthrough]];
6346 case Instruction::ZExt:
6347 case Instruction::SExt:
6348 case Instruction::FPToUI:
6349 case Instruction::FPToSI:
6350 case Instruction::FPExt:
6351 case Instruction::PtrToInt:
6352 case Instruction::IntToPtr:
6353 case Instruction::SIToFP:
6354 case Instruction::UIToFP:
6355 case Instruction::Trunc:
6356 case Instruction::FPTrunc: {
6357 // Computes the CastContextHint from a Load/Store instruction.
6358 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
6360 "Expected a load or a store!");
6361
6362 if (VF.isScalar() || !TheLoop->contains(I))
6364
6365 switch (getWideningDecision(I, VF)) {
6377 llvm_unreachable("Instr did not go through cost modelling?");
6380 llvm_unreachable_internal("Instr has invalid widening decision");
6381 }
6382
6383 llvm_unreachable("Unhandled case!");
6384 };
6385
6386 unsigned Opcode = I->getOpcode();
6388 // For Trunc, the context is the only user, which must be a StoreInst.
6389 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
6390 if (I->hasOneUse())
6391 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
6392 CCH = ComputeCCH(Store);
6393 }
6394 // For Z/Sext, the context is the operand, which must be a LoadInst.
6395 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
6396 Opcode == Instruction::FPExt) {
6397 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
6398 CCH = ComputeCCH(Load);
6399 }
6400
6401 // We optimize the truncation of induction variables having constant
6402 // integer steps. The cost of these truncations is the same as the scalar
6403 // operation.
6404 if (isOptimizableIVTruncate(I, VF)) {
6405 auto *Trunc = cast<TruncInst>(I);
6406 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
6407 Trunc->getSrcTy(), CCH, CostKind, Trunc);
6408 }
6409
6410 // Detect reduction patterns
6411 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy))
6412 return *RedCost;
6413
6414 Type *SrcScalarTy = I->getOperand(0)->getType();
6415 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
6416 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
6417 SrcScalarTy =
6418 IntegerType::get(SrcScalarTy->getContext(), MinBWs[Op0AsInstruction]);
6419 Type *SrcVecTy =
6420 VectorTy->isVectorTy() ? toVectorTy(SrcScalarTy, VF) : SrcScalarTy;
6421
6423 // If the result type is <= the source type, there will be no extend
6424 // after truncating the users to the minimal required bitwidth.
6425 if (VectorTy->getScalarSizeInBits() <= SrcVecTy->getScalarSizeInBits() &&
6426 (I->getOpcode() == Instruction::ZExt ||
6427 I->getOpcode() == Instruction::SExt))
6428 return 0;
6429 }
6430
6431 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
6432 }
6433 case Instruction::Call:
6434 return getVectorCallCost(cast<CallInst>(I), VF);
6435 case Instruction::ExtractValue:
6436 return TTI.getInstructionCost(I, CostKind);
6437 case Instruction::Alloca:
6438 // We cannot easily widen alloca to a scalable alloca, as
6439 // the result would need to be a vector of pointers.
6440 if (VF.isScalable())
6442 [[fallthrough]];
6443 default:
6444 // This opcode is unknown. Assume that it is the same as 'mul'.
6445 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
6446 } // end of switch.
6447}
6448
6450 // Ignore ephemeral values.
6452
6453 SmallVector<Value *, 4> DeadInterleavePointerOps;
6455
6456 // If a scalar epilogue is required, users outside the loop won't use
6457 // live-outs from the vector loop but from the scalar epilogue. Ignore them if
6458 // that is the case.
6459 bool RequiresScalarEpilogue = requiresScalarEpilogue(true);
6460 auto IsLiveOutDead = [this, RequiresScalarEpilogue](User *U) {
6461 return RequiresScalarEpilogue &&
6462 !TheLoop->contains(cast<Instruction>(U)->getParent());
6463 };
6464
6466 DFS.perform(LI);
6467 for (BasicBlock *BB : reverse(make_range(DFS.beginRPO(), DFS.endRPO())))
6468 for (Instruction &I : reverse(*BB)) {
6469 if (VecValuesToIgnore.contains(&I) || ValuesToIgnore.contains(&I))
6470 continue;
6471
6472 // Add instructions that would be trivially dead and are only used by
6473 // values already ignored to DeadOps to seed worklist.
6475 all_of(I.users(), [this, IsLiveOutDead](User *U) {
6476 return VecValuesToIgnore.contains(U) ||
6477 ValuesToIgnore.contains(U) || IsLiveOutDead(U);
6478 }))
6479 DeadOps.push_back(&I);
6480
6481 // For interleave groups, we only create a pointer for the start of the
6482 // interleave group. Queue up addresses of group members except the insert
6483 // position for further processing.
6484 if (isAccessInterleaved(&I)) {
6485 auto *Group = getInterleavedAccessGroup(&I);
6486 if (Group->getInsertPos() == &I)
6487 continue;
6488 Value *PointerOp = getLoadStorePointerOperand(&I);
6489 DeadInterleavePointerOps.push_back(PointerOp);
6490 }
6491
6492 // Queue branches for analysis. They are dead, if their successors only
6493 // contain dead instructions.
6494 if (auto *Br = dyn_cast<BranchInst>(&I)) {
6495 if (Br->isConditional())
6496 DeadOps.push_back(&I);
6497 }
6498 }
6499
6500 // Mark ops feeding interleave group members as free, if they are only used
6501 // by other dead computations.
6502 for (unsigned I = 0; I != DeadInterleavePointerOps.size(); ++I) {
6503 auto *Op = dyn_cast<Instruction>(DeadInterleavePointerOps[I]);
6504 if (!Op || !TheLoop->contains(Op) || any_of(Op->users(), [this](User *U) {
6505 Instruction *UI = cast<Instruction>(U);
6506 return !VecValuesToIgnore.contains(U) &&
6507 (!isAccessInterleaved(UI) ||
6508 getInterleavedAccessGroup(UI)->getInsertPos() == UI);
6509 }))
6510 continue;
6511 VecValuesToIgnore.insert(Op);
6512 append_range(DeadInterleavePointerOps, Op->operands());
6513 }
6514
6515 // Mark ops that would be trivially dead and are only used by ignored
6516 // instructions as free.
6517 BasicBlock *Header = TheLoop->getHeader();
6518
6519 // Returns true if the block contains only dead instructions. Such blocks will
6520 // be removed by VPlan-to-VPlan transforms and won't be considered by the
6521 // VPlan-based cost model, so skip them in the legacy cost-model as well.
6522 auto IsEmptyBlock = [this](BasicBlock *BB) {
6523 return all_of(*BB, [this](Instruction &I) {
6524 return ValuesToIgnore.contains(&I) || VecValuesToIgnore.contains(&I) ||
6525 (isa<BranchInst>(&I) && !cast<BranchInst>(&I)->isConditional());
6526 });
6527 };
6528 for (unsigned I = 0; I != DeadOps.size(); ++I) {
6529 auto *Op = dyn_cast<Instruction>(DeadOps[I]);
6530
6531 // Check if the branch should be considered dead.
6532 if (auto *Br = dyn_cast_or_null<BranchInst>(Op)) {
6533 BasicBlock *ThenBB = Br->getSuccessor(0);
6534 BasicBlock *ElseBB = Br->getSuccessor(1);
6535 // Don't considers branches leaving the loop for simplification.
6536 if (!TheLoop->contains(ThenBB) || !TheLoop->contains(ElseBB))
6537 continue;
6538 bool ThenEmpty = IsEmptyBlock(ThenBB);
6539 bool ElseEmpty = IsEmptyBlock(ElseBB);
6540 if ((ThenEmpty && ElseEmpty) ||
6541 (ThenEmpty && ThenBB->getSingleSuccessor() == ElseBB &&
6542 ElseBB->phis().empty()) ||
6543 (ElseEmpty && ElseBB->getSingleSuccessor() == ThenBB &&
6544 ThenBB->phis().empty())) {
6545 VecValuesToIgnore.insert(Br);
6546 DeadOps.push_back(Br->getCondition());
6547 }
6548 continue;
6549 }
6550
6551 // Skip any op that shouldn't be considered dead.
6552 if (!Op || !TheLoop->contains(Op) ||
6553 (isa<PHINode>(Op) && Op->getParent() == Header) ||
6555 any_of(Op->users(), [this, IsLiveOutDead](User *U) {
6556 return !VecValuesToIgnore.contains(U) &&
6557 !ValuesToIgnore.contains(U) && !IsLiveOutDead(U);
6558 }))
6559 continue;
6560
6561 // If all of Op's users are in ValuesToIgnore, add it to ValuesToIgnore
6562 // which applies for both scalar and vector versions. Otherwise it is only
6563 // dead in vector versions, so only add it to VecValuesToIgnore.
6564 if (all_of(Op->users(),
6565 [this](User *U) { return ValuesToIgnore.contains(U); }))
6566 ValuesToIgnore.insert(Op);
6567
6568 VecValuesToIgnore.insert(Op);
6569 append_range(DeadOps, Op->operands());
6570 }
6571
6572 // Ignore type-promoting instructions we identified during reduction
6573 // detection.
6574 for (const auto &Reduction : Legal->getReductionVars()) {
6575 const RecurrenceDescriptor &RedDes = Reduction.second;
6576 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
6577 VecValuesToIgnore.insert_range(Casts);
6578 }
6579 // Ignore type-casting instructions we identified during induction
6580 // detection.
6581 for (const auto &Induction : Legal->getInductionVars()) {
6582 const InductionDescriptor &IndDes = Induction.second;
6583 VecValuesToIgnore.insert_range(IndDes.getCastInsts());
6584 }
6585}
6586
6588 // Avoid duplicating work finding in-loop reductions.
6589 if (!InLoopReductions.empty())
6590 return;
6591
6592 for (const auto &Reduction : Legal->getReductionVars()) {
6593 PHINode *Phi = Reduction.first;
6594 const RecurrenceDescriptor &RdxDesc = Reduction.second;
6595
6596 // Multi-use reductions (e.g., used in FindLastIV patterns) are handled
6597 // separately and should not be considered for in-loop reductions.
6598 if (RdxDesc.hasUsesOutsideReductionChain())
6599 continue;
6600
6601 // We don't collect reductions that are type promoted (yet).
6602 if (RdxDesc.getRecurrenceType() != Phi->getType())
6603 continue;
6604
6605 // In-loop AnyOf and FindIV reductions are not yet supported.
6606 RecurKind Kind = RdxDesc.getRecurrenceKind();
6609 continue;
6610
6611 // If the target would prefer this reduction to happen "in-loop", then we
6612 // want to record it as such.
6613 if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
6614 !TTI.preferInLoopReduction(Kind, Phi->getType()))
6615 continue;
6616
6617 // Check that we can correctly put the reductions into the loop, by
6618 // finding the chain of operations that leads from the phi to the loop
6619 // exit value.
6620 SmallVector<Instruction *, 4> ReductionOperations =
6621 RdxDesc.getReductionOpChain(Phi, TheLoop);
6622 bool InLoop = !ReductionOperations.empty();
6623
6624 if (InLoop) {
6625 InLoopReductions.insert(Phi);
6626 // Add the elements to InLoopReductionImmediateChains for cost modelling.
6627 Instruction *LastChain = Phi;
6628 for (auto *I : ReductionOperations) {
6629 InLoopReductionImmediateChains[I] = LastChain;
6630 LastChain = I;
6631 }
6632 }
6633 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
6634 << " reduction for phi: " << *Phi << "\n");
6635 }
6636}
6637
6638// This function will select a scalable VF if the target supports scalable
6639// vectors and a fixed one otherwise.
6640// TODO: we could return a pair of values that specify the max VF and
6641// min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
6642// `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment
6643// doesn't have a cost model that can choose which plan to execute if
6644// more than one is generated.
6647 unsigned WidestType;
6648 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
6649
6651 TTI.enableScalableVectorization()
6654
6655 TypeSize RegSize = TTI.getRegisterBitWidth(RegKind);
6656 unsigned N = RegSize.getKnownMinValue() / WidestType;
6657 return ElementCount::get(N, RegSize.isScalable());
6658}
6659
6662 ElementCount VF = UserVF;
6663 // Outer loop handling: They may require CFG and instruction level
6664 // transformations before even evaluating whether vectorization is profitable.
6665 // Since we cannot modify the incoming IR, we need to build VPlan upfront in
6666 // the vectorization pipeline.
6667 if (!OrigLoop->isInnermost()) {
6668 // If the user doesn't provide a vectorization factor, determine a
6669 // reasonable one.
6670 if (UserVF.isZero()) {
6671 VF = determineVPlanVF(TTI, CM);
6672 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
6673
6674 // Make sure we have a VF > 1 for stress testing.
6675 if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
6676 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
6677 << "overriding computed VF.\n");
6678 VF = ElementCount::getFixed(4);
6679 }
6680 } else if (UserVF.isScalable() && !TTI.supportsScalableVectors() &&
6682 LLVM_DEBUG(dbgs() << "LV: Not vectorizing. Scalable VF requested, but "
6683 << "not supported by the target.\n");
6685 "Scalable vectorization requested but not supported by the target",
6686 "the scalable user-specified vectorization width for outer-loop "
6687 "vectorization cannot be used because the target does not support "
6688 "scalable vectors.",
6689 "ScalableVFUnfeasible", ORE, OrigLoop);
6691 }
6692 assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
6694 "VF needs to be a power of two");
6695 LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
6696 << "VF " << VF << " to build VPlans.\n");
6697 buildVPlans(VF, VF);
6698
6699 if (VPlans.empty())
6701
6702 // For VPlan build stress testing, we bail out after VPlan construction.
6705
6706 return {VF, 0 /*Cost*/, 0 /* ScalarCost */};
6707 }
6708
6709 LLVM_DEBUG(
6710 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
6711 "VPlan-native path.\n");
6713}
6714
6715void LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
6716 assert(OrigLoop->isInnermost() && "Inner loop expected.");
6717 CM.collectValuesToIgnore();
6718 CM.collectElementTypesForWidening();
6719
6720 FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
6721 if (!MaxFactors) // Cases that should not to be vectorized nor interleaved.
6722 return;
6723
6724 // Invalidate interleave groups if all blocks of loop will be predicated.
6725 if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) &&
6727 LLVM_DEBUG(
6728 dbgs()
6729 << "LV: Invalidate all interleaved groups due to fold-tail by masking "
6730 "which requires masked-interleaved support.\n");
6731 if (CM.InterleaveInfo.invalidateGroups())
6732 // Invalidating interleave groups also requires invalidating all decisions
6733 // based on them, which includes widening decisions and uniform and scalar
6734 // values.
6735 CM.invalidateCostModelingDecisions();
6736 }
6737
6738 if (CM.foldTailByMasking())
6739 Legal->prepareToFoldTailByMasking();
6740
6741 ElementCount MaxUserVF =
6742 UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
6743 if (UserVF) {
6744 if (!ElementCount::isKnownLE(UserVF, MaxUserVF)) {
6746 "UserVF ignored because it may be larger than the maximal safe VF",
6747 "InvalidUserVF", ORE, OrigLoop);
6748 } else {
6750 "VF needs to be a power of two");
6751 // Collect the instructions (and their associated costs) that will be more
6752 // profitable to scalarize.
6753 CM.collectInLoopReductions();
6754 if (CM.selectUserVectorizationFactor(UserVF)) {
6755 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
6756 buildVPlansWithVPRecipes(UserVF, UserVF);
6758 return;
6759 }
6760 reportVectorizationInfo("UserVF ignored because of invalid costs.",
6761 "InvalidCost", ORE, OrigLoop);
6762 }
6763 }
6764
6765 // Collect the Vectorization Factor Candidates.
6766 SmallVector<ElementCount> VFCandidates;
6767 for (auto VF = ElementCount::getFixed(1);
6768 ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
6769 VFCandidates.push_back(VF);
6770 for (auto VF = ElementCount::getScalable(1);
6771 ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
6772 VFCandidates.push_back(VF);
6773
6774 CM.collectInLoopReductions();
6775 for (const auto &VF : VFCandidates) {
6776 // Collect Uniform and Scalar instructions after vectorization with VF.
6777 CM.collectNonVectorizedAndSetWideningDecisions(VF);
6778 }
6779
6780 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
6781 buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);
6782
6784}
6785
6787 ElementCount VF) const {
6788 InstructionCost Cost = CM.getInstructionCost(UI, VF);
6789 if (Cost.isValid() && ForceTargetInstructionCost.getNumOccurrences())
6791 return Cost;
6792}
6793
6795 ElementCount VF) const {
6796 return CM.isUniformAfterVectorization(I, VF);
6797}
6798
6799bool VPCostContext::skipCostComputation(Instruction *UI, bool IsVector) const {
6800 return CM.ValuesToIgnore.contains(UI) ||
6801 (IsVector && CM.VecValuesToIgnore.contains(UI)) ||
6802 SkipCostComputation.contains(UI);
6803}
6804
6806 return CM.getPredBlockCostDivisor(CostKind, BB);
6807}
6808
6810LoopVectorizationPlanner::precomputeCosts(VPlan &Plan, ElementCount VF,
6811 VPCostContext &CostCtx) const {
6813 // Cost modeling for inductions is inaccurate in the legacy cost model
6814 // compared to the recipes that are generated. To match here initially during
6815 // VPlan cost model bring up directly use the induction costs from the legacy
6816 // cost model. Note that we do this as pre-processing; the VPlan may not have
6817 // any recipes associated with the original induction increment instruction
6818 // and may replace truncates with VPWidenIntOrFpInductionRecipe. We precompute
6819 // the cost of induction phis and increments (both that are represented by
6820 // recipes and those that are not), to avoid distinguishing between them here,
6821 // and skip all recipes that represent induction phis and increments (the
6822 // former case) later on, if they exist, to avoid counting them twice.
6823 // Similarly we pre-compute the cost of any optimized truncates.
6824 // TODO: Switch to more accurate costing based on VPlan.
6825 for (const auto &[IV, IndDesc] : Legal->getInductionVars()) {
6827 IV->getIncomingValueForBlock(OrigLoop->getLoopLatch()));
6828 SmallVector<Instruction *> IVInsts = {IVInc};
6829 for (unsigned I = 0; I != IVInsts.size(); I++) {
6830 for (Value *Op : IVInsts[I]->operands()) {
6831 auto *OpI = dyn_cast<Instruction>(Op);
6832 if (Op == IV || !OpI || !OrigLoop->contains(OpI) || !Op->hasOneUse())
6833 continue;
6834 IVInsts.push_back(OpI);
6835 }
6836 }
6837 IVInsts.push_back(IV);
6838 for (User *U : IV->users()) {
6839 auto *CI = cast<Instruction>(U);
6840 if (!CostCtx.CM.isOptimizableIVTruncate(CI, VF))
6841 continue;
6842 IVInsts.push_back(CI);
6843 }
6844
6845 // If the vector loop gets executed exactly once with the given VF, ignore
6846 // the costs of comparison and induction instructions, as they'll get
6847 // simplified away.
6848 // TODO: Remove this code after stepping away from the legacy cost model and
6849 // adding code to simplify VPlans before calculating their costs.
6850 auto TC = getSmallConstantTripCount(PSE.getSE(), OrigLoop);
6851 if (TC == VF && !CM.foldTailByMasking())
6852 addFullyUnrolledInstructionsToIgnore(OrigLoop, Legal->getInductionVars(),
6853 CostCtx.SkipCostComputation);
6854
6855 for (Instruction *IVInst : IVInsts) {
6856 if (CostCtx.skipCostComputation(IVInst, VF.isVector()))
6857 continue;
6858 InstructionCost InductionCost = CostCtx.getLegacyCost(IVInst, VF);
6859 LLVM_DEBUG({
6860 dbgs() << "Cost of " << InductionCost << " for VF " << VF
6861 << ": induction instruction " << *IVInst << "\n";
6862 });
6863 Cost += InductionCost;
6864 CostCtx.SkipCostComputation.insert(IVInst);
6865 }
6866 }
6867
6868 /// Compute the cost of all exiting conditions of the loop using the legacy
6869 /// cost model. This is to match the legacy behavior, which adds the cost of
6870 /// all exit conditions. Note that this over-estimates the cost, as there will
6871 /// be a single condition to control the vector loop.
6873 CM.TheLoop->getExitingBlocks(Exiting);
6874 SetVector<Instruction *> ExitInstrs;
6875 // Collect all exit conditions.
6876 for (BasicBlock *EB : Exiting) {
6877 auto *Term = dyn_cast<BranchInst>(EB->getTerminator());
6878 if (!Term || CostCtx.skipCostComputation(Term, VF.isVector()))
6879 continue;
6880 if (auto *CondI = dyn_cast<Instruction>(Term->getOperand(0))) {
6881 ExitInstrs.insert(CondI);
6882 }
6883 }
6884 // Compute the cost of all instructions only feeding the exit conditions.
6885 for (unsigned I = 0; I != ExitInstrs.size(); ++I) {
6886 Instruction *CondI = ExitInstrs[I];
6887 if (!OrigLoop->contains(CondI) ||
6888 !CostCtx.SkipCostComputation.insert(CondI).second)
6889 continue;
6890 InstructionCost CondICost = CostCtx.getLegacyCost(CondI, VF);
6891 LLVM_DEBUG({
6892 dbgs() << "Cost of " << CondICost << " for VF " << VF
6893 << ": exit condition instruction " << *CondI << "\n";
6894 });
6895 Cost += CondICost;
6896 for (Value *Op : CondI->operands()) {
6897 auto *OpI = dyn_cast<Instruction>(Op);
6898 if (!OpI || CostCtx.skipCostComputation(OpI, VF.isVector()) ||
6899 any_of(OpI->users(), [&ExitInstrs, this](User *U) {
6900 return OrigLoop->contains(cast<Instruction>(U)->getParent()) &&
6901 !ExitInstrs.contains(cast<Instruction>(U));
6902 }))
6903 continue;
6904 ExitInstrs.insert(OpI);
6905 }
6906 }
6907
6908 // Pre-compute the costs for branches except for the backedge, as the number
6909 // of replicate regions in a VPlan may not directly match the number of
6910 // branches, which would lead to different decisions.
6911 // TODO: Compute cost of branches for each replicate region in the VPlan,
6912 // which is more accurate than the legacy cost model.
6913 for (BasicBlock *BB : OrigLoop->blocks()) {
6914 if (CostCtx.skipCostComputation(BB->getTerminator(), VF.isVector()))
6915 continue;
6916 CostCtx.SkipCostComputation.insert(BB->getTerminator());
6917 if (BB == OrigLoop->getLoopLatch())
6918 continue;
6919 auto BranchCost = CostCtx.getLegacyCost(BB->getTerminator(), VF);
6920 Cost += BranchCost;
6921 }
6922
6923 // Pre-compute costs for instructions that are forced-scalar or profitable to
6924 // scalarize. Their costs will be computed separately in the legacy cost
6925 // model.
6926 for (Instruction *ForcedScalar : CM.ForcedScalars[VF]) {
6927 if (CostCtx.skipCostComputation(ForcedScalar, VF.isVector()))
6928 continue;
6929 CostCtx.SkipCostComputation.insert(ForcedScalar);
6930 InstructionCost ForcedCost = CostCtx.getLegacyCost(ForcedScalar, VF);
6931 LLVM_DEBUG({
6932 dbgs() << "Cost of " << ForcedCost << " for VF " << VF
6933 << ": forced scalar " << *ForcedScalar << "\n";
6934 });
6935 Cost += ForcedCost;
6936 }
6937 for (const auto &[Scalarized, ScalarCost] : CM.InstsToScalarize[VF]) {
6938 if (CostCtx.skipCostComputation(Scalarized, VF.isVector()))
6939 continue;
6940 CostCtx.SkipCostComputation.insert(Scalarized);
6941 LLVM_DEBUG({
6942 dbgs() << "Cost of " << ScalarCost << " for VF " << VF
6943 << ": profitable to scalarize " << *Scalarized << "\n";
6944 });
6945 Cost += ScalarCost;
6946 }
6947
6948 return Cost;
6949}
6950
6951InstructionCost LoopVectorizationPlanner::cost(VPlan &Plan,
6952 ElementCount VF) const {
6953 VPCostContext CostCtx(CM.TTI, *CM.TLI, Plan, CM, CM.CostKind, *PSE.getSE(),
6954 OrigLoop);
6955 InstructionCost Cost = precomputeCosts(Plan, VF, CostCtx);
6956
6957 // Now compute and add the VPlan-based cost.
6958 Cost += Plan.cost(VF, CostCtx);
6959#ifndef NDEBUG
6960 unsigned EstimatedWidth = estimateElementCount(VF, CM.getVScaleForTuning());
6961 LLVM_DEBUG(dbgs() << "Cost for VF " << VF << ": " << Cost
6962 << " (Estimated cost per lane: ");
6963 if (Cost.isValid()) {
6964 double CostPerLane = double(Cost.getValue()) / EstimatedWidth;
6965 LLVM_DEBUG(dbgs() << format("%.1f", CostPerLane));
6966 } else /* No point dividing an invalid cost - it will still be invalid */
6967 LLVM_DEBUG(dbgs() << "Invalid");
6968 LLVM_DEBUG(dbgs() << ")\n");
6969#endif
6970 return Cost;
6971}
6972
6973#ifndef NDEBUG
6974/// Return true if the original loop \ TheLoop contains any instructions that do
6975/// not have corresponding recipes in \p Plan and are not marked to be ignored
6976/// in \p CostCtx. This means the VPlan contains simplification that the legacy
6977/// cost-model did not account for.
6979 VPCostContext &CostCtx,
6980 Loop *TheLoop,
6981 ElementCount VF) {
6982 // First collect all instructions for the recipes in Plan.
6983 auto GetInstructionForCost = [](const VPRecipeBase *R) -> Instruction * {
6984 if (auto *S = dyn_cast<VPSingleDefRecipe>(R))
6985 return dyn_cast_or_null<Instruction>(S->getUnderlyingValue());
6986 if (auto *WidenMem = dyn_cast<VPWidenMemoryRecipe>(R))
6987 return &WidenMem->getIngredient();
6988 return nullptr;
6989 };
6990
6991 // Check if a select for a safe divisor was hoisted to the pre-header. If so,
6992 // the select doesn't need to be considered for the vector loop cost; go with
6993 // the more accurate VPlan-based cost model.
6994 for (VPRecipeBase &R : *Plan.getVectorPreheader()) {
6995 auto *VPI = dyn_cast<VPInstruction>(&R);
6996 if (!VPI || VPI->getOpcode() != Instruction::Select)
6997 continue;
6998
6999 if (auto *WR = dyn_cast_or_null<VPWidenRecipe>(VPI->getSingleUser())) {
7000 switch (WR->getOpcode()) {
7001 case Instruction::UDiv:
7002 case Instruction::SDiv:
7003 case Instruction::URem:
7004 case Instruction::SRem:
7005 return true;
7006 default:
7007 break;
7008 }
7009 }
7010 }
7011
7012 DenseSet<Instruction *> SeenInstrs;
7013 auto Iter = vp_depth_first_deep(Plan.getVectorLoopRegion()->getEntry());
7015 for (VPRecipeBase &R : *VPBB) {
7016 if (auto *IR = dyn_cast<VPInterleaveRecipe>(&R)) {
7017 auto *IG = IR->getInterleaveGroup();
7018 unsigned NumMembers = IG->getNumMembers();
7019 for (unsigned I = 0; I != NumMembers; ++I) {
7020 if (Instruction *M = IG->getMember(I))
7021 SeenInstrs.insert(M);
7022 }
7023 continue;
7024 }
7025 // Unused FOR splices are removed by VPlan transforms, so the VPlan-based
7026 // cost model won't cost it whilst the legacy will.
7027 if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R)) {
7028 using namespace VPlanPatternMatch;
7029 if (none_of(FOR->users(),
7030 match_fn(m_VPInstruction<
7032 return true;
7033 }
7034 // The VPlan-based cost model is more accurate for partial reductions and
7035 // comparing against the legacy cost isn't desirable.
7036 if (auto *VPR = dyn_cast<VPReductionRecipe>(&R))
7037 if (VPR->isPartialReduction())
7038 return true;
7039
7040 // The VPlan-based cost model can analyze if recipes are scalar
7041 // recursively, but the legacy cost model cannot.
7042 if (auto *WidenMemR = dyn_cast<VPWidenMemoryRecipe>(&R)) {
7043 auto *AddrI = dyn_cast<Instruction>(
7044 getLoadStorePointerOperand(&WidenMemR->getIngredient()));
7045 if (AddrI && vputils::isSingleScalar(WidenMemR->getAddr()) !=
7046 CostCtx.isLegacyUniformAfterVectorization(AddrI, VF))
7047 return true;
7048 }
7049
7050 /// If a VPlan transform folded a recipe to one producing a single-scalar,
7051 /// but the original instruction wasn't uniform-after-vectorization in the
7052 /// legacy cost model, the legacy cost overestimates the actual cost.
7053 if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R)) {
7054 if (RepR->isSingleScalar() &&
7056 RepR->getUnderlyingInstr(), VF))
7057 return true;
7058 }
7059 if (Instruction *UI = GetInstructionForCost(&R)) {
7060 // If we adjusted the predicate of the recipe, the cost in the legacy
7061 // cost model may be different.
7062 using namespace VPlanPatternMatch;
7063 CmpPredicate Pred;
7064 if (match(&R, m_Cmp(Pred, m_VPValue(), m_VPValue())) &&
7065 cast<VPRecipeWithIRFlags>(R).getPredicate() !=
7066 cast<CmpInst>(UI)->getPredicate())
7067 return true;
7068 SeenInstrs.insert(UI);
7069 }
7070 }
7071 }
7072
7073 // Return true if the loop contains any instructions that are not also part of
7074 // the VPlan or are skipped for VPlan-based cost computations. This indicates
7075 // that the VPlan contains extra simplifications.
7076 return any_of(TheLoop->blocks(), [&SeenInstrs, &CostCtx,
7077 TheLoop](BasicBlock *BB) {
7078 return any_of(*BB, [&SeenInstrs, &CostCtx, TheLoop, BB](Instruction &I) {
7079 // Skip induction phis when checking for simplifications, as they may not
7080 // be lowered directly be lowered to a corresponding PHI recipe.
7081 if (isa<PHINode>(&I) && BB == TheLoop->getHeader() &&
7082 CostCtx.CM.Legal->isInductionPhi(cast<PHINode>(&I)))
7083 return false;
7084 return !SeenInstrs.contains(&I) && !CostCtx.skipCostComputation(&I, true);
7085 });
7086 });
7087}
7088#endif
7089
7091 if (VPlans.empty())
7093 // If there is a single VPlan with a single VF, return it directly.
7094 VPlan &FirstPlan = *VPlans[0];
7095 if (VPlans.size() == 1 && size(FirstPlan.vectorFactors()) == 1)
7096 return {*FirstPlan.vectorFactors().begin(), 0, 0};
7097
7098 LLVM_DEBUG(dbgs() << "LV: Computing best VF using cost kind: "
7099 << (CM.CostKind == TTI::TCK_RecipThroughput
7100 ? "Reciprocal Throughput\n"
7101 : CM.CostKind == TTI::TCK_Latency
7102 ? "Instruction Latency\n"
7103 : CM.CostKind == TTI::TCK_CodeSize ? "Code Size\n"
7104 : CM.CostKind == TTI::TCK_SizeAndLatency
7105 ? "Code Size and Latency\n"
7106 : "Unknown\n"));
7107
7109 assert(hasPlanWithVF(ScalarVF) &&
7110 "More than a single plan/VF w/o any plan having scalar VF");
7111
7112 // TODO: Compute scalar cost using VPlan-based cost model.
7113 InstructionCost ScalarCost = CM.expectedCost(ScalarVF);
7114 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ScalarCost << ".\n");
7115 VectorizationFactor ScalarFactor(ScalarVF, ScalarCost, ScalarCost);
7116 VectorizationFactor BestFactor = ScalarFactor;
7117
7118 bool ForceVectorization = Hints.getForce() == LoopVectorizeHints::FK_Enabled;
7119 if (ForceVectorization) {
7120 // Ignore scalar width, because the user explicitly wants vectorization.
7121 // Initialize cost to max so that VF = 2 is, at least, chosen during cost
7122 // evaluation.
7123 BestFactor.Cost = InstructionCost::getMax();
7124 }
7125
7126 for (auto &P : VPlans) {
7127 ArrayRef<ElementCount> VFs(P->vectorFactors().begin(),
7128 P->vectorFactors().end());
7129
7131 if (any_of(VFs, [this](ElementCount VF) {
7132 return CM.shouldConsiderRegPressureForVF(VF);
7133 }))
7134 RUs = calculateRegisterUsageForPlan(*P, VFs, TTI, CM.ValuesToIgnore);
7135
7136 for (unsigned I = 0; I < VFs.size(); I++) {
7137 ElementCount VF = VFs[I];
7138 if (VF.isScalar())
7139 continue;
7140 if (!ForceVectorization && !willGenerateVectors(*P, VF, TTI)) {
7141 LLVM_DEBUG(
7142 dbgs()
7143 << "LV: Not considering vector loop of width " << VF
7144 << " because it will not generate any vector instructions.\n");
7145 continue;
7146 }
7147 if (CM.OptForSize && !ForceVectorization && hasReplicatorRegion(*P)) {
7148 LLVM_DEBUG(
7149 dbgs()
7150 << "LV: Not considering vector loop of width " << VF
7151 << " because it would cause replicated blocks to be generated,"
7152 << " which isn't allowed when optimizing for size.\n");
7153 continue;
7154 }
7155
7156 InstructionCost Cost = cost(*P, VF);
7157 VectorizationFactor CurrentFactor(VF, Cost, ScalarCost);
7158
7159 if (CM.shouldConsiderRegPressureForVF(VF) &&
7160 RUs[I].exceedsMaxNumRegs(TTI, ForceTargetNumVectorRegs)) {
7161 LLVM_DEBUG(dbgs() << "LV(REG): Not considering vector loop of width "
7162 << VF << " because it uses too many registers\n");
7163 continue;
7164 }
7165
7166 if (isMoreProfitable(CurrentFactor, BestFactor, P->hasScalarTail()))
7167 BestFactor = CurrentFactor;
7168
7169 // If profitable add it to ProfitableVF list.
7170 if (isMoreProfitable(CurrentFactor, ScalarFactor, P->hasScalarTail()))
7171 ProfitableVFs.push_back(CurrentFactor);
7172 }
7173 }
7174
7175#ifndef NDEBUG
7176 // Select the optimal vectorization factor according to the legacy cost-model.
7177 // This is now only used to verify the decisions by the new VPlan-based
7178 // cost-model and will be retired once the VPlan-based cost-model is
7179 // stabilized.
7180 VectorizationFactor LegacyVF = selectVectorizationFactor();
7181 VPlan &BestPlan = getPlanFor(BestFactor.Width);
7182
7183 // Pre-compute the cost and use it to check if BestPlan contains any
7184 // simplifications not accounted for in the legacy cost model. If that's the
7185 // case, don't trigger the assertion, as the extra simplifications may cause a
7186 // different VF to be picked by the VPlan-based cost model.
7187 VPCostContext CostCtx(CM.TTI, *CM.TLI, BestPlan, CM, CM.CostKind,
7188 *CM.PSE.getSE(), OrigLoop);
7189 precomputeCosts(BestPlan, BestFactor.Width, CostCtx);
7190 // Verify that the VPlan-based and legacy cost models agree, except for
7191 // * VPlans with early exits,
7192 // * VPlans with additional VPlan simplifications,
7193 // * EVL-based VPlans with gather/scatters (the VPlan-based cost model uses
7194 // vp_scatter/vp_gather).
7195 // The legacy cost model doesn't properly model costs for such loops.
7196 bool UsesEVLGatherScatter =
7198 BestPlan.getVectorLoopRegion()->getEntry())),
7199 [](VPBasicBlock *VPBB) {
7200 return any_of(*VPBB, [](VPRecipeBase &R) {
7201 return isa<VPWidenLoadEVLRecipe, VPWidenStoreEVLRecipe>(&R) &&
7202 !cast<VPWidenMemoryRecipe>(&R)->isConsecutive();
7203 });
7204 });
7205 assert(
7206 (BestFactor.Width == LegacyVF.Width || BestPlan.hasEarlyExit() ||
7207 !Legal->getLAI()->getSymbolicStrides().empty() || UsesEVLGatherScatter ||
7209 getPlanFor(BestFactor.Width), CostCtx, OrigLoop, BestFactor.Width) ||
7211 getPlanFor(LegacyVF.Width), CostCtx, OrigLoop, LegacyVF.Width)) &&
7212 " VPlan cost model and legacy cost model disagreed");
7213 assert((BestFactor.Width.isScalar() || BestFactor.ScalarCost > 0) &&
7214 "when vectorizing, the scalar cost must be computed.");
7215#endif
7216
7217 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << BestFactor.Width << ".\n");
7218 return BestFactor;
7219}
7220
7222 using namespace VPlanPatternMatch;
7224 "RdxResult must be ComputeFindIVResult");
7225 VPValue *StartVPV = RdxResult->getOperand(1);
7226 match(StartVPV, m_Freeze(m_VPValue(StartVPV)));
7227 return StartVPV->getLiveInIRValue();
7228}
7229
7230// If \p EpiResumePhiR is resume VPPhi for a reduction when vectorizing the
7231// epilog loop, fix the reduction's scalar PHI node by adding the incoming value
7232// from the main vector loop.
7234 VPPhi *EpiResumePhiR, PHINode &EpiResumePhi, BasicBlock *BypassBlock) {
7235 // Get the VPInstruction computing the reduction result in the middle block.
7236 // The first operand may not be from the middle block if it is not connected
7237 // to the scalar preheader. In that case, there's nothing to fix.
7238 VPValue *Incoming = EpiResumePhiR->getOperand(0);
7241 auto *EpiRedResult = dyn_cast<VPInstruction>(Incoming);
7242 if (!EpiRedResult ||
7243 (EpiRedResult->getOpcode() != VPInstruction::ComputeAnyOfResult &&
7244 EpiRedResult->getOpcode() != VPInstruction::ComputeReductionResult &&
7245 EpiRedResult->getOpcode() != VPInstruction::ComputeFindIVResult))
7246 return;
7247
7248 auto *EpiRedHeaderPhi =
7249 cast<VPReductionPHIRecipe>(EpiRedResult->getOperand(0));
7250 RecurKind Kind = EpiRedHeaderPhi->getRecurrenceKind();
7251 Value *MainResumeValue;
7252 if (auto *VPI = dyn_cast<VPInstruction>(EpiRedHeaderPhi->getStartValue())) {
7253 assert((VPI->getOpcode() == VPInstruction::Broadcast ||
7254 VPI->getOpcode() == VPInstruction::ReductionStartVector) &&
7255 "unexpected start recipe");
7256 MainResumeValue = VPI->getOperand(0)->getUnderlyingValue();
7257 } else
7258 MainResumeValue = EpiRedHeaderPhi->getStartValue()->getUnderlyingValue();
7260 [[maybe_unused]] Value *StartV =
7261 EpiRedResult->getOperand(1)->getLiveInIRValue();
7262 auto *Cmp = cast<ICmpInst>(MainResumeValue);
7263 assert(Cmp->getPredicate() == CmpInst::ICMP_NE &&
7264 "AnyOf expected to start with ICMP_NE");
7265 assert(Cmp->getOperand(1) == StartV &&
7266 "AnyOf expected to start by comparing main resume value to original "
7267 "start value");
7268 MainResumeValue = Cmp->getOperand(0);
7270 Value *StartV = getStartValueFromReductionResult(EpiRedResult);
7271 Value *SentinelV = EpiRedResult->getOperand(2)->getLiveInIRValue();
7272 using namespace llvm::PatternMatch;
7273 Value *Cmp, *OrigResumeV, *CmpOp;
7274 [[maybe_unused]] bool IsExpectedPattern =
7275 match(MainResumeValue,
7276 m_Select(m_OneUse(m_Value(Cmp)), m_Specific(SentinelV),
7277 m_Value(OrigResumeV))) &&
7279 m_Value(CmpOp))) &&
7280 ((CmpOp == StartV && isGuaranteedNotToBeUndefOrPoison(CmpOp))));
7281 assert(IsExpectedPattern && "Unexpected reduction resume pattern");
7282 MainResumeValue = OrigResumeV;
7283 }
7284 PHINode *MainResumePhi = cast<PHINode>(MainResumeValue);
7285
7286 // When fixing reductions in the epilogue loop we should already have
7287 // created a bc.merge.rdx Phi after the main vector body. Ensure that we carry
7288 // over the incoming values correctly.
7289 EpiResumePhi.setIncomingValueForBlock(
7290 BypassBlock, MainResumePhi->getIncomingValueForBlock(BypassBlock));
7291}
7292
7294 ElementCount BestVF, unsigned BestUF, VPlan &BestVPlan,
7295 InnerLoopVectorizer &ILV, DominatorTree *DT, bool VectorizingEpilogue) {
7296 assert(BestVPlan.hasVF(BestVF) &&
7297 "Trying to execute plan with unsupported VF");
7298 assert(BestVPlan.hasUF(BestUF) &&
7299 "Trying to execute plan with unsupported UF");
7300 if (BestVPlan.hasEarlyExit())
7301 ++LoopsEarlyExitVectorized;
7302 // TODO: Move to VPlan transform stage once the transition to the VPlan-based
7303 // cost model is complete for better cost estimates.
7306 BestVPlan);
7309 bool HasBranchWeights =
7310 hasBranchWeightMD(*OrigLoop->getLoopLatch()->getTerminator());
7311 if (HasBranchWeights) {
7312 std::optional<unsigned> VScale = CM.getVScaleForTuning();
7314 BestVPlan, BestVF, VScale);
7315 }
7316
7317 // Checks are the same for all VPlans, added to BestVPlan only for
7318 // compactness.
7319 attachRuntimeChecks(BestVPlan, ILV.RTChecks, HasBranchWeights);
7320
7321 // Retrieving VectorPH now when it's easier while VPlan still has Regions.
7322 VPBasicBlock *VectorPH = cast<VPBasicBlock>(BestVPlan.getVectorPreheader());
7323
7324 VPlanTransforms::optimizeForVFAndUF(BestVPlan, BestVF, BestUF, PSE);
7327 if (BestVPlan.getEntry()->getSingleSuccessor() ==
7328 BestVPlan.getScalarPreheader()) {
7329 // TODO: The vector loop would be dead, should not even try to vectorize.
7330 ORE->emit([&]() {
7331 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationDead",
7332 OrigLoop->getStartLoc(),
7333 OrigLoop->getHeader())
7334 << "Created vector loop never executes due to insufficient trip "
7335 "count.";
7336 });
7338 }
7339
7341 BestVPlan, BestVF,
7342 TTI.getRegisterBitWidth(BestVF.isScalable()
7346
7348 // Regions are dissolved after optimizing for VF and UF, which completely
7349 // removes unneeded loop regions first.
7351 // Canonicalize EVL loops after regions are dissolved.
7355 BestVPlan, VectorPH, CM.foldTailByMasking(),
7356 CM.requiresScalarEpilogue(BestVF.isVector()));
7357 VPlanTransforms::materializeVFAndVFxUF(BestVPlan, VectorPH, BestVF);
7358 VPlanTransforms::cse(BestVPlan);
7360
7361 // 0. Generate SCEV-dependent code in the entry, including TripCount, before
7362 // making any changes to the CFG.
7363 DenseMap<const SCEV *, Value *> ExpandedSCEVs =
7364 VPlanTransforms::expandSCEVs(BestVPlan, *PSE.getSE());
7365 if (!ILV.getTripCount())
7366 ILV.setTripCount(BestVPlan.getTripCount()->getLiveInIRValue());
7367 else
7368 assert(VectorizingEpilogue && "should only re-use the existing trip "
7369 "count during epilogue vectorization");
7370
7371 // Perform the actual loop transformation.
7372 VPTransformState State(&TTI, BestVF, LI, DT, ILV.AC, ILV.Builder, &BestVPlan,
7373 OrigLoop->getParentLoop(),
7374 Legal->getWidestInductionType());
7375
7376#ifdef EXPENSIVE_CHECKS
7377 assert(DT->verify(DominatorTree::VerificationLevel::Fast));
7378#endif
7379
7380 // 1. Set up the skeleton for vectorization, including vector pre-header and
7381 // middle block. The vector loop is created during VPlan execution.
7382 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
7384 State.CFG.PrevBB->getSingleSuccessor(), &BestVPlan);
7386
7387 assert(verifyVPlanIsValid(BestVPlan, true /*VerifyLate*/) &&
7388 "final VPlan is invalid");
7389
7390 // After vectorization, the exit blocks of the original loop will have
7391 // additional predecessors. Invalidate SCEVs for the exit phis in case SE
7392 // looked through single-entry phis.
7393 ScalarEvolution &SE = *PSE.getSE();
7394 for (VPIRBasicBlock *Exit : BestVPlan.getExitBlocks()) {
7395 if (!Exit->hasPredecessors())
7396 continue;
7397 for (VPRecipeBase &PhiR : Exit->phis())
7399 &cast<VPIRPhi>(PhiR).getIRPhi());
7400 }
7401 // Forget the original loop and block dispositions.
7402 SE.forgetLoop(OrigLoop);
7404
7406
7407 //===------------------------------------------------===//
7408 //
7409 // Notice: any optimization or new instruction that go
7410 // into the code below should also be implemented in
7411 // the cost-model.
7412 //
7413 //===------------------------------------------------===//
7414
7415 // Retrieve loop information before executing the plan, which may remove the
7416 // original loop, if it becomes unreachable.
7417 MDNode *LID = OrigLoop->getLoopID();
7418 unsigned OrigLoopInvocationWeight = 0;
7419 std::optional<unsigned> OrigAverageTripCount =
7420 getLoopEstimatedTripCount(OrigLoop, &OrigLoopInvocationWeight);
7421
7422 BestVPlan.execute(&State);
7423
7424 // 2.6. Maintain Loop Hints
7425 // Keep all loop hints from the original loop on the vector loop (we'll
7426 // replace the vectorizer-specific hints below).
7427 VPBasicBlock *HeaderVPBB = vputils::getFirstLoopHeader(BestVPlan, State.VPDT);
7428 // Add metadata to disable runtime unrolling a scalar loop when there
7429 // are no runtime checks about strides and memory. A scalar loop that is
7430 // rarely used is not worth unrolling.
7431 bool DisableRuntimeUnroll = !ILV.RTChecks.hasChecks() && !BestVF.isScalar();
7433 HeaderVPBB ? LI->getLoopFor(State.CFG.VPBB2IRBB.lookup(HeaderVPBB))
7434 : nullptr,
7435 HeaderVPBB, BestVPlan, VectorizingEpilogue, LID, OrigAverageTripCount,
7436 OrigLoopInvocationWeight,
7437 estimateElementCount(BestVF * BestUF, CM.getVScaleForTuning()),
7438 DisableRuntimeUnroll);
7439
7440 // 3. Fix the vectorized code: take care of header phi's, live-outs,
7441 // predication, updating analyses.
7442 ILV.fixVectorizedLoop(State);
7443
7445
7446 return ExpandedSCEVs;
7447}
7448
7449//===--------------------------------------------------------------------===//
7450// EpilogueVectorizerMainLoop
7451//===--------------------------------------------------------------------===//
7452
7453/// This function is partially responsible for generating the control flow
7454/// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
7456 BasicBlock *ScalarPH = createScalarPreheader("");
7457 BasicBlock *VectorPH = ScalarPH->getSinglePredecessor();
7458
7459 // Generate the code to check the minimum iteration count of the vector
7460 // epilogue (see below).
7461 EPI.EpilogueIterationCountCheck =
7462 emitIterationCountCheck(VectorPH, ScalarPH, true);
7463 EPI.EpilogueIterationCountCheck->setName("iter.check");
7464
7465 VectorPH = cast<BranchInst>(EPI.EpilogueIterationCountCheck->getTerminator())
7466 ->getSuccessor(1);
7467 // Generate the iteration count check for the main loop, *after* the check
7468 // for the epilogue loop, so that the path-length is shorter for the case
7469 // that goes directly through the vector epilogue. The longer-path length for
7470 // the main loop is compensated for, by the gain from vectorizing the larger
7471 // trip count. Note: the branch will get updated later on when we vectorize
7472 // the epilogue.
7473 EPI.MainLoopIterationCountCheck =
7474 emitIterationCountCheck(VectorPH, ScalarPH, false);
7475
7476 return cast<BranchInst>(EPI.MainLoopIterationCountCheck->getTerminator())
7477 ->getSuccessor(1);
7478}
7479
7481 LLVM_DEBUG({
7482 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
7483 << "Main Loop VF:" << EPI.MainLoopVF
7484 << ", Main Loop UF:" << EPI.MainLoopUF
7485 << ", Epilogue Loop VF:" << EPI.EpilogueVF
7486 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7487 });
7488}
7489
7492 dbgs() << "intermediate fn:\n"
7493 << *OrigLoop->getHeader()->getParent() << "\n";
7494 });
7495}
7496
7498 BasicBlock *VectorPH, BasicBlock *Bypass, bool ForEpilogue) {
7499 assert(Bypass && "Expected valid bypass basic block.");
7502 Value *CheckMinIters = createIterationCountCheck(
7503 VectorPH, ForEpilogue ? EPI.EpilogueVF : EPI.MainLoopVF,
7504 ForEpilogue ? EPI.EpilogueUF : EPI.MainLoopUF);
7505
7506 BasicBlock *const TCCheckBlock = VectorPH;
7507 if (!ForEpilogue)
7508 TCCheckBlock->setName("vector.main.loop.iter.check");
7509
7510 // Create new preheader for vector loop.
7511 VectorPH = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
7512 static_cast<DominatorTree *>(nullptr), LI, nullptr,
7513 "vector.ph");
7514 if (ForEpilogue) {
7515 // Save the trip count so we don't have to regenerate it in the
7516 // vec.epilog.iter.check. This is safe to do because the trip count
7517 // generated here dominates the vector epilog iter check.
7518 EPI.TripCount = Count;
7519 } else {
7521 }
7522
7523 BranchInst &BI = *BranchInst::Create(Bypass, VectorPH, CheckMinIters);
7524 if (hasBranchWeightMD(*OrigLoop->getLoopLatch()->getTerminator()))
7525 setBranchWeights(BI, MinItersBypassWeights, /*IsExpected=*/false);
7526 ReplaceInstWithInst(TCCheckBlock->getTerminator(), &BI);
7527
7528 // When vectorizing the main loop, its trip-count check is placed in a new
7529 // block, whereas the overall trip-count check is placed in the VPlan entry
7530 // block. When vectorizing the epilogue loop, its trip-count check is placed
7531 // in the VPlan entry block.
7532 if (!ForEpilogue)
7533 introduceCheckBlockInVPlan(TCCheckBlock);
7534 return TCCheckBlock;
7535}
7536
7537//===--------------------------------------------------------------------===//
7538// EpilogueVectorizerEpilogueLoop
7539//===--------------------------------------------------------------------===//
7540
7541/// This function creates a new scalar preheader, using the previous one as
7542/// entry block to the epilogue VPlan. The minimum iteration check is being
7543/// represented in VPlan.
7545 BasicBlock *NewScalarPH = createScalarPreheader("vec.epilog.");
7546 BasicBlock *OriginalScalarPH = NewScalarPH->getSinglePredecessor();
7547 OriginalScalarPH->setName("vec.epilog.iter.check");
7548 VPIRBasicBlock *NewEntry = Plan.createVPIRBasicBlock(OriginalScalarPH);
7549 VPBasicBlock *OldEntry = Plan.getEntry();
7550 for (auto &R : make_early_inc_range(*OldEntry)) {
7551 // Skip moving VPIRInstructions (including VPIRPhis), which are unmovable by
7552 // defining.
7553 if (isa<VPIRInstruction>(&R))
7554 continue;
7555 R.moveBefore(*NewEntry, NewEntry->end());
7556 }
7557
7558 VPBlockUtils::reassociateBlocks(OldEntry, NewEntry);
7559 Plan.setEntry(NewEntry);
7560 // OldEntry is now dead and will be cleaned up when the plan gets destroyed.
7561
7562 return OriginalScalarPH;
7563}
7564
7566 LLVM_DEBUG({
7567 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
7568 << "Epilogue Loop VF:" << EPI.EpilogueVF
7569 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7570 });
7571}
7572
7575 dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n";
7576 });
7577}
7578
7579VPWidenMemoryRecipe *VPRecipeBuilder::tryToWidenMemory(VPInstruction *VPI,
7580 VFRange &Range) {
7581 assert((VPI->getOpcode() == Instruction::Load ||
7582 VPI->getOpcode() == Instruction::Store) &&
7583 "Must be called with either a load or store");
7585
7586 auto WillWiden = [&](ElementCount VF) -> bool {
7588 CM.getWideningDecision(I, VF);
7590 "CM decision should be taken at this point.");
7592 return true;
7593 if (CM.isScalarAfterVectorization(I, VF) ||
7594 CM.isProfitableToScalarize(I, VF))
7595 return false;
7597 };
7598
7600 return nullptr;
7601
7602 VPValue *Mask = nullptr;
7603 if (Legal->isMaskRequired(I))
7604 Mask = getBlockInMask(Builder.getInsertBlock());
7605
7606 // Determine if the pointer operand of the access is either consecutive or
7607 // reverse consecutive.
7609 CM.getWideningDecision(I, Range.Start);
7611 bool Consecutive =
7613
7614 VPValue *Ptr = VPI->getOpcode() == Instruction::Load ? VPI->getOperand(0)
7615 : VPI->getOperand(1);
7616 if (Consecutive) {
7619 VPSingleDefRecipe *VectorPtr;
7620 if (Reverse) {
7621 // When folding the tail, we may compute an address that we don't in the
7622 // original scalar loop: drop the GEP no-wrap flags in this case.
7623 // Otherwise preserve existing flags without no-unsigned-wrap, as we will
7624 // emit negative indices.
7625 GEPNoWrapFlags Flags =
7626 CM.foldTailByMasking() || !GEP
7628 : GEP->getNoWrapFlags().withoutNoUnsignedWrap();
7629 VectorPtr = new VPVectorEndPointerRecipe(
7630 Ptr, &Plan.getVF(), getLoadStoreType(I),
7631 /*Stride*/ -1, Flags, VPI->getDebugLoc());
7632 } else {
7633 VectorPtr = new VPVectorPointerRecipe(Ptr, getLoadStoreType(I),
7634 GEP ? GEP->getNoWrapFlags()
7636 VPI->getDebugLoc());
7637 }
7638 Builder.insert(VectorPtr);
7639 Ptr = VectorPtr;
7640 }
7641 if (VPI->getOpcode() == Instruction::Load) {
7642 auto *Load = cast<LoadInst>(I);
7643 return new VPWidenLoadRecipe(*Load, Ptr, Mask, Consecutive, Reverse, *VPI,
7644 VPI->getDebugLoc());
7645 }
7646
7647 StoreInst *Store = cast<StoreInst>(I);
7648 return new VPWidenStoreRecipe(*Store, Ptr, VPI->getOperand(0), Mask,
7649 Consecutive, Reverse, *VPI, VPI->getDebugLoc());
7650}
7651
7652/// Creates a VPWidenIntOrFpInductionRecipe for \p PhiR. If needed, it will
7653/// also insert a recipe to expand the step for the induction recipe.
7656 const InductionDescriptor &IndDesc, VPlan &Plan,
7657 ScalarEvolution &SE, Loop &OrigLoop) {
7658 assert(SE.isLoopInvariant(IndDesc.getStep(), &OrigLoop) &&
7659 "step must be loop invariant");
7660
7661 VPValue *Start = PhiR->getOperand(0);
7662 assert(Plan.getLiveIn(IndDesc.getStartValue()) == Start &&
7663 "Start VPValue must match IndDesc's start value");
7664
7665 // It is always safe to copy over the NoWrap and FastMath flags. In
7666 // particular, when folding tail by masking, the masked-off lanes are never
7667 // used, so it is safe.
7668 VPIRFlags Flags = vputils::getFlagsFromIndDesc(IndDesc);
7669 VPValue *Step =
7671
7672 // Update wide induction increments to use the same step as the corresponding
7673 // wide induction. This enables detecting induction increments directly in
7674 // VPlan and removes redundant splats.
7675 using namespace llvm::VPlanPatternMatch;
7676 if (match(PhiR->getOperand(1), m_Add(m_Specific(PhiR), m_VPValue())))
7677 PhiR->getOperand(1)->getDefiningRecipe()->setOperand(1, Step);
7678
7680 return new VPWidenIntOrFpInductionRecipe(Phi, Start, Step, &Plan.getVF(),
7681 IndDesc, Flags, PhiR->getDebugLoc());
7682}
7683
7685VPRecipeBuilder::tryToOptimizeInductionPHI(VPInstruction *VPI) {
7686 auto *Phi = cast<PHINode>(VPI->getUnderlyingInstr());
7687
7688 // Check if this is an integer or fp induction. If so, build the recipe that
7689 // produces its scalar and vector values.
7690 if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi))
7691 return createWidenInductionRecipes(VPI, *II, Plan, *PSE.getSE(), *OrigLoop);
7692
7693 // Check if this is pointer induction. If so, build the recipe for it.
7694 if (auto *II = Legal->getPointerInductionDescriptor(Phi)) {
7695 VPValue *Step = vputils::getOrCreateVPValueForSCEVExpr(Plan, II->getStep());
7696 return new VPWidenPointerInductionRecipe(Phi, VPI->getOperand(0), Step,
7697 &Plan.getVFxUF(), *II,
7698 VPI->getDebugLoc());
7699 }
7700 return nullptr;
7701}
7702
7704VPRecipeBuilder::tryToOptimizeInductionTruncate(VPInstruction *VPI,
7705 VFRange &Range) {
7706 auto *I = cast<TruncInst>(VPI->getUnderlyingInstr());
7707 // Optimize the special case where the source is a constant integer
7708 // induction variable. Notice that we can only optimize the 'trunc' case
7709 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
7710 // (c) other casts depend on pointer size.
7711
7712 // Determine whether \p K is a truncation based on an induction variable that
7713 // can be optimized.
7714 auto IsOptimizableIVTruncate =
7715 [&](Instruction *K) -> std::function<bool(ElementCount)> {
7716 return [=](ElementCount VF) -> bool {
7717 return CM.isOptimizableIVTruncate(K, VF);
7718 };
7719 };
7720
7722 IsOptimizableIVTruncate(I), Range))
7723 return nullptr;
7724
7726 VPI->getOperand(0)->getDefiningRecipe());
7727 PHINode *Phi = WidenIV->getPHINode();
7728 VPValue *Start = WidenIV->getStartValue();
7729 const InductionDescriptor &IndDesc = WidenIV->getInductionDescriptor();
7730
7731 // It is always safe to copy over the NoWrap and FastMath flags. In
7732 // particular, when folding tail by masking, the masked-off lanes are never
7733 // used, so it is safe.
7734 VPIRFlags Flags = vputils::getFlagsFromIndDesc(IndDesc);
7735 VPValue *Step =
7737 return new VPWidenIntOrFpInductionRecipe(
7738 Phi, Start, Step, &Plan.getVF(), IndDesc, I, Flags, VPI->getDebugLoc());
7739}
7740
7741VPSingleDefRecipe *VPRecipeBuilder::tryToWidenCall(VPInstruction *VPI,
7742 VFRange &Range) {
7743 CallInst *CI = cast<CallInst>(VPI->getUnderlyingInstr());
7745 [this, CI](ElementCount VF) {
7746 return CM.isScalarWithPredication(CI, VF);
7747 },
7748 Range);
7749
7750 if (IsPredicated)
7751 return nullptr;
7752
7754 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
7755 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
7756 ID == Intrinsic::pseudoprobe ||
7757 ID == Intrinsic::experimental_noalias_scope_decl))
7758 return nullptr;
7759
7761 VPI->op_begin() + CI->arg_size());
7762
7763 // Is it beneficial to perform intrinsic call compared to lib call?
7764 bool ShouldUseVectorIntrinsic =
7766 [&](ElementCount VF) -> bool {
7767 return CM.getCallWideningDecision(CI, VF).Kind ==
7769 },
7770 Range);
7771 if (ShouldUseVectorIntrinsic)
7772 return new VPWidenIntrinsicRecipe(*CI, ID, Ops, CI->getType(), *VPI, *VPI,
7773 VPI->getDebugLoc());
7774
7775 Function *Variant = nullptr;
7776 std::optional<unsigned> MaskPos;
7777 // Is better to call a vectorized version of the function than to to scalarize
7778 // the call?
7779 auto ShouldUseVectorCall = LoopVectorizationPlanner::getDecisionAndClampRange(
7780 [&](ElementCount VF) -> bool {
7781 // The following case may be scalarized depending on the VF.
7782 // The flag shows whether we can use a usual Call for vectorized
7783 // version of the instruction.
7784
7785 // If we've found a variant at a previous VF, then stop looking. A
7786 // vectorized variant of a function expects input in a certain shape
7787 // -- basically the number of input registers, the number of lanes
7788 // per register, and whether there's a mask required.
7789 // We store a pointer to the variant in the VPWidenCallRecipe, so
7790 // once we have an appropriate variant it's only valid for that VF.
7791 // This will force a different vplan to be generated for each VF that
7792 // finds a valid variant.
7793 if (Variant)
7794 return false;
7795 LoopVectorizationCostModel::CallWideningDecision Decision =
7796 CM.getCallWideningDecision(CI, VF);
7798 Variant = Decision.Variant;
7799 MaskPos = Decision.MaskPos;
7800 return true;
7801 }
7802
7803 return false;
7804 },
7805 Range);
7806 if (ShouldUseVectorCall) {
7807 if (MaskPos.has_value()) {
7808 // We have 2 cases that would require a mask:
7809 // 1) The block needs to be predicated, either due to a conditional
7810 // in the scalar loop or use of an active lane mask with
7811 // tail-folding, and we use the appropriate mask for the block.
7812 // 2) No mask is required for the block, but the only available
7813 // vector variant at this VF requires a mask, so we synthesize an
7814 // all-true mask.
7815 VPValue *Mask = nullptr;
7816 if (Legal->isMaskRequired(CI))
7817 Mask = getBlockInMask(Builder.getInsertBlock());
7818 else
7819 Mask = Plan.getOrAddLiveIn(
7820 ConstantInt::getTrue(IntegerType::getInt1Ty(Plan.getContext())));
7821
7822 Ops.insert(Ops.begin() + *MaskPos, Mask);
7823 }
7824
7825 Ops.push_back(VPI->getOperand(VPI->getNumOperands() - 1));
7826 return new VPWidenCallRecipe(CI, Variant, Ops, *VPI, *VPI,
7827 VPI->getDebugLoc());
7828 }
7829
7830 return nullptr;
7831}
7832
7833bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
7835 !isa<StoreInst>(I) && "Instruction should have been handled earlier");
7836 // Instruction should be widened, unless it is scalar after vectorization,
7837 // scalarization is profitable or it is predicated.
7838 auto WillScalarize = [this, I](ElementCount VF) -> bool {
7839 return CM.isScalarAfterVectorization(I, VF) ||
7840 CM.isProfitableToScalarize(I, VF) ||
7841 CM.isScalarWithPredication(I, VF);
7842 };
7844 Range);
7845}
7846
7847VPWidenRecipe *VPRecipeBuilder::tryToWiden(VPInstruction *VPI) {
7848 auto *I = VPI->getUnderlyingInstr();
7849 switch (VPI->getOpcode()) {
7850 default:
7851 return nullptr;
7852 case Instruction::SDiv:
7853 case Instruction::UDiv:
7854 case Instruction::SRem:
7855 case Instruction::URem: {
7856 // If not provably safe, use a select to form a safe divisor before widening the
7857 // div/rem operation itself. Otherwise fall through to general handling below.
7858 if (CM.isPredicatedInst(I)) {
7860 VPValue *Mask = getBlockInMask(Builder.getInsertBlock());
7861 VPValue *One = Plan.getConstantInt(I->getType(), 1u);
7862 auto *SafeRHS =
7863 Builder.createSelect(Mask, Ops[1], One, VPI->getDebugLoc());
7864 Ops[1] = SafeRHS;
7865 return new VPWidenRecipe(*I, Ops, *VPI, *VPI, VPI->getDebugLoc());
7866 }
7867 [[fallthrough]];
7868 }
7869 case Instruction::Add:
7870 case Instruction::And:
7871 case Instruction::AShr:
7872 case Instruction::FAdd:
7873 case Instruction::FCmp:
7874 case Instruction::FDiv:
7875 case Instruction::FMul:
7876 case Instruction::FNeg:
7877 case Instruction::FRem:
7878 case Instruction::FSub:
7879 case Instruction::ICmp:
7880 case Instruction::LShr:
7881 case Instruction::Mul:
7882 case Instruction::Or:
7883 case Instruction::Select:
7884 case Instruction::Shl:
7885 case Instruction::Sub:
7886 case Instruction::Xor:
7887 case Instruction::Freeze: {
7888 SmallVector<VPValue *> NewOps(VPI->operands());
7889 if (Instruction::isBinaryOp(VPI->getOpcode())) {
7890 // The legacy cost model uses SCEV to check if some of the operands are
7891 // constants. To match the legacy cost model's behavior, use SCEV to try
7892 // to replace operands with constants.
7893 ScalarEvolution &SE = *PSE.getSE();
7894 auto GetConstantViaSCEV = [this, &SE](VPValue *Op) {
7895 if (!Op->isLiveIn())
7896 return Op;
7897 Value *V = Op->getUnderlyingValue();
7898 if (isa<Constant>(V) || !SE.isSCEVable(V->getType()))
7899 return Op;
7900 auto *C = dyn_cast<SCEVConstant>(SE.getSCEV(V));
7901 if (!C)
7902 return Op;
7903 return Plan.getOrAddLiveIn(C->getValue());
7904 };
7905 // For Mul, the legacy cost model checks both operands.
7906 if (VPI->getOpcode() == Instruction::Mul)
7907 NewOps[0] = GetConstantViaSCEV(NewOps[0]);
7908 // For other binops, the legacy cost model only checks the second operand.
7909 NewOps[1] = GetConstantViaSCEV(NewOps[1]);
7910 }
7911 return new VPWidenRecipe(*I, NewOps, *VPI, *VPI, VPI->getDebugLoc());
7912 }
7913 case Instruction::ExtractValue: {
7914 SmallVector<VPValue *> NewOps(VPI->operands());
7915 auto *EVI = cast<ExtractValueInst>(I);
7916 assert(EVI->getNumIndices() == 1 && "Expected one extractvalue index");
7917 unsigned Idx = EVI->getIndices()[0];
7918 NewOps.push_back(Plan.getConstantInt(32, Idx));
7919 return new VPWidenRecipe(*I, NewOps, *VPI, *VPI, VPI->getDebugLoc());
7920 }
7921 };
7922}
7923
7924VPHistogramRecipe *VPRecipeBuilder::tryToWidenHistogram(const HistogramInfo *HI,
7925 VPInstruction *VPI) {
7926 // FIXME: Support other operations.
7927 unsigned Opcode = HI->Update->getOpcode();
7928 assert((Opcode == Instruction::Add || Opcode == Instruction::Sub) &&
7929 "Histogram update operation must be an Add or Sub");
7930
7932 // Bucket address.
7933 HGramOps.push_back(VPI->getOperand(1));
7934 // Increment value.
7935 HGramOps.push_back(getVPValueOrAddLiveIn(HI->Update->getOperand(1)));
7936
7937 // In case of predicated execution (due to tail-folding, or conditional
7938 // execution, or both), pass the relevant mask.
7939 if (Legal->isMaskRequired(HI->Store))
7940 HGramOps.push_back(getBlockInMask(Builder.getInsertBlock()));
7941
7942 return new VPHistogramRecipe(Opcode, HGramOps, VPI->getDebugLoc());
7943}
7944
7946 VFRange &Range) {
7947 auto *I = VPI->getUnderlyingInstr();
7949 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
7950 Range);
7951
7952 bool IsPredicated = CM.isPredicatedInst(I);
7953
7954 // Even if the instruction is not marked as uniform, there are certain
7955 // intrinsic calls that can be effectively treated as such, so we check for
7956 // them here. Conservatively, we only do this for scalable vectors, since
7957 // for fixed-width VFs we can always fall back on full scalarization.
7958 if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
7959 switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
7960 case Intrinsic::assume:
7961 case Intrinsic::lifetime_start:
7962 case Intrinsic::lifetime_end:
7963 // For scalable vectors if one of the operands is variant then we still
7964 // want to mark as uniform, which will generate one instruction for just
7965 // the first lane of the vector. We can't scalarize the call in the same
7966 // way as for fixed-width vectors because we don't know how many lanes
7967 // there are.
7968 //
7969 // The reasons for doing it this way for scalable vectors are:
7970 // 1. For the assume intrinsic generating the instruction for the first
7971 // lane is still be better than not generating any at all. For
7972 // example, the input may be a splat across all lanes.
7973 // 2. For the lifetime start/end intrinsics the pointer operand only
7974 // does anything useful when the input comes from a stack object,
7975 // which suggests it should always be uniform. For non-stack objects
7976 // the effect is to poison the object, which still allows us to
7977 // remove the call.
7978 IsUniform = true;
7979 break;
7980 default:
7981 break;
7982 }
7983 }
7984 VPValue *BlockInMask = nullptr;
7985 if (!IsPredicated) {
7986 // Finalize the recipe for Instr, first if it is not predicated.
7987 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
7988 } else {
7989 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
7990 // Instructions marked for predication are replicated and a mask operand is
7991 // added initially. Masked replicate recipes will later be placed under an
7992 // if-then construct to prevent side-effects. Generate recipes to compute
7993 // the block mask for this region.
7994 BlockInMask = getBlockInMask(Builder.getInsertBlock());
7995 }
7996
7997 // Note that there is some custom logic to mark some intrinsics as uniform
7998 // manually above for scalable vectors, which this assert needs to account for
7999 // as well.
8000 assert((Range.Start.isScalar() || !IsUniform || !IsPredicated ||
8001 (Range.Start.isScalable() && isa<IntrinsicInst>(I))) &&
8002 "Should not predicate a uniform recipe");
8003 auto *Recipe =
8004 new VPReplicateRecipe(I, VPI->operands(), IsUniform, BlockInMask, *VPI,
8005 *VPI, VPI->getDebugLoc());
8006 return Recipe;
8007}
8008
8009/// Find all possible partial reductions in the loop and track all of those that
8010/// are valid so recipes can be formed later.
8012 // Find all possible partial reductions, grouping chains by their PHI. This
8013 // grouping allows invalidating the whole chain, if any link is not a valid
8014 // partial reduction.
8017 ChainsByPhi;
8018 for (const auto &[Phi, RdxDesc] : Legal->getReductionVars()) {
8019 if (Instruction *RdxExitInstr = RdxDesc.getLoopExitInstr())
8020 getScaledReductions(Phi, RdxExitInstr, Range, ChainsByPhi[Phi]);
8021 }
8022
8023 // A partial reduction is invalid if any of its extends are used by
8024 // something that isn't another partial reduction. This is because the
8025 // extends are intended to be lowered along with the reduction itself.
8026
8027 // Build up a set of partial reduction ops for efficient use checking.
8028 SmallPtrSet<User *, 4> PartialReductionOps;
8029 for (const auto &[_, Chains] : ChainsByPhi)
8030 for (const auto &[PartialRdx, _] : Chains)
8031 PartialReductionOps.insert(PartialRdx.ExtendUser);
8032
8033 auto ExtendIsOnlyUsedByPartialReductions =
8034 [&PartialReductionOps](Instruction *Extend) {
8035 return all_of(Extend->users(), [&](const User *U) {
8036 return PartialReductionOps.contains(U);
8037 });
8038 };
8039
8040 // Check if each use of a chain's two extends is a partial reduction
8041 // and only add those that don't have non-partial reduction users.
8042 for (const auto &[_, Chains] : ChainsByPhi) {
8043 for (const auto &[Chain, Scale] : Chains) {
8044 if (ExtendIsOnlyUsedByPartialReductions(Chain.ExtendA) &&
8045 (!Chain.ExtendB ||
8046 ExtendIsOnlyUsedByPartialReductions(Chain.ExtendB)))
8047 ScaledReductionMap.try_emplace(Chain.Reduction, Scale);
8048 }
8049 }
8050
8051 // Check that all partial reductions in a chain are only used by other
8052 // partial reductions with the same scale factor. Otherwise we end up creating
8053 // users of scaled reductions where the types of the other operands don't
8054 // match.
8055 for (const auto &[Phi, Chains] : ChainsByPhi) {
8056 for (const auto &[Chain, Scale] : Chains) {
8057 auto AllUsersPartialRdx = [ScaleVal = Scale, RdxPhi = Phi,
8058 this](const User *U) {
8059 auto *UI = cast<Instruction>(U);
8060 if (isa<PHINode>(UI) && UI->getParent() == OrigLoop->getHeader())
8061 return UI == RdxPhi;
8062 return ScaledReductionMap.lookup_or(UI, 0) == ScaleVal ||
8063 !OrigLoop->contains(UI->getParent());
8064 };
8065
8066 // If any partial reduction entry for the phi is invalid, invalidate the
8067 // whole chain.
8068 if (!all_of(Chain.Reduction->users(), AllUsersPartialRdx)) {
8069 for (const auto &[Chain, _] : Chains)
8070 ScaledReductionMap.erase(Chain.Reduction);
8071 break;
8072 }
8073 }
8074 }
8075}
8076
8077bool VPRecipeBuilder::getScaledReductions(
8078 Instruction *PHI, Instruction *RdxExitInstr, VFRange &Range,
8079 SmallVectorImpl<std::pair<PartialReductionChain, unsigned>> &Chains) {
8080 if (!CM.TheLoop->contains(RdxExitInstr))
8081 return false;
8082
8083 auto *Update = dyn_cast<BinaryOperator>(RdxExitInstr);
8084 if (!Update)
8085 return false;
8086
8087 Value *Op = Update->getOperand(0);
8088 Value *PhiOp = Update->getOperand(1);
8089 if (Op == PHI)
8090 std::swap(Op, PhiOp);
8091
8092 using namespace llvm::PatternMatch;
8093 // If Op is an extend, then it's still a valid partial reduction if the
8094 // extended mul fulfills the other requirements.
8095 // For example, reduce.add(ext(mul(ext(A), ext(B)))) is still a valid partial
8096 // reduction since the inner extends will be widened. We already have oneUse
8097 // checks on the inner extends so widening them is safe.
8098 std::optional<TTI::PartialReductionExtendKind> OuterExtKind = std::nullopt;
8099 if (match(Op, m_ZExtOrSExt(m_Mul(m_Value(), m_Value())))) {
8100 auto *Cast = cast<CastInst>(Op);
8101 OuterExtKind = TTI::getPartialReductionExtendKind(Cast->getOpcode());
8102 Op = Cast->getOperand(0);
8103 }
8104
8105 // Try and get a scaled reduction from the first non-phi operand.
8106 // If one is found, we use the discovered reduction instruction in
8107 // place of the accumulator for costing.
8108 if (auto *OpInst = dyn_cast<Instruction>(Op)) {
8109 if (getScaledReductions(PHI, OpInst, Range, Chains)) {
8110 PHI = Chains.rbegin()->first.Reduction;
8111
8112 Op = Update->getOperand(0);
8113 PhiOp = Update->getOperand(1);
8114 if (Op == PHI)
8115 std::swap(Op, PhiOp);
8116 }
8117 }
8118 if (PhiOp != PHI)
8119 return false;
8120
8121 // If the update is a binary operator, check both of its operands to see if
8122 // they are extends. Otherwise, see if the update comes directly from an
8123 // extend.
8124 Instruction *Exts[2] = {nullptr};
8125 BinaryOperator *ExtendUser = dyn_cast<BinaryOperator>(Op);
8126 std::optional<unsigned> BinOpc;
8127 Type *ExtOpTypes[2] = {nullptr};
8129
8130 auto CollectExtInfo = [this, OuterExtKind, &Exts, &ExtOpTypes,
8131 &ExtKinds](SmallVectorImpl<Value *> &Ops) -> bool {
8132 for (const auto &[I, OpI] : enumerate(Ops)) {
8133 const APInt *C;
8134 if (I > 0 && match(OpI, m_APInt(C)) &&
8135 canConstantBeExtended(C, ExtOpTypes[0], ExtKinds[0])) {
8136 ExtOpTypes[I] = ExtOpTypes[0];
8137 ExtKinds[I] = ExtKinds[0];
8138 continue;
8139 }
8140 Value *ExtOp;
8141 if (!match(OpI, m_ZExtOrSExt(m_Value(ExtOp))))
8142 return false;
8143 Exts[I] = cast<Instruction>(OpI);
8144
8145 // TODO: We should be able to support live-ins.
8146 if (!CM.TheLoop->contains(Exts[I]))
8147 return false;
8148
8149 ExtOpTypes[I] = ExtOp->getType();
8150 ExtKinds[I] = TTI::getPartialReductionExtendKind(Exts[I]);
8151 // The outer extend kind must be the same as the inner extends, so that
8152 // they can be folded together.
8153 if (OuterExtKind.has_value() && OuterExtKind.value() != ExtKinds[I])
8154 return false;
8155 }
8156 return true;
8157 };
8158
8159 if (ExtendUser) {
8160 if (!ExtendUser->hasOneUse())
8161 return false;
8162
8163 // Use the side-effect of match to replace BinOp only if the pattern is
8164 // matched, we don't care at this point whether it actually matched.
8165 match(ExtendUser, m_Neg(m_BinOp(ExtendUser)));
8166
8167 SmallVector<Value *> Ops(ExtendUser->operands());
8168 if (!CollectExtInfo(Ops))
8169 return false;
8170
8171 BinOpc = std::make_optional(ExtendUser->getOpcode());
8172 } else if (match(Update, m_Add(m_Value(), m_Value()))) {
8173 // We already know the operands for Update are Op and PhiOp.
8175 if (!CollectExtInfo(Ops))
8176 return false;
8177
8178 ExtendUser = Update;
8179 BinOpc = std::nullopt;
8180 } else
8181 return false;
8182
8183 PartialReductionChain Chain(RdxExitInstr, Exts[0], Exts[1], ExtendUser);
8184
8185 TypeSize PHISize = PHI->getType()->getPrimitiveSizeInBits();
8186 TypeSize ASize = ExtOpTypes[0]->getPrimitiveSizeInBits();
8187 if (!PHISize.hasKnownScalarFactor(ASize))
8188 return false;
8189 unsigned TargetScaleFactor = PHISize.getKnownScalarFactor(ASize);
8190
8192 [&](ElementCount VF) {
8193 InstructionCost Cost = TTI->getPartialReductionCost(
8194 Update->getOpcode(), ExtOpTypes[0], ExtOpTypes[1],
8195 PHI->getType(), VF, ExtKinds[0], ExtKinds[1], BinOpc,
8196 CM.CostKind);
8197 return Cost.isValid();
8198 },
8199 Range)) {
8200 Chains.emplace_back(Chain, TargetScaleFactor);
8201 return true;
8202 }
8203
8204 return false;
8205}
8206
8208 VFRange &Range) {
8209 // First, check for specific widening recipes that deal with inductions, Phi
8210 // nodes, calls and memory operations.
8211 VPRecipeBase *Recipe;
8212 if (auto *PhiR = dyn_cast<VPPhi>(R)) {
8213 VPBasicBlock *Parent = PhiR->getParent();
8214 [[maybe_unused]] VPRegionBlock *LoopRegionOf =
8215 Parent->getEnclosingLoopRegion();
8216 assert(LoopRegionOf && LoopRegionOf->getEntry() == Parent &&
8217 "Non-header phis should have been handled during predication");
8218 auto *Phi = cast<PHINode>(R->getUnderlyingInstr());
8219 assert(R->getNumOperands() == 2 && "Must have 2 operands for header phis");
8220 if ((Recipe = tryToOptimizeInductionPHI(PhiR)))
8221 return Recipe;
8222
8223 VPHeaderPHIRecipe *PhiRecipe = nullptr;
8224 assert((Legal->isReductionVariable(Phi) ||
8225 Legal->isFixedOrderRecurrence(Phi)) &&
8226 "can only widen reductions and fixed-order recurrences here");
8227 VPValue *StartV = R->getOperand(0);
8228 if (Legal->isReductionVariable(Phi)) {
8229 const RecurrenceDescriptor &RdxDesc = Legal->getRecurrenceDescriptor(Phi);
8230 assert(RdxDesc.getRecurrenceStartValue() ==
8231 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8232
8233 // If the PHI is used by a partial reduction, set the scale factor.
8234 bool UseInLoopReduction = CM.isInLoopReduction(Phi);
8235 bool UseOrderedReductions = CM.useOrderedReductions(RdxDesc);
8236 unsigned ScaleFactor =
8237 getScalingForReduction(RdxDesc.getLoopExitInstr()).value_or(1);
8238
8239 PhiRecipe = new VPReductionPHIRecipe(
8240 Phi, RdxDesc.getRecurrenceKind(), *StartV,
8241 getReductionStyle(UseInLoopReduction, UseOrderedReductions,
8242 ScaleFactor),
8244 } else {
8245 // TODO: Currently fixed-order recurrences are modeled as chains of
8246 // first-order recurrences. If there are no users of the intermediate
8247 // recurrences in the chain, the fixed order recurrence should be modeled
8248 // directly, enabling more efficient codegen.
8249 PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV);
8250 }
8251 // Add backedge value.
8252 PhiRecipe->addOperand(R->getOperand(1));
8253 return PhiRecipe;
8254 }
8255 assert(!R->isPhi() && "only VPPhi nodes expected at this point");
8256
8257 auto *VPI = cast<VPInstruction>(R);
8258 Instruction *Instr = R->getUnderlyingInstr();
8259 if (VPI->getOpcode() == Instruction::Trunc &&
8260 (Recipe = tryToOptimizeInductionTruncate(VPI, Range)))
8261 return Recipe;
8262
8263 // All widen recipes below deal only with VF > 1.
8265 [&](ElementCount VF) { return VF.isScalar(); }, Range))
8266 return nullptr;
8267
8268 if (VPI->getOpcode() == Instruction::Call)
8269 return tryToWidenCall(VPI, Range);
8270
8271 if (VPI->getOpcode() == Instruction::Store)
8272 if (auto HistInfo = Legal->getHistogramInfo(cast<StoreInst>(Instr)))
8273 return tryToWidenHistogram(*HistInfo, VPI);
8274
8275 if (VPI->getOpcode() == Instruction::Load ||
8276 VPI->getOpcode() == Instruction::Store)
8277 return tryToWidenMemory(VPI, Range);
8278
8279 if (std::optional<unsigned> ScaleFactor = getScalingForReduction(Instr))
8280 return tryToCreatePartialReduction(VPI, ScaleFactor.value());
8281
8282 if (!shouldWiden(Instr, Range))
8283 return nullptr;
8284
8285 if (VPI->getOpcode() == Instruction::GetElementPtr)
8286 return new VPWidenGEPRecipe(cast<GetElementPtrInst>(Instr), R->operands(),
8287 *VPI, VPI->getDebugLoc());
8288
8289 if (VPI->getOpcode() == Instruction::Select)
8290 return new VPWidenSelectRecipe(cast<SelectInst>(Instr), R->operands(), *VPI,
8291 *VPI, VPI->getDebugLoc());
8292
8293 if (Instruction::isCast(VPI->getOpcode())) {
8294 auto *CI = cast<CastInst>(Instr);
8295 auto *CastR = cast<VPInstructionWithType>(VPI);
8296 return new VPWidenCastRecipe(CI->getOpcode(), VPI->getOperand(0),
8297 CastR->getResultType(), CI, *VPI, *VPI,
8298 VPI->getDebugLoc());
8299 }
8300
8301 return tryToWiden(VPI);
8302}
8303
8306 unsigned ScaleFactor) {
8307 assert(Reduction->getNumOperands() == 2 &&
8308 "Unexpected number of operands for partial reduction");
8309
8310 VPValue *BinOp = Reduction->getOperand(0);
8311 VPValue *Accumulator = Reduction->getOperand(1);
8312 VPRecipeBase *BinOpRecipe = BinOp->getDefiningRecipe();
8313 if (isa<VPReductionPHIRecipe>(BinOpRecipe) ||
8314 (isa<VPReductionRecipe>(BinOpRecipe) &&
8315 cast<VPReductionRecipe>(BinOpRecipe)->isPartialReduction()))
8316 std::swap(BinOp, Accumulator);
8317
8318 assert(ScaleFactor ==
8319 vputils::getVFScaleFactor(Accumulator->getDefiningRecipe()) &&
8320 "all accumulators in chain must have same scale factor");
8321
8322 auto *ReductionI = Reduction->getUnderlyingInstr();
8323 if (Reduction->getOpcode() == Instruction::Sub) {
8324 auto *const Zero = ConstantInt::get(ReductionI->getType(), 0);
8326 Ops.push_back(Plan.getOrAddLiveIn(Zero));
8327 Ops.push_back(BinOp);
8328 BinOp = new VPWidenRecipe(*ReductionI, Ops, VPIRFlags(*ReductionI),
8329 VPIRMetadata(), ReductionI->getDebugLoc());
8330 Builder.insert(BinOp->getDefiningRecipe());
8331 }
8332
8333 VPValue *Cond = nullptr;
8334 if (CM.blockNeedsPredicationForAnyReason(ReductionI->getParent()))
8335 Cond = getBlockInMask(Builder.getInsertBlock());
8336
8337 return new VPReductionRecipe(
8338 RecurKind::Add, FastMathFlags(), ReductionI, Accumulator, BinOp, Cond,
8339 RdxUnordered{/*VFScaleFactor=*/ScaleFactor}, ReductionI->getDebugLoc());
8340}
8341
8342void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8343 ElementCount MaxVF) {
8344 if (ElementCount::isKnownGT(MinVF, MaxVF))
8345 return;
8346
8347 assert(OrigLoop->isInnermost() && "Inner loop expected.");
8348
8349 const LoopAccessInfo *LAI = Legal->getLAI();
8351 OrigLoop, LI, DT, PSE.getSE());
8352 if (!LAI->getRuntimePointerChecking()->getChecks().empty() &&
8354 // Only use noalias metadata when using memory checks guaranteeing no
8355 // overlap across all iterations.
8356 LVer.prepareNoAliasMetadata();
8357 }
8358
8359 // Create initial base VPlan0, to serve as common starting point for all
8360 // candidates built later for specific VF ranges.
8361 auto VPlan0 = VPlanTransforms::buildVPlan0(
8362 OrigLoop, *LI, Legal->getWidestInductionType(),
8363 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()), PSE, &LVer);
8364
8365 auto MaxVFTimes2 = MaxVF * 2;
8366 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFTimes2);) {
8367 VFRange SubRange = {VF, MaxVFTimes2};
8368 if (auto Plan = tryToBuildVPlanWithVPRecipes(
8369 std::unique_ptr<VPlan>(VPlan0->duplicate()), SubRange, &LVer)) {
8370 // Now optimize the initial VPlan.
8371 VPlanTransforms::hoistPredicatedLoads(*Plan, *PSE.getSE(), OrigLoop);
8373 *Plan, CM.getMinimalBitwidths());
8375 // TODO: try to put it close to addActiveLaneMask().
8376 if (CM.foldTailWithEVL())
8378 *Plan, CM.getMaxSafeElements());
8379 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
8380 VPlans.push_back(std::move(Plan));
8381 }
8382 VF = SubRange.End;
8383 }
8384}
8385
8386VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(
8387 VPlanPtr Plan, VFRange &Range, LoopVersioning *LVer) {
8388
8389 using namespace llvm::VPlanPatternMatch;
8390 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
8391
8392 // ---------------------------------------------------------------------------
8393 // Build initial VPlan: Scan the body of the loop in a topological order to
8394 // visit each basic block after having visited its predecessor basic blocks.
8395 // ---------------------------------------------------------------------------
8396
8397 bool RequiresScalarEpilogueCheck =
8399 [this](ElementCount VF) {
8400 return !CM.requiresScalarEpilogue(VF.isVector());
8401 },
8402 Range);
8403 VPlanTransforms::handleEarlyExits(*Plan, Legal->hasUncountableEarlyExit());
8404 VPlanTransforms::addMiddleCheck(*Plan, RequiresScalarEpilogueCheck,
8405 CM.foldTailByMasking());
8406
8408
8409 // Don't use getDecisionAndClampRange here, because we don't know the UF
8410 // so this function is better to be conservative, rather than to split
8411 // it up into different VPlans.
8412 // TODO: Consider using getDecisionAndClampRange here to split up VPlans.
8413 bool IVUpdateMayOverflow = false;
8414 for (ElementCount VF : Range)
8415 IVUpdateMayOverflow |= !isIndvarOverflowCheckKnownFalse(&CM, VF);
8416
8417 TailFoldingStyle Style = CM.getTailFoldingStyle(IVUpdateMayOverflow);
8418 // Use NUW for the induction increment if we proved that it won't overflow in
8419 // the vector loop or when not folding the tail. In the later case, we know
8420 // that the canonical induction increment will not overflow as the vector trip
8421 // count is >= increment and a multiple of the increment.
8422 VPRegionBlock *LoopRegion = Plan->getVectorLoopRegion();
8423 bool HasNUW = !IVUpdateMayOverflow || Style == TailFoldingStyle::None;
8424 if (!HasNUW) {
8425 auto *IVInc =
8426 LoopRegion->getExitingBasicBlock()->getTerminator()->getOperand(0);
8427 assert(match(IVInc,
8428 m_VPInstruction<Instruction::Add>(
8429 m_Specific(LoopRegion->getCanonicalIV()), m_VPValue())) &&
8430 "Did not find the canonical IV increment");
8431 cast<VPRecipeWithIRFlags>(IVInc)->dropPoisonGeneratingFlags();
8432 }
8433
8434 // ---------------------------------------------------------------------------
8435 // Pre-construction: record ingredients whose recipes we'll need to further
8436 // process after constructing the initial VPlan.
8437 // ---------------------------------------------------------------------------
8438
8439 // For each interleave group which is relevant for this (possibly trimmed)
8440 // Range, add it to the set of groups to be later applied to the VPlan and add
8441 // placeholders for its members' Recipes which we'll be replacing with a
8442 // single VPInterleaveRecipe.
8443 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
8444 auto ApplyIG = [IG, this](ElementCount VF) -> bool {
8445 bool Result = (VF.isVector() && // Query is illegal for VF == 1
8446 CM.getWideningDecision(IG->getInsertPos(), VF) ==
8448 // For scalable vectors, the interleave factors must be <= 8 since we
8449 // require the (de)interleaveN intrinsics instead of shufflevectors.
8450 assert((!Result || !VF.isScalable() || IG->getFactor() <= 8) &&
8451 "Unsupported interleave factor for scalable vectors");
8452 return Result;
8453 };
8454 if (!getDecisionAndClampRange(ApplyIG, Range))
8455 continue;
8456 InterleaveGroups.insert(IG);
8457 }
8458
8459 // ---------------------------------------------------------------------------
8460 // Predicate and linearize the top-level loop region.
8461 // ---------------------------------------------------------------------------
8462 auto BlockMaskCache = VPlanTransforms::introduceMasksAndLinearize(
8463 *Plan, CM.foldTailByMasking());
8464
8465 // ---------------------------------------------------------------------------
8466 // Construct wide recipes and apply predication for original scalar
8467 // VPInstructions in the loop.
8468 // ---------------------------------------------------------------------------
8469 VPRecipeBuilder RecipeBuilder(*Plan, OrigLoop, TLI, &TTI, Legal, CM, PSE,
8470 Builder, BlockMaskCache);
8471 // TODO: Handle partial reductions with EVL tail folding.
8472 if (!CM.foldTailWithEVL())
8473 RecipeBuilder.collectScaledReductions(Range);
8474
8475 // Scan the body of the loop in a topological order to visit each basic block
8476 // after having visited its predecessor basic blocks.
8477 VPBasicBlock *HeaderVPBB = LoopRegion->getEntryBasicBlock();
8478 ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>> RPOT(
8479 HeaderVPBB);
8480
8481 auto *MiddleVPBB = Plan->getMiddleBlock();
8482 VPBasicBlock::iterator MBIP = MiddleVPBB->getFirstNonPhi();
8483 // Mapping from VPValues in the initial plan to their widened VPValues. Needed
8484 // temporarily to update created block masks.
8485 DenseMap<VPValue *, VPValue *> Old2New;
8486 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) {
8487 // Convert input VPInstructions to widened recipes.
8488 for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
8489 auto *SingleDef = cast<VPSingleDefRecipe>(&R);
8490 auto *UnderlyingValue = SingleDef->getUnderlyingValue();
8491 // Skip recipes that do not need transforming, including canonical IV,
8492 // wide canonical IV and VPInstructions without underlying values. The
8493 // latter are added above for masking.
8494 // FIXME: Migrate code relying on the underlying instruction from VPlan0
8495 // to construct recipes below to not use the underlying instruction.
8497 &R) ||
8498 (isa<VPInstruction>(&R) && !UnderlyingValue))
8499 continue;
8500 assert(isa<VPInstruction>(&R) && UnderlyingValue && "unsupported recipe");
8501
8502 // TODO: Gradually replace uses of underlying instruction by analyses on
8503 // VPlan.
8504 Instruction *Instr = cast<Instruction>(UnderlyingValue);
8505 Builder.setInsertPoint(SingleDef);
8506
8507 // The stores with invariant address inside the loop will be deleted, and
8508 // in the exit block, a uniform store recipe will be created for the final
8509 // invariant store of the reduction.
8510 StoreInst *SI;
8511 if ((SI = dyn_cast<StoreInst>(Instr)) &&
8512 Legal->isInvariantAddressOfReduction(SI->getPointerOperand())) {
8513 // Only create recipe for the final invariant store of the reduction.
8514 if (Legal->isInvariantStoreOfReduction(SI)) {
8515 auto *VPI = cast<VPInstruction>(SingleDef);
8516 auto *Recipe = new VPReplicateRecipe(
8517 SI, R.operands(), true /* IsUniform */, nullptr /*Mask*/, *VPI,
8518 *VPI, VPI->getDebugLoc());
8519 Recipe->insertBefore(*MiddleVPBB, MBIP);
8520 }
8521 R.eraseFromParent();
8522 continue;
8523 }
8524
8525 VPRecipeBase *Recipe =
8526 RecipeBuilder.tryToCreateWidenRecipe(SingleDef, Range);
8527 if (!Recipe)
8528 Recipe = RecipeBuilder.handleReplication(cast<VPInstruction>(SingleDef),
8529 Range);
8530
8531 RecipeBuilder.setRecipe(Instr, Recipe);
8532 if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) && isa<TruncInst>(Instr)) {
8533 // Optimized a truncate to VPWidenIntOrFpInductionRecipe. It needs to be
8534 // moved to the phi section in the header.
8535 Recipe->insertBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi());
8536 } else {
8537 Builder.insert(Recipe);
8538 }
8539 if (Recipe->getNumDefinedValues() == 1) {
8540 SingleDef->replaceAllUsesWith(Recipe->getVPSingleValue());
8541 Old2New[SingleDef] = Recipe->getVPSingleValue();
8542 } else {
8543 assert(Recipe->getNumDefinedValues() == 0 &&
8544 "Unexpected multidef recipe");
8545 R.eraseFromParent();
8546 }
8547 }
8548 }
8549
8550 // replaceAllUsesWith above may invalidate the block masks. Update them here.
8551 // TODO: Include the masks as operands in the predicated VPlan directly
8552 // to remove the need to keep a map of masks beyond the predication
8553 // transform.
8554 RecipeBuilder.updateBlockMaskCache(Old2New);
8555 for (VPValue *Old : Old2New.keys())
8556 Old->getDefiningRecipe()->eraseFromParent();
8557
8558 assert(isa<VPRegionBlock>(LoopRegion) &&
8559 !LoopRegion->getEntryBasicBlock()->empty() &&
8560 "entry block must be set to a VPRegionBlock having a non-empty entry "
8561 "VPBasicBlock");
8562
8563 // TODO: We can't call runPass on these transforms yet, due to verifier
8564 // failures.
8566 DenseMap<VPValue *, VPValue *> IVEndValues;
8567 VPlanTransforms::updateScalarResumePhis(*Plan, IVEndValues);
8568
8569 // ---------------------------------------------------------------------------
8570 // Transform initial VPlan: Apply previously taken decisions, in order, to
8571 // bring the VPlan to its final state.
8572 // ---------------------------------------------------------------------------
8573
8574 // Adjust the recipes for any inloop reductions.
8575 adjustRecipesForReductions(Plan, RecipeBuilder, Range.Start);
8576
8577 // Apply mandatory transformation to handle reductions with multiple in-loop
8578 // uses if possible, bail out otherwise.
8580 *Plan))
8581 return nullptr;
8582 // Apply mandatory transformation to handle FP maxnum/minnum reduction with
8583 // NaNs if possible, bail out otherwise.
8585 *Plan))
8586 return nullptr;
8587
8588 // Transform recipes to abstract recipes if it is legal and beneficial and
8589 // clamp the range for better cost estimation.
8590 // TODO: Enable following transform when the EVL-version of extended-reduction
8591 // and mulacc-reduction are implemented.
8592 if (!CM.foldTailWithEVL()) {
8593 VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind,
8594 *CM.PSE.getSE(), OrigLoop);
8596 CostCtx, Range);
8597 }
8598
8599 for (ElementCount VF : Range)
8600 Plan->addVF(VF);
8601 Plan->setName("Initial VPlan");
8602
8603 // Interleave memory: for each Interleave Group we marked earlier as relevant
8604 // for this VPlan, replace the Recipes widening its memory instructions with a
8605 // single VPInterleaveRecipe at its insertion point.
8607 InterleaveGroups, RecipeBuilder,
8608 CM.isScalarEpilogueAllowed());
8609
8610 // Replace VPValues for known constant strides.
8612 Legal->getLAI()->getSymbolicStrides());
8613
8614 auto BlockNeedsPredication = [this](BasicBlock *BB) {
8615 return Legal->blockNeedsPredication(BB);
8616 };
8618 BlockNeedsPredication);
8619
8620 // Sink users of fixed-order recurrence past the recipe defining the previous
8621 // value and introduce FirstOrderRecurrenceSplice VPInstructions.
8623 *Plan, Builder))
8624 return nullptr;
8625
8626 if (useActiveLaneMask(Style)) {
8627 // TODO: Move checks to VPlanTransforms::addActiveLaneMask once
8628 // TailFoldingStyle is visible there.
8629 bool ForControlFlow = useActiveLaneMaskForControlFlow(Style);
8630 bool WithoutRuntimeCheck =
8632 VPlanTransforms::addActiveLaneMask(*Plan, ForControlFlow,
8633 WithoutRuntimeCheck);
8634 }
8635 VPlanTransforms::optimizeInductionExitUsers(*Plan, IVEndValues, *PSE.getSE());
8636
8637 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
8638 return Plan;
8639}
8640
8641VPlanPtr LoopVectorizationPlanner::tryToBuildVPlan(VFRange &Range) {
8642 // Outer loop handling: They may require CFG and instruction level
8643 // transformations before even evaluating whether vectorization is profitable.
8644 // Since we cannot modify the incoming IR, we need to build VPlan upfront in
8645 // the vectorization pipeline.
8646 assert(!OrigLoop->isInnermost());
8647 assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
8648
8649 auto Plan = VPlanTransforms::buildVPlan0(
8650 OrigLoop, *LI, Legal->getWidestInductionType(),
8651 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()), PSE);
8653 /*HasUncountableExit*/ false);
8654 VPlanTransforms::addMiddleCheck(*Plan, /*RequiresScalarEpilogue*/ true,
8655 /*TailFolded*/ false);
8656
8658
8659 for (ElementCount VF : Range)
8660 Plan->addVF(VF);
8661
8663 *Plan,
8664 [this](PHINode *P) {
8665 return Legal->getIntOrFpInductionDescriptor(P);
8666 },
8667 *TLI))
8668 return nullptr;
8669
8670 // TODO: IVEndValues are not used yet in the native path, to optimize exit
8671 // values.
8672 // TODO: We can't call runPass on the transform yet, due to verifier
8673 // failures.
8674 DenseMap<VPValue *, VPValue *> IVEndValues;
8675 VPlanTransforms::updateScalarResumePhis(*Plan, IVEndValues);
8676
8677 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
8678 return Plan;
8679}
8680
8681// Adjust the recipes for reductions. For in-loop reductions the chain of
8682// instructions leading from the loop exit instr to the phi need to be converted
8683// to reductions, with one operand being vector and the other being the scalar
8684// reduction chain. For other reductions, a select is introduced between the phi
8685// and users outside the vector region when folding the tail.
8686//
8687// A ComputeReductionResult recipe is added to the middle block, also for
8688// in-loop reductions which compute their result in-loop, because generating
8689// the subsequent bc.merge.rdx phi is driven by ComputeReductionResult recipes.
8690//
8691// Adjust AnyOf reductions; replace the reduction phi for the selected value
8692// with a boolean reduction phi node to check if the condition is true in any
8693// iteration. The final value is selected by the final ComputeReductionResult.
8694void LoopVectorizationPlanner::adjustRecipesForReductions(
8695 VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder, ElementCount MinVF) {
8696 using namespace VPlanPatternMatch;
8697 VPTypeAnalysis TypeInfo(*Plan);
8698 VPRegionBlock *VectorLoopRegion = Plan->getVectorLoopRegion();
8699 VPBasicBlock *Header = VectorLoopRegion->getEntryBasicBlock();
8700 VPBasicBlock *MiddleVPBB = Plan->getMiddleBlock();
8702
8703 for (VPRecipeBase &R : Header->phis()) {
8704 auto *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
8705 if (!PhiR || !PhiR->isInLoop() || (MinVF.isScalar() && !PhiR->isOrdered()))
8706 continue;
8707
8708 RecurKind Kind = PhiR->getRecurrenceKind();
8709 assert(
8712 "AnyOf and FindIV reductions are not allowed for in-loop reductions");
8713
8714 bool IsFPRecurrence =
8716 FastMathFlags FMFs =
8717 IsFPRecurrence ? FastMathFlags::getFast() : FastMathFlags();
8718
8719 // Collect the chain of "link" recipes for the reduction starting at PhiR.
8720 SetVector<VPSingleDefRecipe *> Worklist;
8721 Worklist.insert(PhiR);
8722 for (unsigned I = 0; I != Worklist.size(); ++I) {
8723 VPSingleDefRecipe *Cur = Worklist[I];
8724 for (VPUser *U : Cur->users()) {
8725 auto *UserRecipe = cast<VPSingleDefRecipe>(U);
8726 if (!UserRecipe->getParent()->getEnclosingLoopRegion()) {
8727 assert((UserRecipe->getParent() == MiddleVPBB ||
8728 UserRecipe->getParent() == Plan->getScalarPreheader()) &&
8729 "U must be either in the loop region, the middle block or the "
8730 "scalar preheader.");
8731 continue;
8732 }
8733 Worklist.insert(UserRecipe);
8734 }
8735 }
8736
8737 // Visit operation "Links" along the reduction chain top-down starting from
8738 // the phi until LoopExitValue. We keep track of the previous item
8739 // (PreviousLink) to tell which of the two operands of a Link will remain
8740 // scalar and which will be reduced. For minmax by select(cmp), Link will be
8741 // the select instructions. Blend recipes of in-loop reduction phi's will
8742 // get folded to their non-phi operand, as the reduction recipe handles the
8743 // condition directly.
8744 VPSingleDefRecipe *PreviousLink = PhiR; // Aka Worklist[0].
8745 for (VPSingleDefRecipe *CurrentLink : drop_begin(Worklist)) {
8746 if (auto *Blend = dyn_cast<VPBlendRecipe>(CurrentLink)) {
8747 assert(Blend->getNumIncomingValues() == 2 &&
8748 "Blend must have 2 incoming values");
8749 if (Blend->getIncomingValue(0) == PhiR) {
8750 Blend->replaceAllUsesWith(Blend->getIncomingValue(1));
8751 } else {
8752 assert(Blend->getIncomingValue(1) == PhiR &&
8753 "PhiR must be an operand of the blend");
8754 Blend->replaceAllUsesWith(Blend->getIncomingValue(0));
8755 }
8756 continue;
8757 }
8758
8759 if (IsFPRecurrence) {
8760 FastMathFlags CurFMF =
8761 cast<VPRecipeWithIRFlags>(CurrentLink)->getFastMathFlags();
8762 if (match(CurrentLink, m_Select(m_VPValue(), m_VPValue(), m_VPValue())))
8763 CurFMF |= cast<VPRecipeWithIRFlags>(CurrentLink->getOperand(0))
8764 ->getFastMathFlags();
8765 FMFs &= CurFMF;
8766 }
8767
8768 Instruction *CurrentLinkI = CurrentLink->getUnderlyingInstr();
8769
8770 // Index of the first operand which holds a non-mask vector operand.
8771 unsigned IndexOfFirstOperand;
8772 // Recognize a call to the llvm.fmuladd intrinsic.
8773 bool IsFMulAdd = (Kind == RecurKind::FMulAdd);
8774 VPValue *VecOp;
8775 VPBasicBlock *LinkVPBB = CurrentLink->getParent();
8776 if (IsFMulAdd) {
8777 assert(
8779 "Expected instruction to be a call to the llvm.fmuladd intrinsic");
8780 assert(((MinVF.isScalar() && isa<VPReplicateRecipe>(CurrentLink)) ||
8781 isa<VPWidenIntrinsicRecipe>(CurrentLink)) &&
8782 CurrentLink->getOperand(2) == PreviousLink &&
8783 "expected a call where the previous link is the added operand");
8784
8785 // If the instruction is a call to the llvm.fmuladd intrinsic then we
8786 // need to create an fmul recipe (multiplying the first two operands of
8787 // the fmuladd together) to use as the vector operand for the fadd
8788 // reduction.
8789 VPInstruction *FMulRecipe = new VPInstruction(
8790 Instruction::FMul,
8791 {CurrentLink->getOperand(0), CurrentLink->getOperand(1)},
8792 CurrentLinkI->getFastMathFlags());
8793 LinkVPBB->insert(FMulRecipe, CurrentLink->getIterator());
8794 VecOp = FMulRecipe;
8795 } else if (PhiR->isInLoop() && Kind == RecurKind::AddChainWithSubs &&
8796 match(CurrentLink, m_Sub(m_VPValue(), m_VPValue()))) {
8797 Type *PhiTy = TypeInfo.inferScalarType(PhiR);
8798 auto *Zero = Plan->getConstantInt(PhiTy, 0);
8799 VPWidenRecipe *Sub = new VPWidenRecipe(
8800 Instruction::Sub, {Zero, CurrentLink->getOperand(1)}, {},
8801 VPIRMetadata(), CurrentLinkI->getDebugLoc());
8802 Sub->setUnderlyingValue(CurrentLinkI);
8803 LinkVPBB->insert(Sub, CurrentLink->getIterator());
8804 VecOp = Sub;
8805 } else {
8807 if (match(CurrentLink, m_Cmp(m_VPValue(), m_VPValue())))
8808 continue;
8809 assert(isa<VPWidenSelectRecipe>(CurrentLink) &&
8810 "must be a select recipe");
8811 IndexOfFirstOperand = 1;
8812 } else {
8813 assert((MinVF.isScalar() || isa<VPWidenRecipe>(CurrentLink)) &&
8814 "Expected to replace a VPWidenSC");
8815 IndexOfFirstOperand = 0;
8816 }
8817 // Note that for non-commutable operands (cmp-selects), the semantics of
8818 // the cmp-select are captured in the recurrence kind.
8819 unsigned VecOpId =
8820 CurrentLink->getOperand(IndexOfFirstOperand) == PreviousLink
8821 ? IndexOfFirstOperand + 1
8822 : IndexOfFirstOperand;
8823 VecOp = CurrentLink->getOperand(VecOpId);
8824 assert(VecOp != PreviousLink &&
8825 CurrentLink->getOperand(CurrentLink->getNumOperands() - 1 -
8826 (VecOpId - IndexOfFirstOperand)) ==
8827 PreviousLink &&
8828 "PreviousLink must be the operand other than VecOp");
8829 }
8830
8831 VPValue *CondOp = nullptr;
8832 if (CM.blockNeedsPredicationForAnyReason(CurrentLinkI->getParent()))
8833 CondOp = RecipeBuilder.getBlockInMask(CurrentLink->getParent());
8834
8835 ReductionStyle Style = getReductionStyle(true, PhiR->isOrdered(), 1);
8836 auto *RedRecipe =
8837 new VPReductionRecipe(Kind, FMFs, CurrentLinkI, PreviousLink, VecOp,
8838 CondOp, Style, CurrentLinkI->getDebugLoc());
8839 // Append the recipe to the end of the VPBasicBlock because we need to
8840 // ensure that it comes after all of it's inputs, including CondOp.
8841 // Delete CurrentLink as it will be invalid if its operand is replaced
8842 // with a reduction defined at the bottom of the block in the next link.
8843 if (LinkVPBB->getNumSuccessors() == 0)
8844 RedRecipe->insertBefore(&*std::prev(std::prev(LinkVPBB->end())));
8845 else
8846 LinkVPBB->appendRecipe(RedRecipe);
8847
8848 CurrentLink->replaceAllUsesWith(RedRecipe);
8849 ToDelete.push_back(CurrentLink);
8850 PreviousLink = RedRecipe;
8851 }
8852 }
8853 VPBasicBlock *LatchVPBB = VectorLoopRegion->getExitingBasicBlock();
8854 Builder.setInsertPoint(&*std::prev(std::prev(LatchVPBB->end())));
8855 VPBasicBlock::iterator IP = MiddleVPBB->getFirstNonPhi();
8856 for (VPRecipeBase &R :
8857 Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
8858 VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
8859 if (!PhiR)
8860 continue;
8861
8862 const RecurrenceDescriptor &RdxDesc = Legal->getRecurrenceDescriptor(
8864 Type *PhiTy = TypeInfo.inferScalarType(PhiR);
8865 // If tail is folded by masking, introduce selects between the phi
8866 // and the users outside the vector region of each reduction, at the
8867 // beginning of the dedicated latch block.
8868 auto *OrigExitingVPV = PhiR->getBackedgeValue();
8869 auto *NewExitingVPV = PhiR->getBackedgeValue();
8870 // Don't output selects for partial reductions because they have an output
8871 // with fewer lanes than the VF. So the operands of the select would have
8872 // different numbers of lanes. Partial reductions mask the input instead.
8873 auto *RR = dyn_cast<VPReductionRecipe>(OrigExitingVPV->getDefiningRecipe());
8874 if (!PhiR->isInLoop() && CM.foldTailByMasking() &&
8875 (!RR || !RR->isPartialReduction())) {
8876 VPValue *Cond = RecipeBuilder.getBlockInMask(PhiR->getParent());
8877 std::optional<FastMathFlags> FMFs =
8878 PhiTy->isFloatingPointTy()
8879 ? std::make_optional(RdxDesc.getFastMathFlags())
8880 : std::nullopt;
8881 NewExitingVPV =
8882 Builder.createSelect(Cond, OrigExitingVPV, PhiR, {}, "", FMFs);
8883 OrigExitingVPV->replaceUsesWithIf(NewExitingVPV, [](VPUser &U, unsigned) {
8884 return isa<VPInstruction>(&U) &&
8885 (cast<VPInstruction>(&U)->getOpcode() ==
8887 cast<VPInstruction>(&U)->getOpcode() ==
8889 cast<VPInstruction>(&U)->getOpcode() ==
8891 });
8892 if (CM.usePredicatedReductionSelect())
8893 PhiR->setOperand(1, NewExitingVPV);
8894 }
8895
8896 // We want code in the middle block to appear to execute on the location of
8897 // the scalar loop's latch terminator because: (a) it is all compiler
8898 // generated, (b) these instructions are always executed after evaluating
8899 // the latch conditional branch, and (c) other passes may add new
8900 // predecessors which terminate on this line. This is the easiest way to
8901 // ensure we don't accidentally cause an extra step back into the loop while
8902 // debugging.
8903 DebugLoc ExitDL = OrigLoop->getLoopLatch()->getTerminator()->getDebugLoc();
8904
8905 // TODO: At the moment ComputeReductionResult also drives creation of the
8906 // bc.merge.rdx phi nodes, hence it needs to be created unconditionally here
8907 // even for in-loop reductions, until the reduction resume value handling is
8908 // also modeled in VPlan.
8909 VPInstruction *FinalReductionResult;
8910 VPBuilder::InsertPointGuard Guard(Builder);
8911 Builder.setInsertPoint(MiddleVPBB, IP);
8912 RecurKind RecurrenceKind = PhiR->getRecurrenceKind();
8914 VPValue *Start = PhiR->getStartValue();
8915 VPValue *Sentinel = Plan->getOrAddLiveIn(RdxDesc.getSentinelValue());
8916 FinalReductionResult =
8917 Builder.createNaryOp(VPInstruction::ComputeFindIVResult,
8918 {PhiR, Start, Sentinel, NewExitingVPV}, ExitDL);
8919 } else if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RecurrenceKind)) {
8920 VPValue *Start = PhiR->getStartValue();
8921 FinalReductionResult =
8922 Builder.createNaryOp(VPInstruction::ComputeAnyOfResult,
8923 {PhiR, Start, NewExitingVPV}, ExitDL);
8924 } else {
8925 VPIRFlags Flags =
8927 ? VPIRFlags(RdxDesc.getFastMathFlags())
8928 : VPIRFlags();
8929 FinalReductionResult =
8930 Builder.createNaryOp(VPInstruction::ComputeReductionResult,
8931 {PhiR, NewExitingVPV}, Flags, ExitDL);
8932 }
8933 // If the vector reduction can be performed in a smaller type, we truncate
8934 // then extend the loop exit value to enable InstCombine to evaluate the
8935 // entire expression in the smaller type.
8936 if (MinVF.isVector() && PhiTy != RdxDesc.getRecurrenceType() &&
8938 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
8940 "Unexpected truncated min-max recurrence!");
8941 Type *RdxTy = RdxDesc.getRecurrenceType();
8942 VPWidenCastRecipe *Trunc;
8943 Instruction::CastOps ExtendOpc =
8944 RdxDesc.isSigned() ? Instruction::SExt : Instruction::ZExt;
8945 VPWidenCastRecipe *Extnd;
8946 {
8947 VPBuilder::InsertPointGuard Guard(Builder);
8948 Builder.setInsertPoint(
8949 NewExitingVPV->getDefiningRecipe()->getParent(),
8950 std::next(NewExitingVPV->getDefiningRecipe()->getIterator()));
8951 Trunc =
8952 Builder.createWidenCast(Instruction::Trunc, NewExitingVPV, RdxTy);
8953 Extnd = Builder.createWidenCast(ExtendOpc, Trunc, PhiTy);
8954 }
8955 if (PhiR->getOperand(1) == NewExitingVPV)
8956 PhiR->setOperand(1, Extnd->getVPSingleValue());
8957
8958 // Update ComputeReductionResult with the truncated exiting value and
8959 // extend its result.
8960 FinalReductionResult->setOperand(1, Trunc);
8961 FinalReductionResult =
8962 Builder.createScalarCast(ExtendOpc, FinalReductionResult, PhiTy, {});
8963 }
8964
8965 // Update all users outside the vector region. Also replace redundant
8966 // ExtractLastElement.
8967 for (auto *U : to_vector(OrigExitingVPV->users())) {
8968 auto *Parent = cast<VPRecipeBase>(U)->getParent();
8969 if (FinalReductionResult == U || Parent->getParent())
8970 continue;
8971 U->replaceUsesOfWith(OrigExitingVPV, FinalReductionResult);
8974 cast<VPInstruction>(U)->replaceAllUsesWith(FinalReductionResult);
8975 }
8976
8977 // Adjust AnyOf reductions; replace the reduction phi for the selected value
8978 // with a boolean reduction phi node to check if the condition is true in
8979 // any iteration. The final value is selected by the final
8980 // ComputeReductionResult.
8981 if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RecurrenceKind)) {
8982 auto *Select = cast<VPRecipeBase>(*find_if(PhiR->users(), [](VPUser *U) {
8983 return isa<VPWidenSelectRecipe>(U) ||
8984 (isa<VPReplicateRecipe>(U) &&
8985 cast<VPReplicateRecipe>(U)->getUnderlyingInstr()->getOpcode() ==
8986 Instruction::Select);
8987 }));
8988 VPValue *Cmp = Select->getOperand(0);
8989 // If the compare is checking the reduction PHI node, adjust it to check
8990 // the start value.
8991 if (VPRecipeBase *CmpR = Cmp->getDefiningRecipe())
8992 CmpR->replaceUsesOfWith(PhiR, PhiR->getStartValue());
8993 Builder.setInsertPoint(Select);
8994
8995 // If the true value of the select is the reduction phi, the new value is
8996 // selected if the negated condition is true in any iteration.
8997 if (Select->getOperand(1) == PhiR)
8998 Cmp = Builder.createNot(Cmp);
8999 VPValue *Or = Builder.createOr(PhiR, Cmp);
9000 Select->getVPSingleValue()->replaceAllUsesWith(Or);
9001 // Delete Select now that it has invalid types.
9002 ToDelete.push_back(Select);
9003
9004 // Convert the reduction phi to operate on bools.
9005 PhiR->setOperand(0, Plan->getFalse());
9006 continue;
9007 }
9008
9010 RdxDesc.getRecurrenceKind())) {
9011 // Adjust the start value for FindFirstIV/FindLastIV recurrences to use
9012 // the sentinel value after generating the ResumePhi recipe, which uses
9013 // the original start value.
9014 PhiR->setOperand(0, Plan->getOrAddLiveIn(RdxDesc.getSentinelValue()));
9015 }
9016 RecurKind RK = RdxDesc.getRecurrenceKind();
9020 VPBuilder PHBuilder(Plan->getVectorPreheader());
9021 VPValue *Iden = Plan->getOrAddLiveIn(
9022 getRecurrenceIdentity(RK, PhiTy, RdxDesc.getFastMathFlags()));
9023 // If the PHI is used by a partial reduction, set the scale factor.
9024 unsigned ScaleFactor =
9025 RecipeBuilder.getScalingForReduction(RdxDesc.getLoopExitInstr())
9026 .value_or(1);
9027 auto *ScaleFactorVPV = Plan->getConstantInt(32, ScaleFactor);
9028 VPValue *StartV = PHBuilder.createNaryOp(
9030 {PhiR->getStartValue(), Iden, ScaleFactorVPV},
9031 PhiTy->isFloatingPointTy() ? RdxDesc.getFastMathFlags()
9032 : FastMathFlags());
9033 PhiR->setOperand(0, StartV);
9034 }
9035 }
9036 for (VPRecipeBase *R : ToDelete)
9037 R->eraseFromParent();
9038
9040}
9041
9042void LoopVectorizationPlanner::attachRuntimeChecks(
9043 VPlan &Plan, GeneratedRTChecks &RTChecks, bool HasBranchWeights) const {
9044 const auto &[SCEVCheckCond, SCEVCheckBlock] = RTChecks.getSCEVChecks();
9045 if (SCEVCheckBlock && SCEVCheckBlock->hasNPredecessors(0)) {
9046 assert((!CM.OptForSize ||
9047 CM.Hints->getForce() == LoopVectorizeHints::FK_Enabled) &&
9048 "Cannot SCEV check stride or overflow when optimizing for size");
9049 VPlanTransforms::attachCheckBlock(Plan, SCEVCheckCond, SCEVCheckBlock,
9050 HasBranchWeights);
9051 }
9052 const auto &[MemCheckCond, MemCheckBlock] = RTChecks.getMemRuntimeChecks();
9053 if (MemCheckBlock && MemCheckBlock->hasNPredecessors(0)) {
9054 // VPlan-native path does not do any analysis for runtime checks
9055 // currently.
9056 assert((!EnableVPlanNativePath || OrigLoop->isInnermost()) &&
9057 "Runtime checks are not supported for outer loops yet");
9058
9059 if (CM.OptForSize) {
9060 assert(
9061 CM.Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
9062 "Cannot emit memory checks when optimizing for size, unless forced "
9063 "to vectorize.");
9064 ORE->emit([&]() {
9065 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
9066 OrigLoop->getStartLoc(),
9067 OrigLoop->getHeader())
9068 << "Code-size may be reduced by not forcing "
9069 "vectorization, or by source-code modifications "
9070 "eliminating the need for runtime checks "
9071 "(e.g., adding 'restrict').";
9072 });
9073 }
9074 VPlanTransforms::attachCheckBlock(Plan, MemCheckCond, MemCheckBlock,
9075 HasBranchWeights);
9076 }
9077}
9078
9080 VPlan &Plan, ElementCount VF, unsigned UF,
9081 ElementCount MinProfitableTripCount) const {
9082 // vscale is not necessarily a power-of-2, which means we cannot guarantee
9083 // an overflow to zero when updating induction variables and so an
9084 // additional overflow check is required before entering the vector loop.
9085 bool IsIndvarOverflowCheckNeededForVF =
9086 VF.isScalable() && !TTI.isVScaleKnownToBeAPowerOfTwo() &&
9087 !isIndvarOverflowCheckKnownFalse(&CM, VF, UF) &&
9088 CM.getTailFoldingStyle() !=
9090 const uint32_t *BranchWeigths =
9091 hasBranchWeightMD(*OrigLoop->getLoopLatch()->getTerminator())
9093 : nullptr;
9095 Plan, VF, UF, MinProfitableTripCount,
9096 CM.requiresScalarEpilogue(VF.isVector()), CM.foldTailByMasking(),
9097 IsIndvarOverflowCheckNeededForVF, OrigLoop, BranchWeigths,
9098 OrigLoop->getLoopPredecessor()->getTerminator()->getDebugLoc(),
9099 *PSE.getSE());
9100}
9101
9103 assert(!State.Lane && "VPDerivedIVRecipe being replicated.");
9104
9105 // Fast-math-flags propagate from the original induction instruction.
9106 IRBuilder<>::FastMathFlagGuard FMFG(State.Builder);
9107 if (FPBinOp)
9108 State.Builder.setFastMathFlags(FPBinOp->getFastMathFlags());
9109
9110 Value *Step = State.get(getStepValue(), VPLane(0));
9111 Value *Index = State.get(getOperand(1), VPLane(0));
9112 Value *DerivedIV = emitTransformedIndex(
9113 State.Builder, Index, getStartValue()->getLiveInIRValue(), Step, Kind,
9115 DerivedIV->setName(Name);
9116 State.set(this, DerivedIV, VPLane(0));
9117}
9118
9119// Determine how to lower the scalar epilogue, which depends on 1) optimising
9120// for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
9121// predication, and 4) a TTI hook that analyses whether the loop is suitable
9122// for predication.
9124 Function *F, Loop *L, LoopVectorizeHints &Hints, bool OptForSize,
9127 // 1) OptSize takes precedence over all other options, i.e. if this is set,
9128 // don't look at hints or options, and don't request a scalar epilogue.
9129 if (F->hasOptSize() ||
9130 (OptForSize && Hints.getForce() != LoopVectorizeHints::FK_Enabled))
9132
9133 // 2) If set, obey the directives
9134 if (PreferPredicateOverEpilogue.getNumOccurrences()) {
9142 };
9143 }
9144
9145 // 3) If set, obey the hints
9146 switch (Hints.getPredicate()) {
9151 };
9152
9153 // 4) if the TTI hook indicates this is profitable, request predication.
9154 TailFoldingInfo TFI(TLI, &LVL, IAI);
9155 if (TTI->preferPredicateOverEpilogue(&TFI))
9157
9159}
9160
9161// Process the loop in the VPlan-native vectorization path. This path builds
9162// VPlan upfront in the vectorization pipeline, which allows to apply
9163// VPlan-to-VPlan transformations from the very beginning without modifying the
9164// input LLVM IR.
9169 OptimizationRemarkEmitter *ORE, bool OptForSize, LoopVectorizeHints &Hints,
9170 LoopVectorizationRequirements &Requirements) {
9171
9173 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
9174 return false;
9175 }
9176 assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
9177 Function *F = L->getHeader()->getParent();
9178 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
9179
9181 getScalarEpilogueLowering(F, L, Hints, OptForSize, TTI, TLI, *LVL, &IAI);
9182
9183 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
9184 &Hints, IAI, OptForSize);
9185 // Use the planner for outer loop vectorization.
9186 // TODO: CM is not used at this point inside the planner. Turn CM into an
9187 // optional argument if we don't need it in the future.
9188 LoopVectorizationPlanner LVP(L, LI, DT, TLI, *TTI, LVL, CM, IAI, PSE, Hints,
9189 ORE);
9190
9191 // Get user vectorization factor.
9192 ElementCount UserVF = Hints.getWidth();
9193
9195
9196 // Plan how to best vectorize, return the best VF and its cost.
9197 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
9198
9199 // If we are stress testing VPlan builds, do not attempt to generate vector
9200 // code. Masked vector code generation support will follow soon.
9201 // Also, do not attempt to vectorize if no vector code will be produced.
9203 return false;
9204
9205 VPlan &BestPlan = LVP.getPlanFor(VF.Width);
9206
9207 {
9208 GeneratedRTChecks Checks(PSE, DT, LI, TTI, F->getDataLayout(), CM.CostKind);
9209 InnerLoopVectorizer LB(L, PSE, LI, DT, TTI, AC, VF.Width, /*UF=*/1, &CM,
9210 Checks, BestPlan);
9211 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
9212 << L->getHeader()->getParent()->getName() << "\"\n");
9213 LVP.addMinimumIterationCheck(BestPlan, VF.Width, /*UF=*/1,
9215
9216 LVP.executePlan(VF.Width, /*UF=*/1, BestPlan, LB, DT, false);
9217 }
9218
9219 reportVectorization(ORE, L, VF, 1);
9220
9221 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
9222 return true;
9223}
9224
9225// Emit a remark if there are stores to floats that required a floating point
9226// extension. If the vectorized loop was generated with floating point there
9227// will be a performance penalty from the conversion overhead and the change in
9228// the vector width.
9231 for (BasicBlock *BB : L->getBlocks()) {
9232 for (Instruction &Inst : *BB) {
9233 if (auto *S = dyn_cast<StoreInst>(&Inst)) {
9234 if (S->getValueOperand()->getType()->isFloatTy())
9235 Worklist.push_back(S);
9236 }
9237 }
9238 }
9239
9240 // Traverse the floating point stores upwards searching, for floating point
9241 // conversions.
9244 while (!Worklist.empty()) {
9245 auto *I = Worklist.pop_back_val();
9246 if (!L->contains(I))
9247 continue;
9248 if (!Visited.insert(I).second)
9249 continue;
9250
9251 // Emit a remark if the floating point store required a floating
9252 // point conversion.
9253 // TODO: More work could be done to identify the root cause such as a
9254 // constant or a function return type and point the user to it.
9255 if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
9256 ORE->emit([&]() {
9257 return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
9258 I->getDebugLoc(), L->getHeader())
9259 << "floating point conversion changes vector width. "
9260 << "Mixed floating point precision requires an up/down "
9261 << "cast that will negatively impact performance.";
9262 });
9263
9264 for (Use &Op : I->operands())
9265 if (auto *OpI = dyn_cast<Instruction>(Op))
9266 Worklist.push_back(OpI);
9267 }
9268}
9269
9270/// For loops with uncountable early exits, find the cost of doing work when
9271/// exiting the loop early, such as calculating the final exit values of
9272/// variables used outside the loop.
9273/// TODO: This is currently overly pessimistic because the loop may not take
9274/// the early exit, but better to keep this conservative for now. In future,
9275/// it might be possible to relax this by using branch probabilities.
9277 VPlan &Plan, ElementCount VF) {
9278 InstructionCost Cost = 0;
9279 for (auto *ExitVPBB : Plan.getExitBlocks()) {
9280 for (auto *PredVPBB : ExitVPBB->getPredecessors()) {
9281 // If the predecessor is not the middle.block, then it must be the
9282 // vector.early.exit block, which may contain work to calculate the exit
9283 // values of variables used outside the loop.
9284 if (PredVPBB != Plan.getMiddleBlock()) {
9285 LLVM_DEBUG(dbgs() << "Calculating cost of work in exit block "
9286 << PredVPBB->getName() << ":\n");
9287 Cost += PredVPBB->cost(VF, CostCtx);
9288 }
9289 }
9290 }
9291 return Cost;
9292}
9293
9294/// This function determines whether or not it's still profitable to vectorize
9295/// the loop given the extra work we have to do outside of the loop:
9296/// 1. Perform the runtime checks before entering the loop to ensure it's safe
9297/// to vectorize.
9298/// 2. In the case of loops with uncountable early exits, we may have to do
9299/// extra work when exiting the loop early, such as calculating the final
9300/// exit values of variables used outside the loop.
9301/// 3. The middle block, if expected TC <= VF.Width.
9302static bool isOutsideLoopWorkProfitable(GeneratedRTChecks &Checks,
9303 VectorizationFactor &VF, Loop *L,
9305 VPCostContext &CostCtx, VPlan &Plan,
9307 std::optional<unsigned> VScale) {
9308 InstructionCost TotalCost = Checks.getCost();
9309 if (!TotalCost.isValid())
9310 return false;
9311
9312 // Add on the cost of any work required in the vector early exit block, if
9313 // one exists.
9314 TotalCost += calculateEarlyExitCost(CostCtx, Plan, VF.Width);
9315
9316 // If the expected trip count is less than the VF, the vector loop will only
9317 // execute a single iteration. Then the middle block is executed the same
9318 // number of times as the vector region.
9319 // TODO: Extend logic to always account for the cost of the middle block.
9320 auto ExpectedTC = getSmallBestKnownTC(PSE, L);
9321 if (ExpectedTC && ElementCount::isKnownLE(*ExpectedTC, VF.Width))
9322 TotalCost += Plan.getMiddleBlock()->cost(VF.Width, CostCtx);
9323
9324 // When interleaving only scalar and vector cost will be equal, which in turn
9325 // would lead to a divide by 0. Fall back to hard threshold.
9326 if (VF.Width.isScalar()) {
9327 // TODO: Should we rename VectorizeMemoryCheckThreshold?
9328 if (TotalCost > VectorizeMemoryCheckThreshold) {
9329 LLVM_DEBUG(
9330 dbgs()
9331 << "LV: Interleaving only is not profitable due to runtime checks\n");
9332 return false;
9333 }
9334 return true;
9335 }
9336
9337 // The scalar cost should only be 0 when vectorizing with a user specified
9338 // VF/IC. In those cases, runtime checks should always be generated.
9339 uint64_t ScalarC = VF.ScalarCost.getValue();
9340 if (ScalarC == 0)
9341 return true;
9342
9343 // First, compute the minimum iteration count required so that the vector
9344 // loop outperforms the scalar loop.
9345 // The total cost of the scalar loop is
9346 // ScalarC * TC
9347 // where
9348 // * TC is the actual trip count of the loop.
9349 // * ScalarC is the cost of a single scalar iteration.
9350 //
9351 // The total cost of the vector loop is
9352 // RtC + VecC * (TC / VF) + EpiC
9353 // where
9354 // * RtC is the sum of the costs cost of
9355 // - the generated runtime checks
9356 // - performing any additional work in the vector.early.exit block for
9357 // loops with uncountable early exits.
9358 // - the middle block, if ExpectedTC <= VF.Width.
9359 // * VecC is the cost of a single vector iteration.
9360 // * TC is the actual trip count of the loop
9361 // * VF is the vectorization factor
9362 // * EpiCost is the cost of the generated epilogue, including the cost
9363 // of the remaining scalar operations.
9364 //
9365 // Vectorization is profitable once the total vector cost is less than the
9366 // total scalar cost:
9367 // RtC + VecC * (TC / VF) + EpiC < ScalarC * TC
9368 //
9369 // Now we can compute the minimum required trip count TC as
9370 // VF * (RtC + EpiC) / (ScalarC * VF - VecC) < TC
9371 //
9372 // For now we assume the epilogue cost EpiC = 0 for simplicity. Note that
9373 // the computations are performed on doubles, not integers and the result
9374 // is rounded up, hence we get an upper estimate of the TC.
9375 unsigned IntVF = estimateElementCount(VF.Width, VScale);
9376 uint64_t RtC = TotalCost.getValue();
9377 uint64_t Div = ScalarC * IntVF - VF.Cost.getValue();
9378 uint64_t MinTC1 = Div == 0 ? 0 : divideCeil(RtC * IntVF, Div);
9379
9380 // Second, compute a minimum iteration count so that the cost of the
9381 // runtime checks is only a fraction of the total scalar loop cost. This
9382 // adds a loop-dependent bound on the overhead incurred if the runtime
9383 // checks fail. In case the runtime checks fail, the cost is RtC + ScalarC
9384 // * TC. To bound the runtime check to be a fraction 1/X of the scalar
9385 // cost, compute
9386 // RtC < ScalarC * TC * (1 / X) ==> RtC * X / ScalarC < TC
9387 uint64_t MinTC2 = divideCeil(RtC * 10, ScalarC);
9388
9389 // Now pick the larger minimum. If it is not a multiple of VF and a scalar
9390 // epilogue is allowed, choose the next closest multiple of VF. This should
9391 // partly compensate for ignoring the epilogue cost.
9392 uint64_t MinTC = std::max(MinTC1, MinTC2);
9393 if (SEL == CM_ScalarEpilogueAllowed)
9394 MinTC = alignTo(MinTC, IntVF);
9396
9397 LLVM_DEBUG(
9398 dbgs() << "LV: Minimum required TC for runtime checks to be profitable:"
9399 << VF.MinProfitableTripCount << "\n");
9400
9401 // Skip vectorization if the expected trip count is less than the minimum
9402 // required trip count.
9403 if (auto ExpectedTC = getSmallBestKnownTC(PSE, L)) {
9404 if (ElementCount::isKnownLT(*ExpectedTC, VF.MinProfitableTripCount)) {
9405 LLVM_DEBUG(dbgs() << "LV: Vectorization is not beneficial: expected "
9406 "trip count < minimum profitable VF ("
9407 << *ExpectedTC << " < " << VF.MinProfitableTripCount
9408 << ")\n");
9409
9410 return false;
9411 }
9412 }
9413 return true;
9414}
9415
9417 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
9419 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
9421
9422/// Prepare \p MainPlan for vectorizing the main vector loop during epilogue
9423/// vectorization. Remove ResumePhis from \p MainPlan for inductions that
9424/// don't have a corresponding wide induction in \p EpiPlan.
9425static void preparePlanForMainVectorLoop(VPlan &MainPlan, VPlan &EpiPlan) {
9426 // Collect PHI nodes of widened phis in the VPlan for the epilogue. Those
9427 // will need their resume-values computed in the main vector loop. Others
9428 // can be removed from the main VPlan.
9429 SmallPtrSet<PHINode *, 2> EpiWidenedPhis;
9430 for (VPRecipeBase &R :
9433 continue;
9434 EpiWidenedPhis.insert(
9435 cast<PHINode>(R.getVPSingleValue()->getUnderlyingValue()));
9436 }
9437 for (VPRecipeBase &R :
9438 make_early_inc_range(MainPlan.getScalarHeader()->phis())) {
9439 auto *VPIRInst = cast<VPIRPhi>(&R);
9440 if (EpiWidenedPhis.contains(&VPIRInst->getIRPhi()))
9441 continue;
9442 // There is no corresponding wide induction in the epilogue plan that would
9443 // need a resume value. Remove the VPIRInst wrapping the scalar header phi
9444 // together with the corresponding ResumePhi. The resume values for the
9445 // scalar loop will be created during execution of EpiPlan.
9446 VPRecipeBase *ResumePhi = VPIRInst->getOperand(0)->getDefiningRecipe();
9447 VPIRInst->eraseFromParent();
9448 ResumePhi->eraseFromParent();
9449 }
9451
9452 using namespace VPlanPatternMatch;
9453 // When vectorizing the epilogue, FindFirstIV & FindLastIV reductions can
9454 // introduce multiple uses of undef/poison. If the reduction start value may
9455 // be undef or poison it needs to be frozen and the frozen start has to be
9456 // used when computing the reduction result. We also need to use the frozen
9457 // value in the resume phi generated by the main vector loop, as this is also
9458 // used to compute the reduction result after the epilogue vector loop.
9459 auto AddFreezeForFindLastIVReductions = [](VPlan &Plan,
9460 bool UpdateResumePhis) {
9461 VPBuilder Builder(Plan.getEntry());
9462 for (VPRecipeBase &R : *Plan.getMiddleBlock()) {
9463 auto *VPI = dyn_cast<VPInstruction>(&R);
9464 if (!VPI || VPI->getOpcode() != VPInstruction::ComputeFindIVResult)
9465 continue;
9466 VPValue *OrigStart = VPI->getOperand(1);
9468 continue;
9469 VPInstruction *Freeze =
9470 Builder.createNaryOp(Instruction::Freeze, {OrigStart}, {}, "fr");
9471 VPI->setOperand(1, Freeze);
9472 if (UpdateResumePhis)
9473 OrigStart->replaceUsesWithIf(Freeze, [Freeze](VPUser &U, unsigned) {
9474 return Freeze != &U && isa<VPPhi>(&U);
9475 });
9476 }
9477 };
9478 AddFreezeForFindLastIVReductions(MainPlan, true);
9479 AddFreezeForFindLastIVReductions(EpiPlan, false);
9480
9481 VPBasicBlock *MainScalarPH = MainPlan.getScalarPreheader();
9482 VPValue *VectorTC = &MainPlan.getVectorTripCount();
9483 // If there is a suitable resume value for the canonical induction in the
9484 // scalar (which will become vector) epilogue loop, use it and move it to the
9485 // beginning of the scalar preheader. Otherwise create it below.
9486 auto ResumePhiIter =
9487 find_if(MainScalarPH->phis(), [VectorTC](VPRecipeBase &R) {
9488 return match(&R, m_VPInstruction<Instruction::PHI>(m_Specific(VectorTC),
9489 m_ZeroInt()));
9490 });
9491 VPPhi *ResumePhi = nullptr;
9492 if (ResumePhiIter == MainScalarPH->phis().end()) {
9493 VPBuilder ScalarPHBuilder(MainScalarPH, MainScalarPH->begin());
9494 ResumePhi = ScalarPHBuilder.createScalarPhi(
9495 {VectorTC,
9497 {}, "vec.epilog.resume.val");
9498 } else {
9499 ResumePhi = cast<VPPhi>(&*ResumePhiIter);
9500 if (MainScalarPH->begin() == MainScalarPH->end())
9501 ResumePhi->moveBefore(*MainScalarPH, MainScalarPH->end());
9502 else if (&*MainScalarPH->begin() != ResumePhi)
9503 ResumePhi->moveBefore(*MainScalarPH, MainScalarPH->begin());
9504 }
9505 // Add a user to to make sure the resume phi won't get removed.
9506 VPBuilder(MainScalarPH)
9508}
9509
9510/// Prepare \p Plan for vectorizing the epilogue loop. That is, re-use expanded
9511/// SCEVs from \p ExpandedSCEVs and set resume values for header recipes. Some
9512/// reductions require creating new instructions to compute the resume values.
9513/// They are collected in a vector and returned. They must be moved to the
9514/// preheader of the vector epilogue loop, after created by the execution of \p
9515/// Plan.
9517 VPlan &Plan, Loop *L, const SCEV2ValueTy &ExpandedSCEVs,
9519 ScalarEvolution &SE) {
9520 VPRegionBlock *VectorLoop = Plan.getVectorLoopRegion();
9521 VPBasicBlock *Header = VectorLoop->getEntryBasicBlock();
9522 Header->setName("vec.epilog.vector.body");
9523
9524 VPCanonicalIVPHIRecipe *IV = VectorLoop->getCanonicalIV();
9525 // When vectorizing the epilogue loop, the canonical induction needs to be
9526 // adjusted by the value after the main vector loop. Find the resume value
9527 // created during execution of the main VPlan. It must be the first phi in the
9528 // loop preheader. Use the value to increment the canonical IV, and update all
9529 // users in the loop region to use the adjusted value.
9530 // FIXME: Improve modeling for canonical IV start values in the epilogue
9531 // loop.
9532 using namespace llvm::PatternMatch;
9533 PHINode *EPResumeVal = &*L->getLoopPreheader()->phis().begin();
9534 for (Value *Inc : EPResumeVal->incoming_values()) {
9535 if (match(Inc, m_SpecificInt(0)))
9536 continue;
9537 assert(!EPI.VectorTripCount &&
9538 "Must only have a single non-zero incoming value");
9539 EPI.VectorTripCount = Inc;
9540 }
9541 // If we didn't find a non-zero vector trip count, all incoming values
9542 // must be zero, which also means the vector trip count is zero. Pick the
9543 // first zero as vector trip count.
9544 // TODO: We should not choose VF * UF so the main vector loop is known to
9545 // be dead.
9546 if (!EPI.VectorTripCount) {
9547 assert(EPResumeVal->getNumIncomingValues() > 0 &&
9548 all_of(EPResumeVal->incoming_values(),
9549 [](Value *Inc) { return match(Inc, m_SpecificInt(0)); }) &&
9550 "all incoming values must be 0");
9551 EPI.VectorTripCount = EPResumeVal->getOperand(0);
9552 }
9553 VPValue *VPV = Plan.getOrAddLiveIn(EPResumeVal);
9554 assert(all_of(IV->users(),
9555 [](const VPUser *U) {
9556 return isa<VPScalarIVStepsRecipe>(U) ||
9557 isa<VPDerivedIVRecipe>(U) ||
9558 cast<VPRecipeBase>(U)->isScalarCast() ||
9559 cast<VPInstruction>(U)->getOpcode() ==
9560 Instruction::Add;
9561 }) &&
9562 "the canonical IV should only be used by its increment or "
9563 "ScalarIVSteps when resetting the start value");
9564 VPBuilder Builder(Header, Header->getFirstNonPhi());
9565 VPInstruction *Add = Builder.createNaryOp(Instruction::Add, {IV, VPV});
9566 IV->replaceAllUsesWith(Add);
9567 Add->setOperand(0, IV);
9568
9570 SmallVector<Instruction *> InstsToMove;
9571 // Ensure that the start values for all header phi recipes are updated before
9572 // vectorizing the epilogue loop. Skip the canonical IV, which has been
9573 // handled above.
9574 for (VPRecipeBase &R : drop_begin(Header->phis())) {
9575 Value *ResumeV = nullptr;
9576 // TODO: Move setting of resume values to prepareToExecute.
9577 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) {
9578 auto *RdxResult =
9579 cast<VPInstruction>(*find_if(ReductionPhi->users(), [](VPUser *U) {
9580 auto *VPI = dyn_cast<VPInstruction>(U);
9581 return VPI &&
9582 (VPI->getOpcode() == VPInstruction::ComputeAnyOfResult ||
9583 VPI->getOpcode() == VPInstruction::ComputeReductionResult ||
9584 VPI->getOpcode() == VPInstruction::ComputeFindIVResult);
9585 }));
9586 ResumeV = cast<PHINode>(ReductionPhi->getUnderlyingInstr())
9587 ->getIncomingValueForBlock(L->getLoopPreheader());
9588 RecurKind RK = ReductionPhi->getRecurrenceKind();
9590 Value *StartV = RdxResult->getOperand(1)->getLiveInIRValue();
9591 // VPReductionPHIRecipes for AnyOf reductions expect a boolean as
9592 // start value; compare the final value from the main vector loop
9593 // to the start value.
9594 BasicBlock *PBB = cast<Instruction>(ResumeV)->getParent();
9595 IRBuilder<> Builder(PBB, PBB->getFirstNonPHIIt());
9596 ResumeV = Builder.CreateICmpNE(ResumeV, StartV);
9597 if (auto *I = dyn_cast<Instruction>(ResumeV))
9598 InstsToMove.push_back(I);
9600 Value *StartV = getStartValueFromReductionResult(RdxResult);
9601 ToFrozen[StartV] = cast<PHINode>(ResumeV)->getIncomingValueForBlock(
9603
9604 // VPReductionPHIRecipe for FindFirstIV/FindLastIV reductions requires
9605 // an adjustment to the resume value. The resume value is adjusted to
9606 // the sentinel value when the final value from the main vector loop
9607 // equals the start value. This ensures correctness when the start value
9608 // might not be less than the minimum value of a monotonically
9609 // increasing induction variable.
9610 BasicBlock *ResumeBB = cast<Instruction>(ResumeV)->getParent();
9611 IRBuilder<> Builder(ResumeBB, ResumeBB->getFirstNonPHIIt());
9612 Value *Cmp = Builder.CreateICmpEQ(ResumeV, ToFrozen[StartV]);
9613 if (auto *I = dyn_cast<Instruction>(Cmp))
9614 InstsToMove.push_back(I);
9615 Value *Sentinel = RdxResult->getOperand(2)->getLiveInIRValue();
9616 ResumeV = Builder.CreateSelect(Cmp, Sentinel, ResumeV);
9617 if (auto *I = dyn_cast<Instruction>(ResumeV))
9618 InstsToMove.push_back(I);
9619 } else {
9620 VPValue *StartVal = Plan.getOrAddLiveIn(ResumeV);
9621 auto *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
9622 if (auto *VPI = dyn_cast<VPInstruction>(PhiR->getStartValue())) {
9624 "unexpected start value");
9625 VPI->setOperand(0, StartVal);
9626 continue;
9627 }
9628 }
9629 } else {
9630 // Retrieve the induction resume values for wide inductions from
9631 // their original phi nodes in the scalar loop.
9632 PHINode *IndPhi = cast<VPWidenInductionRecipe>(&R)->getPHINode();
9633 // Hook up to the PHINode generated by a ResumePhi recipe of main
9634 // loop VPlan, which feeds the scalar loop.
9635 ResumeV = IndPhi->getIncomingValueForBlock(L->getLoopPreheader());
9636 }
9637 assert(ResumeV && "Must have a resume value");
9638 VPValue *StartVal = Plan.getOrAddLiveIn(ResumeV);
9639 cast<VPHeaderPHIRecipe>(&R)->setStartValue(StartVal);
9640 }
9641
9642 // For some VPValues in the epilogue plan we must re-use the generated IR
9643 // values from the main plan. Replace them with live-in VPValues.
9644 // TODO: This is a workaround needed for epilogue vectorization and it
9645 // should be removed once induction resume value creation is done
9646 // directly in VPlan.
9647 for (auto &R : make_early_inc_range(*Plan.getEntry())) {
9648 // Re-use frozen values from the main plan for Freeze VPInstructions in the
9649 // epilogue plan. This ensures all users use the same frozen value.
9650 auto *VPI = dyn_cast<VPInstruction>(&R);
9651 if (VPI && VPI->getOpcode() == Instruction::Freeze) {
9653 ToFrozen.lookup(VPI->getOperand(0)->getLiveInIRValue())));
9654 continue;
9655 }
9656
9657 // Re-use the trip count and steps expanded for the main loop, as
9658 // skeleton creation needs it as a value that dominates both the scalar
9659 // and vector epilogue loops
9660 auto *ExpandR = dyn_cast<VPExpandSCEVRecipe>(&R);
9661 if (!ExpandR)
9662 continue;
9663 VPValue *ExpandedVal =
9664 Plan.getOrAddLiveIn(ExpandedSCEVs.lookup(ExpandR->getSCEV()));
9665 ExpandR->replaceAllUsesWith(ExpandedVal);
9666 if (Plan.getTripCount() == ExpandR)
9667 Plan.resetTripCount(ExpandedVal);
9668 ExpandR->eraseFromParent();
9669 }
9670
9671 auto VScale = CM.getVScaleForTuning();
9672 unsigned MainLoopStep =
9673 estimateElementCount(EPI.MainLoopVF * EPI.MainLoopUF, VScale);
9674 unsigned EpilogueLoopStep =
9675 estimateElementCount(EPI.EpilogueVF * EPI.EpilogueUF, VScale);
9677 Plan, EPI.TripCount, EPI.VectorTripCount,
9679 EPI.EpilogueUF, MainLoopStep, EpilogueLoopStep, SE);
9680
9681 return InstsToMove;
9682}
9683
9684// Generate bypass values from the additional bypass block. Note that when the
9685// vectorized epilogue is skipped due to iteration count check, then the
9686// resume value for the induction variable comes from the trip count of the
9687// main vector loop, passed as the second argument.
9689 PHINode *OrigPhi, const InductionDescriptor &II, IRBuilder<> &BypassBuilder,
9690 const SCEV2ValueTy &ExpandedSCEVs, Value *MainVectorTripCount,
9691 Instruction *OldInduction) {
9692 Value *Step = getExpandedStep(II, ExpandedSCEVs);
9693 // For the primary induction the additional bypass end value is known.
9694 // Otherwise it is computed.
9695 Value *EndValueFromAdditionalBypass = MainVectorTripCount;
9696 if (OrigPhi != OldInduction) {
9697 auto *BinOp = II.getInductionBinOp();
9698 // Fast-math-flags propagate from the original induction instruction.
9700 BypassBuilder.setFastMathFlags(BinOp->getFastMathFlags());
9701
9702 // Compute the end value for the additional bypass.
9703 EndValueFromAdditionalBypass =
9704 emitTransformedIndex(BypassBuilder, MainVectorTripCount,
9705 II.getStartValue(), Step, II.getKind(), BinOp);
9706 EndValueFromAdditionalBypass->setName("ind.end");
9707 }
9708 return EndValueFromAdditionalBypass;
9709}
9710
9712 VPlan &BestEpiPlan,
9714 const SCEV2ValueTy &ExpandedSCEVs,
9715 Value *MainVectorTripCount) {
9716 // Fix reduction resume values from the additional bypass block.
9717 BasicBlock *PH = L->getLoopPreheader();
9718 for (auto *Pred : predecessors(PH)) {
9719 for (PHINode &Phi : PH->phis()) {
9720 if (Phi.getBasicBlockIndex(Pred) != -1)
9721 continue;
9722 Phi.addIncoming(Phi.getIncomingValueForBlock(BypassBlock), Pred);
9723 }
9724 }
9725 auto *ScalarPH = cast<VPIRBasicBlock>(BestEpiPlan.getScalarPreheader());
9726 if (ScalarPH->hasPredecessors()) {
9727 // If ScalarPH has predecessors, we may need to update its reduction
9728 // resume values.
9729 for (const auto &[R, IRPhi] :
9730 zip(ScalarPH->phis(), ScalarPH->getIRBasicBlock()->phis())) {
9732 BypassBlock);
9733 }
9734 }
9735
9736 // Fix induction resume values from the additional bypass block.
9737 IRBuilder<> BypassBuilder(BypassBlock, BypassBlock->getFirstInsertionPt());
9738 for (const auto &[IVPhi, II] : LVL.getInductionVars()) {
9739 auto *Inc = cast<PHINode>(IVPhi->getIncomingValueForBlock(PH));
9741 IVPhi, II, BypassBuilder, ExpandedSCEVs, MainVectorTripCount,
9742 LVL.getPrimaryInduction());
9743 // TODO: Directly add as extra operand to the VPResumePHI recipe.
9744 Inc->setIncomingValueForBlock(BypassBlock, V);
9745 }
9746}
9747
9748/// Connect the epilogue vector loop generated for \p EpiPlan to the main vector
9749// loop, after both plans have executed, updating branches from the iteration
9750// and runtime checks of the main loop, as well as updating various phis. \p
9751// InstsToMove contains instructions that need to be moved to the preheader of
9752// the epilogue vector loop.
9754 VPlan &EpiPlan, Loop *L, EpilogueLoopVectorizationInfo &EPI,
9756 DenseMap<const SCEV *, Value *> &ExpandedSCEVs, GeneratedRTChecks &Checks,
9757 ArrayRef<Instruction *> InstsToMove) {
9758 BasicBlock *VecEpilogueIterationCountCheck =
9759 cast<VPIRBasicBlock>(EpiPlan.getEntry())->getIRBasicBlock();
9760
9761 BasicBlock *VecEpiloguePreHeader =
9762 cast<BranchInst>(VecEpilogueIterationCountCheck->getTerminator())
9763 ->getSuccessor(1);
9764 // Adjust the control flow taking the state info from the main loop
9765 // vectorization into account.
9767 "expected this to be saved from the previous pass.");
9768 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
9770 VecEpilogueIterationCountCheck, VecEpiloguePreHeader);
9771
9773 VecEpilogueIterationCountCheck},
9775 VecEpiloguePreHeader}});
9776
9777 BasicBlock *ScalarPH =
9778 cast<VPIRBasicBlock>(EpiPlan.getScalarPreheader())->getIRBasicBlock();
9780 VecEpilogueIterationCountCheck, ScalarPH);
9781 DTU.applyUpdates(
9783 VecEpilogueIterationCountCheck},
9785
9786 // Adjust the terminators of runtime check blocks and phis using them.
9787 BasicBlock *SCEVCheckBlock = Checks.getSCEVChecks().second;
9788 BasicBlock *MemCheckBlock = Checks.getMemRuntimeChecks().second;
9789 if (SCEVCheckBlock) {
9790 SCEVCheckBlock->getTerminator()->replaceUsesOfWith(
9791 VecEpilogueIterationCountCheck, ScalarPH);
9792 DTU.applyUpdates({{DominatorTree::Delete, SCEVCheckBlock,
9793 VecEpilogueIterationCountCheck},
9794 {DominatorTree::Insert, SCEVCheckBlock, ScalarPH}});
9795 }
9796 if (MemCheckBlock) {
9797 MemCheckBlock->getTerminator()->replaceUsesOfWith(
9798 VecEpilogueIterationCountCheck, ScalarPH);
9799 DTU.applyUpdates(
9800 {{DominatorTree::Delete, MemCheckBlock, VecEpilogueIterationCountCheck},
9801 {DominatorTree::Insert, MemCheckBlock, ScalarPH}});
9802 }
9803
9804 // The vec.epilog.iter.check block may contain Phi nodes from inductions
9805 // or reductions which merge control-flow from the latch block and the
9806 // middle block. Update the incoming values here and move the Phi into the
9807 // preheader.
9808 SmallVector<PHINode *, 4> PhisInBlock(
9809 llvm::make_pointer_range(VecEpilogueIterationCountCheck->phis()));
9810
9811 for (PHINode *Phi : PhisInBlock) {
9812 Phi->moveBefore(VecEpiloguePreHeader->getFirstNonPHIIt());
9813 Phi->replaceIncomingBlockWith(
9814 VecEpilogueIterationCountCheck->getSinglePredecessor(),
9815 VecEpilogueIterationCountCheck);
9816
9817 // If the phi doesn't have an incoming value from the
9818 // EpilogueIterationCountCheck, we are done. Otherwise remove the
9819 // incoming value and also those from other check blocks. This is needed
9820 // for reduction phis only.
9821 if (none_of(Phi->blocks(), [&](BasicBlock *IncB) {
9822 return EPI.EpilogueIterationCountCheck == IncB;
9823 }))
9824 continue;
9825 Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck);
9826 if (SCEVCheckBlock)
9827 Phi->removeIncomingValue(SCEVCheckBlock);
9828 if (MemCheckBlock)
9829 Phi->removeIncomingValue(MemCheckBlock);
9830 }
9831
9832 auto IP = VecEpiloguePreHeader->getFirstNonPHIIt();
9833 for (auto *I : InstsToMove)
9834 I->moveBefore(IP);
9835
9836 // VecEpilogueIterationCountCheck conditionally skips over the epilogue loop
9837 // after executing the main loop. We need to update the resume values of
9838 // inductions and reductions during epilogue vectorization.
9839 fixScalarResumeValuesFromBypass(VecEpilogueIterationCountCheck, L, EpiPlan,
9840 LVL, ExpandedSCEVs, EPI.VectorTripCount);
9841}
9842
9844 assert((EnableVPlanNativePath || L->isInnermost()) &&
9845 "VPlan-native path is not enabled. Only process inner loops.");
9846
9847 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in '"
9848 << L->getHeader()->getParent()->getName() << "' from "
9849 << L->getLocStr() << "\n");
9850
9851 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI);
9852
9853 LLVM_DEBUG(
9854 dbgs() << "LV: Loop hints:"
9855 << " force="
9857 ? "disabled"
9859 ? "enabled"
9860 : "?"))
9861 << " width=" << Hints.getWidth()
9862 << " interleave=" << Hints.getInterleave() << "\n");
9863
9864 // Function containing loop
9865 Function *F = L->getHeader()->getParent();
9866
9867 // Looking at the diagnostic output is the only way to determine if a loop
9868 // was vectorized (other than looking at the IR or machine code), so it
9869 // is important to generate an optimization remark for each loop. Most of
9870 // these messages are generated as OptimizationRemarkAnalysis. Remarks
9871 // generated as OptimizationRemark and OptimizationRemarkMissed are
9872 // less verbose reporting vectorized loops and unvectorized loops that may
9873 // benefit from vectorization, respectively.
9874
9875 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
9876 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
9877 return false;
9878 }
9879
9880 PredicatedScalarEvolution PSE(*SE, *L);
9881
9882 // Query this against the original loop and save it here because the profile
9883 // of the original loop header may change as the transformation happens.
9884 bool OptForSize = llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
9886
9887 // Check if it is legal to vectorize the loop.
9888 LoopVectorizationRequirements Requirements;
9889 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, F, *LAIs, LI, ORE,
9890 &Requirements, &Hints, DB, AC,
9891 /*AllowRuntimeSCEVChecks=*/!OptForSize, AA);
9893 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
9894 Hints.emitRemarkWithHints();
9895 return false;
9896 }
9897
9899 reportVectorizationFailure("Auto-vectorization of loops with uncountable "
9900 "early exit is not enabled",
9901 "UncountableEarlyExitLoopsDisabled", ORE, L);
9902 return false;
9903 }
9904
9905 if (!LVL.getPotentiallyFaultingLoads().empty()) {
9906 reportVectorizationFailure("Auto-vectorization of loops with potentially "
9907 "faulting load is not supported",
9908 "PotentiallyFaultingLoadsNotSupported", ORE, L);
9909 return false;
9910 }
9911
9912 // Entrance to the VPlan-native vectorization path. Outer loops are processed
9913 // here. They may require CFG and instruction level transformations before
9914 // even evaluating whether vectorization is profitable. Since we cannot modify
9915 // the incoming IR, we need to build VPlan upfront in the vectorization
9916 // pipeline.
9917 if (!L->isInnermost())
9918 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
9919 ORE, OptForSize, Hints, Requirements);
9920
9921 assert(L->isInnermost() && "Inner loop expected.");
9922
9923 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
9924 bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
9925
9926 // If an override option has been passed in for interleaved accesses, use it.
9927 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
9928 UseInterleaved = EnableInterleavedMemAccesses;
9929
9930 // Analyze interleaved memory accesses.
9931 if (UseInterleaved)
9933
9934 if (LVL.hasUncountableEarlyExit()) {
9935 BasicBlock *LoopLatch = L->getLoopLatch();
9936 if (IAI.requiresScalarEpilogue() ||
9938 [LoopLatch](BasicBlock *BB) { return BB != LoopLatch; })) {
9939 reportVectorizationFailure("Auto-vectorization of early exit loops "
9940 "requiring a scalar epilogue is unsupported",
9941 "UncountableEarlyExitUnsupported", ORE, L);
9942 return false;
9943 }
9944 }
9945
9946 // Check the function attributes and profiles to find out if this function
9947 // should be optimized for size.
9949 getScalarEpilogueLowering(F, L, Hints, OptForSize, TTI, TLI, LVL, &IAI);
9950
9951 // Check the loop for a trip count threshold: vectorize loops with a tiny trip
9952 // count by optimizing for size, to minimize overheads.
9953 auto ExpectedTC = getSmallBestKnownTC(PSE, L);
9954 if (ExpectedTC && ExpectedTC->isFixed() &&
9955 ExpectedTC->getFixedValue() < TinyTripCountVectorThreshold) {
9956 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
9957 << "This loop is worth vectorizing only if no scalar "
9958 << "iteration overheads are incurred.");
9960 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
9961 else {
9962 LLVM_DEBUG(dbgs() << "\n");
9963 // Predicate tail-folded loops are efficient even when the loop
9964 // iteration count is low. However, setting the epilogue policy to
9965 // `CM_ScalarEpilogueNotAllowedLowTripLoop` prevents vectorizing loops
9966 // with runtime checks. It's more effective to let
9967 // `isOutsideLoopWorkProfitable` determine if vectorization is
9968 // beneficial for the loop.
9971 }
9972 }
9973
9974 // Check the function attributes to see if implicit floats or vectors are
9975 // allowed.
9976 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
9978 "Can't vectorize when the NoImplicitFloat attribute is used",
9979 "loop not vectorized due to NoImplicitFloat attribute",
9980 "NoImplicitFloat", ORE, L);
9981 Hints.emitRemarkWithHints();
9982 return false;
9983 }
9984
9985 // Check if the target supports potentially unsafe FP vectorization.
9986 // FIXME: Add a check for the type of safety issue (denormal, signaling)
9987 // for the target we're vectorizing for, to make sure none of the
9988 // additional fp-math flags can help.
9989 if (Hints.isPotentiallyUnsafe() &&
9990 TTI->isFPVectorizationPotentiallyUnsafe()) {
9992 "Potentially unsafe FP op prevents vectorization",
9993 "loop not vectorized due to unsafe FP support.",
9994 "UnsafeFP", ORE, L);
9995 Hints.emitRemarkWithHints();
9996 return false;
9997 }
9998
9999 bool AllowOrderedReductions;
10000 // If the flag is set, use that instead and override the TTI behaviour.
10001 if (ForceOrderedReductions.getNumOccurrences() > 0)
10002 AllowOrderedReductions = ForceOrderedReductions;
10003 else
10004 AllowOrderedReductions = TTI->enableOrderedReductions();
10005 if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
10006 ORE->emit([&]() {
10007 auto *ExactFPMathInst = Requirements.getExactFPInst();
10008 return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
10009 ExactFPMathInst->getDebugLoc(),
10010 ExactFPMathInst->getParent())
10011 << "loop not vectorized: cannot prove it is safe to reorder "
10012 "floating-point operations";
10013 });
10014 LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
10015 "reorder floating-point operations\n");
10016 Hints.emitRemarkWithHints();
10017 return false;
10018 }
10019
10020 // Use the cost model.
10021 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
10022 F, &Hints, IAI, OptForSize);
10023 // Use the planner for vectorization.
10024 LoopVectorizationPlanner LVP(L, LI, DT, TLI, *TTI, &LVL, CM, IAI, PSE, Hints,
10025 ORE);
10026
10027 // Get user vectorization factor and interleave count.
10028 ElementCount UserVF = Hints.getWidth();
10029 unsigned UserIC = Hints.getInterleave();
10030 if (UserIC > 1 && !LVL.isSafeForAnyVectorWidth())
10031 UserIC = 1;
10032
10033 // Plan how to best vectorize.
10034 LVP.plan(UserVF, UserIC);
10036 unsigned IC = 1;
10037
10038 if (ORE->allowExtraAnalysis(LV_NAME))
10040
10041 GeneratedRTChecks Checks(PSE, DT, LI, TTI, F->getDataLayout(), CM.CostKind);
10042 if (LVP.hasPlanWithVF(VF.Width)) {
10043 // Select the interleave count.
10044 IC = LVP.selectInterleaveCount(LVP.getPlanFor(VF.Width), VF.Width, VF.Cost);
10045
10046 unsigned SelectedIC = std::max(IC, UserIC);
10047 // Optimistically generate runtime checks if they are needed. Drop them if
10048 // they turn out to not be profitable.
10049 if (VF.Width.isVector() || SelectedIC > 1) {
10050 Checks.create(L, *LVL.getLAI(), PSE.getPredicate(), VF.Width, SelectedIC);
10051
10052 // Bail out early if either the SCEV or memory runtime checks are known to
10053 // fail. In that case, the vector loop would never execute.
10054 using namespace llvm::PatternMatch;
10055 if (Checks.getSCEVChecks().first &&
10056 match(Checks.getSCEVChecks().first, m_One()))
10057 return false;
10058 if (Checks.getMemRuntimeChecks().first &&
10059 match(Checks.getMemRuntimeChecks().first, m_One()))
10060 return false;
10061 }
10062
10063 // Check if it is profitable to vectorize with runtime checks.
10064 bool ForceVectorization =
10066 VPCostContext CostCtx(CM.TTI, *CM.TLI, LVP.getPlanFor(VF.Width), CM,
10067 CM.CostKind, *CM.PSE.getSE(), L);
10068 if (!ForceVectorization &&
10069 !isOutsideLoopWorkProfitable(Checks, VF, L, PSE, CostCtx,
10070 LVP.getPlanFor(VF.Width), SEL,
10071 CM.getVScaleForTuning())) {
10072 ORE->emit([&]() {
10074 DEBUG_TYPE, "CantReorderMemOps", L->getStartLoc(),
10075 L->getHeader())
10076 << "loop not vectorized: cannot prove it is safe to reorder "
10077 "memory operations";
10078 });
10079 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
10080 Hints.emitRemarkWithHints();
10081 return false;
10082 }
10083 }
10084
10085 // Identify the diagnostic messages that should be produced.
10086 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
10087 bool VectorizeLoop = true, InterleaveLoop = true;
10088 if (VF.Width.isScalar()) {
10089 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
10090 VecDiagMsg = {
10091 "VectorizationNotBeneficial",
10092 "the cost-model indicates that vectorization is not beneficial"};
10093 VectorizeLoop = false;
10094 }
10095
10096 if (UserIC == 1 && Hints.getInterleave() > 1) {
10098 "UserIC should only be ignored due to unsafe dependencies");
10099 LLVM_DEBUG(dbgs() << "LV: Ignoring user-specified interleave count.\n");
10100 IntDiagMsg = {"InterleavingUnsafe",
10101 "Ignoring user-specified interleave count due to possibly "
10102 "unsafe dependencies in the loop."};
10103 InterleaveLoop = false;
10104 } else if (!LVP.hasPlanWithVF(VF.Width) && UserIC > 1) {
10105 // Tell the user interleaving was avoided up-front, despite being explicitly
10106 // requested.
10107 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
10108 "interleaving should be avoided up front\n");
10109 IntDiagMsg = {"InterleavingAvoided",
10110 "Ignoring UserIC, because interleaving was avoided up front"};
10111 InterleaveLoop = false;
10112 } else if (IC == 1 && UserIC <= 1) {
10113 // Tell the user interleaving is not beneficial.
10114 LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
10115 IntDiagMsg = {
10116 "InterleavingNotBeneficial",
10117 "the cost-model indicates that interleaving is not beneficial"};
10118 InterleaveLoop = false;
10119 if (UserIC == 1) {
10120 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
10121 IntDiagMsg.second +=
10122 " and is explicitly disabled or interleave count is set to 1";
10123 }
10124 } else if (IC > 1 && UserIC == 1) {
10125 // Tell the user interleaving is beneficial, but it explicitly disabled.
10126 LLVM_DEBUG(dbgs() << "LV: Interleaving is beneficial but is explicitly "
10127 "disabled.\n");
10128 IntDiagMsg = {"InterleavingBeneficialButDisabled",
10129 "the cost-model indicates that interleaving is beneficial "
10130 "but is explicitly disabled or interleave count is set to 1"};
10131 InterleaveLoop = false;
10132 }
10133
10134 // If there is a histogram in the loop, do not just interleave without
10135 // vectorizing. The order of operations will be incorrect without the
10136 // histogram intrinsics, which are only used for recipes with VF > 1.
10137 if (!VectorizeLoop && InterleaveLoop && LVL.hasHistograms()) {
10138 LLVM_DEBUG(dbgs() << "LV: Not interleaving without vectorization due "
10139 << "to histogram operations.\n");
10140 IntDiagMsg = {
10141 "HistogramPreventsScalarInterleaving",
10142 "Unable to interleave without vectorization due to constraints on "
10143 "the order of histogram operations"};
10144 InterleaveLoop = false;
10145 }
10146
10147 // Override IC if user provided an interleave count.
10148 IC = UserIC > 0 ? UserIC : IC;
10149
10150 // Emit diagnostic messages, if any.
10151 const char *VAPassName = Hints.vectorizeAnalysisPassName();
10152 if (!VectorizeLoop && !InterleaveLoop) {
10153 // Do not vectorize or interleaving the loop.
10154 ORE->emit([&]() {
10155 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
10156 L->getStartLoc(), L->getHeader())
10157 << VecDiagMsg.second;
10158 });
10159 ORE->emit([&]() {
10160 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
10161 L->getStartLoc(), L->getHeader())
10162 << IntDiagMsg.second;
10163 });
10164 return false;
10165 }
10166
10167 if (!VectorizeLoop && InterleaveLoop) {
10168 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10169 ORE->emit([&]() {
10170 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
10171 L->getStartLoc(), L->getHeader())
10172 << VecDiagMsg.second;
10173 });
10174 } else if (VectorizeLoop && !InterleaveLoop) {
10175 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10176 << ") in " << L->getLocStr() << '\n');
10177 ORE->emit([&]() {
10178 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
10179 L->getStartLoc(), L->getHeader())
10180 << IntDiagMsg.second;
10181 });
10182 } else if (VectorizeLoop && InterleaveLoop) {
10183 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10184 << ") in " << L->getLocStr() << '\n');
10185 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10186 }
10187
10188 // Report the vectorization decision.
10189 if (VF.Width.isScalar()) {
10190 using namespace ore;
10191 assert(IC > 1);
10192 ORE->emit([&]() {
10193 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
10194 L->getHeader())
10195 << "interleaved loop (interleaved count: "
10196 << NV("InterleaveCount", IC) << ")";
10197 });
10198 } else {
10199 // Report the vectorization decision.
10200 reportVectorization(ORE, L, VF, IC);
10201 }
10202 if (ORE->allowExtraAnalysis(LV_NAME))
10204
10205 // If we decided that it is *legal* to interleave or vectorize the loop, then
10206 // do it.
10207
10208 VPlan &BestPlan = LVP.getPlanFor(VF.Width);
10209 // Consider vectorizing the epilogue too if it's profitable.
10210 VectorizationFactor EpilogueVF =
10212 if (EpilogueVF.Width.isVector()) {
10213 std::unique_ptr<VPlan> BestMainPlan(BestPlan.duplicate());
10214
10215 // The first pass vectorizes the main loop and creates a scalar epilogue
10216 // to be vectorized by executing the plan (potentially with a different
10217 // factor) again shortly afterwards.
10218 VPlan &BestEpiPlan = LVP.getPlanFor(EpilogueVF.Width);
10219 BestEpiPlan.getMiddleBlock()->setName("vec.epilog.middle.block");
10220 BestEpiPlan.getVectorPreheader()->setName("vec.epilog.ph");
10221 preparePlanForMainVectorLoop(*BestMainPlan, BestEpiPlan);
10222 EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1,
10223 BestEpiPlan);
10224 EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TTI, AC, EPI, &CM,
10225 Checks, *BestMainPlan);
10226 auto ExpandedSCEVs = LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF,
10227 *BestMainPlan, MainILV, DT, false);
10228 ++LoopsVectorized;
10229
10230 // Second pass vectorizes the epilogue and adjusts the control flow
10231 // edges from the first pass.
10232 EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TTI, AC, EPI, &CM,
10233 Checks, BestEpiPlan);
10235 BestEpiPlan, L, ExpandedSCEVs, EPI, CM, *PSE.getSE());
10236 LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV, DT,
10237 true);
10238 connectEpilogueVectorLoop(BestEpiPlan, L, EPI, DT, LVL, ExpandedSCEVs,
10239 Checks, InstsToMove);
10240 ++LoopsEpilogueVectorized;
10241 } else {
10242 InnerLoopVectorizer LB(L, PSE, LI, DT, TTI, AC, VF.Width, IC, &CM, Checks,
10243 BestPlan);
10244 // TODO: Move to general VPlan pipeline once epilogue loops are also
10245 // supported.
10248 IC, PSE);
10249 LVP.addMinimumIterationCheck(BestPlan, VF.Width, IC,
10251
10252 LVP.executePlan(VF.Width, IC, BestPlan, LB, DT, false);
10253 ++LoopsVectorized;
10254 }
10255
10256 assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
10257 "DT not preserved correctly");
10258 assert(!verifyFunction(*F, &dbgs()));
10259
10260 return true;
10261}
10262
10264
10265 // Don't attempt if
10266 // 1. the target claims to have no vector registers, and
10267 // 2. interleaving won't help ILP.
10268 //
10269 // The second condition is necessary because, even if the target has no
10270 // vector registers, loop vectorization may still enable scalar
10271 // interleaving.
10272 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
10273 TTI->getMaxInterleaveFactor(ElementCount::getFixed(1)) < 2)
10274 return LoopVectorizeResult(false, false);
10275
10276 bool Changed = false, CFGChanged = false;
10277
10278 // The vectorizer requires loops to be in simplified form.
10279 // Since simplification may add new inner loops, it has to run before the
10280 // legality and profitability checks. This means running the loop vectorizer
10281 // will simplify all loops, regardless of whether anything end up being
10282 // vectorized.
10283 for (const auto &L : *LI)
10284 Changed |= CFGChanged |=
10285 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10286
10287 // Build up a worklist of inner-loops to vectorize. This is necessary as
10288 // the act of vectorizing or partially unrolling a loop creates new loops
10289 // and can invalidate iterators across the loops.
10290 SmallVector<Loop *, 8> Worklist;
10291
10292 for (Loop *L : *LI)
10293 collectSupportedLoops(*L, LI, ORE, Worklist);
10294
10295 LoopsAnalyzed += Worklist.size();
10296
10297 // Now walk the identified inner loops.
10298 while (!Worklist.empty()) {
10299 Loop *L = Worklist.pop_back_val();
10300
10301 // For the inner loops we actually process, form LCSSA to simplify the
10302 // transform.
10303 Changed |= formLCSSARecursively(*L, *DT, LI, SE);
10304
10305 Changed |= CFGChanged |= processLoop(L);
10306
10307 if (Changed) {
10308 LAIs->clear();
10309
10310#ifndef NDEBUG
10311 if (VerifySCEV)
10312 SE->verify();
10313#endif
10314 }
10315 }
10316
10317 // Process each loop nest in the function.
10318 return LoopVectorizeResult(Changed, CFGChanged);
10319}
10320
10323 LI = &AM.getResult<LoopAnalysis>(F);
10324 // There are no loops in the function. Return before computing other
10325 // expensive analyses.
10326 if (LI->empty())
10327 return PreservedAnalyses::all();
10336 AA = &AM.getResult<AAManager>(F);
10337
10338 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
10339 PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
10340 BFI = nullptr;
10341 if (PSI && PSI->hasProfileSummary())
10343 LoopVectorizeResult Result = runImpl(F);
10344 if (!Result.MadeAnyChange)
10345 return PreservedAnalyses::all();
10347
10348 if (isAssignmentTrackingEnabled(*F.getParent())) {
10349 for (auto &BB : F)
10351 }
10352
10353 PA.preserve<LoopAnalysis>();
10357
10358 if (Result.MadeCFGChange) {
10359 // Making CFG changes likely means a loop got vectorized. Indicate that
10360 // extra simplification passes should be run.
10361 // TODO: MadeCFGChanges is not a prefect proxy. Extra passes should only
10362 // be run if runtime checks have been added.
10365 } else {
10367 }
10368 return PA;
10369}
10370
10372 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
10373 static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
10374 OS, MapClassName2PassName);
10375
10376 OS << '<';
10377 OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
10378 OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
10379 OS << '>';
10380}
for(const MachineOperand &MO :llvm::drop_begin(OldMI.operands(), Desc.getNumOperands()))
static unsigned getIntrinsicID(const SDNode *N)
unsigned RegSize
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
aarch64 promote const
AMDGPU Lower Kernel Arguments
AMDGPU Register Bank Select
Rewrite undef for PHI
This file implements a class to represent arbitrary precision integral constant values and operations...
@ PostInc
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static bool isEqual(const Function &Caller, const Function &Callee)
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This is the interface for LLVM's primary stateless and local alias analysis.
static bool IsEmptyBlock(MachineBasicBlock *MBB)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
static cl::opt< IntrinsicCostStrategy > IntrinsicCost("intrinsic-cost-strategy", cl::desc("Costing strategy for intrinsic instructions"), cl::init(IntrinsicCostStrategy::InstructionCost), cl::values(clEnumValN(IntrinsicCostStrategy::InstructionCost, "instruction-cost", "Use TargetTransformInfo::getInstructionCost"), clEnumValN(IntrinsicCostStrategy::IntrinsicCost, "intrinsic-cost", "Use TargetTransformInfo::getIntrinsicInstrCost"), clEnumValN(IntrinsicCostStrategy::TypeBasedIntrinsicCost, "type-based-intrinsic-cost", "Calculate the intrinsic cost based only on argument types")))
static InstructionCost getCost(Instruction &Inst, TTI::TargetCostKind CostKind, TargetTransformInfo &TTI, TargetLibraryInfo &TLI)
Definition CostModel.cpp:74
This file defines DenseMapInfo traits for DenseMap.
This file defines the DenseMap class.
#define DEBUG_TYPE
This is the interface for a simple mod/ref and alias analysis over globals.
Hexagon Common GEP
#define _
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This defines the Use class.
static bool hasNoUnsignedWrap(BinaryOperator &I)
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
static std::pair< Value *, APInt > getMask(Value *WideMask, unsigned Factor, ElementCount LeafValueEC)
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Legalize the Machine IR a function s Machine IR
Definition Legalizer.cpp:80
static cl::opt< unsigned, true > VectorizationFactor("force-vector-width", cl::Hidden, cl::desc("Sets the SIMD width. Zero is autoselect."), cl::location(VectorizerParams::VectorizationFactor))
This header provides classes for managing per-loop analyses.
static cl::opt< bool > WidenIV("loop-flatten-widen-iv", cl::Hidden, cl::init(true), cl::desc("Widen the loop induction variables, if possible, so " "overflow checks won't reject flattening"))
static const char * VerboseDebug
#define LV_NAME
This file defines the LoopVectorizationLegality class.
This file provides a LoopVectorizationPlanner class.
static void collectSupportedLoops(Loop &L, LoopInfo *LI, OptimizationRemarkEmitter *ORE, SmallVectorImpl< Loop * > &V)
static cl::opt< unsigned > EpilogueVectorizationMinVF("epilogue-vectorization-minimum-VF", cl::Hidden, cl::desc("Only loops with vectorization factor equal to or larger than " "the specified value are considered for epilogue vectorization."))
static cl::opt< unsigned > EpilogueVectorizationForceVF("epilogue-vectorization-force-VF", cl::init(1), cl::Hidden, cl::desc("When epilogue vectorization is enabled, and a value greater than " "1 is specified, forces the given VF for all applicable epilogue " "loops."))
static Type * maybeVectorizeType(Type *Ty, ElementCount VF)
static ElementCount determineVPlanVF(const TargetTransformInfo &TTI, LoopVectorizationCostModel &CM)
static ElementCount getSmallConstantTripCount(ScalarEvolution *SE, const Loop *L)
A version of ScalarEvolution::getSmallConstantTripCount that returns an ElementCount to include loops...
static cl::opt< unsigned > VectorizeMemoryCheckThreshold("vectorize-memory-check-threshold", cl::init(128), cl::Hidden, cl::desc("The maximum allowed number of runtime memory checks"))
static void preparePlanForMainVectorLoop(VPlan &MainPlan, VPlan &EpiPlan)
Prepare MainPlan for vectorizing the main vector loop during epilogue vectorization.
static cl::opt< unsigned > TinyTripCountVectorThreshold("vectorizer-min-trip-count", cl::init(16), cl::Hidden, cl::desc("Loops with a constant trip count that is smaller than this " "value are vectorized only if no scalar iteration overheads " "are incurred."))
Loops with a known constant trip count below this number are vectorized only if no scalar iteration o...
static void debugVectorizationMessage(const StringRef Prefix, const StringRef DebugMsg, Instruction *I)
Write a DebugMsg about vectorization to the debug output stream.
static cl::opt< bool > EnableCondStoresVectorization("enable-cond-stores-vec", cl::init(true), cl::Hidden, cl::desc("Enable if predication of stores during vectorization."))
static void legacyCSE(BasicBlock *BB)
FIXME: This legacy common-subexpression-elimination routine is scheduled for removal,...
static VPIRBasicBlock * replaceVPBBWithIRVPBB(VPBasicBlock *VPBB, BasicBlock *IRBB, VPlan *Plan=nullptr)
Replace VPBB with a VPIRBasicBlock wrapping IRBB.
static Value * emitTransformedIndex(IRBuilderBase &B, Value *Index, Value *StartValue, Value *Step, InductionDescriptor::InductionKind InductionKind, const BinaryOperator *InductionBinOp)
Compute the transformed value of Index at offset StartValue using step StepValue.
static DebugLoc getDebugLocFromInstOrOperands(Instruction *I)
Look for a meaningful debug location on the instruction or its operands.
static Value * createInductionAdditionalBypassValues(PHINode *OrigPhi, const InductionDescriptor &II, IRBuilder<> &BypassBuilder, const SCEV2ValueTy &ExpandedSCEVs, Value *MainVectorTripCount, Instruction *OldInduction)
static void fixReductionScalarResumeWhenVectorizingEpilog(VPPhi *EpiResumePhiR, PHINode &EpiResumePhi, BasicBlock *BypassBlock)
static Value * getStartValueFromReductionResult(VPInstruction *RdxResult)
static cl::opt< bool > ForceTargetSupportsScalableVectors("force-target-supports-scalable-vectors", cl::init(false), cl::Hidden, cl::desc("Pretend that scalable vectors are supported, even if the target does " "not support them. This flag should only be used for testing."))
static bool useActiveLaneMaskForControlFlow(TailFoldingStyle Style)
static VPWidenIntOrFpInductionRecipe * createWidenInductionRecipes(VPInstruction *PhiR, const InductionDescriptor &IndDesc, VPlan &Plan, ScalarEvolution &SE, Loop &OrigLoop)
Creates a VPWidenIntOrFpInductionRecipe for PhiR.
static cl::opt< bool > EnableEarlyExitVectorization("enable-early-exit-vectorization", cl::init(true), cl::Hidden, cl::desc("Enable vectorization of early exit loops with uncountable exits."))
static cl::opt< bool > ConsiderRegPressure("vectorizer-consider-reg-pressure", cl::init(false), cl::Hidden, cl::desc("Discard VFs if their register pressure is too high."))
static unsigned estimateElementCount(ElementCount VF, std::optional< unsigned > VScale)
This function attempts to return a value that represents the ElementCount at runtime.
static constexpr uint32_t MinItersBypassWeights[]
static cl::opt< unsigned > ForceTargetNumScalarRegs("force-target-num-scalar-regs", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's number of scalar registers."))
static cl::opt< bool > UseWiderVFIfCallVariantsPresent("vectorizer-maximize-bandwidth-for-vector-calls", cl::init(true), cl::Hidden, cl::desc("Try wider VFs if they enable the use of vector variants"))
static std::optional< unsigned > getMaxVScale(const Function &F, const TargetTransformInfo &TTI)
static cl::opt< unsigned > SmallLoopCost("small-loop-cost", cl::init(20), cl::Hidden, cl::desc("The cost of a loop that is considered 'small' by the interleaver."))
static void connectEpilogueVectorLoop(VPlan &EpiPlan, Loop *L, EpilogueLoopVectorizationInfo &EPI, DominatorTree *DT, LoopVectorizationLegality &LVL, DenseMap< const SCEV *, Value * > &ExpandedSCEVs, GeneratedRTChecks &Checks, ArrayRef< Instruction * > InstsToMove)
Connect the epilogue vector loop generated for EpiPlan to the main vector.
static bool planContainsAdditionalSimplifications(VPlan &Plan, VPCostContext &CostCtx, Loop *TheLoop, ElementCount VF)
Return true if the original loop \ TheLoop contains any instructions that do not have corresponding r...
static cl::opt< unsigned > ForceTargetNumVectorRegs("force-target-num-vector-regs", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's number of vector registers."))
static bool isExplicitVecOuterLoop(Loop *OuterLp, OptimizationRemarkEmitter *ORE)
static cl::opt< bool > EnableIndVarRegisterHeur("enable-ind-var-reg-heur", cl::init(true), cl::Hidden, cl::desc("Count the induction variable only once when interleaving"))
static bool processLoopInVPlanNativePath(Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, bool OptForSize, LoopVectorizeHints &Hints, LoopVectorizationRequirements &Requirements)
static cl::opt< TailFoldingStyle > ForceTailFoldingStyle("force-tail-folding-style", cl::desc("Force the tail folding style"), cl::init(TailFoldingStyle::None), cl::values(clEnumValN(TailFoldingStyle::None, "none", "Disable tail folding"), clEnumValN(TailFoldingStyle::Data, "data", "Create lane mask for data only, using active.lane.mask intrinsic"), clEnumValN(TailFoldingStyle::DataWithoutLaneMask, "data-without-lane-mask", "Create lane mask with compare/stepvector"), clEnumValN(TailFoldingStyle::DataAndControlFlow, "data-and-control", "Create lane mask using active.lane.mask intrinsic, and use " "it for both data and control flow"), clEnumValN(TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck, "data-and-control-without-rt-check", "Similar to data-and-control, but remove the runtime check"), clEnumValN(TailFoldingStyle::DataWithEVL, "data-with-evl", "Use predicated EVL instructions for tail folding. If EVL " "is unsupported, fallback to data-without-lane-mask.")))
static ScalarEpilogueLowering getScalarEpilogueLowering(Function *F, Loop *L, LoopVectorizeHints &Hints, bool OptForSize, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, LoopVectorizationLegality &LVL, InterleavedAccessInfo *IAI)
static cl::opt< bool > EnableEpilogueVectorization("enable-epilogue-vectorization", cl::init(true), cl::Hidden, cl::desc("Enable vectorization of epilogue loops."))
static cl::opt< bool > PreferPredicatedReductionSelect("prefer-predicated-reduction-select", cl::init(false), cl::Hidden, cl::desc("Prefer predicating a reduction operation over an after loop select."))
static cl::opt< bool > PreferInLoopReductions("prefer-inloop-reductions", cl::init(false), cl::Hidden, cl::desc("Prefer in-loop vector reductions, " "overriding the targets preference."))
static SmallVector< Instruction * > preparePlanForEpilogueVectorLoop(VPlan &Plan, Loop *L, const SCEV2ValueTy &ExpandedSCEVs, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel &CM, ScalarEvolution &SE)
Prepare Plan for vectorizing the epilogue loop.
static cl::opt< bool > EnableLoadStoreRuntimeInterleave("enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, cl::desc("Enable runtime interleaving until load/store ports are saturated"))
static cl::opt< bool > VPlanBuildStressTest("vplan-build-stress-test", cl::init(false), cl::Hidden, cl::desc("Build VPlan for every supported loop nest in the function and bail " "out right after the build (stress test the VPlan H-CFG construction " "in the VPlan-native vectorization path)."))
static bool hasIrregularType(Type *Ty, const DataLayout &DL)
A helper function that returns true if the given type is irregular.
static cl::opt< bool > LoopVectorizeWithBlockFrequency("loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, cl::desc("Enable the use of the block frequency analysis to access PGO " "heuristics minimizing code growth in cold regions and being more " "aggressive in hot regions."))
static std::optional< ElementCount > getSmallBestKnownTC(PredicatedScalarEvolution &PSE, Loop *L, bool CanUseConstantMax=true)
Returns "best known" trip count, which is either a valid positive trip count or std::nullopt when an ...
static Value * getExpandedStep(const InductionDescriptor &ID, const SCEV2ValueTy &ExpandedSCEVs)
Return the expanded step for ID using ExpandedSCEVs to look up SCEV expansion results.
static bool useActiveLaneMask(TailFoldingStyle Style)
static bool hasReplicatorRegion(VPlan &Plan)
static bool isIndvarOverflowCheckKnownFalse(const LoopVectorizationCostModel *Cost, ElementCount VF, std::optional< unsigned > UF=std::nullopt)
For the given VF and UF and maximum trip count computed for the loop, return whether the induction va...
static void addFullyUnrolledInstructionsToIgnore(Loop *L, const LoopVectorizationLegality::InductionList &IL, SmallPtrSetImpl< Instruction * > &InstsToIgnore)
Knowing that loop L executes a single vector iteration, add instructions that will get simplified and...
static cl::opt< PreferPredicateTy::Option > PreferPredicateOverEpilogue("prefer-predicate-over-epilogue", cl::init(PreferPredicateTy::ScalarEpilogue), cl::Hidden, cl::desc("Tail-folding and predication preferences over creating a scalar " "epilogue loop."), cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue, "scalar-epilogue", "Don't tail-predicate loops, create scalar epilogue"), clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue, "predicate-else-scalar-epilogue", "prefer tail-folding, create scalar epilogue if tail " "folding fails."), clEnumValN(PreferPredicateTy::PredicateOrDontVectorize, "predicate-dont-vectorize", "prefers tail-folding, don't attempt vectorization if " "tail-folding fails.")))
static cl::opt< bool > EnableInterleavedMemAccesses("enable-interleaved-mem-accesses", cl::init(false), cl::Hidden, cl::desc("Enable vectorization on interleaved memory accesses in a loop"))
static cl::opt< bool > EnableMaskedInterleavedMemAccesses("enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden, cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"))
An interleave-group may need masking if it resides in a block that needs predication,...
static cl::opt< bool > ForceOrderedReductions("force-ordered-reductions", cl::init(false), cl::Hidden, cl::desc("Enable the vectorisation of loops with in-order (strict) " "FP reductions"))
static const SCEV * getAddressAccessSCEV(Value *Ptr, LoopVectorizationLegality *Legal, PredicatedScalarEvolution &PSE, const Loop *TheLoop)
Gets Address Access SCEV after verifying that the access pattern is loop invariant except the inducti...
static cl::opt< cl::boolOrDefault > ForceSafeDivisor("force-widen-divrem-via-safe-divisor", cl::Hidden, cl::desc("Override cost based safe divisor widening for div/rem instructions"))
static InstructionCost calculateEarlyExitCost(VPCostContext &CostCtx, VPlan &Plan, ElementCount VF)
For loops with uncountable early exits, find the cost of doing work when exiting the loop early,...
static cl::opt< unsigned > ForceTargetMaxVectorInterleaveFactor("force-target-max-vector-interleave", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's max interleave factor for " "vectorized loops."))
static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI)
static cl::opt< unsigned > NumberOfStoresToPredicate("vectorize-num-stores-pred", cl::init(1), cl::Hidden, cl::desc("Max number of stores to be predicated behind an if."))
The number of stores in a loop that are allowed to need predication.
static cl::opt< unsigned > MaxNestedScalarReductionIC("max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, cl::desc("The maximum interleave count to use when interleaving a scalar " "reduction in a nested loop."))
static cl::opt< unsigned > ForceTargetMaxScalarInterleaveFactor("force-target-max-scalar-interleave", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's max interleave factor for " "scalar loops."))
static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE)
static bool willGenerateVectors(VPlan &Plan, ElementCount VF, const TargetTransformInfo &TTI)
Check if any recipe of Plan will generate a vector value, which will be assigned a vector register.
static bool isOutsideLoopWorkProfitable(GeneratedRTChecks &Checks, VectorizationFactor &VF, Loop *L, PredicatedScalarEvolution &PSE, VPCostContext &CostCtx, VPlan &Plan, ScalarEpilogueLowering SEL, std::optional< unsigned > VScale)
This function determines whether or not it's still profitable to vectorize the loop given the extra w...
static void fixScalarResumeValuesFromBypass(BasicBlock *BypassBlock, Loop *L, VPlan &BestEpiPlan, LoopVectorizationLegality &LVL, const SCEV2ValueTy &ExpandedSCEVs, Value *MainVectorTripCount)
static cl::opt< bool > MaximizeBandwidth("vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden, cl::desc("Maximize bandwidth when selecting vectorization factor which " "will be determined by the smallest type in loop."))
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop, Instruction *I, DebugLoc DL={})
Create an analysis remark that explains why vectorization failed.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
This file implements a map that provides insertion order iteration.
This file contains the declarations for metadata subclasses.
#define T
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
#define P(N)
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static BinaryOperator * CreateMul(Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore, Value *FlagsOp)
static BinaryOperator * CreateAdd(Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore, Value *FlagsOp)
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static InstructionCost getScalarizationOverhead(const TargetTransformInfo &TTI, Type *ScalarTy, VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={})
This is similar to TargetTransformInfo::getScalarizationOverhead, but if ScalarTy is a FixedVectorTyp...
This file contains some templates that are useful if you are working with the STL at all.
#define OP(OPC)
Definition Instruction.h:46
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
#define DEBUG_WITH_TYPE(TYPE,...)
DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.
Definition Debug.h:72
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
This pass exposes codegen information to IR-level passes.
LocallyHashedType DenseMapInfo< LocallyHashedType >::Empty
This file implements the TypeSwitch template, which mimics a switch() statement whose cases are type ...
This file contains the declarations of different VPlan-related auxiliary helpers.
This file provides utility VPlan to VPlan transformations.
This file declares the class VPlanVerifier, which contains utility functions to check the consistency...
This file contains the declarations of the Vectorization Plan base classes:
static const char PassName[]
Value * RHS
Value * LHS
static const uint32_t IV[8]
Definition blake3_impl.h:83
A manager for alias analyses.
Class for arbitrary precision integers.
Definition APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:235
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1541
unsigned getActiveBits() const
Compute the number of active bits in the value.
Definition APInt.h:1513
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
A function analysis which provides an AssumptionCache.
A cache of @llvm.assume calls within a function.
LLVM_ABI unsigned getVScaleRangeMin() const
Returns the minimum value for the vscale_range attribute.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
LLVM_ABI const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
BinaryOps getOpcode() const
Definition InstrTypes.h:374
Analysis pass which computes BlockFrequencyInfo.
Conditional or Unconditional Branch instruction.
bool isConditional() const
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
BasicBlock * getSuccessor(unsigned i) const
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
bool isNoBuiltin() const
Return true if the call should not be treated as a call to a builtin.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Value * getArgOperand(unsigned i) const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition InstrTypes.h:982
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
A debug info location.
Definition DebugLoc.h:124
static DebugLoc getTemporary()
Definition DebugLoc.h:161
static DebugLoc getUnknown()
Definition DebugLoc.h:162
An analysis that produces DemandedBits for a function.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:256
iterator end()
Definition DenseMap.h:81
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition DenseMap.h:169
void insert_range(Range &&R)
Inserts range of 'std::pair<KeyT, ValueT>' values into the map.
Definition DenseMap.h:294
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
Analysis pass which computes a DominatorTree.
Definition Dominators.h:283
void changeImmediateDominator(DomTreeNodeBase< NodeT > *N, DomTreeNodeBase< NodeT > *NewIDom)
changeImmediateDominator - This method is used to update the dominator tree information when a node's...
void eraseNode(NodeT *BB)
eraseNode - Removes a node from the dominator tree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:164
constexpr bool isVector() const
One or more elements.
Definition TypeSize.h:324
static constexpr ElementCount getScalable(ScalarTy MinVal)
Definition TypeSize.h:312
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
Definition TypeSize.h:315
constexpr bool isScalar() const
Exactly one element.
Definition TypeSize.h:320
EpilogueVectorizerEpilogueLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, GeneratedRTChecks &Checks, VPlan &Plan)
BasicBlock * createVectorizedLoopSkeleton() final
Implements the interface for creating a vectorized skeleton using the epilogue loop strategy (i....
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
A specialized derived class of inner loop vectorizer that performs vectorization of main loops in the...
void introduceCheckBlockInVPlan(BasicBlock *CheckIRBB)
Introduces a new VPIRBasicBlock for CheckIRBB to Plan between the vector preheader and its predecesso...
BasicBlock * emitIterationCountCheck(BasicBlock *VectorPH, BasicBlock *Bypass, bool ForEpilogue)
Emits an iteration count bypass check once for the main loop (when ForEpilogue is false) and once for...
Value * createIterationCountCheck(BasicBlock *VectorPH, ElementCount VF, unsigned UF) const
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
EpilogueVectorizerMainLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, GeneratedRTChecks &Check, VPlan &Plan)
BasicBlock * createVectorizedLoopSkeleton() final
Implements the interface for creating a vectorized skeleton using the main loop strategy (i....
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:22
static FastMathFlags getFast()
Definition FMF.h:50
Class to represent function types.
param_iterator param_begin() const
param_iterator param_end() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:209
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition Function.cpp:765
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:730
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags none()
void applyUpdates(ArrayRef< UpdateT > Updates)
Submit updates to all available trees.
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Definition IRBuilder.h:345
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2788
A struct for saving information about induction variables.
const SCEV * getStep() const
ArrayRef< Instruction * > getCastInsts() const
Returns an ArrayRef to the type cast instructions in the induction update chain, that are redundant w...
InductionKind
This enum represents the kinds of inductions that we support.
@ IK_NoInduction
Not an induction variable.
@ IK_FpInduction
Floating point induction variable.
@ IK_PtrInduction
Pointer induction var. Step = C.
@ IK_IntInduction
Integer induction variable. Step = C.
Value * getStartValue() const
InnerLoopAndEpilogueVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, GeneratedRTChecks &Checks, VPlan &Plan, ElementCount VecWidth, ElementCount MinProfitableTripCount, unsigned UnrollFactor)
EpilogueLoopVectorizationInfo & EPI
Holds and updates state information required to vectorize the main loop and its epilogue in two separ...
InnerLoopVectorizer vectorizes loops which contain only one basic block to a specified vectorization ...
virtual void printDebugTracesAtStart()
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
Value * TripCount
Trip count of the original loop.
const TargetTransformInfo * TTI
Target Transform Info.
LoopVectorizationCostModel * Cost
The profitablity analysis.
Value * getTripCount() const
Returns the original loop trip count.
friend class LoopVectorizationPlanner
InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, ElementCount VecWidth, unsigned UnrollFactor, LoopVectorizationCostModel *CM, GeneratedRTChecks &RTChecks, VPlan &Plan)
PredicatedScalarEvolution & PSE
A wrapper around ScalarEvolution used to add runtime SCEV checks.
LoopInfo * LI
Loop Info.
DominatorTree * DT
Dominator Tree.
void setTripCount(Value *TC)
Used to set the trip count after ILV's construction and after the preheader block has been executed.
void fixVectorizedLoop(VPTransformState &State)
Fix the vectorized code, taking care of header phi's, and more.
virtual BasicBlock * createVectorizedLoopSkeleton()
Creates a basic block for the scalar preheader.
virtual void printDebugTracesAtEnd()
AssumptionCache * AC
Assumption Cache.
IRBuilder Builder
The builder that we use.
void fixNonInductionPHIs(VPTransformState &State)
Fix the non-induction PHIs in Plan.
VPBasicBlock * VectorPHVPBB
The vector preheader block of Plan, used as target for check blocks introduced during skeleton creati...
unsigned UF
The vectorization unroll factor to use.
GeneratedRTChecks & RTChecks
Structure to hold information about generated runtime checks, responsible for cleaning the checks,...
virtual ~InnerLoopVectorizer()=default
ElementCount VF
The vectorization SIMD factor to use.
Loop * OrigLoop
The original loop.
BasicBlock * createScalarPreheader(StringRef Prefix)
Create and return a new IR basic block for the scalar preheader whose name is prefixed with Prefix.
InstSimplifyFolder - Use InstructionSimplify to fold operations to existing values.
static InstructionCost getInvalid(CostType Val=0)
static InstructionCost getMax()
CostType getValue() const
This function is intended to be used as sparingly as possible, since the class provides the full rang...
bool isCast() const
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
bool isBinaryOp() const
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
const char * getOpcodeName() const
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:318
LLVM_ABI APInt getMask() const
For example, this is 0xFF for an 8 bit integer, 0xFFFF for i16, etc.
Definition Type.cpp:342
The group of interleaved loads/stores sharing the same stride and close to each other.
uint32_t getFactor() const
InstTy * getMember(uint32_t Index) const
Get the member with the given index Index.
InstTy * getInsertPos() const
uint32_t getNumMembers() const
Drive the analysis of interleaved memory accesses in the loop.
bool requiresScalarEpilogue() const
Returns true if an interleaved group that may access memory out-of-bounds requires a scalar epilogue ...
LLVM_ABI void analyzeInterleaving(bool EnableMaskedInterleavedGroup)
Analyze the interleaved accesses and collect them in interleave groups.
An instruction for reading from memory.
Type * getPointerOperandType() const
This analysis provides dependence information for the memory accesses of a loop.
Drive the analysis of memory accesses in the loop.
const RuntimePointerChecking * getRuntimePointerChecking() const
unsigned getNumRuntimePointerChecks() const
Number of memchecks required to prove independence of otherwise may-alias pointers.
Analysis pass that exposes the LoopInfo for a function.
Definition LoopInfo.h:569
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
void getExitingBlocks(SmallVectorImpl< BlockT * > &ExitingBlocks) const
Return all blocks inside the loop that have successors outside of the loop.
BlockT * getHeader() const
iterator_range< block_iterator > blocks() const
ArrayRef< BlockT * > getBlocks() const
Get a list of the basic blocks which make up this loop.
Store the result of a depth first search within basic blocks contained by a single loop.
RPOIterator beginRPO() const
Reverse iterate over the cached postorder blocks.
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
RPOIterator endRPO() const
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
void removeBlock(BlockT *BB)
This method completely removes BB from all data structures, including all of the Loop objects it is n...
LoopVectorizationCostModel - estimates the expected speedups due to vectorization.
SmallPtrSet< Type *, 16 > ElementTypesInLoop
All element types found in the loop.
bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment, unsigned AddressSpace) const
Returns true if the target machine supports masked load operation for the given DataType and kind of ...
void collectElementTypesForWidening()
Collect all element types in the loop for which widening is needed.
bool canVectorizeReductions(ElementCount VF) const
Returns true if the target machine supports all of the reduction variables found for the given VF.
bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment, unsigned AddressSpace) const
Returns true if the target machine supports masked store operation for the given DataType and kind of...
bool isEpilogueVectorizationProfitable(const ElementCount VF, const unsigned IC) const
Returns true if epilogue vectorization is considered profitable, and false otherwise.
bool useWideActiveLaneMask() const
Returns true if the use of wide lane masks is requested and the loop is using tail-folding with a lan...
bool isPredicatedInst(Instruction *I) const
Returns true if I is an instruction that needs to be predicated at runtime.
void collectValuesToIgnore()
Collect values we want to ignore in the cost model.
void collectInLoopReductions()
Split reductions into those that happen in the loop, and those that happen outside.
std::pair< unsigned, unsigned > getSmallestAndWidestTypes()
bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be uniform after vectorization.
void collectNonVectorizedAndSetWideningDecisions(ElementCount VF)
Collect values that will not be widened, including Uniforms, Scalars, and Instructions to Scalarize f...
PredicatedScalarEvolution & PSE
Predicated scalar evolution analysis.
const LoopVectorizeHints * Hints
Loop Vectorize Hint.
std::optional< unsigned > getMaxSafeElements() const
Return maximum safe number of elements to be processed per vector iteration, which do not prevent sto...
LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, LoopVectorizationLegality *Legal, const TargetTransformInfo &TTI, const TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, const Function *F, const LoopVectorizeHints *Hints, InterleavedAccessInfo &IAI, bool OptForSize)
const TargetTransformInfo & TTI
Vector target information.
LoopVectorizationLegality * Legal
Vectorization legality.
std::optional< InstructionCost > getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy) const
Return the cost of instructions in an inloop reduction pattern, if I is part of that pattern.
InstructionCost getInstructionCost(Instruction *I, ElementCount VF)
Returns the execution time cost of an instruction for a given vector width.
DemandedBits * DB
Demanded bits analysis.
bool interleavedAccessCanBeWidened(Instruction *I, ElementCount VF) const
Returns true if I is a memory instruction in an interleaved-group of memory accesses that can be vect...
const TargetLibraryInfo * TLI
Target Library Info.
bool memoryInstructionCanBeWidened(Instruction *I, ElementCount VF)
Returns true if I is a memory instruction with consecutive memory access that can be widened.
const InterleaveGroup< Instruction > * getInterleavedAccessGroup(Instruction *Instr) const
Get the interleaved access group that Instr belongs to.
InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const
Estimate cost of an intrinsic call instruction CI if it were vectorized with factor VF.
bool OptForSize
Whether this loop should be optimized for size based on function attribute or profile information.
bool useMaxBandwidth(TargetTransformInfo::RegisterKind RegKind)
bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be scalar after vectorization.
bool isOptimizableIVTruncate(Instruction *I, ElementCount VF)
Return True if instruction I is an optimizable truncate whose operand is an induction variable.
FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC)
bool shouldConsiderRegPressureForVF(ElementCount VF)
Loop * TheLoop
The loop that we evaluate.
TTI::TargetCostKind CostKind
The kind of cost that we are calculating.
TailFoldingStyle getTailFoldingStyle(bool IVUpdateMayOverflow=true) const
Returns the TailFoldingStyle that is best for the current loop.
InterleavedAccessInfo & InterleaveInfo
The interleave access information contains groups of interleaved accesses with the same stride and cl...
SmallPtrSet< const Value *, 16 > ValuesToIgnore
Values to ignore in the cost model.
void setVectorizedCallDecision(ElementCount VF)
A call may be vectorized in different ways depending on whether we have vectorized variants available...
void invalidateCostModelingDecisions()
Invalidates decisions already taken by the cost model.
bool isAccessInterleaved(Instruction *Instr) const
Check if Instr belongs to any interleaved access group.
bool selectUserVectorizationFactor(ElementCount UserVF)
Setup cost-based decisions for user vectorization factor.
std::optional< unsigned > getVScaleForTuning() const
Return the value of vscale used for tuning the cost model.
OptimizationRemarkEmitter * ORE
Interface to emit optimization remarks.
bool preferPredicatedLoop() const
Returns true if tail-folding is preferred over a scalar epilogue.
LoopInfo * LI
Loop Info analysis.
bool requiresScalarEpilogue(bool IsVectorizing) const
Returns true if we're required to use a scalar epilogue for at least the final iteration of the origi...
SmallPtrSet< const Value *, 16 > VecValuesToIgnore
Values to ignore in the cost model when VF > 1.
unsigned getPredBlockCostDivisor(TargetTransformInfo::TargetCostKind CostKind, BasicBlock *BB) const
A helper function that returns how much we should divide the cost of a predicated block by.
bool isInLoopReduction(PHINode *Phi) const
Returns true if the Phi is part of an inloop reduction.
bool isProfitableToScalarize(Instruction *I, ElementCount VF) const
void setWideningDecision(const InterleaveGroup< Instruction > *Grp, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for interleaving group Grp and vector ...
const MapVector< Instruction *, uint64_t > & getMinimalBitwidths() const
CallWideningDecision getCallWideningDecision(CallInst *CI, ElementCount VF) const
bool isLegalGatherOrScatter(Value *V, ElementCount VF)
Returns true if the target machine can represent V as a masked gather or scatter operation.
bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const
bool shouldConsiderInvariant(Value *Op)
Returns true if Op should be considered invariant and if it is trivially hoistable.
bool foldTailByMasking() const
Returns true if all loop blocks should be masked to fold tail loop.
bool foldTailWithEVL() const
Returns true if VP intrinsics with explicit vector length support should be generated in the tail fol...
bool usePredicatedReductionSelect() const
Returns true if the predicated reduction select should be used to set the incoming value for the redu...
bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const
Returns true if the instructions in this block requires predication for any reason,...
void setCallWideningDecision(CallInst *CI, ElementCount VF, InstWidening Kind, Function *Variant, Intrinsic::ID IID, std::optional< unsigned > MaskPos, InstructionCost Cost)
void setTailFoldingStyles(bool IsScalableVF, unsigned UserIC)
Selects and saves TailFoldingStyle for 2 options - if IV update may overflow or not.
AssumptionCache * AC
Assumption cache.
void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for instruction I and vector width VF.
InstWidening
Decision that was taken during cost calculation for memory instruction.
bool isScalarWithPredication(Instruction *I, ElementCount VF) const
Returns true if I is an instruction which requires predication and for which our chosen predication s...
InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF) const
Estimate cost of a call instruction CI if it were vectorized with factor VF.
bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) const
Returns true if we should use strict in-order reductions for the given RdxDesc.
std::pair< InstructionCost, InstructionCost > getDivRemSpeculationCost(Instruction *I, ElementCount VF) const
Return the costs for our two available strategies for lowering a div/rem operation which requires spe...
bool isDivRemScalarWithPredication(InstructionCost ScalarCost, InstructionCost SafeDivisorCost) const
Given costs for both strategies, return true if the scalar predication lowering should be used for di...
InstructionCost expectedCost(ElementCount VF)
Returns the expected execution cost.
void setCostBasedWideningDecision(ElementCount VF)
Memory access instruction may be vectorized in more than one way.
InstWidening getWideningDecision(Instruction *I, ElementCount VF) const
Return the cost model decision for the given instruction I and vector width VF.
FixedScalableVFPair MaxPermissibleVFWithoutMaxBW
The highest VF possible for this loop, without using MaxBandwidth.
bool isScalarEpilogueAllowed() const
Returns true if a scalar epilogue is not allowed due to optsize or a loop hint annotation.
InstructionCost getWideningCost(Instruction *I, ElementCount VF)
Return the vectorization cost for the given instruction I and vector width VF.
void collectInstsToScalarize(ElementCount VF)
Collects the instructions to scalarize for each predicated instruction in the loop.
LoopVectorizationLegality checks if it is legal to vectorize a loop, and to what vectorization factor...
MapVector< PHINode *, InductionDescriptor > InductionList
InductionList saves induction variables and maps them to the induction descriptor.
const SmallPtrSetImpl< const Instruction * > & getPotentiallyFaultingLoads() const
Returns potentially faulting loads.
bool canVectorize(bool UseVPlanNativePath)
Returns true if it is legal to vectorize this loop.
bool canVectorizeFPMath(bool EnableStrictReductions)
Returns true if it is legal to vectorize the FP math operations in this loop.
PHINode * getPrimaryInduction()
Returns the primary induction variable.
const SmallVector< BasicBlock *, 4 > & getCountableExitingBlocks() const
Returns all exiting blocks with a countable exit, i.e.
const InductionList & getInductionVars() const
Returns the induction variables found in the loop.
bool hasUncountableEarlyExit() const
Returns true if the loop has exactly one uncountable early exit, i.e.
bool hasHistograms() const
Returns a list of all known histogram operations in the loop.
const LoopAccessInfo * getLAI() const
Planner drives the vectorization process after having passed Legality checks.
VectorizationFactor selectEpilogueVectorizationFactor(const ElementCount MaxVF, unsigned IC)
VPlan & getPlanFor(ElementCount VF) const
Return the VPlan for VF.
Definition VPlan.cpp:1576
VectorizationFactor planInVPlanNativePath(ElementCount UserVF)
Use the VPlan-native path to plan how to best vectorize, return the best VF and its cost.
void updateLoopMetadataAndProfileInfo(Loop *VectorLoop, VPBasicBlock *HeaderVPBB, const VPlan &Plan, bool VectorizingEpilogue, MDNode *OrigLoopID, std::optional< unsigned > OrigAverageTripCount, unsigned OrigLoopInvocationWeight, unsigned EstimatedVFxUF, bool DisableRuntimeUnroll)
Update loop metadata and profile info for both the scalar remainder loop and VectorLoop,...
Definition VPlan.cpp:1627
void buildVPlans(ElementCount MinVF, ElementCount MaxVF)
Build VPlans for power-of-2 VF's between MinVF and MaxVF inclusive, according to the information gath...
Definition VPlan.cpp:1560
VectorizationFactor computeBestVF()
Compute and return the most profitable vectorization factor.
DenseMap< const SCEV *, Value * > executePlan(ElementCount VF, unsigned UF, VPlan &BestPlan, InnerLoopVectorizer &LB, DominatorTree *DT, bool VectorizingEpilogue)
Generate the IR code for the vectorized loop captured in VPlan BestPlan according to the best selecte...
unsigned selectInterleaveCount(VPlan &Plan, ElementCount VF, InstructionCost LoopCost)
void emitInvalidCostRemarks(OptimizationRemarkEmitter *ORE)
Emit remarks for recipes with invalid costs in the available VPlans.
static bool getDecisionAndClampRange(const std::function< bool(ElementCount)> &Predicate, VFRange &Range)
Test a Predicate on a Range of VF's.
Definition VPlan.cpp:1541
void printPlans(raw_ostream &O)
Definition VPlan.cpp:1705
void plan(ElementCount UserVF, unsigned UserIC)
Build VPlans for the specified UserVF and UserIC if they are non-zero or all applicable candidate VFs...
void addMinimumIterationCheck(VPlan &Plan, ElementCount VF, unsigned UF, ElementCount MinProfitableTripCount) const
Create a check to Plan to see if the vector loop should be executed based on its trip count.
bool hasPlanWithVF(ElementCount VF) const
Look through the existing plans and return true if we have one with vectorization factor VF.
This holds vectorization requirements that must be verified late in the process.
Utility class for getting and setting loop vectorizer hints in the form of loop metadata.
bool allowVectorization(Function *F, Loop *L, bool VectorizeOnlyWhenForced) const
void emitRemarkWithHints() const
Dumps all the hint information.
const char * vectorizeAnalysisPassName() const
If hints are provided that force vectorization, use the AlwaysPrint pass name to force the frontend t...
This class emits a version of the loop where run-time checks ensure that may-alias pointers can't ove...
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
bool hasLoopInvariantOperands(const Instruction *I) const
Return true if all the operands of the specified instruction are loop invariant.
Definition LoopInfo.cpp:67
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
Definition LoopInfo.cpp:632
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
Definition LoopInfo.cpp:61
Metadata node.
Definition Metadata.h:1078
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition MapVector.h:124
Function * getFunction(StringRef Name) const
Look up the specified function in the module symbol table.
Definition Module.cpp:230
Diagnostic information for optimization analysis remarks related to pointer aliasing.
Diagnostic information for optimization analysis remarks related to floating-point non-commutativity.
Diagnostic information for optimization analysis remarks.
The optimization diagnostic interface.
LLVM_ABI void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for missed-optimization remarks.
Diagnostic information for applied optimization remarks.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
void setIncomingValueForBlock(const BasicBlock *BB, Value *V)
Set every incoming value(s) for block BB to V.
Value * getIncomingValueForBlock(const BasicBlock *BB) const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
LLVM_ABI const SCEVPredicate & getPredicate() const
LLVM_ABI unsigned getSmallConstantMaxTripCount()
Returns the upper bound of the loop trip count as a normal unsigned value, or 0 if the trip count is ...
LLVM_ABI const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
The RecurrenceDescriptor is used to identify recurrences variables in a loop.
static bool isFMulAddIntrinsic(Instruction *I)
Returns true if the instruction is a call to the llvm.fmuladd intrinsic.
FastMathFlags getFastMathFlags() const
Instruction * getLoopExitInstr() const
static LLVM_ABI unsigned getOpcode(RecurKind Kind)
Returns the opcode corresponding to the RecurrenceKind.
Type * getRecurrenceType() const
Returns the type of the recurrence.
bool hasUsesOutsideReductionChain() const
Returns true if the reduction PHI has any uses outside the reduction chain.
const SmallPtrSet< Instruction *, 8 > & getCastInsts() const
Returns a reference to the instructions used for type-promoting the recurrence.
unsigned getMinWidthCastToRecurrenceTypeInBits() const
Returns the minimum width used by the recurrence in bits.
TrackingVH< Value > getRecurrenceStartValue() const
LLVM_ABI SmallVector< Instruction *, 4 > getReductionOpChain(PHINode *Phi, Loop *L) const
Attempts to find a chain of operations from Phi to LoopExitInst that can be treated as a set of reduc...
static bool isAnyOfRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
bool isSigned() const
Returns true if all source operands of the recurrence are SExtInsts.
RecurKind getRecurrenceKind() const
bool isOrdered() const
Expose an ordered FP reduction to the instance users.
static LLVM_ABI bool isFloatingPointRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is a floating point kind.
static bool isFindIVRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
Value * getSentinelValue() const
Returns the sentinel value for FindFirstIV & FindLastIV recurrences to replace the start value.
static bool isMinMaxRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is any min/max kind.
std::optional< ArrayRef< PointerDiffInfo > > getDiffChecks() const
const SmallVectorImpl< RuntimePointerCheck > & getChecks() const
Returns the checks that generateChecks created.
This class uses information about analyze scalars to rewrite expressions in canonical form.
ScalarEvolution * getSE()
bool isInsertedInstruction(Instruction *I) const
Return true if the specified instruction was inserted by the code rewriter.
LLVM_ABI Value * expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc)
Generates a code sequence that evaluates this predicate.
void eraseDeadInstructions(Value *Root)
Remove inserted instructions that are dead, e.g.
virtual bool isAlwaysTrue() const =0
Returns true if the predicate is always true.
This class represents an analyzed expression in the program.
LLVM_ABI bool isZero() const
Return true if the expression is a constant zero.
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
The main scalar evolution driver.
LLVM_ABI const SCEV * getURemExpr(const SCEV *LHS, const SCEV *RHS)
Represents an unsigned remainder expression based on unsigned division.
LLVM_ABI const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...
LLVM_ABI const SCEV * getConstant(ConstantInt *V)
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
LLVM_ABI const SCEV * getTripCountFromExitCount(const SCEV *ExitCount)
A version of getTripCountFromExitCount below which always picks an evaluation type which can not resu...
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
LLVM_ABI void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may effect Scalar...
LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
LLVM_ABI bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
LLVM_ABI const SCEV * getElementCount(Type *Ty, ElementCount EC, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
LLVM_ABI void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may effect its v...
LLVM_ABI void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
const SCEV * getMinusOne(Type *Ty)
Return a SCEV for the constant -1 of a specific type.
LLVM_ABI void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be...
LLVM_ABI unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
APInt getUnsignedRangeMax(const SCEV *S)
Determine the max of the unsigned range for a particular SCEV.
LLVM_ABI const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
LLVM_ABI const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
This class represents the LLVM 'select' instruction.
A vector that has set insertion semantics.
Definition SetVector.h:58
size_type size() const
Determine the number of elements in the SetVector.
Definition SetVector.h:101
void insert_range(Range &&R)
Definition SetVector.h:174
size_type count(const key_type &key) const
Count the number of elements of a given key in the SetVector.
Definition SetVector.h:260
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:149
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:337
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
LLVM_ABI std::optional< unsigned > getVScaleForTuning() const
LLVM_ABI InstructionCost getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={}) const
Estimate the overhead of scalarizing an instruction.
LLVM_ABI bool supportsEfficientVectorElementLoadStore() const
If target has efficient vector element load/store instructions, it can return true here so that inser...
LLVM_ABI bool prefersVectorizedAddressing() const
Return true if target doesn't mind addresses in vectors.
LLVM_ABI TypeSize getRegisterBitWidth(RegisterKind K) const
LLVM_ABI bool preferFixedOverScalableIfEqualCost(bool IsEpilogue) const
LLVM_ABI InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, OperandValueInfo OpdInfo={OK_AnyValue, OP_None}, const Instruction *I=nullptr) const
LLVM_ABI InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, bool UseMaskForCond=false, bool UseMaskForGaps=false) const
LLVM_ABI InstructionCost getShuffleCost(ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy, ArrayRef< int > Mask={}, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, int Index=0, VectorType *SubTp=nullptr, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const
static LLVM_ABI PartialReductionExtendKind getPartialReductionExtendKind(Instruction *I)
Get the kind of extension that an instruction represents.
static LLVM_ABI OperandValueInfo getOperandInfo(const Value *V)
Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
LLVM_ABI bool isElementTypeLegalForScalableVector(Type *Ty) const
LLVM_ABI ElementCount getMinimumVF(unsigned ElemWidth, bool IsScalable) const
TargetCostKind
The kind of cost model.
@ TCK_RecipThroughput
Reciprocal throughput.
@ TCK_CodeSize
Instruction code size.
@ TCK_SizeAndLatency
The weighted sum of size and latency.
@ TCK_Latency
The latency of instruction.
LLVM_ABI InstructionCost getMemIntrinsicInstrCost(const MemIntrinsicCostAttributes &MICA, TTI::TargetCostKind CostKind) const
LLVM_ABI InstructionCost getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE, const SCEV *Ptr, TTI::TargetCostKind CostKind) const
LLVM_ABI bool supportsScalableVectors() const
@ TCC_Free
Expected to fold away in lowering.
LLVM_ABI InstructionCost getInstructionCost(const User *U, ArrayRef< const Value * > Operands, TargetCostKind CostKind) const
Estimate the cost of a given IR user when lowered.
LLVM_ABI InstructionCost getIndexedVectorInstrCostFromEnd(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) const
LLVM_ABI InstructionCost getOperandsScalarizationOverhead(ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const
Estimate the overhead of scalarizing operands with the given types.
@ SK_Splice
Concatenates elements from the first input vector with elements of the second input vector.
@ SK_Broadcast
Broadcast element 0 to all other elements.
@ SK_Reverse
Reverse the order of the vector.
LLVM_ABI InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency, const Instruction *I=nullptr) const
CastContextHint
Represents a hint about the context in which a cast is used.
@ Reversed
The cast is used with a reversed load/store.
@ Masked
The cast is used with a masked load/store.
@ None
The cast is not used with a load/store of any kind.
@ Normal
The cast is used with a normal load/store.
@ Interleave
The cast is used with an interleaved load/store.
@ GatherScatter
The cast is used with a gather/scatter.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
This class implements a switch-like dispatch statement for a value of 'T' using dyn_cast functionalit...
Definition TypeSwitch.h:88
TypeSwitch< T, ResultT > & Case(CallableT &&caseFn)
Add a case on the given type.
Definition TypeSwitch.h:97
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM_ABI unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:280
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:197
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:128
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:293
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
op_range operands()
Definition User.h:292
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:24
Value * getOperand(unsigned i) const
Definition User.h:232
static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)
Retrieve all the VFInfo instances associated to the CallInst CI.
Definition VectorUtils.h:74
VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph.
Definition VPlan.h:3971
void appendRecipe(VPRecipeBase *Recipe)
Augment the existing recipes of a VPBasicBlock with an additional Recipe as the last recipe.
Definition VPlan.h:4046
RecipeListTy::iterator iterator
Instruction iterators...
Definition VPlan.h:3998
iterator end()
Definition VPlan.h:4008
iterator begin()
Recipe iterator methods.
Definition VPlan.h:4006
iterator_range< iterator > phis()
Returns an iterator range over the PHI-like recipes in the block.
Definition VPlan.h:4059
InstructionCost cost(ElementCount VF, VPCostContext &Ctx) override
Return the cost of this VPBasicBlock.
Definition VPlan.cpp:763
iterator getFirstNonPhi()
Return the position of the first non-phi node recipe in the block.
Definition VPlan.cpp:216
VPRegionBlock * getEnclosingLoopRegion()
Definition VPlan.cpp:578
VPRecipeBase * getTerminator()
If the block has multiple successors, return the branch recipe terminating the block.
Definition VPlan.cpp:623
void insert(VPRecipeBase *Recipe, iterator InsertPt)
Definition VPlan.h:4037
bool empty() const
Definition VPlan.h:4017
VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
Definition VPlan.h:81
VPRegionBlock * getParent()
Definition VPlan.h:173
const VPBasicBlock * getExitingBasicBlock() const
Definition VPlan.cpp:186
void setName(const Twine &newName)
Definition VPlan.h:166
size_t getNumSuccessors() const
Definition VPlan.h:219
void swapSuccessors()
Swap successors of the block. The block must have exactly 2 successors.
Definition VPlan.h:322
size_t getNumPredecessors() const
Definition VPlan.h:220
VPlan * getPlan()
Definition VPlan.cpp:161
const VPBasicBlock * getEntryBasicBlock() const
Definition VPlan.cpp:166
VPBlockBase * getSingleSuccessor() const
Definition VPlan.h:209
const VPBlocksTy & getSuccessors() const
Definition VPlan.h:198
static auto blocksOnly(const T &Range)
Return an iterator range over Range which only includes BlockTy blocks.
Definition VPlanUtils.h:211
static void insertOnEdge(VPBlockBase *From, VPBlockBase *To, VPBlockBase *BlockPtr)
Inserts BlockPtr on the edge between From and To.
Definition VPlanUtils.h:232
static void connectBlocks(VPBlockBase *From, VPBlockBase *To, unsigned PredIdx=-1u, unsigned SuccIdx=-1u)
Connect VPBlockBases From and To bi-directionally.
Definition VPlanUtils.h:170
static void reassociateBlocks(VPBlockBase *Old, VPBlockBase *New)
Reassociate all the blocks connected to Old so that they now point to New.
Definition VPlanUtils.h:197
VPlan-based builder utility analogous to IRBuilder.
VPPhi * createScalarPhi(ArrayRef< VPValue * > IncomingValues, DebugLoc DL, const Twine &Name="")
VPInstruction * createNaryOp(unsigned Opcode, ArrayRef< VPValue * > Operands, Instruction *Inst=nullptr, const VPIRFlags &Flags={}, const VPIRMetadata &MD={}, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="")
Create an N-ary operation with Opcode, Operands and set Inst as its underlying Instruction.
Canonical scalar induction phi of the vector loop.
Definition VPlan.h:3552
unsigned getNumDefinedValues() const
Returns the number of values defined by the VPDef.
Definition VPlanValue.h:431
VPValue * getVPSingleValue()
Returns the only VPValue defined by the VPDef.
Definition VPlanValue.h:404
void execute(VPTransformState &State) override
Generate the transformed value of the induction at offset StartValue (1.
VPValue * getStepValue() const
Definition VPlan.h:3771
VPValue * getStartValue() const
Definition VPlan.h:3770
A pure virtual base class for all recipes modeling header phis, including phis for first order recurr...
Definition VPlan.h:2054
virtual VPValue * getBackedgeValue()
Returns the incoming value from the loop backedge.
Definition VPlan.h:2097
VPValue * getStartValue()
Returns the start value of the phi, if one is set.
Definition VPlan.h:2086
A recipe representing a sequence of load -> update -> store as part of a histogram operation.
Definition VPlan.h:1762
A special type of VPBasicBlock that wraps an existing IR basic block.
Definition VPlan.h:4124
Class to record and manage LLVM IR flags.
Definition VPlan.h:609
Helper to manage IR metadata for recipes.
Definition VPlan.h:982
This is a concrete Recipe that models a single VPlan-level instruction.
Definition VPlan.h:1031
@ ComputeAnyOfResult
Compute the final result of a AnyOf reduction with select(cmp(),x,y), where one of (x,...
Definition VPlan.h:1069
@ ResumeForEpilogue
Explicit user for the resume phi of the canonical induction in the main VPlan, used by the epilogue v...
Definition VPlan.h:1127
@ ReductionStartVector
Start vector for reductions with 3 operands: the original start value, the identity value for the red...
Definition VPlan.h:1118
unsigned getOpcode() const
Definition VPlan.h:1179
VPInterleaveRecipe is a recipe for transforming an interleave group of load or stores into one wide l...
Definition VPlan.h:2691
In what follows, the term "input IR" refers to code that is fed into the vectorizer whereas the term ...
detail::zippy< llvm::detail::zip_first, VPUser::const_operand_range, const_incoming_blocks_range > incoming_values_and_blocks() const
Returns an iterator range over pairs of incoming values and corresponding incoming blocks.
Definition VPlan.h:1359
VPRecipeBase is a base class modeling a sequence of one or more output IR instructions.
Definition VPlan.h:387
VPBasicBlock * getParent()
Definition VPlan.h:408
DebugLoc getDebugLoc() const
Returns the debug location of the recipe.
Definition VPlan.h:479
void moveBefore(VPBasicBlock &BB, iplist< VPRecipeBase >::iterator I)
Unlink this recipe and insert into BB before I.
void insertBefore(VPRecipeBase *InsertPos)
Insert an unlinked recipe into a basic block immediately before the specified recipe.
iplist< VPRecipeBase >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Helper class to create VPRecipies from IR instructions.
VPRecipeBase * tryToCreateWidenRecipe(VPSingleDefRecipe *R, VFRange &Range)
Create and return a widened recipe for R if one can be created within the given VF Range.
VPValue * getBlockInMask(VPBasicBlock *VPBB) const
Returns the entry mask for block VPBB or null if the mask is all-true.
VPValue * getVPValueOrAddLiveIn(Value *V)
VPRecipeBase * tryToCreatePartialReduction(VPInstruction *Reduction, unsigned ScaleFactor)
Create and return a partial reduction recipe for a reduction instruction along with binary operation ...
std::optional< unsigned > getScalingForReduction(const Instruction *ExitInst)
void collectScaledReductions(VFRange &Range)
Find all possible partial reductions in the loop and track all of those that are valid so recipes can...
VPReplicateRecipe * handleReplication(VPInstruction *VPI, VFRange &Range)
Build a VPReplicationRecipe for VPI.
A recipe for handling reduction phis.
Definition VPlan.h:2432
bool isInLoop() const
Returns true if the phi is part of an in-loop reduction.
Definition VPlan.h:2487
RecurKind getRecurrenceKind() const
Returns the recurrence kind of the reduction.
Definition VPlan.h:2481
A recipe to represent inloop, ordered or partial reduction operations.
Definition VPlan.h:2784
VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks which form a Single-Entry-S...
Definition VPlan.h:4159
const VPBlockBase * getEntry() const
Definition VPlan.h:4195
VPCanonicalIVPHIRecipe * getCanonicalIV()
Returns the canonical induction recipe of the region.
Definition VPlan.h:4257
VPReplicateRecipe replicates a given instruction producing multiple scalar copies of the original sca...
Definition VPlan.h:2940
VPSingleDef is a base class for recipes for modeling a sequence of one or more output IR that define ...
Definition VPlan.h:531
Instruction * getUnderlyingInstr()
Returns the underlying instruction.
Definition VPlan.h:595
An analysis for type-inference for VPValues.
Type * inferScalarType(const VPValue *V)
Infer the type of V. Returns the scalar type of V.
This class augments VPValue with operands which provide the inverse def-use edges from VPValue's user...
Definition VPlanValue.h:207
operand_range operands()
Definition VPlanValue.h:275
void setOperand(unsigned I, VPValue *New)
Definition VPlanValue.h:251
unsigned getNumOperands() const
Definition VPlanValue.h:245
operand_iterator op_begin()
Definition VPlanValue.h:271
VPValue * getOperand(unsigned N) const
Definition VPlanValue.h:246
void addOperand(VPValue *Operand)
Definition VPlanValue.h:240
This is the base class of the VPlan Def/Use graph, used for modeling the data flow into,...
Definition VPlanValue.h:48
VPRecipeBase * getDefiningRecipe()
Returns the recipe defining this VPValue or nullptr if it is not defined by a recipe,...
Definition VPlan.cpp:131
Value * getLiveInIRValue() const
Returns the underlying IR value, if this VPValue is defined outside the scope of VPlan.
Definition VPlanValue.h:183
Value * getUnderlyingValue() const
Return the underlying Value attached to this VPValue.
Definition VPlanValue.h:85
void replaceAllUsesWith(VPValue *New)
Definition VPlan.cpp:1377
void replaceUsesWithIf(VPValue *New, llvm::function_ref< bool(VPUser &U, unsigned Idx)> ShouldReplace)
Go through the uses list for this VPValue and make each use point to New if the callback ShouldReplac...
Definition VPlan.cpp:1381
user_range users()
Definition VPlanValue.h:134
A recipe to compute a pointer to the last element of each part of a widened memory access for widened...
Definition VPlan.h:1916
VPWidenCastRecipe is a recipe to create vector cast instructions.
Definition VPlan.h:1557
A recipe for handling GEP instructions.
Definition VPlan.h:1853
A recipe for handling phi nodes of integer and floating-point inductions, producing their vector valu...
Definition VPlan.h:2197
A common base class for widening memory operations.
Definition VPlan.h:3251
A recipe for widened phis.
Definition VPlan.h:2331
VPWidenRecipe is a recipe for producing a widened instruction using the opcode and operands of the re...
Definition VPlan.h:1509
VPlan models a candidate for vectorization, encoding various decisions take to produce efficient outp...
Definition VPlan.h:4289
bool hasVF(ElementCount VF) const
Definition VPlan.h:4494
VPBasicBlock * getEntry()
Definition VPlan.h:4382
VPValue & getVectorTripCount()
The vector trip count.
Definition VPlan.h:4473
VPValue & getVF()
Returns the VF of the vector loop region.
Definition VPlan.h:4476
VPValue * getTripCount() const
The trip count of the original loop.
Definition VPlan.h:4444
iterator_range< SmallSetVector< ElementCount, 2 >::iterator > vectorFactors() const
Returns an iterator range over all VFs of the plan.
Definition VPlan.h:4501
bool hasUF(unsigned UF) const
Definition VPlan.h:4512
ArrayRef< VPIRBasicBlock * > getExitBlocks() const
Return an ArrayRef containing VPIRBasicBlocks wrapping the exit blocks of the original scalar loop.
Definition VPlan.h:4434
LLVM_ABI_FOR_TEST VPRegionBlock * getVectorLoopRegion()
Returns the VPRegionBlock of the vector loop.
Definition VPlan.cpp:1011
bool hasEarlyExit() const
Returns true if the VPlan is based on a loop with an early exit.
Definition VPlan.h:4650
InstructionCost cost(ElementCount VF, VPCostContext &Ctx)
Return the cost of this plan.
Definition VPlan.cpp:993
void resetTripCount(VPValue *NewTripCount)
Resets the trip count for the VPlan.
Definition VPlan.h:4458
VPBasicBlock * getMiddleBlock()
Returns the 'middle' block of the plan, that is the block that selects whether to execute the scalar ...
Definition VPlan.h:4407
VPValue * getOrAddLiveIn(Value *V)
Gets the live-in VPValue for V or adds a new live-in (if none exists yet) for V.
Definition VPlan.h:4536
VPBasicBlock * getScalarPreheader() const
Return the VPBasicBlock for the preheader of the scalar loop.
Definition VPlan.h:4425
void execute(VPTransformState *State)
Generate the IR code for this VPlan.
Definition VPlan.cpp:905
VPIRBasicBlock * getScalarHeader() const
Return the VPIRBasicBlock wrapping the header of the scalar loop.
Definition VPlan.h:4430
VPValue * getLiveIn(Value *V) const
Return the live-in VPValue for V, if there is one or nullptr otherwise.
Definition VPlan.h:4573
VPBasicBlock * getVectorPreheader()
Returns the preheader of the vector loop region, if one exists, or null otherwise.
Definition VPlan.h:4387
LLVM_ABI_FOR_TEST VPlan * duplicate()
Clone the current VPlan, update all VPValues of the new VPlan and cloned recipes to refer to the clon...
Definition VPlan.cpp:1153
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition Value.cpp:166
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:390
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:546
iterator_range< user_iterator > users()
Definition Value.h:426
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:701
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:202
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition DenseSet.h:175
constexpr bool hasKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns true if there exists a value X where RHS.multiplyCoefficientBy(X) will result in a value whos...
Definition TypeSize.h:269
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:230
constexpr bool isNonZero() const
Definition TypeSize.h:155
constexpr ScalarTy getKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns a value X where RHS.multiplyCoefficientBy(X) will result in a value whose quantity matches ou...
Definition TypeSize.h:277
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:216
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr LeafTy multiplyCoefficientBy(ScalarTy RHS) const
Definition TypeSize.h:256
constexpr bool isFixed() const
Returns true if the quantity is not scaled by vscale.
Definition TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
constexpr bool isZero() const
Definition TypeSize.h:153
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:223
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
Definition TypeSize.h:252
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:237
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
IteratorT end() const
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
Changed
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ Tail
Attemps to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition CallingConv.h:76
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:189
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
OneOps_match< OpTy, Instruction::Freeze > m_Freeze(const OpTy &Op)
Matches FreezeInst.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
class_match< CmpInst > m_Cmp()
Matches any compare instruction and ignore it.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
MatchFunctor< Val, Pattern > match_fn(const Pattern &P)
A match functor that can be used as a UnaryPredicate in functional algorithms like all_of.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
class_match< const SCEVVScale > m_SCEVVScale()
bind_cst_ty m_scev_APInt(const APInt *&C)
Match an SCEV constant and bind it to an APInt.
specificloop_ty m_SpecificLoop(const Loop *L)
cst_pred_ty< is_specific_signed_cst > m_scev_SpecificSInt(int64_t V)
Match an SCEV constant with a plain signed integer (sign-extended value will be matched)
SCEVAffineAddRec_match< Op0_t, Op1_t, class_match< const Loop > > m_scev_AffineAddRec(const Op0_t &Op0, const Op1_t &Op1)
bind_ty< const SCEVMulExpr > m_scev_Mul(const SCEVMulExpr *&V)
bool match(const SCEV *S, const Pattern &P)
SCEVBinaryExpr_match< SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagAnyWrap, true > m_scev_c_Mul(const Op0_t &Op0, const Op1_t &Op1)
class_match< const SCEV > m_SCEV()
match_combine_or< AllRecipe_match< Instruction::ZExt, Op0_t >, AllRecipe_match< Instruction::SExt, Op0_t > > m_ZExtOrSExt(const Op0_t &Op0)
VPInstruction_match< VPInstruction::ExtractLastElement, Op0_t > m_ExtractLastElement(const Op0_t &Op0)
class_match< VPValue > m_VPValue()
Match an arbitrary VPValue and ignore it.
VPInstruction_match< VPInstruction::ExtractLane, Op0_t, Op1_t > m_ExtractLane(const Op0_t &Op0, const Op1_t &Op1)
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
Add a small namespace to avoid name clashes with the classes used in the streaming interface.
DiagnosticInfoOptimizationBase::Argument NV
NodeAddr< InstrNode * > Instr
Definition RDFGraph.h:389
NodeAddr< PhiNode * > Phi
Definition RDFGraph.h:390
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
bool isSingleScalar(const VPValue *VPV)
Returns true if VPV is a single scalar, either because it produces the same value for all lanes or on...
VPValue * getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr)
Get or create a VPValue that corresponds to the expansion of Expr.
VPBasicBlock * getFirstLoopHeader(VPlan &Plan, VPDominatorTree &VPDT)
Returns the header block of the first, top-level loop, or null if none exist.
VPIRFlags getFlagsFromIndDesc(const InductionDescriptor &ID)
Extracts and returns NoWrap and FastMath flags from the induction binop in ID.
Definition VPlanUtils.h:85
unsigned getVFScaleFactor(VPRecipeBase *R)
Get the VF scaling factor applied to the recipe's output, if the recipe has one.
const SCEV * getSCEVExprForVPValue(const VPValue *V, ScalarEvolution &SE, const Loop *L=nullptr)
Return the SCEV expression for V.
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE, AssumptionCache *AC, MemorySSAUpdater *MSSAU, bool PreserveLCSSA)
Simplify each loop in a loop nest recursively.
LLVM_ABI void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Offset
Definition DWP.cpp:532
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:829
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ABI Value * addRuntimeChecks(Instruction *Loc, Loop *TheLoop, const SmallVectorImpl< RuntimePointerCheck > &PointerChecks, SCEVExpander &Expander, bool HoistRuntimeChecks=false)
Add code that checks at runtime if the accessed arrays in PointerChecks overlap.
auto cast_if_present(const Y &Val)
cast_if_present<X> - Functionally identical to cast, except that a null value is accepted.
Definition Casting.h:683
LLVM_ABI bool RemoveRedundantDbgInstrs(BasicBlock *BB)
Try to remove redundant dbg.value instructions from given basic block.
LLVM_ABI_FOR_TEST cl::opt< bool > VerifyEachVPlan
LLVM_ABI std::optional< unsigned > getLoopEstimatedTripCount(Loop *L, unsigned *EstimatedLoopInvocationWeight=nullptr)
Return either:
static void reportVectorization(OptimizationRemarkEmitter *ORE, Loop *TheLoop, VectorizationFactor VF, unsigned IC)
Report successful vectorization of the loop.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1725
unsigned getLoadStoreAddressSpace(const Value *I)
A helper function that returns the address space of the pointer operand of load or store instruction.
LLVM_ABI Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)
Returns the min/max intrinsic used when expanding a min/max reduction.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1655
LLVM_ABI_FOR_TEST bool verifyVPlanIsValid(const VPlan &Plan, bool VerifyLate=false)
Verify invariants for general VPlans.
LLVM_ABI Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
InstructionCost Cost
ReductionStyle getReductionStyle(bool InLoop, bool Ordered, unsigned ScaleFactor)
Definition VPlan.h:2418
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2472
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
Value * getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF)
Return the runtime value for VF.
LLVM_ABI bool formLCSSARecursively(Loop &L, const DominatorTree &DT, const LoopInfo *LI, ScalarEvolution *SE)
Put a loop nest into LCSSA form.
Definition LCSSA.cpp:449
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2136
LLVM_ABI bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:632
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:284
Align getLoadStoreAlignment(const Value *I)
A helper function that returns the alignment of load or store instruction.
iterator_range< df_iterator< VPBlockShallowTraversalWrapper< VPBlockBase * > > > vp_depth_first_shallow(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order.
Definition VPlanCFG.h:216
LLVM_ABI bool VerifySCEV
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
iterator_range< df_iterator< VPBlockDeepTraversalWrapper< VPBlockBase * > > > vp_depth_first_deep(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order while traversing t...
Definition VPlanCFG.h:243
SmallVector< VPRegisterUsage, 8 > calculateRegisterUsageForPlan(VPlan &Plan, ArrayRef< ElementCount > VFs, const TargetTransformInfo &TTI, const SmallPtrSetImpl< const Value * > &ValuesToIgnore)
Estimate the register usage for Plan and vectorization factors in VFs by calculating the highest numb...
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:337
LLVM_ABI void setBranchWeights(Instruction &I, ArrayRef< uint32_t > Weights, bool IsExpected, bool ElideAllZero=false)
Create a new branch_weights metadata node and add or overwrite a prof metadata reference to instructi...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1732
void collectEphemeralRecipesForVPlan(VPlan &Plan, DenseSet< VPRecipeBase * > &EphRecipes)
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
bool containsIrreducibleCFG(RPOTraversalT &RPOTraversal, const LoopInfoT &LI)
Return true if the control flow in RPOTraversal is irreducible.
Definition CFG.h:149
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1622
LLVM_ABI_FOR_TEST cl::opt< bool > EnableWideActiveLaneMask
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
LLVM_ABI cl::opt< bool > EnableLoopVectorization
LLVM_ABI bool wouldInstructionBeTriviallyDead(const Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction would have no side effects if it was not used.
Definition Local.cpp:421
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
SmallVector< ValueTypeFromRangeType< R >, Size > to_vector(R &&Range)
Given a range of type R, iterate the entire range and return a SmallVector with elements of the vecto...
Type * toVectorizedTy(Type *Ty, ElementCount EC)
A helper for converting to vectorized types.
LLVM_ABI void llvm_unreachable_internal(const char *msg=nullptr, const char *file=nullptr, unsigned line=0)
This function calls abort(), and prints the optional message to stderr.
bool canConstantBeExtended(const APInt *C, Type *NarrowType, TTI::PartialReductionExtendKind ExtKind)
Check if a constant CI can be safely treated as having been extended from a narrower type with the gi...
Definition VPlan.cpp:1718
T * find_singleton(R &&Range, Predicate P, bool AllowRepeats=false)
Return the single value in Range that satisfies P(<member of Range> *, AllowRepeats)->T * returning n...
Definition STLExtras.h:1787
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
cl::opt< unsigned > ForceTargetInstructionCost
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
format_object< Ts... > format(const char *Fmt, const Ts &... Vals)
These are helper functions used to produce formatted output.
Definition Format.h:129
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition MathExtras.h:394
bool canVectorizeTy(Type *Ty)
Returns true if Ty is a valid vector element type, void, or an unpacked literal struct where all elem...
TargetTransformInfo TTI
static void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I=nullptr, DebugLoc DL={})
Reports an informative message: print Msg for debugging purposes as well as an optimization remark.
LLVM_ABI bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
RecurKind
These are the kinds of recurrences that we support.
@ Or
Bitwise or logical OR of integers.
@ FMulAdd
Sum of float products with llvm.fmuladd(a * b + sum).
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
@ AddChainWithSubs
A chain of adds and subs.
LLVM_ABI Value * getRecurrenceIdentity(RecurKind K, Type *Tp, FastMathFlags FMF)
Given information about an recurrence kind, return the identity for the @llvm.vector....
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
LLVM_ABI void reportVectorizationFailure(const StringRef DebugMsg, const StringRef OREMsg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I=nullptr)
Reports a vectorization failure: print DebugMsg for debugging purposes along with the corresponding o...
DWARFExpression::Operation Op
ScalarEpilogueLowering
@ CM_ScalarEpilogueNotAllowedLowTripLoop
@ CM_ScalarEpilogueNotNeededUsePredicate
@ CM_ScalarEpilogueNotAllowedOptSize
@ CM_ScalarEpilogueAllowed
@ CM_ScalarEpilogueNotAllowedUsePredicate
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
ArrayRef(const T &OneElt) -> ArrayRef< T >
Value * createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF, int64_t Step)
Return a value for Step multiplied by VF.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="", bool Before=false)
Split the specified block at the specified instruction.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1758
auto predecessors(const MachineBasicBlock *BB)
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition iterator.h:363
cl::opt< bool > EnableVPlanNativePath
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
ArrayRef< Type * > getContainedTypes(Type *const &Ty)
Returns the types contained in Ty.
LLVM_ABI Value * addDiffRuntimeChecks(Instruction *Loc, ArrayRef< PointerDiffInfo > Checks, SCEVExpander &Expander, function_ref< Value *(IRBuilderBase &, unsigned)> GetVF, unsigned IC)
std::variant< RdxOrdered, RdxInLoop, RdxUnordered > ReductionStyle
Definition VPlan.h:2416
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
@ DataAndControlFlowWithoutRuntimeCheck
Use predicate to control both data and control flow, but modify the trip count so that a runtime over...
@ None
Don't use tail folding.
@ DataWithEVL
Use predicated EVL instructions for tail-folding.
@ DataAndControlFlow
Use predicate to control both data and control flow.
@ DataWithoutLaneMask
Same as Data, but avoids using the get.active.lane.mask intrinsic to calculate the mask and instead i...
@ Data
Use predicate only to mask operations on data in the loop.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI bool hasBranchWeightMD(const Instruction &I)
Checks if an instructions has Branch Weight Metadata.
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
Definition Hashing.h:592
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
Definition bit.h:330
Type * toVectorTy(Type *Scalar, ElementCount EC)
A helper function for converting Scalar types to vector types.
std::unique_ptr< VPlan > VPlanPtr
Definition VPlan.h:77
constexpr detail::IsaCheckPredicate< Types... > IsaPred
Function object wrapper for the llvm::isa type check.
Definition Casting.h:866
LLVM_ABI MapVector< Instruction *, uint64_t > computeMinimumValueSizes(ArrayRef< BasicBlock * > Blocks, DemandedBits &DB, const TargetTransformInfo *TTI=nullptr)
Compute a map of integer instructions to their minimum legal type size.
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Definition Hashing.h:466
LLVM_ABI cl::opt< bool > EnableLoopInterleaving
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:869
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI void collectEphemeralValues(const Loop *L, AssumptionCache *AC, SmallPtrSetImpl< const Value * > &EphValues)
Collect a loop's ephemeral values (those used only by an assume or similar intrinsics in the loop).
An information struct used to provide DenseMap with the various necessary components for a given valu...
Encapsulate information regarding vectorization of a loop and its epilogue.
EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF, ElementCount EVF, unsigned EUF, VPlan &EpiloguePlan)
A class that represents two vectorization factors (initialized with 0 by default).
static FixedScalableVFPair getNone()
This holds details about a histogram operation – a load -> update -> store sequence where each lane i...
Incoming for lane maks phi as machine instruction, incoming register Reg and incoming block Block are...
TargetLibraryInfo * TLI
LLVM_ABI LoopVectorizeResult runImpl(Function &F)
LLVM_ABI bool processLoop(Loop *L)
ProfileSummaryInfo * PSI
LoopAccessInfoManager * LAIs
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI LoopVectorizePass(LoopVectorizeOptions Opts={})
BlockFrequencyInfo * BFI
ScalarEvolution * SE
AssumptionCache * AC
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
OptimizationRemarkEmitter * ORE
TargetTransformInfo * TTI
Storage for information about made changes.
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:69
This reduction is unordered with the partial result scaled down by some factor.
Definition VPlan.h:2413
A marker analysis to determine if extra passes should be run after loop vectorization.
static LLVM_ABI AnalysisKey Key
Holds the VFShape for a specific scalar to vector function mapping.
std::optional< unsigned > getParamIndexForOptionalMask() const
Instruction Set Architecture.
Encapsulates information needed to describe a parameter.
A range of powers-of-2 vectorization factors with fixed start and adjustable end.
ElementCount End
Struct to hold various analysis needed for cost computations.
unsigned getPredBlockCostDivisor(BasicBlock *BB) const
LoopVectorizationCostModel & CM
bool isLegacyUniformAfterVectorization(Instruction *I, ElementCount VF) const
Return true if I is considered uniform-after-vectorization in the legacy cost model for VF.
bool skipCostComputation(Instruction *UI, bool IsVector) const
Return true if the cost for UI shouldn't be computed, e.g.
InstructionCost getLegacyCost(Instruction *UI, ElementCount VF) const
Return the cost for UI with VF using the legacy cost model as fallback until computing the cost of al...
TargetTransformInfo::TargetCostKind CostKind
SmallPtrSet< Instruction *, 8 > SkipCostComputation
A recipe for handling first-order recurrence phis.
Definition VPlan.h:2373
A struct that represents some properties of the register usage of a loop.
VPTransformState holds information passed down when "executing" a VPlan, needed for generating the ou...
A recipe for widening select instructions.
Definition VPlan.h:1806
static void hoistPredicatedLoads(VPlan &Plan, ScalarEvolution &SE, const Loop *L)
Hoist predicated loads from the same address to the loop entry block, if they are guaranteed to execu...
static void materializeBroadcasts(VPlan &Plan)
Add explicit broadcasts for live-ins and VPValues defined in Plan's entry block if they are used as v...
static void materializePacksAndUnpacks(VPlan &Plan)
Add explicit Build[Struct]Vector recipes to Pack multiple scalar values into vectors and Unpack recip...
static bool handleMultiUseReductions(VPlan &Plan)
Try to legalize reductions with multiple in-loop uses.
static LLVM_ABI_FOR_TEST std::unique_ptr< VPlan > buildVPlan0(Loop *TheLoop, LoopInfo &LI, Type *InductionTy, DebugLoc IVDL, PredicatedScalarEvolution &PSE, LoopVersioning *LVer=nullptr)
Create a base VPlan0, serving as the common starting point for all later candidates.
static void materializeBackedgeTakenCount(VPlan &Plan, VPBasicBlock *VectorPH)
Materialize the backedge-taken count to be computed explicitly using VPInstructions.
static void optimizeInductionExitUsers(VPlan &Plan, DenseMap< VPValue *, VPValue * > &EndValues, ScalarEvolution &SE)
If there's a single exit block, optimize its phi recipes that use exiting IV values by feeding them p...
static LLVM_ABI_FOR_TEST void handleEarlyExits(VPlan &Plan, bool HasUncountableExit)
Update Plan to account for all early exits.
static void canonicalizeEVLLoops(VPlan &Plan)
Transform EVL loops to use variable-length stepping after region dissolution.
static void dropPoisonGeneratingRecipes(VPlan &Plan, const std::function< bool(BasicBlock *)> &BlockNeedsPredication)
Drop poison flags from recipes that may generate a poison value that is used after vectorization,...
static void createInterleaveGroups(VPlan &Plan, const SmallPtrSetImpl< const InterleaveGroup< Instruction > * > &InterleaveGroups, VPRecipeBuilder &RecipeBuilder, const bool &ScalarEpilogueAllowed)
static bool runPass(bool(*Transform)(VPlan &, ArgsTy...), VPlan &Plan, typename std::remove_reference< ArgsTy >::type &...Args)
Helper to run a VPlan transform Transform on VPlan, forwarding extra arguments to the transform.
static void addBranchWeightToMiddleTerminator(VPlan &Plan, ElementCount VF, std::optional< unsigned > VScaleForTuning)
Add branch weight metadata, if the Plan's middle block is terminated by a BranchOnCond recipe.
static void narrowInterleaveGroups(VPlan &Plan, ElementCount VF, TypeSize VectorRegWidth)
Try to convert a plan with interleave groups with VF elements to a plan with the interleave groups re...
static void unrollByUF(VPlan &Plan, unsigned UF)
Explicitly unroll Plan by UF.
static DenseMap< const SCEV *, Value * > expandSCEVs(VPlan &Plan, ScalarEvolution &SE)
Expand VPExpandSCEVRecipes in Plan's entry block.
static void convertToConcreteRecipes(VPlan &Plan)
Lower abstract recipes to concrete ones, that can be codegen'd.
static void addMinimumIterationCheck(VPlan &Plan, ElementCount VF, unsigned UF, ElementCount MinProfitableTripCount, bool RequiresScalarEpilogue, bool TailFolded, bool CheckNeededWithTailFolding, Loop *OrigLoop, const uint32_t *MinItersBypassWeights, DebugLoc DL, ScalarEvolution &SE)
static void convertToAbstractRecipes(VPlan &Plan, VPCostContext &Ctx, VFRange &Range)
This function converts initial recipes to the abstract recipes and clamps Range based on cost model f...
static void materializeConstantVectorTripCount(VPlan &Plan, ElementCount BestVF, unsigned BestUF, PredicatedScalarEvolution &PSE)
static LLVM_ABI_FOR_TEST bool tryToConvertVPInstructionsToVPRecipes(VPlan &Plan, function_ref< const InductionDescriptor *(PHINode *)> GetIntOrFpInductionDescriptor, const TargetLibraryInfo &TLI)
Replaces the VPInstructions in Plan with corresponding widen recipes.
static void addExitUsersForFirstOrderRecurrences(VPlan &Plan, VFRange &Range)
Handle users in the exit block for first order reductions in the original exit block.
static DenseMap< VPBasicBlock *, VPValue * > introduceMasksAndLinearize(VPlan &Plan, bool FoldTail)
Predicate and linearize the control-flow in the only loop region of Plan.
static void addExplicitVectorLength(VPlan &Plan, const std::optional< unsigned > &MaxEVLSafeElements)
Add a VPEVLBasedIVPHIRecipe and related recipes to Plan and replaces all uses except the canonical IV...
static void replaceSymbolicStrides(VPlan &Plan, PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &StridesMap)
Replace symbolic strides from StridesMap in Plan with constants when possible.
static bool handleMaxMinNumReductions(VPlan &Plan)
Check if Plan contains any FMaxNum or FMinNum reductions.
static void removeBranchOnConst(VPlan &Plan)
Remove BranchOnCond recipes with true or false conditions together with removing dead edges to their ...
static LLVM_ABI_FOR_TEST void createLoopRegions(VPlan &Plan)
Replace loops in Plan's flat CFG with VPRegionBlocks, turning Plan's flat CFG into a hierarchical CFG...
static void removeDeadRecipes(VPlan &Plan)
Remove dead recipes from Plan.
static void attachCheckBlock(VPlan &Plan, Value *Cond, BasicBlock *CheckBlock, bool AddBranchWeights)
Wrap runtime check block CheckBlock in a VPIRBB and Cond in a VPValue and connect the block to Plan,...
static void materializeVectorTripCount(VPlan &Plan, VPBasicBlock *VectorPHVPBB, bool TailByMasking, bool RequiresScalarEpilogue)
Materialize vector trip count computations to a set of VPInstructions.
static void simplifyRecipes(VPlan &Plan)
Perform instcombine-like simplifications on recipes in Plan.
static void replicateByVF(VPlan &Plan, ElementCount VF)
Replace each replicating VPReplicateRecipe and VPInstruction outside of any replicate region in Plan ...
static void clearReductionWrapFlags(VPlan &Plan)
Clear NSW/NUW flags from reduction instructions if necessary.
static void cse(VPlan &Plan)
Perform common-subexpression-elimination on Plan.
static void addActiveLaneMask(VPlan &Plan, bool UseActiveLaneMaskForControlFlow, bool DataAndControlFlowWithoutRuntimeCheck)
Replace (ICMP_ULE, wide canonical IV, backedge-taken-count) checks with an (active-lane-mask recipe,...
static LLVM_ABI_FOR_TEST void optimize(VPlan &Plan)
Apply VPlan-to-VPlan optimizations to Plan, including induction recipe optimizations,...
static void dissolveLoopRegions(VPlan &Plan)
Replace loop regions with explicit CFG.
static void truncateToMinimalBitwidths(VPlan &Plan, const MapVector< Instruction *, uint64_t > &MinBWs)
Insert truncates and extends for any truncated recipe.
static bool adjustFixedOrderRecurrences(VPlan &Plan, VPBuilder &Builder)
Try to have all users of fixed-order recurrences appear after the recipe defining their previous valu...
static void optimizeForVFAndUF(VPlan &Plan, ElementCount BestVF, unsigned BestUF, PredicatedScalarEvolution &PSE)
Optimize Plan based on BestVF and BestUF.
static void materializeVFAndVFxUF(VPlan &Plan, VPBasicBlock *VectorPH, ElementCount VF)
Materialize VF and VFxUF to be computed explicitly using VPInstructions.
static void addMinimumVectorEpilogueIterationCheck(VPlan &Plan, Value *TripCount, Value *VectorTripCount, bool RequiresScalarEpilogue, ElementCount EpilogueVF, unsigned EpilogueUF, unsigned MainLoopStep, unsigned EpilogueLoopStep, ScalarEvolution &SE)
Add a check to Plan to see if the epilogue vector loop should be executed.
static void updateScalarResumePhis(VPlan &Plan, DenseMap< VPValue *, VPValue * > &IVEndValues)
Update the resume phis in the scalar preheader after creating wide recipes for first-order recurrence...
static LLVM_ABI_FOR_TEST void addMiddleCheck(VPlan &Plan, bool RequiresScalarEpilogueCheck, bool TailFolded)
If a check is needed to guard executing the scalar epilogue loop, it will be added to the middle bloc...
TODO: The following VectorizationFactor was pulled out of LoopVectorizationCostModel class.
InstructionCost Cost
Cost of the loop with that width.
ElementCount MinProfitableTripCount
The minimum trip count required to make vectorization profitable, e.g.
ElementCount Width
Vector width with best cost.
InstructionCost ScalarCost
Cost of the scalar loop.
static VectorizationFactor Disabled()
Width 1 means no vectorization, cost 0 means uncomputed cost.
static LLVM_ABI bool HoistRuntimeChecks