LoopVectorize.cpp
1//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10// and generates target-independent LLVM-IR.
11// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12// of instructions in order to estimate the profitability of vectorization.
13//
14// The loop vectorizer combines consecutive loop iterations into a single
15// 'wide' iteration. After this transformation the index is incremented
16// by the SIMD vector width, and not by one.
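// For example, with a vectorization factor (VF) of 4, a loop such as
//
//   for (i = 0; i < n; ++i)
//     a[i] = b[i] + c[i];
//
// is conceptually rewritten (an illustrative sketch that ignores the
// remainder iterations) as
//
//   for (i = 0; i + 3 < n; i += 4)
//     a[i:i+3] = b[i:i+3] + c[i:i+3];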
17//
18// This pass has four parts:
19// 1. The main loop pass that drives the different parts.
20// 2. LoopVectorizationLegality - A unit that checks for the legality
21// of the vectorization.
22// 3. InnerLoopVectorizer - A unit that performs the actual
23// widening of instructions.
24// 4. LoopVectorizationCostModel - A unit that checks for the profitability
25// of vectorization. It decides on the optimal vector width, which
26// can be one, if vectorization is not profitable.
27//
28// There is a development effort going on to migrate the loop vectorizer to the
29// VPlan infrastructure and to introduce outer loop vectorization support (see
30// docs/VectorizationPlan.rst and
31// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32// purpose, we temporarily introduced the VPlan-native vectorization path: an
33// alternative vectorization path that is natively implemented on top of the
34// VPlan infrastructure. See EnableVPlanNativePath for enabling.
35//
36//===----------------------------------------------------------------------===//
37//
38// The reduction-variable vectorization is based on the paper:
39// D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40//
41// Variable uniformity checks are inspired by:
42// Karrenberg, R. and Hack, S. Whole Function Vectorization.
43//
44// The interleaved access vectorization is based on the paper:
45// Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
46// Data for SIMD
47//
48// Other ideas/concepts are from:
49// A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50//
51// S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
52// Vectorizing Compilers.
53//
54//===----------------------------------------------------------------------===//
55
58#include "VPRecipeBuilder.h"
59#include "VPlan.h"
60#include "VPlanAnalysis.h"
61#include "VPlanCFG.h"
62#include "VPlanHelpers.h"
63#include "VPlanPatternMatch.h"
64#include "VPlanTransforms.h"
65#include "VPlanUtils.h"
66#include "VPlanVerifier.h"
67#include "llvm/ADT/APInt.h"
68#include "llvm/ADT/ArrayRef.h"
69#include "llvm/ADT/DenseMap.h"
71#include "llvm/ADT/Hashing.h"
72#include "llvm/ADT/MapVector.h"
73#include "llvm/ADT/STLExtras.h"
76#include "llvm/ADT/Statistic.h"
77#include "llvm/ADT/StringRef.h"
78#include "llvm/ADT/Twine.h"
79#include "llvm/ADT/TypeSwitch.h"
84#include "llvm/Analysis/CFG.h"
101#include "llvm/IR/Attributes.h"
102#include "llvm/IR/BasicBlock.h"
103#include "llvm/IR/CFG.h"
104#include "llvm/IR/Constant.h"
105#include "llvm/IR/Constants.h"
106#include "llvm/IR/DataLayout.h"
107#include "llvm/IR/DebugInfo.h"
108#include "llvm/IR/DebugLoc.h"
109#include "llvm/IR/DerivedTypes.h"
111#include "llvm/IR/Dominators.h"
112#include "llvm/IR/Function.h"
113#include "llvm/IR/IRBuilder.h"
114#include "llvm/IR/InstrTypes.h"
115#include "llvm/IR/Instruction.h"
116#include "llvm/IR/Instructions.h"
118#include "llvm/IR/Intrinsics.h"
119#include "llvm/IR/MDBuilder.h"
120#include "llvm/IR/Metadata.h"
121#include "llvm/IR/Module.h"
122#include "llvm/IR/Operator.h"
123#include "llvm/IR/PatternMatch.h"
125#include "llvm/IR/Type.h"
126#include "llvm/IR/Use.h"
127#include "llvm/IR/User.h"
128#include "llvm/IR/Value.h"
129#include "llvm/IR/Verifier.h"
130#include "llvm/Support/Casting.h"
132#include "llvm/Support/Debug.h"
147#include <algorithm>
148#include <cassert>
149#include <cstdint>
150#include <functional>
151#include <iterator>
152#include <limits>
153#include <memory>
154#include <string>
155#include <tuple>
156#include <utility>
157
158using namespace llvm;
159using namespace SCEVPatternMatch;
160
161#define LV_NAME "loop-vectorize"
162#define DEBUG_TYPE LV_NAME
163
164#ifndef NDEBUG
165const char VerboseDebug[] = DEBUG_TYPE "-verbose";
166#endif
167
168STATISTIC(LoopsVectorized, "Number of loops vectorized");
169STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
170STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
171STATISTIC(LoopsEarlyExitVectorized, "Number of early exit loops vectorized");
172
174 "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
175 cl::desc("Enable vectorization of epilogue loops."));
176
178 "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
179 cl::desc("When epilogue vectorization is enabled, and a value greater than "
180 "1 is specified, forces the given VF for all applicable epilogue "
181 "loops."));
182
184 "epilogue-vectorization-minimum-VF", cl::Hidden,
185 cl::desc("Only loops with vectorization factor equal to or larger than "
186 "the specified value are considered for epilogue vectorization."));
187
188/// Loops with a known constant trip count below this number are vectorized only
189/// if no scalar iteration overheads are incurred.
191 "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
192 cl::desc("Loops with a constant trip count that is smaller than this "
193 "value are vectorized only if no scalar iteration overheads "
194 "are incurred."));
195
197 "vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
198 cl::desc("The maximum allowed number of runtime memory checks"));
199
200// Option prefer-predicate-over-epilogue indicates that an epilogue is undesired,
201// that predication is preferred, and this lists all options. I.e., the
202// vectorizer will try to fold the tail-loop (epilogue) into the vector body
203// and predicate the instructions accordingly. If tail-folding fails, there are
204// different fallback strategies depending on these values:
205namespace PreferPredicateTy {
206enum Option {
207 ScalarEpilogue = 0,
208 PredicateElseScalarEpilogue,
209 PredicateOrDontVectorize
210};
211} // namespace PreferPredicateTy
212
214 "prefer-predicate-over-epilogue",
217 cl::desc("Tail-folding and predication preferences over creating a scalar "
218 "epilogue loop."),
220 "scalar-epilogue",
221 "Don't tail-predicate loops, create scalar epilogue"),
223 "predicate-else-scalar-epilogue",
224 "prefer tail-folding, create scalar epilogue if tail "
225 "folding fails."),
227 "predicate-dont-vectorize",
228 "prefers tail-folding, don't attempt vectorization if "
229 "tail-folding fails.")));
230
232 "force-tail-folding-style", cl::desc("Force the tail folding style"),
235 clEnumValN(TailFoldingStyle::None, "none", "Disable tail folding"),
238 "Create lane mask for data only, using active.lane.mask intrinsic"),
240 "data-without-lane-mask",
241 "Create lane mask with compare/stepvector"),
243 "Create lane mask using active.lane.mask intrinsic, and use "
244 "it for both data and control flow"),
246 "data-and-control-without-rt-check",
247 "Similar to data-and-control, but remove the runtime check"),
249 "Use predicated EVL instructions for tail folding. If EVL "
250 "is unsupported, fallback to data-without-lane-mask.")));
251
253 "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
254 cl::desc("Maximize bandwidth when selecting vectorization factor which "
255 "will be determined by the smallest type in loop."));
256
258 "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
259 cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
260
261/// An interleave-group may need masking if it resides in a block that needs
262/// predication, or in order to mask away gaps.
264 "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
265 cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
266
268 "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
269 cl::desc("A flag that overrides the target's number of scalar registers."));
270
272 "force-target-num-vector-regs", cl::init(0), cl::Hidden,
273 cl::desc("A flag that overrides the target's number of vector registers."));
274
276 "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
277 cl::desc("A flag that overrides the target's max interleave factor for "
278 "scalar loops."));
279
281 "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
282 cl::desc("A flag that overrides the target's max interleave factor for "
283 "vectorized loops."));
284
286 "force-target-instruction-cost", cl::init(0), cl::Hidden,
287 cl::desc("A flag that overrides the target's expected cost for "
288 "an instruction to a single constant value. Mostly "
289 "useful for getting consistent testing."));
290
292 "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
293 cl::desc(
294 "Pretend that scalable vectors are supported, even if the target does "
295 "not support them. This flag should only be used for testing."));
296
298 "small-loop-cost", cl::init(20), cl::Hidden,
299 cl::desc(
300 "The cost of a loop that is considered 'small' by the interleaver."));
301
303 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
304 cl::desc("Enable the use of the block frequency analysis to access PGO "
305 "heuristics minimizing code growth in cold regions and being more "
306 "aggressive in hot regions."));
307
308// Runtime interleave loops for load/store throughput.
310 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
311 cl::desc(
312 "Enable runtime interleaving until load/store ports are saturated"));
313
314/// The number of stores in a loop that are allowed to need predication.
316 "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
317 cl::desc("Max number of stores to be predicated behind an if."));
318
320 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
321 cl::desc("Count the induction variable only once when interleaving"));
322
324 "enable-cond-stores-vec", cl::init(true), cl::Hidden,
325 cl::desc("Enable if predication of stores during vectorization."));
326
328 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
329 cl::desc("The maximum interleave count to use when interleaving a scalar "
330 "reduction in a nested loop."));
331
332static cl::opt<bool>
333 PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
335 cl::desc("Prefer in-loop vector reductions, "
336 "overriding the targets preference."));
337
339 "force-ordered-reductions", cl::init(false), cl::Hidden,
340 cl::desc("Enable the vectorisation of loops with in-order (strict) "
341 "FP reductions"));
342
344 "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
345 cl::desc(
346 "Prefer predicating a reduction operation over an after loop select."));
347
349 "enable-vplan-native-path", cl::Hidden,
350 cl::desc("Enable VPlan-native vectorization path with "
351 "support for outer loop vectorization."));
352
353cl::opt<bool>
354 llvm::VerifyEachVPlan("vplan-verify-each",
355#ifdef EXPENSIVE_CHECKS
356 cl::init(true),
357#else
358 cl::init(false),
359#endif
361 cl::desc("Verfiy VPlans after VPlan transforms."));
362
363// This flag enables the stress testing of the VPlan H-CFG construction in the
364// VPlan-native vectorization path. It must be used in conjunction with
365// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
366// verification of the H-CFGs built.
368 "vplan-build-stress-test", cl::init(false), cl::Hidden,
369 cl::desc(
370 "Build VPlan for every supported loop nest in the function and bail "
371 "out right after the build (stress test the VPlan H-CFG construction "
372 "in the VPlan-native vectorization path)."));
373
375 "interleave-loops", cl::init(true), cl::Hidden,
376 cl::desc("Enable loop interleaving in Loop vectorization passes"));
378 "vectorize-loops", cl::init(true), cl::Hidden,
379 cl::desc("Run the Loop vectorization passes"));
380
382 "force-widen-divrem-via-safe-divisor", cl::Hidden,
383 cl::desc(
384 "Override cost based safe divisor widening for div/rem instructions"));
385
387 "vectorizer-maximize-bandwidth-for-vector-calls", cl::init(true),
389 cl::desc("Try wider VFs if they enable the use of vector variants"));
390
392 "enable-early-exit-vectorization", cl::init(true), cl::Hidden,
393 cl::desc(
394 "Enable vectorization of early exit loops with uncountable exits."));
395
397 "vectorizer-consider-reg-pressure", cl::init(false), cl::Hidden,
398 cl::desc("Discard VFs if their register pressure is too high."));
399
400// Likelihood of bypassing the vectorized loop because there are zero trips left
401// after prolog. See `emitIterationCountCheck`.
402static constexpr uint32_t MinItersBypassWeights[] = {1, 127};
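// Given these weights, the bypass branch is predicted taken roughly 1 time in
// 128; in the common case the vector loop is assumed to execute.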
403
404/// A helper function that returns true if the given type is irregular. The
405/// type is irregular if its allocated size doesn't equal the store size of an
406/// element of the corresponding vector type.
407static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
408 // Determine if an array of N elements of type Ty is "bitcast compatible"
409 // with a <N x Ty> vector.
410 // This is only true if there is no padding between the array elements.
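 // Illustrative example: x86_fp80 is irregular because its type size is 80
 // bits while its allocated size is 96 or 128 bits depending on the ABI,
 // leaving padding between array elements; i32 (32/32 bits) is regular.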
411 return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
412}
413
414/// A version of ScalarEvolution::getSmallConstantTripCount that returns an
415/// ElementCount to include loops whose trip count is a function of vscale.
416static ElementCount getSmallConstantTripCount(ScalarEvolution *SE,
417 const Loop *L) {
418 if (unsigned ExpectedTC = SE->getSmallConstantTripCount(L))
419 return ElementCount::getFixed(ExpectedTC);
420
421 const SCEV *BTC = SE->getBackedgeTakenCount(L);
422 if (isa<SCEVCouldNotCompute>(BTC))
423 return ElementCount::getFixed(0);
424
425 const SCEV *ExitCount = SE->getTripCountFromExitCount(BTC, BTC->getType(), L);
426 if (isa<SCEVVScale>(ExitCount))
428
429 const APInt *Scale;
430 if (match(ExitCount, m_scev_Mul(m_scev_APInt(Scale), m_SCEVVScale())))
431 if (cast<SCEVMulExpr>(ExitCount)->hasNoUnsignedWrap())
432 if (Scale->getActiveBits() <= 32)
433 return ElementCount::getScalable(Scale->getZExtValue());
434
435 return ElementCount::getFixed(0);
436}
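// Illustrative example: for a loop whose backedge-taken count is the SCEV
// (4 * vscale - 1), the exit count is (4 * vscale), which the multiply match
// above turns into ElementCount::getScalable(4).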
437
438/// Returns "best known" trip count, which is either a valid positive trip count
439/// or std::nullopt when an estimate cannot be made (including when the trip
440/// count would overflow), for the specified loop \p L as defined by the
441/// following procedure:
442/// 1) Returns exact trip count if it is known.
443/// 2) Returns expected trip count according to profile data if any.
444/// 3) Returns upper bound estimate if known, and if \p CanUseConstantMax.
445/// 4) Returns std::nullopt if all of the above failed.
446static std::optional<ElementCount>
447getSmallBestKnownTC(PredicatedScalarEvolution &PSE, Loop *L,
448 bool CanUseConstantMax = true) {
449 // Check if exact trip count is known.
450 if (auto ExpectedTC = getSmallConstantTripCount(PSE.getSE(), L))
451 return ExpectedTC;
452
453 // Check if there is an expected trip count available from profile data.
454 if (LoopVectorizeWithBlockFrequency)
455 if (auto EstimatedTC = getLoopEstimatedTripCount(L))
456 return ElementCount::getFixed(*EstimatedTC);
457
458 if (!CanUseConstantMax)
459 return std::nullopt;
460
461 // Check if upper bound estimate is known.
462 if (unsigned ExpectedTC = PSE.getSmallConstantMaxTripCount())
463 return ElementCount::getFixed(ExpectedTC);
464
465 return std::nullopt;
466}
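// Illustrative example: a loop with no computable exact trip count but with
// branch-weight profile data implying ~100 iterations returns getFixed(100)
// via step 2 above.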
467
468namespace {
469// Forward declare GeneratedRTChecks.
470class GeneratedRTChecks;
471
472using SCEV2ValueTy = DenseMap<const SCEV *, Value *>;
473} // namespace
474
475namespace llvm {
476
478
479/// InnerLoopVectorizer vectorizes loops which contain only one basic
480/// block to a specified vectorization factor (VF).
481/// This class performs the widening of scalars into vectors, or multiple
482/// scalars. This class also implements the following features:
483/// * It inserts an epilogue loop for handling loops that don't have iteration
484/// counts that are known to be a multiple of the vectorization factor.
485/// * It handles the code generation for reduction variables.
486/// * Scalarization (implementation using scalars) of un-vectorizable
487/// instructions.
488/// InnerLoopVectorizer does not perform any vectorization-legality
489/// checks, and relies on the caller to check for the different legality
490/// aspects. The InnerLoopVectorizer relies on the
491/// LoopVectorizationLegality class to provide information about the induction
492/// and reduction variables that were found for a given vectorization factor.
493class InnerLoopVectorizer {
494public:
495 InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
496 LoopInfo *LI, DominatorTree *DT,
497 const TargetTransformInfo *TTI, AssumptionCache *AC,
498 ElementCount VecWidth, unsigned UnrollFactor,
499 LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
500 ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks,
501 VPlan &Plan)
502 : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TTI(TTI), AC(AC),
503 VF(VecWidth), UF(UnrollFactor), Builder(PSE.getSE()->getContext()),
504 Cost(CM), BFI(BFI), PSI(PSI), RTChecks(RTChecks), Plan(Plan),
505 VectorPHVPBB(cast<VPBasicBlock>(
506 Plan.getVectorLoopRegion()->getSinglePredecessor())) {}
507
508 virtual ~InnerLoopVectorizer() = default;
509
510 /// Creates a basic block for the scalar preheader. Both
511 /// EpilogueVectorizerMainLoop and EpilogueVectorizerEpilogueLoop overwrite
512 /// the method to create additional blocks and checks needed for epilogue
513 /// vectorization.
515
516 /// Fix the vectorized code, taking care of header phi's, and more.
518
519 /// Fix the non-induction PHIs in \p Plan.
521
522 /// Returns the original loop trip count.
523 Value *getTripCount() const { return TripCount; }
524
525 /// Used to set the trip count after ILV's construction and after the
526 /// preheader block has been executed. Note that this always holds the trip
527 /// count of the original loop for both main loop and epilogue vectorization.
528 void setTripCount(Value *TC) { TripCount = TC; }
529
530protected:
532
533 /// Create and return a new IR basic block for the scalar preheader whose name
534 /// is prefixed with \p Prefix.
536
537 /// Allow subclasses to override and print debug traces before/after vplan
538 /// execution, when trace information is requested.
539 virtual void printDebugTracesAtStart() {}
540 virtual void printDebugTracesAtEnd() {}
541
542 /// The original loop.
544
545 /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
546 /// dynamic knowledge to simplify SCEV expressions and converts them to a
547 /// more usable form.
549
550 /// Loop Info.
552
553 /// Dominator Tree.
555
556 /// Target Transform Info.
558
559 /// Assumption Cache.
561
562 /// The vectorization SIMD factor to use. Each vector will have this many
563 /// vector elements.
565
566 /// The vectorization unroll factor to use. Each scalar is vectorized to this
567 /// many different vector instructions.
568 unsigned UF;
569
570 /// The builder that we use
572
573 // --- Vectorization state ---
574
575 /// Trip count of the original loop.
576 Value *TripCount = nullptr;
577
578 /// The profitability analysis.
580
581 /// BFI and PSI are used to check for profile guided size optimizations.
584
585 /// Structure to hold information about generated runtime checks, responsible
586 /// for cleaning the checks, if vectorization turns out unprofitable.
587 GeneratedRTChecks &RTChecks;
588
590
591 /// The vector preheader block of \p Plan, used as target for check blocks
592 /// introduced during skeleton creation.
594};
595
596/// Encapsulate information regarding vectorization of a loop and its epilogue.
597/// This information is meant to be updated and used across two stages of
598/// epilogue vectorization.
599struct EpilogueLoopVectorizationInfo {
600 ElementCount MainLoopVF = ElementCount::getFixed(0);
601 unsigned MainLoopUF = 0;
602 ElementCount EpilogueVF = ElementCount::getFixed(0);
603 unsigned EpilogueUF = 0;
604 BasicBlock *MainLoopIterationCountCheck = nullptr;
605 BasicBlock *EpilogueIterationCountCheck = nullptr;
606 Value *TripCount = nullptr;
607 Value *VectorTripCount = nullptr;
608 VPlan &EpiloguePlan;
609
610 EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
611 ElementCount EVF, unsigned EUF,
612 VPlan &EpiloguePlan)
613 : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF),
614 EpiloguePlan(EpiloguePlan) {
615 assert(EUF == 1 &&
616 "A high UF for the epilogue loop is likely not beneficial.");
617 }
618};
619
620/// An extension of the inner loop vectorizer that creates a skeleton for a
621/// vectorized loop that has its epilogue (residual) also vectorized.
622/// The idea is to run the vplan on a given loop twice: first to set up the
623/// skeleton and vectorize the main loop, and second to complete the skeleton
624/// from the first step and vectorize the epilogue. This is achieved by
625/// deriving two concrete strategy classes from this base class and invoking
626/// them in succession from the loop vectorizer planner.
627class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
628public:
639
640 /// Holds and updates state information required to vectorize the main loop
641 /// and its epilogue in two separate passes. This setup helps us avoid
642 /// regenerating and recomputing runtime safety checks. It also helps us to
643 /// shorten the iteration-count-check path length for the cases where the
644 /// iteration count of the loop is so small that the main vector loop is
645 /// completely skipped.
646 EpilogueLoopVectorizationInfo &EPI;
647
648protected:
650};
651
652/// A specialized derived class of inner loop vectorizer that performs
653/// vectorization of *main* loops in the process of vectorizing loops and their
654/// epilogues.
655class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
656public:
668 /// Implements the interface for creating a vectorized skeleton using the
669 /// *main loop* strategy (i.e., the first pass of VPlan execution).
671
672protected:
673 /// Introduces a new VPIRBasicBlock for \p CheckIRBB to Plan between the
674 /// vector preheader and its predecessor, also connecting the new block to the
675 /// scalar preheader.
676 void introduceCheckBlockInVPlan(BasicBlock *CheckIRBB);
677
678 // Create a check to see if the main vector loop should be executed
679 Value *createIterationCountCheck(ElementCount VF,
680 unsigned UF) const;
681
682 /// Emits an iteration count bypass check once for the main loop (when \p
683 /// ForEpilogue is false) and once for the epilogue loop (when \p
684 /// ForEpilogue is true).
685 BasicBlock *emitIterationCountCheck(BasicBlock *Bypass,
686 bool ForEpilogue);
687 void printDebugTracesAtStart() override;
688 void printDebugTracesAtEnd() override;
689};
690
691// A specialized derived class of inner loop vectorizer that performs
692// vectorization of *epilogue* loops in the process of vectorizing loops and
693// their epilogues.
694class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
695public:
705 /// Implements the interface for creating a vectorized skeleton using the
706 /// *epilogue loop* strategy (i.e., the second pass of VPlan execution).
708
709protected:
710 void printDebugTracesAtStart() override;
711 void printDebugTracesAtEnd() override;
712};
713} // end namespace llvm
714
715/// Look for a meaningful debug location on the instruction or its operands.
716static DebugLoc getDebugLocFromInstOrOperands(Instruction *I) {
717 if (!I)
718 return DebugLoc::getUnknown();
719
720 DebugLoc Empty;
721 if (I->getDebugLoc() != Empty)
722 return I->getDebugLoc();
723
724 for (Use &Op : I->operands()) {
725 if (Instruction *OpInst = dyn_cast<Instruction>(Op))
726 if (OpInst->getDebugLoc() != Empty)
727 return OpInst->getDebugLoc();
728 }
729
730 return I->getDebugLoc();
731}
732
733/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
734/// is passed, the message relates to that particular instruction.
735#ifndef NDEBUG
736static void debugVectorizationMessage(const StringRef Prefix,
737 const StringRef DebugMsg,
738 Instruction *I) {
739 dbgs() << "LV: " << Prefix << DebugMsg;
740 if (I != nullptr)
741 dbgs() << " " << *I;
742 else
743 dbgs() << '.';
744 dbgs() << '\n';
745}
746#endif
747
748/// Create an analysis remark that explains why vectorization failed
749///
750/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
751/// RemarkName is the identifier for the remark. If \p I is passed it is an
752/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
753/// the location of the remark. If \p DL is passed, use it as debug location for
754/// the remark. \return the remark object that can be streamed to.
755static OptimizationRemarkAnalysis
756createLVAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop,
757 Instruction *I, DebugLoc DL = {}) {
758 BasicBlock *CodeRegion = I ? I->getParent() : TheLoop->getHeader();
759 // If debug location is attached to the instruction, use it. Otherwise if DL
760 // was not provided, use the loop's.
761 if (I && I->getDebugLoc())
762 DL = I->getDebugLoc();
763 else if (!DL)
764 DL = TheLoop->getStartLoc();
765
766 return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
767}
768
769namespace llvm {
770
771/// Return a value for Step multiplied by VF.
772Value *createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF,
773 int64_t Step) {
774 assert(Ty->isIntegerTy() && "Expected an integer step");
775 ElementCount VFxStep = VF.multiplyCoefficientBy(Step);
776 assert(isPowerOf2_64(VF.getKnownMinValue()) && "must pass power-of-2 VF");
777 if (VF.isScalable() && isPowerOf2_64(Step)) {
778 return B.CreateShl(
779 B.CreateVScale(Ty),
780 ConstantInt::get(Ty, Log2_64(VFxStep.getKnownMinValue())), "", true);
781 }
782 return B.CreateElementCount(Ty, VFxStep);
783}
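// Illustrative example: for VF = 4 x vscale and Step = 2, VFxStep has a
// known minimum of 8, so the code above emits (vscale << 3) instead of a
// multiply; the shift is exact because both factors are powers of 2.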
784
785/// Return the runtime value for VF.
786Value *getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF) {
787 return B.CreateElementCount(Ty, VF);
788}
789
790void reportVectorizationFailure(const StringRef DebugMsg,
791 const StringRef OREMsg, const StringRef ORETag,
792 OptimizationRemarkEmitter *ORE, Loop *TheLoop,
793 Instruction *I) {
794 LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
795 LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
796 ORE->emit(
797 createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
798 << "loop not vectorized: " << OREMsg);
799}
800
801/// Reports an informative message: print \p Msg for debugging purposes as well
802/// as an optimization remark. Uses either \p I as location of the remark, or
803/// otherwise \p TheLoop. If \p DL is passed, use it as debug location for the
804/// remark.
805static void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
806 OptimizationRemarkEmitter *ORE,
807 Loop *TheLoop, Instruction *I = nullptr,
808 DebugLoc DL = {}) {
809 LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
810 LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
811 ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop,
812 I, DL)
813 << Msg);
814}
815
816/// Report successful vectorization of the loop. In case an outer loop is
817/// vectorized, prepend "outer" to the vectorization remark.
818static void reportVectorization(OptimizationRemarkEmitter *ORE, Loop *TheLoop,
819 VectorizationFactor VF, unsigned IC) {
821 "Vectorizing: ", TheLoop->isInnermost() ? "innermost loop" : "outer loop",
822 nullptr));
823 StringRef LoopType = TheLoop->isInnermost() ? "" : "outer ";
824 ORE->emit([&]() {
825 return OptimizationRemark(LV_NAME, "Vectorized", TheLoop->getStartLoc(),
826 TheLoop->getHeader())
827 << "vectorized " << LoopType << "loop (vectorization width: "
828 << ore::NV("VectorizationFactor", VF.Width)
829 << ", interleaved count: " << ore::NV("InterleaveCount", IC) << ")";
830 });
831}
832
833} // end namespace llvm
834
835namespace llvm {
836
837// Loop vectorization cost-model hints how the scalar epilogue loop should be
838// lowered.
839enum ScalarEpilogueLowering {
840
841 // The default: allowing scalar epilogues.
842 CM_ScalarEpilogueAllowed,
843
844 // Vectorization with OptForSize: don't allow epilogues.
845 CM_ScalarEpilogueNotAllowedOptSize,
846
847 // A special case of vectorization with OptForSize: loops with a very small
848 // trip count are considered for vectorization under OptForSize, thereby
849 // making sure the cost of their loop body is dominant, free of runtime
850 // guards and scalar iteration overheads.
851 CM_ScalarEpilogueNotAllowedLowTripLoop,
852
853 // Loop hint predicate indicating an epilogue is undesired.
854 CM_ScalarEpilogueNotNeededUsePredicate,
855
856 // Directive indicating we must either tail fold or not vectorize
857 CM_ScalarEpilogueNotAllowedUsePredicate
858};
859
860/// LoopVectorizationCostModel - estimates the expected speedups due to
861/// vectorization.
862/// In many cases vectorization is not profitable. This can happen because of
863/// a number of reasons. In this class we mainly attempt to predict the
864/// expected speedup/slowdowns due to the supported instruction set. We use the
865/// TargetTransformInfo to query the different backends for the cost of
866/// different operations.
867class LoopVectorizationCostModel {
870public:
871 LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
872 PredicatedScalarEvolution &PSE, LoopInfo *LI,
873 LoopVectorizationLegality *Legal,
874 const TargetTransformInfo &TTI,
875 const TargetLibraryInfo *TLI, DemandedBits *DB,
876 AssumptionCache *AC,
877 OptimizationRemarkEmitter *ORE, const Function *F,
878 const LoopVectorizeHints *Hints,
879 InterleavedAccessInfo &IAI,
880 ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
881 : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
882 TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
883 Hints(Hints), InterleaveInfo(IAI) {
884 if (TTI.supportsScalableVectors() || ForceTargetSupportsScalableVectors)
885 initializeVScaleForTuning();
887 // Query this against the original loop and save it here because the profile
888 // of the original loop header may change as the transformation happens.
889 OptForSize = llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
890 PGSOQueryType::IRPass);
891 }
892
893 /// \return An upper bound for the vectorization factors (both fixed and
894 /// scalable). If the factors are 0, vectorization and interleaving should be
895 /// avoided up front.
896 FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);
897
898 /// \return True if runtime checks are required for vectorization, and false
899 /// otherwise.
900 bool runtimeChecksRequired();
901
902 /// Setup cost-based decisions for user vectorization factor.
903 /// \return true if the UserVF is a feasible VF to be chosen.
904 bool selectUserVectorizationFactor(ElementCount UserVF) {
905 collectNonVectorizedAndSetWideningDecisions(UserVF);
906 return expectedCost(UserVF).isValid();
907 }
908
909 /// \return True if maximizing vector bandwidth is enabled by the target or
910 /// user options, for the given register kind.
911 bool useMaxBandwidth(TargetTransformInfo::RegisterKind RegKind);
912
913 /// \return True if register pressure should be considered for the given VF.
914 bool shouldConsiderRegPressureForVF(ElementCount VF);
915
916 /// \return The size (in bits) of the smallest and widest types in the code
917 /// that needs to be vectorized. We ignore values that remain scalar such as
918 /// 64 bit loop indices.
919 std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
920
921 /// Memory access instruction may be vectorized in more than one way.
922 /// Form of instruction after vectorization depends on cost.
923 /// This function takes cost-based decisions for Load/Store instructions
924 /// and collects them in a map. This decisions map is used for building
925 /// the lists of loop-uniform and loop-scalar instructions.
926 /// The calculated cost is saved with widening decision in order to
927 /// avoid redundant calculations.
928 void setCostBasedWideningDecision(ElementCount VF);
929
930 /// A call may be vectorized in different ways depending on whether we have
931 /// vectorized variants available and whether the target supports masking.
932 /// This function analyzes all calls in the function at the supplied VF,
933 /// makes a decision based on the costs of available options, and stores that
934 /// decision in a map for use in planning and plan execution.
935 void setVectorizedCallDecision(ElementCount VF);
936
937 /// Collect values we want to ignore in the cost model.
938 void collectValuesToIgnore();
939
940 /// Collect all element types in the loop for which widening is needed.
941 void collectElementTypesForWidening();
942
943 /// Split reductions into those that happen in the loop, and those that happen
944 /// outside the loop. In-loop reductions are collected into InLoopReductions.
945 void collectInLoopReductions();
946
947 /// Returns true if we should use strict in-order reductions for the given
948 /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
949 /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
950 /// of FP operations.
951 bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) const {
952 return !Hints->allowReordering() && RdxDesc.isOrdered();
953 }
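 // Illustrative example: a float accumulation 's += a[i]' compiled without
 // reassociation (no fast-math) must preserve the sequential rounding order,
 // so it is only vectorizable as an in-order (strict) reduction, e.g. with an
 // in-loop llvm.vector.reduce.fadd that takes the running scalar as its start
 // value.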
954
955 /// \returns The smallest bitwidth each instruction can be represented with.
956 /// The vector equivalents of these instructions should be truncated to this
957 /// type.
958 const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
959 return MinBWs;
960 }
961
962 /// \returns True if it is more profitable to scalarize instruction \p I for
963 /// vectorization factor \p VF.
964 bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
965 assert(VF.isVector() &&
966 "Profitable to scalarize relevant only for VF > 1.");
967 assert(
968 TheLoop->isInnermost() &&
969 "cost-model should not be used for outer loops (in VPlan-native path)");
970
971 auto Scalars = InstsToScalarize.find(VF);
972 assert(Scalars != InstsToScalarize.end() &&
973 "VF not yet analyzed for scalarization profitability");
974 return Scalars->second.contains(I);
975 }
976
977 /// Returns true if \p I is known to be uniform after vectorization.
978 bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
979 assert(
980 TheLoop->isInnermost() &&
981 "cost-model should not be used for outer loops (in VPlan-native path)");
982 // Pseudo probe needs to be duplicated for each unrolled iteration and
983 // vector lane so that profiled loop trip count can be accurately
984 // accumulated instead of being under counted.
985 if (isa<PseudoProbeInst>(I))
986 return false;
987
988 if (VF.isScalar())
989 return true;
990
991 auto UniformsPerVF = Uniforms.find(VF);
992 assert(UniformsPerVF != Uniforms.end() &&
993 "VF not yet analyzed for uniformity");
994 return UniformsPerVF->second.count(I);
995 }
996
997 /// Returns true if \p I is known to be scalar after vectorization.
998 bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
999 assert(
1000 TheLoop->isInnermost() &&
1001 "cost-model should not be used for outer loops (in VPlan-native path)");
1002 if (VF.isScalar())
1003 return true;
1004
1005 auto ScalarsPerVF = Scalars.find(VF);
1006 assert(ScalarsPerVF != Scalars.end() &&
1007 "Scalar values are not calculated for VF");
1008 return ScalarsPerVF->second.count(I);
1009 }
1010
1011 /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1012 /// for vectorization factor \p VF.
1013 bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1014 // Truncs must truncate at most to their destination type.
1015 if (isa_and_nonnull<TruncInst>(I) && MinBWs.contains(I) &&
1016 I->getType()->getScalarSizeInBits() < MinBWs.lookup(I))
1017 return false;
1018 return VF.isVector() && MinBWs.contains(I) &&
1019 !isProfitableToScalarize(I, VF) &&
1021 }
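 // Illustrative example: if MinBWs maps an i32 add to 8 because its result
 // only ever feeds an i8 store, the add can be emitted on <VF x i8> vectors
 // instead of <VF x i32>, reducing register pressure fourfold.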
1022
1023 /// Decision that was taken during cost calculation for memory instruction.
1024 enum InstWidening {
1025 CM_Unknown,
1026 CM_Widen, // For consecutive accesses with stride +1.
1027 CM_Widen_Reverse, // For consecutive accesses with stride -1.
1028 CM_Interleave,
1029 CM_GatherScatter,
1030 CM_Scalarize,
1031 CM_VectorCall,
1032 CM_IntrinsicCall
1033 };
1034
1035 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1036 /// instruction \p I and vector width \p VF.
1037 void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1038 InstructionCost Cost) {
1039 assert(VF.isVector() && "Expected VF >=2");
1040 WideningDecisions[{I, VF}] = {W, Cost};
1041 }
1042
1043 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1044 /// interleaving group \p Grp and vector width \p VF.
1045 void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1046 ElementCount VF, InstWidening W,
1047 InstructionCost Cost) {
1048 assert(VF.isVector() && "Expected VF >=2");
1049 /// Broadcast this decision to all instructions inside the group.
1050 /// When interleaving, the cost will only be assigned to one instruction, the
1051 /// insert position. For other cases, add the appropriate fraction of the
1052 /// total cost to each instruction. This ensures accurate costs are used,
1053 /// even if the insert position instruction is not used.
1054 InstructionCost InsertPosCost = Cost;
1055 InstructionCost OtherMemberCost = 0;
1056 if (W != CM_Interleave)
1057 OtherMemberCost = InsertPosCost = Cost / Grp->getNumMembers();
1058
1059 for (unsigned Idx = 0; Idx < Grp->getFactor(); ++Idx) {
1060 if (auto *I = Grp->getMember(Idx)) {
1061 if (Grp->getInsertPos() == I)
1062 WideningDecisions[{I, VF}] = {W, InsertPosCost};
1063 else
1064 WideningDecisions[{I, VF}] = {W, OtherMemberCost};
1065 }
1066 }
1067 }
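 // Illustrative example: for a 4-member group decided as CM_Widen with total
 // cost 8, each member records cost 2; had the decision been CM_Interleave,
 // the insert-position member would record the full 8 and the others 0.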
1068
1069 /// Return the cost model decision for the given instruction \p I and vector
1070 /// width \p VF. Return CM_Unknown if this instruction did not pass
1071 /// through the cost modeling.
1072 InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
1073 assert(VF.isVector() && "Expected VF to be a vector VF");
1074 assert(
1075 TheLoop->isInnermost() &&
1076 "cost-model should not be used for outer loops (in VPlan-native path)");
1077
1078 std::pair<Instruction *, ElementCount> InstOnVF(I, VF);
1079 auto Itr = WideningDecisions.find(InstOnVF);
1080 if (Itr == WideningDecisions.end())
1081 return CM_Unknown;
1082 return Itr->second.first;
1083 }
1084
1085 /// Return the vectorization cost for the given instruction \p I and vector
1086 /// width \p VF.
1087 InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1088 assert(VF.isVector() && "Expected VF >=2");
1089 std::pair<Instruction *, ElementCount> InstOnVF(I, VF);
1090 assert(WideningDecisions.contains(InstOnVF) &&
1091 "The cost is not calculated");
1092 return WideningDecisions[InstOnVF].second;
1093 }
1094
1095 struct CallWideningDecision {
1096 InstWidening Kind;
1097 Function *Variant;
1098 Intrinsic::ID IID;
1099 std::optional<unsigned> MaskPos;
1100 InstructionCost Cost;
1101 };
1102
1103 void setCallWideningDecision(CallInst *CI, ElementCount VF, InstWidening Kind,
1104 Function *Variant, Intrinsic::ID IID,
1105 std::optional<unsigned> MaskPos,
1107 assert(!VF.isScalar() && "Expected vector VF");
1108 CallWideningDecisions[{CI, VF}] = {Kind, Variant, IID, MaskPos, Cost};
1109 }
1110
1111 CallWideningDecision getCallWideningDecision(CallInst *CI,
1112 ElementCount VF) const {
1113 assert(!VF.isScalar() && "Expected vector VF");
1114 auto I = CallWideningDecisions.find({CI, VF});
1115 if (I == CallWideningDecisions.end())
1116 return {CM_Unknown, nullptr, Intrinsic::not_intrinsic, std::nullopt, 0};
1117 return I->second;
1118 }
1119
1120 /// Return True if instruction \p I is an optimizable truncate whose operand
1121 /// is an induction variable. Such a truncate will be removed by adding a new
1122 /// induction variable with the destination type.
1123 bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1124 // If the instruction is not a truncate, return false.
1125 auto *Trunc = dyn_cast<TruncInst>(I);
1126 if (!Trunc)
1127 return false;
1128
1129 // Get the source and destination types of the truncate.
1130 Type *SrcTy = toVectorTy(Trunc->getSrcTy(), VF);
1131 Type *DestTy = toVectorTy(Trunc->getDestTy(), VF);
1132
1133 // If the truncate is free for the given types, return false. Replacing a
1134 // free truncate with an induction variable would add an induction variable
1135 // update instruction to each iteration of the loop. We exclude from this
1136 // check the primary induction variable since it will need an update
1137 // instruction regardless.
1138 Value *Op = Trunc->getOperand(0);
1139 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1140 return false;
1141
1142 // If the truncated value is not an induction variable, return false.
1143 return Legal->isInductionPhi(Op);
1144 }
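 // Illustrative example: a truncate of an induction variable from i64 to i32
 // that is not free on the target is optimizable: it can be replaced by a
 // new i32 induction variable of the destination type.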
1145
1146 /// Collects the instructions to scalarize for each predicated instruction in
1147 /// the loop.
1148 void collectInstsToScalarize(ElementCount VF);
1149
1150 /// Collect values that will not be widened, including Uniforms, Scalars, and
1151 /// Instructions to Scalarize for the given \p VF.
1152 /// The sets depend on CM decision for Load/Store instructions
1153 /// that may be vectorized as interleave, gather-scatter or scalarized.
1154 /// Also make a decision on what to do about call instructions in the loop
1155 /// at that VF -- scalarize, call a known vector routine, or call a
1156 /// vector intrinsic.
1157 void collectNonVectorizedAndSetWideningDecisions(ElementCount VF) {
1158 // Do the analysis once.
1159 if (VF.isScalar() || Uniforms.contains(VF))
1160 return;
1161 setCostBasedWideningDecision(VF);
1162 collectLoopUniforms(VF);
1163 setVectorizedCallDecision(VF);
1164 collectLoopScalars(VF);
1166 }
1167
1168 /// Returns true if the target machine supports masked store operation
1169 /// for the given \p DataType and kind of access to \p Ptr.
1170 bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment,
1171 unsigned AddressSpace) const {
1172 return Legal->isConsecutivePtr(DataType, Ptr) &&
1173 TTI.isLegalMaskedStore(DataType, Alignment, AddressSpace);
1174 }
1175
1176 /// Returns true if the target machine supports masked load operation
1177 /// for the given \p DataType and kind of access to \p Ptr.
1178 bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment,
1179 unsigned AddressSpace) const {
1180 return Legal->isConsecutivePtr(DataType, Ptr) &&
1181 TTI.isLegalMaskedLoad(DataType, Alignment, AddressSpace);
1182 }
1183
1184 /// Returns true if the target machine can represent \p V as a masked gather
1185 /// or scatter operation.
1186 bool isLegalGatherOrScatter(Value *V, ElementCount VF) {
1187 bool LI = isa<LoadInst>(V);
1188 bool SI = isa<StoreInst>(V);
1189 if (!LI && !SI)
1190 return false;
1191 auto *Ty = getLoadStoreType(V);
1192 Align Align = getLoadStoreAlignment(V);
1193 if (VF.isVector())
1194 Ty = VectorType::get(Ty, VF);
1195 return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
1196 (SI && TTI.isLegalMaskedScatter(Ty, Align));
1197 }
1198
1199 /// Returns true if the target machine supports all of the reduction
1200 /// variables found for the given VF.
1201 bool canVectorizeReductions(ElementCount VF) const {
1202 return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1203 const RecurrenceDescriptor &RdxDesc = Reduction.second;
1204 return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1205 }));
1206 }
1207
1208 /// Given costs for both strategies, return true if the scalar predication
1209 /// lowering should be used for div/rem. This incorporates an override
1210 /// option so it is not simply a cost comparison.
1211 bool isDivRemScalarizeWithPredication(InstructionCost ScalarCost,
1212 InstructionCost SafeDivisorCost) const {
1213 switch (ForceSafeDivisor) {
1214 case cl::BOU_UNSET:
1215 return ScalarCost < SafeDivisorCost;
1216 case cl::BOU_TRUE:
1217 return false;
1218 case cl::BOU_FALSE:
1219 return true;
1220 }
1221 llvm_unreachable("impossible case value");
1222 }
1223
1224 /// Returns true if \p I is an instruction which requires predication and
1225 /// for which our chosen predication strategy is scalarization (i.e. we
1226 /// don't have an alternate strategy such as masking available).
1227 /// \p VF is the vectorization factor that will be used to vectorize \p I.
1228 bool isScalarWithPredication(Instruction *I, ElementCount VF) const;
1229
1230 /// Returns true if \p I is an instruction that needs to be predicated
1231 /// at runtime. The result is independent of the predication mechanism.
1232 /// Superset of instructions that return true for isScalarWithPredication.
1233 bool isPredicatedInst(Instruction *I) const;
1234
1235 /// Return the costs for our two available strategies for lowering a
1236 /// div/rem operation which requires speculating at least one lane.
1237 /// First result is for scalarization (will be invalid for scalable
1238 /// vectors); second is for the safe-divisor strategy.
1239 std::pair<InstructionCost, InstructionCost>
1240 getDivRemSpeculationCost(Instruction *I,
1241 ElementCount VF) const;
1242
1243 /// Returns true if \p I is a memory instruction with consecutive memory
1244 /// access that can be widened.
1245 bool memoryInstructionCanBeWidened(Instruction *I, ElementCount VF);
1246
1247 /// Returns true if \p I is a memory instruction in an interleaved-group
1248 /// of memory accesses that can be vectorized with wide vector loads/stores
1249 /// and shuffles.
1250 bool interleavedAccessCanBeWidened(Instruction *I, ElementCount VF) const;
1251
1252 /// Check if \p Instr belongs to any interleaved access group.
1254 return InterleaveInfo.isInterleaved(Instr);
1255 }
1256
1257 /// Get the interleaved access group that \p Instr belongs to.
1260 return InterleaveInfo.getInterleaveGroup(Instr);
1261 }
1262
1263 /// Returns true if we're required to use a scalar epilogue for at least
1264 /// the final iteration of the original loop.
1265 bool requiresScalarEpilogue(bool IsVectorizing) const {
1266 if (!isScalarEpilogueAllowed()) {
1267 LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue\n");
1268 return false;
1269 }
1270 // If we might exit from anywhere but the latch and early exit vectorization
1271 // is disabled, we must run the exiting iteration in scalar form.
1272 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch() &&
1273 !(EnableEarlyExitVectorization && Legal->hasUncountableEarlyExit())) {
1274 LLVM_DEBUG(dbgs() << "LV: Loop requires scalar epilogue: not exiting "
1275 "from latch block\n");
1276 return true;
1277 }
1278 if (IsVectorizing && InterleaveInfo.requiresScalarEpilogue()) {
1279 LLVM_DEBUG(dbgs() << "LV: Loop requires scalar epilogue: "
1280 "interleaved group requires scalar epilogue\n");
1281 return true;
1282 }
1283 LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue\n");
1284 return false;
1285 }
1286
1287 /// Returns true if a scalar epilogue is not allowed due to optsize or a
1288 /// loop hint annotation.
1289 bool isScalarEpilogueAllowed() const {
1290 return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1291 }
1292
1293 /// Returns the TailFoldingStyle that is best for the current loop.
1294 TailFoldingStyle getTailFoldingStyle(bool IVUpdateMayOverflow = true) const {
1295 if (!ChosenTailFoldingStyle)
1296 return TailFoldingStyle::None;
1297 return IVUpdateMayOverflow ? ChosenTailFoldingStyle->first
1298 : ChosenTailFoldingStyle->second;
1299 }
1300
1301 /// Selects and saves TailFoldingStyle for 2 options - if IV update may
1302 /// overflow or not.
1303 /// \param IsScalableVF true if scalable vector factors enabled.
1304 /// \param UserIC User specific interleave count.
1305 void setTailFoldingStyles(bool IsScalableVF, unsigned UserIC) {
1306 assert(!ChosenTailFoldingStyle && "Tail folding must not be selected yet.");
1307 if (!Legal->canFoldTailByMasking()) {
1308 ChosenTailFoldingStyle = {TailFoldingStyle::None, TailFoldingStyle::None};
1309 return;
1310 }
1311
1312 // Default to TTI preference, but allow command line override.
1313 ChosenTailFoldingStyle = {
1314 TTI.getPreferredTailFoldingStyle(/*IVUpdateMayOverflow=*/true),
1315 TTI.getPreferredTailFoldingStyle(/*IVUpdateMayOverflow=*/false)};
1316 if (ForceTailFoldingStyle.getNumOccurrences())
1317 ChosenTailFoldingStyle = {ForceTailFoldingStyle.getValue(),
1318 ForceTailFoldingStyle.getValue()};
1319
1320 if (ChosenTailFoldingStyle->first != TailFoldingStyle::DataWithEVL &&
1321 ChosenTailFoldingStyle->second != TailFoldingStyle::DataWithEVL)
1322 return;
1323 // Override EVL styles if needed.
1324 // FIXME: Investigate opportunity for fixed vector factor.
1325 bool EVLIsLegal = UserIC <= 1 && IsScalableVF &&
1326 TTI.hasActiveVectorLength() && !EnableVPlanNativePath;
1327 if (EVLIsLegal)
1328 return;
1329 // If for some reason EVL mode is unsupported, fallback to a scalar epilogue
1330 // if it's allowed, or DataWithoutLaneMask otherwise.
1331 if (ScalarEpilogueStatus == CM_ScalarEpilogueAllowed ||
1332 ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate)
1333 ChosenTailFoldingStyle = {TailFoldingStyle::None, TailFoldingStyle::None};
1334 else
1335 ChosenTailFoldingStyle = {TailFoldingStyle::DataWithoutLaneMask,
1336 TailFoldingStyle::DataWithoutLaneMask};
1337
1338 LLVM_DEBUG(
1339 dbgs() << "LV: Preference for VP intrinsics indicated. Will "
1340 "not try to generate VP Intrinsics "
1341 << (UserIC > 1
1342 ? "since interleave count specified is greater than 1.\n"
1343 : "due to non-interleaving reasons.\n"));
1344 }
1345
1346 /// Returns true if all loop blocks should be masked to fold tail loop.
1347 bool foldTailByMasking() const {
1348 // TODO: check if it is possible to check for None style independent of
1349 // IVUpdateMayOverflow flag in getTailFoldingStyle.
1350 return getTailFoldingStyle() != TailFoldingStyle::None;
1351 }
1352
1353 /// Return maximum safe number of elements to be processed per vector
1354 /// iteration, which do not prevent store-load forwarding and are safe with
1355 /// regard to the memory dependencies. Required for EVL-based VPlans to
1356 /// correctly calculate AVL (application vector length) as min(remaining AVL,
1357 /// MaxSafeElements).
1358 /// TODO: need to consider adjusting cost model to use this value as a
1359 /// vectorization factor for EVL-based vectorization.
1360 std::optional<unsigned> getMaxSafeElements() const { return MaxSafeElements; }
1361
1362 /// Returns true if the instructions in this block requires predication
1363 /// for any reason, e.g. because tail folding now requires a predicate
1364 /// or because the block in the original loop was predicated.
1365 bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const {
1366 return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1367 }
1368
1369 /// Returns true if VP intrinsics with explicit vector length support should
1370 /// be generated in the tail folded loop.
1371 bool foldTailWithEVL() const {
1372 return getTailFoldingStyle() == TailFoldingStyle::DataWithEVL;
1373 }
1374
1375 /// Returns true if the Phi is part of an inloop reduction.
1376 bool isInLoopReduction(PHINode *Phi) const {
1377 return InLoopReductions.contains(Phi);
1378 }
1379
1380 /// Returns true if the predicated reduction select should be used to set the
1381 /// incoming value for the reduction phi.
1382 bool usePredicatedReductionSelect() const {
1383 // Force to use predicated reduction select since the EVL of the
1384 // second-to-last iteration might not be VF*UF.
1385 if (foldTailWithEVL())
1386 return true;
1387 return PreferPredicatedReductionSelect ||
1388 TTI.preferPredicatedReductionSelect();
1389 }
1390
1391 /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1392 /// with factor VF. Return the cost of the instruction, including
1393 /// scalarization overhead if it's needed.
1394 InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1395
1396 /// Estimate cost of a call instruction CI if it were vectorized with factor
1397 /// VF. Return the cost of the instruction, including scalarization overhead
1398 /// if it's needed.
1399 InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF) const;
1400
1401 /// Invalidates decisions already taken by the cost model.
1402 void invalidateCostModelingDecisions() {
1403 WideningDecisions.clear();
1404 CallWideningDecisions.clear();
1405 Uniforms.clear();
1406 Scalars.clear();
1407 }
1408
1409 /// Returns the expected execution cost. The unit of the cost does
1410 /// not matter because we use the 'cost' units to compare different
1411 /// vector widths. The cost that is returned is *not* normalized by
1412 /// the factor width.
1413 InstructionCost expectedCost(ElementCount VF);
1414
1415 bool hasPredStores() const { return NumPredStores > 0; }
1416
1417 /// Returns true if epilogue vectorization is considered profitable, and
1418 /// false otherwise.
1419 /// \p VF is the vectorization factor chosen for the original loop.
1420 /// \p IC is an additional scaling factor applied to VF before
1421 /// comparing to EpilogueVectorizationMinVF.
1422 bool isEpilogueVectorizationProfitable(const ElementCount VF,
1423 const unsigned IC) const;
1424
1425 /// Returns the execution time cost of an instruction for a given vector
1426 /// width. Vector width of one means scalar.
1427 InstructionCost getInstructionCost(Instruction *I, ElementCount VF);
1428
1429 /// Return the cost of instructions in an inloop reduction pattern, if I is
1430 /// part of that pattern.
1431 std::optional<InstructionCost> getReductionPatternCost(Instruction *I,
1432 ElementCount VF,
1433 Type *VectorTy) const;
1434
1435 /// Returns true if \p Op should be considered invariant and if it is
1436 /// trivially hoistable.
1437 bool shouldConsiderInvariant(Value *Op);
1438
1439 /// Return the value of vscale used for tuning the cost model.
1440 std::optional<unsigned> getVScaleForTuning() const { return VScaleForTuning; }
1441
1442private:
1443 unsigned NumPredStores = 0;
1444
1445 /// Used to store the value of vscale used for tuning the cost model. It is
1446 /// initialized during object construction.
1447 std::optional<unsigned> VScaleForTuning;
1448
1449 /// Initializes the value of vscale used for tuning the cost model. If
1450 /// vscale_range.min == vscale_range.max then return vscale_range.max, else
1451 /// return the value returned by the corresponding TTI method.
1452 void initializeVScaleForTuning() {
1453 const Function *Fn = TheLoop->getHeader()->getParent();
1454 if (Fn->hasFnAttribute(Attribute::VScaleRange)) {
1455 auto Attr = Fn->getFnAttribute(Attribute::VScaleRange);
1456 auto Min = Attr.getVScaleRangeMin();
1457 auto Max = Attr.getVScaleRangeMax();
1458 if (Max && Min == Max) {
1459 VScaleForTuning = Max;
1460 return;
1461 }
1462 }
1463
1464 VScaleForTuning = TTI.getVScaleForTuning();
1465 }
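 // Illustrative example: a function attributed with vscale_range(2,2) gives
 // VScaleForTuning = 2, so a scalable VF of 4 x vscale is costed as 8 lanes.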
1466
1467 /// \return An upper bound for the vectorization factors for both
1468 /// fixed and scalable vectorization, where the minimum-known number of
1469 /// elements is a power-of-2 larger than zero. If scalable vectorization is
1470 /// disabled or unsupported, then the scalable part will be equal to
1471 /// ElementCount::getScalable(0).
1472 FixedScalableVFPair computeFeasibleMaxVF(unsigned MaxTripCount,
1473 ElementCount UserVF,
1474 bool FoldTailByMasking);
1475
1476 /// If \p VF > MaxTripcount, clamps it to the next lower VF that is <=
1477 /// MaxTripCount.
1478 ElementCount clampVFByMaxTripCount(ElementCount VF, unsigned MaxTripCount,
1479 bool FoldTailByMasking) const;
1480
1481 /// \return the maximized element count based on the targets vector
1482 /// registers and the loop trip-count, but limited to a maximum safe VF.
1483 /// This is a helper function of computeFeasibleMaxVF.
1484 ElementCount getMaximizedVFForTarget(unsigned MaxTripCount,
1485 unsigned SmallestType,
1486 unsigned WidestType,
1487 ElementCount MaxSafeVF,
1488 bool FoldTailByMasking);
1489
1490 /// Checks if scalable vectorization is supported and enabled. Caches the
1491 /// result to avoid repeated debug dumps for repeated queries.
1492 bool isScalableVectorizationAllowed();
1493
1494 /// \return the maximum legal scalable VF, based on the safe max number
1495 /// of elements.
1496 ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1497
1498 /// Calculate vectorization cost of memory instruction \p I.
1499 InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1500
1501 /// The cost computation for scalarized memory instruction.
1502 InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1503
1504 /// The cost computation for interleaving group of memory instructions.
1505 InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1506
1507 /// The cost computation for Gather/Scatter instruction.
1508 InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1509
1510 /// The cost computation for widening instruction \p I with consecutive
1511 /// memory access.
1512 InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1513
1514 /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1515 /// Load: scalar load + broadcast.
1516 /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1517 /// element)
1518 InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1519
1520 /// Estimate the overhead of scalarizing an instruction. This is a
1521 /// convenience wrapper for the type-based getScalarizationOverhead API.
1522 InstructionCost getScalarizationOverhead(Instruction *I,
1523 ElementCount VF) const;
1524
1525 /// Returns true if an artificially high cost for emulated masked memrefs
1526 /// should be used.
1527 bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);
1528
1529 /// Map of scalar integer values to the smallest bitwidth they can be legally
1530 /// represented as. The vector equivalents of these values should be truncated
1531 /// to this type.
1532 MapVector<Instruction *, uint64_t> MinBWs;
1533
1534 /// A type representing the costs for instructions if they were to be
1535 /// scalarized rather than vectorized. The entries are Instruction-Cost
1536 /// pairs.
1537 using ScalarCostsTy = MapVector<Instruction *, InstructionCost>;
1538
1539 /// A set containing all BasicBlocks that are known to be present after
1540 /// vectorization as a predicated block.
1541 DenseMap<ElementCount, SmallPtrSet<BasicBlock *, 4>>
1542 PredicatedBBsAfterVectorization;
1543
1544 /// Records whether it is allowed to have the original scalar loop execute at
1545 /// least once. This may be needed as a fallback loop in case runtime
1546 /// aliasing/dependence checks fail, or to handle the tail/remainder
1547 /// iterations when the trip count is unknown or doesn't divide by the VF,
1548 /// or as a peel-loop to handle gaps in interleave-groups.
1549 /// Under optsize and when the trip count is very small we don't allow any
1550 /// iterations to execute in the scalar loop.
1551 ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1552
1553 /// Control finally chosen tail folding style. The first element is used if
1554 /// the IV update may overflow, the second element - if it does not.
1555 std::optional<std::pair<TailFoldingStyle, TailFoldingStyle>>
1556 ChosenTailFoldingStyle;
1557
1558 /// true if scalable vectorization is supported and enabled.
1559 std::optional<bool> IsScalableVectorizationAllowed;
1560
1561 /// Maximum safe number of elements to be processed per vector iteration,
1562 /// which do not prevent store-load forwarding and are safe with regard to the
1563 /// memory dependencies. Required for EVL-based vectorization, where this
1564 /// value is used as the upper bound of the safe AVL.
1565 std::optional<unsigned> MaxSafeElements;
1566
1567 /// A map holding scalar costs for different vectorization factors. The
1568 /// presence of a cost for an instruction in the mapping indicates that the
1569 /// instruction will be scalarized when vectorizing with the associated
1570 /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1571 MapVector<ElementCount, ScalarCostsTy> InstsToScalarize;
1572
1573 /// Holds the instructions known to be uniform after vectorization.
1574 /// The data is collected per VF.
1575 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1576
1577 /// Holds the instructions known to be scalar after vectorization.
1578 /// The data is collected per VF.
1579 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1580
1581 /// Holds the instructions (address computations) that are forced to be
1582 /// scalarized.
1583 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1584
1585 /// PHINodes of the reductions that should be expanded in-loop.
1586 SmallPtrSet<PHINode *, 4> InLoopReductions;
1587
1588 /// A Map of inloop reduction operations and their immediate chain operand.
1589 /// FIXME: This can be removed once reductions can be costed correctly in
1590 /// VPlan. This was added to allow quick lookup of the inloop operations.
1591 DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1592
1593 /// Returns the expected difference in cost from scalarizing the expression
1594 /// feeding a predicated instruction \p PredInst. The instructions to
1595 /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1596 /// non-negative return value implies the expression will be scalarized.
1597 /// Currently, only single-use chains are considered for scalarization.
1598 InstructionCost computePredInstDiscount(Instruction *PredInst,
1599 ScalarCostsTy &ScalarCosts,
1600 ElementCount VF);
1601
1602 /// Collect the instructions that are uniform after vectorization. An
1603 /// instruction is uniform if we represent it with a single scalar value in
1604 /// the vectorized loop corresponding to each vector iteration. Examples of
1605 /// uniform instructions include pointer operands of consecutive or
1606 /// interleaved memory accesses. Note that although uniformity implies an
1607 /// instruction will be scalar, the reverse is not true. In general, a
1608 /// scalarized instruction will be represented by VF scalar values in the
1609 /// vectorized loop, each corresponding to an iteration of the original
1610 /// scalar loop.
1611 void collectLoopUniforms(ElementCount VF);
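  // For illustration (not from the original source): in a loop computing
  // 'sum += A[i]' with a consecutive load, the getelementptr producing the
  // address of A[i] is uniform-after-vectorization: one scalar address per
  // vector iteration suffices. A scalarized instruction instead needs VF
  // copies, one per lane of the original scalar iteration space.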
1612
1613 /// Collect the instructions that are scalar after vectorization. An
1614 /// instruction is scalar if it is known to be uniform or will be scalarized
1615 /// during vectorization. collectLoopScalars should only add non-uniform nodes
1616 /// to the list if they are used by a load/store instruction that is marked as
1617 /// CM_Scalarize. Non-uniform scalarized instructions will be represented by
1618 /// VF values in the vectorized loop, each corresponding to an iteration of
1619 /// the original scalar loop.
1620 void collectLoopScalars(ElementCount VF);
1621
1622 /// Keeps cost model vectorization decision and cost for instructions.
1623 /// Right now it is used for memory instructions only.
1624 using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1625 std::pair<InstWidening, InstructionCost>>;
1626
1627 DecisionList WideningDecisions;
1628
1629 using CallDecisionList =
1630 DenseMap<std::pair<CallInst *, ElementCount>, CallWideningDecision>;
1631
1632 CallDecisionList CallWideningDecisions;
1633
1634 /// Returns true if \p V is expected to be vectorized and it needs to be
1635 /// extracted.
1636 bool needsExtract(Value *V, ElementCount VF) const {
1637 Instruction *I = dyn_cast<Instruction>(V);
1638 if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1639 TheLoop->isLoopInvariant(I) ||
1640 getWideningDecision(I, VF) == CM_Scalarize ||
1641 (isa<CallInst>(I) &&
1642 getCallWideningDecision(cast<CallInst>(I), VF).Kind == CM_Scalarize))
1643 return false;
1644
1645 // Assume we can vectorize V (and hence we need extraction) if the
1646 // scalars are not computed yet. This can happen, because it is called
1647 // via getScalarizationOverhead from setCostBasedWideningDecision, before
1648 // the scalars are collected. That should be a safe assumption in most
1649 // cases, because we check if the operands have vectorizable types
1650 // beforehand in LoopVectorizationLegality.
1651 return !Scalars.contains(VF) || !isScalarAfterVectorization(I, VF);
1652 };
1653
1654 /// Returns a range containing only operands needing to be extracted.
1655 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1656 ElementCount VF) const {
1657
1658 SmallPtrSet<const Value *, 4> UniqueOperands;
1659 SmallVector<Value *, 4> Res;
1660 for (Value *Op : Ops) {
1661 if (isa<Constant>(Op) || !UniqueOperands.insert(Op).second ||
1662 !needsExtract(Op, VF))
1663 continue;
1664 Res.push_back(Op);
1665 }
1666 return Res;
1667 }
1668
1669public:
1670 /// The loop that we evaluate.
1671 Loop *TheLoop;
1672
1673 /// Predicated scalar evolution analysis.
1674 PredicatedScalarEvolution &PSE;
1675
1676 /// Loop Info analysis.
1677 LoopInfo *LI;
1678
1679 /// Vectorization legality.
1680 LoopVectorizationLegality *Legal;
1681
1682 /// Vector target information.
1683 const TargetTransformInfo &TTI;
1684
1685 /// Target Library Info.
1686 const TargetLibraryInfo *TLI;
1687
1688 /// Demanded bits analysis.
1689 DemandedBits *DB;
1690
1691 /// Assumption cache.
1692 AssumptionCache *AC;
1693
1694 /// Interface to emit optimization remarks.
1695 OptimizationRemarkEmitter *ORE;
1696
1697 const Function *TheFunction;
1698
1699 /// Loop Vectorize Hint.
1700 const LoopVectorizeHints *Hints;
1701
1702 /// The interleave access information contains groups of interleaved accesses
1703 /// with the same stride and close to each other.
1704 InterleavedAccessInfo &InterleaveInfo;
1705
1706 /// Values to ignore in the cost model.
1707 SmallPtrSet<const Value *, 16> ValuesToIgnore;
1708
1709 /// Values to ignore in the cost model when VF > 1.
1710 SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1711
1712 /// All element types found in the loop.
1713 SmallPtrSet<Type *, 16> ElementTypesInLoop;
1714
1715 /// The kind of cost that we are calculating.
1716 TTI::TargetCostKind CostKind;
1717
1718 /// Whether this loop should be optimized for size based on function attribute
1719 /// or profile information.
1720 bool OptForSize;
1721
1722 /// The highest VF possible for this loop, without using MaxBandwidth.
1723 FixedScalableVFPair MaxPermissibleVFWithoutMaxBW;
1724};
1725} // end namespace llvm
1726
1727namespace {
1728/// Helper struct to manage generating runtime checks for vectorization.
1729///
1730/// The runtime checks are created up-front in temporary blocks to allow better
1731/// cost estimation, and are un-linked from the existing IR. After deciding to
1732/// vectorize, the checks are moved back. If deciding not to vectorize, the
1733/// temporary blocks are completely removed.
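///
/// A minimal usage sketch (illustrative only, not a verbatim call sequence
/// from this file):
///   GeneratedRTChecks Checks(PSE, DT, LI, TTI, DL, CostKind);
///   Checks.create(L, LAI, UnionPred, VF, IC); // build checks up-front
///   InstructionCost C = Checks.getCost();     // feed the cost model
///   if (Checks.hasChecks()) { /* hook the blocks back in when vectorizing */ }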
1734class GeneratedRTChecks {
1735 /// Basic block which contains the generated SCEV checks, if any.
1736 BasicBlock *SCEVCheckBlock = nullptr;
1737
1738 /// The value representing the result of the generated SCEV checks. If it is
1739 /// nullptr no SCEV checks have been generated.
1740 Value *SCEVCheckCond = nullptr;
1741
1742 /// Basic block which contains the generated memory runtime checks, if any.
1743 BasicBlock *MemCheckBlock = nullptr;
1744
1745 /// The value representing the result of the generated memory runtime checks.
1746 /// If it is nullptr no memory runtime checks have been generated.
1747 Value *MemRuntimeCheckCond = nullptr;
1748
1749 DominatorTree *DT;
1750 LoopInfo *LI;
1751 TargetTransformInfo *TTI;
1752
1753 SCEVExpander SCEVExp;
1754 SCEVExpander MemCheckExp;
1755
1756 bool CostTooHigh = false;
1757
1758 Loop *OuterLoop = nullptr;
1759
1760 PredicatedScalarEvolution &PSE;
1761
1762 /// The kind of cost that we are calculating.
1763 TTI::TargetCostKind CostKind;
1764
1765public:
1766 GeneratedRTChecks(PredicatedScalarEvolution &PSE, DominatorTree *DT,
1767 LoopInfo *LI, TargetTransformInfo *TTI,
1768 const DataLayout &DL, TTI::TargetCostKind CostKind)
1769 : DT(DT), LI(LI), TTI(TTI),
1770 SCEVExp(*PSE.getSE(), DL, "scev.check", /*PreserveLCSSA=*/false),
1771 MemCheckExp(*PSE.getSE(), DL, "scev.check", /*PreserveLCSSA=*/false),
1772 PSE(PSE), CostKind(CostKind) {}
1773
1774 /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
1775 /// accurately estimate the cost of the runtime checks. The blocks are
1776 /// un-linked from the IR and are added back during vector code generation. If
1777 /// there is no vector code generation, the check blocks are removed
1778 /// completely.
1779 void create(Loop *L, const LoopAccessInfo &LAI,
1780 const SCEVPredicate &UnionPred, ElementCount VF, unsigned IC) {
1781
1782 // Hard cutoff to limit compile-time increase in case a very large number of
1783 // runtime checks needs to be generated.
1784 // TODO: Skip cutoff if the loop is guaranteed to execute, e.g. due to
1785 // profile info.
1786 CostTooHigh =
1787 LAI.getNumRuntimePointerChecks() > VectorizeMemoryCheckThreshold;
1788 if (CostTooHigh)
1789 return;
1790
1791 BasicBlock *LoopHeader = L->getHeader();
1792 BasicBlock *Preheader = L->getLoopPreheader();
1793
1794 // Use SplitBlock to create blocks for SCEV & memory runtime checks to
1795 // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
1796 // may be used by SCEVExpander. The blocks will be un-linked from their
1797 // predecessors and removed from LI & DT at the end of the function.
1798 if (!UnionPred.isAlwaysTrue()) {
1799 SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
1800 nullptr, "vector.scevcheck");
1801
1802 SCEVCheckCond = SCEVExp.expandCodeForPredicate(
1803 &UnionPred, SCEVCheckBlock->getTerminator());
1804 if (isa<Constant>(SCEVCheckCond)) {
1805 // Clean up directly after expanding the predicate to a constant, to
1806 // avoid further expansions re-using anything left over from SCEVExp.
1807 SCEVExpanderCleaner SCEVCleaner(SCEVExp);
1808 SCEVCleaner.cleanup();
1809 }
1810 }
1811
1812 const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
1813 if (RtPtrChecking.Need) {
1814 auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
1815 MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
1816 "vector.memcheck");
1817
1818 auto DiffChecks = RtPtrChecking.getDiffChecks();
1819 if (DiffChecks) {
1820 Value *RuntimeVF = nullptr;
1821 MemRuntimeCheckCond = addDiffRuntimeChecks(
1822 MemCheckBlock->getTerminator(), *DiffChecks, MemCheckExp,
1823 [VF, &RuntimeVF](IRBuilderBase &B, unsigned Bits) {
1824 if (!RuntimeVF)
1825 RuntimeVF = getRuntimeVF(B, B.getIntNTy(Bits), VF);
1826 return RuntimeVF;
1827 },
1828 IC);
1829 } else {
1830 MemRuntimeCheckCond = addRuntimeChecks(
1831 MemCheckBlock->getTerminator(), L, RtPtrChecking.getChecks(),
1832 MemCheckExp, VectorizerParams::HoistRuntimeChecks);
1833 }
1834 assert(MemRuntimeCheckCond &&
1835 "no RT checks generated although RtPtrChecking "
1836 "claimed checks are required");
1837 }
1838
1839 SCEVExp.eraseDeadInstructions(SCEVCheckCond);
1840
1841 if (!MemCheckBlock && !SCEVCheckBlock)
1842 return;
1843
1844 // Unhook the temporary block with the checks, update various places
1845 // accordingly.
1846 if (SCEVCheckBlock)
1847 SCEVCheckBlock->replaceAllUsesWith(Preheader);
1848 if (MemCheckBlock)
1849 MemCheckBlock->replaceAllUsesWith(Preheader);
1850
1851 if (SCEVCheckBlock) {
1852 SCEVCheckBlock->getTerminator()->moveBefore(
1853 Preheader->getTerminator()->getIterator());
1854 auto *UI = new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
1855 UI->setDebugLoc(DebugLoc::getTemporary());
1856 Preheader->getTerminator()->eraseFromParent();
1857 }
1858 if (MemCheckBlock) {
1859 MemCheckBlock->getTerminator()->moveBefore(
1860 Preheader->getTerminator()->getIterator());
1861 auto *UI = new UnreachableInst(Preheader->getContext(), MemCheckBlock);
1862 UI->setDebugLoc(DebugLoc::getTemporary());
1863 Preheader->getTerminator()->eraseFromParent();
1864 }
1865
1866 DT->changeImmediateDominator(LoopHeader, Preheader);
1867 if (MemCheckBlock) {
1868 DT->eraseNode(MemCheckBlock);
1869 LI->removeBlock(MemCheckBlock);
1870 }
1871 if (SCEVCheckBlock) {
1872 DT->eraseNode(SCEVCheckBlock);
1873 LI->removeBlock(SCEVCheckBlock);
1874 }
1875
1876 // Outer loop is used as part of the later cost calculations.
1877 OuterLoop = L->getParentLoop();
1878 }
1879
1880 InstructionCost getCost() {
1881 if (SCEVCheckBlock || MemCheckBlock)
1882 LLVM_DEBUG(dbgs() << "Calculating cost of runtime checks:\n");
1883
1884 if (CostTooHigh) {
1885 InstructionCost Cost;
1886 Cost.setInvalid();
1887 LLVM_DEBUG(dbgs() << " number of checks exceeded threshold\n");
1888 return Cost;
1889 }
1890
1891 InstructionCost RTCheckCost = 0;
1892 if (SCEVCheckBlock)
1893 for (Instruction &I : *SCEVCheckBlock) {
1894 if (SCEVCheckBlock->getTerminator() == &I)
1895 continue;
1896 InstructionCost C = TTI->getInstructionCost(&I, CostKind);
1897 LLVM_DEBUG(dbgs() << " " << C << " for " << I << "\n");
1898 RTCheckCost += C;
1899 }
1900 if (MemCheckBlock) {
1901 InstructionCost MemCheckCost = 0;
1902 for (Instruction &I : *MemCheckBlock) {
1903 if (MemCheckBlock->getTerminator() == &I)
1904 continue;
1905 InstructionCost C = TTI->getInstructionCost(&I, CostKind);
1906 LLVM_DEBUG(dbgs() << " " << C << " for " << I << "\n");
1907 MemCheckCost += C;
1908 }
1909
1910 // If the runtime memory checks are being created inside an outer loop
1911 // we should find out if these checks are outer loop invariant. If so,
1912 // the checks will likely be hoisted out and so the effective cost will
1913 // be reduced by the outer loop trip count.
1914 if (OuterLoop) {
1915 ScalarEvolution *SE = MemCheckExp.getSE();
1916 // TODO: If profitable, we could refine this further by analysing every
1917 // individual memory check, since there could be a mixture of loop
1918 // variant and invariant checks that mean the final condition is
1919 // variant.
1920 const SCEV *Cond = SE->getSCEV(MemRuntimeCheckCond);
1921 if (SE->isLoopInvariant(Cond, OuterLoop)) {
1922 // It seems reasonable to assume that we can reduce the effective
1923 // cost of the checks even when we know nothing about the trip
1924 // count. Assume that the outer loop executes at least twice.
1925 unsigned BestTripCount = 2;
1926
1927 // Get the best known TC estimate.
1928 if (auto EstimatedTC = getSmallBestKnownTC(
1929 PSE, OuterLoop, /* CanUseConstantMax = */ false))
1930 if (EstimatedTC->isFixed())
1931 BestTripCount = EstimatedTC->getFixedValue();
1932
1933 InstructionCost NewMemCheckCost = MemCheckCost / BestTripCount;
1934
1935 // Let's ensure the cost is always at least 1.
1936 NewMemCheckCost = std::max(NewMemCheckCost.getValue(),
1937 (InstructionCost::CostType)1);
1938
1939 if (BestTripCount > 1)
1940 LLVM_DEBUG(dbgs()
1941 << "We expect runtime memory checks to be hoisted "
1942 << "out of the outer loop. Cost reduced from "
1943 << MemCheckCost << " to " << NewMemCheckCost << '\n');
1944
1945 MemCheckCost = NewMemCheckCost;
1946 }
1947 }
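      // For illustration (hypothetical numbers): with MemCheckCost = 20 and a
      // best known outer-loop trip count of 5, the adjusted cost is
      // 20 / 5 = 4; the std::max above keeps the result from dropping below 1.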
1948
1949 RTCheckCost += MemCheckCost;
1950 }
1951
1952 if (SCEVCheckBlock || MemCheckBlock)
1953 LLVM_DEBUG(dbgs() << "Total cost of runtime checks: " << RTCheckCost
1954 << "\n");
1955
1956 return RTCheckCost;
1957 }
1958
1959 /// Remove the created SCEV & memory runtime check blocks & instructions, if
1960 /// unused.
1961 ~GeneratedRTChecks() {
1962 SCEVExpanderCleaner SCEVCleaner(SCEVExp);
1963 SCEVExpanderCleaner MemCheckCleaner(MemCheckExp);
1964 bool SCEVChecksUsed = !SCEVCheckBlock || !pred_empty(SCEVCheckBlock);
1965 bool MemChecksUsed = !MemCheckBlock || !pred_empty(MemCheckBlock);
1966 if (SCEVChecksUsed)
1967 SCEVCleaner.markResultUsed();
1968
1969 if (MemChecksUsed) {
1970 MemCheckCleaner.markResultUsed();
1971 } else {
1972 auto &SE = *MemCheckExp.getSE();
1973 // Memory runtime check generation creates compares that use expanded
1974 // values. Remove them before running the SCEVExpanderCleaners.
1975 for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
1976 if (MemCheckExp.isInsertedInstruction(&I))
1977 continue;
1978 SE.forgetValue(&I);
1979 I.eraseFromParent();
1980 }
1981 }
1982 MemCheckCleaner.cleanup();
1983 SCEVCleaner.cleanup();
1984
1985 if (!SCEVChecksUsed)
1986 SCEVCheckBlock->eraseFromParent();
1987 if (!MemChecksUsed)
1988 MemCheckBlock->eraseFromParent();
1989 }
1990
1991 /// Retrieves the SCEVCheckCond and SCEVCheckBlock that were generated as IR
1992 /// outside VPlan.
1993 std::pair<Value *, BasicBlock *> getSCEVChecks() const {
1994 using namespace llvm::PatternMatch;
1995 if (!SCEVCheckCond || match(SCEVCheckCond, m_ZeroInt()))
1996 return {nullptr, nullptr};
1997
1998 return {SCEVCheckCond, SCEVCheckBlock};
1999 }
2000
2001 /// Retrieves the MemCheckCond and MemCheckBlock that were generated as IR
2002 /// outside VPlan.
2003 std::pair<Value *, BasicBlock *> getMemRuntimeChecks() const {
2004 using namespace llvm::PatternMatch;
2005 if (MemRuntimeCheckCond && match(MemRuntimeCheckCond, m_ZeroInt()))
2006 return {nullptr, nullptr};
2007 return {MemRuntimeCheckCond, MemCheckBlock};
2008 }
2009
2010 /// Return true if any runtime checks have been added.
2011 bool hasChecks() const {
2012 return getSCEVChecks().first || getMemRuntimeChecks().first;
2013 }
2014};
2015} // namespace
2016
2022
2027
2028// Return true if \p OuterLp is an outer loop annotated with hints for explicit
2029// vectorization. The loop needs to be annotated with #pragma omp simd
2030// simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
2031// vector length information is not provided, vectorization is not considered
2032// explicit. Interleave hints are not allowed either. These limitations will be
2033// relaxed in the future.
2034// Please note that we are currently forced to abuse the pragma 'clang
2035// vectorize' semantics. This pragma provides *auto-vectorization hints*
2036// (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2037// provides *explicit vectorization hints* (LV can bypass legal checks and
2038// assume that vectorization is legal). However, both hints are implemented
2039// using the same metadata (llvm.loop.vectorize, processed by
2040// LoopVectorizeHints). This will be fixed in the future when the native IR
2041// representation for pragma 'omp simd' is introduced.
2042static bool isExplicitVecOuterLoop(Loop *OuterLp,
2043 OptimizationRemarkEmitter *ORE) {
2044 assert(!OuterLp->isInnermost() && "This is not an outer loop");
2045 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2046
2047 // Only outer loops with an explicit vectorization hint are supported.
2048 // Unannotated outer loops are ignored.
2049 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
2050 return false;
2051
2052 Function *Fn = OuterLp->getHeader()->getParent();
2053 if (!Hints.allowVectorization(Fn, OuterLp,
2054 true /*VectorizeOnlyWhenForced*/)) {
2055 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
2056 return false;
2057 }
2058
2059 if (Hints.getInterleave() > 1) {
2060 // TODO: Interleave support is future work.
2061 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
2062 "outer loops.\n");
2063 Hints.emitRemarkWithHints();
2064 return false;
2065 }
2066
2067 return true;
2068}
2069
2070static void collectSupportedLoops(Loop &L, LoopInfo *LI,
2071 OptimizationRemarkEmitter *ORE,
2072 SmallVectorImpl<Loop *> &V) {
2073 // Collect inner loops and outer loops without irreducible control flow. For
2074 // now, only collect outer loops that have explicit vectorization hints. If we
2075 // are stress testing the VPlan H-CFG construction, we collect the outermost
2076 // loop of every loop nest.
2077 if (L.isInnermost() || VPlanBuildStressTest ||
2078 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
2079 LoopBlocksRPO RPOT(&L);
2080 RPOT.perform(LI);
2081 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
2082 V.push_back(&L);
2083 // TODO: Collect inner loops inside marked outer loops in case
2084 // vectorization fails for the outer loop. Do not invoke
2085 // 'containsIrreducibleCFG' again for inner loops when the outer loop is
2086 // already known to be reducible. We can use an inherited attribute for
2087 // that.
2088 return;
2089 }
2090 }
2091 for (Loop *InnerL : L)
2092 collectSupportedLoops(*InnerL, LI, ORE, V);
2093}
2094
2095//===----------------------------------------------------------------------===//
2096// Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2097// LoopVectorizationCostModel and LoopVectorizationPlanner.
2098//===----------------------------------------------------------------------===//
2099
2100/// Compute the transformed value of Index at offset StartValue using step
2101/// StepValue.
2102/// For integer induction, returns StartValue + Index * StepValue.
2103/// For pointer induction, returns StartValue[Index * StepValue].
2104/// FIXME: The newly created binary instructions should contain nsw/nuw
2105/// flags, which can be found from the original scalar operations.
2106static Value *
2107emitTransformedIndex(IRBuilderBase &B, Value *Index, Value *StartValue,
2108 Value *Step,
2109 InductionDescriptor::InductionKind InductionKind,
2110 const BinaryOperator *InductionBinOp) {
2111 using namespace llvm::PatternMatch;
2112 Type *StepTy = Step->getType();
2113 Value *CastedIndex = StepTy->isIntegerTy()
2114 ? B.CreateSExtOrTrunc(Index, StepTy)
2115 : B.CreateCast(Instruction::SIToFP, Index, StepTy);
2116 if (CastedIndex != Index) {
2117 CastedIndex->setName(CastedIndex->getName() + ".cast");
2118 Index = CastedIndex;
2119 }
2120
2121 // Note: the IR at this point is broken. We cannot use SE to create any new
2122 // SCEV and then expand it, hoping that SCEV's simplification will give us
2123 // a more optimal code. Unfortunately, attempt of doing so on invalid IR may
2124 // lead to various SCEV crashes. So all we can do is to use builder and rely
2125 // on InstCombine for future simplifications. Here we handle some trivial
2126 // cases only.
2127 auto CreateAdd = [&B](Value *X, Value *Y) {
2128 assert(X->getType() == Y->getType() && "Types don't match!");
2129 if (match(X, m_ZeroInt()))
2130 return Y;
2131 if (match(Y, m_ZeroInt()))
2132 return X;
2133 return B.CreateAdd(X, Y);
2134 };
2135
2136 // We allow X to be a vector type, in which case Y will potentially be
2137 // splatted into a vector with the same element count.
2138 auto CreateMul = [&B](Value *X, Value *Y) {
2139 assert(X->getType()->getScalarType() == Y->getType() &&
2140 "Types don't match!");
2141 if (match(X, m_One()))
2142 return Y;
2143 if (match(Y, m_One()))
2144 return X;
2145 VectorType *XVTy = dyn_cast<VectorType>(X->getType());
2146 if (XVTy && !isa<VectorType>(Y->getType()))
2147 Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
2148 return B.CreateMul(X, Y);
2149 };
2150
2151 switch (InductionKind) {
2152 case InductionDescriptor::IK_IntInduction: {
2153 assert(!isa<VectorType>(Index->getType()) &&
2154 "Vector indices not supported for integer inductions yet");
2155 assert(Index->getType() == StartValue->getType() &&
2156 "Index type does not match StartValue type");
2157 if (isa<ConstantInt>(Step) && cast<ConstantInt>(Step)->isMinusOne())
2158 return B.CreateSub(StartValue, Index);
2159 auto *Offset = CreateMul(Index, Step);
2160 return CreateAdd(StartValue, Offset);
2161 }
2162 case InductionDescriptor::IK_PtrInduction:
2163 return B.CreatePtrAdd(StartValue, CreateMul(Index, Step));
2164 case InductionDescriptor::IK_FpInduction: {
2165 assert(!isa<VectorType>(Index->getType()) &&
2166 "Vector indices not supported for FP inductions yet");
2167 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
2168 assert(InductionBinOp &&
2169 (InductionBinOp->getOpcode() == Instruction::FAdd ||
2170 InductionBinOp->getOpcode() == Instruction::FSub) &&
2171 "Original bin op should be defined for FP induction");
2172
2173 Value *MulExp = B.CreateFMul(Step, Index);
2174 return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
2175 "induction");
2176 }
2177 case InductionDescriptor::IK_NoInduction:
2178 return nullptr;
2179 }
2180 llvm_unreachable("invalid enum");
2181}
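// For illustration (hypothetical values): for an integer induction with
// StartValue = 5 and Step = 3, an Index of 4 is transformed to
// 5 + 4 * 3 = 17; a pointer induction instead computes
// StartValue[Index * Step] via CreatePtrAdd.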
2182
2183static std::optional<unsigned> getMaxVScale(const Function &F,
2184 const TargetTransformInfo &TTI) {
2185 if (std::optional<unsigned> MaxVScale = TTI.getMaxVScale())
2186 return MaxVScale;
2187
2188 if (F.hasFnAttribute(Attribute::VScaleRange))
2189 return F.getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();
2190
2191 return std::nullopt;
2192}
2193
2194/// For the given VF and UF and maximum trip count computed for the loop, return
2195/// whether the induction variable might overflow in the vectorized loop. If not,
2196/// then we know a runtime overflow check always evaluates to false and can be
2197/// removed.
2198static bool isIndvarOverflowCheckKnownFalse(
2199 const LoopVectorizationCostModel *Cost,
2200 ElementCount VF, std::optional<unsigned> UF = std::nullopt) {
2201 // Always be conservative if we don't know the exact unroll factor.
2202 unsigned MaxUF = UF ? *UF : Cost->TTI.getMaxInterleaveFactor(VF);
2203
2204 IntegerType *IdxTy = Cost->Legal->getWidestInductionType();
2205 APInt MaxUIntTripCount = IdxTy->getMask();
2206
2207 // We know the runtime overflow check is known false iff the (max) trip-count
2208 // is known and (max) trip-count + (VF * UF) does not overflow in the type of
2209 // the vector loop induction variable.
2210 if (unsigned TC = Cost->PSE.getSmallConstantMaxTripCount()) {
2211 uint64_t MaxVF = VF.getKnownMinValue();
2212 if (VF.isScalable()) {
2213 std::optional<unsigned> MaxVScale =
2214 getMaxVScale(*Cost->TheFunction, Cost->TTI);
2215 if (!MaxVScale)
2216 return false;
2217 MaxVF *= *MaxVScale;
2218 }
2219
2220 return (MaxUIntTripCount - TC).ugt(MaxVF * MaxUF);
2221 }
2222
2223 return false;
2224}
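// For illustration (hypothetical numbers): with an i8 induction type
// (MaxUIntTripCount = 255), a known maximum trip count of 250 and
// VF * UF = 8, 255 - 250 = 5 is not greater than 8, so the function returns
// false and the overflow check cannot be removed.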
2225
2226// Return whether we allow using masked interleave-groups (for dealing with
2227// strided loads/stores that reside in predicated blocks, or for dealing
2228// with gaps).
2229static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2230 // If an override option has been passed in for interleaved accesses, use it.
2231 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2232 return EnableMaskedInterleavedMemAccesses;
2233
2234 return TTI.enableMaskedInterleavedAccessVectorization();
2235}
2236
2237static void introduceCheckBlockInVPlan(VPlan &Plan, VPBasicBlock *VectorPHVPBB,
2238 BasicBlock *CheckIRBB) {
2239 // Note: The block with the minimum trip-count check is already connected
2240 // during earlier VPlan construction.
2241 VPBlockBase *ScalarPH = Plan.getScalarPreheader();
2242 VPBlockBase *PreVectorPH = VectorPHVPBB->getSinglePredecessor();
2243 assert(PreVectorPH->getNumSuccessors() == 2 && "Expected 2 successors");
2244 assert(PreVectorPH->getSuccessors()[0] == ScalarPH && "Unexpected successor");
2245 VPIRBasicBlock *CheckVPIRBB = Plan.createVPIRBasicBlock(CheckIRBB);
2246 VPBlockUtils::insertOnEdge(PreVectorPH, VectorPHVPBB, CheckVPIRBB);
2247 PreVectorPH = CheckVPIRBB;
2248 VPBlockUtils::connectBlocks(PreVectorPH, ScalarPH);
2249 PreVectorPH->swapSuccessors();
2250
2251 // We just connected a new block to the scalar preheader. Update all
2252 // VPPhis by adding an incoming value for it, replicating the last value.
2253 unsigned NumPredecessors = ScalarPH->getNumPredecessors();
2254 for (VPRecipeBase &R : cast<VPBasicBlock>(ScalarPH)->phis()) {
2255 assert(isa<VPPhi>(&R) && "Phi expected to be VPPhi");
2256 assert(cast<VPPhi>(&R)->getNumIncoming() == NumPredecessors - 1 &&
2257 "must have incoming values for all operands");
2258 R.addOperand(R.getOperand(NumPredecessors - 2));
2259 }
2260}
2261
2262Value *InnerLoopVectorizer::createIterationCountCheck(
2263 BasicBlock *VectorPH, ElementCount VF, unsigned UF) const {
2264 // Generate code to check if the loop's trip count is less than VF * UF, or
2265 // equal to it in case a scalar epilogue is required; this implies that the
2266 // vector trip count is zero. This check also covers the case where adding one
2267 // to the backedge-taken count overflowed leading to an incorrect trip count
2268 // of zero. In this case we will also jump to the scalar loop.
2269 auto P = Cost->requiresScalarEpilogue(VF.isVector()) ? ICmpInst::ICMP_ULE
2270 : ICmpInst::ICMP_ULT;
2271
2272 // Reuse existing vector loop preheader for TC checks.
2273 // Note that new preheader block is generated for vector loop.
2274 BasicBlock *const TCCheckBlock = VectorPH;
2275 IRBuilder<InstSimplifyFolder> Builder(
2276 TCCheckBlock->getContext(),
2277 InstSimplifyFolder(TCCheckBlock->getDataLayout()));
2278 Builder.SetInsertPoint(TCCheckBlock->getTerminator());
2279
2280 // If tail is to be folded, vector loop takes care of all iterations.
2281 Value *Count = getTripCount();
2282 Type *CountTy = Count->getType();
2283 Value *CheckMinIters = Builder.getFalse();
2284 auto CreateStep = [&]() -> Value * {
2285 // Create step with max(MinProTripCount, UF * VF).
2286 if (UF * VF.getKnownMinValue() >= MinProfitableTripCount.getKnownMinValue())
2287 return createStepForVF(Builder, CountTy, VF, UF);
2288
2289 Value *MinProfTC =
2290 Builder.CreateElementCount(CountTy, MinProfitableTripCount);
2291 if (!VF.isScalable())
2292 return MinProfTC;
2293 return Builder.CreateBinaryIntrinsic(
2294 Intrinsic::umax, MinProfTC, createStepForVF(Builder, CountTy, VF, UF));
2295 };
2296
2297 TailFoldingStyle Style = Cost->getTailFoldingStyle();
2298 if (Style == TailFoldingStyle::None) {
2299 Value *Step = CreateStep();
2300 ScalarEvolution &SE = *PSE.getSE();
2301 // TODO: Emit unconditional branch to vector preheader instead of
2302 // conditional branch with known condition.
2303 const SCEV *TripCountSCEV = SE.applyLoopGuards(SE.getSCEV(Count), OrigLoop);
2304 // Check if the trip count is < the step.
2305 if (SE.isKnownPredicate(P, TripCountSCEV, SE.getSCEV(Step))) {
2306 // TODO: Ensure step is at most the trip count when determining max VF and
2307 // UF, w/o tail folding.
2308 CheckMinIters = Builder.getTrue();
2309 } else if (!SE.isKnownPredicate(CmpInst::getInversePredicate(P),
2310 TripCountSCEV, SE.getSCEV(Step))) {
2311 // Generate the minimum iteration check only if we cannot prove the
2312 // check is known to be true, or known to be false.
2313 CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
2314 } // else step known to be < trip count, use CheckMinIters preset to false.
2315 } else if (VF.isScalable() && !TTI->isVScaleKnownToBeAPowerOfTwo() &&
2316 !isIndvarOverflowCheckKnownFalse(Cost, VF, UF) &&
2317 Style != TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck) {
2318 // vscale is not necessarily a power-of-2, which means we cannot guarantee
2319 // an overflow to zero when updating induction variables and so an
2320 // additional overflow check is required before entering the vector loop.
2321
2322 // Get the maximum unsigned value for the type.
2323 Value *MaxUIntTripCount =
2324 ConstantInt::get(CountTy, cast<IntegerType>(CountTy)->getMask());
2325 Value *LHS = Builder.CreateSub(MaxUIntTripCount, Count);
2326
2327 // Don't execute the vector loop if (UMax - n) < (VF * UF).
2328 CheckMinIters = Builder.CreateICmp(ICmpInst::ICMP_ULT, LHS, CreateStep());
2329 }
2330 return CheckMinIters;
2331}
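// For illustration (hypothetical IR shape): with an unknown trip count %n,
// VF = 4, UF = 2, no tail folding, and a minimum profitable trip count not
// exceeding VF * UF, the code above emits the equivalent of
//   %min.iters.check = icmp ult i64 %n, 8
// and the returned value is later used to branch to the scalar loop.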
2332
2333/// Replace \p VPBB with a VPIRBasicBlock wrapping \p IRBB. All recipes from \p
2334/// VPBB are moved to the end of the newly created VPIRBasicBlock. All
2335/// predecessors and successors of VPBB, if any, are rewired to the new
2336/// VPIRBasicBlock. If \p VPBB may be unreachable, \p Plan must be passed.
2338 BasicBlock *IRBB,
2339 VPlan *Plan = nullptr) {
2340 if (!Plan)
2341 Plan = VPBB->getPlan();
2342 VPIRBasicBlock *IRVPBB = Plan->createVPIRBasicBlock(IRBB);
2343 auto IP = IRVPBB->begin();
2344 for (auto &R : make_early_inc_range(VPBB->phis()))
2345 R.moveBefore(*IRVPBB, IP);
2346
2347 for (auto &R :
2348 make_early_inc_range(make_range(IP, VPBB->end())))
2349 R.moveBefore(*IRVPBB, IRVPBB->end());
2350
2351 VPBlockUtils::reassociateBlocks(VPBB, IRVPBB);
2352 // VPBB is now dead and will be cleaned up when the plan gets destroyed.
2353 return IRVPBB;
2354}
2355
2356BasicBlock *InnerLoopVectorizer::createScalarPreheader(StringRef Prefix) {
2357 BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
2358 assert(VectorPH && "Invalid loop structure");
2359 assert((OrigLoop->getUniqueLatchExitBlock() ||
2360 Cost->requiresScalarEpilogue(VF.isVector())) &&
2361 "loops not exiting via the latch without required epilogue?");
2362
2363 // NOTE: The Plan's scalar preheader VPBB isn't replaced with a VPIRBasicBlock
2364 // wrapping the newly created scalar preheader here at the moment, because the
2365 // Plan's scalar preheader may be unreachable at this point. Instead it is
2366 // replaced in executePlan.
2367 return SplitBlock(VectorPH, VectorPH->getTerminator(), DT, LI, nullptr,
2368 Twine(Prefix) + "scalar.ph");
2369}
2370
2371/// Return the expanded step for \p ID using \p ExpandedSCEVs to look up SCEV
2372/// expansion results.
2374 const SCEV2ValueTy &ExpandedSCEVs) {
2375 const SCEV *Step = ID.getStep();
2376 if (auto *C = dyn_cast<SCEVConstant>(Step))
2377 return C->getValue();
2378 if (auto *U = dyn_cast<SCEVUnknown>(Step))
2379 return U->getValue();
2380 Value *V = ExpandedSCEVs.lookup(Step);
2381 assert(V && "SCEV must be expanded at this point");
2382 return V;
2383}
2384
2385/// Knowing that loop \p L executes a single vector iteration, add instructions
2386/// that will get simplified and thus should not have any cost to \p
2387/// InstsToIgnore.
2390 SmallPtrSetImpl<Instruction *> &InstsToIgnore) {
2391 auto *Cmp = L->getLatchCmpInst();
2392 if (Cmp)
2393 InstsToIgnore.insert(Cmp);
2394 for (const auto &KV : IL) {
2395 // Extract the key by hand so that it can be used in the lambda below. Note
2396 // that captured structured bindings are a C++20 extension.
2397 const PHINode *IV = KV.first;
2398
2399 // Get next iteration value of the induction variable.
2400 Instruction *IVInst =
2401 cast<Instruction>(IV->getIncomingValueForBlock(L->getLoopLatch()));
2402 if (all_of(IVInst->users(),
2403 [&](const User *U) { return U == IV || U == Cmp; }))
2404 InstsToIgnore.insert(IVInst);
2405 }
2406}
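// For illustration: if the latch of such a single-vector-iteration loop is
//   %iv.next = add i64 %iv, 1
//   %exitcond = icmp eq i64 %iv.next, %n
// and %iv.next is used only by the phi and the compare, both the compare and
// the increment are added to InstsToIgnore, as they get simplified away.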
2407
2408BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
2409 // Create a new IR basic block for the scalar preheader.
2410 BasicBlock *ScalarPH = createScalarPreheader("");
2411 return ScalarPH->getSinglePredecessor();
2412}
2413
2414namespace {
2415
2416struct CSEDenseMapInfo {
2417 static bool canHandle(const Instruction *I) {
2418 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
2419 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
2420 }
2421
2422 static inline Instruction *getEmptyKey() {
2423 return DenseMapInfo<Instruction *>::getEmptyKey();
2424 }
2425
2426 static inline Instruction *getTombstoneKey() {
2427 return DenseMapInfo<Instruction *>::getTombstoneKey();
2428 }
2429
2430 static unsigned getHashValue(const Instruction *I) {
2431 assert(canHandle(I) && "Unknown instruction!");
2432 return hash_combine(I->getOpcode(),
2433 hash_combine_range(I->operand_values()));
2434 }
2435
2436 static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
2437 if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
2438 LHS == getTombstoneKey() || RHS == getTombstoneKey())
2439 return LHS == RHS;
2440 return LHS->isIdenticalTo(RHS);
2441 }
2442};
2443
2444} // end anonymous namespace
2445
2446/// FIXME: This legacy common-subexpression-elimination routine is scheduled for
2447/// removal, in favor of the VPlan-based one.
2448static void legacyCSE(BasicBlock *BB) {
2449 // Perform simple cse.
2450 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
2451 for (Instruction &In : llvm::make_early_inc_range(*BB)) {
2452 if (!CSEDenseMapInfo::canHandle(&In))
2453 continue;
2454
2455 // Check if we can replace this instruction with any of the
2456 // visited instructions.
2457 if (Instruction *V = CSEMap.lookup(&In)) {
2458 In.replaceAllUsesWith(V);
2459 In.eraseFromParent();
2460 continue;
2461 }
2462
2463 CSEMap[&In] = &In;
2464 }
2465}
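// For illustration: given two identical instructions in the block, e.g.
//   %e1 = extractelement <4 x i32> %v, i32 0
//   %e2 = extractelement <4 x i32> %v, i32 0
// the second is replaced by the first and erased, since CSEDenseMapInfo
// treats structurally identical instructions as equal keys.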
2466
2467/// This function attempts to return a value that represents the ElementCount
2468/// at runtime. For fixed-width VFs we know this precisely at compile
2469/// time, but for scalable VFs we calculate it based on an estimate of the
2470/// vscale value.
2472 std::optional<unsigned> VScale) {
2473 unsigned EstimatedVF = VF.getKnownMinValue();
2474 if (VF.isScalable())
2475 if (VScale)
2476 EstimatedVF *= *VScale;
2477 assert(EstimatedVF >= 1 && "Estimated VF shouldn't be less than 1");
2478 return EstimatedVF;
2479}
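// For illustration (hypothetical values): for VF = vscale x 4 with an
// estimated vscale of 2, the returned estimate is 4 * 2 = 8; for a fixed
// VF = 8, the result is exactly 8.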
2480
2481InstructionCost
2482LoopVectorizationCostModel::getVectorCallCost(CallInst *CI,
2483 ElementCount VF) const {
2484 // We only need to calculate a cost if the VF is scalar; for actual vectors
2485 // we should already have a pre-calculated cost at each VF.
2486 if (!VF.isScalar())
2487 return getCallWideningDecision(CI, VF).Cost;
2488
2489 Type *RetTy = CI->getType();
2490 if (RecurrenceDescriptor::isFMulAddIntrinsic(CI))
2491 if (auto RedCost = getReductionPatternCost(CI, VF, RetTy))
2492 return *RedCost;
2493
2494 SmallVector<Type *, 4> Tys;
2495 for (auto &ArgOp : CI->args())
2496 Tys.push_back(ArgOp->getType());
2497
2498 InstructionCost ScalarCallCost =
2499 TTI.getCallInstrCost(CI->getCalledFunction(), RetTy, Tys, CostKind);
2500
2501 // If this is an intrinsic we may have a lower cost for it.
2502 if (getVectorIntrinsicIDForCall(CI, TLI)) {
2503 InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
2504 return std::min(ScalarCallCost, IntrinsicCost);
2505 }
2506 return ScalarCallCost;
2507}
2508
2509static Type *maybeVectorizeType(Type *Ty, ElementCount VF) {
2510 if (VF.isScalar() || !canVectorizeTy(Ty))
2511 return Ty;
2512 return toVectorizedTy(Ty, VF);
2513}
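// For illustration: maybeVectorizeType(i32, VF = 4) yields <4 x i32>, while
// a scalar VF, or a type that cannot be vectorized, returns the type
// unchanged.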
2514
2515InstructionCost
2516LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
2517 ElementCount VF) const {
2518 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
2519 assert(ID && "Expected intrinsic call!");
2520 Type *RetTy = maybeVectorizeType(CI->getType(), VF);
2521 FastMathFlags FMF;
2522 if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
2523 FMF = FPMO->getFastMathFlags();
2524
2525 SmallVector<const Value *> Arguments(CI->args());
2526 FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
2527 SmallVector<Type *> ParamTys;
2528 std::transform(FTy->param_begin(), FTy->param_end(),
2529 std::back_inserter(ParamTys),
2530 [&](Type *Ty) { return maybeVectorizeType(Ty, VF); });
2531
2532 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
2535 return TTI.getIntrinsicInstrCost(CostAttrs, CostKind);
2536}
2537
2538void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
2539 // Fix widened non-induction PHIs by setting up the PHI operands.
2540 fixNonInductionPHIs(State);
2541
2542 // Don't apply optimizations below when no (vector) loop remains, as they all
2543 // require one at the moment.
2544 VPBasicBlock *HeaderVPBB =
2545 vputils::getFirstLoopHeader(*State.Plan, State.VPDT);
2546 if (!HeaderVPBB)
2547 return;
2548
2549 BasicBlock *HeaderBB = State.CFG.VPBB2IRBB[HeaderVPBB];
2550
2551 // Remove redundant induction instructions.
2552 legacyCSE(HeaderBB);
2553}
2554
2555void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
2556 auto Iter = vp_depth_first_shallow(Plan.getEntry());
2557 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
2558 for (VPRecipeBase &P : VPBB->phis()) {
2559 auto *VPPhi = dyn_cast<VPWidenPHIRecipe>(&P);
2560 if (!VPPhi)
2561 continue;
2562 PHINode *NewPhi = cast<PHINode>(State.get(VPPhi));
2563 // Make sure the builder has a valid insert point.
2564 Builder.SetInsertPoint(NewPhi);
2565 for (const auto &[Inc, VPBB] : VPPhi->incoming_values_and_blocks())
2566 NewPhi->addIncoming(State.get(Inc), State.CFG.VPBB2IRBB[VPBB]);
2567 }
2568 }
2569}
2570
2571void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
2572 // We should not collect Scalars more than once per VF. Right now, this
2573 // function is called from collectUniformsAndScalars(), which already does
2574 // this check. Collecting Scalars for VF=1 does not make any sense.
2575 assert(VF.isVector() && !Scalars.contains(VF) &&
2576 "This function should not be visited twice for the same VF");
2577
2578 // This avoids any chances of creating a REPLICATE recipe during planning
2579 // since that would result in generation of scalarized code during execution,
2580 // which is not supported for scalable vectors.
2581 if (VF.isScalable()) {
2582 Scalars[VF].insert_range(Uniforms[VF]);
2583 return;
2584 }
2585
2586 SetVector<Instruction *> Worklist;
2587
2588 // These sets are used to seed the analysis with pointers used by memory
2589 // accesses that will remain scalar.
2590 SetVector<Instruction *> ScalarPtrs;
2591 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
2592 auto *Latch = TheLoop->getLoopLatch();
2593
2594 // A helper that returns true if the use of Ptr by MemAccess will be scalar.
2595 // The pointer operands of loads and stores will be scalar as long as the
2596 // memory access is not a gather or scatter operation. The value operand of a
2597 // store will remain scalar if the store is scalarized.
2598 auto IsScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
2599 InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
2600 assert(WideningDecision != CM_Unknown &&
2601 "Widening decision should be ready at this moment");
2602 if (auto *Store = dyn_cast<StoreInst>(MemAccess))
2603 if (Ptr == Store->getValueOperand())
2604 return WideningDecision == CM_Scalarize;
2605 assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
2606 "Ptr is neither a value or pointer operand");
2607 return WideningDecision != CM_GatherScatter;
2608 };
2609
2610 // A helper that returns true if the given value is a getelementptr
2611 // instruction contained in the loop.
2612 auto IsLoopVaryingGEP = [&](Value *V) {
2613 return isa<GetElementPtrInst>(V) && !TheLoop->isLoopInvariant(V);
2614 };
2615
2616 // A helper that evaluates a memory access's use of a pointer. If the use will
2617 // be a scalar use and the pointer is only used by memory accesses, we place
2618 // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
2619 // PossibleNonScalarPtrs.
2620 auto EvaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
2621 // We only care about loop-varying getelementptr instructions contained in
2622 // the loop, as checked by IsLoopVaryingGEP.
2623 if (!IsLoopVaryingGEP(Ptr))
2624 return;
2625
2626 // If the pointer has already been identified as scalar (e.g., if it was
2627 // also identified as uniform), there's nothing to do.
2628 auto *I = cast<Instruction>(Ptr);
2629 if (Worklist.count(I))
2630 return;
2631
2632 // If the use of the pointer will be a scalar use, and all users of the
2633 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
2634 // place the pointer in PossibleNonScalarPtrs.
2635 if (IsScalarUse(MemAccess, Ptr) &&
2636 all_of(I->users(), IsaPred<LoadInst, StoreInst>))
2637 ScalarPtrs.insert(I);
2638 else
2639 PossibleNonScalarPtrs.insert(I);
2640 };
2641
2642 // We seed the scalars analysis with two classes of instructions: (1)
2643 // instructions marked uniform-after-vectorization and (2) getelementptr
2644 // and (pointer) phi instructions used by memory accesses requiring a
2645 // scalar use.
2646 //
2647 // (1) Add to the worklist all instructions that have been identified as
2648 // uniform-after-vectorization.
2649 Worklist.insert_range(Uniforms[VF]);
2650
2651 // (2) Add to the worklist all bitcast and getelementptr instructions used by
2652 // memory accesses requiring a scalar use. The pointer operands of loads and
2653 // stores will be scalar unless the operation is a gather or scatter.
2654 // The value operand of a store will remain scalar if the store is scalarized.
2655 for (auto *BB : TheLoop->blocks())
2656 for (auto &I : *BB) {
2657 if (auto *Load = dyn_cast<LoadInst>(&I)) {
2658 EvaluatePtrUse(Load, Load->getPointerOperand());
2659 } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
2660 EvaluatePtrUse(Store, Store->getPointerOperand());
2661 EvaluatePtrUse(Store, Store->getValueOperand());
2662 }
2663 }
2664 for (auto *I : ScalarPtrs)
2665 if (!PossibleNonScalarPtrs.count(I)) {
2666 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
2667 Worklist.insert(I);
2668 }
2669
2670 // Insert the forced scalars.
2671 // FIXME: Currently VPWidenPHIRecipe() often creates a dead vector
2672 // induction variable when the PHI user is scalarized.
2673 auto ForcedScalar = ForcedScalars.find(VF);
2674 if (ForcedScalar != ForcedScalars.end())
2675 for (auto *I : ForcedScalar->second) {
2676 LLVM_DEBUG(dbgs() << "LV: Found (forced) scalar instruction: " << *I << "\n");
2677 Worklist.insert(I);
2678 }
2679
2680 // Expand the worklist by looking through any getelementptr instructions
2681 // we've already identified as scalar. This is similar to the expansion step
2682 // in collectLoopUniforms(); however, here we're only expanding to include
2683 // additional getelementptr instructions.
2684 unsigned Idx = 0;
2685 while (Idx != Worklist.size()) {
2686 Instruction *Dst = Worklist[Idx++];
2687 if (!IsLoopVaryingGEP(Dst->getOperand(0)))
2688 continue;
2689 auto *Src = cast<Instruction>(Dst->getOperand(0));
2690 if (llvm::all_of(Src->users(), [&](User *U) -> bool {
2691 auto *J = cast<Instruction>(U);
2692 return !TheLoop->contains(J) || Worklist.count(J) ||
2693 ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
2694 IsScalarUse(J, Src));
2695 })) {
2696 Worklist.insert(Src);
2697 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
2698 }
2699 }
2700
2701 // An induction variable will remain scalar if all users of the induction
2702 // variable and induction variable update remain scalar.
2703 for (const auto &Induction : Legal->getInductionVars()) {
2704 auto *Ind = Induction.first;
2705 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
2706
2707 // If tail-folding is applied, the primary induction variable will be used
2708 // to feed a vector compare.
2709 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
2710 continue;
2711
2712 // Returns true if \p Indvar is a pointer induction that is used directly by
2713 // load/store instruction \p I.
2714 auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar,
2715 Instruction *I) {
2716 return Induction.second.getKind() ==
2717 InductionDescriptor::IK_PtrInduction &&
2718 (isa<LoadInst>(I) || isa<StoreInst>(I)) &&
2719 Indvar == getLoadStorePointerOperand(I) && IsScalarUse(I, Indvar);
2720 };
2721
2722 // Determine if all users of the induction variable are scalar after
2723 // vectorization.
2724 bool ScalarInd = all_of(Ind->users(), [&](User *U) -> bool {
2725 auto *I = cast<Instruction>(U);
2726 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
2727 IsDirectLoadStoreFromPtrIndvar(Ind, I);
2728 });
2729 if (!ScalarInd)
2730 continue;
2731
2732 // If the induction variable update is a fixed-order recurrence, neither the
2733 // induction variable nor its update should be marked scalar after
2734 // vectorization.
2735 auto *IndUpdatePhi = dyn_cast<PHINode>(IndUpdate);
2736 if (IndUpdatePhi && Legal->isFixedOrderRecurrence(IndUpdatePhi))
2737 continue;
2738
2739 // Determine if all users of the induction variable update instruction are
2740 // scalar after vectorization.
2741 bool ScalarIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
2742 auto *I = cast<Instruction>(U);
2743 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
2744 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
2745 });
2746 if (!ScalarIndUpdate)
2747 continue;
2748
2749 // The induction variable and its update instruction will remain scalar.
2750 Worklist.insert(Ind);
2751 Worklist.insert(IndUpdate);
2752 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
2753 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
2754 << "\n");
2755 }
2756
2757 Scalars[VF].insert_range(Worklist);
2758}
2759
2761 Instruction *I, ElementCount VF) const {
2762 if (!isPredicatedInst(I))
2763 return false;
2764
2765 // Do we have a non-scalar lowering for this predicated
2766 // instruction? No - it is scalar with predication.
2767 switch(I->getOpcode()) {
2768 default:
2769 return true;
2770 case Instruction::Call:
2771 if (VF.isScalar())
2772 return true;
2773 return getCallWideningDecision(cast<CallInst>(I), VF).Kind == CM_Scalarize;
2774 case Instruction::Load:
2775 case Instruction::Store: {
2776 auto *Ptr = getLoadStorePointerOperand(I);
2777 auto *Ty = getLoadStoreType(I);
2778 unsigned AS = getLoadStoreAddressSpace(I);
2779 Type *VTy = Ty;
2780 if (VF.isVector())
2781 VTy = VectorType::get(Ty, VF);
2782 const Align Alignment = getLoadStoreAlignment(I);
2783 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment, AS) ||
2784 TTI.isLegalMaskedGather(VTy, Alignment))
2785 : !(isLegalMaskedStore(Ty, Ptr, Alignment, AS) ||
2786 TTI.isLegalMaskedScatter(VTy, Alignment));
2787 }
2788 case Instruction::UDiv:
2789 case Instruction::SDiv:
2790 case Instruction::SRem:
2791 case Instruction::URem: {
2792 // We have the option to use the safe-divisor idiom to avoid predication.
2793 // The cost based decision here will always select safe-divisor for
2794 // scalable vectors as scalarization isn't legal.
2795 const auto [ScalarCost, SafeDivisorCost] = getDivRemSpeculationCost(I, VF);
2796 return isDivRemScalarWithPredication(ScalarCost, SafeDivisorCost);
2797 }
2798 }
2799}
2800
2801// TODO: Fold into LoopVectorizationLegality::isMaskRequired.
2802bool LoopVectorizationCostModel::isPredicatedInst(Instruction *I) const {
2803 // TODO: We can use the loop-preheader as context point here and get
2804 // context sensitive reasoning for isSafeToSpeculativelyExecute.
2805 if (isSafeToSpeculativelyExecute(I) ||
2806 (isa<LoadInst, StoreInst, CallInst>(I) && !Legal->isMaskRequired(I)) ||
2807 isa<BranchInst, SwitchInst, PHINode, AllocaInst>(I))
2808 return false;
2809
2810 // If the instruction was executed conditionally in the original scalar loop,
2811 // predication is needed with a mask whose lanes are all possibly inactive.
2812 if (Legal->blockNeedsPredication(I->getParent()))
2813 return true;
2814
2815 // If we're not folding the tail by masking, predication is unnecessary.
2816 if (!foldTailByMasking())
2817 return false;
2818
2819 // All that remain are instructions with side-effects originally executed in
2820 // the loop unconditionally, but now execute under a tail-fold mask (only)
2821 // having at least one active lane (the first). If the side-effects of the
2822 // instruction are invariant, executing it w/o (the tail-folding) mask is safe
2823 // - it will cause the same side-effects as when masked.
2824 switch(I->getOpcode()) {
2825 default:
2826 llvm_unreachable(
2827 "instruction should have been considered by earlier checks");
2828 case Instruction::Call:
2829 // Side-effects of a Call are assumed to be non-invariant, needing a
2830 // (fold-tail) mask.
2831 assert(Legal->isMaskRequired(I) &&
2832 "should have returned earlier for calls not needing a mask");
2833 return true;
2834 case Instruction::Load:
2835 // If the address is loop invariant no predication is needed.
2836 return !Legal->isInvariant(getLoadStorePointerOperand(I));
2837 case Instruction::Store: {
2838 // For stores, we need to prove not only speculation safety (which follows
2839 // from the same argument as loads), but also that the value being stored
2840 // is correct. The easiest form of the latter is to require that all values
2841 // stored are the same.
2842 return !(Legal->isInvariant(getLoadStorePointerOperand(I)) &&
2843 TheLoop->isLoopInvariant(cast<StoreInst>(I)->getValueOperand()));
2844 }
2845 case Instruction::UDiv:
2846 case Instruction::SDiv:
2847 case Instruction::SRem:
2848 case Instruction::URem:
2849 // If the divisor is loop-invariant no predication is needed.
2850 return !Legal->isInvariant(I->getOperand(1));
2851 }
2852}
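// For illustration: under tail folding, a store of a loop-invariant value to
// a loop-invariant address needs no predication (running it with an all-false
// mask would store the same value anyway), whereas a store to a loop-varying
// address must stay predicated.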
2853
2854std::pair<InstructionCost, InstructionCost>
2855LoopVectorizationCostModel::getDivRemSpeculationCost(Instruction *I,
2856 ElementCount VF) const {
2857 assert(I->getOpcode() == Instruction::UDiv ||
2858 I->getOpcode() == Instruction::SDiv ||
2859 I->getOpcode() == Instruction::SRem ||
2860 I->getOpcode() == Instruction::URem);
2861 assert(!isSafeToSpeculativelyExecute(I));
2862
2863 // Scalarization isn't legal for scalable vector types
2864 InstructionCost ScalarizationCost = InstructionCost::getInvalid();
2865 if (!VF.isScalable()) {
2866 // Get the scalarization cost and scale this amount by the probability of
2867 // executing the predicated block. If the instruction is not predicated,
2868 // we fall through to the next case.
2869 ScalarizationCost = 0;
2870
2871 // These instructions have a non-void type, so account for the phi nodes
2872 // that we will create. This cost is likely to be zero. The phi node
2873 // cost, if any, should be scaled by the block probability because it
2874 // models a copy at the end of each predicated block.
2875 ScalarizationCost +=
2876 VF.getFixedValue() * TTI.getCFInstrCost(Instruction::PHI, CostKind);
2877
2878 // The cost of the non-predicated instruction.
2879 ScalarizationCost +=
2880 VF.getFixedValue() *
2881 TTI.getArithmeticInstrCost(I->getOpcode(), I->getType(), CostKind);
2882
2883 // The cost of insertelement and extractelement instructions needed for
2884 // scalarization.
2885 ScalarizationCost += getScalarizationOverhead(I, VF);
2886
2887 // Scale the cost by the probability of executing the predicated blocks.
2888 // This assumes the predicated block for each vector lane is equally
2889 // likely.
2890 ScalarizationCost = ScalarizationCost / getPredBlockCostDivisor(CostKind);
2891 }
2892
2893 InstructionCost SafeDivisorCost = 0;
2894 auto *VecTy = toVectorTy(I->getType(), VF);
2895 // The cost of the select guard to ensure all lanes are well defined
2896 // after we speculate above any internal control flow.
2897 SafeDivisorCost +=
2898 TTI.getCmpSelInstrCost(Instruction::Select, VecTy,
2899 toVectorTy(Type::getInt1Ty(I->getContext()), VF),
2901
2902 SmallVector<const Value *, 4> Operands(I->operand_values());
2903 SafeDivisorCost += TTI.getArithmeticInstrCost(
2904 I->getOpcode(), VecTy, CostKind,
2905 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
2906 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
2907 Operands, I);
2908 return {ScalarizationCost, SafeDivisorCost};
2909}
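// For illustration (hypothetical accounting): for a udiv at VF = 4, the
// scalarization estimate above sums four scalar divides, four phis and the
// insert/extract overhead, then divides by getPredBlockCostDivisor; the
// safe-divisor estimate is one vector select plus one vector udiv.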
2910
2911bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
2912 Instruction *I, ElementCount VF) const {
2913 assert(isAccessInterleaved(I) && "Expecting interleaved access.");
2914 assert(getWideningDecision(I, VF) == CM_Unknown &&
2915 "Decision should not be set yet.");
2916 auto *Group = getInterleavedAccessGroup(I);
2917 assert(Group && "Must have a group.");
2918 unsigned InterleaveFactor = Group->getFactor();
2919
2920 // If the instruction's allocated size doesn't equal its type size, it
2921 // requires padding and will be scalarized.
2922 auto &DL = I->getDataLayout();
2923 auto *ScalarTy = getLoadStoreType(I);
2924 if (hasIrregularType(ScalarTy, DL))
2925 return false;
2926
2927 // For scalable vectors, the interleave factors must be <= 8 since we require
2928 // the (de)interleaveN intrinsics instead of shufflevectors.
2929 if (VF.isScalable() && InterleaveFactor > 8)
2930 return false;
2931
2932 // If the group involves a non-integral pointer, we may not be able to
2933 // losslessly cast all values to a common type.
2934 bool ScalarNI = DL.isNonIntegralPointerType(ScalarTy);
2935 for (unsigned Idx = 0; Idx < InterleaveFactor; Idx++) {
2936 Instruction *Member = Group->getMember(Idx);
2937 if (!Member)
2938 continue;
2939 auto *MemberTy = getLoadStoreType(Member);
2940 bool MemberNI = DL.isNonIntegralPointerType(MemberTy);
2941 // Don't coerce non-integral pointers to integers or vice versa.
2942 if (MemberNI != ScalarNI)
2943 // TODO: Consider adding special nullptr value case here
2944 return false;
2945 if (MemberNI && ScalarNI &&
2946 ScalarTy->getPointerAddressSpace() !=
2947 MemberTy->getPointerAddressSpace())
2948 return false;
2949 }
2950
2951 // Check if masking is required.
2952 // A Group may need masking for one of two reasons: it resides in a block that
2953 // needs predication, or it was decided to use masking to deal with gaps
2954 // (either a gap at the end of a load-access that may result in a speculative
2955 // load, or any gaps in a store-access).
2956 bool PredicatedAccessRequiresMasking =
2957 blockNeedsPredicationForAnyReason(I->getParent()) &&
2958 Legal->isMaskRequired(I);
2959 bool LoadAccessWithGapsRequiresEpilogMasking =
2960 isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
2961 !isScalarEpilogueAllowed();
2962 bool StoreAccessWithGapsRequiresMasking =
2963 isa<StoreInst>(I) && !Group->isFull();
2964 if (!PredicatedAccessRequiresMasking &&
2965 !LoadAccessWithGapsRequiresEpilogMasking &&
2966 !StoreAccessWithGapsRequiresMasking)
2967 return true;
2968
2969 // If masked interleaving is required, we expect that the user/target had
2970 // enabled it, because otherwise it either wouldn't have been created or
2971 // it should have been invalidated by the CostModel.
2972 assert(useMaskedInterleavedAccesses(TTI) &&
2973 "Masked interleave-groups for predicated accesses are not enabled.");
2974
2975 if (Group->isReverse())
2976 return false;
2977
2978 // TODO: Support interleaved access that requires a gap mask for scalable VFs.
2979 bool NeedsMaskForGaps = LoadAccessWithGapsRequiresEpilogMasking ||
2980 StoreAccessWithGapsRequiresMasking;
2981 if (VF.isScalable() && NeedsMaskForGaps)
2982 return false;
2983
2984 auto *Ty = getLoadStoreType(I);
2985 const Align Alignment = getLoadStoreAlignment(I);
2986 unsigned AS = getLoadStoreAddressSpace(I);
2987 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment, AS)
2988 : TTI.isLegalMaskedStore(Ty, Alignment, AS);
2989}
2990
2991bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
2992 Instruction *I, ElementCount VF) {
2993 // Get and ensure we have a valid memory instruction.
2994 assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction");
2995
2997 auto *ScalarTy = getLoadStoreType(I);
2998
2999 // In order to be widened, the pointer should be consecutive, first of all.
3000 if (!Legal->isConsecutivePtr(ScalarTy, Ptr))
3001 return false;
3002
3003 // If the instruction is a store located in a predicated block, it will be
3004 // scalarized.
3005 if (isScalarWithPredication(I, VF))
3006 return false;
3007
3008 // If the instruction's allocated size doesn't equal its type size, it
3009 // requires padding and will be scalarized.
3010 auto &DL = I->getDataLayout();
3011 if (hasIrregularType(ScalarTy, DL))
3012 return false;
3013
3014 return true;
3015}
3016
3017void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
3018 // We should not collect Uniforms more than once per VF. Right now,
3019 // this function is called from collectUniformsAndScalars(), which
3020 // already does this check. Collecting Uniforms for VF=1 does not make any
3021 // sense.
3022
3023 assert(VF.isVector() && !Uniforms.contains(VF) &&
3024 "This function should not be visited twice for the same VF");
3025
3026 // Visit the list of Uniforms. Even if we find no uniform value, the entry
3027 // ensures we won't analyze this VF again: Uniforms.count(VF) will return 1.
3028 Uniforms[VF].clear();
3029
3030 // Now we know that the loop is vectorizable!
3031 // Collect instructions inside the loop that will remain uniform after
3032 // vectorization.
3033
3034 // Global values, params and instructions outside of current loop are out of
3035 // scope.
3036 auto IsOutOfScope = [&](Value *V) -> bool {
3037 Instruction *I = dyn_cast<Instruction>(V);
3038 return (!I || !TheLoop->contains(I));
3039 };
3040
3041 // Worklist containing uniform instructions demanding lane 0.
3042 SetVector<Instruction *> Worklist;
3043
3044 // Add uniform instructions demanding lane 0 to the worklist. Instructions
3045 // that require predication must not be considered uniform after
3046 // vectorization, because that would create an erroneous replicating region
3047 // where only a single instance out of VF should be formed.
3048 auto AddToWorklistIfAllowed = [&](Instruction *I) -> void {
3049 if (IsOutOfScope(I)) {
3050 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
3051 << *I << "\n");
3052 return;
3053 }
3054 if (isPredicatedInst(I)) {
3055 LLVM_DEBUG(
3056 dbgs() << "LV: Found not uniform due to requiring predication: " << *I
3057 << "\n");
3058 return;
3059 }
3060 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
3061 Worklist.insert(I);
3062 };
3063
3064 // Start with the conditional branches exiting the loop. If the branch
3065 // condition is an instruction contained in the loop that is only used by the
3066 // branch, it is uniform. Note conditions from uncountable early exits are not
3067 // uniform.
3068 SmallVector<BasicBlock *, 4> Exiting;
3069 TheLoop->getExitingBlocks(Exiting);
3070 for (BasicBlock *E : Exiting) {
3071 if (Legal->hasUncountableEarlyExit() && TheLoop->getLoopLatch() != E)
3072 continue;
3073 auto *Cmp = dyn_cast<Instruction>(E->getTerminator()->getOperand(0));
3074 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
3075 AddToWorklistIfAllowed(Cmp);
3076 }
3077
3078 auto PrevVF = VF.divideCoefficientBy(2);
3079 // Return true if all lanes perform the same memory operation, and we can
3080 // thus choose to execute only one.
3081 auto IsUniformMemOpUse = [&](Instruction *I) {
3082 // If the value was already known to not be uniform for the previous
3083 // (smaller VF), it cannot be uniform for the larger VF.
3084 if (PrevVF.isVector()) {
3085 auto Iter = Uniforms.find(PrevVF);
3086 if (Iter != Uniforms.end() && !Iter->second.contains(I))
3087 return false;
3088 }
3089 if (!Legal->isUniformMemOp(*I, VF))
3090 return false;
3091 if (isa<LoadInst>(I))
3092 // Loading the same address always produces the same result - at least
3093 // assuming aliasing and ordering which have already been checked.
3094 return true;
3095 // Storing the same value on every iteration.
3096 return TheLoop->isLoopInvariant(cast<StoreInst>(I)->getValueOperand());
3097 };
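// Illustrative IR (not part of this file): in a loop containing
//   %v = load i32, ptr %p      ; %p loop-invariant
// every lane would load the same address, so a single scalar load suffices;
// the lambda above classifies such loads (and stores of an invariant value to
// an invariant address) as uniform memory op uses.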
3098
3099 auto IsUniformDecision = [&](Instruction *I, ElementCount VF) {
3100 InstWidening WideningDecision = getWideningDecision(I, VF);
3101 assert(WideningDecision != CM_Unknown &&
3102 "Widening decision should be ready at this moment");
3103
3104 if (IsUniformMemOpUse(I))
3105 return true;
3106
3107 return (WideningDecision == CM_Widen ||
3108 WideningDecision == CM_Widen_Reverse ||
3109 WideningDecision == CM_Interleave);
3110 };
3111
3112 // Returns true if Ptr is the pointer operand of a memory access instruction
3113 // I, I is known to not require scalarization, and the pointer is not also
3114 // stored.
3115 auto IsVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
3116 if (isa<StoreInst>(I) && I->getOperand(0) == Ptr)
3117 return false;
3118 return getLoadStorePointerOperand(I) == Ptr &&
3119 (IsUniformDecision(I, VF) || Legal->isInvariant(Ptr));
3120 };
3121
3122 // Holds a list of values which are known to have at least one uniform use.
3123 // Note that there may be other uses which aren't uniform. A "uniform use"
3124 // here is something which only demands lane 0 of the unrolled iterations;
3125 // it does not imply that all lanes produce the same value (e.g. this is not
3126 // the usual meaning of uniform)
3127 SetVector<Value *> HasUniformUse;
3128
3129 // Scan the loop for instructions which are either a) known to have only
3130 // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
3131 for (auto *BB : TheLoop->blocks())
3132 for (auto &I : *BB) {
3133 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
3134 switch (II->getIntrinsicID()) {
3135 case Intrinsic::sideeffect:
3136 case Intrinsic::experimental_noalias_scope_decl:
3137 case Intrinsic::assume:
3138 case Intrinsic::lifetime_start:
3139 case Intrinsic::lifetime_end:
3140 if (TheLoop->hasLoopInvariantOperands(&I))
3141 AddToWorklistIfAllowed(&I);
3142 break;
3143 default:
3144 break;
3145 }
3146 }
3147
3148 if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
3149 if (IsOutOfScope(EVI->getAggregateOperand())) {
3150 AddToWorklistIfAllowed(EVI);
3151 continue;
3152 }
3153 // Only ExtractValue instructions where the aggregate value comes from a
3154 // call are allowed to be non-uniform.
3155 assert(isa<CallInst>(EVI->getAggregateOperand()) &&
3156 "Expected aggregate value to be call return value");
3157 }
3158
3159 // If there's no pointer operand, there's nothing to do.
3160 Value *Ptr = getLoadStorePointerOperand(&I);
3161 if (!Ptr)
3162 continue;
3163
3164 // If the pointer can be proven to be uniform, always add it to the
3165 // worklist.
3166 if (isa<Instruction>(Ptr) && Legal->isUniform(Ptr, VF))
3167 AddToWorklistIfAllowed(cast<Instruction>(Ptr));
3168
3169 if (IsUniformMemOpUse(&I))
3170 AddToWorklistIfAllowed(&I);
3171
3172 if (IsVectorizedMemAccessUse(&I, Ptr))
3173 HasUniformUse.insert(Ptr);
3174 }
3175
3176 // Add to the worklist any operands which have *only* uniform (e.g. lane 0
3177 // demanding) users. Since loops are assumed to be in LCSSA form, this
3178 // disallows uses outside the loop as well.
3179 for (auto *V : HasUniformUse) {
3180 if (IsOutOfScope(V))
3181 continue;
3182 auto *I = cast<Instruction>(V);
3183 bool UsersAreMemAccesses = all_of(I->users(), [&](User *U) -> bool {
3184 auto *UI = cast<Instruction>(U);
3185 return TheLoop->contains(UI) && IsVectorizedMemAccessUse(UI, V);
3186 });
3187 if (UsersAreMemAccesses)
3188 AddToWorklistIfAllowed(I);
3189 }
3190
3191 // Expand Worklist in topological order: whenever a new instruction
3192 // is added, its users should already be inside the Worklist. This ensures
3193 // that a uniform instruction will only be used by uniform instructions.
3194 unsigned Idx = 0;
3195 while (Idx != Worklist.size()) {
3196 Instruction *I = Worklist[Idx++];
3197
3198 for (auto *OV : I->operand_values()) {
3199 // IsOutOfScope operands cannot be uniform instructions.
3200 if (IsOutOfScope(OV))
3201 continue;
3202 // First-order recurrence phis should typically be considered
3203 // non-uniform.
3204 auto *OP = dyn_cast<PHINode>(OV);
3205 if (OP && Legal->isFixedOrderRecurrence(OP))
3206 continue;
3207 // If all the users of the operand are uniform, then add the
3208 // operand into the uniform worklist.
3209 auto *OI = cast<Instruction>(OV);
3210 if (llvm::all_of(OI->users(), [&](User *U) -> bool {
3211 auto *J = cast<Instruction>(U);
3212 return Worklist.count(J) || IsVectorizedMemAccessUse(J, OI);
3213 }))
3214 AddToWorklistIfAllowed(OI);
3215 }
3216 }
3217
3218 // For an instruction to be added into Worklist above, all its users inside
3219 // the loop should also be in Worklist. However, this condition cannot be
3220 // true for phi nodes that form a cyclic dependence. We must process phi
3221 // nodes separately. An induction variable will remain uniform if all users
3222 // of the induction variable and induction variable update remain uniform.
3223 // The code below handles both pointer and non-pointer induction variables.
3224 BasicBlock *Latch = TheLoop->getLoopLatch();
3225 for (const auto &Induction : Legal->getInductionVars()) {
3226 auto *Ind = Induction.first;
3227 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
3228
3229 // Determine if all users of the induction variable are uniform after
3230 // vectorization.
3231 bool UniformInd = all_of(Ind->users(), [&](User *U) -> bool {
3232 auto *I = cast<Instruction>(U);
3233 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
3234 IsVectorizedMemAccessUse(I, Ind);
3235 });
3236 if (!UniformInd)
3237 continue;
3238
3239 // Determine if all users of the induction variable update instruction are
3240 // uniform after vectorization.
3241 bool UniformIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
3242 auto *I = cast<Instruction>(U);
3243 return I == Ind || Worklist.count(I) ||
3244 IsVectorizedMemAccessUse(I, IndUpdate);
3245 });
3246 if (!UniformIndUpdate)
3247 continue;
3248
3249 // The induction variable and its update instruction will remain uniform.
3250 AddToWorklistIfAllowed(Ind);
3251 AddToWorklistIfAllowed(IndUpdate);
3252 }
3253
3254 Uniforms[VF].insert_range(Worklist);
3255}
3256
3257 bool LoopVectorizationCostModel::runtimeChecksRequired() {
3258 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
3259
3260 if (Legal->getRuntimePointerChecking()->Need) {
3261 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
3262 "runtime pointer checks needed. Enable vectorization of this "
3263 "loop with '#pragma clang loop vectorize(enable)' when "
3264 "compiling with -Os/-Oz",
3265 "CantVersionLoopWithOptForSize", ORE, TheLoop);
3266 return true;
3267 }
3268
3269 if (!PSE.getPredicate().isAlwaysTrue()) {
3270 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
3271 "runtime SCEV checks needed. Enable vectorization of this "
3272 "loop with '#pragma clang loop vectorize(enable)' when "
3273 "compiling with -Os/-Oz",
3274 "CantVersionLoopWithOptForSize", ORE, TheLoop);
3275 return true;
3276 }
3277
3278 // FIXME: Avoid specializing for stride==1 instead of bailing out.
3279 if (!Legal->getLAI()->getSymbolicStrides().empty()) {
3280 reportVectorizationFailure("Runtime stride check for small trip count",
3281 "runtime stride == 1 checks needed. Enable vectorization of "
3282 "this loop without such check by compiling with -Os/-Oz",
3283 "CantVersionLoopWithOptForSize", ORE, TheLoop);
3284 return true;
3285 }
3286
3287 return false;
3288}
3289
3290bool LoopVectorizationCostModel::isScalableVectorizationAllowed() {
3291 if (IsScalableVectorizationAllowed)
3292 return *IsScalableVectorizationAllowed;
3293
3294 IsScalableVectorizationAllowed = false;
3295 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors)
3296 return false;
3297
3298 if (Hints->isScalableVectorizationDisabled()) {
3299 reportVectorizationInfo("Scalable vectorization is explicitly disabled",
3300 "ScalableVectorizationDisabled", ORE, TheLoop);
3301 return false;
3302 }
3303
3304 LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n");
3305
3306 auto MaxScalableVF = ElementCount::getScalable(
3307 std::numeric_limits<ElementCount::ScalarTy>::max());
3308
3309 // Test that the loop-vectorizer can legalize all operations for this MaxVF.
3310 // FIXME: While for scalable vectors this is currently sufficient, this should
3311 // be replaced by a more detailed mechanism that filters out specific VFs,
3312 // instead of invalidating vectorization for a whole set of VFs based on the
3313 // MaxVF.
3314
3315 // Disable scalable vectorization if the loop contains unsupported reductions.
3316 if (!canVectorizeReductions(MaxScalableVF)) {
3318 "Scalable vectorization not supported for the reduction "
3319 "operations found in this loop.",
3320 "ScalableVFUnfeasible", ORE, TheLoop);
3321 return false;
3322 }
3323
3324 // Disable scalable vectorization if the loop contains any instructions
3325 // with element types not supported for scalable vectors.
3326 if (any_of(ElementTypesInLoop, [&](Type *Ty) {
3327 return !Ty->isVoidTy() &&
3328 !this->TTI.isElementTypeLegalForScalableVector(Ty);
3329 })) {
3330 reportVectorizationInfo("Scalable vectorization is not supported "
3331 "for all element types found in this loop.",
3332 "ScalableVFUnfeasible", ORE, TheLoop);
3333 return false;
3334 }
3335
3336 if (!Legal->isSafeForAnyVectorWidth() && !getMaxVScale(*TheFunction, TTI)) {
3337 reportVectorizationInfo("The target does not provide maximum vscale value "
3338 "for safe distance analysis.",
3339 "ScalableVFUnfeasible", ORE, TheLoop);
3340 return false;
3341 }
3342
3343 IsScalableVectorizationAllowed = true;
3344 return true;
3345}
3346
3347ElementCount
3348LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
3349 if (!isScalableVectorizationAllowed())
3350 return ElementCount::getScalable(0);
3351
3352 auto MaxScalableVF = ElementCount::getScalable(
3353 std::numeric_limits<ElementCount::ScalarTy>::max());
3354 if (Legal->isSafeForAnyVectorWidth())
3355 return MaxScalableVF;
3356
3357 std::optional<unsigned> MaxVScale = getMaxVScale(*TheFunction, TTI);
3358 // Limit MaxScalableVF by the maximum safe dependence distance.
3359 MaxScalableVF = ElementCount::getScalable(MaxSafeElements / *MaxVScale);
3360
3361 if (!MaxScalableVF)
3363 "Max legal vector width too small, scalable vectorization "
3364 "unfeasible.",
3365 "ScalableVFUnfeasible", ORE, TheLoop);
3366
3367 return MaxScalableVF;
3368}
3369
3370FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF(
3371 unsigned MaxTripCount, ElementCount UserVF, bool FoldTailByMasking) {
3372 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
3373 unsigned SmallestType, WidestType;
3374 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
3375
3376 // Get the maximum safe dependence distance in bits computed by LAA.
3377 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
3378 // the memory accesses that is most restrictive (involved in the smallest
3379 // dependence distance).
3380 unsigned MaxSafeElementsPowerOf2 =
3381 bit_floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);
3382 if (!Legal->isSafeForAnyStoreLoadForwardDistances()) {
3383 unsigned SLDist = Legal->getMaxStoreLoadForwardSafeDistanceInBits();
3384 MaxSafeElementsPowerOf2 =
3385 std::min(MaxSafeElementsPowerOf2, SLDist / WidestType);
3386 }
3387 auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElementsPowerOf2);
3388 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElementsPowerOf2);
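// Worked example with assumed numbers: a max safe dependence distance of 256
// bits and WidestType = 32 bits give bit_floor(256 / 32) = 8 elements, i.e. a
// max safe fixed VF of 8 (possibly reduced further by any store-load
// forwarding distance above).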
3389
3390 if (!Legal->isSafeForAnyVectorWidth())
3391 this->MaxSafeElements = MaxSafeElementsPowerOf2;
3392
3393 LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
3394 << ".\n");
3395 LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
3396 << ".\n");
3397
3398 // First analyze the UserVF, fall back if the UserVF should be ignored.
3399 if (UserVF) {
3400 auto MaxSafeUserVF =
3401 UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
3402
3403 if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) {
3404 // If `VF=vscale x N` is safe, then so is `VF=N`
3405 if (UserVF.isScalable())
3406 return FixedScalableVFPair(
3407 ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF);
3408
3409 return UserVF;
3410 }
3411
3412 assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
3413
3414 // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
3415 // is better to ignore the hint and let the compiler choose a suitable VF.
3416 if (!UserVF.isScalable()) {
3417 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
3418 << " is unsafe, clamping to max safe VF="
3419 << MaxSafeFixedVF << ".\n");
3420 ORE->emit([&]() {
3421 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
3422 TheLoop->getStartLoc(),
3423 TheLoop->getHeader())
3424 << "User-specified vectorization factor "
3425 << ore::NV("UserVectorizationFactor", UserVF)
3426 << " is unsafe, clamping to maximum safe vectorization factor "
3427 << ore::NV("VectorizationFactor", MaxSafeFixedVF);
3428 });
3429 return MaxSafeFixedVF;
3430 }
3431
3432 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
3433 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
3434 << " is ignored because scalable vectors are not "
3435 "available.\n");
3436 ORE->emit([&]() {
3437 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
3438 TheLoop->getStartLoc(),
3439 TheLoop->getHeader())
3440 << "User-specified vectorization factor "
3441 << ore::NV("UserVectorizationFactor", UserVF)
3442 << " is ignored because the target does not support scalable "
3443 "vectors. The compiler will pick a more suitable value.";
3444 });
3445 } else {
3446 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
3447 << " is unsafe. Ignoring scalable UserVF.\n");
3448 ORE->emit([&]() {
3449 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
3450 TheLoop->getStartLoc(),
3451 TheLoop->getHeader())
3452 << "User-specified vectorization factor "
3453 << ore::NV("UserVectorizationFactor", UserVF)
3454 << " is unsafe. Ignoring the hint to let the compiler pick a "
3455 "more suitable value.";
3456 });
3457 }
3458 }
3459
3460 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
3461 << " / " << WidestType << " bits.\n");
3462
3463 FixedScalableVFPair Result(ElementCount::getFixed(1),
3464 ElementCount::getScalable(1));
3465 if (auto MaxVF =
3466 getMaximizedVFForTarget(MaxTripCount, SmallestType, WidestType,
3467 MaxSafeFixedVF, FoldTailByMasking))
3468 Result.FixedVF = MaxVF;
3469
3470 if (auto MaxVF =
3471 getMaximizedVFForTarget(MaxTripCount, SmallestType, WidestType,
3472 MaxSafeScalableVF, FoldTailByMasking))
3473 if (MaxVF.isScalable()) {
3474 Result.ScalableVF = MaxVF;
3475 LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
3476 << "\n");
3477 }
3478
3479 return Result;
3480}
3481
3482FixedScalableVFPair
3483 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
3484 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
3485 // TODO: It may be useful to do this, since the check is still likely to be
3486 // dynamically uniform if the target can skip it.
3487 reportVectorizationFailure(
3488 "Not inserting runtime ptr check for divergent target",
3489 "runtime pointer checks needed. Not enabled for divergent target",
3490 "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
3491 return FixedScalableVFPair::getNone();
3492 }
3493
3494 ScalarEvolution *SE = PSE.getSE();
3495 ElementCount TC = getSmallConstantTripCount(SE, TheLoop);
3496 unsigned MaxTC = PSE.getSmallConstantMaxTripCount();
3497 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
3498 if (TC != ElementCount::getFixed(MaxTC))
3499 LLVM_DEBUG(dbgs() << "LV: Found maximum trip count: " << MaxTC << '\n');
3500 if (TC.isScalar()) {
3501 reportVectorizationFailure("Single iteration (non) loop",
3502 "loop trip count is one, irrelevant for vectorization",
3503 "SingleIterationLoop", ORE, TheLoop);
3504 return FixedScalableVFPair::getNone();
3505 }
3506
3507 // If BTC matches the widest induction type and is -1 then the trip count
3508 // computation will wrap to 0 and the vector trip count will be 0. Do not try
3509 // to vectorize.
3510 const SCEV *BTC = SE->getBackedgeTakenCount(TheLoop);
3511 if (!isa<SCEVCouldNotCompute>(BTC) &&
3512 BTC->getType()->getScalarSizeInBits() >=
3513 Legal->getWidestInductionType()->getScalarSizeInBits() &&
3514 SE->isKnownPredicate(CmpInst::ICMP_EQ, BTC,
3515 SE->getMinusOne(BTC->getType()))) {
3516 reportVectorizationFailure(
3517 "Trip count computation wrapped",
3518 "backedge-taken count is -1, loop trip count wrapped to 0",
3519 "TripCountWrapped", ORE, TheLoop);
3520 return FixedScalableVFPair::getNone();
3521 }
3522
3523 switch (ScalarEpilogueStatus) {
3524 case CM_ScalarEpilogueAllowed:
3525 return computeFeasibleMaxVF(MaxTC, UserVF, false);
3526 case CM_ScalarEpilogueNotAllowedUsePredicate:
3527 [[fallthrough]];
3528 case CM_ScalarEpilogueNotNeededUsePredicate:
3529 LLVM_DEBUG(
3530 dbgs() << "LV: vector predicate hint/switch found.\n"
3531 << "LV: Not allowing scalar epilogue, creating predicated "
3532 << "vector loop.\n");
3533 break;
3534 case CM_ScalarEpilogueNotAllowedLowTripLoop:
3535 // fallthrough as a special case of OptForSize
3536 case CM_ScalarEpilogueNotAllowedOptSize:
3537 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
3538 LLVM_DEBUG(
3539 dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
3540 else
3541 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
3542 << "count.\n");
3543
3544 // Bail if runtime checks are required, which are not good when optimising
3545 // for size.
3546 if (runtimeChecksRequired())
3547 return FixedScalableVFPair::getNone();
3548
3549 break;
3550 }
3551
3552 // Now try the tail folding
3553
3554 // Invalidate interleave groups that require an epilogue if we can't mask
3555 // the interleave-group.
3556 if (!useMaskedInterleavedAccesses(TTI)) {
3557 assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
3558 "No decisions should have been taken at this point");
3559 // Note: There is no need to invalidate any cost modeling decisions here, as
3560 // none were taken so far.
3561 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
3562 }
3563
3564 FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(MaxTC, UserVF, true);
3565
3566 // Avoid tail folding if the trip count is known to be a multiple of any VF
3567 // we choose.
3568 std::optional<unsigned> MaxPowerOf2RuntimeVF =
3569 MaxFactors.FixedVF.getFixedValue();
3570 if (MaxFactors.ScalableVF) {
3571 std::optional<unsigned> MaxVScale = getMaxVScale(*TheFunction, TTI);
3572 if (MaxVScale && TTI.isVScaleKnownToBeAPowerOfTwo()) {
3573 MaxPowerOf2RuntimeVF = std::max<unsigned>(
3574 *MaxPowerOf2RuntimeVF,
3575 *MaxVScale * MaxFactors.ScalableVF.getKnownMinValue());
3576 } else
3577 MaxPowerOf2RuntimeVF = std::nullopt; // Stick with tail-folding for now.
3578 }
3579
3580 auto NoScalarEpilogueNeeded = [this, &UserIC](unsigned MaxVF) {
3581 // Return false if the loop is neither a single-latch-exit loop nor an
3582 // early-exit loop as tail-folding is not supported in that case.
3583 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch() &&
3584 !Legal->hasUncountableEarlyExit())
3585 return false;
3586 unsigned MaxVFtimesIC = UserIC ? MaxVF * UserIC : MaxVF;
3587 ScalarEvolution *SE = PSE.getSE();
3588 // Calling getSymbolicMaxBackedgeTakenCount enables support for loops
3589 // with uncountable exits. For countable loops, the symbolic maximum must
3590 // remain identical to the known back-edge taken count.
3591 const SCEV *BackedgeTakenCount = PSE.getSymbolicMaxBackedgeTakenCount();
3592 assert((Legal->hasUncountableEarlyExit() ||
3593 BackedgeTakenCount == PSE.getBackedgeTakenCount()) &&
3594 "Invalid loop count");
3595 const SCEV *ExitCount = SE->getAddExpr(
3596 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
3597 const SCEV *Rem = SE->getURemExpr(
3598 SE->applyLoopGuards(ExitCount, TheLoop),
3599 SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
3600 return Rem->isZero();
3601 };
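// Sketch with assumed values: for a backedge-taken count of 127 the exit
// count is 128; with MaxVF = 8 and UserIC = 2, 128 urem 16 == 0, so no scalar
// epilogue iterations would remain for that VF/IC combination.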
3602
3603 if (MaxPowerOf2RuntimeVF > 0u) {
3604 assert((UserVF.isNonZero() || isPowerOf2_32(*MaxPowerOf2RuntimeVF)) &&
3605 "MaxFixedVF must be a power of 2");
3606 if (NoScalarEpilogueNeeded(*MaxPowerOf2RuntimeVF)) {
3607 // Accept MaxFixedVF if we do not have a tail.
3608 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
3609 return MaxFactors;
3610 }
3611 }
3612
3613 auto ExpectedTC = getSmallBestKnownTC(PSE, TheLoop);
3614 if (ExpectedTC && ExpectedTC->isFixed() &&
3615 ExpectedTC->getFixedValue() <=
3616 TTI.getMinTripCountTailFoldingThreshold()) {
3617 if (MaxPowerOf2RuntimeVF > 0u) {
3618 // If we have a low-trip-count, and the fixed-width VF is known to divide
3619 // the trip count but the scalable factor does not, use the fixed-width
3620 // factor in preference to allow the generation of a non-predicated loop.
3621 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedLowTripLoop &&
3622 NoScalarEpilogueNeeded(MaxFactors.FixedVF.getFixedValue())) {
3623 LLVM_DEBUG(dbgs() << "LV: Picking a fixed-width so that no tail will "
3624 "remain for any chosen VF.\n");
3625 MaxFactors.ScalableVF = ElementCount::getScalable(0);
3626 return MaxFactors;
3627 }
3628 }
3629
3631 "The trip count is below the minial threshold value.",
3632 "loop trip count is too low, avoiding vectorization", "LowTripCount",
3633 ORE, TheLoop);
3635 }
3636
3637 // If we don't know the precise trip count, or if the trip count that we
3638 // found modulo the vectorization factor is not zero, try to fold the tail
3639 // by masking.
3640 // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
3641 bool ContainsScalableVF = MaxFactors.ScalableVF.isNonZero();
3642 setTailFoldingStyles(ContainsScalableVF, UserIC);
3643 if (foldTailByMasking()) {
3644 if (foldTailWithEVL()) {
3645 LLVM_DEBUG(
3646 dbgs()
3647 << "LV: tail is folded with EVL, forcing unroll factor to be 1. Will "
3648 "try to generate VP Intrinsics with scalable vector "
3649 "factors only.\n");
3650 // Tail folded loop using VP intrinsics restricts the VF to be scalable
3651 // for now.
3652 // TODO: extend it for fixed vectors, if required.
3653 assert(ContainsScalableVF && "Expected scalable vector factor.");
3654
3655 MaxFactors.FixedVF = ElementCount::getFixed(1);
3656 }
3657 return MaxFactors;
3658 }
3659
3660 // If there was a tail-folding hint/switch, but we can't fold the tail by
3661 // masking, fallback to a vectorization with a scalar epilogue.
3662 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
3663 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
3664 "scalar epilogue instead.\n");
3665 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
3666 return MaxFactors;
3667 }
3668
3669 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
3670 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
3671 return FixedScalableVFPair::getNone();
3672 }
3673
3674 if (TC.isZero()) {
3676 "unable to calculate the loop count due to complex control flow",
3677 "UnknownLoopCountComplexCFG", ORE, TheLoop);
3679 }
3680
3682 "Cannot optimize for size and vectorize at the same time.",
3683 "cannot optimize for size and vectorize at the same time. "
3684 "Enable vectorization of this loop with '#pragma clang loop "
3685 "vectorize(enable)' when compiling with -Os/-Oz",
3686 "NoTailLoopWithOptForSize", ORE, TheLoop);
3687 return FixedScalableVFPair::getNone();
3688}
3689
3690 bool LoopVectorizationCostModel::shouldConsiderRegPressureForVF(
3691 ElementCount VF) {
3692 if (ConsiderRegPressure.getNumOccurrences())
3693 return ConsiderRegPressure;
3694
3695 // TODO: We should eventually consider register pressure for all targets. The
3696 // TTI hook is temporary whilst target-specific issues are being fixed.
3697 if (TTI.shouldConsiderVectorizationRegPressure())
3698 return true;
3699
3700 if (!useMaxBandwidth(VF.isScalable()
3701 ? TargetTransformInfo::RGK_ScalableVector
3702 : TargetTransformInfo::RGK_FixedWidthVector))
3703 return false;
3704 // Only calculate register pressure for VFs enabled by MaxBandwidth.
3705 return ElementCount::isKnownGT(
3706 VF, VF.isScalable() ? MaxPermissibleVFWithoutMaxBW.ScalableVF
3707 : MaxPermissibleVFWithoutMaxBW.FixedVF);
3708}
3709
3710 bool LoopVectorizationCostModel::useMaxBandwidth(
3711 TargetTransformInfo::RegisterKind RegKind) {
3712 return MaximizeBandwidth || (MaximizeBandwidth.getNumOccurrences() == 0 &&
3713 (TTI.shouldMaximizeVectorBandwidth(RegKind) ||
3714 (UseWiderVFIfCallVariantsPresent &&
3715 Legal->hasVectorCallVariants())));
3716}
3717
3718ElementCount LoopVectorizationCostModel::clampVFByMaxTripCount(
3719 ElementCount VF, unsigned MaxTripCount, bool FoldTailByMasking) const {
3720 unsigned EstimatedVF = VF.getKnownMinValue();
3721 if (VF.isScalable() && TheFunction->hasFnAttribute(Attribute::VScaleRange)) {
3722 auto Attr = TheFunction->getFnAttribute(Attribute::VScaleRange);
3723 auto Min = Attr.getVScaleRangeMin();
3724 EstimatedVF *= Min;
3725 }
3726
3727 // When a scalar epilogue is required, at least one iteration of the scalar
3728 // loop has to execute. Adjust MaxTripCount accordingly to avoid picking a
3729 // max VF that results in a dead vector loop.
3730 if (MaxTripCount > 0 && requiresScalarEpilogue(true))
3731 MaxTripCount -= 1;
3732
3733 if (MaxTripCount && MaxTripCount <= EstimatedVF &&
3734 (!FoldTailByMasking || isPowerOf2_32(MaxTripCount))) {
3735 // If the upper bound loop trip count (TC) is known at compile time, there
3736 // is no point in choosing a VF greater than TC (as done in the loop below).
3737 // Select the maximum power of two which doesn't exceed TC. If VF is
3738 // scalable, we only fall back on a fixed VF when the TC is less than or
3739 // equal to the known number of lanes.
3740 auto ClampedUpperTripCount = llvm::bit_floor(MaxTripCount);
3741 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not "
3742 "exceeding the constant trip count: "
3743 << ClampedUpperTripCount << "\n");
3744 return ElementCount::get(ClampedUpperTripCount,
3745 FoldTailByMasking ? VF.isScalable() : false);
3746 }
3747 return VF;
3748}
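// Example with hypothetical values: MaxTripCount = 17 and EstimatedVF = 32
// (no tail folding) clamp the VF to bit_floor(17) = 16, so the vector loop
// executes at least once instead of being dead code.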
3749
3750ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
3751 unsigned MaxTripCount, unsigned SmallestType, unsigned WidestType,
3752 ElementCount MaxSafeVF, bool FoldTailByMasking) {
3753 bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
3754 const TypeSize WidestRegister = TTI.getRegisterBitWidth(
3755 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
3756 : TargetTransformInfo::RGK_FixedWidthVector);
3757
3758 // Convenience function to return the minimum of two ElementCounts.
3759 auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
3760 assert((LHS.isScalable() == RHS.isScalable()) &&
3761 "Scalable flags must match");
3762 return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
3763 };
3764
3765 // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
3766 // Note that both WidestRegister and WidestType may not be powers of 2.
3767 auto MaxVectorElementCount = ElementCount::get(
3768 llvm::bit_floor(WidestRegister.getKnownMinValue() / WidestType),
3769 ComputeScalableMaxVF);
3770 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
3771 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
3772 << (MaxVectorElementCount * WidestType) << " bits.\n");
3773
3774 if (!MaxVectorElementCount) {
3775 LLVM_DEBUG(dbgs() << "LV: The target has no "
3776 << (ComputeScalableMaxVF ? "scalable" : "fixed")
3777 << " vector registers.\n");
3778 return ElementCount::getFixed(1);
3779 }
3780
3781 ElementCount MaxVF = clampVFByMaxTripCount(MaxVectorElementCount,
3782 MaxTripCount, FoldTailByMasking);
3783 // If the MaxVF was already clamped, there's no point in trying to pick a
3784 // larger one.
3785 if (MaxVF != MaxVectorElementCount)
3786 return MaxVF;
3787
3788 TargetTransformInfo::RegisterKind RegKind =
3789 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
3790 : TargetTransformInfo::RGK_FixedWidthVector;
3791
3792 if (MaxVF.isScalable())
3793 MaxPermissibleVFWithoutMaxBW.ScalableVF = MaxVF;
3794 else
3795 MaxPermissibleVFWithoutMaxBW.FixedVF = MaxVF;
3796
3797 if (useMaxBandwidth(RegKind)) {
3798 auto MaxVectorElementCountMaxBW = ElementCount::get(
3799 llvm::bit_floor(WidestRegister.getKnownMinValue() / SmallestType),
3800 ComputeScalableMaxVF);
3801 MaxVF = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
3802
3803 if (ElementCount MinVF =
3804 TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
3805 if (ElementCount::isKnownLT(MaxVF, MinVF)) {
3806 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
3807 << ") with target's minimum: " << MinVF << '\n');
3808 MaxVF = MinVF;
3809 }
3810 }
3811
3812 MaxVF = clampVFByMaxTripCount(MaxVF, MaxTripCount, FoldTailByMasking);
3813
3814 if (MaxVectorElementCount != MaxVF) {
3815 // Invalidate any widening decisions we might have made, in case the loop
3816 // requires predication (decided later), but we have already made some
3817 // load/store widening decisions.
3818 invalidateCostModelingDecisions();
3819 }
3820 }
3821 return MaxVF;
3822}
3823
3824bool LoopVectorizationPlanner::isMoreProfitable(const VectorizationFactor &A,
3825 const VectorizationFactor &B,
3826 const unsigned MaxTripCount,
3827 bool HasTail,
3828 bool IsEpilogue) const {
3829 InstructionCost CostA = A.Cost;
3830 InstructionCost CostB = B.Cost;
3831
3832 // Improve estimate for the vector width if it is scalable.
3833 unsigned EstimatedWidthA = A.Width.getKnownMinValue();
3834 unsigned EstimatedWidthB = B.Width.getKnownMinValue();
3835 if (std::optional<unsigned> VScale = CM.getVScaleForTuning()) {
3836 if (A.Width.isScalable())
3837 EstimatedWidthA *= *VScale;
3838 if (B.Width.isScalable())
3839 EstimatedWidthB *= *VScale;
3840 }
3841
3842 // When optimizing for size, choose whichever is smallest, which will be the
3843 // one with the smallest cost for the whole loop. On a tie pick the larger
3844 // vector width, on the assumption that throughput will be greater.
3845 if (CM.CostKind == TTI::TCK_CodeSize)
3846 return CostA < CostB ||
3847 (CostA == CostB && EstimatedWidthA > EstimatedWidthB);
3848
3849 // Assume vscale may be larger than 1 (or the value being tuned for),
3850 // so that scalable vectorization is slightly favorable over fixed-width
3851 // vectorization.
3852 bool PreferScalable = !TTI.preferFixedOverScalableIfEqualCost(IsEpilogue) &&
3853 A.Width.isScalable() && !B.Width.isScalable();
3854
3855 auto CmpFn = [PreferScalable](const InstructionCost &LHS,
3856 const InstructionCost &RHS) {
3857 return PreferScalable ? LHS <= RHS : LHS < RHS;
3858 };
3859
3860 // To avoid the need for FP division:
3861 // (CostA / EstimatedWidthA) < (CostB / EstimatedWidthB)
3862 // <=> (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA)
3863 if (!MaxTripCount)
3864 return CmpFn(CostA * EstimatedWidthB, CostB * EstimatedWidthA);
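// Numeric sketch (values assumed): CostA = 10 at width 4 versus CostB = 6 at
// width 2 compares 10 * 2 = 20 against 6 * 4 = 24, i.e. per-lane costs of
// 2.5 < 3.0, so A is judged more profitable without any FP division.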
3865
3866 auto GetCostForTC = [MaxTripCount, HasTail](unsigned VF,
3867 InstructionCost VectorCost,
3868 InstructionCost ScalarCost) {
3869 // If the trip count is a known (possibly small) constant, the trip count
3870 // will be rounded up to an integer number of iterations under
3871 // FoldTailByMasking. The total cost in that case will be
3872 // VecCost*ceil(TripCount/VF). When not folding the tail, the total
3873 // cost will be VecCost*floor(TC/VF) + ScalarCost*(TC%VF). There will be
3874 // some extra overheads, but for the purpose of comparing the costs of
3875 // different VFs we can use this to compare the total loop-body cost
3876 // expected after vectorization.
3877 if (HasTail)
3878 return VectorCost * (MaxTripCount / VF) +
3879 ScalarCost * (MaxTripCount % VF);
3880 return VectorCost * divideCeil(MaxTripCount, VF);
3881 };
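// Worked example (assumed numbers): MaxTripCount = 10, VF = 4,
// VectorCost = 8, ScalarCost = 3. With a scalar tail the estimate is
// 8 * (10 / 4) + 3 * (10 % 4) = 16 + 6 = 22; with tail folding it is
// 8 * ceil(10 / 4) = 24.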
3882
3883 auto RTCostA = GetCostForTC(EstimatedWidthA, CostA, A.ScalarCost);
3884 auto RTCostB = GetCostForTC(EstimatedWidthB, CostB, B.ScalarCost);
3885 return CmpFn(RTCostA, RTCostB);
3886}
3887
3888bool LoopVectorizationPlanner::isMoreProfitable(const VectorizationFactor &A,
3889 const VectorizationFactor &B,
3890 bool HasTail,
3891 bool IsEpilogue) const {
3892 const unsigned MaxTripCount = PSE.getSmallConstantMaxTripCount();
3893 return LoopVectorizationPlanner::isMoreProfitable(A, B, MaxTripCount, HasTail,
3894 IsEpilogue);
3895}
3896
3897 void LoopVectorizationPlanner::emitInvalidCostRemarks(
3898 OptimizationRemarkEmitter *ORE) {
3899 using RecipeVFPair = std::pair<VPRecipeBase *, ElementCount>;
3900 SmallVector<RecipeVFPair> InvalidCosts;
3901 for (const auto &Plan : VPlans) {
3902 for (ElementCount VF : Plan->vectorFactors()) {
3903 // The VPlan-based cost model is designed for computing vector cost.
3904 // Querying the VPlan-based cost model with a scalar VF will cause some
3905 // errors because we expect the VF to be vector for most of the widen
3906 // recipes.
3907 if (VF.isScalar())
3908 continue;
3909
3910 VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind,
3911 *CM.PSE.getSE());
3912 precomputeCosts(*Plan, VF, CostCtx);
3913 auto Iter = vp_depth_first_deep(Plan->getVectorLoopRegion()->getEntry());
3914 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
3915 for (auto &R : *VPBB) {
3916 if (!R.cost(VF, CostCtx).isValid())
3917 InvalidCosts.emplace_back(&R, VF);
3918 }
3919 }
3920 }
3921 }
3922 if (InvalidCosts.empty())
3923 return;
3924
3925 // Emit a report of VFs with invalid costs in the loop.
3926
3927 // Group the remarks per recipe, keeping the recipe order from InvalidCosts.
3928 DenseMap<VPRecipeBase *, unsigned> Numbering;
3929 unsigned I = 0;
3930 for (auto &Pair : InvalidCosts)
3931 if (Numbering.try_emplace(Pair.first, I).second)
3932 ++I;
3933
3934 // Sort the list, first on recipe(number) then on VF.
3935 sort(InvalidCosts, [&Numbering](RecipeVFPair &A, RecipeVFPair &B) {
3936 unsigned NA = Numbering[A.first];
3937 unsigned NB = Numbering[B.first];
3938 if (NA != NB)
3939 return NA < NB;
3940 return ElementCount::isKnownLT(A.second, B.second);
3941 });
3942
3943 // For a list of ordered recipe-VF pairs:
3944 // [(load, VF1), (load, VF2), (store, VF1)]
3945 // group the recipes together to emit separate remarks for:
3946 // load (VF1, VF2)
3947 // store (VF1)
3948 auto Tail = ArrayRef<RecipeVFPair>(InvalidCosts);
3949 auto Subset = ArrayRef<RecipeVFPair>();
3950 do {
3951 if (Subset.empty())
3952 Subset = Tail.take_front(1);
3953
3954 VPRecipeBase *R = Subset.front().first;
3955
3956 unsigned Opcode =
3957 TypeSwitch<const VPRecipeBase *, unsigned>(R)
3958 .Case<VPHeaderPHIRecipe>(
3959 [](const auto *R) { return Instruction::PHI; })
3960 .Case<VPWidenSelectRecipe>(
3961 [](const auto *R) { return Instruction::Select; })
3962 .Case<VPWidenStoreRecipe>(
3963 [](const auto *R) { return Instruction::Store; })
3964 .Case<VPWidenLoadRecipe>(
3965 [](const auto *R) { return Instruction::Load; })
3966 .Case<VPWidenCallRecipe, VPWidenIntrinsicRecipe>(
3967 [](const auto *R) { return Instruction::Call; })
3968 .Case<VPInstruction, VPWidenRecipe, VPReplicateRecipe,
3969 VPWidenCastRecipe>(
3970 [](const auto *R) { return R->getOpcode(); })
3971 .Case<VPInterleaveRecipe>([](const VPInterleaveRecipe *R) {
3972 return R->getStoredValues().empty() ? Instruction::Load
3973 : Instruction::Store;
3974 });
3975
3976 // If the next recipe is different, or if there are no other pairs,
3977 // emit a remark for the collated subset. e.g.
3978 // [(load, VF1), (load, VF2))]
3979 // to emit:
3980 // remark: invalid costs for 'load' at VF=(VF1, VF2)
3981 if (Subset == Tail || Tail[Subset.size()].first != R) {
3982 std::string OutString;
3983 raw_string_ostream OS(OutString);
3984 assert(!Subset.empty() && "Unexpected empty range");
3985 OS << "Recipe with invalid costs prevented vectorization at VF=(";
3986 for (const auto &Pair : Subset)
3987 OS << (Pair.second == Subset.front().second ? "" : ", ") << Pair.second;
3988 OS << "):";
3989 if (Opcode == Instruction::Call) {
3990 StringRef Name = "";
3991 if (auto *Int = dyn_cast<VPWidenIntrinsicRecipe>(R)) {
3992 Name = Int->getIntrinsicName();
3993 } else {
3994 auto *WidenCall = dyn_cast<VPWidenCallRecipe>(R);
3995 Function *CalledFn =
3996 WidenCall ? WidenCall->getCalledScalarFunction()
3997 : cast<Function>(R->getOperand(R->getNumOperands() - 1)
3998 ->getLiveInIRValue());
3999 Name = CalledFn->getName();
4000 }
4001 OS << " call to " << Name;
4002 } else
4003 OS << " " << Instruction::getOpcodeName(Opcode);
4004 reportVectorizationInfo(OutString, "InvalidCost", ORE, OrigLoop, nullptr,
4005 R->getDebugLoc());
4006 Tail = Tail.drop_front(Subset.size());
4007 Subset = {};
4008 } else
4009 // Grow the subset by one element
4010 Subset = Tail.take_front(Subset.size() + 1);
4011 } while (!Tail.empty());
4012}
4013
4014/// Check if any recipe of \p Plan will generate a vector value, which will be
4015/// assigned a vector register.
4016 static bool willGenerateVectors(VPlan &Plan, ElementCount VF,
4017 const TargetTransformInfo &TTI) {
4018 assert(VF.isVector() && "Checking a scalar VF?");
4019 VPTypeAnalysis TypeInfo(Plan);
4020 DenseSet<VPRecipeBase *> EphemeralRecipes;
4021 collectEphemeralRecipesForVPlan(Plan, EphemeralRecipes);
4022 // Set of already visited types.
4023 DenseSet<Type *> Visited;
4024 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
4025 vp_depth_first_shallow(Plan.getVectorLoopRegion()->getEntry()))) {
4026 for (VPRecipeBase &R : *VPBB) {
4027 if (EphemeralRecipes.contains(&R))
4028 continue;
4029 // Continue early if the recipe is considered to not produce a vector
4030 // result. Note that this includes VPInstruction where some opcodes may
4031 // produce a vector, to preserve existing behavior as VPInstructions model
4032 // aspects not directly mapped to existing IR instructions.
4033 switch (R.getVPDefID()) {
4034 case VPDef::VPDerivedIVSC:
4035 case VPDef::VPScalarIVStepsSC:
4036 case VPDef::VPReplicateSC:
4037 case VPDef::VPInstructionSC:
4038 case VPDef::VPCanonicalIVPHISC:
4039 case VPDef::VPVectorPointerSC:
4040 case VPDef::VPVectorEndPointerSC:
4041 case VPDef::VPExpandSCEVSC:
4042 case VPDef::VPEVLBasedIVPHISC:
4043 case VPDef::VPPredInstPHISC:
4044 case VPDef::VPBranchOnMaskSC:
4045 continue;
4046 case VPDef::VPReductionSC:
4047 case VPDef::VPActiveLaneMaskPHISC:
4048 case VPDef::VPWidenCallSC:
4049 case VPDef::VPWidenCanonicalIVSC:
4050 case VPDef::VPWidenCastSC:
4051 case VPDef::VPWidenGEPSC:
4052 case VPDef::VPWidenIntrinsicSC:
4053 case VPDef::VPWidenSC:
4054 case VPDef::VPWidenSelectSC:
4055 case VPDef::VPBlendSC:
4056 case VPDef::VPFirstOrderRecurrencePHISC:
4057 case VPDef::VPHistogramSC:
4058 case VPDef::VPWidenPHISC:
4059 case VPDef::VPWidenIntOrFpInductionSC:
4060 case VPDef::VPWidenPointerInductionSC:
4061 case VPDef::VPReductionPHISC:
4062 case VPDef::VPInterleaveEVLSC:
4063 case VPDef::VPInterleaveSC:
4064 case VPDef::VPWidenLoadEVLSC:
4065 case VPDef::VPWidenLoadSC:
4066 case VPDef::VPWidenStoreEVLSC:
4067 case VPDef::VPWidenStoreSC:
4068 break;
4069 default:
4070 llvm_unreachable("unhandled recipe");
4071 }
4072
4073 auto WillGenerateTargetVectors = [&TTI, VF](Type *VectorTy) {
4074 unsigned NumLegalParts = TTI.getNumberOfParts(VectorTy);
4075 if (!NumLegalParts)
4076 return false;
4077 if (VF.isScalable()) {
4078 // <vscale x 1 x iN> is assumed to be profitable over iN because
4079 // scalable registers are a distinct register class from scalar
4080 // ones. If we ever find a target which wants to lower scalable
4081 // vectors back to scalars, we'll need to update this code to
4082 // explicitly ask TTI about the register class uses for each part.
4083 return NumLegalParts <= VF.getKnownMinValue();
4084 }
4085 // Two or more elements that share a register are vectorized.
4086 return NumLegalParts < VF.getFixedValue();
4087 };
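// For instance, assuming a target with 128-bit vector registers: a <4 x i32>
// legalizes to one part and 1 < 4, so it counts as vectorized, whereas a
// <4 x i256> splits into 8 parts and 8 >= 4, so it does not.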
4088
4089 // If there is neither a def nor a store (e.g., a branch), continue - no value to check.
4090 if (R.getNumDefinedValues() == 0 &&
4091 !isa<VPWidenStoreRecipe, VPWidenStoreEVLRecipe, VPInterleaveRecipe>(&R))
4092 continue;
4093 // For multi-def recipes (currently only interleaved loads), it suffices
4094 // to check the first def only.
4095 // For stores, check their stored value; for interleaved stores, it
4096 // suffices to check the first stored value only. In all cases this is
4097 // the second operand.
4098 VPValue *ToCheck =
4099 R.getNumDefinedValues() >= 1 ? R.getVPValue(0) : R.getOperand(1);
4100 Type *ScalarTy = TypeInfo.inferScalarType(ToCheck);
4101 if (!Visited.insert({ScalarTy}).second)
4102 continue;
4103 Type *WideTy = toVectorizedTy(ScalarTy, VF);
4104 if (any_of(getContainedTypes(WideTy), WillGenerateTargetVectors))
4105 return true;
4106 }
4107 }
4108
4109 return false;
4110}
4111
4112static bool hasReplicatorRegion(VPlan &Plan) {
4113 return any_of(VPBlockUtils::blocksOnly<VPRegionBlock>(vp_depth_first_shallow(
4114 Plan.getVectorLoopRegion()->getEntry())),
4115 [](auto *VPRB) { return VPRB->isReplicator(); });
4116}
4117
4118#ifndef NDEBUG
4119VectorizationFactor LoopVectorizationPlanner::selectVectorizationFactor() {
4120 InstructionCost ExpectedCost = CM.expectedCost(ElementCount::getFixed(1));
4121 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
4122 assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
4123 assert(
4124 any_of(VPlans,
4125 [](std::unique_ptr<VPlan> &P) { return P->hasScalarVFOnly(); }) &&
4126 "Expected Scalar VF to be a candidate");
4127
4128 const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost,
4129 ExpectedCost);
4130 VectorizationFactor ChosenFactor = ScalarCost;
4131
4132 bool ForceVectorization = Hints.getForce() == LoopVectorizeHints::FK_Enabled;
4133 if (ForceVectorization &&
4134 (VPlans.size() > 1 || !VPlans[0]->hasScalarVFOnly())) {
4135 // Ignore scalar width, because the user explicitly wants vectorization.
4136 // Initialize cost to max so that VF = 2 is, at least, chosen during cost
4137 // evaluation.
4138 ChosenFactor.Cost = InstructionCost::getMax();
4139 }
4140
4141 for (auto &P : VPlans) {
4142 ArrayRef<ElementCount> VFs(P->vectorFactors().begin(),
4143 P->vectorFactors().end());
4144
4145 SmallVector<VPRegisterUsage, 8> RUs;
4146 if (any_of(VFs, [this](ElementCount VF) {
4147 return CM.shouldConsiderRegPressureForVF(VF);
4148 }))
4149 RUs = calculateRegisterUsageForPlan(*P, VFs, TTI, CM.ValuesToIgnore);
4150
4151 for (unsigned I = 0; I < VFs.size(); I++) {
4152 ElementCount VF = VFs[I];
4153 // The cost for scalar VF=1 is already calculated, so ignore it.
4154 if (VF.isScalar())
4155 continue;
4156
4157 // If the register pressure needs to be considered for VF,
4158 // don't consider the VF as valid if it exceeds the number
4159 // of registers for the target.
4160 if (CM.shouldConsiderRegPressureForVF(VF) &&
4161 RUs[I].exceedsMaxNumRegs(TTI, ForceTargetNumVectorRegs))
4162 continue;
4163
4164 InstructionCost C = CM.expectedCost(VF);
4165
4166 // Add on other costs that are modelled in VPlan, but not in the legacy
4167 // cost model.
4168 VPCostContext CostCtx(CM.TTI, *CM.TLI, *P, CM, CM.CostKind,
4169 *CM.PSE.getSE());
4170 VPRegionBlock *VectorRegion = P->getVectorLoopRegion();
4171 assert(VectorRegion && "Expected to have a vector region!");
4172 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
4173 vp_depth_first_shallow(VectorRegion->getEntry()))) {
4174 for (VPRecipeBase &R : *VPBB) {
4175 auto *VPI = dyn_cast<VPInstruction>(&R);
4176 if (!VPI)
4177 continue;
4178 switch (VPI->getOpcode()) {
4179 // Selects are only modelled in the legacy cost model for safe
4180 // divisors.
4181 case Instruction::Select: {
4182 VPValue *VPV = VPI->getVPSingleValue();
4183 if (VPV->getNumUsers() == 1) {
4184 if (auto *WR = dyn_cast<VPWidenRecipe>(*VPV->user_begin())) {
4185 switch (WR->getOpcode()) {
4186 case Instruction::UDiv:
4187 case Instruction::SDiv:
4188 case Instruction::URem:
4189 case Instruction::SRem:
4190 continue;
4191 default:
4192 break;
4193 }
4194 }
4195 }
4196 C += VPI->cost(VF, CostCtx);
4197 break;
4198 }
4199 case VPInstruction::ActiveLaneMask: {
4200 unsigned Multiplier =
4201 cast<ConstantInt>(VPI->getOperand(2)->getLiveInIRValue())
4202 ->getZExtValue();
4203 C += VPI->cost(VF * Multiplier, CostCtx);
4204 break;
4205 }
4206 case VPInstruction::AnyOf:
4207 C += VPI->cost(VF, CostCtx);
4208 break;
4209 default:
4210 break;
4211 }
4212 }
4213 }
4214
4215 VectorizationFactor Candidate(VF, C, ScalarCost.ScalarCost);
4216 unsigned Width =
4217 estimateElementCount(Candidate.Width, CM.getVScaleForTuning());
4218 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << VF
4219 << " costs: " << (Candidate.Cost / Width));
4220 if (VF.isScalable())
4221 LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of "
4222 << CM.getVScaleForTuning().value_or(1) << ")");
4223 LLVM_DEBUG(dbgs() << ".\n");
4224
4225 if (!ForceVectorization && !willGenerateVectors(*P, VF, TTI)) {
4226 LLVM_DEBUG(
4227 dbgs()
4228 << "LV: Not considering vector loop of width " << VF
4229 << " because it will not generate any vector instructions.\n");
4230 continue;
4231 }
4232
4233 if (CM.OptForSize && !ForceVectorization && hasReplicatorRegion(*P)) {
4234 LLVM_DEBUG(
4235 dbgs()
4236 << "LV: Not considering vector loop of width " << VF
4237 << " because it would cause replicated blocks to be generated,"
4238 << " which isn't allowed when optimizing for size.\n");
4239 continue;
4240 }
4241
4242 if (isMoreProfitable(Candidate, ChosenFactor, P->hasScalarTail()))
4243 ChosenFactor = Candidate;
4244 }
4245 }
4246
4247 if (!EnableCondStoresVectorization && CM.hasPredStores()) {
4249 "There are conditional stores.",
4250 "store that is conditionally executed prevents vectorization",
4251 "ConditionalStore", ORE, OrigLoop);
4252 ChosenFactor = ScalarCost;
4253 }
4254
4255 LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
4256 !isMoreProfitable(ChosenFactor, ScalarCost,
4257 !CM.foldTailByMasking())) dbgs()
4258 << "LV: Vectorization seems to be not beneficial, "
4259 << "but was forced by a user.\n");
4260 return ChosenFactor;
4261}
4262#endif
4263
4264bool LoopVectorizationPlanner::isCandidateForEpilogueVectorization(
4265 ElementCount VF) const {
4266 // Cross iteration phis such as fixed-order recurrences and FMaxNum/FMinNum
4267 // reductions need special handling and are currently unsupported.
4268 if (any_of(OrigLoop->getHeader()->phis(), [&](PHINode &Phi) {
4269 if (!Legal->isReductionVariable(&Phi))
4270 return Legal->isFixedOrderRecurrence(&Phi);
4271 return RecurrenceDescriptor::isFPMinMaxNumRecurrenceKind(
4272 Legal->getRecurrenceDescriptor(&Phi).getRecurrenceKind());
4273 }))
4274 return false;
4275
4276 // Phis with uses outside of the loop require special handling and are
4277 // currently unsupported.
4278 for (const auto &Entry : Legal->getInductionVars()) {
4279 // Look for uses of the value of the induction at the last iteration.
4280 Value *PostInc =
4281 Entry.first->getIncomingValueForBlock(OrigLoop->getLoopLatch());
4282 for (User *U : PostInc->users())
4283 if (!OrigLoop->contains(cast<Instruction>(U)))
4284 return false;
4285 // Look for uses of penultimate value of the induction.
4286 for (User *U : Entry.first->users())
4287 if (!OrigLoop->contains(cast<Instruction>(U)))
4288 return false;
4289 }
4290
4291 // Epilogue vectorization code has not been audited to ensure it handles
4292 // non-latch exits properly. It may be fine, but it needs to be audited and
4293 // tested.
4294 // TODO: Add support for loops with an early exit.
4295 if (OrigLoop->getExitingBlock() != OrigLoop->getLoopLatch())
4296 return false;
4297
4298 return true;
4299}
4300
4301 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
4302 const ElementCount VF, const unsigned IC) const {
4303 // FIXME: We need a much better cost-model to take different parameters such
4304 // as register pressure, code size increase and cost of extra branches into
4305 // account. For now we apply a very crude heuristic and only consider loops
4306 // with vectorization factors larger than a certain value.
4307
4308 // Allow the target to opt out entirely.
4309 if (!TTI.preferEpilogueVectorization())
4310 return false;
4311
4312 // We also consider epilogue vectorization unprofitable for targets that don't
4313 // consider interleaving beneficial (e.g., MVE).
4314 if (TTI.getMaxInterleaveFactor(VF) <= 1)
4315 return false;
4316
4317 unsigned MinVFThreshold = EpilogueVectorizationMinVF.getNumOccurrences() > 0
4318 ? EpilogueVectorizationMinVF
4319 : TTI.getEpilogueVectorizationMinVF();
4320 return estimateElementCount(VF * IC, VScaleForTuning) >= MinVFThreshold;
4321}
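// Sketch with assumed numbers: MainLoopVF = vscale x 4, a tuning vscale of 2
// and IC = 2 estimate 4 * 2 * 2 = 16 elements per main-loop iteration, which
// is then compared against the epilogue minimum-VF threshold above.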
4322
4323 VectorizationFactor LoopVectorizationPlanner::selectEpilogueVectorizationFactor(
4324 const ElementCount MainLoopVF, unsigned IC) {
4325 VectorizationFactor Result = VectorizationFactor::Disabled();
4326 if (!EnableEpilogueVectorization) {
4327 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n");
4328 return Result;
4329 }
4330
4331 if (!CM.isScalarEpilogueAllowed()) {
4332 LLVM_DEBUG(dbgs() << "LEV: Unable to vectorize epilogue because no "
4333 "epilogue is allowed.\n");
4334 return Result;
4335 }
4336
4337 // Not really a cost consideration, but check for unsupported cases here to
4338 // simplify the logic.
4339 if (!isCandidateForEpilogueVectorization(MainLoopVF)) {
4340 LLVM_DEBUG(dbgs() << "LEV: Unable to vectorize epilogue because the loop "
4341 "is not a supported candidate.\n");
4342 return Result;
4343 }
4344
4345 if (EpilogueVectorizationForceVF > 1) {
4346 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n");
4347 ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF);
4348 if (hasPlanWithVF(ForcedEC))
4349 return {ForcedEC, 0, 0};
4350
4351 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization forced factor is not "
4352 "viable.\n");
4353 return Result;
4354 }
4355
4356 if (OrigLoop->getHeader()->getParent()->hasOptSize()) {
4357 LLVM_DEBUG(
4358 dbgs() << "LEV: Epilogue vectorization skipped due to opt for size.\n");
4359 return Result;
4360 }
4361
4362 if (!CM.isEpilogueVectorizationProfitable(MainLoopVF, IC)) {
4363 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
4364 "this loop\n");
4365 return Result;
4366 }
4367
4368 // If MainLoopVF = vscale x 2, and vscale is expected to be 4, then we know
4369 // the main loop handles 8 lanes per iteration. We could still benefit from
4370 // vectorizing the epilogue loop with VF=4.
4371 ElementCount EstimatedRuntimeVF = ElementCount::getFixed(
4372 estimateElementCount(MainLoopVF, CM.getVScaleForTuning()));
4373
4374 ScalarEvolution &SE = *PSE.getSE();
4375 Type *TCType = Legal->getWidestInductionType();
4376 const SCEV *RemainingIterations = nullptr;
4377 unsigned MaxTripCount = 0;
4378 const SCEV *TC =
4379 vputils::getSCEVExprForVPValue(getPlanFor(MainLoopVF).getTripCount(), SE);
4380 assert(!isa<SCEVCouldNotCompute>(TC) && "Trip count SCEV must be computable");
4381 RemainingIterations =
4382 SE.getURemExpr(TC, SE.getElementCount(TCType, MainLoopVF * IC));
4383
4384 // No iterations left to process in the epilogue.
4385 if (RemainingIterations->isZero())
4386 return Result;
4387
4388 if (MainLoopVF.isFixed()) {
4389 MaxTripCount = MainLoopVF.getFixedValue() * IC - 1;
4390 if (SE.isKnownPredicate(CmpInst::ICMP_ULT, RemainingIterations,
4391 SE.getConstant(TCType, MaxTripCount))) {
4392 MaxTripCount = SE.getUnsignedRangeMax(RemainingIterations).getZExtValue();
4393 }
4394 LLVM_DEBUG(dbgs() << "LEV: Maximum Trip Count for Epilogue: "
4395 << MaxTripCount << "\n");
4396 }
4397
4398 for (auto &NextVF : ProfitableVFs) {
4399 // Skip candidate VFs without a corresponding VPlan.
4400 if (!hasPlanWithVF(NextVF.Width))
4401 continue;
4402
4403 // Skip candidate VFs with widths >= the (estimated) runtime VF (scalable
4404 // vectors) or > the VF of the main loop (fixed vectors).
4405 if ((!NextVF.Width.isScalable() && MainLoopVF.isScalable() &&
4406 ElementCount::isKnownGE(NextVF.Width, EstimatedRuntimeVF)) ||
4407 (NextVF.Width.isScalable() &&
4408 ElementCount::isKnownGE(NextVF.Width, MainLoopVF)) ||
4409 (!NextVF.Width.isScalable() && !MainLoopVF.isScalable() &&
4410 ElementCount::isKnownGT(NextVF.Width, MainLoopVF)))
4411 continue;
4412
4413 // If NextVF is greater than the number of remaining iterations, the
4414 // epilogue loop would be dead. Skip such factors.
4415 if (RemainingIterations && !NextVF.Width.isScalable()) {
4416 if (SE.isKnownPredicate(
4417 CmpInst::ICMP_UGT,
4418 SE.getConstant(TCType, NextVF.Width.getFixedValue()),
4419 RemainingIterations))
4420 continue;
4421 }
4422
4423 if (Result.Width.isScalar() ||
4424 isMoreProfitable(NextVF, Result, MaxTripCount, !CM.foldTailByMasking(),
4425 /*IsEpilogue*/ true))
4426 Result = NextVF;
4427 }
4428
4429 if (Result != VectorizationFactor::Disabled())
4430 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
4431 << Result.Width << "\n");
4432 return Result;
4433}
4434
4435std::pair<unsigned, unsigned>
4436 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
4437 unsigned MinWidth = -1U;
4438 unsigned MaxWidth = 8;
4439 const DataLayout &DL = TheFunction->getDataLayout();
4440 // For in-loop reductions, no element types are added to ElementTypesInLoop
4441 // if there are no loads/stores in the loop. In this case, check through the
4442 // reduction variables to determine the maximum width.
4443 if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) {
4444 for (const auto &PhiDescriptorPair : Legal->getReductionVars()) {
4445 const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second;
4446 // When finding the min width used by the recurrence we need to account
4447 // for casts on the input operands of the recurrence.
4448 MinWidth = std::min(
4449 MinWidth,
4450 std::min(RdxDesc.getMinWidthCastToRecurrenceTypeInBits(),
4451 RdxDesc.getRecurrenceType()->getScalarSizeInBits()));
4452 MaxWidth = std::max(MaxWidth,
4453 RdxDesc.getRecurrenceType()->getScalarSizeInBits());
4454 }
4455 } else {
4456 for (Type *T : ElementTypesInLoop) {
4457 MinWidth = std::min<unsigned>(
4458 MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedValue());
4459 MaxWidth = std::max<unsigned>(
4460 MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedValue());
4461 }
4462 }
4463 return {MinWidth, MaxWidth};
4464}
4465
4466 void LoopVectorizationCostModel::collectElementTypesForWidening() {
4467 ElementTypesInLoop.clear();
4468 // For each block.
4469 for (BasicBlock *BB : TheLoop->blocks()) {
4470 // For each instruction in the loop.
4471 for (Instruction &I : BB->instructionsWithoutDebug()) {
4472 Type *T = I.getType();
4473
4474 // Skip ignored values.
4475 if (ValuesToIgnore.count(&I))
4476 continue;
4477
4478 // Only examine Loads, Stores and PHINodes.
4479 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
4480 continue;
4481
4482 // Examine PHI nodes that are reduction variables. Update the type to
4483 // account for the recurrence type.
4484 if (auto *PN = dyn_cast<PHINode>(&I)) {
4485 if (!Legal->isReductionVariable(PN))
4486 continue;
4487 const RecurrenceDescriptor &RdxDesc =
4488 Legal->getRecurrenceDescriptor(PN);
4489 if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
4490 TTI.preferInLoopReduction(RdxDesc.getRecurrenceKind(),
4491 RdxDesc.getRecurrenceType()))
4492 continue;
4493 T = RdxDesc.getRecurrenceType();
4494 }
4495
4496 // Examine the stored values.
4497 if (auto *ST = dyn_cast<StoreInst>(&I))
4498 T = ST->getValueOperand()->getType();
4499
4500 assert(T->isSized() &&
4501 "Expected the load/store/recurrence type to be sized");
4502
4503 ElementTypesInLoop.insert(T);
4504 }
4505 }
4506}
4507
4508unsigned
4509 LoopVectorizationPlanner::selectInterleaveCount(VPlan &Plan, ElementCount VF,
4510 InstructionCost LoopCost) {
4511 // -- The interleave heuristics --
4512 // We interleave the loop in order to expose ILP and reduce the loop overhead.
4513 // There are many micro-architectural considerations that we can't predict
4514 // at this level. For example, frontend pressure (on decode or fetch) due to
4515 // code size, or the number and capabilities of the execution ports.
4516 //
4517 // We use the following heuristics to select the interleave count:
4518 // 1. If the code has reductions, then we interleave to break the cross
4519 // iteration dependency.
4520 // 2. If the loop is really small, then we interleave to reduce the loop
4521 // overhead.
4522 // 3. We don't interleave if we think that we will spill registers to memory
4523 // due to the increased register pressure.
4524
4525 if (!CM.isScalarEpilogueAllowed())
4526 return 1;
4527
4529 if (CM.foldTailWithEVL()) {
4530 LLVM_DEBUG(dbgs() << "LV: Preference for VP intrinsics indicated. "
4531 "Unroll factor forced to be 1.\n");
4532 return 1;
4533 }
4534
4535 // The maximum safe dependence distance was already used to bound the VF, so do not interleave.
4536 if (!Legal->isSafeForAnyVectorWidth())
4537 return 1;
4538
4539 // We don't attempt to perform interleaving for loops with uncountable early
4540 // exits because the VPInstruction::AnyOf code cannot currently handle
4541 // multiple parts.
4542 if (Plan.hasEarlyExit())
4543 return 1;
4544
4545 const bool HasReductions =
4546 any_of(Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis(),
4547 IsaPred<VPReductionPHIRecipe>);
4548
4549 // If we did not calculate the cost for VF (because the user selected the VF)
4550 // then we calculate the cost of VF here.
4551 if (LoopCost == 0) {
4552 if (VF.isScalar())
4553 LoopCost = CM.expectedCost(VF);
4554 else
4555 LoopCost = cost(Plan, VF);
4556 assert(LoopCost.isValid() && "Expected to have chosen a VF with valid cost");
4557
4558 // Loop body is free and there is no need for interleaving.
4559 if (LoopCost == 0)
4560 return 1;
4561 }
4562
4563 VPRegisterUsage R =
4564 calculateRegisterUsageForPlan(Plan, {VF}, TTI, CM.ValuesToIgnore)[0];
4565 // We divide by these constants so assume that we have at least one
4566 // instruction that uses at least one register.
4567 for (auto &Pair : R.MaxLocalUsers) {
4568 Pair.second = std::max(Pair.second, 1U);
4569 }
4570
4571 // We calculate the interleave count using the following formula.
4572 // Subtract the number of loop invariants from the number of available
4573 // registers. These registers are used by all of the interleaved instances.
4574 // Next, divide the remaining registers by the number of registers that is
4575 // required by the loop, in order to estimate how many parallel instances
4576 // fit without causing spills. All of this is rounded down if necessary to be
4577 // a power of two. We want power of two interleave count to simplify any
4578 // addressing operations or alignment considerations.
4579 // We also want power of two interleave counts to ensure that the induction
4580 // variable of the vector loop wraps to zero, when tail is folded by masking;
4581 // this currently happens when OptForSize, in which case IC is set to 1 above.
4582 unsigned IC = UINT_MAX;
4583
4584 for (const auto &Pair : R.MaxLocalUsers) {
4585 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(Pair.first);
4586 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
4587 << " registers of "
4588 << TTI.getRegisterClassName(Pair.first)
4589 << " register class\n");
4590 if (VF.isScalar()) {
4591 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
4592 TargetNumRegisters = ForceTargetNumScalarRegs;
4593 } else {
4594 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
4595 TargetNumRegisters = ForceTargetNumVectorRegs;
4596 }
4597 unsigned MaxLocalUsers = Pair.second;
4598 unsigned LoopInvariantRegs = 0;
4599 if (R.LoopInvariantRegs.contains(Pair.first))
4600 LoopInvariantRegs = R.LoopInvariantRegs[Pair.first];
4601
4602 unsigned TmpIC = llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs) /
4603 MaxLocalUsers);
4604 // Don't count the induction variable as interleaved.
4605 if (EnableIndVarRegisterHeur) {
4606 TmpIC = llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs - 1) /
4607 std::max(1U, (MaxLocalUsers - 1)));
4608 }
4609
4610 IC = std::min(IC, TmpIC);
4611 }
4612
4613 // Clamp the interleave ranges to reasonable counts.
4614 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
4615
4616 // Check if the user has overridden the max.
4617 if (VF.isScalar()) {
4618 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
4619 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
4620 } else {
4621 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
4622 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
4623 }
4624
4625 // Try to get the exact trip count, or an estimate based on profiling data or
4626 // ConstantMax from PSE, failing that.
4627 auto BestKnownTC = getSmallBestKnownTC(PSE, OrigLoop);
4628
4629 // For fixed length VFs treat a scalable trip count as unknown.
4630 if (BestKnownTC && (BestKnownTC->isFixed() || VF.isScalable())) {
4631 // Re-evaluate trip counts and VFs to be in the same numerical space.
4632 unsigned AvailableTC =
4633 estimateElementCount(*BestKnownTC, CM.getVScaleForTuning());
4634 unsigned EstimatedVF = estimateElementCount(VF, CM.getVScaleForTuning());
4635
4636 // At least one iteration must be scalar when this constraint holds. So the
4637 // maximum available iterations for interleaving is one less.
4638 if (CM.requiresScalarEpilogue(VF.isVector()))
4639 --AvailableTC;
4640
4641 unsigned InterleaveCountLB = bit_floor(std::max(
4642 1u, std::min(AvailableTC / (EstimatedVF * 2), MaxInterleaveCount)));
4643
4644 if (getSmallConstantTripCount(PSE.getSE(), OrigLoop).isNonZero()) {
4645 // If the best known trip count is exact, we select between two
4646 // prospective ICs, where
4647 //
4648 // 1) the aggressive IC is capped by the trip count divided by VF
4649 // 2) the conservative IC is capped by the trip count divided by (VF * 2)
4650 //
4651 // The final IC is selected in a way that the epilogue loop trip count is
4652 // minimized while maximizing the IC itself, so that we either run the
4653 // vector loop at least once if it generates a small epilogue loop, or
4654 // else we run the vector loop at least twice.
4655
4656 unsigned InterleaveCountUB = bit_floor(std::max(
4657 1u, std::min(AvailableTC / EstimatedVF, MaxInterleaveCount)));
4658 MaxInterleaveCount = InterleaveCountLB;
4659
4660 if (InterleaveCountUB != InterleaveCountLB) {
4661 unsigned TailTripCountUB =
4662 (AvailableTC % (EstimatedVF * InterleaveCountUB));
4663 unsigned TailTripCountLB =
4664 (AvailableTC % (EstimatedVF * InterleaveCountLB));
4665 // If both produce the same scalar tail, maximize the IC to do the same
4666 // work in fewer vector loop iterations.
4667 if (TailTripCountUB == TailTripCountLB)
4668 MaxInterleaveCount = InterleaveCountUB;
4669 }
4670 } else {
4671 // If trip count is an estimated compile time constant, limit the
4672 // IC to be capped by the trip count divided by VF * 2, such that the
4673 // vector loop runs at least twice to make interleaving seem profitable
4674 // when there is an epilogue loop present. Since the exact trip count is
4675 // not known, we choose to be conservative in our IC estimate.
4676 MaxInterleaveCount = InterleaveCountLB;
4677 }
4678 }
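// Worked example of the clamping above (hypothetical values, exact trip
// count known): AvailableTC = 64, EstimatedVF = 8 and a target max of 8
// give InterleaveCountLB = bit_floor(64 / 16) = 4 and InterleaveCountUB =
// bit_floor(64 / 8) = 8. Both leave the same scalar tail
// (64 % 64 == 64 % 32 == 0), so MaxInterleaveCount becomes the larger
// UB = 8; with only an estimated trip count, the conservative LB = 4 is
// used instead.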
4679
4680 assert(MaxInterleaveCount > 0 &&
4681 "Maximum interleave count must be greater than 0");
4682
4683 // Clamp the calculated IC to be between 1 and the max interleave count
4684 // that the target and trip count allow.
4685 if (IC > MaxInterleaveCount)
4686 IC = MaxInterleaveCount;
4687 else
4688 // Make sure IC is greater than 0.
4689 IC = std::max(1u, IC);
4690
4691 assert(IC > 0 && "Interleave count must be greater than 0.");
4692
4693 // Interleave if we vectorized this loop and there is a reduction that could
4694 // benefit from interleaving.
4695 if (VF.isVector() && HasReductions) {
4696 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
4697 return IC;
4698 }
4699
4700 // For any scalar loop that either requires runtime checks or predication we
4701 // are better off leaving this to the unroller. Note that if we've already
4702 // vectorized the loop we will have done the runtime check and so interleaving
4703 // won't require further checks.
4704 bool ScalarInterleavingRequiresPredication =
4705 (VF.isScalar() && any_of(OrigLoop->blocks(), [this](BasicBlock *BB) {
4706 return Legal->blockNeedsPredication(BB);
4707 }));
4708 bool ScalarInterleavingRequiresRuntimePointerCheck =
4709 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
4710
4711 // We want to interleave small loops in order to reduce the loop overhead and
4712 // potentially expose ILP opportunities.
4713 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
4714 << "LV: IC is " << IC << '\n'
4715 << "LV: VF is " << VF << '\n');
4716 const bool AggressivelyInterleaveReductions =
4717 TTI.enableAggressiveInterleaving(HasReductions);
4718 if (!ScalarInterleavingRequiresRuntimePointerCheck &&
4719 !ScalarInterleavingRequiresPredication && LoopCost < SmallLoopCost) {
4720 // We assume that the cost overhead is 1 and we use the cost model
4721 // to estimate the cost of the loop and interleave until the cost of the
4722 // loop overhead is about 5% of the cost of the loop.
4723 unsigned SmallIC = std::min(IC, (unsigned)llvm::bit_floor<uint64_t>(
4724 SmallLoopCost / LoopCost.getValue()));
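// For illustration (hypothetical costs): with SmallLoopCost = 20 and
// LoopCost = 3, SmallIC = min(IC, bit_floor(20 / 3)) = min(IC, 4), i.e.
// interleave until the fixed loop overhead is amortized to roughly 5%.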
4725
4726 // Interleave until store/load ports (estimated by max interleave count) are
4727 // saturated.
4728 unsigned NumStores = 0;
4729 unsigned NumLoads = 0;
4730 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
4731 vp_depth_first_deep(Plan.getVectorLoopRegion()->getEntry()))) {
4732 for (VPRecipeBase &R : *VPBB) {
4733 if (isa<VPWidenLoadRecipe, VPWidenLoadEVLRecipe>(&R)) {
4734 NumLoads++;
4735 continue;
4736 }
4737 if (isa<VPWidenStoreRecipe, VPWidenStoreEVLRecipe>(&R)) {
4738 NumStores++;
4739 continue;
4740 }
4741
4742 if (auto *InterleaveR = dyn_cast<VPInterleaveRecipe>(&R)) {
4743 if (unsigned StoreOps = InterleaveR->getNumStoreOperands())
4744 NumStores += StoreOps;
4745 else
4746 NumLoads += InterleaveR->getNumDefinedValues();
4747 continue;
4748 }
4749 if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R)) {
4750 NumLoads += isa<LoadInst>(RepR->getUnderlyingInstr());
4751 NumStores += isa<StoreInst>(RepR->getUnderlyingInstr());
4752 continue;
4753 }
4754 if (isa<VPHistogramRecipe>(&R)) {
4755 NumLoads++;
4756 NumStores++;
4757 continue;
4758 }
4759 }
4760 }
4761 unsigned StoresIC = IC / (NumStores ? NumStores : 1);
4762 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
4763
4764 // There is little point in interleaving for reductions containing selects
4765 // and compares when VF=1 since it may just create more overhead than it's
4766 // worth for loops with small trip counts. This is because we still have to
4767 // do the final reduction after the loop.
4768 bool HasSelectCmpReductions =
4769 HasReductions &&
4770 any_of(Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis(),
4771 [](VPRecipeBase &R) {
4772 auto *RedR = dyn_cast<VPReductionPHIRecipe>(&R);
4773 return RedR && (RecurrenceDescriptor::isAnyOfRecurrenceKind(
4774 RedR->getRecurrenceKind()) ||
4775 RecurrenceDescriptor::isFindIVRecurrenceKind(
4776 RedR->getRecurrenceKind()));
4777 });
4778 if (HasSelectCmpReductions) {
4779 LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");
4780 return 1;
4781 }
4782
4783 // If we have a scalar reduction (vector reductions are already dealt with
4784 // by this point), we can increase the critical path length if the loop
4785 // we're interleaving is inside another loop. For tree-wise reductions
4786 // set the limit to 2, and for ordered reductions it's best to disable
4787 // interleaving entirely.
4788 if (HasReductions && OrigLoop->getLoopDepth() > 1) {
4789 bool HasOrderedReductions =
4790 any_of(Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis(),
4791 [](VPRecipeBase &R) {
4792 auto *RedR = dyn_cast<VPReductionPHIRecipe>(&R);
4793
4794 return RedR && RedR->isOrdered();
4795 });
4796 if (HasOrderedReductions) {
4797 LLVM_DEBUG(
4798 dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
4799 return 1;
4800 }
4801
4802 unsigned F = MaxNestedScalarReductionIC;
4803 SmallIC = std::min(SmallIC, F);
4804 StoresIC = std::min(StoresIC, F);
4805 LoadsIC = std::min(LoadsIC, F);
4806 }
4807
4808 if (EnableLoadStoreRuntimeInterleave &&
4809 std::max(StoresIC, LoadsIC) > SmallIC) {
4810 LLVM_DEBUG(
4811 dbgs() << "LV: Interleaving to saturate store or load ports.\n");
4812 return std::max(StoresIC, LoadsIC);
4813 }
4814
4815 // If there are scalar reductions and TTI has enabled aggressive
4816 // interleaving for reductions, we will interleave to expose ILP.
4817 if (VF.isScalar() && AggressivelyInterleaveReductions) {
4818 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
4819 // Interleave no less than SmallIC but not as aggressive as the normal IC
4820 // to satisfy the rare situation when resources are too limited.
4821 return std::max(IC / 2, SmallIC);
4822 }
4823
4824 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
4825 return SmallIC;
4826 }
4827
4828 // Interleave if this is a large loop (small loops are already dealt with by
4829 // this point) that could benefit from interleaving.
4830 if (AggressivelyInterleaveReductions) {
4831 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
4832 return IC;
4833 }
4834
4835 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
4836 return 1;
4837}
4838
4839bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I,
4840 ElementCount VF) {
4841 // TODO: Cost model for emulated masked load/store is completely
4842 // broken. This hack guides the cost model to use an artificially
4843 // high enough value to practically disable vectorization with such
4844 // operations, except where previously deployed legality hack allowed
4845 // using very low cost values. This is to avoid regressions coming simply
4846 // from moving "masked load/store" check from legality to cost model.
4847 // Masked load/gather emulation was previously never allowed.
4848 // Only a limited number of masked store/scatter emulations were allowed.
4849 assert((isPredicatedInst(I)) &&
4850 "Expecting a scalar emulated instruction");
4851 return isa<LoadInst>(I) ||
4852 (isa<StoreInst>(I) &&
4853 NumPredStores > NumberOfStoresToPredicate);
4854}
4855
4856 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
4857 assert(VF.isVector() && "Expected VF >= 2");
4858
4859 // If we've already collected the instructions to scalarize or the predicated
4860 // BBs after vectorization, there's nothing to do. Collection may already have
4861 // occurred if we have a user-selected VF and are now computing the expected
4862 // cost for interleaving.
4863 if (InstsToScalarize.contains(VF) ||
4864 PredicatedBBsAfterVectorization.contains(VF))
4865 return;
4866
4867 // Initialize a mapping for VF in InstsToScalarize. If we find that it's
4868 // not profitable to scalarize any instructions, the presence of VF in the
4869 // map will indicate that we've analyzed it already.
4870 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
4871
4872 // Find all the instructions that are scalar with predication in the loop and
4873 // determine if it would be better to not if-convert the blocks they are in.
4874 // If so, we also record the instructions to scalarize.
4875 for (BasicBlock *BB : TheLoop->blocks()) {
4876 if (!blockNeedsPredicationForAnyReason(BB))
4877 continue;
4878 for (Instruction &I : *BB)
4879 if (isScalarWithPredication(&I, VF)) {
4880 ScalarCostsTy ScalarCosts;
4881 // Do not apply discount logic for:
4882 // 1. Scalars after vectorization, as there will only be a single copy
4883 // of the instruction.
4884 // 2. Scalable VF, as that would lead to invalid scalarization costs.
4885 // 3. Emulated masked memrefs, if a hacked cost is needed.
4886 if (!isScalarAfterVectorization(&I, VF) && !VF.isScalable() &&
4887 !useEmulatedMaskMemRefHack(&I, VF) &&
4888 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) {
4889 for (const auto &[I, IC] : ScalarCosts)
4890 ScalarCostsVF.insert({I, IC});
4891 // Check if we decided to scalarize a call. If so, update the widening
4892 // decision of the call to CM_Scalarize with the computed scalar cost.
4893 for (const auto &[I, Cost] : ScalarCosts) {
4894 auto *CI = dyn_cast<CallInst>(I);
4895 if (!CI || !CallWideningDecisions.contains({CI, VF}))
4896 continue;
4897 CallWideningDecisions[{CI, VF}].Kind = CM_Scalarize;
4898 CallWideningDecisions[{CI, VF}].Cost = Cost;
4899 }
4900 }
4901 // Remember that BB will remain after vectorization.
4902 PredicatedBBsAfterVectorization[VF].insert(BB);
4903 for (auto *Pred : predecessors(BB)) {
4904 if (Pred->getSingleSuccessor() == BB)
4905 PredicatedBBsAfterVectorization[VF].insert(Pred);
4906 }
4907 }
4908 }
4909}
4910
4911InstructionCost LoopVectorizationCostModel::computePredInstDiscount(
4912 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
4913 assert(!isUniformAfterVectorization(PredInst, VF) &&
4914 "Instruction marked uniform-after-vectorization will be predicated");
4915
4916 // Initialize the discount to zero, meaning that the scalar version and the
4917 // vector version cost the same.
4918 InstructionCost Discount = 0;
4919
4920 // Holds instructions to analyze. The instructions we visit are mapped in
4921 // ScalarCosts. Those instructions are the ones that would be scalarized if
4922 // we find that the scalar version costs less.
4923 SmallVector<Instruction *, 8> Worklist;
4924
4925 // Returns true if the given instruction can be scalarized.
4926 auto CanBeScalarized = [&](Instruction *I) -> bool {
4927 // We only attempt to scalarize instructions forming a single-use chain
4928 // from the original predicated block that would otherwise be vectorized.
4929 // Although not strictly necessary, we give up on instructions we know will
4930 // already be scalar to avoid traversing chains that are unlikely to be
4931 // beneficial.
4932 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
4933 isScalarAfterVectorization(I, VF))
4934 return false;
4935
4936 // If the instruction is scalar with predication, it will be analyzed
4937 // separately. We ignore it within the context of PredInst.
4938 if (isScalarWithPredication(I, VF))
4939 return false;
4940
4941 // If any of the instruction's operands are uniform after vectorization,
4942 // the instruction cannot be scalarized. This prevents, for example, a
4943 // masked load from being scalarized.
4944 //
4945 // We assume we will only emit a value for lane zero of an instruction
4946 // marked uniform after vectorization, rather than VF identical values.
4947 // Thus, if we scalarize an instruction that uses a uniform, we would
4948 // create uses of values corresponding to the lanes we aren't emitting code
4949 // for. This behavior can be changed by allowing getScalarValue to clone
4950 // the lane zero values for uniforms rather than asserting.
4951 for (Use &U : I->operands())
4952 if (auto *J = dyn_cast<Instruction>(U.get()))
4953 if (isUniformAfterVectorization(J, VF))
4954 return false;
4955
4956 // Otherwise, we can scalarize the instruction.
4957 return true;
4958 };
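// For illustration, a hypothetical single-use chain in a predicated block:
//   %a = add i32 %x, 1    ; single use, same block as the udiv
//   %d = udiv i32 %y, %a  ; PredInst (scalar with predication)
// Here %a is a candidate to scalarize together with %d, provided it is not
// already scalar or uniform after vectorization.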
4959
4960 // Compute the expected cost discount from scalarizing the entire expression
4961 // feeding the predicated instruction. We currently only consider expressions
4962 // that are single-use instruction chains.
4963 Worklist.push_back(PredInst);
4964 while (!Worklist.empty()) {
4965 Instruction *I = Worklist.pop_back_val();
4966
4967 // If we've already analyzed the instruction, there's nothing to do.
4968 if (ScalarCosts.contains(I))
4969 continue;
4970
4971 // Cannot scalarize fixed-order recurrence phis at the moment.
4972 if (isa<PHINode>(I) && Legal->isFixedOrderRecurrence(cast<PHINode>(I)))
4973 continue;
4974
4975 // Compute the cost of the vector instruction. Note that this cost already
4976 // includes the scalarization overhead of the predicated instruction.
4977 InstructionCost VectorCost = getInstructionCost(I, VF);
4978
4979 // Compute the cost of the scalarized instruction. This cost is the cost of
4980 // the instruction as if it wasn't if-converted and instead remained in the
4981 // predicated block. We will scale this cost by block probability after
4982 // computing the scalarization overhead.
4983 InstructionCost ScalarCost =
4984 VF.getFixedValue() * getInstructionCost(I, ElementCount::getFixed(1));
4985
4986 // Compute the scalarization overhead of needed insertelement instructions
4987 // and phi nodes.
4988 if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) {
4989 Type *WideTy = toVectorizedTy(I->getType(), VF);
4990 for (Type *VectorTy : getContainedTypes(WideTy)) {
4991 ScalarCost += TTI.getScalarizationOverhead(
4992 cast<VectorType>(VectorTy), APInt::getAllOnes(VF.getFixedValue()),
4993 /*Insert=*/true,
4994 /*Extract=*/false, CostKind);
4995 }
4996 ScalarCost +=
4997 VF.getFixedValue() * TTI.getCFInstrCost(Instruction::PHI, CostKind);
4998 }
4999
5000 // Compute the scalarization overhead of needed extractelement
5001 // instructions. For each of the instruction's operands, if the operand can
5002 // be scalarized, add it to the worklist; otherwise, account for the
5003 // overhead.
5004 for (Use &U : I->operands())
5005 if (auto *J = dyn_cast<Instruction>(U.get())) {
5006 assert(canVectorizeTy(J->getType()) &&
5007 "Instruction has non-scalar type");
5008 if (CanBeScalarized(J))
5009 Worklist.push_back(J);
5010 else if (needsExtract(J, VF)) {
5011 Type *WideTy = toVectorizedTy(J->getType(), VF);
5012 for (Type *VectorTy : getContainedTypes(WideTy)) {
5013 ScalarCost += TTI.getScalarizationOverhead(
5014 cast<VectorType>(VectorTy),
5015 APInt::getAllOnes(VF.getFixedValue()), /*Insert*/ false,
5016 /*Extract*/ true, CostKind);
5017 }
5018 }
5019 }
5020
5021 // Scale the total scalar cost by block probability.
5022 ScalarCost /= getPredBlockCostDivisor(CostKind);
5023
5024 // Compute the discount. A non-negative discount means the vector version
5025 // of the instruction costs more, and scalarizing would be beneficial.
5026 Discount += VectorCost - ScalarCost;
5027 ScalarCosts[I] = ScalarCost;
5028 }
5029
5030 return Discount;
5031}
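// Worked example of the discount above (hypothetical costs, VF = 4,
// block-cost divisor = 2): if the if-converted vector form of I costs 12
// and four scalar copies cost 4 * 2 = 8 (ignoring insert/extract
// overhead), the scalar cost scales by the block probability to 8 / 2 = 4,
// the discount grows by 12 - 4 = 8, and scalarizing I is profitable.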
5032
5033 InstructionCost LoopVectorizationCostModel::expectedCost(ElementCount VF) {
5034 InstructionCost Cost;
5035
5036 // If the vector loop gets executed exactly once with the given VF, ignore the
5037 // costs of comparison and induction instructions, as they'll get simplified
5038 // away.
5039 SmallPtrSet<Instruction *, 2> ValuesToIgnoreForVF;
5040 auto TC = getSmallConstantTripCount(PSE.getSE(), TheLoop);
5041 if (TC == VF && !foldTailByMasking())
5042 addFullyUnrolledInstructionsToIgnore(TheLoop, Legal->getInductionVars(),
5043 ValuesToIgnoreForVF);
5044
5045 // For each block.
5046 for (BasicBlock *BB : TheLoop->blocks()) {
5047 InstructionCost BlockCost;
5048
5049 // For each instruction in the old loop.
5050 for (Instruction &I : BB->instructionsWithoutDebug()) {
5051 // Skip ignored values.
5052 if (ValuesToIgnore.count(&I) || ValuesToIgnoreForVF.count(&I) ||
5053 (VF.isVector() && VecValuesToIgnore.count(&I)))
5054 continue;
5055
5056 InstructionCost C = getInstructionCost(&I, VF);
5057
5058 // Check if we should override the cost.
5059 if (C.isValid() && ForceTargetInstructionCost.getNumOccurrences() > 0)
5060 C = InstructionCost(ForceTargetInstructionCost);
5061
5062 BlockCost += C;
5063 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C << " for VF "
5064 << VF << " For instruction: " << I << '\n');
5065 }
5066
5067 // If we are vectorizing a predicated block, it will have been
5068 // if-converted. This means that the block's instructions (aside from
5069 // stores and instructions that may divide by zero) will now be
5070 // unconditionally executed. For the scalar case, we may not always execute
5071 // the predicated block, if it is an if-else block. Thus, scale the block's
5072 // cost by the probability of executing it. blockNeedsPredication from
5073 // Legal is used so as to not include all blocks in tail folded loops.
5074 if (VF.isScalar() && Legal->blockNeedsPredication(BB))
5075 BlockCost /= getPredBlockCostDivisor(CostKind);
5076
5077 Cost += BlockCost;
5078 }
5079
5080 return Cost;
5081}
5082
5083/// Gets Address Access SCEV after verifying that the access pattern
5084/// is loop invariant except the induction variable dependence.
5085///
5086/// This SCEV can be sent to the Target in order to estimate the address
5087/// calculation cost.
5088 static const SCEV *getAddressAccessSCEV(
5089 Value *Ptr,
5090 LoopVectorizationLegality *Legal,
5091 PredicatedScalarEvolution &PSE,
5092 const Loop *TheLoop) {
5093
5094 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
5095 if (!Gep)
5096 return nullptr;
5097
5098 // We are looking for a gep with all loop invariant indices except for one
5099 // which should be an induction variable.
5100 auto *SE = PSE.getSE();
5101 unsigned NumOperands = Gep->getNumOperands();
5102 for (unsigned Idx = 1; Idx < NumOperands; ++Idx) {
5103 Value *Opd = Gep->getOperand(Idx);
5104 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
5105 !Legal->isInductionVariable(Opd))
5106 return nullptr;
5107 }
5108
5109 // Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV.
5110 return PSE.getSCEV(Ptr);
5111}
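// For example (hypothetical IR), the check above accepts a GEP such as
//   %gep = getelementptr inbounds float, ptr %base, i64 %iv
// where %base is loop-invariant and %iv is an induction variable; any
// other loop-variant index makes this helper return nullptr.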
5112
5113 InstructionCost
5114LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
5115 ElementCount VF) {
5116 assert(VF.isVector() &&
5117 "Scalarization cost of instruction implies vectorization.");
5118 if (VF.isScalable())
5119 return InstructionCost::getInvalid();
5120
5121 Type *ValTy = getLoadStoreType(I);
5122 auto *SE = PSE.getSE();
5123
5124 unsigned AS = getLoadStoreAddressSpace(I);
5125 Value *Ptr = getLoadStorePointerOperand(I);
5126 Type *PtrTy = toVectorTy(Ptr->getType(), VF);
5127 // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
5128 // that it is being called from this specific place.
5129
5130 // Figure out whether the access is strided and get the stride value
5131 // if it's known at compile time.
5132 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
5133
5134 // Get the cost of the scalar memory instruction and address computation.
5135 InstructionCost Cost = VF.getFixedValue() * TTI.getAddressComputationCost(
5136 PtrTy, SE, PtrSCEV, CostKind);
5137
5138 // Don't pass *I here, since it is scalar but will actually be part of a
5139 // vectorized loop where the user of it is a vectorized instruction.
5140 const Align Alignment = getLoadStoreAlignment(I);
5141 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
5142 Cost += VF.getFixedValue() *
5143 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
5144 AS, CostKind, OpInfo);
5145
5146 // Get the overhead of the extractelement and insertelement instructions
5147 // we might create due to scalarization.
5148 Cost += getScalarizationOverhead(I, VF);
5149
5150 // If we have a predicated load/store, it will need extra i1 extracts and
5151 // conditional branches, but may not be executed for each vector lane. Scale
5152 // the cost by the probability of executing the predicated block.
5153 if (isPredicatedInst(I)) {
5154 Cost /= getPredBlockCostDivisor(CostKind);
5155
5156 // Add the cost of an i1 extract and a branch
5157 auto *VecI1Ty =
5158 VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
5159 Cost += TTI.getScalarizationOverhead(
5160 VecI1Ty, APInt::getAllOnes(VF.getFixedValue()),
5161 /*Insert=*/false, /*Extract=*/true, CostKind);
5162 Cost += TTI.getCFInstrCost(Instruction::Br, CostKind);
5163
5164 if (useEmulatedMaskMemRefHack(I, VF))
5165 // Artificially setting to a high enough value to practically disable
5166 // vectorization with such operations.
5167 Cost = 3000000;
5168 }
5169
5170 return Cost;
5171}
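// Worked example (hypothetical costs, VF = 4): 4 address computations at
// cost 1 each, 4 scalar loads at cost 1 each and insert/extract overhead
// of 4 give Cost = 12; a predicated access is further divided by the
// block-probability divisor and then pays the i1 extracts and branches.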
5172
5173 InstructionCost
5174LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
5175 ElementCount VF) {
5176 Type *ValTy = getLoadStoreType(I);
5177 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5178 Value *Ptr = getLoadStorePointerOperand(I);
5179 unsigned AS = getLoadStoreAddressSpace(I);
5180 int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
5181
5182 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5183 "Stride should be 1 or -1 for consecutive memory access");
5184 const Align Alignment = getLoadStoreAlignment(I);
5185 InstructionCost Cost = 0;
5186 if (Legal->isMaskRequired(I)) {
5187 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
5188 CostKind);
5189 } else {
5190 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
5191 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
5192 CostKind, OpInfo, I);
5193 }
5194
5195 bool Reverse = ConsecutiveStride < 0;
5196 if (Reverse)
5197 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy,
5198 VectorTy, {}, CostKind, 0);
5199 return Cost;
5200}
5201
5202 InstructionCost
5203LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
5204 ElementCount VF) {
5205 assert(Legal->isUniformMemOp(*I, VF));
5206
5207 Type *ValTy = getLoadStoreType(I);
5208 Type *PtrTy = getLoadStorePointerOperand(I)->getType();
5209 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5210 const Align Alignment = getLoadStoreAlignment(I);
5211 unsigned AS = getLoadStoreAddressSpace(I);
5212 if (isa<LoadInst>(I)) {
5213 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5214 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
5215 CostKind) +
5216 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy,
5217 VectorTy, {}, CostKind);
5218 }
5219 StoreInst *SI = cast<StoreInst>(I);
5220
5221 bool IsLoopInvariantStoreValue = Legal->isInvariant(SI->getValueOperand());
5222 // TODO: We have existing tests that request the cost of extracting element
5223 // VF.getKnownMinValue() - 1 from a scalable vector. This does not represent
5224 // the actual generated code, which involves extracting the last element of
5225 // a scalable vector where the lane to extract is unknown at compile time.
5226 InstructionCost Cost =
5227 TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5228 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, CostKind);
5229 if (!IsLoopInvariantStoreValue)
5230 Cost += TTI.getIndexedVectorInstrCostFromEnd(Instruction::ExtractElement,
5231 VectorTy, CostKind, 0);
5232 return Cost;
5233}
5234
5235 InstructionCost
5236LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
5237 ElementCount VF) {
5238 Type *ValTy = getLoadStoreType(I);
5239 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5240 const Align Alignment = getLoadStoreAlignment(I);
5241 Value *Ptr = getLoadStorePointerOperand(I);
5242 Type *PtrTy = Ptr->getType();
5243
5244 if (!Legal->isUniform(Ptr, VF))
5245 PtrTy = toVectorTy(PtrTy, VF);
5246
5247 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5248 TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
5249 Legal->isMaskRequired(I), Alignment,
5250 CostKind, I);
5251}
5252
5253 InstructionCost
5254LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
5255 ElementCount VF) {
5256 const auto *Group = getInterleavedAccessGroup(I);
5257 assert(Group && "Fail to get an interleaved access group.");
5258
5259 Instruction *InsertPos = Group->getInsertPos();
5260 Type *ValTy = getLoadStoreType(InsertPos);
5261 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5262 unsigned AS = getLoadStoreAddressSpace(InsertPos);
5263
5264 unsigned InterleaveFactor = Group->getFactor();
5265 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
5266
5267 // Holds the indices of existing members in the interleaved group.
5268 SmallVector<unsigned, 4> Indices;
5269 for (unsigned IF = 0; IF < InterleaveFactor; IF++)
5270 if (Group->getMember(IF))
5271 Indices.push_back(IF);
5272
5273 // Calculate the cost of the whole interleaved group.
5274 bool UseMaskForGaps =
5275 (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
5276 (isa<StoreInst>(I) && !Group->isFull());
5277 InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
5278 InsertPos->getOpcode(), WideVecTy, Group->getFactor(), Indices,
5279 Group->getAlign(), AS, CostKind, Legal->isMaskRequired(I),
5280 UseMaskForGaps);
5281
5282 if (Group->isReverse()) {
5283 // TODO: Add support for reversed masked interleaved access.
5284 assert(!Legal->isMaskRequired(I) &&
5285 "Reverse masked interleaved access not supported.");
5286 Cost += Group->getNumMembers() *
5287 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy,
5288 VectorTy, {}, CostKind, 0);
5289 }
5290 return Cost;
5291}
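// For example (hypothetical group): a factor-3 load group with members at
// indices 0 and 2 and VF = 4 is costed as one wide access of type
// <12 x T> with Indices = {0, 2}; if the group is reversed, each member
// additionally pays one reverse shuffle.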
5292
5293std::optional<InstructionCost>
5294 LoopVectorizationCostModel::getReductionPatternCost(Instruction *I,
5295 ElementCount VF,
5296 Type *Ty) const {
5297 using namespace llvm::PatternMatch;
5298 // Early exit if there are no in-loop reductions.
5299 if (InLoopReductions.empty() || VF.isScalar() || !isa<VectorType>(Ty))
5300 return std::nullopt;
5301 auto *VectorTy = cast<VectorType>(Ty);
5302
5303 // We are looking for a pattern of, and finding the minimal acceptable cost for:
5304 // reduce(mul(ext(A), ext(B))) or
5305 // reduce(mul(A, B)) or
5306 // reduce(ext(A)) or
5307 // reduce(A).
5308 // The basic idea is that we walk down the tree to do that, finding the root
5309 // reduction instruction in InLoopReductionImmediateChains. From there we find
5310 // the pattern of mul/ext and test the cost of the entire pattern vs the cost
5311 // of the components. If the reduction cost is lower, then we return it for the
5312 // reduction instruction and 0 for the other instructions in the pattern. If
5313 // it is not, we return an invalid cost specifying the original cost method
5314 // should be used.
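// For example (hypothetical IR), an in-loop add reduction fed by
//   %ea = sext <16 x i8> %a to <16 x i32>
//   %eb = sext <16 x i8> %b to <16 x i32>
//   %m  = mul <16 x i32> %ea, %eb
// may be costed as a single multiply-accumulate reduction via
// TTI::getMulAccReductionCost instead of as separate ext/mul/add costs.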
5315 Instruction *RetI = I;
5316 if (match(RetI, m_ZExtOrSExt(m_Value()))) {
5317 if (!RetI->hasOneUser())
5318 return std::nullopt;
5319 RetI = RetI->user_back();
5320 }
5321
5322 if (match(RetI, m_OneUse(m_Mul(m_Value(), m_Value()))) &&
5323 RetI->user_back()->getOpcode() == Instruction::Add) {
5324 RetI = RetI->user_back();
5325 }
5326
5327 // Test if the found instruction is a reduction, and if not return an invalid
5328 // cost specifying the parent to use the original cost modelling.
5329 Instruction *LastChain = InLoopReductionImmediateChains.lookup(RetI);
5330 if (!LastChain)
5331 return std::nullopt;
5332
5333 // Find the reduction this chain is a part of and calculate the basic cost of
5334 // the reduction on its own.
5335 Instruction *ReductionPhi = LastChain;
5336 while (!isa<PHINode>(ReductionPhi))
5337 ReductionPhi = InLoopReductionImmediateChains.at(ReductionPhi);
5338
5339 const RecurrenceDescriptor &RdxDesc =
5340 Legal->getRecurrenceDescriptor(cast<PHINode>(ReductionPhi));
5341
5342 InstructionCost BaseCost;
5343 RecurKind RK = RdxDesc.getRecurrenceKind();
5344 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK)) {
5345 Intrinsic::ID MinMaxID = getMinMaxReductionIntrinsicOp(RK);
5346 BaseCost = TTI.getMinMaxReductionCost(MinMaxID, VectorTy,
5347 RdxDesc.getFastMathFlags(), CostKind);
5348 } else {
5349 BaseCost = TTI.getArithmeticReductionCost(
5350 RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);
5351 }
5352
5353 // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
5354 // normal fmul instruction to the cost of the fadd reduction.
5355 if (RK == RecurKind::FMulAdd)
5356 BaseCost +=
5357 TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind);
5358
5359 // If we're using ordered reductions then we can just return the base cost
5360 // here, since getArithmeticReductionCost calculates the full ordered
5361 // reduction cost when FP reassociation is not allowed.
5362 if (useOrderedReductions(RdxDesc))
5363 return BaseCost;
5364
5365 // Get the operand that was not the reduction chain and match it to one of the
5366 // patterns, returning the better cost if it is found.
5367 Instruction *RedOp = RetI->getOperand(1) == LastChain
5368 ? dyn_cast<Instruction>(RetI->getOperand(0))
5369 : dyn_cast<Instruction>(RetI->getOperand(1));
5370
5371 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
5372
5373 Instruction *Op0, *Op1;
5374 if (RedOp && RdxDesc.getOpcode() == Instruction::Add &&
5375 match(RedOp,
5376 m_OneUse(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) &&
5377 match(Op0, m_ZExtOrSExt(m_Value())) &&
5378 Op0->getOpcode() == Op1->getOpcode() &&
5379 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
5380 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
5381 (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {
5382
5383 // Matched reduce.add(ext(mul(ext(A), ext(B)))
5384 // Note that the extend opcodes need to all match, or if A==B they will have
5385 // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
5386 // which is equally fine.
5387 bool IsUnsigned = isa<ZExtInst>(Op0);
5388 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
5389 auto *MulType = VectorType::get(Op0->getType(), VectorTy);
5390
5391 InstructionCost ExtCost =
5392 TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
5393 TTI::CastContextHint::None, CostKind);
5394 InstructionCost MulCost =
5395 TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind);
5396 InstructionCost Ext2Cost =
5397 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType,
5398 TTI::CastContextHint::None, CostKind);
5399
5400 InstructionCost RedCost = TTI.getMulAccReductionCost(
5401 IsUnsigned, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), ExtType,
5402 CostKind);
5403
5404 if (RedCost.isValid() &&
5405 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
5406 return I == RetI ? RedCost : 0;
5407 } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) &&
5408 !TheLoop->isLoopInvariant(RedOp)) {
5409 // Matched reduce(ext(A))
5410 bool IsUnsigned = isa<ZExtInst>(RedOp);
5411 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
5412 InstructionCost RedCost = TTI.getExtendedReductionCost(
5413 RdxDesc.getOpcode(), IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
5414 RdxDesc.getFastMathFlags(), CostKind);
5415
5416 InstructionCost ExtCost =
5417 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
5418 TTI::CastContextHint::None, CostKind);
5419 if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
5420 return I == RetI ? RedCost : 0;
5421 } else if (RedOp && RdxDesc.getOpcode() == Instruction::Add &&
5422 match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) {
5423 if (match(Op0, m_ZExtOrSExt(m_Value())) &&
5424 Op0->getOpcode() == Op1->getOpcode() &&
5425 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
5426 bool IsUnsigned = isa<ZExtInst>(Op0);
5427 Type *Op0Ty = Op0->getOperand(0)->getType();
5428 Type *Op1Ty = Op1->getOperand(0)->getType();
5429 Type *LargestOpTy =
5430 Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty
5431 : Op0Ty;
5432 auto *ExtType = VectorType::get(LargestOpTy, VectorTy);
5433
5434 // Matched reduce.add(mul(ext(A), ext(B))), where the two ext may be of
5435 // different sizes. We take the largest type as the ext to reduce, and add
5436 // the remaining cost as, for example reduce(mul(ext(ext(A)), ext(B))).
5437 InstructionCost ExtCost0 = TTI.getCastInstrCost(
5438 Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy),
5439 TTI::CastContextHint::None, CostKind);
5440 InstructionCost ExtCost1 = TTI.getCastInstrCost(
5441 Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy),
5442 TTI::CastContextHint::None, CostKind);
5443 InstructionCost MulCost =
5444 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
5445
5446 InstructionCost RedCost = TTI.getMulAccReductionCost(
5447 IsUnsigned, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), ExtType,
5448 CostKind);
5449 InstructionCost ExtraExtCost = 0;
5450 if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
5451 Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;
5452 ExtraExtCost = TTI.getCastInstrCost(
5453 ExtraExtOp->getOpcode(), ExtType,
5454 VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy),
5455 TTI::CastContextHint::None, CostKind);
5456 }
5457
5458 if (RedCost.isValid() &&
5459 (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost))
5460 return I == RetI ? RedCost : 0;
5461 } else if (!match(I, m_ZExtOrSExt(m_Value()))) {
5462 // Matched reduce.add(mul())
5463 InstructionCost MulCost =
5464 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
5465
5466 InstructionCost RedCost = TTI.getMulAccReductionCost(
5467 true, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), VectorTy,
5468 CostKind);
5469
5470 if (RedCost.isValid() && RedCost < MulCost + BaseCost)
5471 return I == RetI ? RedCost : 0;
5472 }
5473 }
5474
5475 return I == RetI ? std::optional<InstructionCost>(BaseCost) : std::nullopt;
5476}
5477
5478 InstructionCost
5479LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
5480 ElementCount VF) {
5481 // Calculate scalar cost only. Vectorization cost should be ready at this
5482 // moment.
5483 if (VF.isScalar()) {
5484 Type *ValTy = getLoadStoreType(I);
5485 Type *PtrTy = getLoadStorePointerOperand(I)->getType();
5486 const Align Alignment = getLoadStoreAlignment(I);
5487 unsigned AS = getLoadStoreAddressSpace(I);
5488
5489 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
5490 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5491 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, CostKind,
5492 OpInfo, I);
5493 }
5494 return getWideningCost(I, VF);
5495}
5496
5497 InstructionCost
5498LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
5499 ElementCount VF) const {
5500
5501 // There is no mechanism yet to create a scalable scalarization loop,
5502 // so this is currently Invalid.
5503 if (VF.isScalable())
5504 return InstructionCost::getInvalid();
5505
5506 if (VF.isScalar())
5507 return 0;
5508 InstructionCost Cost = 0;
5508
5510 Type *RetTy = toVectorizedTy(I->getType(), VF);
5511 if (!RetTy->isVoidTy() &&
5512 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) {
5513
5514 for (Type *VectorTy : getContainedTypes(RetTy)) {
5515 Cost += TTI.getScalarizationOverhead(
5516 cast<VectorType>(VectorTy), APInt::getAllOnes(VF.getFixedValue()),
5517 /*Insert=*/true,
5518 /*Extract=*/false, CostKind);
5519 }
5520 }
5521
5522 // Some targets keep addresses scalar.
5523 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
5524 return Cost;
5525
5526 // Some targets support efficient element stores.
5527 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
5528 return Cost;
5529
5530 // Collect operands to consider.
5531 CallInst *CI = dyn_cast<CallInst>(I);
5532 Instruction::op_range Ops = CI ? CI->args() : I->operands();
5533
5534 // Skip operands that do not require extraction/scalarization and do not incur
5535 // any overhead.
5536 SmallVector<Type *> Tys;
5537 for (auto *V : filterExtractingOperands(Ops, VF))
5538 Tys.push_back(maybeVectorizeType(V->getType(), VF));
5539 return Cost + TTI.getOperandsScalarizationOverhead(Ops, Tys, CostKind);
5540}
5541
5542 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
5543 if (VF.isScalar())
5544 return;
5545 NumPredStores = 0;
5546 for (BasicBlock *BB : TheLoop->blocks()) {
5547 // For each instruction in the old loop.
5548 for (Instruction &I : *BB) {
5549 Value *Ptr = getLoadStorePointerOperand(&I);
5550 if (!Ptr)
5551 continue;
5552
5553 // TODO: We should generate better code and update the cost model for
5554 // predicated uniform stores. Today they are treated as any other
5555 // predicated store (see added test cases in
5556 // invariant-store-vectorization.ll).
5557 if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF))
5558 NumPredStores++;
5559
5560 if (Legal->isUniformMemOp(I, VF)) {
5561 auto IsLegalToScalarize = [&]() {
5562 if (!VF.isScalable())
5563 // Scalarization of fixed length vectors "just works".
5564 return true;
5565
5566 // We have dedicated lowering for unpredicated uniform loads and
5567 // stores. Note that even with tail folding we know that at least
5568 // one lane is active (i.e. generalized predication is not possible
5569 // here), and the logic below depends on this fact.
5570 if (!foldTailByMasking())
5571 return true;
5572
5573 // For scalable vectors, a uniform memop load is always
5574 // uniform-by-parts and we know how to scalarize that.
5575 if (isa<LoadInst>(I))
5576 return true;
5577
5578 // A uniform store isn't necessarily uniform-by-parts
5579 // and we can't assume scalarization.
5580 auto &SI = cast<StoreInst>(I);
5581 return TheLoop->isLoopInvariant(SI.getValueOperand());
5582 };
5583
5584 const InstructionCost GatherScatterCost =
5585 isLegalGatherOrScatter(&I, VF) ?
5586 getGatherScatterCost(&I, VF) : InstructionCost::getInvalid();
5587
5588 // Load: Scalar load + broadcast
5589 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
5590 // FIXME: This cost is a significant under-estimate for tail folded
5591 // memory ops.
5592 const InstructionCost ScalarizationCost =
5593 IsLegalToScalarize() ? getUniformMemOpCost(&I, VF)
5594 : InstructionCost::getInvalid();
5595
5596 // Choose the better solution for the current VF. Note that Invalid
5597 // costs compare as maximally large. If both are invalid, we get
5598 // scalable invalid which signals a failure and a vectorization abort.
5599 if (GatherScatterCost < ScalarizationCost)
5600 setWideningDecision(&I, VF, CM_GatherScatter, GatherScatterCost);
5601 else
5602 setWideningDecision(&I, VF, CM_Scalarize, ScalarizationCost);
5603 continue;
5604 }
5605
5606 // We assume that widening is the best solution when possible.
5607 if (memoryInstructionCanBeWidened(&I, VF)) {
5608 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
5609 int ConsecutiveStride = Legal->isConsecutivePtr(
5610 getLoadStoreType(&I), getLoadStorePointerOperand(&I));
5611 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5612 "Expected consecutive stride.");
5613 InstWidening Decision =
5614 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
5615 setWideningDecision(&I, VF, Decision, Cost);
5616 continue;
5617 }
5618
5619 // Choose between Interleaving, Gather/Scatter or Scalarization.
5620 InstructionCost InterleaveCost = InstructionCost::getInvalid();
5621 unsigned NumAccesses = 1;
5622 if (isAccessInterleaved(&I)) {
5623 const auto *Group = getInterleavedAccessGroup(&I);
5624 assert(Group && "Fail to get an interleaved access group.");
5625
5626 // Make one decision for the whole group.
5627 if (getWideningDecision(&I, VF) != CM_Unknown)
5628 continue;
5629
5630 NumAccesses = Group->getNumMembers();
5631 if (interleavedAccessCanBeWidened(&I, VF))
5632 InterleaveCost = getInterleaveGroupCost(&I, VF);
5633 }
5634
5635 InstructionCost GatherScatterCost =
5636 isLegalGatherOrScatter(&I, VF)
5637 ? getGatherScatterCost(&I, VF) * NumAccesses
5638 : InstructionCost::getInvalid();
5639
5640 InstructionCost ScalarizationCost =
5641 getMemInstScalarizationCost(&I, VF) * NumAccesses;
5642
5643 // Choose the better solution for the current VF, write down this
5644 // decision and use it during vectorization.
5645 InstructionCost Cost;
5646 InstWidening Decision;
5647 if (InterleaveCost <= GatherScatterCost &&
5648 InterleaveCost < ScalarizationCost) {
5649 Decision = CM_Interleave;
5650 Cost = InterleaveCost;
5651 } else if (GatherScatterCost < ScalarizationCost) {
5652 Decision = CM_GatherScatter;
5653 Cost = GatherScatterCost;
5654 } else {
5655 Decision = CM_Scalarize;
5656 Cost = ScalarizationCost;
5657 }
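// For illustration (hypothetical costs): InterleaveCost = 8,
// GatherScatterCost = 12 and ScalarizationCost = 20 selects CM_Interleave;
// on a target without gathers the gather/scatter cost stays Invalid
// (maximally large), so the choice falls to interleaving or scalarization.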
5658 // If the instructions belongs to an interleave group, the whole group
5659 // receives the same decision. The whole group receives the cost, but
5660 // the cost will actually be assigned to one instruction.
5661 if (const auto *Group = getInterleavedAccessGroup(&I)) {
5662 if (Decision == CM_Scalarize) {
5663 for (unsigned Idx = 0; Idx < Group->getFactor(); ++Idx) {
5664 if (auto *I = Group->getMember(Idx)) {
5665 setWideningDecision(I, VF, Decision,
5666 getMemInstScalarizationCost(I, VF));
5667 }
5668 }
5669 } else {
5670 setWideningDecision(Group, VF, Decision, Cost);
5671 }
5672 } else
5673 setWideningDecision(&I, VF, Decision, Cost);
5674 }
5675 }
5676
5677 // Make sure that any load of address and any other address computation
5678 // remains scalar unless there is gather/scatter support. This avoids
5679 // inevitable extracts into address registers, and also has the benefit of
5680 // activating LSR more, since that pass can't optimize vectorized
5681 // addresses.
5682 if (TTI.prefersVectorizedAddressing())
5683 return;
5684
5685 // Start with all scalar pointer uses.
5686 SmallPtrSet<Instruction *, 8> AddrDefs;
5687 for (BasicBlock *BB : TheLoop->blocks())
5688 for (Instruction &I : *BB) {
5689 Instruction *PtrDef =
5690 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
5691 if (PtrDef && TheLoop->contains(PtrDef) &&
5692 getWideningDecision(&I, VF) != CM_GatherScatter)
5693 AddrDefs.insert(PtrDef);
5694 }
5695
5696 // Add all instructions used to generate the addresses.
5697 SmallVector<Instruction *, 4> Worklist;
5698 append_range(Worklist, AddrDefs);
5699 while (!Worklist.empty()) {
5700 Instruction *I = Worklist.pop_back_val();
5701 for (auto &Op : I->operands())
5702 if (auto *InstOp = dyn_cast<Instruction>(Op))
5703 if (TheLoop->contains(InstOp) && !isa<PHINode>(InstOp) &&
5704 AddrDefs.insert(InstOp).second)
5705 Worklist.push_back(InstOp);
5706 }
5707
5708 auto UpdateMemOpUserCost = [this, VF](LoadInst *LI) {
5709 // If there are direct memory op users of the newly scalarized load,
5710 // their cost may have changed because there's no scalarization
5711 // overhead for the operand. Update it.
5712 for (User *U : LI->users()) {
5713 if (!isa<LoadInst, StoreInst>(U))
5714 continue;
5715 if (getWideningDecision(cast<Instruction>(U), VF) != CM_Scalarize)
5716 continue;
5717 setWideningDecision(
5718 cast<Instruction>(U), VF, CM_Scalarize,
5719 getMemInstScalarizationCost(cast<Instruction>(U), VF));
5720 }
5721 };
5722 for (auto *I : AddrDefs) {
5723 if (isa<LoadInst>(I)) {
5724 // Setting the desired widening decision should ideally be handled
5725 // by cost functions, but since this involves the task of finding out
5726 // if the loaded register is involved in an address computation, it is
5727 // instead changed here when we know this is the case.
5728 InstWidening Decision = getWideningDecision(I, VF);
5729 if (Decision == CM_Widen || Decision == CM_Widen_Reverse ||
5730 (!isPredicatedInst(I) && !Legal->isUniformMemOp(*I, VF) &&
5731 Decision == CM_Scalarize)) {
5732 // Scalarize a widened load of address or update the cost of a scalar
5733 // load of an address.
5734 setWideningDecision(
5735 I, VF, CM_Scalarize,
5736 (VF.getKnownMinValue() *
5737 getMemoryInstructionCost(I, ElementCount::getFixed(1))));
5738 UpdateMemOpUserCost(cast<LoadInst>(I));
5739 } else if (const auto *Group = getInterleavedAccessGroup(I)) {
5740 // Scalarize an interleave group of address loads.
5741 for (unsigned I = 0; I < Group->getFactor(); ++I) {
5742 if (Instruction *Member = Group->getMember(I)) {
5743 setWideningDecision(
5744 Member, VF, CM_Scalarize,
5745 (VF.getKnownMinValue() *
5746 getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
5747 UpdateMemOpUserCost(cast<LoadInst>(Member));
5748 }
5749 }
5750 }
5751 } else {
5752 // Cannot scalarize fixed-order recurrence phis at the moment.
5753 if (isa<PHINode>(I) && Legal->isFixedOrderRecurrence(cast<PHINode>(I)))
5754 continue;
5755
5756 // Make sure I gets scalarized and a cost estimate without
5757 // scalarization overhead.
5758 ForcedScalars[VF].insert(I);
5759 }
5760 }
5761}
5762
5763 void LoopVectorizationCostModel::setVectorizedCallDecision(ElementCount VF) {
5764 assert(!VF.isScalar() &&
5765 "Trying to set a vectorization decision for a scalar VF");
5766
5767 auto ForcedScalar = ForcedScalars.find(VF);
5768 for (BasicBlock *BB : TheLoop->blocks()) {
5769 // For each instruction in the old loop.
5770 for (Instruction &I : *BB) {
5771 CallInst *CI = dyn_cast<CallInst>(&I);
5772
5773 if (!CI)
5774 continue;
5775
5779 Function *ScalarFunc = CI->getCalledFunction();
5780 Type *ScalarRetTy = CI->getType();
5781 SmallVector<Type *, 4> Tys, ScalarTys;
5782 for (auto &ArgOp : CI->args())
5783 ScalarTys.push_back(ArgOp->getType());
5784
5785 // Estimate cost of scalarized vector call. The source operands are
5786 // assumed to be vectors, so we need to extract individual elements from
5787 // there, execute VF scalar calls, and then gather the result into the
5788 // vector return value.
5789 if (VF.isFixed()) {
5790 InstructionCost ScalarCallCost =
5791 TTI.getCallInstrCost(ScalarFunc, ScalarRetTy, ScalarTys, CostKind);
5792
5793 // Compute costs of unpacking argument values for the scalar calls and
5794 // packing the return values to a vector.
5795 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
5796 ScalarCost = ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
5797 } else {
5798 // There is no point attempting to calculate the scalar cost for a
5799 // scalable VF as we know it will be Invalid.
5800 assert(!getScalarizationOverhead(CI, VF).isValid() &&
5801 "Unexpected valid cost for scalarizing scalable vectors");
5802 ScalarCost = InstructionCost::getInvalid();
5803 }
5804
5805 // Honor ForcedScalars and UniformAfterVectorization decisions.
5806 // TODO: For calls, it might still be more profitable to widen. Use
5807 // VPlan-based cost model to compare different options.
5808 if (VF.isVector() && ((ForcedScalar != ForcedScalars.end() &&
5809 ForcedScalar->second.contains(CI)) ||
5810 isUniformAfterVectorization(CI, VF))) {
5811 setCallWideningDecision(CI, VF, CM_Scalarize, nullptr,
5812 Intrinsic::not_intrinsic, std::nullopt,
5813 ScalarCost);
5814 continue;
5815 }
5816
5817 bool MaskRequired = Legal->isMaskRequired(CI);
5818 // Compute corresponding vector type for return value and arguments.
5819 Type *RetTy = toVectorizedTy(ScalarRetTy, VF);
5820 for (Type *ScalarTy : ScalarTys)
5821 Tys.push_back(toVectorizedTy(ScalarTy, VF));
5822
5823 // An in-loop reduction using an fmuladd intrinsic is a special case;
5824 // we don't want the normal cost for that intrinsic.
5825 if (match(CI, m_Intrinsic<Intrinsic::fmuladd>()))
5826 if (auto RedCost = getReductionPatternCost(CI, VF, RetTy)) {
5827 setCallWideningDecision(CI, VF, CM_IntrinsicCall, nullptr,
5828 getVectorIntrinsicIDForCall(CI, TLI),
5829 std::nullopt, *RedCost);
5830 continue;
5831 }
5832
5833 // Find the cost of vectorizing the call, if we can find a suitable
5834 // vector variant of the function.
5835 VFInfo FuncInfo;
5836 Function *VecFunc = nullptr;
5837 // Search through any available variants for one we can use at this VF.
5838 for (VFInfo &Info : VFDatabase::getMappings(*CI)) {
5839 // Must match requested VF.
5840 if (Info.Shape.VF != VF)
5841 continue;
5842
5843 // Must take a mask argument if one is required
5844 if (MaskRequired && !Info.isMasked())
5845 continue;
5846
5847 // Check that all parameter kinds are supported
5848 bool ParamsOk = true;
5849 for (VFParameter Param : Info.Shape.Parameters) {
5850 switch (Param.ParamKind) {
5851 case VFParamKind::Vector:
5852 break;
5853 case VFParamKind::OMP_Uniform: {
5854 Value *ScalarParam = CI->getArgOperand(Param.ParamPos);
5855 // Make sure the scalar parameter in the loop is invariant.
5856 if (!PSE.getSE()->isLoopInvariant(PSE.getSCEV(ScalarParam),
5857 TheLoop))
5858 ParamsOk = false;
5859 break;
5860 }
5861 case VFParamKind::OMP_Linear: {
5862 Value *ScalarParam = CI->getArgOperand(Param.ParamPos);
5863 // Find the stride for the scalar parameter in this loop and see if
5864 // it matches the stride for the variant.
5865 // TODO: do we need to figure out the cost of an extract to get the
5866 // first lane? Or do we hope that it will be folded away?
5867 ScalarEvolution *SE = PSE.getSE();
5868 if (!match(SE->getSCEV(ScalarParam),
5869 m_scev_AffineAddRec(
5870 m_SCEV(), m_scev_SpecificSInt(Param.LinearStepOrPos),
5871 m_SpecificLoop(TheLoop))))
5872 ParamsOk = false;
5873 break;
5874 }
5875 case VFParamKind::GlobalPredicate:
5876 break;
5877 default:
5878 ParamsOk = false;
5879 break;
5880 }
5881 }
5882
5883 if (!ParamsOk)
5884 continue;
5885
5886 // Found a suitable candidate, stop here.
5887 VecFunc = CI->getModule()->getFunction(Info.VectorName);
5888 FuncInfo = Info;
5889 break;
5890 }
5891
5892 if (TLI && VecFunc && !CI->isNoBuiltin())
5893 VectorCost = TTI.getCallInstrCost(nullptr, RetTy, Tys, CostKind);
5894
5895 // Find the cost of an intrinsic; some targets may have instructions that
5896 // perform the operation without needing an actual call.
5897 Intrinsic::ID IID = getVectorIntrinsicIDForCall(CI, TLI);
5898 if (IID != Intrinsic::not_intrinsic)
5899 IntrinsicCost = getVectorIntrinsicCost(CI, VF);
5900
5901 InstructionCost Cost = ScalarCost;
5902 InstWidening Decision = CM_Scalarize;
5903
5904 if (VectorCost <= Cost) {
5905 Cost = VectorCost;
5906 Decision = CM_VectorCall;
5907 }
5908
5909 if (IntrinsicCost <= Cost) {
5910 Cost = IntrinsicCost;
5911 Decision = CM_IntrinsicCall;
5912 }
5913
5914 setCallWideningDecision(CI, VF, Decision, VecFunc, IID,
5915 FuncInfo.getParamIndexForOptionalMask(), Cost);
5916 }
5917 }
5918}
5919
5920 bool LoopVectorizationCostModel::shouldConsiderInvariant(Value *Op) {
5921 if (!Legal->isInvariant(Op))
5922 return false;
5923 // Consider Op invariant only if it and its operands aren't predicated
5924 // instructions in the loop; a predicated instruction is not trivially hoistable.
5925 auto *OpI = dyn_cast<Instruction>(Op);
5926 return !OpI || !TheLoop->contains(OpI) ||
5927 (!isPredicatedInst(OpI) &&
5928 (!isa<PHINode>(OpI) || OpI->getParent() != TheLoop->getHeader()) &&
5929 all_of(OpI->operands(),
5930 [this](Value *Op) { return shouldConsiderInvariant(Op); }));
5931}
5932
5933 InstructionCost
5934 LoopVectorizationCostModel::getInstructionCost(Instruction *I,
5935 ElementCount VF) {
5936 // If we know that this instruction will remain uniform, check the cost of
5937 // the scalar version.
5938 if (isUniformAfterVectorization(I, VF))
5939 VF = ElementCount::getFixed(1);
5940
5941 if (VF.isVector() && isProfitableToScalarize(I, VF))
5942 return InstsToScalarize[VF][I];
5943
5944 // Forced scalars do not have any scalarization overhead.
5945 auto ForcedScalar = ForcedScalars.find(VF);
5946 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
5947 auto InstSet = ForcedScalar->second;
5948 if (InstSet.count(I))
5949 return getInstructionCost(I, ElementCount::getFixed(1)) *
5950 VF.getKnownMinValue();
5951 }
5952
5953 Type *RetTy = I->getType();
5954 if (canTruncateToMinimalBitwidth(I, VF))
5955 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
5956 auto *SE = PSE.getSE();
5957
5958 Type *VectorTy;
5959 if (isScalarAfterVectorization(I, VF)) {
5960 [[maybe_unused]] auto HasSingleCopyAfterVectorization =
5961 [this](Instruction *I, ElementCount VF) -> bool {
5962 if (VF.isScalar())
5963 return true;
5964
5965 auto Scalarized = InstsToScalarize.find(VF);
5966 assert(Scalarized != InstsToScalarize.end() &&
5967 "VF not yet analyzed for scalarization profitability");
5968 return !Scalarized->second.count(I) &&
5969 llvm::all_of(I->users(), [&](User *U) {
5970 auto *UI = cast<Instruction>(U);
5971 return !Scalarized->second.count(UI);
5972 });
5973 };
5974
5975 // With the exception of GEPs and PHIs, after scalarization there should
5976 // only be one copy of the instruction generated in the loop. This is
5977 // because the VF is either 1, or any instructions that need scalarizing
5978 // have already been dealt with by the time we get here. As a result,
5979 // we don't have to multiply the instruction cost by VF.
5980 assert(I->getOpcode() == Instruction::GetElementPtr ||
5981 I->getOpcode() == Instruction::PHI ||
5982 (I->getOpcode() == Instruction::BitCast &&
5983 I->getType()->isPointerTy()) ||
5984 HasSingleCopyAfterVectorization(I, VF));
5985 VectorTy = RetTy;
5986 } else
5987 VectorTy = toVectorizedTy(RetTy, VF);
5988
5989 if (VF.isVector() && VectorTy->isVectorTy() &&
5990 !TTI.getNumberOfParts(VectorTy))
5991 return InstructionCost::getInvalid();
5992
5993 // TODO: We need to estimate the cost of intrinsic calls.
5994 switch (I->getOpcode()) {
5995 case Instruction::GetElementPtr:
5996 // We mark this instruction as zero-cost because the cost of GEPs in
5997 // vectorized code depends on whether the corresponding memory instruction
5998 // is scalarized or not. Therefore, we handle GEPs with the memory
5999 // instruction cost.
6000 return 0;
6001 case Instruction::Br: {
6002 // In cases of scalarized and predicated instructions, there will be VF
6003 // predicated blocks in the vectorized loop. Each branch around these
6004 // blocks requires also an extract of its vector compare i1 element.
6005 // Note that the conditional branch from the loop latch will be replaced by
6006 // a single branch controlling the loop, so there is no extra overhead from
6007 // scalarization.
6008 bool ScalarPredicatedBB = false;
6009 BranchInst *BI = cast<BranchInst>(I);
6010 if (VF.isVector() && BI->isConditional() &&
6011 (PredicatedBBsAfterVectorization[VF].count(BI->getSuccessor(0)) ||
6012 PredicatedBBsAfterVectorization[VF].count(BI->getSuccessor(1))) &&
6013 BI->getParent() != TheLoop->getLoopLatch())
6014 ScalarPredicatedBB = true;
6015
6016 if (ScalarPredicatedBB) {
6017 // Not possible to scalarize scalable vector with predicated instructions.
6018 if (VF.isScalable())
6019 return InstructionCost::getInvalid();
6020 // Return cost for branches around scalarized and predicated blocks.
6021 auto *VecI1Ty =
6022 VectorType::get(Type::getInt1Ty(BI->getContext()), VF);
6023 return (
6024 TTI.getScalarizationOverhead(
6025 VecI1Ty, APInt::getAllOnes(VF.getFixedValue()),
6026 /*Insert*/ false, /*Extract*/ true, CostKind) +
6027 (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
6028 }
6029
6030 if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
6031 // The back-edge branch will remain, as will all scalar branches.
6032 return TTI.getCFInstrCost(Instruction::Br, CostKind);
6033
6034 // This branch will be eliminated by if-conversion.
6035 return 0;
6036 // Note: We currently assume zero cost for an unconditional branch inside
6037 // a predicated block since it will become a fall-through, although we
6038 // may decide in the future to call TTI for all branches.
6039 }
6040 case Instruction::Switch: {
6041 if (VF.isScalar())
6042 return TTI.getCFInstrCost(Instruction::Switch, CostKind);
6043 auto *Switch = cast<SwitchInst>(I);
6044 return Switch->getNumCases() *
6045 TTI.getCmpSelInstrCost(
6046 Instruction::ICmp,
6047 toVectorTy(Switch->getCondition()->getType(), VF),
6048 toVectorTy(Type::getInt1Ty(I->getContext()), VF),
6049 CmpInst::ICMP_EQ, CostKind);
6050 }
6051 case Instruction::PHI: {
6052 auto *Phi = cast<PHINode>(I);
6053
6054 // First-order recurrences are replaced by vector shuffles inside the loop.
6055 if (VF.isVector() && Legal->isFixedOrderRecurrence(Phi)) {
6056 SmallVector<int> Mask(VF.getKnownMinValue());
6057 std::iota(Mask.begin(), Mask.end(), VF.getKnownMinValue() - 1);
6058 return TTI.getShuffleCost(TargetTransformInfo::SK_Splice,
6059 cast<VectorType>(VectorTy),
6060 cast<VectorType>(VectorTy), Mask, CostKind,
6061 VF.getKnownMinValue() - 1);
6062 }
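// For example, with VF = 4 the splice mask above is {3, 4, 5, 6}: the
// last lane of the previous iteration's vector followed by the first
// three lanes of the current one.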
6063
6064 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
6065 // converted into select instructions. We require N - 1 selects per phi
6066 // node, where N is the number of incoming values.
6067 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) {
6068 Type *ResultTy = Phi->getType();
6069
6070 // All instructions in an Any-of reduction chain are narrowed to bool.
6071 // Check if that is the case for this phi node.
6072 auto *HeaderUser = cast_if_present<PHINode>(
6073 find_singleton<User>(Phi->users(), [this](User *U, bool) -> User * {
6074 auto *Phi = dyn_cast<PHINode>(U);
6075 if (Phi && Phi->getParent() == TheLoop->getHeader())
6076 return Phi;
6077 return nullptr;
6078 }));
6079 if (HeaderUser) {
6080 auto &ReductionVars = Legal->getReductionVars();
6081 auto Iter = ReductionVars.find(HeaderUser);
6082 if (Iter != ReductionVars.end() &&
6083 RecurrenceDescriptor::isAnyOfRecurrenceKind(
6084 Iter->second.getRecurrenceKind()))
6085 ResultTy = Type::getInt1Ty(Phi->getContext());
6086 }
6087 return (Phi->getNumIncomingValues() - 1) *
6088 TTI.getCmpSelInstrCost(
6089 Instruction::Select, toVectorTy(ResultTy, VF),
6090 toVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
6091 CmpInst::BAD_ICMP_PREDICATE, CostKind);
6092 }
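// For example, a phi in a non-header block with three incoming values is
// costed as 2 selects at this VF, one per incoming value beyond the first.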
6093
6094 // When tail folding with EVL, if the phi is part of an out of loop
6095 // reduction then it will be transformed into a wide vp_merge.
6096 if (VF.isVector() && foldTailWithEVL() &&
6097 Legal->getReductionVars().contains(Phi) && !isInLoopReduction(Phi)) {
6098 IntrinsicCostAttributes ICA(
6099 Intrinsic::vp_merge, toVectorTy(Phi->getType(), VF),
6100 {toVectorTy(Type::getInt1Ty(Phi->getContext()), VF)});
6101 return TTI.getIntrinsicInstrCost(ICA, CostKind);
6102 }
6103
6104 return TTI.getCFInstrCost(Instruction::PHI, CostKind);
6105 }
6106 case Instruction::UDiv:
6107 case Instruction::SDiv:
6108 case Instruction::URem:
6109 case Instruction::SRem:
6110 if (VF.isVector() && isPredicatedInst(I)) {
6111 const auto [ScalarCost, SafeDivisorCost] = getDivRemSpeculationCost(I, VF);
6112 return isDivRemScalarWithPredication(ScalarCost, SafeDivisorCost) ?
6113 ScalarCost : SafeDivisorCost;
6114 }
6115 // We've proven all lanes safe to speculate, fall through.
6116 [[fallthrough]];
6117 case Instruction::Add:
6118 case Instruction::Sub: {
6119 auto Info = Legal->getHistogramInfo(I);
6120 if (Info && VF.isVector()) {
6121 const HistogramInfo *HGram = Info.value();
6122 // Assume that a non-constant update value (or a constant != 1) requires
6123 // a multiply, and add that into the cost.
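      // E.g. (illustrative) for buckets[idx[i]] += c with c != 1, the
      // per-lane counts produced by the histogram need a multiply by c.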
6124       InstructionCost MulCost = TTI::TCC_Free;
6125       ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1));
6126 if (!RHS || RHS->getZExtValue() != 1)
6127 MulCost =
6128 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
6129
6130 // Find the cost of the histogram operation itself.
6131 Type *PtrTy = VectorType::get(HGram->Load->getPointerOperandType(), VF);
6132 Type *ScalarTy = I->getType();
6133 Type *MaskTy = VectorType::get(Type::getInt1Ty(I->getContext()), VF);
6134 IntrinsicCostAttributes ICA(Intrinsic::experimental_vector_histogram_add,
6135 Type::getVoidTy(I->getContext()),
6136 {PtrTy, ScalarTy, MaskTy});
6137
6138 // Add the costs together with the add/sub operation.
6139 return TTI.getIntrinsicInstrCost(ICA, CostKind) + MulCost +
6140 TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, CostKind);
6141 }
6142 [[fallthrough]];
6143 }
6144 case Instruction::FAdd:
6145 case Instruction::FSub:
6146 case Instruction::Mul:
6147 case Instruction::FMul:
6148 case Instruction::FDiv:
6149 case Instruction::FRem:
6150 case Instruction::Shl:
6151 case Instruction::LShr:
6152 case Instruction::AShr:
6153 case Instruction::And:
6154 case Instruction::Or:
6155 case Instruction::Xor: {
6156 // If we're speculating on the stride being 1, the multiplication may
6157 // fold away. We can generalize this for all operations using the notion
6158 // of neutral elements. (TODO)
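    // E.g. (illustrative) %offset = mul i64 %i, %stride costs nothing here
    // when a SCEV predicate guarantees %stride == 1.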
6159 if (I->getOpcode() == Instruction::Mul &&
6160 ((TheLoop->isLoopInvariant(I->getOperand(0)) &&
6161 PSE.getSCEV(I->getOperand(0))->isOne()) ||
6162 (TheLoop->isLoopInvariant(I->getOperand(1)) &&
6163 PSE.getSCEV(I->getOperand(1))->isOne())))
6164 return 0;
6165
6166 // Detect reduction patterns
6167 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy))
6168 return *RedCost;
6169
6170 // Certain instructions can be cheaper to vectorize if they have a constant
6171     // second vector operand. One example of this is shifts on x86.
6172 Value *Op2 = I->getOperand(1);
6173 if (!isa<Constant>(Op2) && TheLoop->isLoopInvariant(Op2) &&
6174 PSE.getSE()->isSCEVable(Op2->getType()) &&
6175 isa<SCEVConstant>(PSE.getSCEV(Op2))) {
6176 Op2 = cast<SCEVConstant>(PSE.getSCEV(Op2))->getValue();
6177 }
6178 auto Op2Info = TTI.getOperandInfo(Op2);
6179 if (Op2Info.Kind == TargetTransformInfo::OK_AnyValue &&
6180         Legal->isInvariant(Op2))
6181       Op2Info.Kind = TargetTransformInfo::OK_UniformValue;
6182 
6183 SmallVector<const Value *, 4> Operands(I->operand_values());
6184 return TTI.getArithmeticInstrCost(
6185 I->getOpcode(), VectorTy, CostKind,
6186 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6187 Op2Info, Operands, I, TLI);
6188 }
6189 case Instruction::FNeg: {
6190 return TTI.getArithmeticInstrCost(
6191 I->getOpcode(), VectorTy, CostKind,
6192 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6193 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6194 I->getOperand(0), I);
6195 }
6196 case Instruction::Select: {
6197     SelectInst *SI = cast<SelectInst>(I);
6198     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
6199 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
6200
6201 const Value *Op0, *Op1;
6202 using namespace llvm::PatternMatch;
6203 if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
6204 match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
6205 // select x, y, false --> x & y
6206 // select x, true, y --> x | y
6207 const auto [Op1VK, Op1VP] = TTI::getOperandInfo(Op0);
6208 const auto [Op2VK, Op2VP] = TTI::getOperandInfo(Op1);
6209 assert(Op0->getType()->getScalarSizeInBits() == 1 &&
6210 Op1->getType()->getScalarSizeInBits() == 1);
6211
6212 return TTI.getArithmeticInstrCost(
6213 match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And,
6214 VectorTy, CostKind, {Op1VK, Op1VP}, {Op2VK, Op2VP}, {Op0, Op1}, I);
6215 }
6216
6217 Type *CondTy = SI->getCondition()->getType();
6218 if (!ScalarCond)
6219 CondTy = VectorType::get(CondTy, VF);
6220
6221     CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
6222     if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
6223 Pred = Cmp->getPredicate();
6224 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred,
6225 CostKind, {TTI::OK_AnyValue, TTI::OP_None},
6226 {TTI::OK_AnyValue, TTI::OP_None}, I);
6227 }
6228 case Instruction::ICmp:
6229 case Instruction::FCmp: {
6230 Type *ValTy = I->getOperand(0)->getType();
6231 
6232     if (canTruncateToMinimalBitwidth(I, VF)) {
6233 [[maybe_unused]] Instruction *Op0AsInstruction =
6234 dyn_cast<Instruction>(I->getOperand(0));
6235 assert((!canTruncateToMinimalBitwidth(Op0AsInstruction, VF) ||
6236 MinBWs[I] == MinBWs[Op0AsInstruction]) &&
6237 "if both the operand and the compare are marked for "
6238 "truncation, they must have the same bitwidth");
6239 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[I]);
6240 }
6241
6242 VectorTy = toVectorTy(ValTy, VF);
6243 return TTI.getCmpSelInstrCost(
6244 I->getOpcode(), VectorTy, CmpInst::makeCmpResultType(VectorTy),
6245 cast<CmpInst>(I)->getPredicate(), CostKind,
6246 {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None}, I);
6247 }
6248 case Instruction::Store:
6249 case Instruction::Load: {
6250 ElementCount Width = VF;
6251 if (Width.isVector()) {
6252 InstWidening Decision = getWideningDecision(I, Width);
6253 assert(Decision != CM_Unknown &&
6254 "CM decision should be taken at this point");
6255       if (getWideningCost(I, VF) == InstructionCost::getInvalid())
6256         return InstructionCost::getInvalid();
6257       if (Decision == CM_Scalarize)
6258 Width = ElementCount::getFixed(1);
6259 }
6260 VectorTy = toVectorTy(getLoadStoreType(I), Width);
6261 return getMemoryInstructionCost(I, VF);
6262 }
6263 case Instruction::BitCast:
6264 if (I->getType()->isPointerTy())
6265 return 0;
6266 [[fallthrough]];
6267 case Instruction::ZExt:
6268 case Instruction::SExt:
6269 case Instruction::FPToUI:
6270 case Instruction::FPToSI:
6271 case Instruction::FPExt:
6272 case Instruction::PtrToInt:
6273 case Instruction::IntToPtr:
6274 case Instruction::SIToFP:
6275 case Instruction::UIToFP:
6276 case Instruction::Trunc:
6277 case Instruction::FPTrunc: {
6278 // Computes the CastContextHint from a Load/Store instruction.
6279 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
6280       assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
6281              "Expected a load or a store!");
6282
6283 if (VF.isScalar() || !TheLoop->contains(I))
6284         return TTI::CastContextHint::Normal;
6285 
6286 switch (getWideningDecision(I, VF)) {
6287       case CM_GatherScatter:
6288         return TTI::CastContextHint::GatherScatter;
6289       case CM_Interleave:
6290         return TTI::CastContextHint::Interleave;
6291       case CM_Scalarize:
6292       case CM_Widen:
6293         return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
6294                                         : TTI::CastContextHint::Normal;
6295       case CM_Widen_Reverse:
6296         return TTI::CastContextHint::Reversed;
6297       case CM_Unknown:
6298         llvm_unreachable("Instr did not go through cost modelling?");
6299       case CM_VectorCall:
6300       case CM_IntrinsicCall:
6301         llvm_unreachable_internal("Instr has invalid widening decision");
6302 }
6303
6304 llvm_unreachable("Unhandled case!");
6305 };
6306
6307 unsigned Opcode = I->getOpcode();
6308     TTI::CastContextHint CCH = TTI::CastContextHint::None;
6309     // For Trunc, the context is the only user, which must be a StoreInst.
6310 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
6311 if (I->hasOneUse())
6312 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
6313 CCH = ComputeCCH(Store);
6314 }
6315 // For Z/Sext, the context is the operand, which must be a LoadInst.
6316 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
6317 Opcode == Instruction::FPExt) {
6318 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
6319 CCH = ComputeCCH(Load);
6320 }
6321
6322 // We optimize the truncation of induction variables having constant
6323 // integer steps. The cost of these truncations is the same as the scalar
6324 // operation.
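    // E.g. (illustrative) %t = trunc i64 %iv to i32 for an IV with constant
    // step: the truncated IV can be computed directly in i32.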
6325 if (isOptimizableIVTruncate(I, VF)) {
6326 auto *Trunc = cast<TruncInst>(I);
6327 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
6328 Trunc->getSrcTy(), CCH, CostKind, Trunc);
6329 }
6330
6331 // Detect reduction patterns
6332 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy))
6333 return *RedCost;
6334
6335 Type *SrcScalarTy = I->getOperand(0)->getType();
6336 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
6337 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
6338 SrcScalarTy =
6339 IntegerType::get(SrcScalarTy->getContext(), MinBWs[Op0AsInstruction]);
6340 Type *SrcVecTy =
6341 VectorTy->isVectorTy() ? toVectorTy(SrcScalarTy, VF) : SrcScalarTy;
6342 
6343     if (canTruncateToMinimalBitwidth(I, VF)) {
6344 // If the result type is <= the source type, there will be no extend
6345 // after truncating the users to the minimal required bitwidth.
6346 if (VectorTy->getScalarSizeInBits() <= SrcVecTy->getScalarSizeInBits() &&
6347 (I->getOpcode() == Instruction::ZExt ||
6348 I->getOpcode() == Instruction::SExt))
6349 return 0;
6350 }
6351
6352 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
6353 }
6354 case Instruction::Call:
6355 return getVectorCallCost(cast<CallInst>(I), VF);
6356 case Instruction::ExtractValue:
6357 return TTI.getInstructionCost(I, CostKind);
6358 case Instruction::Alloca:
6359 // We cannot easily widen alloca to a scalable alloca, as
6360 // the result would need to be a vector of pointers.
6361 if (VF.isScalable())
6362       return InstructionCost::getInvalid();
6363     [[fallthrough]];
6364 default:
6365 // This opcode is unknown. Assume that it is the same as 'mul'.
6366 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
6367 } // end of switch.
6368}
6369
6370 void LoopVectorizationCostModel::collectValuesToIgnore() {
6371   // Ignore ephemeral values.
6372   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
6373 
6374   SmallVector<Value *, 4> DeadInterleavePointerOps;
6375   SmallVector<Value *, 4> DeadOps;
6376 
6377 // If a scalar epilogue is required, users outside the loop won't use
6378 // live-outs from the vector loop but from the scalar epilogue. Ignore them if
6379 // that is the case.
6380 bool RequiresScalarEpilogue = requiresScalarEpilogue(true);
6381 auto IsLiveOutDead = [this, RequiresScalarEpilogue](User *U) {
6382 return RequiresScalarEpilogue &&
6383 !TheLoop->contains(cast<Instruction>(U)->getParent());
6384 };
6385
6386   LoopBlocksDFS DFS(TheLoop);
6387   DFS.perform(LI);
6388 for (BasicBlock *BB : reverse(make_range(DFS.beginRPO(), DFS.endRPO())))
6389 for (Instruction &I : reverse(*BB)) {
6390 if (VecValuesToIgnore.contains(&I) || ValuesToIgnore.contains(&I))
6391 continue;
6392
6393 // Add instructions that would be trivially dead and are only used by
6394 // values already ignored to DeadOps to seed worklist.
6395       if (wouldInstructionBeTriviallyDead(&I, TLI) &&
6396           all_of(I.users(), [this, IsLiveOutDead](User *U) {
6397 return VecValuesToIgnore.contains(U) ||
6398 ValuesToIgnore.contains(U) || IsLiveOutDead(U);
6399 }))
6400 DeadOps.push_back(&I);
6401
6402 // For interleave groups, we only create a pointer for the start of the
6403 // interleave group. Queue up addresses of group members except the insert
6404 // position for further processing.
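      // E.g. (illustrative) for a group accessing A[2*i] and A[2*i+1], only
      // the insert position's address is emitted; the other member's address
      // computation may become dead.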
6405 if (isAccessInterleaved(&I)) {
6406 auto *Group = getInterleavedAccessGroup(&I);
6407 if (Group->getInsertPos() == &I)
6408 continue;
6409 Value *PointerOp = getLoadStorePointerOperand(&I);
6410 DeadInterleavePointerOps.push_back(PointerOp);
6411 }
6412
6413 // Queue branches for analysis. They are dead, if their successors only
6414 // contain dead instructions.
6415 if (auto *Br = dyn_cast<BranchInst>(&I)) {
6416 if (Br->isConditional())
6417 DeadOps.push_back(&I);
6418 }
6419 }
6420
6421 // Mark ops feeding interleave group members as free, if they are only used
6422 // by other dead computations.
6423 for (unsigned I = 0; I != DeadInterleavePointerOps.size(); ++I) {
6424 auto *Op = dyn_cast<Instruction>(DeadInterleavePointerOps[I]);
6425 if (!Op || !TheLoop->contains(Op) || any_of(Op->users(), [this](User *U) {
6426 Instruction *UI = cast<Instruction>(U);
6427 return !VecValuesToIgnore.contains(U) &&
6428 (!isAccessInterleaved(UI) ||
6429 getInterleavedAccessGroup(UI)->getInsertPos() == UI);
6430 }))
6431 continue;
6432 VecValuesToIgnore.insert(Op);
6433 append_range(DeadInterleavePointerOps, Op->operands());
6434 }
6435
6436 // Mark ops that would be trivially dead and are only used by ignored
6437 // instructions as free.
6438 BasicBlock *Header = TheLoop->getHeader();
6439
6440 // Returns true if the block contains only dead instructions. Such blocks will
6441 // be removed by VPlan-to-VPlan transforms and won't be considered by the
6442 // VPlan-based cost model, so skip them in the legacy cost-model as well.
6443 auto IsEmptyBlock = [this](BasicBlock *BB) {
6444 return all_of(*BB, [this](Instruction &I) {
6445 return ValuesToIgnore.contains(&I) || VecValuesToIgnore.contains(&I) ||
6446 (isa<BranchInst>(&I) && !cast<BranchInst>(&I)->isConditional());
6447 });
6448 };
6449 for (unsigned I = 0; I != DeadOps.size(); ++I) {
6450 auto *Op = dyn_cast<Instruction>(DeadOps[I]);
6451
6452 // Check if the branch should be considered dead.
6453 if (auto *Br = dyn_cast_or_null<BranchInst>(Op)) {
6454 BasicBlock *ThenBB = Br->getSuccessor(0);
6455 BasicBlock *ElseBB = Br->getSuccessor(1);
6456       // Don't consider branches leaving the loop for simplification.
6457 if (!TheLoop->contains(ThenBB) || !TheLoop->contains(ElseBB))
6458 continue;
6459 bool ThenEmpty = IsEmptyBlock(ThenBB);
6460 bool ElseEmpty = IsEmptyBlock(ElseBB);
6461 if ((ThenEmpty && ElseEmpty) ||
6462 (ThenEmpty && ThenBB->getSingleSuccessor() == ElseBB &&
6463 ElseBB->phis().empty()) ||
6464 (ElseEmpty && ElseBB->getSingleSuccessor() == ThenBB &&
6465 ThenBB->phis().empty())) {
6466 VecValuesToIgnore.insert(Br);
6467 DeadOps.push_back(Br->getCondition());
6468 }
6469 continue;
6470 }
6471
6472 // Skip any op that shouldn't be considered dead.
6473 if (!Op || !TheLoop->contains(Op) ||
6474 (isa<PHINode>(Op) && Op->getParent() == Header) ||
6475         !wouldInstructionBeTriviallyDead(Op, TLI) ||
6476         any_of(Op->users(), [this, IsLiveOutDead](User *U) {
6477 return !VecValuesToIgnore.contains(U) &&
6478 !ValuesToIgnore.contains(U) && !IsLiveOutDead(U);
6479 }))
6480 continue;
6481
6482 // If all of Op's users are in ValuesToIgnore, add it to ValuesToIgnore
6483 // which applies for both scalar and vector versions. Otherwise it is only
6484 // dead in vector versions, so only add it to VecValuesToIgnore.
6485 if (all_of(Op->users(),
6486 [this](User *U) { return ValuesToIgnore.contains(U); }))
6487 ValuesToIgnore.insert(Op);
6488
6489 VecValuesToIgnore.insert(Op);
6490 append_range(DeadOps, Op->operands());
6491 }
6492
6493 // Ignore type-promoting instructions we identified during reduction
6494 // detection.
6495 for (const auto &Reduction : Legal->getReductionVars()) {
6496 const RecurrenceDescriptor &RedDes = Reduction.second;
6497 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
6498 VecValuesToIgnore.insert_range(Casts);
6499 }
6500 // Ignore type-casting instructions we identified during induction
6501 // detection.
6502 for (const auto &Induction : Legal->getInductionVars()) {
6503 const InductionDescriptor &IndDes = Induction.second;
6504 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
6505 VecValuesToIgnore.insert_range(Casts);
6506 }
6507}
6508
6509 void LoopVectorizationCostModel::collectInLoopReductions() {
6510   // Avoid duplicating work finding in-loop reductions.
6511 if (!InLoopReductions.empty())
6512 return;
6513
6514 for (const auto &Reduction : Legal->getReductionVars()) {
6515 PHINode *Phi = Reduction.first;
6516 const RecurrenceDescriptor &RdxDesc = Reduction.second;
6517
6518 // We don't collect reductions that are type promoted (yet).
6519 if (RdxDesc.getRecurrenceType() != Phi->getType())
6520 continue;
6521
6522 // If the target would prefer this reduction to happen "in-loop", then we
6523 // want to record it as such.
6524 RecurKind Kind = RdxDesc.getRecurrenceKind();
6525 if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
6526 !TTI.preferInLoopReduction(Kind, Phi->getType()))
6527 continue;
6528
6529 // Check that we can correctly put the reductions into the loop, by
6530 // finding the chain of operations that leads from the phi to the loop
6531 // exit value.
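    // E.g. (illustrative) for
    //   %phi = phi i32 [ 0, %ph ], [ %sum, %loop ]
    //   %sum = add i32 %phi, %x
    // the chain is {%sum}, so the add can be performed in-loop.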
6532 SmallVector<Instruction *, 4> ReductionOperations =
6533 RdxDesc.getReductionOpChain(Phi, TheLoop);
6534 bool InLoop = !ReductionOperations.empty();
6535
6536 if (InLoop) {
6537 InLoopReductions.insert(Phi);
6538 // Add the elements to InLoopReductionImmediateChains for cost modelling.
6539 Instruction *LastChain = Phi;
6540 for (auto *I : ReductionOperations) {
6541 InLoopReductionImmediateChains[I] = LastChain;
6542 LastChain = I;
6543 }
6544 }
6545 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
6546 << " reduction for phi: " << *Phi << "\n");
6547 }
6548}
6549
6550// This function will select a scalable VF if the target supports scalable
6551// vectors and a fixed one otherwise.
6552// TODO: we could return a pair of values that specify the max VF and
6553// min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
6554// `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment
6555// doesn't have a cost model that can choose which plan to execute if
6556// more than one is generated.
6557 static ElementCount determineVPlanVF(const TargetTransformInfo &TTI,
6558                                      LoopVectorizationCostModel &CM) {
6559   unsigned WidestType;
6560 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
6561
6562   TargetTransformInfo::RegisterKind RegKind =
6563       TTI.enableScalableVectorization()
6564           ? TargetTransformInfo::RGK_ScalableVector
6565           : TargetTransformInfo::RGK_FixedWidthVector;
6566 
6567 TypeSize RegSize = TTI.getRegisterBitWidth(RegKind);
6568 unsigned N = RegSize.getKnownMinValue() / WidestType;
6569 return ElementCount::get(N, RegSize.isScalable());
6570}
6571
6572 VectorizationFactor
6573 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
6574   ElementCount VF = UserVF;
6575 // Outer loop handling: They may require CFG and instruction level
6576 // transformations before even evaluating whether vectorization is profitable.
6577 // Since we cannot modify the incoming IR, we need to build VPlan upfront in
6578 // the vectorization pipeline.
6579 if (!OrigLoop->isInnermost()) {
6580 // If the user doesn't provide a vectorization factor, determine a
6581 // reasonable one.
6582 if (UserVF.isZero()) {
6583 VF = determineVPlanVF(TTI, CM);
6584 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
6585
6586 // Make sure we have a VF > 1 for stress testing.
6587 if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
6588 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
6589 << "overriding computed VF.\n");
6590 VF = ElementCount::getFixed(4);
6591 }
6592 } else if (UserVF.isScalable() && !TTI.supportsScalableVectors() &&
6593                !ForceTargetSupportsScalableVectors) {
6594       LLVM_DEBUG(dbgs() << "LV: Not vectorizing. Scalable VF requested, but "
6595 << "not supported by the target.\n");
6596       reportVectorizationFailure(
6597           "Scalable vectorization requested but not supported by the target",
6598 "the scalable user-specified vectorization width for outer-loop "
6599 "vectorization cannot be used because the target does not support "
6600 "scalable vectors.",
6601 "ScalableVFUnfeasible", ORE, OrigLoop);
6602       return VectorizationFactor::Disabled();
6603     }
6604 assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
6605     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
6606            "VF needs to be a power of two");
6607 LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
6608 << "VF " << VF << " to build VPlans.\n");
6609 buildVPlans(VF, VF);
6610
6611 if (VPlans.empty())
6612       return VectorizationFactor::Disabled();
6613 
6614 // For VPlan build stress testing, we bail out after VPlan construction.
6615     if (VPlanBuildStressTest)
6616       return VectorizationFactor::Disabled();
6617 
6618 return {VF, 0 /*Cost*/, 0 /* ScalarCost */};
6619 }
6620
6621 LLVM_DEBUG(
6622 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
6623 "VPlan-native path.\n");
6624   return VectorizationFactor::Disabled();
6625 }
6626
6627void LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
6628 assert(OrigLoop->isInnermost() && "Inner loop expected.");
6629 CM.collectValuesToIgnore();
6630 CM.collectElementTypesForWidening();
6631
6632 FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
6633   if (!MaxFactors) // Cases that should not be vectorized or interleaved.
6634 return;
6635
6636 // Invalidate interleave groups if all blocks of loop will be predicated.
6637 if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) &&
6638       !useMaskedInterleavedAccesses(TTI)) {
6639     LLVM_DEBUG(
6640 dbgs()
6641 << "LV: Invalidate all interleaved groups due to fold-tail by masking "
6642 "which requires masked-interleaved support.\n");
6643 if (CM.InterleaveInfo.invalidateGroups())
6644 // Invalidating interleave groups also requires invalidating all decisions
6645 // based on them, which includes widening decisions and uniform and scalar
6646 // values.
6647 CM.invalidateCostModelingDecisions();
6648 }
6649
6650 if (CM.foldTailByMasking())
6651 Legal->prepareToFoldTailByMasking();
6652
6653 ElementCount MaxUserVF =
6654 UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
6655 if (UserVF) {
6656 if (!ElementCount::isKnownLE(UserVF, MaxUserVF)) {
6657       reportVectorizationInfo(
6658           "UserVF ignored because it may be larger than the maximal safe VF",
6659 "InvalidUserVF", ORE, OrigLoop);
6660 } else {
6661       assert(isPowerOf2_32(UserVF.getKnownMinValue()) &&
6662              "VF needs to be a power of two");
6663 // Collect the instructions (and their associated costs) that will be more
6664 // profitable to scalarize.
6665 CM.collectInLoopReductions();
6666 if (CM.selectUserVectorizationFactor(UserVF)) {
6667 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
6668 buildVPlansWithVPRecipes(UserVF, UserVF);
6669         LLVM_DEBUG(printPlans(dbgs()));
6670         return;
6671 }
6672 reportVectorizationInfo("UserVF ignored because of invalid costs.",
6673 "InvalidCost", ORE, OrigLoop);
6674 }
6675 }
6676
6677 // Collect the Vectorization Factor Candidates.
6678 SmallVector<ElementCount> VFCandidates;
6679 for (auto VF = ElementCount::getFixed(1);
6680 ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
6681 VFCandidates.push_back(VF);
6682 for (auto VF = ElementCount::getScalable(1);
6683 ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
6684 VFCandidates.push_back(VF);
6685
6686 CM.collectInLoopReductions();
6687 for (const auto &VF : VFCandidates) {
6688 // Collect Uniform and Scalar instructions after vectorization with VF.
6689 CM.collectNonVectorizedAndSetWideningDecisions(VF);
6690 }
6691
6692 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
6693 buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);
6694
6695   LLVM_DEBUG(printPlans(dbgs()));
6696 }
6697
6698 InstructionCost VPCostContext::getLegacyCost(Instruction *UI,
6699                                              ElementCount VF) const {
6700 InstructionCost Cost = CM.getInstructionCost(UI, VF);
6701 if (Cost.isValid() && ForceTargetInstructionCost.getNumOccurrences())
6702     return InstructionCost(ForceTargetInstructionCost);
6703   return Cost;
6704}
6705
6706 bool VPCostContext::isLegacyUniformAfterVectorization(Instruction *I,
6707                                                       ElementCount VF) const {
6708 return CM.isUniformAfterVectorization(I, VF);
6709}
6710
6711bool VPCostContext::skipCostComputation(Instruction *UI, bool IsVector) const {
6712 return CM.ValuesToIgnore.contains(UI) ||
6713 (IsVector && CM.VecValuesToIgnore.contains(UI)) ||
6714 SkipCostComputation.contains(UI);
6715}
6716
6717 InstructionCost
6718 LoopVectorizationPlanner::precomputeCosts(VPlan &Plan, ElementCount VF,
6719 VPCostContext &CostCtx) const {
6720   InstructionCost Cost;
6721   // Cost modeling for inductions is inaccurate in the legacy cost model
6722   // compared to the recipes that are generated. To match it here initially
6723   // during VPlan cost-model bring-up, use the induction costs from the legacy
6724 // cost model. Note that we do this as pre-processing; the VPlan may not have
6725 // any recipes associated with the original induction increment instruction
6726 // and may replace truncates with VPWidenIntOrFpInductionRecipe. We precompute
6727 // the cost of induction phis and increments (both that are represented by
6728 // recipes and those that are not), to avoid distinguishing between them here,
6729 // and skip all recipes that represent induction phis and increments (the
6730 // former case) later on, if they exist, to avoid counting them twice.
6731 // Similarly we pre-compute the cost of any optimized truncates.
6732 // TODO: Switch to more accurate costing based on VPlan.
6733 for (const auto &[IV, IndDesc] : Legal->getInductionVars()) {
6734     Instruction *IVInc = cast<Instruction>(
6735         IV->getIncomingValueForBlock(OrigLoop->getLoopLatch()));
6736 SmallVector<Instruction *> IVInsts = {IVInc};
6737 for (unsigned I = 0; I != IVInsts.size(); I++) {
6738 for (Value *Op : IVInsts[I]->operands()) {
6739 auto *OpI = dyn_cast<Instruction>(Op);
6740 if (Op == IV || !OpI || !OrigLoop->contains(OpI) || !Op->hasOneUse())
6741 continue;
6742 IVInsts.push_back(OpI);
6743 }
6744 }
6745 IVInsts.push_back(IV);
6746 for (User *U : IV->users()) {
6747 auto *CI = cast<Instruction>(U);
6748 if (!CostCtx.CM.isOptimizableIVTruncate(CI, VF))
6749 continue;
6750 IVInsts.push_back(CI);
6751 }
6752
6753 // If the vector loop gets executed exactly once with the given VF, ignore
6754 // the costs of comparison and induction instructions, as they'll get
6755 // simplified away.
6756 // TODO: Remove this code after stepping away from the legacy cost model and
6757 // adding code to simplify VPlans before calculating their costs.
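    // E.g. (illustrative) with a constant trip count of 8 and VF=8, the
    // vector body runs exactly once, so the IV increment and the latch
    // compare simplify away.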
6758 auto TC = getSmallConstantTripCount(PSE.getSE(), OrigLoop);
6759 if (TC == VF && !CM.foldTailByMasking())
6760 addFullyUnrolledInstructionsToIgnore(OrigLoop, Legal->getInductionVars(),
6761 CostCtx.SkipCostComputation);
6762
6763 for (Instruction *IVInst : IVInsts) {
6764 if (CostCtx.skipCostComputation(IVInst, VF.isVector()))
6765 continue;
6766 InstructionCost InductionCost = CostCtx.getLegacyCost(IVInst, VF);
6767 LLVM_DEBUG({
6768 dbgs() << "Cost of " << InductionCost << " for VF " << VF
6769 << ": induction instruction " << *IVInst << "\n";
6770 });
6771 Cost += InductionCost;
6772 CostCtx.SkipCostComputation.insert(IVInst);
6773 }
6774 }
6775
6776 /// Compute the cost of all exiting conditions of the loop using the legacy
6777 /// cost model. This is to match the legacy behavior, which adds the cost of
6778 /// all exit conditions. Note that this over-estimates the cost, as there will
6779 /// be a single condition to control the vector loop.
6780   SmallVector<BasicBlock *> Exiting;
6781   CM.TheLoop->getExitingBlocks(Exiting);
6782 SetVector<Instruction *> ExitInstrs;
6783 // Collect all exit conditions.
6784 for (BasicBlock *EB : Exiting) {
6785 auto *Term = dyn_cast<BranchInst>(EB->getTerminator());
6786 if (!Term || CostCtx.skipCostComputation(Term, VF.isVector()))
6787 continue;
6788 if (auto *CondI = dyn_cast<Instruction>(Term->getOperand(0))) {
6789 ExitInstrs.insert(CondI);
6790 }
6791 }
6792 // Compute the cost of all instructions only feeding the exit conditions.
6793 for (unsigned I = 0; I != ExitInstrs.size(); ++I) {
6794 Instruction *CondI = ExitInstrs[I];
6795 if (!OrigLoop->contains(CondI) ||
6796 !CostCtx.SkipCostComputation.insert(CondI).second)
6797 continue;
6798 InstructionCost CondICost = CostCtx.getLegacyCost(CondI, VF);
6799 LLVM_DEBUG({
6800 dbgs() << "Cost of " << CondICost << " for VF " << VF
6801 << ": exit condition instruction " << *CondI << "\n";
6802 });
6803 Cost += CondICost;
6804 for (Value *Op : CondI->operands()) {
6805 auto *OpI = dyn_cast<Instruction>(Op);
6806 if (!OpI || CostCtx.skipCostComputation(OpI, VF.isVector()) ||
6807 any_of(OpI->users(), [&ExitInstrs, this](User *U) {
6808 return OrigLoop->contains(cast<Instruction>(U)->getParent()) &&
6809 !ExitInstrs.contains(cast<Instruction>(U));
6810 }))
6811 continue;
6812 ExitInstrs.insert(OpI);
6813 }
6814 }
6815
6816 // Pre-compute the costs for branches except for the backedge, as the number
6817 // of replicate regions in a VPlan may not directly match the number of
6818 // branches, which would lead to different decisions.
6819 // TODO: Compute cost of branches for each replicate region in the VPlan,
6820 // which is more accurate than the legacy cost model.
6821 for (BasicBlock *BB : OrigLoop->blocks()) {
6822 if (CostCtx.skipCostComputation(BB->getTerminator(), VF.isVector()))
6823 continue;
6824 CostCtx.SkipCostComputation.insert(BB->getTerminator());
6825 if (BB == OrigLoop->getLoopLatch())
6826 continue;
6827 auto BranchCost = CostCtx.getLegacyCost(BB->getTerminator(), VF);
6828 Cost += BranchCost;
6829 }
6830
6831 // Pre-compute costs for instructions that are forced-scalar or profitable to
6832 // scalarize. Their costs will be computed separately in the legacy cost
6833 // model.
6834 for (Instruction *ForcedScalar : CM.ForcedScalars[VF]) {
6835 if (CostCtx.skipCostComputation(ForcedScalar, VF.isVector()))
6836 continue;
6837 CostCtx.SkipCostComputation.insert(ForcedScalar);
6838 InstructionCost ForcedCost = CostCtx.getLegacyCost(ForcedScalar, VF);
6839 LLVM_DEBUG({
6840 dbgs() << "Cost of " << ForcedCost << " for VF " << VF
6841 << ": forced scalar " << *ForcedScalar << "\n";
6842 });
6843 Cost += ForcedCost;
6844 }
6845 for (const auto &[Scalarized, ScalarCost] : CM.InstsToScalarize[VF]) {
6846 if (CostCtx.skipCostComputation(Scalarized, VF.isVector()))
6847 continue;
6848 CostCtx.SkipCostComputation.insert(Scalarized);
6849 LLVM_DEBUG({
6850 dbgs() << "Cost of " << ScalarCost << " for VF " << VF
6851 << ": profitable to scalarize " << *Scalarized << "\n";
6852 });
6853 Cost += ScalarCost;
6854 }
6855
6856 return Cost;
6857}
6858
6859InstructionCost LoopVectorizationPlanner::cost(VPlan &Plan,
6860 ElementCount VF) const {
6861 VPCostContext CostCtx(CM.TTI, *CM.TLI, Plan, CM, CM.CostKind, *PSE.getSE());
6862 InstructionCost Cost = precomputeCosts(Plan, VF, CostCtx);
6863
6864 // Now compute and add the VPlan-based cost.
6865 Cost += Plan.cost(VF, CostCtx);
6866#ifndef NDEBUG
6867 unsigned EstimatedWidth = estimateElementCount(VF, CM.getVScaleForTuning());
6868 LLVM_DEBUG(dbgs() << "Cost for VF " << VF << ": " << Cost
6869 << " (Estimated cost per lane: ");
6870 if (Cost.isValid()) {
6871 double CostPerLane = double(Cost.getValue()) / EstimatedWidth;
6872 LLVM_DEBUG(dbgs() << format("%.1f", CostPerLane));
6873 } else /* No point dividing an invalid cost - it will still be invalid */
6874 LLVM_DEBUG(dbgs() << "Invalid");
6875 LLVM_DEBUG(dbgs() << ")\n");
6876#endif
6877 return Cost;
6878}
6879
6880#ifndef NDEBUG
6881 /// Return true if the original loop \p TheLoop contains any instructions that do
6882/// not have corresponding recipes in \p Plan and are not marked to be ignored
6883/// in \p CostCtx. This means the VPlan contains simplification that the legacy
6884/// cost-model did not account for.
6885 static bool planContainsAdditionalSimplifications(VPlan &Plan,
6886                                                   VPCostContext &CostCtx,
6887 Loop *TheLoop,
6888 ElementCount VF) {
6889 // First collect all instructions for the recipes in Plan.
6890 auto GetInstructionForCost = [](const VPRecipeBase *R) -> Instruction * {
6891 if (auto *S = dyn_cast<VPSingleDefRecipe>(R))
6892 return dyn_cast_or_null<Instruction>(S->getUnderlyingValue());
6893 if (auto *WidenMem = dyn_cast<VPWidenMemoryRecipe>(R))
6894 return &WidenMem->getIngredient();
6895 return nullptr;
6896 };
6897
6898 // Check if a select for a safe divisor was hoisted to the pre-header. If so,
6899 // the select doesn't need to be considered for the vector loop cost; go with
6900 // the more accurate VPlan-based cost model.
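  // E.g. (illustrative) a loop-invariant safe divisor such as
  //   %d = select i1 %c, i64 %div, i64 1
  // hoisted to the preheader no longer adds to the per-iteration cost.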
6901 for (VPRecipeBase &R : *Plan.getVectorPreheader()) {
6902 auto *VPI = dyn_cast<VPInstruction>(&R);
6903 if (!VPI || VPI->getOpcode() != Instruction::Select ||
6904 VPI->getNumUsers() != 1)
6905 continue;
6906
6907 if (auto *WR = dyn_cast<VPWidenRecipe>(*VPI->user_begin())) {
6908 switch (WR->getOpcode()) {
6909 case Instruction::UDiv:
6910 case Instruction::SDiv:
6911 case Instruction::URem:
6912 case Instruction::SRem:
6913 return true;
6914 default:
6915 break;
6916 }
6917 }
6918 }
6919
6920 DenseSet<Instruction *> SeenInstrs;
6921 auto Iter = vp_depth_first_deep(Plan.getVectorLoopRegion()->getEntry());
6922   for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
6923     for (VPRecipeBase &R : *VPBB) {
6924 if (auto *IR = dyn_cast<VPInterleaveRecipe>(&R)) {
6925 auto *IG = IR->getInterleaveGroup();
6926 unsigned NumMembers = IG->getNumMembers();
6927 for (unsigned I = 0; I != NumMembers; ++I) {
6928 if (Instruction *M = IG->getMember(I))
6929 SeenInstrs.insert(M);
6930 }
6931 continue;
6932 }
6933 // Unused FOR splices are removed by VPlan transforms, so the VPlan-based
6934 // cost model won't cost it whilst the legacy will.
6935 if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R)) {
6936 using namespace VPlanPatternMatch;
6937 if (none_of(FOR->users(),
6938 match_fn(m_VPInstruction<
6939                           VPInstruction::FirstOrderRecurrenceSplice>())))
6940         return true;
6941 }
6942 // The VPlan-based cost model is more accurate for partial reduction and
6943 // comparing against the legacy cost isn't desirable.
6944       if (isa<VPPartialReductionRecipe>(&R))
6945         return true;
6946
6947 // The VPlan-based cost model can analyze if recipes are scalar
6948 // recursively, but the legacy cost model cannot.
6949 if (auto *WidenMemR = dyn_cast<VPWidenMemoryRecipe>(&R)) {
6950 auto *AddrI = dyn_cast<Instruction>(
6951 getLoadStorePointerOperand(&WidenMemR->getIngredient()));
6952 if (AddrI && vputils::isSingleScalar(WidenMemR->getAddr()) !=
6953 CostCtx.isLegacyUniformAfterVectorization(AddrI, VF))
6954 return true;
6955 }
6956
6957 /// If a VPlan transform folded a recipe to one producing a single-scalar,
6958 /// but the original instruction wasn't uniform-after-vectorization in the
6959 /// legacy cost model, the legacy cost overestimates the actual cost.
6960 if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R)) {
6961 if (RepR->isSingleScalar() &&
6962           !CostCtx.isLegacyUniformAfterVectorization(
6963               RepR->getUnderlyingInstr(), VF))
6964 return true;
6965 }
6966 if (Instruction *UI = GetInstructionForCost(&R)) {
6967 // If we adjusted the predicate of the recipe, the cost in the legacy
6968 // cost model may be different.
6969 using namespace VPlanPatternMatch;
6970 CmpPredicate Pred;
6971 if (match(&R, m_Cmp(Pred, m_VPValue(), m_VPValue())) &&
6972 cast<VPRecipeWithIRFlags>(R).getPredicate() !=
6973 cast<CmpInst>(UI)->getPredicate())
6974 return true;
6975 SeenInstrs.insert(UI);
6976 }
6977 }
6978 }
6979
6980 // Return true if the loop contains any instructions that are not also part of
6981 // the VPlan or are skipped for VPlan-based cost computations. This indicates
6982 // that the VPlan contains extra simplifications.
6983 return any_of(TheLoop->blocks(), [&SeenInstrs, &CostCtx,
6984 TheLoop](BasicBlock *BB) {
6985 return any_of(*BB, [&SeenInstrs, &CostCtx, TheLoop, BB](Instruction &I) {
6986       // Skip induction phis when checking for simplifications, as they may
6987       // not be lowered directly to a corresponding PHI recipe.
6988 if (isa<PHINode>(&I) && BB == TheLoop->getHeader() &&
6989 CostCtx.CM.Legal->isInductionPhi(cast<PHINode>(&I)))
6990 return false;
6991 return !SeenInstrs.contains(&I) && !CostCtx.skipCostComputation(&I, true);
6992 });
6993 });
6994}
6995#endif
6996
6997 VectorizationFactor LoopVectorizationPlanner::computeBestVF() {
6998   if (VPlans.empty())
6999     return VectorizationFactor::Disabled();
7000   // If there is a single VPlan with a single VF, return it directly.
7001 VPlan &FirstPlan = *VPlans[0];
7002 if (VPlans.size() == 1 && size(FirstPlan.vectorFactors()) == 1)
7003 return {*FirstPlan.vectorFactors().begin(), 0, 0};
7004
7005 LLVM_DEBUG(dbgs() << "LV: Computing best VF using cost kind: "
7006 << (CM.CostKind == TTI::TCK_RecipThroughput
7007 ? "Reciprocal Throughput\n"
7008 : CM.CostKind == TTI::TCK_Latency
7009 ? "Instruction Latency\n"
7010 : CM.CostKind == TTI::TCK_CodeSize ? "Code Size\n"
7011 : CM.CostKind == TTI::TCK_SizeAndLatency
7012 ? "Code Size and Latency\n"
7013 : "Unknown\n"));
7014
7015   ElementCount ScalarVF = ElementCount::getFixed(1);
7016   assert(hasPlanWithVF(ScalarVF) &&
7017 "More than a single plan/VF w/o any plan having scalar VF");
7018
7019 // TODO: Compute scalar cost using VPlan-based cost model.
7020 InstructionCost ScalarCost = CM.expectedCost(ScalarVF);
7021 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ScalarCost << ".\n");
7022 VectorizationFactor ScalarFactor(ScalarVF, ScalarCost, ScalarCost);
7023 VectorizationFactor BestFactor = ScalarFactor;
7024
7025 bool ForceVectorization = Hints.getForce() == LoopVectorizeHints::FK_Enabled;
7026 if (ForceVectorization) {
7027 // Ignore scalar width, because the user explicitly wants vectorization.
7028 // Initialize cost to max so that VF = 2 is, at least, chosen during cost
7029 // evaluation.
7030 BestFactor.Cost = InstructionCost::getMax();
7031 }
7032
7033 for (auto &P : VPlans) {
7034 ArrayRef<ElementCount> VFs(P->vectorFactors().begin(),
7035 P->vectorFactors().end());
7036
7037     SmallVector<VPRegisterUsage, 8> RUs;
7038     if (any_of(VFs, [this](ElementCount VF) {
7039 return CM.shouldConsiderRegPressureForVF(VF);
7040 }))
7041 RUs = calculateRegisterUsageForPlan(*P, VFs, TTI, CM.ValuesToIgnore);
7042
7043 for (unsigned I = 0; I < VFs.size(); I++) {
7044 ElementCount VF = VFs[I];
7045 if (VF.isScalar())
7046 continue;
7047 if (!ForceVectorization && !willGenerateVectors(*P, VF, TTI)) {
7048 LLVM_DEBUG(
7049 dbgs()
7050 << "LV: Not considering vector loop of width " << VF
7051 << " because it will not generate any vector instructions.\n");
7052 continue;
7053 }
7054 if (CM.OptForSize && !ForceVectorization && hasReplicatorRegion(*P)) {
7055 LLVM_DEBUG(
7056 dbgs()
7057 << "LV: Not considering vector loop of width " << VF
7058 << " because it would cause replicated blocks to be generated,"
7059 << " which isn't allowed when optimizing for size.\n");
7060 continue;
7061 }
7062
7063 InstructionCost Cost = cost(*P, VF);
7064 VectorizationFactor CurrentFactor(VF, Cost, ScalarCost);
7065
7066 if (CM.shouldConsiderRegPressureForVF(VF) &&
7067 RUs[I].exceedsMaxNumRegs(TTI, ForceTargetNumVectorRegs)) {
7068 LLVM_DEBUG(dbgs() << "LV(REG): Not considering vector loop of width "
7069 << VF << " because it uses too many registers\n");
7070 continue;
7071 }
7072
7073 if (isMoreProfitable(CurrentFactor, BestFactor, P->hasScalarTail()))
7074 BestFactor = CurrentFactor;
7075
7076 // If profitable add it to ProfitableVF list.
7077 if (isMoreProfitable(CurrentFactor, ScalarFactor, P->hasScalarTail()))
7078 ProfitableVFs.push_back(CurrentFactor);
7079 }
7080 }
7081
7082#ifndef NDEBUG
7083 // Select the optimal vectorization factor according to the legacy cost-model.
7084 // This is now only used to verify the decisions by the new VPlan-based
7085 // cost-model and will be retired once the VPlan-based cost-model is
7086 // stabilized.
7087 VectorizationFactor LegacyVF = selectVectorizationFactor();
7088 VPlan &BestPlan = getPlanFor(BestFactor.Width);
7089
7090 // Pre-compute the cost and use it to check if BestPlan contains any
7091 // simplifications not accounted for in the legacy cost model. If that's the
7092 // case, don't trigger the assertion, as the extra simplifications may cause a
7093 // different VF to be picked by the VPlan-based cost model.
7094 VPCostContext CostCtx(CM.TTI, *CM.TLI, BestPlan, CM, CM.CostKind,
7095 *CM.PSE.getSE());
7096 precomputeCosts(BestPlan, BestFactor.Width, CostCtx);
7097 // Verify that the VPlan-based and legacy cost models agree, except for VPlans
7098 // with early exits and plans with additional VPlan simplifications. The
7099 // legacy cost model doesn't properly model costs for such loops.
7100 assert((BestFactor.Width == LegacyVF.Width || BestPlan.hasEarlyExit() ||
7101           planContainsAdditionalSimplifications(getPlanFor(BestFactor.Width),
7102                                                 CostCtx, OrigLoop,
7103 BestFactor.Width) ||
7104           planContainsAdditionalSimplifications(
7105               getPlanFor(LegacyVF.Width), CostCtx, OrigLoop, LegacyVF.Width)) &&
7106 " VPlan cost model and legacy cost model disagreed");
7107 assert((BestFactor.Width.isScalar() || BestFactor.ScalarCost > 0) &&
7108 "when vectorizing, the scalar cost must be computed.");
7109#endif
7110
7111 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << BestFactor.Width << ".\n");
7112 return BestFactor;
7113}
7114
7115 static Value *getStartValueFromReductionResult(VPInstruction *RdxResult) {
7116   using namespace VPlanPatternMatch;
7117   assert(RdxResult->getOpcode() == VPInstruction::ComputeFindIVResult &&
7118          "RdxResult must be ComputeFindIVResult");
7119 VPValue *StartVPV = RdxResult->getOperand(1);
7120 match(StartVPV, m_Freeze(m_VPValue(StartVPV)));
7121 return StartVPV->getLiveInIRValue();
7122}
7123
7124 // If \p EpiResumePhiR is the resume VPPhi for a reduction when vectorizing the
7125// epilog loop, fix the reduction's scalar PHI node by adding the incoming value
7126// from the main vector loop.
7127 static void fixReductionScalarResumeWhenVectorizingEpilog(
7128     VPPhi *EpiResumePhiR, PHINode &EpiResumePhi, BasicBlock *BypassBlock) {
7129 // Get the VPInstruction computing the reduction result in the middle block.
7130 // The first operand may not be from the middle block if it is not connected
7131 // to the scalar preheader. In that case, there's nothing to fix.
7132 VPValue *Incoming = EpiResumePhiR->getOperand(0);
7133   match(Incoming, VPlanPatternMatch::m_ZExtOrSExt(
7134                       VPlanPatternMatch::m_VPValue(Incoming)));
7135   auto *EpiRedResult = dyn_cast<VPInstruction>(Incoming);
7136 if (!EpiRedResult ||
7137 (EpiRedResult->getOpcode() != VPInstruction::ComputeAnyOfResult &&
7138 EpiRedResult->getOpcode() != VPInstruction::ComputeReductionResult &&
7139 EpiRedResult->getOpcode() != VPInstruction::ComputeFindIVResult))
7140 return;
7141
7142 auto *EpiRedHeaderPhi =
7143 cast<VPReductionPHIRecipe>(EpiRedResult->getOperand(0));
7144 RecurKind Kind = EpiRedHeaderPhi->getRecurrenceKind();
7145 Value *MainResumeValue;
7146 if (auto *VPI = dyn_cast<VPInstruction>(EpiRedHeaderPhi->getStartValue())) {
7147 assert((VPI->getOpcode() == VPInstruction::Broadcast ||
7148 VPI->getOpcode() == VPInstruction::ReductionStartVector) &&
7149 "unexpected start recipe");
7150 MainResumeValue = VPI->getOperand(0)->getUnderlyingValue();
7151 } else
7152 MainResumeValue = EpiRedHeaderPhi->getStartValue()->getUnderlyingValue();
7153   if (RecurrenceDescriptor::isAnyOfRecurrenceKind(Kind)) {
7154     [[maybe_unused]] Value *StartV =
7155 EpiRedResult->getOperand(1)->getLiveInIRValue();
7156 auto *Cmp = cast<ICmpInst>(MainResumeValue);
7157 assert(Cmp->getPredicate() == CmpInst::ICMP_NE &&
7158 "AnyOf expected to start with ICMP_NE");
7159 assert(Cmp->getOperand(1) == StartV &&
7160 "AnyOf expected to start by comparing main resume value to original "
7161 "start value");
7162 MainResumeValue = Cmp->getOperand(0);
7163   } else if (RecurrenceDescriptor::isFindIVRecurrenceKind(Kind)) {
7164     Value *StartV = getStartValueFromReductionResult(EpiRedResult);
7165 Value *SentinelV = EpiRedResult->getOperand(2)->getLiveInIRValue();
7166 using namespace llvm::PatternMatch;
7167 Value *Cmp, *OrigResumeV, *CmpOp;
7168 [[maybe_unused]] bool IsExpectedPattern =
7169 match(MainResumeValue,
7170 m_Select(m_OneUse(m_Value(Cmp)), m_Specific(SentinelV),
7171 m_Value(OrigResumeV))) &&
7172       match(Cmp, m_SpecificICmp(ICmpInst::ICMP_NE, m_Specific(OrigResumeV),
7173                                 m_Value(CmpOp))) &&
7174 ((CmpOp == StartV && isGuaranteedNotToBeUndefOrPoison(CmpOp))));
7175 assert(IsExpectedPattern && "Unexpected reduction resume pattern");
7176 MainResumeValue = OrigResumeV;
7177 }
7178 PHINode *MainResumePhi = cast<PHINode>(MainResumeValue);
7179
7180 // When fixing reductions in the epilogue loop we should already have
7181 // created a bc.merge.rdx Phi after the main vector body. Ensure that we carry
7182 // over the incoming values correctly.
7183 EpiResumePhi.setIncomingValueForBlock(
7184 BypassBlock, MainResumePhi->getIncomingValueForBlock(BypassBlock));
7185}
7186
7187 DenseMap<const SCEV *, Value *> LoopVectorizationPlanner::executePlan(
7188     ElementCount BestVF, unsigned BestUF, VPlan &BestVPlan,
7189 InnerLoopVectorizer &ILV, DominatorTree *DT, bool VectorizingEpilogue) {
7190 assert(BestVPlan.hasVF(BestVF) &&
7191 "Trying to execute plan with unsupported VF");
7192 assert(BestVPlan.hasUF(BestUF) &&
7193 "Trying to execute plan with unsupported UF");
7194 if (BestVPlan.hasEarlyExit())
7195 ++LoopsEarlyExitVectorized;
7196 // TODO: Move to VPlan transform stage once the transition to the VPlan-based
7197 // cost model is complete for better cost estimates.
7200 BestVPlan);
7203 bool HasBranchWeights =
7204 hasBranchWeightMD(*OrigLoop->getLoopLatch()->getTerminator());
7205 if (HasBranchWeights) {
7206 std::optional<unsigned> VScale = CM.getVScaleForTuning();
7208 BestVPlan, BestVF, VScale);
7209 }
7210
7211 // Checks are the same for all VPlans, added to BestVPlan only for
7212 // compactness.
7213 attachRuntimeChecks(BestVPlan, ILV.RTChecks, HasBranchWeights);
7214
7215 // Retrieving VectorPH now when it's easier while VPlan still has Regions.
7216 VPBasicBlock *VectorPH = cast<VPBasicBlock>(BestVPlan.getVectorPreheader());
7217
7218 VPlanTransforms::optimizeForVFAndUF(BestVPlan, BestVF, BestUF, PSE);
7221 if (BestVPlan.getEntry()->getSingleSuccessor() ==
7222 BestVPlan.getScalarPreheader()) {
7223 // TODO: The vector loop would be dead, should not even try to vectorize.
7224 ORE->emit([&]() {
7225 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationDead",
7226 OrigLoop->getStartLoc(),
7227 OrigLoop->getHeader())
7228 << "Created vector loop never executes due to insufficient trip "
7229 "count.";
7230 });
7232 }
7233
7235
7237 // Regions are dissolved after optimizing for VF and UF, which completely
7238 // removes unneeded loop regions first.
7239   VPlanTransforms::dissolveLoopRegions(BestVPlan);
7240   // Canonicalize EVL loops after regions are dissolved.
7241   VPlanTransforms::canonicalizeEVLLoops(BestVPlan);
7242   // Materialize constant vector trip counts early when possible.
7243   VPlanTransforms::materializeVectorTripCount(
7244 BestVPlan, VectorPH, CM.foldTailByMasking(),
7245 CM.requiresScalarEpilogue(BestVF.isVector()));
7246 VPlanTransforms::materializeVFAndVFxUF(BestVPlan, VectorPH, BestVF);
7247 VPlanTransforms::cse(BestVPlan);
7249
7250 // 0. Generate SCEV-dependent code in the entry, including TripCount, before
7251 // making any changes to the CFG.
7252 DenseMap<const SCEV *, Value *> ExpandedSCEVs =
7253 VPlanTransforms::expandSCEVs(BestVPlan, *PSE.getSE());
7254 if (!ILV.getTripCount())
7255 ILV.setTripCount(BestVPlan.getTripCount()->getLiveInIRValue());
7256 else
7257 assert(VectorizingEpilogue && "should only re-use the existing trip "
7258 "count during epilogue vectorization");
7259
7260 // Perform the actual loop transformation.
7261 VPTransformState State(&TTI, BestVF, LI, DT, ILV.AC, ILV.Builder, &BestVPlan,
7262 OrigLoop->getParentLoop(),
7263 Legal->getWidestInductionType());
7264
7265#ifdef EXPENSIVE_CHECKS
7266 assert(DT->verify(DominatorTree::VerificationLevel::Fast));
7267#endif
7268
7269 // 1. Set up the skeleton for vectorization, including vector pre-header and
7270 // middle block. The vector loop is created during VPlan execution.
7271 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
7273 State.CFG.PrevBB->getSingleSuccessor(), &BestVPlan);
7275
7276 assert(verifyVPlanIsValid(BestVPlan, true /*VerifyLate*/) &&
7277 "final VPlan is invalid");
7278
7279 // After vectorization, the exit blocks of the original loop will have
7280 // additional predecessors. Invalidate SCEVs for the exit phis in case SE
7281 // looked through single-entry phis.
7282 ScalarEvolution &SE = *PSE.getSE();
7283 for (VPIRBasicBlock *Exit : BestVPlan.getExitBlocks()) {
7284 if (!Exit->hasPredecessors())
7285 continue;
7286 for (VPRecipeBase &PhiR : Exit->phis())
7287       SE.forgetLcssaPhiWithNewPredecessor(OrigLoop,
7288                                           &cast<VPIRPhi>(PhiR).getIRPhi());
7289 }
7290 // Forget the original loop and block dispositions.
7291 SE.forgetLoop(OrigLoop);
7292   SE.forgetBlockAndLoopDispositions();
7293 
7295
7296 //===------------------------------------------------===//
7297 //
7298   // Notice: any optimization or new instruction that goes
7299 // into the code below should also be implemented in
7300 // the cost-model.
7301 //
7302 //===------------------------------------------------===//
7303
7304 // Retrieve loop information before executing the plan, which may remove the
7305 // original loop, if it becomes unreachable.
7306 MDNode *LID = OrigLoop->getLoopID();
7307 unsigned OrigLoopInvocationWeight = 0;
7308 std::optional<unsigned> OrigAverageTripCount =
7309 getLoopEstimatedTripCount(OrigLoop, &OrigLoopInvocationWeight);
7310
7311 BestVPlan.execute(&State);
7312
7313 // 2.6. Maintain Loop Hints
7314 // Keep all loop hints from the original loop on the vector loop (we'll
7315 // replace the vectorizer-specific hints below).
7316 VPBasicBlock *HeaderVPBB = vputils::getFirstLoopHeader(BestVPlan, State.VPDT);
7317 // Add metadata to disable runtime unrolling a scalar loop when there
7318 // are no runtime checks about strides and memory. A scalar loop that is
7319 // rarely used is not worth unrolling.
7320 bool DisableRuntimeUnroll = !ILV.RTChecks.hasChecks() && !BestVF.isScalar();
7322 HeaderVPBB ? LI->getLoopFor(State.CFG.VPBB2IRBB.lookup(HeaderVPBB))
7323 : nullptr,
7324 HeaderVPBB, BestVPlan, VectorizingEpilogue, LID, OrigAverageTripCount,
7325 OrigLoopInvocationWeight,
7326 estimateElementCount(BestVF * BestUF, CM.getVScaleForTuning()),
7327 DisableRuntimeUnroll);
7328
7329 // 3. Fix the vectorized code: take care of header phi's, live-outs,
7330 // predication, updating analyses.
7331 ILV.fixVectorizedLoop(State);
7332
7334
7335 return ExpandedSCEVs;
7336}
7337
7338//===--------------------------------------------------------------------===//
7339// EpilogueVectorizerMainLoop
7340//===--------------------------------------------------------------------===//
7341
7342/// This function is partially responsible for generating the control flow
7343/// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
7344 BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
7345   BasicBlock *ScalarPH = createScalarPreheader("");
7346 BasicBlock *VectorPH = ScalarPH->getSinglePredecessor();
7347
7348 // Generate the code to check the minimum iteration count of the vector
7349 // epilogue (see below).
7350 EPI.EpilogueIterationCountCheck =
7351 emitIterationCountCheck(VectorPH, ScalarPH, true);
7352 EPI.EpilogueIterationCountCheck->setName("iter.check");
7353
7354 VectorPH = cast<BranchInst>(EPI.EpilogueIterationCountCheck->getTerminator())
7355 ->getSuccessor(1);
7356 // Generate the iteration count check for the main loop, *after* the check
7357 // for the epilogue loop, so that the path-length is shorter for the case
7358 // that goes directly through the vector epilogue. The longer-path length for
7359 // the main loop is compensated for, by the gain from vectorizing the larger
7360 // trip count. Note: the branch will get updated later on when we vectorize
7361 // the epilogue.
7362 EPI.MainLoopIterationCountCheck =
7363 emitIterationCountCheck(VectorPH, ScalarPH, false);
7364
7365 return cast<BranchInst>(EPI.MainLoopIterationCountCheck->getTerminator())
7366 ->getSuccessor(1);
7367}
7368
7369 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
7370   LLVM_DEBUG({
7371 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
7372 << "Main Loop VF:" << EPI.MainLoopVF
7373 << ", Main Loop UF:" << EPI.MainLoopUF
7374 << ", Epilogue Loop VF:" << EPI.EpilogueVF
7375 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7376 });
7377}
7378
7379 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
7380   DEBUG_WITH_TYPE(VerboseDebug, {
7381     dbgs() << "intermediate fn:\n"
7382 << *OrigLoop->getHeader()->getParent() << "\n";
7383 });
7384}
7385
7387 BasicBlock *VectorPH, BasicBlock *Bypass, bool ForEpilogue) {
7388 assert(Bypass && "Expected valid bypass basic block.");
7389   Value *Count = getTripCount();
7390 
7391   Value *CheckMinIters = createIterationCountCheck(
7392 VectorPH, ForEpilogue ? EPI.EpilogueVF : EPI.MainLoopVF,
7393 ForEpilogue ? EPI.EpilogueUF : EPI.MainLoopUF);
7394
7395 BasicBlock *const TCCheckBlock = VectorPH;
7396 if (!ForEpilogue)
7397 TCCheckBlock->setName("vector.main.loop.iter.check");
7398
7399 // Create new preheader for vector loop.
7400 VectorPH = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
7401 static_cast<DominatorTree *>(nullptr), LI, nullptr,
7402 "vector.ph");
7403 if (ForEpilogue) {
7404 // Save the trip count so we don't have to regenerate it in the
7405 // vec.epilog.iter.check. This is safe to do because the trip count
7406 // generated here dominates the vector epilog iter check.
7407 EPI.TripCount = Count;
7408 } else {
7410 }
7411
7412 BranchInst &BI = *BranchInst::Create(Bypass, VectorPH, CheckMinIters);
7413 if (hasBranchWeightMD(*OrigLoop->getLoopLatch()->getTerminator()))
7414 setBranchWeights(BI, MinItersBypassWeights, /*IsExpected=*/false);
7415 ReplaceInstWithInst(TCCheckBlock->getTerminator(), &BI);
7416
7417 // When vectorizing the main loop, its trip-count check is placed in a new
7418 // block, whereas the overall trip-count check is placed in the VPlan entry
7419 // block. When vectorizing the epilogue loop, its trip-count check is placed
7420 // in the VPlan entry block.
7421 if (!ForEpilogue)
7422 introduceCheckBlockInVPlan(TCCheckBlock);
7423 return TCCheckBlock;
7424}
7425
7426//===--------------------------------------------------------------------===//
7427// EpilogueVectorizerEpilogueLoop
7428//===--------------------------------------------------------------------===//
7429
7430/// This function creates a new scalar preheader, using the previous one as
7431 /// entry block to the epilogue VPlan. The minimum iteration check is
7432 /// represented in VPlan.
7434 BasicBlock *NewScalarPH = createScalarPreheader("vec.epilog.");
7435 BasicBlock *OriginalScalarPH = NewScalarPH->getSinglePredecessor();
7436 OriginalScalarPH->setName("vec.epilog.iter.check");
7437 VPIRBasicBlock *NewEntry = Plan.createVPIRBasicBlock(OriginalScalarPH);
7438 VPBasicBlock *OldEntry = Plan.getEntry();
7439 for (auto &R : make_early_inc_range(*OldEntry)) {
7440 // Skip moving VPIRInstructions (including VPIRPhis), which are unmovable by
7441     // definition.
7442 if (isa<VPIRInstruction>(&R))
7443 continue;
7444 R.moveBefore(*NewEntry, NewEntry->end());
7445 }
7446
7447 VPBlockUtils::reassociateBlocks(OldEntry, NewEntry);
7448 Plan.setEntry(NewEntry);
7449 // OldEntry is now dead and will be cleaned up when the plan gets destroyed.
7450
7451 return OriginalScalarPH;
7452}
7453
7454 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
7455   LLVM_DEBUG({
7456 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
7457 << "Epilogue Loop VF:" << EPI.EpilogueVF
7458 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7459 });
7460}
7461
7462 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
7463   DEBUG_WITH_TYPE(VerboseDebug, {
7464     dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n";
7465 });
7466}
7467
7469VPRecipeBuilder::tryToWidenMemory(Instruction *I, ArrayRef<VPValue *> Operands,
7470 VFRange &Range) {
7471   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
7472          "Must be called with either a load or store");
7473
7474 auto WillWiden = [&](ElementCount VF) -> bool {
7475     LoopVectorizationCostModel::InstWidening Decision =
7476         CM.getWideningDecision(I, VF);
7477     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
7478            "CM decision should be taken at this point.");
7479     if (Decision == LoopVectorizationCostModel::CM_Interleave)
7480       return true;
7481 if (CM.isScalarAfterVectorization(I, VF) ||
7482 CM.isProfitableToScalarize(I, VF))
7483 return false;
7484     return Decision != LoopVectorizationCostModel::CM_Scalarize;
7485   };
7486
7487   if (!LoopVectorizationPlanner::getDecisionAndClampRange(WillWiden, Range))
7488     return nullptr;
7489
7490 VPValue *Mask = nullptr;
7491 if (Legal->isMaskRequired(I))
7492 Mask = getBlockInMask(Builder.getInsertBlock());
7493
7494 // Determine if the pointer operand of the access is either consecutive or
7495 // reverse consecutive.
7496   LoopVectorizationCostModel::InstWidening Decision =
7497       CM.getWideningDecision(I, Range.Start);
7498   bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse;
7499   bool Consecutive =
7500       Reverse || Decision == LoopVectorizationCostModel::CM_Widen;
7501 
7502 VPValue *Ptr = isa<LoadInst>(I) ? Operands[0] : Operands[1];
7503 if (Consecutive) {
7504     auto *GEP = dyn_cast<GetElementPtrInst>(
7505         Ptr->getUnderlyingValue()->stripPointerCasts());
7506 VPSingleDefRecipe *VectorPtr;
7507 if (Reverse) {
7508 // When folding the tail, we may compute an address that we don't in the
7509 // original scalar loop: drop the GEP no-wrap flags in this case.
7510 // Otherwise preserve existing flags without no-unsigned-wrap, as we will
7511 // emit negative indices.
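      // E.g. (illustrative) a reverse access reads lanes base+VF-1, ...,
      // base, so the pointer recipe steps backwards with stride -1.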
7512 GEPNoWrapFlags Flags =
7513 CM.foldTailByMasking() || !GEP
7514             ? GEPNoWrapFlags::none()
7515             : GEP->getNoWrapFlags().withoutNoUnsignedWrap();
7516 VectorPtr =
7517           new VPVectorEndPointerRecipe(Ptr, &Plan.getVF(), getLoadStoreType(I),
7518                                        /*Stride*/ -1, Flags, I->getDebugLoc());
7519 } else {
7520 VectorPtr = new VPVectorPointerRecipe(Ptr, getLoadStoreType(I),
7521 GEP ? GEP->getNoWrapFlags()
7522                                                 : GEPNoWrapFlags::none(),
7523                                             I->getDebugLoc());
7524 }
7525 Builder.insert(VectorPtr);
7526 Ptr = VectorPtr;
7527 }
7528 if (LoadInst *Load = dyn_cast<LoadInst>(I))
7529 return new VPWidenLoadRecipe(*Load, Ptr, Mask, Consecutive, Reverse,
7530 VPIRMetadata(*Load, LVer), I->getDebugLoc());
7531
7532 StoreInst *Store = cast<StoreInst>(I);
7533 return new VPWidenStoreRecipe(*Store, Ptr, Operands[0], Mask, Consecutive,
7534 Reverse, VPIRMetadata(*Store, LVer),
7535 I->getDebugLoc());
7536}
7537
7538 /// Creates a VPWidenIntOrFpInductionRecipe for \p Phi. If needed, it will also
7539/// insert a recipe to expand the step for the induction recipe.
7540static VPWidenIntOrFpInductionRecipe *
7541 createWidenInductionRecipes(PHINode *Phi, Instruction *PhiOrTrunc,
7542                             VPValue *Start, const InductionDescriptor &IndDesc,
7543 VPlan &Plan, ScalarEvolution &SE, Loop &OrigLoop) {
7544 assert(IndDesc.getStartValue() ==
7545 Phi->getIncomingValueForBlock(OrigLoop.getLoopPreheader()));
7546 assert(SE.isLoopInvariant(IndDesc.getStep(), &OrigLoop) &&
7547 "step must be loop invariant");
7548
7549 VPValue *Step =
7550       vputils::getOrCreateVPValueForSCEVExpr(Plan, IndDesc.getStep());
7551   if (auto *TruncI = dyn_cast<TruncInst>(PhiOrTrunc)) {
7552 return new VPWidenIntOrFpInductionRecipe(Phi, Start, Step, &Plan.getVF(),
7553 IndDesc, TruncI,
7554 TruncI->getDebugLoc());
7555 }
7556 assert(isa<PHINode>(PhiOrTrunc) && "must be a phi node here");
7557 return new VPWidenIntOrFpInductionRecipe(Phi, Start, Step, &Plan.getVF(),
7558 IndDesc, Phi->getDebugLoc());
7559}
7560
7561VPHeaderPHIRecipe *VPRecipeBuilder::tryToOptimizeInductionPHI(
7562 PHINode *Phi, ArrayRef<VPValue *> Operands, VFRange &Range) {
7563
7564 // Check if this is an integer or fp induction. If so, build the recipe that
7565 // produces its scalar and vector values.
7566 if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi))
7567 return createWidenInductionRecipes(Phi, Phi, Operands[0], *II, Plan,
7568 *PSE.getSE(), *OrigLoop);
7569
7570 // Check if this is pointer induction. If so, build the recipe for it.
7571 if (auto *II = Legal->getPointerInductionDescriptor(Phi)) {
7572 VPValue *Step = vputils::getOrCreateVPValueForSCEVExpr(Plan, II->getStep());
7573 return new VPWidenPointerInductionRecipe(
7574 Phi, Operands[0], Step, &Plan.getVFxUF(), *II,
7575         LoopVectorizationPlanner::getDecisionAndClampRange(
7576             [&](ElementCount VF) {
7577 return CM.isScalarAfterVectorization(Phi, VF);
7578 },
7579 Range),
7580 Phi->getDebugLoc());
7581 }
7582 return nullptr;
7583}
7584
7585VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate(
7586 TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range) {
7587 // Optimize the special case where the source is a constant integer
7588 // induction variable. Notice that we can only optimize the 'trunc' case
7589 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
7590 // (c) other casts depend on pointer size.
7591
7592 // Determine whether \p K is a truncation based on an induction variable that
7593 // can be optimized.
7594 auto IsOptimizableIVTruncate =
7595 [&](Instruction *K) -> std::function<bool(ElementCount)> {
7596 return [=](ElementCount VF) -> bool {
7597 return CM.isOptimizableIVTruncate(K, VF);
7598 };
7599 };
7600
7601   if (LoopVectorizationPlanner::getDecisionAndClampRange(
7602           IsOptimizableIVTruncate(I), Range)) {
7603
7604 auto *Phi = cast<PHINode>(I->getOperand(0));
7605 const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi);
7606 VPValue *Start = Plan.getOrAddLiveIn(II.getStartValue());
7607 return createWidenInductionRecipes(Phi, I, Start, II, Plan, *PSE.getSE(),
7608 *OrigLoop);
7609 }
7610 return nullptr;
7611}
7612
7613VPSingleDefRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
7614 ArrayRef<VPValue *> Operands,
7615 VFRange &Range) {
7616   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
7617       [this, CI](ElementCount VF) {
7618 return CM.isScalarWithPredication(CI, VF);
7619 },
7620 Range);
7621
7622 if (IsPredicated)
7623 return nullptr;
7624
7625 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
7626 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
7627 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
7628 ID == Intrinsic::pseudoprobe ||
7629 ID == Intrinsic::experimental_noalias_scope_decl))
7630 return nullptr;
7631
7632 SmallVector<VPValue *> Ops(Operands.take_front(CI->arg_size()));
7633
7634 // Is it beneficial to perform intrinsic call compared to lib call?
7635 bool ShouldUseVectorIntrinsic =
7636 ID && LoopVectorizationPlanner::getDecisionAndClampRange(
7637 [&](ElementCount VF) -> bool {
7638 return CM.getCallWideningDecision(CI, VF).Kind ==
7639 LoopVectorizationCostModel::CM_IntrinsicCall;
7640 },
7641 Range);
7642 if (ShouldUseVectorIntrinsic)
7643 return new VPWidenIntrinsicRecipe(*CI, ID, Ops, CI->getType(),
7644 CI->getDebugLoc());
7645
7646 Function *Variant = nullptr;
7647 std::optional<unsigned> MaskPos;
7648 // Is it better to call a vectorized version of the function than to
7649 // scalarize the call?
7650 auto ShouldUseVectorCall = LoopVectorizationPlanner::getDecisionAndClampRange(
7651 [&](ElementCount VF) -> bool {
7652 // The following case may be scalarized depending on the VF.
7653 // The flag shows whether we can use a usual Call for the vectorized
7654 // version of the instruction.
7655
7656 // If we've found a variant at a previous VF, then stop looking. A
7657 // vectorized variant of a function expects input in a certain shape
7658 // -- basically the number of input registers, the number of lanes
7659 // per register, and whether there's a mask required.
7660 // We store a pointer to the variant in the VPWidenCallRecipe, so
7661 // once we have an appropriate variant it's only valid for that VF.
7662 // This will force a different vplan to be generated for each VF that
7663 // finds a valid variant.
7664 if (Variant)
7665 return false;
7666 LoopVectorizationCostModel::CallWideningDecision Decision =
7667 CM.getCallWideningDecision(CI, VF);
7668 if (Decision.Kind == LoopVectorizationCostModel::CM_VectorCall) {
7669 Variant = Decision.Variant;
7670 MaskPos = Decision.MaskPos;
7671 return true;
7672 }
7673
7674 return false;
7675 },
7676 Range);
7677 if (ShouldUseVectorCall) {
7678 if (MaskPos.has_value()) {
7679 // We have 2 cases that would require a mask:
7680 // 1) The block needs to be predicated, either due to a conditional
7681 // in the scalar loop or use of an active lane mask with
7682 // tail-folding, and we use the appropriate mask for the block.
7683 // 2) No mask is required for the block, but the only available
7684 // vector variant at this VF requires a mask, so we synthesize an
7685 // all-true mask.
7686 VPValue *Mask = nullptr;
7687 if (Legal->isMaskRequired(CI))
7688 Mask = getBlockInMask(Builder.getInsertBlock());
7689 else
7690 Mask = Plan.getOrAddLiveIn(
7691 ConstantInt::getTrue(IntegerType::getInt1Ty(CI->getContext())));
7692
7693 Ops.insert(Ops.begin() + *MaskPos, Mask);
7694 }
7695
7696 Ops.push_back(Operands.back());
7697 return new VPWidenCallRecipe(CI, Variant, Ops, CI->getDebugLoc());
7698 }
7699
7700 return nullptr;
7701}
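// Illustrative example (editorial note, assuming VF = 4): a scalar call
//   %r = call float @llvm.sqrt.f32(float %x)
// becomes either a widened intrinsic call
//   %vr = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %vx)
// or, if the cost model selected a vector library variant, a
// VPWidenCallRecipe invoking that variant, with an all-true mask inserted
// when the only variant available at this VF requires a mask.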
7702
7703bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
7704 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
7705 !isa<StoreInst>(I) && "Instruction should have been handled earlier");
7706 // Instruction should be widened, unless it is scalar after vectorization,
7707 // scalarization is profitable or it is predicated.
7708 auto WillScalarize = [this, I](ElementCount VF) -> bool {
7709 return CM.isScalarAfterVectorization(I, VF) ||
7710 CM.isProfitableToScalarize(I, VF) ||
7711 CM.isScalarWithPredication(I, VF);
7712 };
7713 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
7714 Range);
7715}
7716
7717VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
7718 ArrayRef<VPValue *> Operands) {
7719 switch (I->getOpcode()) {
7720 default:
7721 return nullptr;
7722 case Instruction::SDiv:
7723 case Instruction::UDiv:
7724 case Instruction::SRem:
7725 case Instruction::URem: {
7726 // If not provably safe, use a select to form a safe divisor before widening the
7727 // div/rem operation itself. Otherwise fall through to general handling below.
7728 if (CM.isPredicatedInst(I)) {
7729 SmallVector<VPValue *> Ops(Operands);
7730 VPValue *Mask = getBlockInMask(Builder.getInsertBlock());
7731 VPValue *One =
7732 Plan.getOrAddLiveIn(ConstantInt::get(I->getType(), 1u, false));
7733 auto *SafeRHS = Builder.createSelect(Mask, Ops[1], One, I->getDebugLoc());
7734 Ops[1] = SafeRHS;
7735 return new VPWidenRecipe(*I, Ops);
7736 }
7737 [[fallthrough]];
7738 }
7739 case Instruction::Add:
7740 case Instruction::And:
7741 case Instruction::AShr:
7742 case Instruction::FAdd:
7743 case Instruction::FCmp:
7744 case Instruction::FDiv:
7745 case Instruction::FMul:
7746 case Instruction::FNeg:
7747 case Instruction::FRem:
7748 case Instruction::FSub:
7749 case Instruction::ICmp:
7750 case Instruction::LShr:
7751 case Instruction::Mul:
7752 case Instruction::Or:
7753 case Instruction::Select:
7754 case Instruction::Shl:
7755 case Instruction::Sub:
7756 case Instruction::Xor:
7757 case Instruction::Freeze: {
7758 SmallVector<VPValue *> NewOps(Operands);
7759 if (Instruction::isBinaryOp(I->getOpcode())) {
7760 // The legacy cost model uses SCEV to check if some of the operands are
7761 // constants. To match the legacy cost model's behavior, use SCEV to try
7762 // to replace operands with constants.
7763 ScalarEvolution &SE = *PSE.getSE();
7764 auto GetConstantViaSCEV = [this, &SE](VPValue *Op) {
7765 if (!Op->isLiveIn())
7766 return Op;
7767 Value *V = Op->getUnderlyingValue();
7768 if (isa<Constant>(V) || !SE.isSCEVable(V->getType()))
7769 return Op;
7770 auto *C = dyn_cast<SCEVConstant>(SE.getSCEV(V));
7771 if (!C)
7772 return Op;
7773 return Plan.getOrAddLiveIn(C->getValue());
7774 };
7775 // For Mul, the legacy cost model checks both operands.
7776 if (I->getOpcode() == Instruction::Mul)
7777 NewOps[0] = GetConstantViaSCEV(NewOps[0]);
7778 // For other binops, the legacy cost model only checks the second operand.
7779 NewOps[1] = GetConstantViaSCEV(NewOps[1]);
7780 }
7781 return new VPWidenRecipe(*I, NewOps);
7782 }
7783 case Instruction::ExtractValue: {
7784 SmallVector<VPValue *> NewOps(Operands);
7785 Type *I32Ty = IntegerType::getInt32Ty(I->getContext());
7786 auto *EVI = cast<ExtractValueInst>(I);
7787 assert(EVI->getNumIndices() == 1 && "Expected one extractvalue index");
7788 unsigned Idx = EVI->getIndices()[0];
7789 NewOps.push_back(Plan.getOrAddLiveIn(ConstantInt::get(I32Ty, Idx, false)));
7790 return new VPWidenRecipe(*I, NewOps);
7791 }
7792 };
7793}
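// Illustrative example of the safe-divisor handling above (VF = 4): a
// division that only executes under a condition, e.g.
//   if (c[i]) r[i] = a[i] / b[i];
// is widened by first selecting a benign divisor for masked-off lanes,
//   %safe.rhs = select <4 x i1> %mask, <4 x i32> %b, <4 x i32> splat (i32 1)
//   %div = udiv <4 x i32> %a, %safe.rhs
// so inactive lanes divide by 1 rather than risking a trap.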
7794
7795VPHistogramRecipe *
7796VPRecipeBuilder::tryToWidenHistogram(const HistogramInfo *HI,
7797 ArrayRef<VPValue *> Operands) {
7798 // FIXME: Support other operations.
7799 unsigned Opcode = HI->Update->getOpcode();
7800 assert((Opcode == Instruction::Add || Opcode == Instruction::Sub) &&
7801 "Histogram update operation must be an Add or Sub");
7802
7803 SmallVector<VPValue *, 3> HGramOps;
7804 // Bucket address.
7805 HGramOps.push_back(Operands[1]);
7806 // Increment value.
7807 HGramOps.push_back(getVPValueOrAddLiveIn(HI->Update->getOperand(1)));
7808
7809 // In case of predicated execution (due to tail-folding, or conditional
7810 // execution, or both), pass the relevant mask.
7811 if (Legal->isMaskRequired(HI->Store))
7812 HGramOps.push_back(getBlockInMask(Builder.getInsertBlock()));
7813
7814 return new VPHistogramRecipe(Opcode, HGramOps, HI->Store->getDebugLoc());
7815}
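// Illustrative example: a histogram loop such as
//   for (i = 0; i < n; i++) buckets[indices[i]]++;
// produces a VPHistogramRecipe whose operands are the (gathered) bucket
// addresses and the increment value, plus the block-in mask when the store
// executes conditionally.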
7816
7817VPReplicateRecipe *
7818VPRecipeBuilder::handleReplication(Instruction *I, ArrayRef<VPValue *> Operands,
7819 VFRange &Range) {
7820 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
7821 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
7822 Range);
7823
7824 bool IsPredicated = CM.isPredicatedInst(I);
7825
7826 // Even if the instruction is not marked as uniform, there are certain
7827 // intrinsic calls that can be effectively treated as such, so we check for
7828 // them here. Conservatively, we only do this for scalable vectors, since
7829 // for fixed-width VFs we can always fall back on full scalarization.
7830 if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
7831 switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
7832 case Intrinsic::assume:
7833 case Intrinsic::lifetime_start:
7834 case Intrinsic::lifetime_end:
7835 // For scalable vectors, if one of the operands is variant then we still
7836 // want to mark the call as uniform, which will generate one instruction for
7837 // the first lane of the vector. We can't scalarize the call in the same
7838 // way as for fixed-width vectors because we don't know how many lanes
7839 // there are.
7840 //
7841 // The reasons for doing it this way for scalable vectors are:
7842 // 1. For the assume intrinsic generating the instruction for the first
7843 // lane is still better than not generating any at all. For
7844 // example, the input may be a splat across all lanes.
7845 // 2. For the lifetime start/end intrinsics the pointer operand only
7846 // does anything useful when the input comes from a stack object,
7847 // which suggests it should always be uniform. For non-stack objects
7848 // the effect is to poison the object, which still allows us to
7849 // remove the call.
7850 IsUniform = true;
7851 break;
7852 default:
7853 break;
7854 }
7855 }
7856 VPValue *BlockInMask = nullptr;
7857 if (!IsPredicated) {
7858 // Finalize the recipe for Instr, first if it is not predicated.
7859 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
7860 } else {
7861 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
7862 // Instructions marked for predication are replicated and a mask operand is
7863 // added initially. Masked replicate recipes will later be placed under an
7864 // if-then construct to prevent side-effects. Generate recipes to compute
7865 // the block mask for this region.
7866 BlockInMask = getBlockInMask(Builder.getInsertBlock());
7867 }
7868
7869 // Note that there is custom logic above that marks some intrinsics as
7870 // uniform for scalable vectors, which this assert needs to account for
7871 // as well.
7872 assert((Range.Start.isScalar() || !IsUniform || !IsPredicated ||
7873 (Range.Start.isScalable() && isa<IntrinsicInst>(I))) &&
7874 "Should not predicate a uniform recipe");
7875 auto *Recipe = new VPReplicateRecipe(I, Operands, IsUniform, BlockInMask,
7876 VPIRMetadata(*I, LVer));
7877 return Recipe;
7878}
7879
7880/// Find all possible partial reductions in the loop and track all of those that
7881/// are valid so recipes can be formed later.
7882void VPRecipeBuilder::collectScaledReductions(VFRange &Range) {
7883 // Find all possible partial reductions.
7884 SmallVector<std::pair<PartialReductionChain, unsigned>>
7885 PartialReductionChains;
7886 for (const auto &[Phi, RdxDesc] : Legal->getReductionVars()) {
7887 getScaledReductions(Phi, RdxDesc.getLoopExitInstr(), Range,
7888 PartialReductionChains);
7889 }
7890
7891 // A partial reduction is invalid if any of its extends are used by
7892 // something that isn't another partial reduction. This is because the
7893 // extends are intended to be lowered along with the reduction itself.
7894
7895 // Build up a set of partial reduction ops for efficient use checking.
7896 SmallPtrSet<User *, 4> PartialReductionOps;
7897 for (const auto &[PartialRdx, _] : PartialReductionChains)
7898 PartialReductionOps.insert(PartialRdx.ExtendUser);
7899
7900 auto ExtendIsOnlyUsedByPartialReductions =
7901 [&PartialReductionOps](Instruction *Extend) {
7902 return all_of(Extend->users(), [&](const User *U) {
7903 return PartialReductionOps.contains(U);
7904 });
7905 };
7906
7907 // Check if each use of a chain's two extends is a partial reduction
7908 // and only add those that don't have non-partial reduction users.
7909 for (auto Pair : PartialReductionChains) {
7910 PartialReductionChain Chain = Pair.first;
7911 if (ExtendIsOnlyUsedByPartialReductions(Chain.ExtendA) &&
7912 (!Chain.ExtendB || ExtendIsOnlyUsedByPartialReductions(Chain.ExtendB)))
7913 ScaledReductionMap.try_emplace(Chain.Reduction, Pair.second);
7914 }
7915}
7916
7917bool VPRecipeBuilder::getScaledReductions(
7918 Instruction *PHI, Instruction *RdxExitInstr, VFRange &Range,
7919 SmallVectorImpl<std::pair<PartialReductionChain, unsigned>> &Chains) {
7920 if (!CM.TheLoop->contains(RdxExitInstr))
7921 return false;
7922
7923 auto *Update = dyn_cast<BinaryOperator>(RdxExitInstr);
7924 if (!Update)
7925 return false;
7926
7927 Value *Op = Update->getOperand(0);
7928 Value *PhiOp = Update->getOperand(1);
7929 if (Op == PHI)
7930 std::swap(Op, PhiOp);
7931
7932 // Try to get a scaled reduction from the first non-phi operand.
7933 // If one is found, we use the discovered reduction instruction in
7934 // place of the accumulator for costing.
7935 if (auto *OpInst = dyn_cast<Instruction>(Op)) {
7936 if (getScaledReductions(PHI, OpInst, Range, Chains)) {
7937 PHI = Chains.rbegin()->first.Reduction;
7938
7939 Op = Update->getOperand(0);
7940 PhiOp = Update->getOperand(1);
7941 if (Op == PHI)
7942 std::swap(Op, PhiOp);
7943 }
7944 }
7945 if (PhiOp != PHI)
7946 return false;
7947
7948 using namespace llvm::PatternMatch;
7949
7950 // If the update is a binary operator, check both of its operands to see if
7951 // they are extends. Otherwise, see if the update comes directly from an
7952 // extend.
7953 Instruction *Exts[2] = {nullptr};
7954 BinaryOperator *ExtendUser = dyn_cast<BinaryOperator>(Op);
7955 std::optional<unsigned> BinOpc;
7956 Type *ExtOpTypes[2] = {nullptr};
7957 TTI::PartialReductionExtendKind ExtKinds[2] = {TTI::PR_None};
7958
7959 auto CollectExtInfo = [this, &Exts, &ExtOpTypes,
7960 &ExtKinds](SmallVectorImpl<Value *> &Ops) -> bool {
7961 for (const auto &[I, OpI] : enumerate(Ops)) {
7962 const APInt *C;
7963 if (I > 0 && match(OpI, m_APInt(C)) &&
7964 canConstantBeExtended(C, ExtOpTypes[0], ExtKinds[0])) {
7965 ExtOpTypes[I] = ExtOpTypes[0];
7966 ExtKinds[I] = ExtKinds[0];
7967 continue;
7968 }
7969 Value *ExtOp;
7970 if (!match(OpI, m_ZExtOrSExt(m_Value(ExtOp))))
7971 return false;
7972 Exts[I] = cast<Instruction>(OpI);
7973
7974 // TODO: We should be able to support live-ins.
7975 if (!CM.TheLoop->contains(Exts[I]))
7976 return false;
7977
7978 ExtOpTypes[I] = ExtOp->getType();
7979 ExtKinds[I] = TTI::getPartialReductionExtendKind(Exts[I]);
7980 }
7981 return true;
7982 };
7983
7984 if (ExtendUser) {
7985 if (!ExtendUser->hasOneUse())
7986 return false;
7987
7988 // Use the side effect of match to replace ExtendUser only if the pattern
7989 // is matched; we don't care at this point whether it actually matched.
7990 match(ExtendUser, m_Neg(m_BinOp(ExtendUser)));
7991
7992 SmallVector<Value *> Ops(ExtendUser->operands());
7993 if (!CollectExtInfo(Ops))
7994 return false;
7995
7996 BinOpc = std::make_optional(ExtendUser->getOpcode());
7997 } else if (match(Update, m_Add(m_Value(), m_Value()))) {
7998 // We already know the operands for Update are Op and PhiOp.
7999 SmallVector<Value *> Ops({Op, PhiOp});
8000 if (!CollectExtInfo(Ops))
8001 return false;
8002
8003 ExtendUser = Update;
8004 BinOpc = std::nullopt;
8005 } else
8006 return false;
8007
8008 PartialReductionChain Chain(RdxExitInstr, Exts[0], Exts[1], ExtendUser);
8009
8010 TypeSize PHISize = PHI->getType()->getPrimitiveSizeInBits();
8011 TypeSize ASize = ExtOpTypes[0]->getPrimitiveSizeInBits();
8012 if (!PHISize.hasKnownScalarFactor(ASize))
8013 return false;
8014 unsigned TargetScaleFactor = PHISize.getKnownScalarFactor(ASize);
8015
8016 if (LoopVectorizationPlanner::getDecisionAndClampRange(
8017 [&](ElementCount VF) {
8018 InstructionCost Cost = TTI->getPartialReductionCost(
8019 Update->getOpcode(), ExtOpTypes[0], ExtOpTypes[1],
8020 PHI->getType(), VF, ExtKinds[0], ExtKinds[1], BinOpc,
8021 CM.CostKind);
8022 return Cost.isValid();
8023 },
8024 Range)) {
8025 Chains.emplace_back(Chain, TargetScaleFactor);
8026 return true;
8027 }
8028
8029 return false;
8030}
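// Illustrative example of a chain recognized above: for
//   %ext.a = sext i8 %a to i32
//   %ext.b = sext i8 %b to i32
//   %mul = mul i32 %ext.a, %ext.b
//   %rdx.next = add i32 %rdx, %mul
// the i32 accumulator is 4x as wide as the i8 inputs, so the chain is
// recorded with scale factor 4: <16 x i8> inputs can feed a <4 x i32>
// accumulator (e.g. via dot-product style instructions) on targets where
// TTI reports a valid partial reduction cost.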
8031
8032VPRecipeBase *VPRecipeBuilder::tryToCreateWidenRecipe(VPSingleDefRecipe *R,
8033 VFRange &Range) {
8034 // First, check for specific widening recipes that deal with inductions, Phi
8035 // nodes, calls and memory operations.
8036 VPRecipeBase *Recipe;
8037 Instruction *Instr = R->getUnderlyingInstr();
8038 SmallVector<VPValue *, 4> Operands(R->operands());
8039 if (auto *PhiR = dyn_cast<VPPhi>(R)) {
8040 VPBasicBlock *Parent = PhiR->getParent();
8041 [[maybe_unused]] VPRegionBlock *LoopRegionOf =
8042 Parent->getEnclosingLoopRegion();
8043 assert(LoopRegionOf && LoopRegionOf->getEntry() == Parent &&
8044 "Non-header phis should have been handled during predication");
8045 auto *Phi = cast<PHINode>(R->getUnderlyingInstr());
8046 assert(Operands.size() == 2 && "Must have 2 operands for header phis");
8047 if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands, Range)))
8048 return Recipe;
8049
8050 VPHeaderPHIRecipe *PhiRecipe = nullptr;
8051 assert((Legal->isReductionVariable(Phi) ||
8052 Legal->isFixedOrderRecurrence(Phi)) &&
8053 "can only widen reductions and fixed-order recurrences here");
8054 VPValue *StartV = Operands[0];
8055 if (Legal->isReductionVariable(Phi)) {
8056 const RecurrenceDescriptor &RdxDesc = Legal->getRecurrenceDescriptor(Phi);
8057 assert(RdxDesc.getRecurrenceStartValue() ==
8058 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8059
8060 // If the PHI is used by a partial reduction, set the scale factor.
8061 unsigned ScaleFactor =
8062 getScalingForReduction(RdxDesc.getLoopExitInstr()).value_or(1);
8063 PhiRecipe = new VPReductionPHIRecipe(
8064 Phi, RdxDesc.getRecurrenceKind(), *StartV, CM.isInLoopReduction(Phi),
8065 CM.useOrderedReductions(RdxDesc), ScaleFactor);
8066 } else {
8067 // TODO: Currently fixed-order recurrences are modeled as chains of
8068 // first-order recurrences. If there are no users of the intermediate
8069 // recurrences in the chain, the fixed order recurrence should be modeled
8070 // directly, enabling more efficient codegen.
8071 PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV);
8072 }
8073 // Add backedge value.
8074 PhiRecipe->addOperand(Operands[1]);
8075 return PhiRecipe;
8076 }
8077 assert(!R->isPhi() && "only VPPhi nodes expected at this point");
8078
8079 if (isa<TruncInst>(Instr) && (Recipe = tryToOptimizeInductionTruncate(
8080 cast<TruncInst>(Instr), Operands, Range)))
8081 return Recipe;
8082
8083 // All widen recipes below deal only with VF > 1.
8084 if (LoopVectorizationPlanner::getDecisionAndClampRange(
8085 [&](ElementCount VF) { return VF.isScalar(); }, Range))
8086 return nullptr;
8087
8088 if (auto *CI = dyn_cast<CallInst>(Instr))
8089 return tryToWidenCall(CI, Operands, Range);
8090
8091 if (StoreInst *SI = dyn_cast<StoreInst>(Instr))
8092 if (auto HistInfo = Legal->getHistogramInfo(SI))
8093 return tryToWidenHistogram(*HistInfo, Operands);
8094
8095 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8096 return tryToWidenMemory(Instr, Operands, Range);
8097
8098 if (std::optional<unsigned> ScaleFactor = getScalingForReduction(Instr)) {
8099 if (auto PartialRed =
8100 tryToCreatePartialReduction(Instr, Operands, ScaleFactor.value()))
8101 return PartialRed;
8102 }
8103
8104 if (!shouldWiden(Instr, Range))
8105 return nullptr;
8106
8107 if (auto *GEP = dyn_cast<GetElementPtrInst>(Instr))
8108 return new VPWidenGEPRecipe(GEP, Operands);
8109
8110 if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8111 return new VPWidenSelectRecipe(*SI, Operands);
8112 }
8113
8114 if (auto *CI = dyn_cast<CastInst>(Instr)) {
8115 return new VPWidenCastRecipe(CI->getOpcode(), Operands[0], CI->getType(),
8116 *CI);
8117 }
8118
8119 return tryToWiden(Instr, Operands);
8120}
8121
8122VPRecipeBase *
8123VPRecipeBuilder::tryToCreatePartialReduction(Instruction *Reduction,
8124 ArrayRef<VPValue *> Operands,
8125 unsigned ScaleFactor) {
8126 assert(Operands.size() == 2 &&
8127 "Unexpected number of operands for partial reduction");
8128
8129 VPValue *BinOp = Operands[0];
8130 VPValue *Accumulator = Operands[1];
8131 VPRecipeBase *BinOpRecipe = BinOp->getDefiningRecipe();
8132 if (isa<VPReductionPHIRecipe>(BinOpRecipe) ||
8133 isa<VPPartialReductionRecipe>(BinOpRecipe))
8134 std::swap(BinOp, Accumulator);
8135
8136 if (ScaleFactor !=
8137 vputils::getVFScaleFactor(Accumulator->getDefiningRecipe()))
8138 return nullptr;
8139
8140 unsigned ReductionOpcode = Reduction->getOpcode();
8141 if (ReductionOpcode == Instruction::Sub) {
8142 auto *const Zero = ConstantInt::get(Reduction->getType(), 0);
8143 SmallVector<VPValue *, 2> Ops;
8144 Ops.push_back(Plan.getOrAddLiveIn(Zero));
8145 Ops.push_back(BinOp);
8146 BinOp = new VPWidenRecipe(*Reduction, Ops);
8147 Builder.insert(BinOp->getDefiningRecipe());
8148 ReductionOpcode = Instruction::Add;
8149 }
8150
8151 VPValue *Cond = nullptr;
8152 if (CM.blockNeedsPredicationForAnyReason(Reduction->getParent())) {
8153 assert((ReductionOpcode == Instruction::Add ||
8154 ReductionOpcode == Instruction::Sub) &&
8155 "Expected an ADD or SUB operation for predicated partial "
8156 "reductions (because the neutral element in the mask is zero)!");
8157 Cond = getBlockInMask(Builder.getInsertBlock());
8158 VPValue *Zero =
8159 Plan.getOrAddLiveIn(ConstantInt::get(Reduction->getType(), 0));
8160 BinOp = Builder.createSelect(Cond, BinOp, Zero, Reduction->getDebugLoc());
8161 }
8162 return new VPPartialReductionRecipe(ReductionOpcode, Accumulator, BinOp, Cond,
8163 ScaleFactor, Reduction);
8164}
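// Illustrative example of the Sub handling above: an update
//   %rdx.next = sub i32 %rdx, %val
// is rewritten as an add of the negated input, roughly
//   %neg = sub <VF x i32> zeroinitializer, %val
// followed by an add-based partial reduction of %neg into %rdx; under
// predication the input is first selected against zero, the neutral
// element of the add.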
8165
8166void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8167 ElementCount MaxVF) {
8168 if (ElementCount::isKnownGT(MinVF, MaxVF))
8169 return;
8170
8171 assert(OrigLoop->isInnermost() && "Inner loop expected.");
8172
8173 const LoopAccessInfo *LAI = Legal->getLAI();
8174 LoopVersioning LVer(*LAI, LAI->getRuntimePointerChecking()->getChecks(),
8175 OrigLoop, LI, DT, PSE.getSE());
8176 if (!LAI->getRuntimePointerChecking()->getChecks().empty() &&
8177 !LAI->getRuntimePointerChecking()->getDiffChecks()) {
8178 // Only use noalias metadata when using memory checks guaranteeing no
8179 // overlap across all iterations.
8180 LVer.prepareNoAliasMetadata();
8181 }
8182
8183 // Create initial base VPlan0, to serve as common starting point for all
8184 // candidates built later for specific VF ranges.
8185 auto VPlan0 = VPlanTransforms::buildVPlan0(
8186 OrigLoop, *LI, Legal->getWidestInductionType(),
8187 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()), PSE);
8188
8189 auto MaxVFTimes2 = MaxVF * 2;
8190 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFTimes2);) {
8191 VFRange SubRange = {VF, MaxVFTimes2};
8192 if (auto Plan = tryToBuildVPlanWithVPRecipes(
8193 std::unique_ptr<VPlan>(VPlan0->duplicate()), SubRange, &LVer)) {
8194 // Now optimize the initial VPlan.
8195 VPlanTransforms::runPass(VPlanTransforms::truncateToMinimalBitwidths,
8196 *Plan, CM.getMinimalBitwidths());
8197 VPlanTransforms::runPass(VPlanTransforms::optimize, *Plan);
8198 // TODO: try to put it close to addActiveLaneMask().
8199 if (CM.foldTailWithEVL())
8200 VPlanTransforms::runPass(VPlanTransforms::addExplicitVectorLength,
8201 *Plan, CM.getMaxSafeElements());
8202
8204 VPlans.push_back(std::move(P));
8205
8206 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
8207 VPlans.push_back(std::move(Plan));
8208 }
8209 VF = SubRange.End;
8210 }
8211}
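// Illustrative example of the VF-range iteration above: with MinVF = 4 and
// MaxVF = 16, the first candidate range is [4, 32). Each call to
// tryToBuildVPlanWithVPRecipes may clamp SubRange.End where per-VF decisions
// diverge, e.g. yielding one VPlan valid for [4, 8) and a second one for
// [8, 32), after which VF advances to SubRange.End.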
8212
8213VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(
8214 VPlanPtr Plan, VFRange &Range, LoopVersioning *LVer) {
8215
8216 using namespace llvm::VPlanPatternMatch;
8217 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
8218
8219 // ---------------------------------------------------------------------------
8220 // Build initial VPlan: Scan the body of the loop in a topological order to
8221 // visit each basic block after having visited its predecessor basic blocks.
8222 // ---------------------------------------------------------------------------
8223
8224 bool RequiresScalarEpilogueCheck =
8226 [this](ElementCount VF) {
8227 return !CM.requiresScalarEpilogue(VF.isVector());
8228 },
8229 Range);
8230 VPlanTransforms::handleEarlyExits(*Plan, Legal->hasUncountableEarlyExit());
8231 VPlanTransforms::addMiddleCheck(*Plan, RequiresScalarEpilogueCheck,
8232 CM.foldTailByMasking());
8233
8234 VPlanTransforms::createLoopRegions(*Plan);
8235
8236 // Don't use getDecisionAndClampRange here, because we don't know the UF
8237 // yet; it is better to be conservative here rather than to split this
8238 // up into different VPlans.
8239 // TODO: Consider using getDecisionAndClampRange here to split up VPlans.
8240 bool IVUpdateMayOverflow = false;
8241 for (ElementCount VF : Range)
8242 IVUpdateMayOverflow |= !isIndvarOverflowCheckKnownFalse(&CM, VF);
8243
8244 TailFoldingStyle Style = CM.getTailFoldingStyle(IVUpdateMayOverflow);
8245 // Use NUW for the induction increment if we proved that it won't overflow in
8246 // the vector loop or when not folding the tail. In the latter case, we know
8247 // that the canonical induction increment will not overflow as the vector trip
8248 // count is >= increment and a multiple of the increment.
8249 VPRegionBlock *LoopRegion = Plan->getVectorLoopRegion();
8250 bool HasNUW = !IVUpdateMayOverflow || Style == TailFoldingStyle::None;
8251 if (!HasNUW) {
8252 auto *IVInc =
8253 LoopRegion->getExitingBasicBlock()->getTerminator()->getOperand(0);
8254 assert(match(IVInc,
8255 m_VPInstruction<Instruction::Add>(
8256 m_Specific(LoopRegion->getCanonicalIV()), m_VPValue())) &&
8257 "Did not find the canonical IV increment");
8258 cast<VPRecipeWithIRFlags>(IVInc)->dropPoisonGeneratingFlags();
8259 }
8260
8261 // ---------------------------------------------------------------------------
8262 // Pre-construction: record ingredients whose recipes we'll need to further
8263 // process after constructing the initial VPlan.
8264 // ---------------------------------------------------------------------------
8265
8266 // For each interleave group which is relevant for this (possibly trimmed)
8267 // Range, add it to the set of groups to be later applied to the VPlan and add
8268 // placeholders for its members' Recipes which we'll be replacing with a
8269 // single VPInterleaveRecipe.
8270 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
8271 auto ApplyIG = [IG, this](ElementCount VF) -> bool {
8272 bool Result = (VF.isVector() && // Query is illegal for VF == 1
8273 CM.getWideningDecision(IG->getInsertPos(), VF) ==
8274 LoopVectorizationCostModel::CM_Interleave);
8275 // For scalable vectors, the interleave factors must be <= 8 since we
8276 // require the (de)interleaveN intrinsics instead of shufflevectors.
8277 assert((!Result || !VF.isScalable() || IG->getFactor() <= 8) &&
8278 "Unsupported interleave factor for scalable vectors");
8279 return Result;
8280 };
8281 if (!getDecisionAndClampRange(ApplyIG, Range))
8282 continue;
8283 InterleaveGroups.insert(IG);
8284 }
8285
8286 // ---------------------------------------------------------------------------
8287 // Predicate and linearize the top-level loop region.
8288 // ---------------------------------------------------------------------------
8289 auto BlockMaskCache = VPlanTransforms::introduceMasksAndLinearize(
8290 *Plan, CM.foldTailByMasking());
8291
8292 // ---------------------------------------------------------------------------
8293 // Construct wide recipes and apply predication for original scalar
8294 // VPInstructions in the loop.
8295 // ---------------------------------------------------------------------------
8296 VPRecipeBuilder RecipeBuilder(*Plan, OrigLoop, TLI, &TTI, Legal, CM, PSE,
8297 Builder, BlockMaskCache, LVer);
8298 RecipeBuilder.collectScaledReductions(Range);
8299
8300 // Scan the body of the loop in a topological order to visit each basic block
8301 // after having visited its predecessor basic blocks.
8302 VPBasicBlock *HeaderVPBB = LoopRegion->getEntryBasicBlock();
8303 ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>> RPOT(
8304 HeaderVPBB);
8305
8306 auto *MiddleVPBB = Plan->getMiddleBlock();
8307 VPBasicBlock::iterator MBIP = MiddleVPBB->getFirstNonPhi();
8308 // Mapping from VPValues in the initial plan to their widened VPValues. Needed
8309 // temporarily to update created block masks.
8310 DenseMap<VPValue *, VPValue *> Old2New;
8311 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) {
8312 // Convert input VPInstructions to widened recipes.
8313 for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
8314 auto *SingleDef = cast<VPSingleDefRecipe>(&R);
8315 auto *UnderlyingValue = SingleDef->getUnderlyingValue();
8316 // Skip recipes that do not need transforming, including canonical IV,
8317 // wide canonical IV and VPInstructions without underlying values. The
8318 // latter are added above for masking.
8319 // FIXME: Migrate code relying on the underlying instruction from VPlan0
8320 // to construct recipes below to not use the underlying instruction.
8321 if (isa<VPCanonicalIVPHIRecipe, VPWidenCanonicalIVRecipe>(
8322 &R) ||
8323 (isa<VPInstruction>(&R) && !UnderlyingValue))
8324 continue;
8325
8326 // FIXME: VPlan0, which models a copy of the original scalar loop, should
8327 // not use VPWidenPHIRecipe to model the phis.
8328 assert((isa<VPWidenPHIRecipe>(&R) || isa<VPInstruction>(&R)) &&
8329 UnderlyingValue && "unsupported recipe");
8330
8331 // TODO: Gradually replace uses of underlying instruction by analyses on
8332 // VPlan.
8333 Instruction *Instr = cast<Instruction>(UnderlyingValue);
8334 Builder.setInsertPoint(SingleDef);
8335
8336 // Stores to an invariant address inside the loop will be deleted, and a
8337 // uniform store recipe will be created in the exit block for the final
8338 // invariant store of the reduction.
8339 StoreInst *SI;
8340 if ((SI = dyn_cast<StoreInst>(Instr)) &&
8341 Legal->isInvariantAddressOfReduction(SI->getPointerOperand())) {
8342 // Only create recipe for the final invariant store of the reduction.
8343 if (Legal->isInvariantStoreOfReduction(SI)) {
8344 auto *Recipe =
8345 new VPReplicateRecipe(SI, R.operands(), true /* IsUniform */,
8346 nullptr /*Mask*/, VPIRMetadata(*SI, LVer));
8347 Recipe->insertBefore(*MiddleVPBB, MBIP);
8348 }
8349 R.eraseFromParent();
8350 continue;
8351 }
8352
8353 VPRecipeBase *Recipe =
8354 RecipeBuilder.tryToCreateWidenRecipe(SingleDef, Range);
8355 if (!Recipe)
8356 Recipe = RecipeBuilder.handleReplication(Instr, R.operands(), Range);
8357
8358 RecipeBuilder.setRecipe(Instr, Recipe);
8359 if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) && isa<TruncInst>(Instr)) {
8360 // Optimized a truncate to VPWidenIntOrFpInductionRecipe. It needs to be
8361 // moved to the phi section in the header.
8362 Recipe->insertBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi());
8363 } else {
8364 Builder.insert(Recipe);
8365 }
8366 if (Recipe->getNumDefinedValues() == 1) {
8367 SingleDef->replaceAllUsesWith(Recipe->getVPSingleValue());
8368 Old2New[SingleDef] = Recipe->getVPSingleValue();
8369 } else {
8370 assert(Recipe->getNumDefinedValues() == 0 &&
8371 "Unexpected multidef recipe");
8372 R.eraseFromParent();
8373 }
8374 }
8375 }
8376
8377 // replaceAllUsesWith above may invalidate the block masks. Update them here.
8378 // TODO: Include the masks as operands in the predicated VPlan directly
8379 // to remove the need to keep a map of masks beyond the predication
8380 // transform.
8381 RecipeBuilder.updateBlockMaskCache(Old2New);
8382 for (VPValue *Old : Old2New.keys())
8383 Old->getDefiningRecipe()->eraseFromParent();
8384
8385 assert(isa<VPRegionBlock>(LoopRegion) &&
8386 !LoopRegion->getEntryBasicBlock()->empty() &&
8387 "entry block must be set to a VPRegionBlock having a non-empty entry "
8388 "VPBasicBlock");
8389
8390 // Update wide induction increments to use the same step as the corresponding
8391 // wide induction. This enables detecting induction increments directly in
8392 // VPlan and removes redundant splats.
8393 for (const auto &[Phi, ID] : Legal->getInductionVars()) {
8394 auto *IVInc = cast<Instruction>(
8395 Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch()));
8396 if (IVInc->getOperand(0) != Phi || IVInc->getOpcode() != Instruction::Add)
8397 continue;
8398 VPWidenInductionRecipe *WideIV =
8399 cast<VPWidenInductionRecipe>(RecipeBuilder.getRecipe(Phi));
8400 VPRecipeBase *R = RecipeBuilder.getRecipe(IVInc);
8401 R->setOperand(1, WideIV->getStepValue());
8402 }
8403
8404 // TODO: We can't call runPass on these transforms yet, due to verifier
8405 // failures.
8407 DenseMap<VPValue *, VPValue *> IVEndValues;
8408 VPlanTransforms::addScalarResumePhis(*Plan, RecipeBuilder, IVEndValues);
8409
8410 // ---------------------------------------------------------------------------
8411 // Transform initial VPlan: Apply previously taken decisions, in order, to
8412 // bring the VPlan to its final state.
8413 // ---------------------------------------------------------------------------
8414
8415 // Adjust the recipes for any inloop reductions.
8416 adjustRecipesForReductions(Plan, RecipeBuilder, Range.Start);
8417
8418 // Apply mandatory transformation to handle FP maxnum/minnum reduction with
8419 // NaNs if possible, bail out otherwise.
8420 if (!VPlanTransforms::runPass(VPlanTransforms::handleMaxMinNumReductions,
8421 *Plan))
8422 return nullptr;
8423
8424 // Transform recipes to abstract recipes if it is legal and beneficial and
8425 // clamp the range for better cost estimation.
8426 // TODO: Enable following transform when the EVL-version of extended-reduction
8427 // and mulacc-reduction are implemented.
8428 if (!CM.foldTailWithEVL()) {
8429 VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind,
8430 *CM.PSE.getSE());
8431 VPlanTransforms::runPass(VPlanTransforms::convertToAbstractRecipes, *Plan,
8432 CostCtx, Range);
8433 }
8434
8435 for (ElementCount VF : Range)
8436 Plan->addVF(VF);
8437 Plan->setName("Initial VPlan");
8438
8439 // Interleave memory: for each Interleave Group we marked earlier as relevant
8440 // for this VPlan, replace the Recipes widening its memory instructions with a
8441 // single VPInterleaveRecipe at its insertion point.
8442 VPlanTransforms::runPass(VPlanTransforms::createInterleaveGroups, *Plan,
8443 InterleaveGroups, RecipeBuilder,
8444 CM.isScalarEpilogueAllowed());
8445
8446 // Replace VPValues for known constant strides.
8447 VPlanTransforms::runPass(VPlanTransforms::replaceSymbolicStrides, *Plan, PSE,
8448 Legal->getLAI()->getSymbolicStrides());
8449
8450 auto BlockNeedsPredication = [this](BasicBlock *BB) {
8451 return Legal->blockNeedsPredication(BB);
8452 };
8453 VPlanTransforms::runPass(VPlanTransforms::dropPoisonGeneratingRecipes, *Plan,
8454 BlockNeedsPredication);
8455
8456 // Sink users of fixed-order recurrence past the recipe defining the previous
8457 // value and introduce FirstOrderRecurrenceSplice VPInstructions.
8458 if (!VPlanTransforms::runPass(VPlanTransforms::adjustFixedOrderRecurrences,
8459 *Plan, Builder))
8460 return nullptr;
8461
8462 if (useActiveLaneMask(Style)) {
8463 // TODO: Move checks to VPlanTransforms::addActiveLaneMask once
8464 // TailFoldingStyle is visible there.
8465 bool ForControlFlow = useActiveLaneMaskForControlFlow(Style);
8466 bool WithoutRuntimeCheck =
8467 Style == TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck;
8468 VPlanTransforms::addActiveLaneMask(*Plan, ForControlFlow,
8469 WithoutRuntimeCheck);
8470 }
8471 VPlanTransforms::optimizeInductionExitUsers(*Plan, IVEndValues, *PSE.getSE());
8472
8473 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
8474 return Plan;
8475}
8476
8477VPlanPtr LoopVectorizationPlanner::tryToBuildVPlan(VFRange &Range) {
8478 // Outer loop handling: They may require CFG and instruction level
8479 // transformations before even evaluating whether vectorization is profitable.
8480 // Since we cannot modify the incoming IR, we need to build VPlan upfront in
8481 // the vectorization pipeline.
8482 assert(!OrigLoop->isInnermost());
8483 assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
8484
8485 auto Plan = VPlanTransforms::buildVPlan0(
8486 OrigLoop, *LI, Legal->getWidestInductionType(),
8487 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()), PSE);
8488 VPlanTransforms::handleEarlyExits(*Plan,
8489 /*HasUncountableExit*/ false);
8490 VPlanTransforms::addMiddleCheck(*Plan, /*RequiresScalarEpilogue*/ true,
8491 /*TailFolded*/ false);
8492
8493 VPlanTransforms::createLoopRegions(*Plan);
8494
8495 for (ElementCount VF : Range)
8496 Plan->addVF(VF);
8497
8498 if (!VPlanTransforms::tryToConvertVPInstructionsToVPRecipes(
8499 *Plan,
8500 [this](PHINode *P) {
8501 return Legal->getIntOrFpInductionDescriptor(P);
8502 },
8503 *TLI))
8504 return nullptr;
8505
8506 // Collect mapping of IR header phis to header phi recipes, to be used in
8507 // addScalarResumePhis.
8508 DenseMap<VPBasicBlock *, VPValue *> BlockMaskCache;
8509 VPRecipeBuilder RecipeBuilder(*Plan, OrigLoop, TLI, &TTI, Legal, CM, PSE,
8510 Builder, BlockMaskCache, nullptr /*LVer*/);
8511 for (auto &R : Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
8512 if (isa<VPCanonicalIVPHIRecipe>(&R))
8513 continue;
8514 auto *HeaderR = cast<VPHeaderPHIRecipe>(&R);
8515 RecipeBuilder.setRecipe(HeaderR->getUnderlyingInstr(), HeaderR);
8516 }
8517 DenseMap<VPValue *, VPValue *> IVEndValues;
8518 // TODO: IVEndValues are not used yet in the native path, to optimize exit
8519 // values.
8520 // TODO: We can't call runPass on the transform yet, due to verifier
8521 // failures.
8522 VPlanTransforms::addScalarResumePhis(*Plan, RecipeBuilder, IVEndValues);
8523
8524 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
8525 return Plan;
8526}
8527
8528// Adjust the recipes for reductions. For in-loop reductions the chain of
8529// instructions leading from the loop exit instr to the phi needs to be converted
8530// to reductions, with one operand being vector and the other being the scalar
8531// reduction chain. For other reductions, a select is introduced between the phi
8532// and users outside the vector region when folding the tail.
8533//
8534// A ComputeReductionResult recipe is added to the middle block, also for
8535// in-loop reductions which compute their result in-loop, because generating
8536// the subsequent bc.merge.rdx phi is driven by ComputeReductionResult recipes.
8537//
8538// Adjust AnyOf reductions; replace the reduction phi for the selected value
8539// with a boolean reduction phi node to check if the condition is true in any
8540// iteration. The final value is selected by the final ComputeReductionResult.
8541void LoopVectorizationPlanner::adjustRecipesForReductions(
8542 VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder, ElementCount MinVF) {
8543 using namespace VPlanPatternMatch;
8544 VPRegionBlock *VectorLoopRegion = Plan->getVectorLoopRegion();
8545 VPBasicBlock *Header = VectorLoopRegion->getEntryBasicBlock();
8546 VPBasicBlock *MiddleVPBB = Plan->getMiddleBlock();
8547 SmallVector<VPRecipeBase *> ToDelete;
8548
8549 for (VPRecipeBase &R : Header->phis()) {
8550 auto *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
8551 if (!PhiR || !PhiR->isInLoop() || (MinVF.isScalar() && !PhiR->isOrdered()))
8552 continue;
8553
8554 RecurKind Kind = PhiR->getRecurrenceKind();
8555 assert(
8556 !RecurrenceDescriptor::isAnyOfRecurrenceKind(Kind) &&
8557 !RecurrenceDescriptor::isFindIVRecurrenceKind(Kind) &&
8558 "AnyOf and FindIV reductions are not allowed for in-loop reductions");
8559
8560 // Collect the chain of "link" recipes for the reduction starting at PhiR.
8561 SetVector<VPSingleDefRecipe *> Worklist;
8562 Worklist.insert(PhiR);
8563 for (unsigned I = 0; I != Worklist.size(); ++I) {
8564 VPSingleDefRecipe *Cur = Worklist[I];
8565 for (VPUser *U : Cur->users()) {
8566 auto *UserRecipe = cast<VPSingleDefRecipe>(U);
8567 if (!UserRecipe->getParent()->getEnclosingLoopRegion()) {
8568 assert((UserRecipe->getParent() == MiddleVPBB ||
8569 UserRecipe->getParent() == Plan->getScalarPreheader()) &&
8570 "U must be either in the loop region, the middle block or the "
8571 "scalar preheader.");
8572 continue;
8573 }
8574 Worklist.insert(UserRecipe);
8575 }
8576 }
8577
8578 // Visit operation "Links" along the reduction chain top-down starting from
8579 // the phi until LoopExitValue. We keep track of the previous item
8580 // (PreviousLink) to tell which of the two operands of a Link will remain
8581 // scalar and which will be reduced. For minmax by select(cmp), Link will be
8582 // the select instructions. Blend recipes of in-loop reduction phi's will
8583 // get folded to their non-phi operand, as the reduction recipe handles the
8584 // condition directly.
8585 VPSingleDefRecipe *PreviousLink = PhiR; // Aka Worklist[0].
8586 for (VPSingleDefRecipe *CurrentLink : drop_begin(Worklist)) {
8587 if (auto *Blend = dyn_cast<VPBlendRecipe>(CurrentLink)) {
8588 assert(Blend->getNumIncomingValues() == 2 &&
8589 "Blend must have 2 incoming values");
8590 if (Blend->getIncomingValue(0) == PhiR) {
8591 Blend->replaceAllUsesWith(Blend->getIncomingValue(1));
8592 } else {
8593 assert(Blend->getIncomingValue(1) == PhiR &&
8594 "PhiR must be an operand of the blend");
8595 Blend->replaceAllUsesWith(Blend->getIncomingValue(0));
8596 }
8597 continue;
8598 }
8599
8600 Instruction *CurrentLinkI = CurrentLink->getUnderlyingInstr();
8601
8602 // Index of the first operand which holds a non-mask vector operand.
8603 unsigned IndexOfFirstOperand;
8604 // Recognize a call to the llvm.fmuladd intrinsic.
8605 bool IsFMulAdd = (Kind == RecurKind::FMulAdd);
8606 VPValue *VecOp;
8607 VPBasicBlock *LinkVPBB = CurrentLink->getParent();
8608 if (IsFMulAdd) {
8609 assert(
8610 RecurrenceDescriptor::isFMulAddIntrinsic(CurrentLinkI) &&
8611 "Expected instruction to be a call to the llvm.fmuladd intrinsic");
8612 assert(((MinVF.isScalar() && isa<VPReplicateRecipe>(CurrentLink)) ||
8613 isa<VPWidenIntrinsicRecipe>(CurrentLink)) &&
8614 CurrentLink->getOperand(2) == PreviousLink &&
8615 "expected a call where the previous link is the added operand");
8616
8617 // If the instruction is a call to the llvm.fmuladd intrinsic then we
8618 // need to create an fmul recipe (multiplying the first two operands of
8619 // the fmuladd together) to use as the vector operand for the fadd
8620 // reduction.
8621 VPInstruction *FMulRecipe = new VPInstruction(
8622 Instruction::FMul,
8623 {CurrentLink->getOperand(0), CurrentLink->getOperand(1)},
8624 CurrentLinkI->getFastMathFlags());
8625 LinkVPBB->insert(FMulRecipe, CurrentLink->getIterator());
8626 VecOp = FMulRecipe;
8627 } else if (PhiR->isInLoop() && Kind == RecurKind::AddChainWithSubs &&
8628 CurrentLinkI->getOpcode() == Instruction::Sub) {
8629 Type *PhiTy = PhiR->getUnderlyingValue()->getType();
8630 auto *Zero = Plan->getOrAddLiveIn(ConstantInt::get(PhiTy, 0));
8631 VPWidenRecipe *Sub = new VPWidenRecipe(
8632 Instruction::Sub, {Zero, CurrentLink->getOperand(1)}, {},
8633 VPIRMetadata(), CurrentLinkI->getDebugLoc());
8634 Sub->setUnderlyingValue(CurrentLinkI);
8635 LinkVPBB->insert(Sub, CurrentLink->getIterator());
8636 VecOp = Sub;
8637 } else {
8638 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
8639 if (isa<VPWidenRecipe>(CurrentLink)) {
8640 assert(isa<CmpInst>(CurrentLinkI) &&
8641 "need to have the compare of the select");
8642 continue;
8643 }
8644 assert(isa<VPWidenSelectRecipe>(CurrentLink) &&
8645 "must be a select recipe");
8646 IndexOfFirstOperand = 1;
8647 } else {
8648 assert((MinVF.isScalar() || isa<VPWidenRecipe>(CurrentLink)) &&
8649 "Expected to replace a VPWidenSC");
8650 IndexOfFirstOperand = 0;
8651 }
8652 // Note that for non-commutable operands (cmp-selects), the semantics of
8653 // the cmp-select are captured in the recurrence kind.
8654 unsigned VecOpId =
8655 CurrentLink->getOperand(IndexOfFirstOperand) == PreviousLink
8656 ? IndexOfFirstOperand + 1
8657 : IndexOfFirstOperand;
8658 VecOp = CurrentLink->getOperand(VecOpId);
8659 assert(VecOp != PreviousLink &&
8660 CurrentLink->getOperand(CurrentLink->getNumOperands() - 1 -
8661 (VecOpId - IndexOfFirstOperand)) ==
8662 PreviousLink &&
8663 "PreviousLink must be the operand other than VecOp");
8664 }
8665
8666 VPValue *CondOp = nullptr;
8667 if (CM.blockNeedsPredicationForAnyReason(CurrentLinkI->getParent()))
8668 CondOp = RecipeBuilder.getBlockInMask(CurrentLink->getParent());
8669
8670 // TODO: Retrieve FMFs from recipes directly.
8671 RecurrenceDescriptor RdxDesc = Legal->getRecurrenceDescriptor(
8672 cast<PHINode>(PhiR->getUnderlyingInstr()));
8673 // Non-FP RdxDescs will have all fast math flags set, so clear them.
8674 FastMathFlags FMFs = isa<FPMathOperator>(CurrentLinkI)
8675 ? RdxDesc.getFastMathFlags()
8676 : FastMathFlags();
8677 auto *RedRecipe = new VPReductionRecipe(
8678 Kind, FMFs, CurrentLinkI, PreviousLink, VecOp, CondOp,
8679 PhiR->isOrdered(), CurrentLinkI->getDebugLoc());
8680 // Append the recipe to the end of the VPBasicBlock because we need to
8681 // ensure that it comes after all of its inputs, including CondOp.
8682 // Delete CurrentLink as it will be invalid if its operand is replaced
8683 // with a reduction defined at the bottom of the block in the next link.
8684 if (LinkVPBB->getNumSuccessors() == 0)
8685 RedRecipe->insertBefore(&*std::prev(std::prev(LinkVPBB->end())));
8686 else
8687 LinkVPBB->appendRecipe(RedRecipe);
8688
8689 CurrentLink->replaceAllUsesWith(RedRecipe);
8690 ToDelete.push_back(CurrentLink);
8691 PreviousLink = RedRecipe;
8692 }
8693 }
8694 VPBasicBlock *LatchVPBB = VectorLoopRegion->getExitingBasicBlock();
8695 Builder.setInsertPoint(&*std::prev(std::prev(LatchVPBB->end())));
8696 VPBasicBlock::iterator IP = MiddleVPBB->getFirstNonPhi();
8697 for (VPRecipeBase &R :
8698 Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
8699 VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
8700 if (!PhiR)
8701 continue;
8702
8703 const RecurrenceDescriptor &RdxDesc = Legal->getRecurrenceDescriptor(
8704 cast<PHINode>(PhiR->getUnderlyingInstr()));
8705 Type *PhiTy = PhiR->getUnderlyingValue()->getType();
8706 // If tail is folded by masking, introduce selects between the phi
8707 // and the users outside the vector region of each reduction, at the
8708 // beginning of the dedicated latch block.
8709 auto *OrigExitingVPV = PhiR->getBackedgeValue();
8710 auto *NewExitingVPV = PhiR->getBackedgeValue();
8711 // Don't output selects for partial reductions because they have an output
8712 // with fewer lanes than the VF. So the operands of the select would have
8713 // different numbers of lanes. Partial reductions mask the input instead.
8714 if (!PhiR->isInLoop() && CM.foldTailByMasking() &&
8715 !isa<VPPartialReductionRecipe>(OrigExitingVPV->getDefiningRecipe())) {
8716 VPValue *Cond = RecipeBuilder.getBlockInMask(PhiR->getParent());
8717 std::optional<FastMathFlags> FMFs =
8718 PhiTy->isFloatingPointTy()
8719 ? std::make_optional(RdxDesc.getFastMathFlags())
8720 : std::nullopt;
8721 NewExitingVPV =
8722 Builder.createSelect(Cond, OrigExitingVPV, PhiR, {}, "", FMFs);
8723 OrigExitingVPV->replaceUsesWithIf(NewExitingVPV, [](VPUser &U, unsigned) {
8724 return isa<VPInstruction>(&U) &&
8725 (cast<VPInstruction>(&U)->getOpcode() ==
8726 VPInstruction::ComputeAnyOfResult ||
8727 cast<VPInstruction>(&U)->getOpcode() ==
8728 VPInstruction::ComputeReductionResult ||
8729 cast<VPInstruction>(&U)->getOpcode() ==
8730 VPInstruction::ComputeFindIVResult);
8731 });
8732 if (CM.usePredicatedReductionSelect())
8733 PhiR->setOperand(1, NewExitingVPV);
8734 }
8735
8736 // We want code in the middle block to appear to execute on the location of
8737 // the scalar loop's latch terminator because: (a) it is all compiler
8738 // generated, (b) these instructions are always executed after evaluating
8739 // the latch conditional branch, and (c) other passes may add new
8740 // predecessors which terminate on this line. This is the easiest way to
8741 // ensure we don't accidentally cause an extra step back into the loop while
8742 // debugging.
8743 DebugLoc ExitDL = OrigLoop->getLoopLatch()->getTerminator()->getDebugLoc();
8744
8745 // TODO: At the moment ComputeReductionResult also drives creation of the
8746 // bc.merge.rdx phi nodes, hence it needs to be created unconditionally here
8747 // even for in-loop reductions, until the reduction resume value handling is
8748 // also modeled in VPlan.
8749 VPInstruction *FinalReductionResult;
8750 VPBuilder::InsertPointGuard Guard(Builder);
8751 Builder.setInsertPoint(MiddleVPBB, IP);
8752 RecurKind RecurrenceKind = PhiR->getRecurrenceKind();
8753 if (RecurrenceDescriptor::isFindIVRecurrenceKind(RecurrenceKind)) {
8754 VPValue *Start = PhiR->getStartValue();
8755 VPValue *Sentinel = Plan->getOrAddLiveIn(RdxDesc.getSentinelValue());
8756 FinalReductionResult =
8757 Builder.createNaryOp(VPInstruction::ComputeFindIVResult,
8758 {PhiR, Start, Sentinel, NewExitingVPV}, ExitDL);
8759 } else if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RecurrenceKind)) {
8760 VPValue *Start = PhiR->getStartValue();
8761 FinalReductionResult =
8762 Builder.createNaryOp(VPInstruction::ComputeAnyOfResult,
8763 {PhiR, Start, NewExitingVPV}, ExitDL);
8764 } else {
8765 VPIRFlags Flags =
8766 PhiTy->isFloatingPointTy()
8767 ? VPIRFlags(RdxDesc.getFastMathFlags())
8768 : VPIRFlags();
8769 FinalReductionResult =
8770 Builder.createNaryOp(VPInstruction::ComputeReductionResult,
8771 {PhiR, NewExitingVPV}, Flags, ExitDL);
8772 }
8773 // If the vector reduction can be performed in a smaller type, we truncate
8774 // then extend the loop exit value to enable InstCombine to evaluate the
8775 // entire expression in the smaller type.
8776 if (MinVF.isVector() && PhiTy != RdxDesc.getRecurrenceType() &&
8777 !RecurrenceDescriptor::isAnyOfRecurrenceKind(RecurrenceKind)) {
8778 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
8779 assert(!RecurrenceDescriptor::isMinMaxRecurrenceKind(RecurrenceKind) &&
8780 "Unexpected truncated min-max recurrence!");
8781 Type *RdxTy = RdxDesc.getRecurrenceType();
8782 VPWidenCastRecipe *Trunc;
8783 Instruction::CastOps ExtendOpc =
8784 RdxDesc.isSigned() ? Instruction::SExt : Instruction::ZExt;
8785 VPWidenCastRecipe *Extnd;
8786 {
8787 VPBuilder::InsertPointGuard Guard(Builder);
8788 Builder.setInsertPoint(
8789 NewExitingVPV->getDefiningRecipe()->getParent(),
8790 std::next(NewExitingVPV->getDefiningRecipe()->getIterator()));
8791 Trunc =
8792 Builder.createWidenCast(Instruction::Trunc, NewExitingVPV, RdxTy);
8793 Extnd = Builder.createWidenCast(ExtendOpc, Trunc, PhiTy);
8794 }
8795 if (PhiR->getOperand(1) == NewExitingVPV)
8796 PhiR->setOperand(1, Extnd->getVPSingleValue());
8797
8798 // Update ComputeReductionResult with the truncated exiting value and
8799 // extend its result.
8800 FinalReductionResult->setOperand(1, Trunc);
8801 FinalReductionResult =
8802 Builder.createScalarCast(ExtendOpc, FinalReductionResult, PhiTy, {});
8803 }
8804
8805 // Update all users outside the vector region. Also replace redundant
8806 // ExtractLastElement.
8807 for (auto *U : to_vector(OrigExitingVPV->users())) {
8808 auto *Parent = cast<VPRecipeBase>(U)->getParent();
8809 if (FinalReductionResult == U || Parent->getParent())
8810 continue;
8811 U->replaceUsesOfWith(OrigExitingVPV, FinalReductionResult);
8812 if (match(U, m_ExtractLastElement(m_VPValue())))
8813 cast<VPInstruction>(U)->replaceAllUsesWith(FinalReductionResult);
8814 }
8815
8816 // Adjust AnyOf reductions; replace the reduction phi for the selected value
8817 // with a boolean reduction phi node to check if the condition is true in
8818 // any iteration. The final value is selected by the final
8819 // ComputeReductionResult.
8820 if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RecurrenceKind)) {
8821 auto *Select = cast<VPRecipeBase>(*find_if(PhiR->users(), [](VPUser *U) {
8822 return isa<VPWidenSelectRecipe>(U) ||
8823 (isa<VPReplicateRecipe>(U) &&
8824 cast<VPReplicateRecipe>(U)->getUnderlyingInstr()->getOpcode() ==
8825 Instruction::Select);
8826 }));
8827 VPValue *Cmp = Select->getOperand(0);
8828 // If the compare is checking the reduction PHI node, adjust it to check
8829 // the start value.
8830 if (VPRecipeBase *CmpR = Cmp->getDefiningRecipe())
8831 CmpR->replaceUsesOfWith(PhiR, PhiR->getStartValue());
8832 Builder.setInsertPoint(Select);
8833
8834 // If the true value of the select is the reduction phi, the new value is
8835 // selected if the negated condition is true in any iteration.
8836 if (Select->getOperand(1) == PhiR)
8837 Cmp = Builder.createNot(Cmp);
8838 VPValue *Or = Builder.createOr(PhiR, Cmp);
8839 Select->getVPSingleValue()->replaceAllUsesWith(Or);
8840 // Delete Select now that it has invalid types.
8841 ToDelete.push_back(Select);
8842
8843 // Convert the reduction phi to operate on bools.
8844 PhiR->setOperand(0, Plan->getOrAddLiveIn(ConstantInt::getFalse(
8845 OrigLoop->getHeader()->getContext())));
8846 continue;
8847 }
8848
8849 if (RecurrenceDescriptor::isFindIVRecurrenceKind(
8850 RdxDesc.getRecurrenceKind())) {
8851 // Adjust the start value for FindFirstIV/FindLastIV recurrences to use
8852 // the sentinel value after generating the ResumePhi recipe, which uses
8853 // the original start value.
8854 PhiR->setOperand(0, Plan->getOrAddLiveIn(RdxDesc.getSentinelValue()));
8855 }
8856 RecurKind RK = RdxDesc.getRecurrenceKind();
8857 if (!RecurrenceDescriptor::isAnyOfRecurrenceKind(RK) &&
8858 !RecurrenceDescriptor::isFindIVRecurrenceKind(RK) &&
8859 !RecurrenceDescriptor::isMinMaxRecurrenceKind(RK)) {
8860 VPBuilder PHBuilder(Plan->getVectorPreheader());
8861 VPValue *Iden = Plan->getOrAddLiveIn(
8862 getRecurrenceIdentity(RK, PhiTy, RdxDesc.getFastMathFlags()));
8863 // If the PHI is used by a partial reduction, set the scale factor.
8864 unsigned ScaleFactor =
8865 RecipeBuilder.getScalingForReduction(RdxDesc.getLoopExitInstr())
8866 .value_or(1);
8867 Type *I32Ty = IntegerType::getInt32Ty(PhiTy->getContext());
8868 auto *ScaleFactorVPV =
8869 Plan->getOrAddLiveIn(ConstantInt::get(I32Ty, ScaleFactor));
8870 VPValue *StartV = PHBuilder.createNaryOp(
8871 VPInstruction::ReductionStartVector,
8872 {PhiR->getStartValue(), Iden, ScaleFactorVPV},
8873 PhiTy->isFloatingPointTy() ? RdxDesc.getFastMathFlags()
8874 : FastMathFlags());
8875 PhiR->setOperand(0, StartV);
8876 }
8877 }
8878 for (VPRecipeBase *R : ToDelete)
8879 R->eraseFromParent();
8880
8882}
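// Illustrative example of the in-loop FMulAdd handling above: a link
//   %sum.next = call float @llvm.fmuladd.f32(float %a, float %b, float %sum)
// is split into a separate multiply feeding a reduction, roughly
//   %mul = fmul <VF x float> %va, %vb
// followed by an in-loop fadd reduction of %mul into %sum, masked by the
// block-in mask when the block needs predication.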
8883
8884void LoopVectorizationPlanner::attachRuntimeChecks(
8885 VPlan &Plan, GeneratedRTChecks &RTChecks, bool HasBranchWeights) const {
8886 const auto &[SCEVCheckCond, SCEVCheckBlock] = RTChecks.getSCEVChecks();
8887 if (SCEVCheckBlock && SCEVCheckBlock->hasNPredecessors(0)) {
8888 assert((!CM.OptForSize ||
8889 CM.Hints->getForce() == LoopVectorizeHints::FK_Enabled) &&
8890 "Cannot SCEV check stride or overflow when optimizing for size");
8891 VPlanTransforms::attachCheckBlock(Plan, SCEVCheckCond, SCEVCheckBlock,
8892 HasBranchWeights);
8893 }
8894 const auto &[MemCheckCond, MemCheckBlock] = RTChecks.getMemRuntimeChecks();
8895 if (MemCheckBlock && MemCheckBlock->hasNPredecessors(0)) {
8896 // VPlan-native path does not do any analysis for runtime checks
8897 // currently.
8898 assert((!EnableVPlanNativePath || OrigLoop->isInnermost()) &&
8899 "Runtime checks are not supported for outer loops yet");
8900
8901 if (CM.OptForSize) {
8902 assert(
8903 CM.Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
8904 "Cannot emit memory checks when optimizing for size, unless forced "
8905 "to vectorize.");
8906 ORE->emit([&]() {
8907 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
8908 OrigLoop->getStartLoc(),
8909 OrigLoop->getHeader())
8910 << "Code-size may be reduced by not forcing "
8911 "vectorization, or by source-code modifications "
8912 "eliminating the need for runtime checks "
8913 "(e.g., adding 'restrict').";
8914 });
8915 }
8916 VPlanTransforms::attachCheckBlock(Plan, MemCheckCond, MemCheckBlock,
8917 HasBranchWeights);
8918 }
8919}
8920
8921void LoopVectorizationPlanner::addMinimumIterationCheck(
8922 VPlan &Plan, ElementCount VF, unsigned UF,
8923 ElementCount MinProfitableTripCount) const {
8924 // vscale is not necessarily a power-of-2, which means we cannot guarantee
8925 // an overflow to zero when updating induction variables and so an
8926 // additional overflow check is required before entering the vector loop.
8927 bool IsIndvarOverflowCheckNeededForVF =
8928 VF.isScalable() && !TTI.isVScaleKnownToBeAPowerOfTwo() &&
8929 !isIndvarOverflowCheckKnownFalse(&CM, VF, UF) &&
8930 CM.getTailFoldingStyle() !=
8931 TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck;
8932 const uint32_t *BranchWeigths =
8933 hasBranchWeightMD(*OrigLoop->getLoopLatch()->getTerminator())
8934 ? MinItersBypassWeights
8935 : nullptr;
8936 VPlanTransforms::addMinimumIterationCheck(
8937 Plan, VF, UF, MinProfitableTripCount,
8938 CM.requiresScalarEpilogue(VF.isVector()), CM.foldTailByMasking(),
8939 IsIndvarOverflowCheckNeededForVF, OrigLoop, BranchWeigths,
8940 OrigLoop->getLoopPredecessor()->getTerminator()->getDebugLoc(),
8941 *PSE.getSE());
8942}
8943
8944void VPDerivedIVRecipe::execute(VPTransformState &State) {
8945 assert(!State.Lane && "VPDerivedIVRecipe being replicated.");
8946
8947 // Fast-math-flags propagate from the original induction instruction.
8948 IRBuilder<>::FastMathFlagGuard FMFG(State.Builder);
8949 if (FPBinOp)
8950 State.Builder.setFastMathFlags(FPBinOp->getFastMathFlags());
8951
8952 Value *Step = State.get(getStepValue(), VPLane(0));
8953 Value *Index = State.get(getOperand(1), VPLane(0));
8954 Value *DerivedIV = emitTransformedIndex(
8955 State.Builder, Index, getStartValue()->getLiveInIRValue(), Step, Kind,
8956 cast_if_present<BinaryOperator>(FPBinOp));
8957 DerivedIV->setName(Name);
8958 State.set(this, DerivedIV, VPLane(0));
8959}
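// Illustrative note: for an induction with start S and stride C, the derived
// IV computed above for a canonical index %idx is
//   %derived.iv = S + %idx * C
// using the corresponding floating-point operations (with the propagated
// fast-math flags) for FP inductions.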
8960
8961// Determine how to lower the scalar epilogue, which depends on 1) optimising
8962// for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
8963// predication, and 4) a TTI hook that analyses whether the loop is suitable
8964 // for predication.
8965static ScalarEpilogueLowering getScalarEpilogueLowering(
8966 Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
8967 BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
8968 LoopVectorizationLegality &LVL, InterleavedAccessInfo *IAI) {
8969 // 1) OptSize takes precedence over all other options, i.e. if this is set,
8970 // don't look at hints or options, and don't request a scalar epilogue.
8971 // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
8972 // LoopAccessInfo (due to code dependency and not being able to reliably get
8973 // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
8974 // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
8975 // versioning when the vectorization is forced, unlike hasOptSize. So revert
8976 // back to the old way and vectorize with versioning when forced. See D81345.)
8977 if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
8978 PGSOQueryType::IRPass) &&
8979 Hints.getForce() != LoopVectorizeHints::FK_Enabled))
8980 return CM_ScalarEpilogueNotAllowedOptSize;
8981
8982 // 2) If set, obey the directives
8983 if (PreferPredicateOverEpilogue.getNumOccurrences()) {
8984 switch (PreferPredicateOverEpilogue) {
8985 case PreferPredicateTy::ScalarEpilogue:
8986 return CM_ScalarEpilogueAllowed;
8987 case PreferPredicateTy::PredicateElseScalarEpilogue:
8988 return CM_ScalarEpilogueNotNeededUsePredicate;
8989 case PreferPredicateTy::PredicateOrDontVectorize:
8990 return CM_ScalarEpilogueNotAllowedUsePredicate;
8991 };
8992 }
8993
8994 // 3) If set, obey the hints
8995 switch (Hints.getPredicate()) {
8996 case LoopVectorizeHints::FK_Enabled:
8997 return CM_ScalarEpilogueNotNeededUsePredicate;
8998 case LoopVectorizeHints::FK_Disabled:
8999 return CM_ScalarEpilogueAllowed;
9000 };
9001
9002 // 4) if the TTI hook indicates this is profitable, request predication.
9003 TailFoldingInfo TFI(TLI, &LVL, IAI);
9004 if (TTI->preferPredicateOverEpilogue(&TFI))
9005 return CM_ScalarEpilogueNotNeededUsePredicate;
9006
9007 return CM_ScalarEpilogueAllowed;
9008}
9009
9010// Process the loop in the VPlan-native vectorization path. This path builds
9011// VPlan upfront in the vectorization pipeline, which allows to apply
9012// VPlan-to-VPlan transformations from the very beginning without modifying the
9013 // input LLVM IR.
9014static bool processLoopInVPlanNativePath(
9015 Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
9016 LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
9017 TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
9018 OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
9019 ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
9020 LoopVectorizationRequirements &Requirements) {
9021
9022 if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
9023 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
9024 return false;
9025 }
9026 assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
9027 Function *F = L->getHeader()->getParent();
9028 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
9029
9030 ScalarEpilogueLowering SEL =
9031 getScalarEpilogueLowering(F, L, Hints, PSI, BFI, TTI, TLI, *LVL, &IAI);
9032
9033 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
9034 &Hints, IAI, PSI, BFI);
9035 // Use the planner for outer loop vectorization.
9036 // TODO: CM is not used at this point inside the planner. Turn CM into an
9037 // optional argument if we don't need it in the future.
9038 LoopVectorizationPlanner LVP(L, LI, DT, TLI, *TTI, LVL, CM, IAI, PSE, Hints,
9039 ORE);
9040
9041 // Get user vectorization factor.
9042 ElementCount UserVF = Hints.getWidth();
9043
9044 CM.collectElementTypesForWidening();
9045
9046 // Plan how to best vectorize, return the best VF and its cost.
9047 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
9048
9049 // If we are stress testing VPlan builds, do not attempt to generate vector
9050 // code. Masked vector code generation support will follow soon.
9051 // Also, do not attempt to vectorize if no vector code will be produced.
9052 if (VPlanBuildStressTest || VectorizationFactor::Disabled() == VF)
9053 return false;
9054
9055 VPlan &BestPlan = LVP.getPlanFor(VF.Width);
9056
9057 {
9058 GeneratedRTChecks Checks(PSE, DT, LI, TTI, F->getDataLayout(), CM.CostKind);
9059 InnerLoopVectorizer LB(L, PSE, LI, DT, TTI, AC, VF.Width, /*UF=*/1, &CM,
9060 BFI, PSI, Checks, BestPlan);
9061 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
9062 << L->getHeader()->getParent()->getName() << "\"\n");
9063 LVP.addMinimumIterationCheck(BestPlan, VF.Width, /*UF=*/1,
9064 VF.MinProfitableTripCount);
9065
9066 LVP.executePlan(VF.Width, /*UF=*/1, BestPlan, LB, DT, false);
9067 }
9068
9069 reportVectorization(ORE, L, VF, 1);
9070
9071 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
9072 return true;
9073}
9074
9075// Emit a remark if there are stores to floats that required a floating point
9076// extension. If the vectorized loop was generated with floating point there
9077// will be a performance penalty from the conversion overhead and the change in
9078// the vector width.
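// (Added commentary, illustrative example not from the source:) a loop such as
//   for (int i = 0; i < n; i++) f[i] = (float)(d * (double)f[i]);
// widens each float to double (an fpext feeding the stored value's chain), so
// the vector body must use the wider element type, halving the number of
// lanes compared to an all-float loop.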
9079 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
9080 SmallVector<Instruction *, 4> Worklist;
9081 for (BasicBlock *BB : L->getBlocks()) {
9082 for (Instruction &Inst : *BB) {
9083 if (auto *S = dyn_cast<StoreInst>(&Inst)) {
9084 if (S->getValueOperand()->getType()->isFloatTy())
9085 Worklist.push_back(S);
9086 }
9087 }
9088 }
9089
9090 // Traverse the floating point stores upwards, searching for floating point
9091 // conversions.
9092 SmallPtrSet<const Instruction *, 4> Visited;
9093 SmallPtrSet<const Instruction *, 4> EmittedRemark;
9094 while (!Worklist.empty()) {
9095 auto *I = Worklist.pop_back_val();
9096 if (!L->contains(I))
9097 continue;
9098 if (!Visited.insert(I).second)
9099 continue;
9100
9101 // Emit a remark if the floating point store required a floating
9102 // point conversion.
9103 // TODO: More work could be done to identify the root cause such as a
9104 // constant or a function return type and point the user to it.
9105 if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
9106 ORE->emit([&]() {
9107 return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
9108 I->getDebugLoc(), L->getHeader())
9109 << "floating point conversion changes vector width. "
9110 << "Mixed floating point precision requires an up/down "
9111 << "cast that will negatively impact performance.";
9112 });
9113
9114 for (Use &Op : I->operands())
9115 if (auto *OpI = dyn_cast<Instruction>(Op))
9116 Worklist.push_back(OpI);
9117 }
9118}
9119
9120/// For loops with uncountable early exits, find the cost of doing work when
9121/// exiting the loop early, such as calculating the final exit values of
9122/// variables used outside the loop.
9123/// TODO: This is currently overly pessimistic because the loop may not take
9124/// the early exit, but better to keep this conservative for now. In future,
9125/// it might be possible to relax this by using branch probabilities.
9126 static InstructionCost calculateEarlyExitCost(VPCostContext &CostCtx,
9127 VPlan &Plan, ElementCount VF) {
9128 InstructionCost Cost = 0;
9129 for (auto *ExitVPBB : Plan.getExitBlocks()) {
9130 for (auto *PredVPBB : ExitVPBB->getPredecessors()) {
9131 // If the predecessor is not the middle.block, then it must be the
9132 // vector.early.exit block, which may contain work to calculate the exit
9133 // values of variables used outside the loop.
9134 if (PredVPBB != Plan.getMiddleBlock()) {
9135 LLVM_DEBUG(dbgs() << "Calculating cost of work in exit block "
9136 << PredVPBB->getName() << ":\n");
9137 Cost += PredVPBB->cost(VF, CostCtx);
9138 }
9139 }
9140 }
9141 return Cost;
9142}
9143
9144/// This function determines whether or not it's still profitable to vectorize
9145/// the loop given the extra work we have to do outside of the loop:
9146/// 1. Perform the runtime checks before entering the loop to ensure it's safe
9147/// to vectorize.
9148/// 2. In the case of loops with uncountable early exits, we may have to do
9149/// extra work when exiting the loop early, such as calculating the final
9150/// exit values of variables used outside the loop.
9151static bool isOutsideLoopWorkProfitable(GeneratedRTChecks &Checks,
9152 VectorizationFactor &VF, Loop *L,
9153 PredicatedScalarEvolution &PSE,
9154 VPCostContext &CostCtx, VPlan &Plan,
9155 ScalarEpilogueLowering SEL,
9156 std::optional<unsigned> VScale) {
9157 InstructionCost TotalCost = Checks.getCost();
9158 if (!TotalCost.isValid())
9159 return false;
9160
9161 // Add on the cost of any work required in the vector early exit block, if
9162 // one exists.
9163 TotalCost += calculateEarlyExitCost(CostCtx, Plan, VF.Width);
9164
9165 // When interleaving only, the scalar and vector costs will be equal, which
9166 // in turn would lead to a divide by 0. Fall back to a hard threshold.
9167 if (VF.Width.isScalar()) {
9168 // TODO: Should we rename VectorizeMemoryCheckThreshold?
9169 if (TotalCost > VectorizeMemoryCheckThreshold) {
9170 LLVM_DEBUG(
9171 dbgs()
9172 << "LV: Interleaving only is not profitable due to runtime checks\n");
9173 return false;
9174 }
9175 return true;
9176 }
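// (Added commentary:) VectorizeMemoryCheckThreshold defaults to 128, so an
// interleave-only plan whose combined runtime-check cost exceeds 128 is
// rejected here regardless of the expected trip count.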
9177
9178 // The scalar cost should only be 0 when vectorizing with a user specified
9179 // VF/IC. In those cases, runtime checks should always be generated.
9180 uint64_t ScalarC = VF.ScalarCost.getValue();
9181 if (ScalarC == 0)
9182 return true;
9183
9184 // First, compute the minimum iteration count required so that the vector
9185 // loop outperforms the scalar loop.
9186 // The total cost of the scalar loop is
9187 // ScalarC * TC
9188 // where
9189 // * TC is the actual trip count of the loop.
9190 // * ScalarC is the cost of a single scalar iteration.
9191 //
9192 // The total cost of the vector loop is
9193 // RtC + VecC * (TC / VF) + EpiC
9194 // where
9195 // * RtC is the cost of the generated runtime checks plus the cost of
9196 // performing any additional work in the vector.early.exit block for loops
9197 // with uncountable early exits.
9198 // * VecC is the cost of a single vector iteration.
9199 // * TC is the actual trip count of the loop
9200 // * VF is the vectorization factor
9201 //  * EpiC is the cost of the generated epilogue, including the cost
9202 //    of the remaining scalar operations.
9203 //
9204 // Vectorization is profitable once the total vector cost is less than the
9205 // total scalar cost:
9206 // RtC + VecC * (TC / VF) + EpiC < ScalarC * TC
9207 //
9208 // Now we can compute the minimum required trip count TC as
9209 // VF * (RtC + EpiC) / (ScalarC * VF - VecC) < TC
9210 //
9211 // For now we assume the epilogue cost EpiC = 0 for simplicity. Note that
9212 // the divisions below are rounded up (divideCeil), hence we get an upper
9213 // estimate of the TC.
9214 unsigned IntVF = estimateElementCount(VF.Width, VScale);
9215 uint64_t RtC = TotalCost.getValue();
9216 uint64_t Div = ScalarC * IntVF - VF.Cost.getValue();
9217 uint64_t MinTC1 = Div == 0 ? 0 : divideCeil(RtC * IntVF, Div);
9218
9219 // Second, compute a minimum iteration count so that the cost of the
9220 // runtime checks is only a fraction of the total scalar loop cost. This
9221 // adds a loop-dependent bound on the overhead incurred if the runtime
9222 // checks fail. In case the runtime checks fail, the cost is RtC + ScalarC
9223 // * TC. To bound the runtime check to be a fraction 1/X of the scalar
9224 // cost, compute
9225 // RtC < ScalarC * TC * (1 / X) ==> RtC * X / ScalarC < TC
9226 uint64_t MinTC2 = divideCeil(RtC * 10, ScalarC);
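// Worked example (hypothetical costs, added commentary): with ScalarC = 4,
// VecC = 10, IntVF = 4 and RtC = 24:
//   MinTC1 = ceil(24 * 4 / (4 * 4 - 10)) = ceil(96 / 6) = 16
//   MinTC2 = ceil(24 * 10 / 4)           = 60
// so the runtime checks only pay off once the loop runs at least 60
// iterations (rounded up to a multiple of VF if a scalar epilogue is
// allowed).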
9227
9228 // Now pick the larger minimum. If it is not a multiple of VF and a scalar
9229 // epilogue is allowed, choose the next closest multiple of VF. This should
9230 // partly compensate for ignoring the epilogue cost.
9231 uint64_t MinTC = std::max(MinTC1, MinTC2);
9232 if (SEL == CM_ScalarEpilogueAllowed)
9233 MinTC = alignTo(MinTC, IntVF);
9234 VF.MinProfitableTripCount = ElementCount::getFixed(MinTC);
9235
9236 LLVM_DEBUG(
9237 dbgs() << "LV: Minimum required TC for runtime checks to be profitable:"
9238 << VF.MinProfitableTripCount << "\n");
9239
9240 // Skip vectorization if the expected trip count is less than the minimum
9241 // required trip count.
9242 if (auto ExpectedTC = getSmallBestKnownTC(PSE, L)) {
9243 if (ElementCount::isKnownLT(*ExpectedTC, VF.MinProfitableTripCount)) {
9244 LLVM_DEBUG(dbgs() << "LV: Vectorization is not beneficial: expected "
9245 "trip count < minimum profitable VF ("
9246 << *ExpectedTC << " < " << VF.MinProfitableTripCount
9247 << ")\n");
9248
9249 return false;
9250 }
9251 }
9252 return true;
9253}
9254
9255 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
9256 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
9257 !EnableLoopInterleaving),
9258 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
9259 !EnableLoopVectorization) {}
9260
9261/// Prepare \p MainPlan for vectorizing the main vector loop during epilogue
9262/// vectorization. Remove ResumePhis from \p MainPlan for inductions that
9263/// don't have a corresponding wide induction in \p EpiPlan.
9264static void preparePlanForMainVectorLoop(VPlan &MainPlan, VPlan &EpiPlan) {
9265 // Collect PHI nodes of widened phis in the VPlan for the epilogue. Those
9266 // will need their resume-values computed in the main vector loop. Others
9267 // can be removed from the main VPlan.
9268 SmallPtrSet<PHINode *, 2> EpiWidenedPhis;
9269 for (VPRecipeBase &R :
9270 EpiPlan.getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
9271 if (isa<VPCanonicalIVPHIRecipe>(&R))
9272 continue;
9273 EpiWidenedPhis.insert(
9274 cast<PHINode>(R.getVPSingleValue()->getUnderlyingValue()));
9275 }
9276 for (VPRecipeBase &R :
9277 make_early_inc_range(MainPlan.getScalarHeader()->phis())) {
9278 auto *VPIRInst = cast<VPIRPhi>(&R);
9279 if (EpiWidenedPhis.contains(&VPIRInst->getIRPhi()))
9280 continue;
9281 // There is no corresponding wide induction in the epilogue plan that would
9282 // need a resume value. Remove the VPIRInst wrapping the scalar header phi
9283 // together with the corresponding ResumePhi. The resume values for the
9284 // scalar loop will be created during execution of EpiPlan.
9285 VPRecipeBase *ResumePhi = VPIRInst->getOperand(0)->getDefiningRecipe();
9286 VPIRInst->eraseFromParent();
9287 ResumePhi->eraseFromParent();
9288 }
9290
9291 using namespace VPlanPatternMatch;
9292 // When vectorizing the epilogue, FindFirstIV & FindLastIV reductions can
9293 // introduce multiple uses of undef/poison. If the reduction start value may
9294 // be undef or poison it needs to be frozen and the frozen start has to be
9295 // used when computing the reduction result. We also need to use the frozen
9296 // value in the resume phi generated by the main vector loop, as this is also
9297 // used to compute the reduction result after the epilogue vector loop.
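// (Added commentary:) e.g. if the start value is poison, two separate uses of
// it (one feeding the main loop's reduction result, one feeding the resume
// phi consumed by the epilogue) could legally be folded to different concrete
// values; the freeze pins both uses to a single value.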
9298 auto AddFreezeForFindLastIVReductions = [](VPlan &Plan,
9299 bool UpdateResumePhis) {
9300 VPBuilder Builder(Plan.getEntry());
9301 for (VPRecipeBase &R : *Plan.getMiddleBlock()) {
9302 auto *VPI = dyn_cast<VPInstruction>(&R);
9303 if (!VPI || VPI->getOpcode() != VPInstruction::ComputeFindIVResult)
9304 continue;
9305 VPValue *OrigStart = VPI->getOperand(1);
9306 if (isGuaranteedNotToBeUndefOrPoison(OrigStart->getLiveInIRValue()))
9307 continue;
9308 VPInstruction *Freeze =
9309 Builder.createNaryOp(Instruction::Freeze, {OrigStart}, {}, "fr");
9310 VPI->setOperand(1, Freeze);
9311 if (UpdateResumePhis)
9312 OrigStart->replaceUsesWithIf(Freeze, [Freeze](VPUser &U, unsigned) {
9313 return Freeze != &U && isa<VPPhi>(&U);
9314 });
9315 }
9316 };
9317 AddFreezeForFindLastIVReductions(MainPlan, true);
9318 AddFreezeForFindLastIVReductions(EpiPlan, false);
9319
9320 VPBasicBlock *MainScalarPH = MainPlan.getScalarPreheader();
9321 VPValue *VectorTC = &MainPlan.getVectorTripCount();
9322 // If there is a suitable resume value for the canonical induction in the
9323 // scalar (which will become vector) epilogue loop, use it and move it to the
9324 // beginning of the scalar preheader. Otherwise create it below.
9325 auto ResumePhiIter =
9326 find_if(MainScalarPH->phis(), [VectorTC](VPRecipeBase &R) {
9327 return match(&R, m_VPInstruction<Instruction::PHI>(m_Specific(VectorTC),
9328 m_ZeroInt()));
9329 });
9330 VPPhi *ResumePhi = nullptr;
9331 if (ResumePhiIter == MainScalarPH->phis().end()) {
9332 VPBuilder ScalarPHBuilder(MainScalarPH, MainScalarPH->begin());
9333 ResumePhi = ScalarPHBuilder.createScalarPhi(
9334 {VectorTC,
9336 {}, "vec.epilog.resume.val");
9337 } else {
9338 ResumePhi = cast<VPPhi>(&*ResumePhiIter);
9339 if (MainScalarPH->begin() == MainScalarPH->end())
9340 ResumePhi->moveBefore(*MainScalarPH, MainScalarPH->end());
9341 else if (&*MainScalarPH->begin() != ResumePhi)
9342 ResumePhi->moveBefore(*MainScalarPH, MainScalarPH->begin());
9343 }
9344 // Add a user to make sure the resume phi won't get removed.
9345 VPBuilder(MainScalarPH)
9346 .createNaryOp(VPInstruction::ResumeForEpilogue, {ResumePhi});
9347}
9348
9349/// Prepare \p Plan for vectorizing the epilogue loop. That is, re-use expanded
9350/// SCEVs from \p ExpandedSCEVs and set resume values for header recipes. Some
9351/// reductions require creating new instructions to compute the resume values.
9352/// They are collected in a vector and returned. They must be moved to the
9353/// preheader of the vector epilogue loop, after created by the execution of \p
9354/// Plan.
9356 VPlan &Plan, Loop *L, const SCEV2ValueTy &ExpandedSCEVs,
9358 ScalarEvolution &SE) {
9359 VPRegionBlock *VectorLoop = Plan.getVectorLoopRegion();
9360 VPBasicBlock *Header = VectorLoop->getEntryBasicBlock();
9361 Header->setName("vec.epilog.vector.body");
9362
9363 VPCanonicalIVPHIRecipe *IV = VectorLoop->getCanonicalIV();
9364 // When vectorizing the epilogue loop, the canonical induction needs to be
9365 // adjusted by the value after the main vector loop. Find the resume value
9366 // created during execution of the main VPlan. It must be the first phi in the
9367 // loop preheader. Use the value to increment the canonical IV, and update all
9368 // users in the loop region to use the adjusted value.
9369 // FIXME: Improve modeling for canonical IV start values in the epilogue
9370 // loop.
9371 using namespace llvm::PatternMatch;
9372 PHINode *EPResumeVal = &*L->getLoopPreheader()->phis().begin();
9373 for (Value *Inc : EPResumeVal->incoming_values()) {
9374 if (match(Inc, m_SpecificInt(0)))
9375 continue;
9376 assert(!EPI.VectorTripCount &&
9377 "Must only have a single non-zero incoming value");
9378 EPI.VectorTripCount = Inc;
9379 }
9380 // If we didn't find a non-zero vector trip count, all incoming values
9381 // must be zero, which also means the vector trip count is zero. Pick the
9382 // first zero as vector trip count.
9383 // TODO: We should not choose VF * UF so the main vector loop is known to
9384 // be dead.
9385 if (!EPI.VectorTripCount) {
9386 assert(EPResumeVal->getNumIncomingValues() > 0 &&
9387 all_of(EPResumeVal->incoming_values(),
9388 [](Value *Inc) { return match(Inc, m_SpecificInt(0)); }) &&
9389 "all incoming values must be 0");
9390 EPI.VectorTripCount = EPResumeVal->getOperand(0);
9391 }
9392 VPValue *VPV = Plan.getOrAddLiveIn(EPResumeVal);
9393 assert(all_of(IV->users(),
9394 [](const VPUser *U) {
9395 return isa<VPScalarIVStepsRecipe>(U) ||
9396 isa<VPDerivedIVRecipe>(U) ||
9397 cast<VPRecipeBase>(U)->isScalarCast() ||
9398 cast<VPInstruction>(U)->getOpcode() ==
9399 Instruction::Add;
9400 }) &&
9401 "the canonical IV should only be used by its increment or "
9402 "ScalarIVSteps when resetting the start value");
9403 VPBuilder Builder(Header, Header->getFirstNonPhi());
9404 VPInstruction *Add = Builder.createNaryOp(Instruction::Add, {IV, VPV});
9405 IV->replaceAllUsesWith(Add);
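// (Added commentary:) replaceAllUsesWith above also rewrote Add's own IV
// operand to point at Add itself, so the next line restores it to the
// canonical IV.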
9406 Add->setOperand(0, IV);
9407
9408 DenseMap<Value *, Value *> ToFrozen;
9409 SmallVector<Instruction *> InstsToMove;
9410 // Ensure that the start values for all header phi recipes are updated before
9411 // vectorizing the epilogue loop. Skip the canonical IV, which has been
9412 // handled above.
9413 for (VPRecipeBase &R : drop_begin(Header->phis())) {
9414 Value *ResumeV = nullptr;
9415 // TODO: Move setting of resume values to prepareToExecute.
9416 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) {
9417 auto *RdxResult =
9418 cast<VPInstruction>(*find_if(ReductionPhi->users(), [](VPUser *U) {
9419 auto *VPI = dyn_cast<VPInstruction>(U);
9420 return VPI &&
9421 (VPI->getOpcode() == VPInstruction::ComputeAnyOfResult ||
9422 VPI->getOpcode() == VPInstruction::ComputeReductionResult ||
9423 VPI->getOpcode() == VPInstruction::ComputeFindIVResult);
9424 }));
9425 ResumeV = cast<PHINode>(ReductionPhi->getUnderlyingInstr())
9426 ->getIncomingValueForBlock(L->getLoopPreheader());
9427 RecurKind RK = ReductionPhi->getRecurrenceKind();
9428 if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RK)) {
9429 Value *StartV = RdxResult->getOperand(1)->getLiveInIRValue();
9430 // VPReductionPHIRecipes for AnyOf reductions expect a boolean as
9431 // start value; compare the final value from the main vector loop
9432 // to the start value.
9433 BasicBlock *PBB = cast<Instruction>(ResumeV)->getParent();
9434 IRBuilder<> Builder(PBB, PBB->getFirstNonPHIIt());
9435 ResumeV = Builder.CreateICmpNE(ResumeV, StartV);
9436 if (auto *I = dyn_cast<Instruction>(ResumeV))
9437 InstsToMove.push_back(I);
9438 } else if (RecurrenceDescriptor::isFindIVRecurrenceKind(RK)) {
9439 Value *StartV = getStartValueFromReductionResult(RdxResult);
9440 ToFrozen[StartV] = cast<PHINode>(ResumeV)->getIncomingValueForBlock(
9441 EPI.MainLoopIterationCountCheck);
9442
9443 // VPReductionPHIRecipe for FindFirstIV/FindLastIV reductions requires
9444 // an adjustment to the resume value. The resume value is adjusted to
9445 // the sentinel value when the final value from the main vector loop
9446 // equals the start value. This ensures correctness when the start value
9447 // might not be less than the minimum value of a monotonically
9448 // increasing induction variable.
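// (Added commentary:) e.g. for FindLastIV with start value S and sentinel
// SENT, a main-loop result equal to S means no lane matched, so the epilogue
// must resume from SENT; any other result is a genuine matching index and is
// kept unchanged by the select below.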
9449 BasicBlock *ResumeBB = cast<Instruction>(ResumeV)->getParent();
9450 IRBuilder<> Builder(ResumeBB, ResumeBB->getFirstNonPHIIt());
9451 Value *Cmp = Builder.CreateICmpEQ(ResumeV, ToFrozen[StartV]);
9452 if (auto *I = dyn_cast<Instruction>(Cmp))
9453 InstsToMove.push_back(I);
9454 Value *Sentinel = RdxResult->getOperand(2)->getLiveInIRValue();
9455 ResumeV = Builder.CreateSelect(Cmp, Sentinel, ResumeV);
9456 if (auto *I = dyn_cast<Instruction>(ResumeV))
9457 InstsToMove.push_back(I);
9458 } else {
9459 VPValue *StartVal = Plan.getOrAddLiveIn(ResumeV);
9460 auto *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
9461 if (auto *VPI = dyn_cast<VPInstruction>(PhiR->getStartValue())) {
9462 assert(VPI->getOpcode() == VPInstruction::ReductionStartVector &&
9463 "unexpected start value");
9464 VPI->setOperand(0, StartVal);
9465 continue;
9466 }
9467 }
9468 } else {
9469 // Retrieve the induction resume values for wide inductions from
9470 // their original phi nodes in the scalar loop.
9471 PHINode *IndPhi = cast<VPWidenInductionRecipe>(&R)->getPHINode();
9472 // Hook up to the PHINode generated by a ResumePhi recipe of main
9473 // loop VPlan, which feeds the scalar loop.
9474 ResumeV = IndPhi->getIncomingValueForBlock(L->getLoopPreheader());
9475 }
9476 assert(ResumeV && "Must have a resume value");
9477 VPValue *StartVal = Plan.getOrAddLiveIn(ResumeV);
9478 cast<VPHeaderPHIRecipe>(&R)->setStartValue(StartVal);
9479 }
9480
9481 // For some VPValues in the epilogue plan we must re-use the generated IR
9482 // values from the main plan. Replace them with live-in VPValues.
9483 // TODO: This is a workaround needed for epilogue vectorization and it
9484 // should be removed once induction resume value creation is done
9485 // directly in VPlan.
9486 for (auto &R : make_early_inc_range(*Plan.getEntry())) {
9487 // Re-use frozen values from the main plan for Freeze VPInstructions in the
9488 // epilogue plan. This ensures all users use the same frozen value.
9489 auto *VPI = dyn_cast<VPInstruction>(&R);
9490 if (VPI && VPI->getOpcode() == Instruction::Freeze) {
9491 VPI->replaceAllUsesWith(Plan.getOrAddLiveIn(
9492 ToFrozen.lookup(VPI->getOperand(0)->getLiveInIRValue())));
9493 continue;
9494 }
9495
9496 // Re-use the trip count and steps expanded for the main loop, as
9497 // skeleton creation needs it as a value that dominates both the scalar
9498 // and vector epilogue loops
9499 auto *ExpandR = dyn_cast<VPExpandSCEVRecipe>(&R);
9500 if (!ExpandR)
9501 continue;
9502 VPValue *ExpandedVal =
9503 Plan.getOrAddLiveIn(ExpandedSCEVs.lookup(ExpandR->getSCEV()));
9504 ExpandR->replaceAllUsesWith(ExpandedVal);
9505 if (Plan.getTripCount() == ExpandR)
9506 Plan.resetTripCount(ExpandedVal);
9507 ExpandR->eraseFromParent();
9508 }
9509
9510 auto VScale = CM.getVScaleForTuning();
9511 unsigned MainLoopStep =
9512 estimateElementCount(EPI.MainLoopVF * EPI.MainLoopUF, VScale);
9513 unsigned EpilogueLoopStep =
9514 estimateElementCount(EPI.EpilogueVF * EPI.EpilogueUF, VScale);
9516 Plan, EPI.TripCount, EPI.VectorTripCount,
9518 EPI.EpilogueUF, MainLoopStep, EpilogueLoopStep, SE);
9519
9520 return InstsToMove;
9521}
9522
9523// Generate bypass values from the additional bypass block. Note that when the
9524// vectorized epilogue is skipped due to iteration count check, then the
9525// resume value for the induction variable comes from the trip count of the
9526// main vector loop, passed as the second argument.
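// (Added commentary:) e.g. for an integer induction i = start + k * step, the
// end value computed below is start + MainVectorTripCount * step, i.e. the
// value the induction holds after the main vector loop finishes.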
9527 static Value *createInductionAdditionalBypassValues(
9528 PHINode *OrigPhi, const InductionDescriptor &II, IRBuilder<> &BypassBuilder,
9529 const SCEV2ValueTy &ExpandedSCEVs, Value *MainVectorTripCount,
9530 Instruction *OldInduction) {
9531 Value *Step = getExpandedStep(II, ExpandedSCEVs);
9532 // For the primary induction the additional bypass end value is known.
9533 // Otherwise it is computed.
9534 Value *EndValueFromAdditionalBypass = MainVectorTripCount;
9535 if (OrigPhi != OldInduction) {
9536 auto *BinOp = II.getInductionBinOp();
9537 // Fast-math-flags propagate from the original induction instruction.
9538 if (isa_and_nonnull<FPMathOperator>(BinOp))
9539 BypassBuilder.setFastMathFlags(BinOp->getFastMathFlags());
9540
9541 // Compute the end value for the additional bypass.
9542 EndValueFromAdditionalBypass =
9543 emitTransformedIndex(BypassBuilder, MainVectorTripCount,
9544 II.getStartValue(), Step, II.getKind(), BinOp);
9545 EndValueFromAdditionalBypass->setName("ind.end");
9546 }
9547 return EndValueFromAdditionalBypass;
9548}
9549
9550 static void fixScalarResumeValuesFromBypass(BasicBlock *BypassBlock, Loop *L,
9551 VPlan &BestEpiPlan,
9552 LoopVectorizationLegality &LVL,
9553 const SCEV2ValueTy &ExpandedSCEVs,
9554 Value *MainVectorTripCount) {
9555 // Fix reduction resume values from the additional bypass block.
9556 BasicBlock *PH = L->getLoopPreheader();
9557 for (auto *Pred : predecessors(PH)) {
9558 for (PHINode &Phi : PH->phis()) {
9559 if (Phi.getBasicBlockIndex(Pred) != -1)
9560 continue;
9561 Phi.addIncoming(Phi.getIncomingValueForBlock(BypassBlock), Pred);
9562 }
9563 }
9564 auto *ScalarPH = cast<VPIRBasicBlock>(BestEpiPlan.getScalarPreheader());
9565 if (ScalarPH->hasPredecessors()) {
9566 // If ScalarPH has predecessors, we may need to update its reduction
9567 // resume values.
9568 for (const auto &[R, IRPhi] :
9569 zip(ScalarPH->phis(), ScalarPH->getIRBasicBlock()->phis())) {
9570 fixReductionScalarResumeWhenVectorizingEpilog(cast<VPPhi>(&R), IRPhi,
9571 BypassBlock);
9572 }
9573 }
9574
9575 // Fix induction resume values from the additional bypass block.
9576 IRBuilder<> BypassBuilder(BypassBlock, BypassBlock->getFirstInsertionPt());
9577 for (const auto &[IVPhi, II] : LVL.getInductionVars()) {
9578 auto *Inc = cast<PHINode>(IVPhi->getIncomingValueForBlock(PH));
9579 Value *V = createInductionAdditionalBypassValues(
9580 IVPhi, II, BypassBuilder, ExpandedSCEVs, MainVectorTripCount,
9581 LVL.getPrimaryInduction());
9582 // TODO: Directly add as extra operand to the VPResumePHI recipe.
9583 Inc->setIncomingValueForBlock(BypassBlock, V);
9584 }
9585}
9586
9587 /// Connect the epilogue vector loop generated for \p EpiPlan to the main
9588 /// vector loop, after both plans have executed, updating branches from the
9589 /// iteration and runtime checks of the main loop, as well as updating various
9590 /// phis. \p InstsToMove contains instructions that need to be moved to the
9591 /// preheader of the epilogue vector loop.
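// (Added commentary:) blocks that previously branched to
// vec.epilog.iter.check are redirected below: the main loop's iteration-count
// check to the epilogue vector preheader, and the epilogue iteration-count
// check plus the SCEV/memory check blocks to the scalar preheader, with the
// dominator tree updated eagerly via DomTreeUpdater.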
9593 VPlan &EpiPlan, Loop *L, EpilogueLoopVectorizationInfo &EPI,
9595 DenseMap<const SCEV *, Value *> &ExpandedSCEVs, GeneratedRTChecks &Checks,
9596 ArrayRef<Instruction *> InstsToMove) {
9597 BasicBlock *VecEpilogueIterationCountCheck =
9598 cast<VPIRBasicBlock>(EpiPlan.getEntry())->getIRBasicBlock();
9599
9600 BasicBlock *VecEpiloguePreHeader =
9601 cast<BranchInst>(VecEpilogueIterationCountCheck->getTerminator())
9602 ->getSuccessor(1);
9603 // Adjust the control flow taking the state info from the main loop
9604 // vectorization into account.
9606 "expected this to be saved from the previous pass.");
9607 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
9609 VecEpilogueIterationCountCheck, VecEpiloguePreHeader);
9610
9611 DTU.applyUpdates({{DominatorTree::Delete, EPI.MainLoopIterationCountCheck,
9612 VecEpilogueIterationCountCheck},
9613 {DominatorTree::Insert, EPI.MainLoopIterationCountCheck,
9614 VecEpiloguePreHeader}});
9615
9616 BasicBlock *ScalarPH =
9617 cast<VPIRBasicBlock>(EpiPlan.getScalarPreheader())->getIRBasicBlock();
9618 EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
9619 VecEpilogueIterationCountCheck, ScalarPH);
9620 DTU.applyUpdates(
9621 {{DominatorTree::Delete, EPI.EpilogueIterationCountCheck,
9622 VecEpilogueIterationCountCheck},
9623 {DominatorTree::Insert, EPI.EpilogueIterationCountCheck, ScalarPH}});
9624
9625 // Adjust the terminators of runtime check blocks and phis using them.
9626 BasicBlock *SCEVCheckBlock = Checks.getSCEVChecks().second;
9627 BasicBlock *MemCheckBlock = Checks.getMemRuntimeChecks().second;
9628 if (SCEVCheckBlock) {
9629 SCEVCheckBlock->getTerminator()->replaceUsesOfWith(
9630 VecEpilogueIterationCountCheck, ScalarPH);
9631 DTU.applyUpdates({{DominatorTree::Delete, SCEVCheckBlock,
9632 VecEpilogueIterationCountCheck},
9633 {DominatorTree::Insert, SCEVCheckBlock, ScalarPH}});
9634 }
9635 if (MemCheckBlock) {
9636 MemCheckBlock->getTerminator()->replaceUsesOfWith(
9637 VecEpilogueIterationCountCheck, ScalarPH);
9638 DTU.applyUpdates(
9639 {{DominatorTree::Delete, MemCheckBlock, VecEpilogueIterationCountCheck},
9640 {DominatorTree::Insert, MemCheckBlock, ScalarPH}});
9641 }
9642
9643 // The vec.epilog.iter.check block may contain Phi nodes from inductions
9644 // or reductions which merge control-flow from the latch block and the
9645 // middle block. Update the incoming values here and move the Phi into the
9646 // preheader.
9647 SmallVector<PHINode *, 4> PhisInBlock(
9648 llvm::make_pointer_range(VecEpilogueIterationCountCheck->phis()));
9649
9650 for (PHINode *Phi : PhisInBlock) {
9651 Phi->moveBefore(VecEpiloguePreHeader->getFirstNonPHIIt());
9652 Phi->replaceIncomingBlockWith(
9653 VecEpilogueIterationCountCheck->getSinglePredecessor(),
9654 VecEpilogueIterationCountCheck);
9655
9656 // If the phi doesn't have an incoming value from the
9657 // EpilogueIterationCountCheck, we are done. Otherwise remove the
9658 // incoming value and also those from other check blocks. This is needed
9659 // for reduction phis only.
9660 if (none_of(Phi->blocks(), [&](BasicBlock *IncB) {
9661 return EPI.EpilogueIterationCountCheck == IncB;
9662 }))
9663 continue;
9664 Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck);
9665 if (SCEVCheckBlock)
9666 Phi->removeIncomingValue(SCEVCheckBlock);
9667 if (MemCheckBlock)
9668 Phi->removeIncomingValue(MemCheckBlock);
9669 }
9670
9671 auto IP = VecEpiloguePreHeader->getFirstNonPHIIt();
9672 for (auto *I : InstsToMove)
9673 I->moveBefore(IP);
9674
9675 // VecEpilogueIterationCountCheck conditionally skips over the epilogue loop
9676 // after executing the main loop. We need to update the resume values of
9677 // inductions and reductions during epilogue vectorization.
9678 fixScalarResumeValuesFromBypass(VecEpilogueIterationCountCheck, L, EpiPlan,
9679 LVL, ExpandedSCEVs, EPI.VectorTripCount);
9680}
9681
9682 bool LoopVectorizePass::processLoop(Loop *L) {
9683 assert((EnableVPlanNativePath || L->isInnermost()) &&
9684 "VPlan-native path is not enabled. Only process inner loops.");
9685
9686 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in '"
9687 << L->getHeader()->getParent()->getName() << "' from "
9688 << L->getLocStr() << "\n");
9689
9690 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI);
9691
9692 LLVM_DEBUG(
9693 dbgs() << "LV: Loop hints:"
9694 << " force="
9696 ? "disabled"
9698 ? "enabled"
9699 : "?"))
9700 << " width=" << Hints.getWidth()
9701 << " interleave=" << Hints.getInterleave() << "\n");
9702
9703 // Function containing loop
9704 Function *F = L->getHeader()->getParent();
9705
9706 // Looking at the diagnostic output is the only way to determine if a loop
9707 // was vectorized (other than looking at the IR or machine code), so it
9708 // is important to generate an optimization remark for each loop. Most of
9709 // these messages are generated as OptimizationRemarkAnalysis. Remarks
9710 // generated as OptimizationRemark and OptimizationRemarkMissed are
9711 // less verbose, reporting vectorized loops and unvectorized loops that may
9712 // benefit from vectorization, respectively.
9713
9714 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
9715 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
9716 return false;
9717 }
9718
9719 PredicatedScalarEvolution PSE(*SE, *L);
9720
9721 // Check if it is legal to vectorize the loop.
9722 LoopVectorizationRequirements Requirements;
9723 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, F, *LAIs, LI, ORE,
9724 &Requirements, &Hints, DB, AC, BFI, PSI, AA);
9726 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
9727 Hints.emitRemarkWithHints();
9728 return false;
9729 }
9730
9732 reportVectorizationFailure("Auto-vectorization of loops with uncountable "
9733 "early exit is not enabled",
9734 "UncountableEarlyExitLoopsDisabled", ORE, L);
9735 return false;
9736 }
9737
9738 if (!LVL.getPotentiallyFaultingLoads().empty()) {
9739 reportVectorizationFailure("Auto-vectorization of loops with potentially "
9740 "faulting load is not supported",
9741 "PotentiallyFaultingLoadsNotSupported", ORE, L);
9742 return false;
9743 }
9744
9745 // Entrance to the VPlan-native vectorization path. Outer loops are processed
9746 // here. They may require CFG and instruction level transformations before
9747 // even evaluating whether vectorization is profitable. Since we cannot modify
9748 // the incoming IR, we need to build VPlan upfront in the vectorization
9749 // pipeline.
9750 if (!L->isInnermost())
9751 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
9752 ORE, BFI, PSI, Hints, Requirements);
9753
9754 assert(L->isInnermost() && "Inner loop expected.");
9755
9756 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
9757 bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
9758
9759 // If an override option has been passed in for interleaved accesses, use it.
9760 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
9761 UseInterleaved = EnableInterleavedMemAccesses;
9762
9763 // Analyze interleaved memory accesses.
9764 if (UseInterleaved)
9765 IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
9766
9767 if (LVL.hasUncountableEarlyExit()) {
9768 BasicBlock *LoopLatch = L->getLoopLatch();
9769 if (IAI.requiresScalarEpilogue() ||
9770 any_of(LVL.getCountableExitingBlocks(),
9771 [LoopLatch](BasicBlock *BB) { return BB != LoopLatch; })) {
9772 reportVectorizationFailure("Auto-vectorization of early exit loops "
9773 "requiring a scalar epilogue is unsupported",
9774 "UncountableEarlyExitUnsupported", ORE, L);
9775 return false;
9776 }
9777 }
9778
9779 // Check the function attributes and profiles to find out if this function
9780 // should be optimized for size.
9781 ScalarEpilogueLowering SEL =
9782 getScalarEpilogueLowering(F, L, Hints, PSI, BFI, TTI, TLI, LVL, &IAI);
9783
9784 // Check the loop for a trip count threshold: vectorize loops with a tiny trip
9785 // count by optimizing for size, to minimize overheads.
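// (Added commentary:) the threshold below comes from
// -vectorizer-min-trip-count, which defaults to 16, so e.g. a loop with a
// known trip count of 8 is only vectorized if no scalar iteration overheads
// are incurred.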
9786 auto ExpectedTC = getSmallBestKnownTC(PSE, L);
9787 if (ExpectedTC && ExpectedTC->isFixed() &&
9788 ExpectedTC->getFixedValue() < TinyTripCountVectorThreshold) {
9789 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
9790 << "This loop is worth vectorizing only if no scalar "
9791 << "iteration overheads are incurred.");
9793 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
9794 else {
9795 LLVM_DEBUG(dbgs() << "\n");
9796 // Predicate tail-folded loops are efficient even when the loop
9797 // iteration count is low. However, setting the epilogue policy to
9798 // `CM_ScalarEpilogueNotAllowedLowTripLoop` prevents vectorizing loops
9799 // with runtime checks. It's more effective to let
9800 // `isOutsideLoopWorkProfitable` determine if vectorization is
9801 // beneficial for the loop.
9802 if (SEL != CM_ScalarEpilogueNotNeededUsePredicate)
9803 SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
9804 }
9805 }
9806
9807 // Check the function attributes to see if implicit floats or vectors are
9808 // allowed.
9809 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
9811 "Can't vectorize when the NoImplicitFloat attribute is used",
9812 "loop not vectorized due to NoImplicitFloat attribute",
9813 "NoImplicitFloat", ORE, L);
9814 Hints.emitRemarkWithHints();
9815 return false;
9816 }
9817
9818 // Check if the target supports potentially unsafe FP vectorization.
9819 // FIXME: Add a check for the type of safety issue (denormal, signaling)
9820 // for the target we're vectorizing for, to make sure none of the
9821 // additional fp-math flags can help.
9822 if (Hints.isPotentiallyUnsafe() &&
9823 TTI->isFPVectorizationPotentiallyUnsafe()) {
9825 "Potentially unsafe FP op prevents vectorization",
9826 "loop not vectorized due to unsafe FP support.",
9827 "UnsafeFP", ORE, L);
9828 Hints.emitRemarkWithHints();
9829 return false;
9830 }
9831
9832 bool AllowOrderedReductions;
9833 // If the flag is set, use that instead and override the TTI behaviour.
9834 if (ForceOrderedReductions.getNumOccurrences() > 0)
9835 AllowOrderedReductions = ForceOrderedReductions;
9836 else
9837 AllowOrderedReductions = TTI->enableOrderedReductions();
9838 if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
9839 ORE->emit([&]() {
9840 auto *ExactFPMathInst = Requirements.getExactFPInst();
9841 return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
9842 ExactFPMathInst->getDebugLoc(),
9843 ExactFPMathInst->getParent())
9844 << "loop not vectorized: cannot prove it is safe to reorder "
9845 "floating-point operations";
9846 });
9847 LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
9848 "reorder floating-point operations\n");
9849 Hints.emitRemarkWithHints();
9850 return false;
9851 }
9852
9853 // Use the cost model.
9854 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
9855 F, &Hints, IAI, PSI, BFI);
9856 // Use the planner for vectorization.
9857 LoopVectorizationPlanner LVP(L, LI, DT, TLI, *TTI, &LVL, CM, IAI, PSE, Hints,
9858 ORE);
9859
9860 // Get user vectorization factor and interleave count.
9861 ElementCount UserVF = Hints.getWidth();
9862 unsigned UserIC = Hints.getInterleave();
9863
9864 // Plan how to best vectorize.
9865 LVP.plan(UserVF, UserIC);
9866 VectorizationFactor VF = LVP.computeBestVF();
9867 unsigned IC = 1;
9868
9869 if (ORE->allowExtraAnalysis(LV_NAME))
9870 LVP.emitInvalidCostRemarks(ORE);
9871
9872 GeneratedRTChecks Checks(PSE, DT, LI, TTI, F->getDataLayout(), CM.CostKind);
9873 if (LVP.hasPlanWithVF(VF.Width)) {
9874 // Select the interleave count.
9875 IC = LVP.selectInterleaveCount(LVP.getPlanFor(VF.Width), VF.Width, VF.Cost);
9876
9877 unsigned SelectedIC = std::max(IC, UserIC);
9878 // Optimistically generate runtime checks if they are needed. Drop them if
9879 // they turn out to not be profitable.
9880 if (VF.Width.isVector() || SelectedIC > 1) {
9881 Checks.create(L, *LVL.getLAI(), PSE.getPredicate(), VF.Width, SelectedIC);
9882
9883 // Bail out early if either the SCEV or memory runtime checks are known to
9884 // fail. In that case, the vector loop would never execute.
9885 using namespace llvm::PatternMatch;
9886 if (Checks.getSCEVChecks().first &&
9887 match(Checks.getSCEVChecks().first, m_One()))
9888 return false;
9889 if (Checks.getMemRuntimeChecks().first &&
9890 match(Checks.getMemRuntimeChecks().first, m_One()))
9891 return false;
9892 }
9893
9894 // Check if it is profitable to vectorize with runtime checks.
9895 bool ForceVectorization =
9896 Hints.getForce() == LoopVectorizeHints::FK_Enabled;
9897 VPCostContext CostCtx(CM.TTI, *CM.TLI, LVP.getPlanFor(VF.Width), CM,
9898 CM.CostKind, *CM.PSE.getSE());
9899 if (!ForceVectorization &&
9900 !isOutsideLoopWorkProfitable(Checks, VF, L, PSE, CostCtx,
9901 LVP.getPlanFor(VF.Width), SEL,
9902 CM.getVScaleForTuning())) {
9903 ORE->emit([&]() {
9905 DEBUG_TYPE, "CantReorderMemOps", L->getStartLoc(),
9906 L->getHeader())
9907 << "loop not vectorized: cannot prove it is safe to reorder "
9908 "memory operations";
9909 });
9910 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
9911 Hints.emitRemarkWithHints();
9912 return false;
9913 }
9914 }
9915
9916 // Identify the diagnostic messages that should be produced.
9917 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
9918 bool VectorizeLoop = true, InterleaveLoop = true;
9919 if (VF.Width.isScalar()) {
9920 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
9921 VecDiagMsg = {
9922 "VectorizationNotBeneficial",
9923 "the cost-model indicates that vectorization is not beneficial"};
9924 VectorizeLoop = false;
9925 }
9926
9927 if (!LVP.hasPlanWithVF(VF.Width) && UserIC > 1) {
9928 // Tell the user interleaving was avoided up-front, despite being explicitly
9929 // requested.
9930 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
9931 "interleaving should be avoided up front\n");
9932 IntDiagMsg = {"InterleavingAvoided",
9933 "Ignoring UserIC, because interleaving was avoided up front"};
9934 InterleaveLoop = false;
9935 } else if (IC == 1 && UserIC <= 1) {
9936 // Tell the user interleaving is not beneficial.
9937 LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
9938 IntDiagMsg = {
9939 "InterleavingNotBeneficial",
9940 "the cost-model indicates that interleaving is not beneficial"};
9941 InterleaveLoop = false;
9942 if (UserIC == 1) {
9943 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
9944 IntDiagMsg.second +=
9945 " and is explicitly disabled or interleave count is set to 1";
9946 }
9947 } else if (IC > 1 && UserIC == 1) {
9948 // Tell the user interleaving is beneficial, but it is explicitly disabled.
9949 LLVM_DEBUG(dbgs() << "LV: Interleaving is beneficial but is explicitly "
9950 "disabled.\n");
9951 IntDiagMsg = {"InterleavingBeneficialButDisabled",
9952 "the cost-model indicates that interleaving is beneficial "
9953 "but is explicitly disabled or interleave count is set to 1"};
9954 InterleaveLoop = false;
9955 }
9956
9957 // If there is a histogram in the loop, do not just interleave without
9958 // vectorizing. The order of operations will be incorrect without the
9959 // histogram intrinsics, which are only used for recipes with VF > 1.
9960 if (!VectorizeLoop && InterleaveLoop && LVL.hasHistograms()) {
9961 LLVM_DEBUG(dbgs() << "LV: Not interleaving without vectorization due "
9962 << "to histogram operations.\n");
9963 IntDiagMsg = {
9964 "HistogramPreventsScalarInterleaving",
9965 "Unable to interleave without vectorization due to constraints on "
9966 "the order of histogram operations"};
9967 InterleaveLoop = false;
9968 }
9969
9970 // Override IC if user provided an interleave count.
9971 IC = UserIC > 0 ? UserIC : IC;
9972
9973 // Emit diagnostic messages, if any.
9974 const char *VAPassName = Hints.vectorizeAnalysisPassName();
9975 if (!VectorizeLoop && !InterleaveLoop) {
9976 // Do not vectorize or interleave the loop.
9977 ORE->emit([&]() {
9978 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
9979 L->getStartLoc(), L->getHeader())
9980 << VecDiagMsg.second;
9981 });
9982 ORE->emit([&]() {
9983 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
9984 L->getStartLoc(), L->getHeader())
9985 << IntDiagMsg.second;
9986 });
9987 return false;
9988 }
9989
9990 if (!VectorizeLoop && InterleaveLoop) {
9991 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
9992 ORE->emit([&]() {
9993 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
9994 L->getStartLoc(), L->getHeader())
9995 << VecDiagMsg.second;
9996 });
9997 } else if (VectorizeLoop && !InterleaveLoop) {
9998 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
9999 << ") in " << L->getLocStr() << '\n');
10000 ORE->emit([&]() {
10001 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
10002 L->getStartLoc(), L->getHeader())
10003 << IntDiagMsg.second;
10004 });
10005 } else if (VectorizeLoop && InterleaveLoop) {
10006 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10007 << ") in " << L->getLocStr() << '\n');
10008 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10009 }
10010
10011 // Report the vectorization decision.
10012 if (VF.Width.isScalar()) {
10013 using namespace ore;
10014 assert(IC > 1);
10015 ORE->emit([&]() {
10016 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
10017 L->getHeader())
10018 << "interleaved loop (interleaved count: "
10019 << NV("InterleaveCount", IC) << ")";
10020 });
10021 } else {
10022 // Report the vectorization decision.
10023 reportVectorization(ORE, L, VF, IC);
10024 }
10025 if (ORE->allowExtraAnalysis(LV_NAME))
10026 checkMixedPrecision(L, ORE);
10027
10028 // If we decided that it is *legal* to interleave or vectorize the loop, then
10029 // do it.
10030
10031 VPlan &BestPlan = LVP.getPlanFor(VF.Width);
10032 // Consider vectorizing the epilogue too if it's profitable.
10033 VectorizationFactor EpilogueVF =
10034 LVP.selectEpilogueVectorizationFactor(VF.Width, IC);
10035 if (EpilogueVF.Width.isVector()) {
10036 std::unique_ptr<VPlan> BestMainPlan(BestPlan.duplicate());
10037
10038 // The first pass vectorizes the main loop and creates a scalar epilogue
10039 // to be vectorized by executing the plan (potentially with a different
10040 // factor) again shortly afterwards.
10041 VPlan &BestEpiPlan = LVP.getPlanFor(EpilogueVF.Width);
10042 BestEpiPlan.getMiddleBlock()->setName("vec.epilog.middle.block");
10043 BestEpiPlan.getVectorPreheader()->setName("vec.epilog.ph");
10044 preparePlanForMainVectorLoop(*BestMainPlan, BestEpiPlan);
10045 EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1,
10046 BestEpiPlan);
10047 EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TTI, AC, EPI, &CM, BFI,
10048 PSI, Checks, *BestMainPlan);
10049 auto ExpandedSCEVs = LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF,
10050 *BestMainPlan, MainILV, DT, false);
10051 ++LoopsVectorized;
10052
10053 // Second pass vectorizes the epilogue and adjusts the control flow
10054 // edges from the first pass.
10055 EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TTI, AC, EPI, &CM,
10056 BFI, PSI, Checks, BestEpiPlan);
10057 SmallVector<Instruction *> InstsToMove = preparePlanForEpilogueVectorLoop(
10058 BestEpiPlan, L, ExpandedSCEVs, EPI, CM, *PSE.getSE());
10059 LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV, DT,
10060 true);
10061 connectEpilogueVectorLoop(BestEpiPlan, L, EPI, DT, LVL, ExpandedSCEVs,
10062 Checks, InstsToMove);
10063 ++LoopsEpilogueVectorized;
10064 } else {
10065 InnerLoopVectorizer LB(L, PSE, LI, DT, TTI, AC, VF.Width, IC, &CM, BFI, PSI,
10066 Checks, BestPlan);
10067 // TODO: Move to general VPlan pipeline once epilogue loops are also
10068 // supported.
10069 VPlanTransforms::runPass(
10070 VPlanTransforms::materializeConstantVectorTripCount, BestPlan, VF.Width,
10071 IC, PSE);
10072 LVP.addMinimumIterationCheck(BestPlan, VF.Width, IC,
10073 VF.MinProfitableTripCount);
10074
10075 LVP.executePlan(VF.Width, IC, BestPlan, LB, DT, false);
10076 ++LoopsVectorized;
10077 }
10078
10079 assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
10080 "DT not preserved correctly");
10081 assert(!verifyFunction(*F, &dbgs()));
10082
10083 return true;
10084}
10085
10086 LoopVectorizeResult LoopVectorizePass::runImpl(Function &F) {
10087
10088 // Don't attempt if
10089 // 1. the target claims to have no vector registers, and
10090 // 2. interleaving won't help ILP.
10091 //
10092 // The second condition is necessary because, even if the target has no
10093 // vector registers, loop vectorization may still enable scalar
10094 // interleaving.
10095 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
10096 TTI->getMaxInterleaveFactor(ElementCount::getFixed(1)) < 2)
10097 return LoopVectorizeResult(false, false);
10098
10099 bool Changed = false, CFGChanged = false;
10100
10101 // The vectorizer requires loops to be in simplified form.
10102 // Since simplification may add new inner loops, it has to run before the
10103 // legality and profitability checks. This means running the loop vectorizer
10104 // will simplify all loops, regardless of whether anything ends up being
10105 // vectorized.
10106 for (const auto &L : *LI)
10107 Changed |= CFGChanged |=
10108 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10109
10110 // Build up a worklist of inner-loops to vectorize. This is necessary as
10111 // the act of vectorizing or partially unrolling a loop creates new loops
10112 // and can invalidate iterators across the loops.
10113 SmallVector<Loop *, 8> Worklist;
10114
10115 for (Loop *L : *LI)
10116 collectSupportedLoops(*L, LI, ORE, Worklist);
10117
10118 LoopsAnalyzed += Worklist.size();
10119
10120 // Now walk the identified inner loops.
10121 while (!Worklist.empty()) {
10122 Loop *L = Worklist.pop_back_val();
10123
10124 // For the inner loops we actually process, form LCSSA to simplify the
10125 // transform.
10126 Changed |= formLCSSARecursively(*L, *DT, LI, SE);
10127
10128 Changed |= CFGChanged |= processLoop(L);
10129
10130 if (Changed) {
10131 LAIs->clear();
10132
10133#ifndef NDEBUG
10134 if (VerifySCEV)
10135 SE->verify();
10136#endif
10137 }
10138 }
10139
10140 // Process each loop nest in the function.
10141 return LoopVectorizeResult(Changed, CFGChanged);
10142}
10143
10144 PreservedAnalyses LoopVectorizePass::run(Function &F,
10145 FunctionAnalysisManager &AM) {
10146 LI = &AM.getResult<LoopAnalysis>(F);
10147 // There are no loops in the function. Return before computing other
10148 // expensive analyses.
10149 if (LI->empty())
10150 return PreservedAnalyses::all();
10151 SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
10152 TTI = &AM.getResult<TargetIRAnalysis>(F);
10153 DT = &AM.getResult<DominatorTreeAnalysis>(F);
10154 TLI = &AM.getResult<TargetLibraryAnalysis>(F);
10155 AC = &AM.getResult<AssumptionAnalysis>(F);
10156 DB = &AM.getResult<DemandedBitsAnalysis>(F);
10157 ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
10158 LAIs = &AM.getResult<LoopAccessAnalysis>(F);
10159 AA = &AM.getResult<AAManager>(F);
10160
10161 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
10162 PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
10163 BFI = nullptr;
10164 if (PSI && PSI->hasProfileSummary())
10165 BFI = &AM.getResult<BlockFrequencyAnalysis>(F);
10166 LoopVectorizeResult Result = runImpl(F);
10167 if (!Result.MadeAnyChange)
10168 return PreservedAnalyses::all();
10169 PreservedAnalyses PA;
10170
10171 if (isAssignmentTrackingEnabled(*F.getParent())) {
10172 for (auto &BB : F)
10173 RemoveRedundantDbgInstrs(&BB);
10174 }
10175
10176 PA.preserve<LoopAnalysis>();
10177 PA.preserve<DominatorTreeAnalysis>();
10178 PA.preserve<ScalarEvolutionAnalysis>();
10180
10181 if (Result.MadeCFGChange) {
10182 // Making CFG changes likely means a loop got vectorized. Indicate that
10183 // extra simplification passes should be run.
10184 // TODO: MadeCFGChange is not a perfect proxy. Extra passes should only
10185 // be run if runtime checks have been added.
10186 AM.getResult<ShouldRunExtraVectorPasses>(F);
10187 PA.preserve<ShouldRunExtraVectorPasses>();
10188 } else {
10189 PA.preserveSet<CFGAnalyses>();
10190 }
10191 return PA;
10192}
10193
10194 void LoopVectorizePass::printPipeline(
10195 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
10196 static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
10197 OS, MapClassName2PassName);
10198
10199 OS << '<';
10200 OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
10201 OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
10202 OS << '>';
10203}
for(const MachineOperand &MO :llvm::drop_begin(OldMI.operands(), Desc.getNumOperands()))
static unsigned getIntrinsicID(const SDNode *N)
unsigned RegSize
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
aarch64 promote const
AMDGPU Lower Kernel Arguments
AMDGPU Register Bank Select
Rewrite undef for PHI
This file implements a class to represent arbitrary precision integral constant values and operations...
@ PostInc
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static bool isEqual(const Function &Caller, const Function &Callee)
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This is the interface for LLVM's primary stateless and local alias analysis.
static bool IsEmptyBlock(MachineBasicBlock *MBB)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
static cl::opt< IntrinsicCostStrategy > IntrinsicCost("intrinsic-cost-strategy", cl::desc("Costing strategy for intrinsic instructions"), cl::init(IntrinsicCostStrategy::InstructionCost), cl::values(clEnumValN(IntrinsicCostStrategy::InstructionCost, "instruction-cost", "Use TargetTransformInfo::getInstructionCost"), clEnumValN(IntrinsicCostStrategy::IntrinsicCost, "intrinsic-cost", "Use TargetTransformInfo::getIntrinsicInstrCost"), clEnumValN(IntrinsicCostStrategy::TypeBasedIntrinsicCost, "type-based-intrinsic-cost", "Calculate the intrinsic cost based only on argument types")))
static InstructionCost getCost(Instruction &Inst, TTI::TargetCostKind CostKind, TargetTransformInfo &TTI, TargetLibraryInfo &TLI)
Definition CostModel.cpp:74
This file defines DenseMapInfo traits for DenseMap.
This file defines the DenseMap class.
#define DEBUG_TYPE
This is the interface for a simple mod/ref and alias analysis over globals.
Hexagon Common GEP
#define _
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This defines the Use class.
static bool hasNoUnsignedWrap(BinaryOperator &I)
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
static std::pair< Value *, APInt > getMask(Value *WideMask, unsigned Factor, ElementCount LeafValueEC)
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Legalize the Machine IR a function s Machine IR
Definition Legalizer.cpp:80
static cl::opt< unsigned, true > VectorizationFactor("force-vector-width", cl::Hidden, cl::desc("Sets the SIMD width. Zero is autoselect."), cl::location(VectorizerParams::VectorizationFactor))
This header provides classes for managing per-loop analyses.
static const char * VerboseDebug
#define LV_NAME
This file defines the LoopVectorizationLegality class.
This file provides a LoopVectorizationPlanner class.
static void collectSupportedLoops(Loop &L, LoopInfo *LI, OptimizationRemarkEmitter *ORE, SmallVectorImpl< Loop * > &V)
static cl::opt< unsigned > EpilogueVectorizationMinVF("epilogue-vectorization-minimum-VF", cl::Hidden, cl::desc("Only loops with vectorization factor equal to or larger than " "the specified value are considered for epilogue vectorization."))
static cl::opt< unsigned > EpilogueVectorizationForceVF("epilogue-vectorization-force-VF", cl::init(1), cl::Hidden, cl::desc("When epilogue vectorization is enabled, and a value greater than " "1 is specified, forces the given VF for all applicable epilogue " "loops."))
static Type * maybeVectorizeType(Type *Ty, ElementCount VF)
static ElementCount determineVPlanVF(const TargetTransformInfo &TTI, LoopVectorizationCostModel &CM)
static ElementCount getSmallConstantTripCount(ScalarEvolution *SE, const Loop *L)
A version of ScalarEvolution::getSmallConstantTripCount that returns an ElementCount to include loops...
static cl::opt< unsigned > VectorizeMemoryCheckThreshold("vectorize-memory-check-threshold", cl::init(128), cl::Hidden, cl::desc("The maximum allowed number of runtime memory checks"))
static void preparePlanForMainVectorLoop(VPlan &MainPlan, VPlan &EpiPlan)
Prepare MainPlan for vectorizing the main vector loop during epilogue vectorization.
static cl::opt< unsigned > TinyTripCountVectorThreshold("vectorizer-min-trip-count", cl::init(16), cl::Hidden, cl::desc("Loops with a constant trip count that is smaller than this " "value are vectorized only if no scalar iteration overheads " "are incurred."))
Loops with a known constant trip count below this number are vectorized only if no scalar iteration o...
static void debugVectorizationMessage(const StringRef Prefix, const StringRef DebugMsg, Instruction *I)
Write a DebugMsg about vectorization to the debug output stream.
static cl::opt< bool > EnableCondStoresVectorization("enable-cond-stores-vec", cl::init(true), cl::Hidden, cl::desc("Enable if predication of stores during vectorization."))
static void legacyCSE(BasicBlock *BB)
FIXME: This legacy common-subexpression-elimination routine is scheduled for removal,...
static VPIRBasicBlock * replaceVPBBWithIRVPBB(VPBasicBlock *VPBB, BasicBlock *IRBB, VPlan *Plan=nullptr)
Replace VPBB with a VPIRBasicBlock wrapping IRBB.
static Value * emitTransformedIndex(IRBuilderBase &B, Value *Index, Value *StartValue, Value *Step, InductionDescriptor::InductionKind InductionKind, const BinaryOperator *InductionBinOp)
Compute the transformed value of Index at offset StartValue using step StepValue.
static DebugLoc getDebugLocFromInstOrOperands(Instruction *I)
Look for a meaningful debug location on the instruction or its operands.
static Value * createInductionAdditionalBypassValues(PHINode *OrigPhi, const InductionDescriptor &II, IRBuilder<> &BypassBuilder, const SCEV2ValueTy &ExpandedSCEVs, Value *MainVectorTripCount, Instruction *OldInduction)
static void fixReductionScalarResumeWhenVectorizingEpilog(VPPhi *EpiResumePhiR, PHINode &EpiResumePhi, BasicBlock *BypassBlock)
static Value * getStartValueFromReductionResult(VPInstruction *RdxResult)
static cl::opt< bool > ForceTargetSupportsScalableVectors("force-target-supports-scalable-vectors", cl::init(false), cl::Hidden, cl::desc("Pretend that scalable vectors are supported, even if the target does " "not support them. This flag should only be used for testing."))
static bool useActiveLaneMaskForControlFlow(TailFoldingStyle Style)
static cl::opt< bool > EnableEarlyExitVectorization("enable-early-exit-vectorization", cl::init(true), cl::Hidden, cl::desc("Enable vectorization of early exit loops with uncountable exits."))
static cl::opt< bool > ConsiderRegPressure("vectorizer-consider-reg-pressure", cl::init(false), cl::Hidden, cl::desc("Discard VFs if their register pressure is too high."))
static unsigned estimateElementCount(ElementCount VF, std::optional< unsigned > VScale)
This function attempts to return a value that represents the ElementCount at runtime.
static constexpr uint32_t MinItersBypassWeights[]
static cl::opt< unsigned > ForceTargetNumScalarRegs("force-target-num-scalar-regs", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's number of scalar registers."))
static cl::opt< bool > UseWiderVFIfCallVariantsPresent("vectorizer-maximize-bandwidth-for-vector-calls", cl::init(true), cl::Hidden, cl::desc("Try wider VFs if they enable the use of vector variants"))
static std::optional< unsigned > getMaxVScale(const Function &F, const TargetTransformInfo &TTI)
static cl::opt< unsigned > SmallLoopCost("small-loop-cost", cl::init(20), cl::Hidden, cl::desc("The cost of a loop that is considered 'small' by the interleaver."))
static void connectEpilogueVectorLoop(VPlan &EpiPlan, Loop *L, EpilogueLoopVectorizationInfo &EPI, DominatorTree *DT, LoopVectorizationLegality &LVL, DenseMap< const SCEV *, Value * > &ExpandedSCEVs, GeneratedRTChecks &Checks, ArrayRef< Instruction * > InstsToMove)
Connect the epilogue vector loop generated for EpiPlan to the main vector.
static bool planContainsAdditionalSimplifications(VPlan &Plan, VPCostContext &CostCtx, Loop *TheLoop, ElementCount VF)
Return true if the original loop \ TheLoop contains any instructions that do not have corresponding r...
static cl::opt< unsigned > ForceTargetNumVectorRegs("force-target-num-vector-regs", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's number of vector registers."))
static bool isExplicitVecOuterLoop(Loop *OuterLp, OptimizationRemarkEmitter *ORE)
static cl::opt< bool > EnableIndVarRegisterHeur("enable-ind-var-reg-heur", cl::init(true), cl::Hidden, cl::desc("Count the induction variable only once when interleaving"))
static cl::opt< TailFoldingStyle > ForceTailFoldingStyle("force-tail-folding-style", cl::desc("Force the tail folding style"), cl::init(TailFoldingStyle::None), cl::values(clEnumValN(TailFoldingStyle::None, "none", "Disable tail folding"), clEnumValN(TailFoldingStyle::Data, "data", "Create lane mask for data only, using active.lane.mask intrinsic"), clEnumValN(TailFoldingStyle::DataWithoutLaneMask, "data-without-lane-mask", "Create lane mask with compare/stepvector"), clEnumValN(TailFoldingStyle::DataAndControlFlow, "data-and-control", "Create lane mask using active.lane.mask intrinsic, and use " "it for both data and control flow"), clEnumValN(TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck, "data-and-control-without-rt-check", "Similar to data-and-control, but remove the runtime check"), clEnumValN(TailFoldingStyle::DataWithEVL, "data-with-evl", "Use predicated EVL instructions for tail folding. If EVL " "is unsupported, fallback to data-without-lane-mask.")))
static cl::opt< bool > EnableEpilogueVectorization("enable-epilogue-vectorization", cl::init(true), cl::Hidden, cl::desc("Enable vectorization of epilogue loops."))
static ScalarEpilogueLowering getScalarEpilogueLowering(Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, LoopVectorizationLegality &LVL, InterleavedAccessInfo *IAI)
static cl::opt< bool > PreferPredicatedReductionSelect("prefer-predicated-reduction-select", cl::init(false), cl::Hidden, cl::desc("Prefer predicating a reduction operation over an after loop select."))
static VPWidenIntOrFpInductionRecipe * createWidenInductionRecipes(PHINode *Phi, Instruction *PhiOrTrunc, VPValue *Start, const InductionDescriptor &IndDesc, VPlan &Plan, ScalarEvolution &SE, Loop &OrigLoop)
Creates a VPWidenIntOrFpInductionRecipe for Phi.
static cl::opt< bool > PreferInLoopReductions("prefer-inloop-reductions", cl::init(false), cl::Hidden, cl::desc("Prefer in-loop vector reductions, " "overriding the target's preference."))
static SmallVector< Instruction * > preparePlanForEpilogueVectorLoop(VPlan &Plan, Loop *L, const SCEV2ValueTy &ExpandedSCEVs, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel &CM, ScalarEvolution &SE)
Prepare Plan for vectorizing the epilogue loop.
static cl::opt< bool > EnableLoadStoreRuntimeInterleave("enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, cl::desc("Enable runtime interleaving until load/store ports are saturated"))
static cl::opt< bool > VPlanBuildStressTest("vplan-build-stress-test", cl::init(false), cl::Hidden, cl::desc("Build VPlan for every supported loop nest in the function and bail " "out right after the build (stress test the VPlan H-CFG construction " "in the VPlan-native vectorization path)."))
static bool hasIrregularType(Type *Ty, const DataLayout &DL)
A helper function that returns true if the given type is irregular.
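As a hedged sketch of what "irregular" means here (the in-tree check may differ): a type whose in-register size differs from its store size cannot be widened by simply loading or storing consecutive elements.

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Type.h"
    // Sketch: i1 (1 bit, stored as a byte) and i48 are irregular in this sense.
    static bool hasIrregularTypeSketch(llvm::Type *Ty, const llvm::DataLayout &DL) {
      return !DL.typeSizeEqualsStoreSize(Ty);
    }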
static cl::opt< bool > LoopVectorizeWithBlockFrequency("loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, cl::desc("Enable the use of the block frequency analysis to access PGO " "heuristics minimizing code growth in cold regions and being more " "aggressive in hot regions."))
static std::optional< ElementCount > getSmallBestKnownTC(PredicatedScalarEvolution &PSE, Loop *L, bool CanUseConstantMax=true)
Returns the "best known" trip count, which is either a valid positive trip count or std::nullopt when an estimate cannot be made.
static Value * getExpandedStep(const InductionDescriptor &ID, const SCEV2ValueTy &ExpandedSCEVs)
Return the expanded step for ID using ExpandedSCEVs to look up SCEV expansion results.
static bool useActiveLaneMask(TailFoldingStyle Style)
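A hedged sketch of this predicate, derived from the tail-folding styles listed in the ForceTailFoldingStyle option above: only the styles that create their lane mask via the active.lane.mask intrinsic should return true.

    // Sketch; TailFoldingStyle is the enum used by the option above.
    static bool useActiveLaneMaskSketch(TailFoldingStyle Style) {
      return Style == TailFoldingStyle::Data ||
             Style == TailFoldingStyle::DataAndControlFlow ||
             Style == TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck;
    }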
static bool hasReplicatorRegion(VPlan &Plan)
static bool isIndvarOverflowCheckKnownFalse(const LoopVectorizationCostModel *Cost, ElementCount VF, std::optional< unsigned > UF=std::nullopt)
For the given VF and UF and maximum trip count computed for the loop, return whether the induction variable overflow check is known to evaluate to false.
static void addFullyUnrolledInstructionsToIgnore(Loop *L, const LoopVectorizationLegality::InductionList &IL, SmallPtrSetImpl< Instruction * > &InstsToIgnore)
Knowing that loop L executes a single vector iteration, add to InstsToIgnore the instructions that will get simplified away and should therefore be ignored by the cost model.
static cl::opt< PreferPredicateTy::Option > PreferPredicateOverEpilogue("prefer-predicate-over-epilogue", cl::init(PreferPredicateTy::ScalarEpilogue), cl::Hidden, cl::desc("Tail-folding and predication preferences over creating a scalar " "epilogue loop."), cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue, "scalar-epilogue", "Don't tail-predicate loops, create scalar epilogue"), clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue, "predicate-else-scalar-epilogue", "prefer tail-folding, create scalar epilogue if tail " "folding fails."), clEnumValN(PreferPredicateTy::PredicateOrDontVectorize, "predicate-dont-vectorize", "prefers tail-folding, don't attempt vectorization if " "tail-folding fails.")))
static cl::opt< bool > EnableInterleavedMemAccesses("enable-interleaved-mem-accesses", cl::init(false), cl::Hidden, cl::desc("Enable vectorization on interleaved memory accesses in a loop"))
static cl::opt< bool > EnableMaskedInterleavedMemAccesses("enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden, cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"))
An interleave-group may need masking if it resides in a block that needs predication, or in order to mask away gaps.
static cl::opt< bool > ForceOrderedReductions("force-ordered-reductions", cl::init(false), cl::Hidden, cl::desc("Enable the vectorisation of loops with in-order (strict) " "FP reductions"))
static const SCEV * getAddressAccessSCEV(Value *Ptr, LoopVectorizationLegality *Legal, PredicatedScalarEvolution &PSE, const Loop *TheLoop)
Gets the address access SCEV after verifying that the access pattern is loop invariant except for the induction variable.
static cl::opt< cl::boolOrDefault > ForceSafeDivisor("force-widen-divrem-via-safe-divisor", cl::Hidden, cl::desc("Override cost based safe divisor widening for div/rem instructions"))
static InstructionCost calculateEarlyExitCost(VPCostContext &CostCtx, VPlan &Plan, ElementCount VF)
For loops with uncountable early exits, find the cost of doing work when exiting the loop early, such as calculating the final exit values of variables used outside the loop.
static cl::opt< unsigned > ForceTargetMaxVectorInterleaveFactor("force-target-max-vector-interleave", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's max interleave factor for " "vectorized loops."))
static bool processLoopInVPlanNativePath(Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints, LoopVectorizationRequirements &Requirements)
static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI)
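A hedged sketch of how such a flag/target-hook pair typically composes, using the EnableMaskedInterleavedMemAccesses option declared above: an explicit command-line occurrence overrides the target's answer.

    // Sketch; assumes the cl::opt above is visible in this translation unit.
    static bool useMaskedInterleavedAccessesSketch(const llvm::TargetTransformInfo &TTI) {
      if (!EnableMaskedInterleavedMemAccesses.getNumOccurrences())
        return TTI.enableMaskedInterleavedAccessVectorization();
      return EnableMaskedInterleavedMemAccesses;
    }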
static cl::opt< unsigned > NumberOfStoresToPredicate("vectorize-num-stores-pred", cl::init(1), cl::Hidden, cl::desc("Max number of stores to be predicated behind an if."))
The number of stores in a loop that are allowed to need predication.
static cl::opt< unsigned > MaxNestedScalarReductionIC("max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, cl::desc("The maximum interleave count to use when interleaving a scalar " "reduction in a nested loop."))
static cl::opt< unsigned > ForceTargetMaxScalarInterleaveFactor("force-target-max-scalar-interleave", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's max interleave factor for " "scalar loops."))
static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE)
static bool willGenerateVectors(VPlan &Plan, ElementCount VF, const TargetTransformInfo &TTI)
Check if any recipe of Plan will generate a vector value, which will be assigned a vector register.
static bool isOutsideLoopWorkProfitable(GeneratedRTChecks &Checks, VectorizationFactor &VF, Loop *L, PredicatedScalarEvolution &PSE, VPCostContext &CostCtx, VPlan &Plan, ScalarEpilogueLowering SEL, std::optional< unsigned > VScale)
This function determines whether or not it's still profitable to vectorize the loop given the extra work we have to do outside of it, such as runtime checks.
static void fixScalarResumeValuesFromBypass(BasicBlock *BypassBlock, Loop *L, VPlan &BestEpiPlan, LoopVectorizationLegality &LVL, const SCEV2ValueTy &ExpandedSCEVs, Value *MainVectorTripCount)
static cl::opt< bool > MaximizeBandwidth("vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden, cl::desc("Maximize bandwidth when selecting vectorization factor which " "will be determined by the smallest type in loop."))
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop, Instruction *I, DebugLoc DL={})
Create an analysis remark that explains why vectorization failed.
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
This file implements a map that provides insertion order iteration.
This file contains the declarations for metadata subclasses.
#define T
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
#define P(N)
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static BinaryOperator * CreateMul(Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore, Value *FlagsOp)
static BinaryOperator * CreateAdd(Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore, Value *FlagsOp)
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static InstructionCost getScalarizationOverhead(const TargetTransformInfo &TTI, Type *ScalarTy, VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={})
This is similar to TargetTransformInfo::getScalarizationOverhead, but if ScalarTy is a FixedVectorType...
This file contains some templates that are useful if you are working with the STL at all.
#define OP(OPC)
Definition Instruction.h:46
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metrics from passes.
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
#define DEBUG_WITH_TYPE(TYPE,...)
DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.
Definition Debug.h:72
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
This pass exposes codegen information to IR-level passes.
LocallyHashedType DenseMapInfo< LocallyHashedType >::Empty
This file implements the TypeSwitch template, which mimics a switch() statement whose cases are type ...
This file contains the declarations of different VPlan-related auxiliary helpers.
This file provides utility VPlan to VPlan transformations.
This file declares the class VPlanVerifier, which contains utility functions to check the consistency of a VPlan.
This file contains the declarations of the Vectorization Plan base classes.
static const char PassName[]
Value * RHS
Value * LHS
static const uint32_t IV[8]
Definition blake3_impl.h:83
A manager for alias analyses.
Class for arbitrary precision integers.
Definition APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:234
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1540
unsigned getActiveBits() const
Compute the number of active bits in the value.
Definition APInt.h:1512
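A self-contained example exercising the APInt helpers above:

    #include "llvm/ADT/APInt.h"
    void apintExample() {
      llvm::APInt Ones = llvm::APInt::getAllOnes(8); // 8-bit value 0xFF
      unsigned Active = Ones.getActiveBits();        // 8 active bits
      uint64_t Val = Ones.getZExtValue();            // zero-extends to 255
      (void)Active; (void)Val;
    }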
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
const T & back() const
back - Get the last element.
Definition ArrayRef.h:156
ArrayRef< T > take_front(size_t N=1) const
Return a copy of *this with only the first N elements.
Definition ArrayRef.h:224
size_t size() const
size - Get the array size.
Definition ArrayRef.h:147
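A short example of the ArrayRef helpers above:

    #include "llvm/ADT/ArrayRef.h"
    int arrayRefExample() {
      int Storage[] = {1, 2, 3, 4};
      llvm::ArrayRef<int> A(Storage);
      llvm::ArrayRef<int> Front = A.take_front(2);      // {1, 2}
      return static_cast<int>(Front.size()) + A.back(); // 2 + 4 == 6
    }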
A function analysis which provides an AssumptionCache.
A cache of @llvm.assume calls within a function.
LLVM_ABI unsigned getVScaleRangeMin() const
Returns the minimum value for the vscale_range attribute.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI instruction.
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
LLVM_ABI const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed, or null if the block is not well formed.
Definition BasicBlock.h:233
BinaryOps getOpcode() const
Definition InstrTypes.h:374
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequencies.
Conditional or Unconditional Branch instruction.
bool isConditional() const
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
BasicBlock * getSuccessor(unsigned i) const
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
bool isNoBuiltin() const
Return true if the call should not be treated as a call to a builtin.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signature does not match the call signature.
Value * getArgOperand(unsigned i) const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition InstrTypes.h:982
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
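For example, inverting an unsigned comparison with the static form of getInversePredicate:

    #include "llvm/IR/InstrTypes.h"
    // ICMP_ULT inverts to ICMP_UGE, per the mapping described above.
    llvm::CmpInst::Predicate invertULT() {
      return llvm::CmpInst::getInversePredicate(llvm::CmpInst::ICMP_ULT);
    }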
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign information.
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
A debug info location.
Definition DebugLoc.h:124
static DebugLoc getTemporary()
Definition DebugLoc.h:161
static DebugLoc getUnknown()
Definition DebugLoc.h:162
An analysis that produces DemandedBits for a function.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exists.
Definition DenseMap.h:194
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:167
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:237
iterator end()
Definition DenseMap.h:81
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition DenseMap.h:158
void insert_range(Range &&R)
Inserts range of 'std::pair<KeyT, ValueT>' values into the map.
Definition DenseMap.h:275
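A short example of the DenseMap operations above:

    #include "llvm/ADT/DenseMap.h"
    void denseMapExample() {
      llvm::DenseMap<int, int> M;
      M.try_emplace(1, 10);      // inserts only if the key is absent
      bool Has = M.contains(1);  // true
      int Missing = M.lookup(2); // absent key: default-constructed 0
      (void)Has; (void)Missing;
    }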
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
Analysis pass which computes a DominatorTree.
Definition Dominators.h:284
void changeImmediateDominator(DomTreeNodeBase< NodeT > *N, DomTreeNodeBase< NodeT > *NewIDom)
changeImmediateDominator - This method is used to update the dominator tree information when a node's immediate dominator changes.
void eraseNode(NodeT *BB)
eraseNode - Removes a node from the dominator tree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:165
constexpr bool isVector() const
One or more elements.
Definition TypeSize.h:325
static constexpr ElementCount getScalable(ScalarTy MinVal)
Definition TypeSize.h:313
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:310
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
Definition TypeSize.h:316
constexpr bool isScalar() const
Exactly one element.
Definition TypeSize.h:321
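The factory functions mirror IR vector types, e.g. <8 x i32> versus <vscale x 4 x i32>:

    #include "llvm/Support/TypeSize.h"
    llvm::ElementCount pickVF(bool Scalable) {
      return Scalable ? llvm::ElementCount::getScalable(4) // vscale x 4 lanes
                      : llvm::ElementCount::getFixed(8);   // exactly 8 lanes
    }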
BasicBlock * createVectorizedLoopSkeleton() final
Implements the interface for creating a vectorized skeleton using the epilogue loop strategy (i.e., the second pass of vplan execution).
EpilogueVectorizerEpilogueLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &Checks, VPlan &Plan)
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace information is requested.
A specialized derived class of inner loop vectorizer that performs vectorization of main loops in the process of vectorizing loops and their epilogues.
void introduceCheckBlockInVPlan(BasicBlock *CheckIRBB)
Introduces a new VPIRBasicBlock for CheckIRBB to Plan between the vector preheader and its predecessor.
BasicBlock * emitIterationCountCheck(BasicBlock *VectorPH, BasicBlock *Bypass, bool ForEpilogue)
Emits an iteration count bypass check once for the main loop (when ForEpilogue is false) and once for the epilogue loop (when ForEpilogue is true).
EpilogueVectorizerMainLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &Check, VPlan &Plan)
Value * createIterationCountCheck(BasicBlock *VectorPH, ElementCount VF, unsigned UF) const
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace information is requested.
BasicBlock * createVectorizedLoopSkeleton() final
Implements the interface for creating a vectorized skeleton using the main loop strategy (i....
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:22
Class to represent function types.
param_iterator param_begin() const
param_iterator param_end() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:209
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition Function.cpp:762
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:727
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags none()
void applyUpdates(ArrayRef< UpdateT > Updates)
Submit updates to all available trees.
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Definition IRBuilder.h:345
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2788
A struct for saving information about induction variables.
const SCEV * getStep() const
InductionKind
This enum represents the kinds of inductions that we support.
@ IK_NoInduction
Not an induction variable.
@ IK_FpInduction
Floating point induction variable.
@ IK_PtrInduction
Pointer induction var. Step = C.
@ IK_IntInduction
Integer induction variable. Step = C.
const SmallVectorImpl< Instruction * > & getCastInsts() const
Returns a reference to the type cast instructions in the induction update chain, that are redundant when guarded with a runtime SCEV overflow check.
Value * getStartValue() const
InnerLoopAndEpilogueVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &Checks, VPlan &Plan, ElementCount VecWidth, ElementCount MinProfitableTripCount, unsigned UnrollFactor)
EpilogueLoopVectorizationInfo & EPI
Holds and updates state information required to vectorize the main loop and its epilogue in two separate passes.
InnerLoopVectorizer vectorizes loops which contain only one basic block to a specified vectorization factor (VF).
virtual void printDebugTracesAtStart()
Allow subclasses to override and print debug traces before/after vplan execution, when trace information is requested.
Value * TripCount
Trip count of the original loop.
const TargetTransformInfo * TTI
Target Transform Info.
LoopVectorizationCostModel * Cost
The profitablity analysis.
BlockFrequencyInfo * BFI
BFI and PSI are used to check for profile guided size optimizations.
Value * getTripCount() const
Returns the original loop trip count.
friend class LoopVectorizationPlanner
PredicatedScalarEvolution & PSE
A wrapper around ScalarEvolution used to add runtime SCEV checks.
LoopInfo * LI
Loop Info.
ProfileSummaryInfo * PSI
DominatorTree * DT
Dominator Tree.
void setTripCount(Value *TC)
Used to set the trip count after ILV's construction and after the preheader block has been executed.
void fixVectorizedLoop(VPTransformState &State)
Fix the vectorized code, taking care of header phis, and more.
virtual BasicBlock * createVectorizedLoopSkeleton()
Creates a basic block for the scalar preheader.
virtual void printDebugTracesAtEnd()
AssumptionCache * AC
Assumption Cache.
InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, ElementCount VecWidth, unsigned UnrollFactor, LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks, VPlan &Plan)
IRBuilder Builder
The builder that we use.
void fixNonInductionPHIs(VPTransformState &State)
Fix the non-induction PHIs in Plan.
VPBasicBlock * VectorPHVPBB
The vector preheader block of Plan, used as target for check blocks introduced during skeleton creation.
unsigned UF
The vectorization unroll factor to use.
GeneratedRTChecks & RTChecks
Structure to hold information about generated runtime checks, responsible for cleaning the checks if vectorization turns out unprofitable.
virtual ~InnerLoopVectorizer()=default
ElementCount VF
The vectorization SIMD factor to use.
Loop * OrigLoop
The original loop.
BasicBlock * createScalarPreheader(StringRef Prefix)
Create and return a new IR basic block for the scalar preheader whose name is prefixed with Prefix.
InstSimplifyFolder - Use InstructionSimplify to fold operations to existing values.
static InstructionCost getInvalid(CostType Val=0)
static InstructionCost getMax()
CostType getValue() const
This function is intended to be used as sparingly as possible, since the class provides the full range...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not have a module.
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos lives in, right before MovePos.
bool isBinaryOp() const
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other instructions.
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
const char * getOpcodeName() const
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:319
LLVM_ABI APInt getMask() const
For example, this is 0xFF for an 8 bit integer, 0xFFFF for i16, etc.
Definition Type.cpp:343
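For instance, combining IntegerType::get with getMask as documented above:

    #include "llvm/ADT/APInt.h"
    #include "llvm/IR/DerivedTypes.h"
    // i16's mask is 0xFFFF.
    llvm::APInt maskForI16(llvm::LLVMContext &Ctx) {
      return llvm::IntegerType::get(Ctx, 16)->getMask();
    }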
The group of interleaved loads/stores sharing the same stride and close to each other.
uint32_t getFactor() const
InstTy * getMember(uint32_t Index) const
Get the member with the given index Index.
InstTy * getInsertPos() const
uint32_t getNumMembers() const
Drive the analysis of interleaved memory accesses in the loop.
bool requiresScalarEpilogue() const
Returns true if an interleaved group that may access memory out-of-bounds requires a scalar epilogue iteration for correctness.
LLVM_ABI void analyzeInterleaving(bool EnableMaskedInterleavedGroup)
Analyze the interleaved accesses and collect them in interleave groups.
An instruction for reading from memory.
Type * getPointerOperandType() const
This analysis provides dependence information for the memory accesses of a loop.
Drive the analysis of memory accesses in the loop.
const RuntimePointerChecking * getRuntimePointerChecking() const
unsigned getNumRuntimePointerChecks() const
Number of memchecks required to prove independence of otherwise may-alias pointers.
Analysis pass that exposes the LoopInfo for a function.
Definition LoopInfo.h:569
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
void getExitingBlocks(SmallVectorImpl< BlockT * > &ExitingBlocks) const
Return all blocks inside the loop that have successors outside of the loop.
BlockT * getHeader() const
iterator_range< block_iterator > blocks() const
BlockT * getLoopPreheader() const
If there is a preheader for this loop, return it.
ArrayRef< BlockT * > getBlocks() const
Get a list of the basic blocks which make up this loop.
Store the result of a depth first search within basic blocks contained by a single loop.
RPOIterator beginRPO() const
Reverse iterate over the cached postorder blocks.
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
RPOIterator endRPO() const
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
void removeBlock(BlockT *BB)
This method completely removes BB from all data structures, including all of the Loop objects it is n...
LoopVectorizationCostModel - estimates the expected speedups due to vectorization.
SmallPtrSet< Type *, 16 > ElementTypesInLoop
All element types found in the loop.
bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment, unsigned AddressSpace) const
Returns true if the target machine supports masked load operation for the given DataType and kind of access to Ptr.
LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, LoopVectorizationLegality *Legal, const TargetTransformInfo &TTI, const TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, const Function *F, const LoopVectorizeHints *Hints, InterleavedAccessInfo &IAI, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
void collectElementTypesForWidening()
Collect all element types in the loop for which widening is needed.
bool canVectorizeReductions(ElementCount VF) const
Returns true if the target machine supports all of the reduction variables found for the given VF.
bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment, unsigned AddressSpace) const
Returns true if the target machine supports masked store operation for the given DataType and kind of access to Ptr.
bool isEpilogueVectorizationProfitable(const ElementCount VF, const unsigned IC) const
Returns true if epilogue vectorization is considered profitable, and false otherwise.
bool isPredicatedInst(Instruction *I) const
Returns true if I is an instruction that needs to be predicated at runtime.
void collectValuesToIgnore()
Collect values we want to ignore in the cost model.
void collectInLoopReductions()
Split reductions into those that happen in the loop, and those that happen outside.
std::pair< unsigned, unsigned > getSmallestAndWidestTypes()
bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be uniform after vectorization.
void collectNonVectorizedAndSetWideningDecisions(ElementCount VF)
Collect values that will not be widened, including Uniforms, Scalars, and Instructions to Scalarize f...
PredicatedScalarEvolution & PSE
Predicated scalar evolution analysis.
const LoopVectorizeHints * Hints
Loop Vectorize Hint.
std::optional< unsigned > getMaxSafeElements() const
Return maximum safe number of elements to be processed per vector iteration, which do not prevent store-load forwarding.
const TargetTransformInfo & TTI
Vector target information.
LoopVectorizationLegality * Legal
Vectorization legality.
std::optional< InstructionCost > getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy) const
Return the cost of instructions in an inloop reduction pattern, if I is part of that pattern.
InstructionCost getInstructionCost(Instruction *I, ElementCount VF)
Returns the execution time cost of an instruction for a given vector width.
DemandedBits * DB
Demanded bits analysis.
bool interleavedAccessCanBeWidened(Instruction *I, ElementCount VF) const
Returns true if I is a memory instruction in an interleaved-group of memory accesses that can be vectorized with factor VF.
const TargetLibraryInfo * TLI
Target Library Info.
bool memoryInstructionCanBeWidened(Instruction *I, ElementCount VF)
Returns true if I is a memory instruction with consecutive memory access that can be widened.
const InterleaveGroup< Instruction > * getInterleavedAccessGroup(Instruction *Instr) const
Get the interleaved access group that Instr belongs to.
InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const
Estimate cost of an intrinsic call instruction CI if it were vectorized with factor VF.
bool OptForSize
Whether this loop should be optimized for size based on function attribute or profile information.
bool useMaxBandwidth(TargetTransformInfo::RegisterKind RegKind)
bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be scalar after vectorization.
bool isOptimizableIVTruncate(Instruction *I, ElementCount VF)
Return True if instruction I is an optimizable truncate whose operand is an induction variable.
FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC)
bool shouldConsiderRegPressureForVF(ElementCount VF)
Loop * TheLoop
The loop that we evaluate.
TTI::TargetCostKind CostKind
The kind of cost that we are calculating.
TailFoldingStyle getTailFoldingStyle(bool IVUpdateMayOverflow=true) const
Returns the TailFoldingStyle that is best for the current loop.
InterleavedAccessInfo & InterleaveInfo
The interleave access information contains groups of interleaved accesses with the same stride and close to each other.
SmallPtrSet< const Value *, 16 > ValuesToIgnore
Values to ignore in the cost model.
void setVectorizedCallDecision(ElementCount VF)
A call may be vectorized in different ways depending on whether we have vectorized variants available and whether the target supports masking.
void invalidateCostModelingDecisions()
Invalidates decisions already taken by the cost model.
bool isAccessInterleaved(Instruction *Instr) const
Check if Instr belongs to any interleaved access group.
bool selectUserVectorizationFactor(ElementCount UserVF)
Setup cost-based decisions for user vectorization factor.
std::optional< unsigned > getVScaleForTuning() const
Return the value of vscale used for tuning the cost model.
OptimizationRemarkEmitter * ORE
Interface to emit optimization remarks.
LoopInfo * LI
Loop Info analysis.
bool requiresScalarEpilogue(bool IsVectorizing) const
Returns true if we're required to use a scalar epilogue for at least the final iteration of the original loop.
SmallPtrSet< const Value *, 16 > VecValuesToIgnore
Values to ignore in the cost model when VF > 1.
bool isInLoopReduction(PHINode *Phi) const
Returns true if the Phi is part of an inloop reduction.
bool isProfitableToScalarize(Instruction *I, ElementCount VF) const
void setWideningDecision(const InterleaveGroup< Instruction > *Grp, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for interleaving group Grp and vector width VF.
const MapVector< Instruction *, uint64_t > & getMinimalBitwidths() const
CallWideningDecision getCallWideningDecision(CallInst *CI, ElementCount VF) const
bool isLegalGatherOrScatter(Value *V, ElementCount VF)
Returns true if the target machine can represent V as a masked gather or scatter operation.
bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const
bool shouldConsiderInvariant(Value *Op)
Returns true if Op should be considered invariant and if it is trivially hoistable.
bool foldTailByMasking() const
Returns true if all loop blocks should be masked to fold tail loop.
bool foldTailWithEVL() const
Returns true if VP intrinsics with explicit vector length support should be generated in the tail folded loop.
bool usePredicatedReductionSelect() const
Returns true if the predicated reduction select should be used to set the incoming value for the reduction phi.
bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const
Returns true if the instructions in this block require predication for any reason, e.g. because tail folding now requires a predicate, or because the block in the original loop was predicated.
void setCallWideningDecision(CallInst *CI, ElementCount VF, InstWidening Kind, Function *Variant, Intrinsic::ID IID, std::optional< unsigned > MaskPos, InstructionCost Cost)
void setTailFoldingStyles(bool IsScalableVF, unsigned UserIC)
Selects and saves the TailFoldingStyle for two cases: whether or not the IV update may overflow.
AssumptionCache * AC
Assumption cache.
void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for instruction I and vector width VF.
InstWidening
Decision that was taken during cost calculation for memory instruction.
bool isScalarWithPredication(Instruction *I, ElementCount VF) const
Returns true if I is an instruction which requires predication and for which our chosen predication strategy is scalarization.
InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF) const
Estimate cost of a call instruction CI if it were vectorized with factor VF.
bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) const
Returns true if we should use strict in-order reductions for the given RdxDesc.
std::pair< InstructionCost, InstructionCost > getDivRemSpeculationCost(Instruction *I, ElementCount VF) const
Return the costs for our two available strategies for lowering a div/rem operation which requires speculating at least one lane.
bool isDivRemScalarWithPredication(InstructionCost ScalarCost, InstructionCost SafeDivisorCost) const
Given costs for both strategies, return true if the scalar predication lowering should be used for di...
InstructionCost expectedCost(ElementCount VF)
Returns the expected execution cost.
void setCostBasedWideningDecision(ElementCount VF)
Memory access instruction may be vectorized in more than one way.
InstWidening getWideningDecision(Instruction *I, ElementCount VF) const
Return the cost model decision for the given instruction I and vector width VF.
FixedScalableVFPair MaxPermissibleVFWithoutMaxBW
The highest VF possible for this loop, without using MaxBandwidth.
bool isScalarEpilogueAllowed() const
Returns true if a scalar epilogue is not allowed due to optsize or a loop hint annotation.
InstructionCost getWideningCost(Instruction *I, ElementCount VF)
Return the vectorization cost for the given instruction I and vector width VF.
void collectInstsToScalarize(ElementCount VF)
Collects the instructions to scalarize for each predicated instruction in the loop.
LoopVectorizationLegality checks if it is legal to vectorize a loop, and to what vectorization factor.
MapVector< PHINode *, InductionDescriptor > InductionList
InductionList saves induction variables and maps them to the induction descriptor.
const SmallPtrSetImpl< const Instruction * > & getPotentiallyFaultingLoads() const
Returns potentially faulting loads.
bool canVectorize(bool UseVPlanNativePath)
Returns true if it is legal to vectorize this loop.
bool canVectorizeFPMath(bool EnableStrictReductions)
Returns true if it is legal to vectorize the FP math operations in this loop.
PHINode * getPrimaryInduction()
Returns the primary induction variable.
const SmallVector< BasicBlock *, 4 > & getCountableExitingBlocks() const
Returns all exiting blocks with a countable exit, i.e. an exit whose exit count can be computed.
const InductionList & getInductionVars() const
Returns the induction variables found in the loop.
bool hasUncountableEarlyExit() const
Returns true if the loop has exactly one uncountable early exit, i.e. an uncountable exit that isn't the latch block.
bool hasHistograms() const
Returns true if there are any known histogram operations in the loop.
const LoopAccessInfo * getLAI() const
Planner drives the vectorization process after having passed Legality checks.
VectorizationFactor selectEpilogueVectorizationFactor(const ElementCount MaxVF, unsigned IC)
VPlan & getPlanFor(ElementCount VF) const
Return the VPlan for VF.
Definition VPlan.cpp:1615
VectorizationFactor planInVPlanNativePath(ElementCount UserVF)
Use the VPlan-native path to plan how to best vectorize, return the best VF and its cost.
void updateLoopMetadataAndProfileInfo(Loop *VectorLoop, VPBasicBlock *HeaderVPBB, const VPlan &Plan, bool VectorizingEpilogue, MDNode *OrigLoopID, std::optional< unsigned > OrigAverageTripCount, unsigned OrigLoopInvocationWeight, unsigned EstimatedVFxUF, bool DisableRuntimeUnroll)
Update loop metadata and profile info for both the scalar remainder loop and VectorLoop.
Definition VPlan.cpp:1666
void buildVPlans(ElementCount MinVF, ElementCount MaxVF)
Build VPlans for power-of-2 VF's between MinVF and MaxVF inclusive, according to the information gath...
Definition VPlan.cpp:1599
VectorizationFactor computeBestVF()
Compute and return the most profitable vectorization factor.
DenseMap< const SCEV *, Value * > executePlan(ElementCount VF, unsigned UF, VPlan &BestPlan, InnerLoopVectorizer &LB, DominatorTree *DT, bool VectorizingEpilogue)
Generate the IR code for the vectorized loop captured in VPlan BestPlan according to the best selecte...
unsigned selectInterleaveCount(VPlan &Plan, ElementCount VF, InstructionCost LoopCost)
void emitInvalidCostRemarks(OptimizationRemarkEmitter *ORE)
Emit remarks for recipes with invalid costs in the available VPlans.
static bool getDecisionAndClampRange(const std::function< bool(ElementCount)> &Predicate, VFRange &Range)
Test a Predicate on a Range of VF's.
Definition VPlan.cpp:1580
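A hedged usage sketch (Range is a VFRange as in the signature above): the planner clamps Range to the sub-range of VFs that share a decision, so one VPlan can serve all of them.

    // Sketch; assumes VFRange and LoopVectorizationPlanner as declared above.
    static bool allVFsScalable(VFRange &Range) {
      return LoopVectorizationPlanner::getDecisionAndClampRange(
          [](ElementCount VF) { return VF.isScalable(); }, Range);
    }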
void printPlans(raw_ostream &O)
Definition VPlan.cpp:1744
void plan(ElementCount UserVF, unsigned UserIC)
Build VPlans for the specified UserVF and UserIC if they are non-zero or all applicable candidate VFs...
void addMinimumIterationCheck(VPlan &Plan, ElementCount VF, unsigned UF, ElementCount MinProfitableTripCount) const
Add a check to Plan to decide whether the vector loop should be executed, based on its trip count.
bool hasPlanWithVF(ElementCount VF) const
Look through the existing plans and return true if we have one with vectorization factor VF.
This holds vectorization requirements that must be verified late in the process.
Utility class for getting and setting loop vectorizer hints in the form of loop metadata.
bool allowVectorization(Function *F, Loop *L, bool VectorizeOnlyWhenForced) const
void emitRemarkWithHints() const
Dumps all the hint information.
const char * vectorizeAnalysisPassName() const
If hints are provided that force vectorization, use the AlwaysPrint pass name to force the frontend to print the diagnostic.
This class emits a version of the loop where run-time checks ensure that may-alias pointers can't overlap.
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
bool hasLoopInvariantOperands(const Instruction *I) const
Return true if all the operands of the specified instruction are loop invariant.
Definition LoopInfo.cpp:67
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
Definition LoopInfo.cpp:632
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
Definition LoopInfo.cpp:61
Metadata node.
Definition Metadata.h:1078
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition MapVector.h:119
Function * getFunction(StringRef Name) const
Look up the specified function in the module symbol table.
Definition Module.cpp:230
Diagnostic information for optimization analysis remarks related to pointer aliasing.
Diagnostic information for optimization analysis remarks related to floating-point non-commutativity.
Diagnostic information for optimization analysis remarks.
The optimization diagnostic interface.
LLVM_ABI void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for missed-optimization remarks.
Diagnostic information for applied optimization remarks.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
void setIncomingValueForBlock(const BasicBlock *BB, Value *V)
Set every incoming value(s) for block BB to V.
Value * getIncomingValueForBlock(const BasicBlock *BB) const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
LLVM_ABI const SCEVPredicate & getPredicate() const
LLVM_ABI unsigned getSmallConstantMaxTripCount()
Returns the upper bound of the loop trip count as a normal unsigned value, or 0 if the trip count is unknown.
LLVM_ABI const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
Analysis providing profile information.
The RecurrenceDescriptor is used to identify recurrences variables in a loop.
static bool isFMulAddIntrinsic(Instruction *I)
Returns true if the instruction is a call to the llvm.fmuladd intrinsic.
FastMathFlags getFastMathFlags() const
Instruction * getLoopExitInstr() const
static LLVM_ABI unsigned getOpcode(RecurKind Kind)
Returns the opcode corresponding to the RecurrenceKind.
Type * getRecurrenceType() const
Returns the type of the recurrence.
const SmallPtrSet< Instruction *, 8 > & getCastInsts() const
Returns a reference to the instructions used for type-promoting the recurrence.
unsigned getMinWidthCastToRecurrenceTypeInBits() const
Returns the minimum width used by the recurrence in bits.
TrackingVH< Value > getRecurrenceStartValue() const
LLVM_ABI SmallVector< Instruction *, 4 > getReductionOpChain(PHINode *Phi, Loop *L) const
Attempts to find a chain of operations from Phi to LoopExitInst that can be treated as a set of reduction instructions for in-loop reductions.
static bool isAnyOfRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,y) is loop invariant.
bool isSigned() const
Returns true if all source operands of the recurrence are SExtInsts.
RecurKind getRecurrenceKind() const
bool isOrdered() const
Expose an ordered FP reduction to the instance users.
static LLVM_ABI bool isFloatingPointRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is a floating point kind.
static bool isFindIVRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,y) is an increasing or decreasing loop induction variable.
Value * getSentinelValue() const
Returns the sentinel value for FindFirstIV & FindLastIV recurrences to replace the start value.
static bool isMinMaxRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is any min/max kind.
std::optional< ArrayRef< PointerDiffInfo > > getDiffChecks() const
const SmallVectorImpl< RuntimePointerCheck > & getChecks() const
Returns the checks that generateChecks created.
This class uses information about analyze scalars to rewrite expressions in canonical form.
ScalarEvolution * getSE()
bool isInsertedInstruction(Instruction *I) const
Return true if the specified instruction was inserted by the code rewriter.
LLVM_ABI Value * expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc)
Generates a code sequence that evaluates this predicate.
void eraseDeadInstructions(Value *Root)
Remove inserted instructions that are dead, e.g.
virtual bool isAlwaysTrue() const =0
Returns true if the predicate is always true.
This class represents an analyzed expression in the program.
LLVM_ABI bool isZero() const
Return true if the expression is a constant zero.
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
The main scalar evolution driver.
LLVM_ABI const SCEV * getURemExpr(const SCEV *LHS, const SCEV *RHS)
Represents an unsigned remainder expression based on unsigned division.
LLVM_ABI const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCouldNotCompute object.
LLVM_ABI const SCEV * getConstant(ConstantInt *V)
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
LLVM_ABI const SCEV * getTripCountFromExitCount(const SCEV *ExitCount)
A version of getTripCountFromExitCount below which always picks an evaluation type which cannot result in overflow.
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
LLVM_ABI void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may affect ScalarEvolution's ability to compute a trip count.
LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
LLVM_ABI bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
LLVM_ABI const SCEV * getElementCount(Type *Ty, ElementCount EC, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
LLVM_ABI void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may affect its value, or which may disconnect it from a def-use chain linking it to a loop.
LLVM_ABI void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
const SCEV * getMinusOne(Type *Ty)
Return a SCEV for the constant -1 of a specific type.
LLVM_ABI void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be in LCSSA form.
LLVM_ABI unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
APInt getUnsignedRangeMax(const SCEV *S)
Determine the max of the unsigned range for a particular SCEV.
LLVM_ABI const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS, and RHS.
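A hedged sketch combining the entry points above to derive a trip-count expression for a loop:

    #include "llvm/Analysis/ScalarEvolution.h"
    using namespace llvm;
    const SCEV *tripCountSketch(ScalarEvolution &SE, const Loop *L) {
      const SCEV *BTC = SE.getBackedgeTakenCount(L); // SCEVCouldNotCompute if unknown
      if (isa<SCEVCouldNotCompute>(BTC))
        return nullptr;
      return SE.getTripCountFromExitCount(BTC);      // picks an overflow-safe type
    }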
This class represents the LLVM 'select' instruction.
A vector that has set insertion semantics.
Definition SetVector.h:59
size_type size() const
Determine the number of elements in the SetVector.
Definition SetVector.h:102
void insert_range(Range &&R)
Definition SetVector.h:175
size_type count(const key_type &key) const
Count the number of elements of a given key in the SetVector.
Definition SetVector.h:261
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:150
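A short example of the SetVector semantics above (set membership plus insertion-order iteration):

    #include "llvm/ADT/SetVector.h"
    void setVectorExample() {
      llvm::SetVector<int> SV;
      SV.insert(3);
      bool Inserted = SV.insert(3); // duplicate: rejected, returns false
      bool Present = SV.count(3);   // 1; iteration preserves insertion order
      (void)Inserted; (void)Present;
    }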
A templated base class for SmallPtrSet which provides the typesafe interface that is common across all small sizes.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:338
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
LLVM_ABI std::optional< unsigned > getVScaleForTuning() const
LLVM_ABI InstructionCost getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={}) const
Estimate the overhead of scalarizing an instruction.
LLVM_ABI bool supportsEfficientVectorElementLoadStore() const
If target has efficient vector element load/store instructions, it can return true here so that insertion/extraction costs are not added to the scalarization overhead of a load/store.
LLVM_ABI bool prefersVectorizedAddressing() const
Return true if target doesn't mind addresses in vectors.
LLVM_ABI TypeSize getRegisterBitWidth(RegisterKind K) const
LLVM_ABI bool preferFixedOverScalableIfEqualCost(bool IsEpilogue) const
LLVM_ABI InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, OperandValueInfo OpdInfo={OK_AnyValue, OP_None}, const Instruction *I=nullptr) const
LLVM_ABI InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, bool UseMaskForCond=false, bool UseMaskForGaps=false) const
LLVM_ABI InstructionCost getShuffleCost(ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy, ArrayRef< int > Mask={}, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, int Index=0, VectorType *SubTp=nullptr, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const
static LLVM_ABI PartialReductionExtendKind getPartialReductionExtendKind(Instruction *I)
Get the kind of extension that an instruction represents.
static LLVM_ABI OperandValueInfo getOperandInfo(const Value *V)
Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
LLVM_ABI bool isElementTypeLegalForScalableVector(Type *Ty) const
LLVM_ABI ElementCount getMinimumVF(unsigned ElemWidth, bool IsScalable) const
TargetCostKind
The kind of cost model.
@ TCK_RecipThroughput
Reciprocal throughput.
@ TCK_CodeSize
Instruction code size.
@ TCK_SizeAndLatency
The weighted sum of size and latency.
@ TCK_Latency
The latency of instruction.
LLVM_ABI InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const
LLVM_ABI InstructionCost getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE, const SCEV *Ptr, TTI::TargetCostKind CostKind) const
LLVM_ABI InstructionCost getPartialReductionCost(unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType, ElementCount VF, PartialReductionExtendKind OpAExtend, PartialReductionExtendKind OpBExtend, std::optional< unsigned > BinOp, TTI::TargetCostKind CostKind) const
LLVM_ABI InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, Align Alignment, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, const Instruction *I=nullptr) const
LLVM_ABI bool supportsScalableVectors() const
@ TCC_Free
Expected to fold away in lowering.
LLVM_ABI InstructionCost getInstructionCost(const User *U, ArrayRef< const Value * > Operands, TargetCostKind CostKind) const
Estimate the cost of a given IR user when lowered.
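A hedged sketch of a throughput-cost query built from the getInstructionCost signature above:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Analysis/TargetTransformInfo.h"
    #include "llvm/IR/Instruction.h"
    llvm::InstructionCost throughputCost(const llvm::TargetTransformInfo &TTI,
                                         const llvm::Instruction *I) {
      llvm::SmallVector<const llvm::Value *, 4> Ops(I->operand_values().begin(),
                                                    I->operand_values().end());
      return TTI.getInstructionCost(I, Ops,
                                    llvm::TargetTransformInfo::TCK_RecipThroughput);
    }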
LLVM_ABI InstructionCost getIndexedVectorInstrCostFromEnd(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) const
LLVM_ABI InstructionCost getOperandsScalarizationOverhead(ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const
Estimate the overhead of scalarizing operands with the given types.
@ SK_Splice
Concatenates elements from the first input vector with elements of the second input vector.
@ SK_Broadcast
Broadcast element 0 to all other elements.
@ SK_Reverse
Reverse the order of the vector.
LLVM_ABI InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency, const Instruction *I=nullptr) const
CastContextHint
Represents a hint about the context in which a cast is used.
@ Reversed
The cast is used with a reversed load/store.
@ Masked
The cast is used with a masked load/store.
@ None
The cast is not used with a load/store of any kind.
@ Normal
The cast is used with a normal load/store.
@ Interleave
The cast is used with an interleaved load/store.
@ GatherScatter
The cast is used with a gather/scatter.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
Definition Twine.h:82
This class implements a switch-like dispatch statement for a value of 'T' using dyn_cast functionality.
Definition TypeSwitch.h:88
TypeSwitch< T, ResultT > & Case(CallableT &&caseFn)
Add a case on the given type.
Definition TypeSwitch.h:97
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM_ABI unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:281
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:198
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:128
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:231
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:294
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
op_range operands()
Definition User.h:292
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:21
Value * getOperand(unsigned i) const
Definition User.h:232
static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)
Retrieve all the VFInfo instances associated to the CallInst CI.
Definition VectorUtils.h:74
VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph.
Definition VPlan.h:3800
void appendRecipe(VPRecipeBase *Recipe)
Augment the existing recipes of a VPBasicBlock with an additional Recipe as the last recipe.
Definition VPlan.h:3875
RecipeListTy::iterator iterator
Instruction iterators...
Definition VPlan.h:3827
iterator end()
Definition VPlan.h:3837
iterator begin()
Recipe iterator methods.
Definition VPlan.h:3835
iterator_range< iterator > phis()
Returns an iterator range over the PHI-like recipes in the block.
Definition VPlan.h:3888
iterator getFirstNonPhi()
Return the position of the first non-phi node recipe in the block.
Definition VPlan.cpp:246
VPRegionBlock * getEnclosingLoopRegion()
Definition VPlan.cpp:619
VPRecipeBase * getTerminator()
If the block has multiple successors, return the branch recipe terminating the block.
Definition VPlan.cpp:664
void insert(VPRecipeBase *Recipe, iterator InsertPt)
Definition VPlan.h:3866
bool empty() const
Definition VPlan.h:3846
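As a sketch of how these iterators compose (VPlan.h is an internal, still-evolving header, so details may differ between revisions):

// Count the PHI-like recipes at the top of a VPBasicBlock; non-phi
// recipes start at getFirstNonPhi().
static unsigned countPhiRecipes(VPBasicBlock *VPBB) {
  unsigned NumPhis = 0;
  for (VPRecipeBase &R : VPBB->phis()) {
    (void)R; // only counting
    ++NumPhis;
  }
  return NumPhis;
}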
VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
Definition VPlan.h:80
VPRegionBlock * getParent()
Definition VPlan.h:172
const VPBasicBlock * getExitingBasicBlock() const
Definition VPlan.cpp:190
void setName(const Twine &newName)
Definition VPlan.h:165
size_t getNumSuccessors() const
Definition VPlan.h:218
void swapSuccessors()
Swap successors of the block. The block must have exactly 2 successors.
Definition VPlan.h:321
size_t getNumPredecessors() const
Definition VPlan.h:219
VPlan * getPlan()
Definition VPlan.cpp:165
const VPBasicBlock * getEntryBasicBlock() const
Definition VPlan.cpp:170
VPBlockBase * getSingleSuccessor() const
Definition VPlan.h:208
const VPBlocksTy & getSuccessors() const
Definition VPlan.h:197
static auto blocksOnly(const T &Range)
Return an iterator range over Range which only includes BlockTy blocks.
Definition VPlanUtils.h:238
static void insertOnEdge(VPBlockBase *From, VPBlockBase *To, VPBlockBase *BlockPtr)
Inserts BlockPtr on the edge between From and To.
Definition VPlanUtils.h:259
static void connectBlocks(VPBlockBase *From, VPBlockBase *To, unsigned PredIdx=-1u, unsigned SuccIdx=-1u)
Connect VPBlockBases From and To bi-directionally.
Definition VPlanUtils.h:197
static void reassociateBlocks(VPBlockBase *Old, VPBlockBase *New)
Reassociate all the blocks connected to Old so that they now point to New.
Definition VPlanUtils.h:224
VPlan-based builder utility analogous to IRBuilder.
VPPhi * createScalarPhi(ArrayRef< VPValue * > IncomingValues, DebugLoc DL, const Twine &Name="")
VPInstruction * createNaryOp(unsigned Opcode, ArrayRef< VPValue * > Operands, Instruction *Inst=nullptr, const Twine &Name="")
Create an N-ary operation with Opcode, Operands and set Inst as its underlying Instruction.
Canonical scalar induction phi of the vector loop.
Definition VPlan.h:3456
unsigned getNumDefinedValues() const
Returns the number of values defined by the VPDef.
Definition VPlanValue.h:424
VPValue * getVPSingleValue()
Returns the only VPValue defined by the VPDef.
Definition VPlanValue.h:397
void execute(VPTransformState &State) override
Generate the transformed value of the induction at offset StartValue (1. operand) + IV (2. operand) * StepValue (3. operand).
VPValue * getStepValue() const
Definition VPlan.h:3677
VPValue * getStartValue() const
Definition VPlan.h:3676
A pure virtual base class for all recipes modeling header phis, including phis for first order recurrences.
Definition VPlan.h:1981
virtual VPValue * getBackedgeValue()
Returns the incoming value from the loop backedge.
Definition VPlan.h:2029
VPValue * getStartValue()
Returns the start value of the phi, if one is set.
Definition VPlan.h:2018
A special type of VPBasicBlock that wraps an existing IR basic block.
Definition VPlan.h:3953
Helper to manage IR metadata for recipes.
Definition VPlan.h:938
This is a concrete Recipe that models a single VPlan-level instruction.
Definition VPlan.h:979
@ ComputeAnyOfResult
Compute the final result of an AnyOf reduction with select(cmp(),x,y), where one of (x,y) is the start value of the reduction.
Definition VPlan.h:1017
@ ResumeForEpilogue
Explicit user for the resume phi of the canonical induction in the main VPlan, used by the epilogue v...
Definition VPlan.h:1066
@ FirstOrderRecurrenceSplice
Definition VPlan.h:985
@ ReductionStartVector
Start vector for reductions with 3 operands: the original start value, the identity value for the reduction, and an integer scaling factor.
Definition VPlan.h:1057
unsigned getOpcode() const
Definition VPlan.h:1123
VPInterleaveRecipe is a recipe for transforming an interleave group of load or stores into one wide load/store and shuffles.
Definition VPlan.h:2579
In what follows, the term "input IR" refers to code that is fed into the vectorizer whereas the term "output IR" refers to code that is generated by the vectorizer.
A recipe for forming partial reductions.
Definition VPlan.h:2765
detail::zippy< llvm::detail::zip_first, VPUser::const_operand_range, const_incoming_blocks_range > incoming_values_and_blocks() const
Returns an iterator range over pairs of incoming values and corresponding incoming blocks.
Definition VPlan.h:1294
VPRecipeBase is a base class modeling a sequence of one or more output IR instructions.
Definition VPlan.h:386
VPBasicBlock * getParent()
Definition VPlan.h:407
void moveBefore(VPBasicBlock &BB, iplist< VPRecipeBase >::iterator I)
Unlink this recipe and insert into BB before I.
void insertBefore(VPRecipeBase *InsertPos)
Insert an unlinked recipe into a basic block immediately before the specified recipe.
iplist< VPRecipeBase >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
VPRecipeBase * tryToCreateWidenRecipe(VPSingleDefRecipe *R, VFRange &Range)
Create and return a widened recipe for R if one can be created within the given VF Range.
VPValue * getBlockInMask(VPBasicBlock *VPBB) const
Returns the entry mask for block VPBB or null if the mask is all-true.
std::optional< unsigned > getScalingForReduction(const Instruction *ExitInst)
void collectScaledReductions(VFRange &Range)
Find all possible partial reductions in the loop and track all of those that are valid so recipes can...
VPReplicateRecipe * handleReplication(Instruction *I, ArrayRef< VPValue * > Operands, VFRange &Range)
Build a VPReplicateRecipe for I using Operands.
VPRecipeBase * tryToCreatePartialReduction(Instruction *Reduction, ArrayRef< VPValue * > Operands, unsigned ScaleFactor)
Create and return a partial reduction recipe for a reduction instruction along with binary operation ...
A recipe for handling reduction phis.
Definition VPlan.h:2334
bool isInLoop() const
Returns true, if the phi is part of an in-loop reduction.
Definition VPlan.h:2394
RecurKind getRecurrenceKind() const
Returns the recurrence kind of the reduction.
Definition VPlan.h:2388
VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks which form a Single-Entry-Single-Exiting subgraph of the output IR CFG.
Definition VPlan.h:3988
const VPBlockBase * getEntry() const
Definition VPlan.h:4024
VPCanonicalIVPHIRecipe * getCanonicalIV()
Returns the canonical induction recipe of the region.
Definition VPlan.h:4086
VPReplicateRecipe replicates a given instruction producing multiple scalar copies of the original scalar type, one per lane, instead of producing a single copy of widened type for all lanes.
Definition VPlan.h:2868
VPSingleDef is a base class for recipes modeling a sequence of one or more output IR instructions that define a single result VPValue.
Definition VPlan.h:517
Instruction * getUnderlyingInstr()
Returns the underlying instruction.
Definition VPlan.h:582
An analysis for type-inference for VPValues.
Type * inferScalarType(const VPValue *V)
Infer the type of V. Returns the scalar type of V.
This class augments VPValue with operands which provide the inverse def-use edges from VPValue's users to its operands.
Definition VPlanValue.h:199
void setOperand(unsigned I, VPValue *New)
Definition VPlanValue.h:243
VPValue * getOperand(unsigned N) const
Definition VPlanValue.h:238
void addOperand(VPValue *Operand)
Definition VPlanValue.h:232
VPRecipeBase * getDefiningRecipe()
Returns the recipe defining this VPValue or nullptr if it is not defined by a recipe, i.e. if it is a live-in.
Definition VPlan.cpp:135
Value * getLiveInIRValue() const
Returns the underlying IR value, if this VPValue is defined outside the scope of VPlan.
Definition VPlanValue.h:176
Value * getUnderlyingValue() const
Return the underlying Value attached to this VPValue.
Definition VPlanValue.h:85
void replaceAllUsesWith(VPValue *New)
Definition VPlan.cpp:1416
user_iterator user_begin()
Definition VPlanValue.h:130
unsigned getNumUsers() const
Definition VPlanValue.h:113
void replaceUsesWithIf(VPValue *New, llvm::function_ref< bool(VPUser &U, unsigned Idx)> ShouldReplace)
Go through the uses list for this VPValue and make each use point to New if the callback ShouldReplace returns true for the given use.
Definition VPlan.cpp:1420
user_range users()
Definition VPlanValue.h:134
A recipe to compute a pointer to the last element of each part of a widened memory access, for widened memory accesses of IndexedTy.
Definition VPlan.h:1845
VPWidenCastRecipe is a recipe to create vector cast instructions.
Definition VPlan.h:1486
A recipe for handling GEP instructions.
Definition VPlan.h:1773
VPValue * getStepValue()
Returns the step value of the induction.
Definition VPlan.h:2074
A recipe for handling phi nodes of integer and floating-point inductions, producing their vector values.
Definition VPlan.h:2121
A common base class for widening memory operations.
Definition VPlan.h:3169
A recipe for widened phis.
Definition VPlan.h:2257
VPWidenRecipe is a recipe for producing a widened instruction using the opcode and operands of the re...
Definition VPlan.h:1443
VPlan models a candidate for vectorization, encoding various decisions taken to produce efficient output IR.
Definition VPlan.h:4112
bool hasVF(ElementCount VF) const
Definition VPlan.h:4335
VPBasicBlock * getEntry()
Definition VPlan.h:4214
VPValue & getVectorTripCount()
The vector trip count.
Definition VPlan.h:4305
VPValue & getVFxUF()
Returns VF * UF of the vector loop region.
Definition VPlan.h:4315
VPValue & getVF()
Returns the VF of the vector loop region.
Definition VPlan.h:4308
VPValue * getTripCount() const
The trip count of the original loop.
Definition VPlan.h:4276
iterator_range< SmallSetVector< ElementCount, 2 >::iterator > vectorFactors() const
Returns an iterator range over all VFs of the plan.
Definition VPlan.h:4342
bool hasUF(unsigned UF) const
Definition VPlan.h:4353
ArrayRef< VPIRBasicBlock * > getExitBlocks() const
Return an ArrayRef containing VPIRBasicBlocks wrapping the exit blocks of the original scalar loop.
Definition VPlan.h:4266
LLVM_ABI_FOR_TEST VPRegionBlock * getVectorLoopRegion()
Returns the VPRegionBlock of the vector loop.
Definition VPlan.cpp:1049
bool hasEarlyExit() const
Returns true if the VPlan is based on a loop with an early exit.
Definition VPlan.h:4488
InstructionCost cost(ElementCount VF, VPCostContext &Ctx)
Return the cost of this plan.
Definition VPlan.cpp:1031
void resetTripCount(VPValue *NewTripCount)
Resets the trip count for the VPlan.
Definition VPlan.h:4290
VPBasicBlock * getMiddleBlock()
Returns the 'middle' block of the plan, that is the block that selects whether to execute the scalar tail loop or the exit block from the loop latch.
Definition VPlan.h:4239
VPValue * getOrAddLiveIn(Value *V)
Gets the live-in VPValue for V or adds a new live-in (if none exists yet) for V.
Definition VPlan.h:4377
VPBasicBlock * getScalarPreheader() const
Return the VPBasicBlock for the preheader of the scalar loop.
Definition VPlan.h:4257
void execute(VPTransformState *State)
Generate the IR code for this VPlan.
Definition VPlan.cpp:943
VPIRBasicBlock * getScalarHeader() const
Return the VPIRBasicBlock wrapping the header of the scalar loop.
Definition VPlan.h:4262
VPBasicBlock * getVectorPreheader()
Returns the preheader of the vector loop region, if one exists, or null otherwise.
Definition VPlan.h:4219
VPlan * duplicate()
Clone the current VPlan, update all VPValues of the new VPlan and cloned recipes to refer to the cloned VPValues and recipes, and return the cloned VPlan.
Definition VPlan.cpp:1191
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition Value.cpp:166
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:390
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:546
iterator_range< user_iterator > users()
Definition Value.h:426
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1099
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
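A typical replace-then-erase pattern built from these APIs (sketch; the helper name is made up):

#include "llvm/IR/Instruction.h"
#include <cassert>

using namespace llvm;

// Redirect every use of From to To, then remove From. RAUW requires
// both values to have the same type.
void replaceAndErase(Instruction *From, Value *To) {
  assert(From->getType() == To->getType() && "RAUW type mismatch");
  From->replaceAllUsesWith(To);
  From->eraseFromParent();
}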
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:202
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition DenseSet.h:175
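A sketch of the usual insert-returns-pair idiom (the helper is illustrative):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"

// Deduplicate while preserving first-seen order; insert() reports via
// its bool whether the value was newly added.
llvm::SmallVector<int, 8> dedup(llvm::ArrayRef<int> In) {
  llvm::DenseSet<int> Seen;
  llvm::SmallVector<int, 8> Out;
  for (int V : In)
    if (Seen.insert(V).second)
      Out.push_back(V);
  return Out;
}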
constexpr bool hasKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns true if there exists a value X where RHS.multiplyCoefficientBy(X) will result in a value whose quantity matches our own.
Definition TypeSize.h:270
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:201
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:231
constexpr bool isNonZero() const
Definition TypeSize.h:156
constexpr ScalarTy getKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns a value X where RHS.multiplyCoefficientBy(X) will result in a value whose quantity matches our own.
Definition TypeSize.h:278
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:217
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:169
constexpr LeafTy multiplyCoefficientBy(ScalarTy RHS) const
Definition TypeSize.h:257
constexpr bool isFixed() const
Returns true if the quantity is not scaled by vscale.
Definition TypeSize.h:172
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:166
constexpr bool isZero() const
Definition TypeSize.h:154
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:224
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the same way as for basic types.
Definition TypeSize.h:253
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:238
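These helpers underpin ElementCount and TypeSize. A small sketch of the semantics (assumes only llvm/Support/TypeSize.h):

#include "llvm/Support/TypeSize.h"

using namespace llvm;

// <8 x i32> has a fixed element count; <vscale x 4 x i32> a scalable one.
// The isKnown* predicates only return true when the relation holds for
// every possible value of vscale.
void elementCountDemo() {
  ElementCount Fixed = ElementCount::getFixed(8);
  ElementCount Scal = ElementCount::getScalable(4);
  bool KnownLE =
      ElementCount::isKnownLE(Scal, Scal.multiplyCoefficientBy(2)); // true
  bool KnownLT = ElementCount::isKnownLT(Fixed, Scal); // false: vscale unknown
  (void)KnownLE;
  (void)KnownLT;
}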
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
IteratorT end() const
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
Changed
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always be performed.
Definition CallingConv.h:76
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:189
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
OneOps_match< OpTy, Instruction::Freeze > m_Freeze(const OpTy &Op)
Matches FreezeInst.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
class_match< CmpInst > m_Cmp()
Matches any compare instruction and ignore it.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
MatchFunctor< Val, Pattern > match_fn(const Pattern &P)
A match functor that can be used as a UnaryPredicate in functional algorithms like all_of.
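A minimal IR pattern-matching sketch using these combinators (the function name is illustrative):

#include "llvm/IR/PatternMatch.h"

using namespace llvm;
using namespace llvm::PatternMatch;

// Match "add X, 1" (or a vector whose elements are all 1), binding X
// on success; V is left unexamined otherwise.
bool isIncrement(Value *V, Value *&X) {
  return match(V, m_Add(m_Value(X), m_One()));
}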
class_match< const SCEVVScale > m_SCEVVScale()
bind_cst_ty m_scev_APInt(const APInt *&C)
Match an SCEV constant and bind it to an APInt.
specificloop_ty m_SpecificLoop(const Loop *L)
cst_pred_ty< is_specific_signed_cst > m_scev_SpecificSInt(int64_t V)
Match an SCEV constant with a plain signed integer (sign-extended value will be matched)
SCEVAffineAddRec_match< Op0_t, Op1_t, class_match< const Loop > > m_scev_AffineAddRec(const Op0_t &Op0, const Op1_t &Op1)
bind_ty< const SCEVMulExpr > m_scev_Mul(const SCEVMulExpr *&V)
bool match(const SCEV *S, const Pattern &P)
class_match< const SCEV > m_SCEV()
match_combine_or< AllRecipe_match< Instruction::ZExt, Op0_t >, AllRecipe_match< Instruction::SExt, Op0_t > > m_ZExtOrSExt(const Op0_t &Op0)
VPInstruction_match< VPInstruction::ExtractLastElement, Op0_t > m_ExtractLastElement(const Op0_t &Op0)
class_match< VPValue > m_VPValue()
Match an arbitrary VPValue and ignore it.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to the ValuesClass constructor.
initializer< Ty > init(const Ty &Val)
Add a small namespace to avoid name clashes with the classes used in the streaming interface.
DiagnosticInfoOptimizationBase::Argument NV
NodeAddr< InstrNode * > Instr
Definition RDFGraph.h:389
NodeAddr< PhiNode * > Phi
Definition RDFGraph.h:390
friend class Instruction
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
bool isSingleScalar(const VPValue *VPV)
Returns true if VPV is a single scalar, either because it produces the same value for all lanes or only has its first lane used.
Definition VPlanUtils.h:44
VPValue * getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr)
Get or create a VPValue that corresponds to the expansion of Expr.
VPBasicBlock * getFirstLoopHeader(VPlan &Plan, VPDominatorTree &VPDT)
Returns the header block of the first, top-level loop, or null if none exist.
const SCEV * getSCEVExprForVPValue(VPValue *V, ScalarEvolution &SE)
Return the SCEV expression for V.
unsigned getVFScaleFactor(VPRecipeBase *R)
Get the VF scaling factor applied to the recipe's output, if the recipe has one.
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE, AssumptionCache *AC, MemorySSAUpdater *MSSAU, bool PreserveLCSSA)
Simplify each loop in a loop nest recursively.
LLVM_ABI void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Offset
Definition DWP.cpp:477
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:829
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ABI Value * addRuntimeChecks(Instruction *Loc, Loop *TheLoop, const SmallVectorImpl< RuntimePointerCheck > &PointerChecks, SCEVExpander &Expander, bool HoistRuntimeChecks=false)
Add code that checks at runtime if the accessed arrays in PointerChecks overlap.
auto cast_if_present(const Y &Val)
cast_if_present<X> - Functionally identical to cast, except that a null value is accepted.
Definition Casting.h:683
LLVM_ABI bool RemoveRedundantDbgInstrs(BasicBlock *BB)
Try to remove redundant dbg.value instructions from given basic block.
cl::opt< bool > VerifyEachVPlan
LLVM_ABI std::optional< unsigned > getLoopEstimatedTripCount(Loop *L, unsigned *EstimatedLoopInvocationWeight=nullptr)
Return the estimated trip count of L based on branch weight metadata, or std::nullopt when no estimate can be computed.
static void reportVectorization(OptimizationRemarkEmitter *ORE, Loop *TheLoop, VectorizationFactor VF, unsigned IC)
Report successful vectorization of the loop.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1725
unsigned getLoadStoreAddressSpace(const Value *I)
A helper function that returns the address space of the pointer operand of load or store instruction.
LLVM_ABI Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)
Returns the min/max intrinsic used when expanding a min/max reduction.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1655
LLVM_ABI_FOR_TEST bool verifyVPlanIsValid(const VPlan &Plan, bool VerifyLate=false)
Verify invariants for general VPlans.
LLVM_ABI Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
InstructionCost Cost
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2472
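A sketch of enumerate together with zip (both from STLExtras.h; requires C++17 structured bindings):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// enumerate pairs each element with its 0-based index; zip walks ranges
// in lockstep and stops at the end of the shortest one.
void printIndexed(ArrayRef<int> A, ArrayRef<int> B) {
  for (auto [Idx, V] : enumerate(A))
    outs() << Idx << ": " << V << "\n";
  for (auto [X, Y] : zip(A, B))
    outs() << X << " <-> " << Y << "\n";
}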
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
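The casting utilities follow one contract; a short sketch:

#include "llvm/IR/Instructions.h"
#include "llvm/Support/Casting.h"

using namespace llvm;

// dyn_cast returns nullptr on a type mismatch, while cast asserts, so
// cast is only for cases where the dynamic type is already established.
const Value *loadedPointer(const Instruction *I) {
  if (const auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  return nullptr;
}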
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
Value * getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF)
Return the runtime value for VF.
LLVM_ABI bool formLCSSARecursively(Loop &L, const DominatorTree &DT, const LoopInfo *LI, ScalarEvolution *SE)
Put a loop nest into LCSSA form.
Definition LCSSA.cpp:449
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2136
LLVM_ABI bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
Definition STLExtras.h:632
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:284
Align getLoadStoreAlignment(const Value *I)
A helper function that returns the alignment of load or store instruction.
iterator_range< df_iterator< VPBlockShallowTraversalWrapper< VPBlockBase * > > > vp_depth_first_shallow(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order.
Definition VPlanCFG.h:216
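A traversal sketch (VPlan and VPlanCFG.h are internal headers, so this only compiles inside the LLVM tree):

// Visit only the top level of the VPlan CFG; contents of region blocks
// are skipped. Use vp_depth_first_deep to also descend into regions.
static void printTopLevelBlocks(VPlan &Plan) {
  for (VPBlockBase *VPB : vp_depth_first_shallow(Plan.getEntry()))
    dbgs() << VPB->getName() << "\n";
}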
LLVM_ABI bool VerifySCEV
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not have undefined behavior.
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
iterator_range< df_iterator< VPBlockDeepTraversalWrapper< VPBlockBase * > > > vp_depth_first_deep(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order while traversing through region blocks.
Definition VPlanCFG.h:243
SmallVector< VPRegisterUsage, 8 > calculateRegisterUsageForPlan(VPlan &Plan, ArrayRef< ElementCount > VFs, const TargetTransformInfo &TTI, const SmallPtrSetImpl< const Value * > &ValuesToIgnore)
Estimate the register usage for Plan and vectorization factors in VFs by calculating the highest number of values that are live at a single location as a rough estimate.
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:337
LLVM_ABI void setBranchWeights(Instruction &I, ArrayRef< uint32_t > Weights, bool IsExpected, bool ElideAllZero=false)
Create a new branch_weights metadata node and add or overwrite a prof metadata reference to instruction I.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1732
void collectEphemeralRecipesForVPlan(VPlan &Plan, DenseSet< VPRecipeBase * > &EphRecipes)
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
bool containsIrreducibleCFG(RPOTraversalT &RPOTraversal, const LoopInfoT &LI)
Return true if the control flow in RPOTraversal is irreducible.
Definition CFG.h:149
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1622
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
LLVM_ABI cl::opt< bool > EnableLoopVectorization
LLVM_ABI bool wouldInstructionBeTriviallyDead(const Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction would have no side effects if it was not used.
Definition Local.cpp:421
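Together with make_early_inc_range above, this supports erasing while iterating; a sketch (the helper name is made up):

#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;

// Erase instructions that are unused and side-effect free. The early-inc
// range advances past I before the body may erase it.
bool removeDeadInBlock(BasicBlock &BB) {
  bool Changed = false;
  for (Instruction &I : make_early_inc_range(BB))
    if (I.use_empty() && wouldInstructionBeTriviallyDead(&I)) {
      I.eraseFromParent();
      Changed = true;
    }
  return Changed;
}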
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
SmallVector< ValueTypeFromRangeType< R >, Size > to_vector(R &&Range)
Given a range of type R, iterate the entire range and return a SmallVector containing its elements.
Type * toVectorizedTy(Type *Ty, ElementCount EC)
A helper for converting to vectorized types.
LLVM_ABI void llvm_unreachable_internal(const char *msg=nullptr, const char *file=nullptr, unsigned line=0)
This function calls abort(), and prints the optional message to stderr.
bool canConstantBeExtended(const APInt *C, Type *NarrowType, TTI::PartialReductionExtendKind ExtKind)
Check if a constant C can be safely treated as having been extended from a narrower type with the given extension kind.
Definition VPlan.cpp:1757
T * find_singleton(R &&Range, Predicate P, bool AllowRepeats=false)
Return the single value in Range that satisfies P(<member of Range> *, AllowRepeats)->T *, returning nullptr if it is not found.
Definition STLExtras.h:1787
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
cl::opt< unsigned > ForceTargetInstructionCost
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
format_object< Ts... > format(const char *Fmt, const Ts &... Vals)
These are helper functions used to produce formatted output.
Definition Format.h:129
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition MathExtras.h:394
bool canVectorizeTy(Type *Ty)
Returns true if Ty is a valid vector element type, void, or an unpacked literal struct where all elements are valid vector element types.
TargetTransformInfo TTI
static void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I=nullptr, DebugLoc DL={})
Reports an informative message: print Msg for debugging purposes as well as an optimization remark.
LLVM_ABI bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
RecurKind
These are the kinds of recurrences that we support.
@ Or
Bitwise or logical OR of integers.
@ FMulAdd
Sum of float products with llvm.fmuladd(a * b + sum).
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
LLVM_ABI Value * getRecurrenceIdentity(RecurKind K, Type *Tp, FastMathFlags FMF)
Given information about a recurrence kind, return the identity value for the @llvm.vector.reduce.* intrinsic used to generate it.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
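A small arithmetic sketch combining alignTo with divideCeil from MathExtras (the names are illustrative):

#include "llvm/Support/Alignment.h"
#include "llvm/Support/MathExtras.h"
#include <cstdint>

using namespace llvm;

// Pad a byte size to a 16-byte boundary, then count how many registers
// of RegBytes each are needed to hold the padded size.
uint64_t registersNeeded(uint64_t SizeBytes, uint64_t RegBytes) {
  uint64_t Padded = alignTo(SizeBytes, Align(16));
  return divideCeil(Padded, RegBytes);
}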
LLVM_ABI void reportVectorizationFailure(const StringRef DebugMsg, const StringRef OREMsg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I=nullptr)
Reports a vectorization failure: print DebugMsg for debugging purposes along with the corresponding optimization remark OREMsg.
DWARFExpression::Operation Op
ScalarEpilogueLowering
@ CM_ScalarEpilogueNotAllowedLowTripLoop
@ CM_ScalarEpilogueNotNeededUsePredicate
@ CM_ScalarEpilogueNotAllowedOptSize
@ CM_ScalarEpilogueAllowed
@ CM_ScalarEpilogueNotAllowedUsePredicate
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
ArrayRef(const T &OneElt) -> ArrayRef< T >
Value * createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF, int64_t Step)
Return a value for Step multiplied by VF.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="", bool Before=false)
Split the specified block at the specified instruction.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1758
auto predecessors(const MachineBasicBlock *BB)
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition iterator.h:363
cl::opt< bool > EnableVPlanNativePath
Definition VPlan.cpp:56
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
ArrayRef< Type * > getContainedTypes(Type *const &Ty)
Returns the types contained in Ty.
LLVM_ABI Value * addDiffRuntimeChecks(Instruction *Loc, ArrayRef< PointerDiffInfo > Checks, SCEVExpander &Expander, function_ref< Value *(IRBuilderBase &, unsigned)> GetVF, unsigned IC)
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
@ DataAndControlFlowWithoutRuntimeCheck
Use predicate to control both data and control flow, but modify the trip count so that a runtime overflow check can be avoided.
@ None
Don't use tail folding.
@ DataWithEVL
Use predicated EVL instructions for tail-folding.
@ DataAndControlFlow
Use predicate to control both data and control flow.
@ DataWithoutLaneMask
Same as Data, but avoids using the get.active.lane.mask intrinsic to calculate the mask and instead i...
@ Data
Use predicate only to mask operations on data in the loop.
unsigned getPredBlockCostDivisor(TargetTransformInfo::TargetCostKind CostKind)
A helper function that returns how much we should divide the cost of a predicated block by.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI bool hasBranchWeightMD(const Instruction &I)
Checks if an instructions has Branch Weight Metadata.
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
Definition Hashing.h:592
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
Definition bit.h:330
Type * toVectorTy(Type *Scalar, ElementCount EC)
A helper function for converting Scalar types to vector types.
std::unique_ptr< VPlan > VPlanPtr
Definition VPlan.h:76
constexpr detail::IsaCheckPredicate< Types... > IsaPred
Function object wrapper for the llvm::isa type check.
Definition Casting.h:830
LLVM_ABI MapVector< Instruction *, uint64_t > computeMinimumValueSizes(ArrayRef< BasicBlock * > Blocks, DemandedBits &DB, const TargetTransformInfo *TTI=nullptr)
Compute a map of integer instructions to their minimum legal type size.
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Definition Hashing.h:466
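A hashing sketch (the key layout is illustrative):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringRef.h"

using namespace llvm;

// Fold heterogeneous fields into one hash_code; the range helper covers
// the variable-length operand list.
hash_code hashKey(StringRef Name, unsigned Opcode, ArrayRef<int> Ops) {
  return hash_combine(Name, Opcode,
                      hash_combine_range(Ops.begin(), Ops.end()));
}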
LLVM_ABI cl::opt< bool > EnableLoopInterleaving
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:869
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI void collectEphemeralValues(const Loop *L, AssumptionCache *AC, SmallPtrSetImpl< const Value * > &EphValues)
Collect a loop's ephemeral values (those used only by an assume or similar intrinsics in the loop).
An information struct used to provide DenseMap with the various necessary components for a given value type.
Encapsulate information regarding vectorization of a loop and its epilogue.
EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF, ElementCount EVF, unsigned EUF, VPlan &EpiloguePlan)
A class that represents two vectorization factors (initialized with 0 by default).
static FixedScalableVFPair getNone()
This holds details about a histogram operation – a load -> update -> store sequence where each lane in a vector might be updating the same element as another lane.
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
TargetLibraryInfo * TLI
LLVM_ABI LoopVectorizeResult runImpl(Function &F)
LLVM_ABI bool processLoop(Loop *L)
ProfileSummaryInfo * PSI
LoopAccessInfoManager * LAIs
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI LoopVectorizePass(LoopVectorizeOptions Opts={})
BlockFrequencyInfo * BFI
ScalarEvolution * SE
AssumptionCache * AC
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
OptimizationRemarkEmitter * ORE
TargetTransformInfo * TTI
Storage for information about made changes.
A chain of instructions that form a partial reduction.
Instruction * Reduction
The top-level binary operation that forms the reduction to a scalar after the loop body.
Instruction * ExtendA
The extension of each of the inner binary operation's operands.
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70
A marker analysis to determine if extra passes should be run after loop vectorization.
static LLVM_ABI AnalysisKey Key
Holds the VFShape for a specific scalar to vector function mapping.
std::optional< unsigned > getParamIndexForOptionalMask() const
Instruction Set Architecture.
Encapsulates information needed to describe a parameter.
A range of powers-of-2 vectorization factors with fixed start and adjustable end.
ElementCount End
Struct to hold various analysis needed for cost computations.
LoopVectorizationCostModel & CM
bool isLegacyUniformAfterVectorization(Instruction *I, ElementCount VF) const
Return true if I is considered uniform-after-vectorization in the legacy cost model for VF.
bool skipCostComputation(Instruction *UI, bool IsVector) const
Return true if the cost for UI shouldn't be computed, e.g. because it has already been pre-computed.
InstructionCost getLegacyCost(Instruction *UI, ElementCount VF) const
Return the cost for UI with VF using the legacy cost model as fallback until computing the cost of all recipes migrates to the VPlan-based cost model.
SmallPtrSet< Instruction *, 8 > SkipCostComputation
A recipe for handling first-order recurrence phis.
Definition VPlan.h:2299
A struct that represents some properties of the register usage of a loop.
VPTransformState holds information passed down when "executing" a VPlan, needed for generating the ou...
A recipe for widening select instructions.
Definition VPlan.h:1727
static void materializeBroadcasts(VPlan &Plan)
Add explicit broadcasts for live-ins and VPValues defined in Plan's entry block if they are used as vectors.
static void materializePacksAndUnpacks(VPlan &Plan)
Add explicit Build[Struct]Vector recipes to Pack multiple scalar values into vectors and Unpack recipes to extract scalars from vectors as needed.
static void materializeBackedgeTakenCount(VPlan &Plan, VPBasicBlock *VectorPH)
Materialize the backedge-taken count to be computed explicitly using VPInstructions.
static LLVM_ABI_FOR_TEST std::unique_ptr< VPlan > buildVPlan0(Loop *TheLoop, LoopInfo &LI, Type *InductionTy, DebugLoc IVDL, PredicatedScalarEvolution &PSE)
Create a base VPlan0, serving as the common starting point for all later candidates.
static void optimizeInductionExitUsers(VPlan &Plan, DenseMap< VPValue *, VPValue * > &EndValues, ScalarEvolution &SE)
If there's a single exit block, optimize its phi recipes that use exiting IV values by feeding them precomputed end values instead, possibly taken one step backwards.
static LLVM_ABI_FOR_TEST void handleEarlyExits(VPlan &Plan, bool HasUncountableExit)
Update Plan to account for all early exits.
static void addScalarResumePhis(VPlan &Plan, VPRecipeBuilder &Builder, DenseMap< VPValue *, VPValue * > &IVEndValues)
Create resume phis in the scalar preheader for first-order recurrences, reductions and inductions, and update the VPIRInstructions wrapping the original phis in the scalar header.
static void canonicalizeEVLLoops(VPlan &Plan)
Transform EVL loops to use variable-length stepping after region dissolution.
static void dropPoisonGeneratingRecipes(VPlan &Plan, const std::function< bool(BasicBlock *)> &BlockNeedsPredication)
Drop poison flags from recipes that may generate a poison value that is used after vectorization, even when their operands are not poison.
static void createInterleaveGroups(VPlan &Plan, const SmallPtrSetImpl< const InterleaveGroup< Instruction > * > &InterleaveGroups, VPRecipeBuilder &RecipeBuilder, const bool &ScalarEpilogueAllowed)
static bool runPass(bool(*Transform)(VPlan &, ArgsTy...), VPlan &Plan, typename std::remove_reference< ArgsTy >::type &...Args)
Helper to run a VPlan transform Transform on VPlan, forwarding extra arguments to the transform.
static void addBranchWeightToMiddleTerminator(VPlan &Plan, ElementCount VF, std::optional< unsigned > VScaleForTuning)
Add branch weight metadata, if the Plan's middle block is terminated by a BranchOnCond recipe.
static std::unique_ptr< VPlan > narrowInterleaveGroups(VPlan &Plan, const TargetTransformInfo &TTI)
Try to find a single VF among Plan's VFs for which all interleave groups (with known minimum VF elements) can be replaced by wide loads and stores.
static void unrollByUF(VPlan &Plan, unsigned UF)
Explicitly unroll Plan by UF.
static DenseMap< const SCEV *, Value * > expandSCEVs(VPlan &Plan, ScalarEvolution &SE)
Expand VPExpandSCEVRecipes in Plan's entry block.
static void convertToConcreteRecipes(VPlan &Plan)
Lower abstract recipes to concrete ones, that can be codegen'd.
static void addMinimumIterationCheck(VPlan &Plan, ElementCount VF, unsigned UF, ElementCount MinProfitableTripCount, bool RequiresScalarEpilogue, bool TailFolded, bool CheckNeededWithTailFolding, Loop *OrigLoop, const uint32_t *MinItersBypassWeights, DebugLoc DL, ScalarEvolution &SE)
static void convertToAbstractRecipes(VPlan &Plan, VPCostContext &Ctx, VFRange &Range)
This function converts initial recipes to abstract recipes and clamps Range based on the cost model.
static void materializeConstantVectorTripCount(VPlan &Plan, ElementCount BestVF, unsigned BestUF, PredicatedScalarEvolution &PSE)
static LLVM_ABI_FOR_TEST bool tryToConvertVPInstructionsToVPRecipes(VPlan &Plan, function_ref< const InductionDescriptor *(PHINode *)> GetIntOrFpInductionDescriptor, const TargetLibraryInfo &TLI)
Replaces the VPInstructions in Plan with corresponding widen recipes.
static void addExitUsersForFirstOrderRecurrences(VPlan &Plan, VFRange &Range)
Handle users in the original exit block for first-order recurrences.
static DenseMap< VPBasicBlock *, VPValue * > introduceMasksAndLinearize(VPlan &Plan, bool FoldTail)
Predicate and linearize the control-flow in the only loop region of Plan.
static void addExplicitVectorLength(VPlan &Plan, const std::optional< unsigned > &MaxEVLSafeElements)
Add a VPEVLBasedIVPHIRecipe and related recipes to Plan and replace all uses except the canonical IV increment of the VPCanonicalIVPHIRecipe with a VPEVLBasedIVPHIRecipe.
static void replaceSymbolicStrides(VPlan &Plan, PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &StridesMap)
Replace symbolic strides from StridesMap in Plan with constants when possible.
static bool handleMaxMinNumReductions(VPlan &Plan)
Check if Plan contains any FMaxNum or FMinNum reductions.
static void removeBranchOnConst(VPlan &Plan)
Remove BranchOnCond recipes with true or false conditions together with removing dead edges to their successors.
static LLVM_ABI_FOR_TEST void createLoopRegions(VPlan &Plan)
Replace loops in Plan's flat CFG with VPRegionBlocks, turning Plan's flat CFG into a hierarchical CFG.
static void removeDeadRecipes(VPlan &Plan)
Remove dead recipes from Plan.
static void attachCheckBlock(VPlan &Plan, Value *Cond, BasicBlock *CheckBlock, bool AddBranchWeights)
Wrap runtime check block CheckBlock in a VPIRBB and Cond in a VPValue and connect the block to Plan, using the VPValue as branch condition.
static void materializeVectorTripCount(VPlan &Plan, VPBasicBlock *VectorPHVPBB, bool TailByMasking, bool RequiresScalarEpilogue)
Materialize vector trip count computations to a set of VPInstructions.
static void simplifyRecipes(VPlan &Plan)
Perform instcombine-like simplifications on recipes in Plan.
static void replicateByVF(VPlan &Plan, ElementCount VF)
Replace each replicating VPReplicateRecipe and VPInstruction outside of any replicate region in Plan with VF single-scalar recipes.
static void clearReductionWrapFlags(VPlan &Plan)
Clear NSW/NUW flags from reduction instructions if necessary.
static void cse(VPlan &Plan)
Perform common-subexpression-elimination on Plan.
static void addActiveLaneMask(VPlan &Plan, bool UseActiveLaneMaskForControlFlow, bool DataAndControlFlowWithoutRuntimeCheck)
Replace (ICMP_ULE, wide canonical IV, backedge-taken-count) checks with an (active-lane-mask recipe, wide canonical IV, trip-count) check.
static void optimize(VPlan &Plan)
Apply VPlan-to-VPlan optimizations to Plan, including induction recipe optimizations, dead recipe removal, replicate region optimization and block merging.
static void dissolveLoopRegions(VPlan &Plan)
Replace loop regions with explicit CFG.
static void truncateToMinimalBitwidths(VPlan &Plan, const MapVector< Instruction *, uint64_t > &MinBWs)
Insert truncates and extends for any truncated recipe.
static bool adjustFixedOrderRecurrences(VPlan &Plan, VPBuilder &Builder)
Try to have all users of fixed-order recurrences appear after the recipe defining their previous valu...
static void optimizeForVFAndUF(VPlan &Plan, ElementCount BestVF, unsigned BestUF, PredicatedScalarEvolution &PSE)
Optimize Plan based on BestVF and BestUF.
static void materializeVFAndVFxUF(VPlan &Plan, VPBasicBlock *VectorPH, ElementCount VF)
Materialize VF and VFxUF to be computed explicitly using VPInstructions.
static void addMinimumVectorEpilogueIterationCheck(VPlan &Plan, Value *TripCount, Value *VectorTripCount, bool RequiresScalarEpilogue, ElementCount EpilogueVF, unsigned EpilogueUF, unsigned MainLoopStep, unsigned EpilogueLoopStep, ScalarEvolution &SE)
Add a check to Plan to see if the epilogue vector loop should be executed.
static LLVM_ABI_FOR_TEST void addMiddleCheck(VPlan &Plan, bool RequiresScalarEpilogueCheck, bool TailFolded)
If a check is needed to guard executing the scalar epilogue loop, it will be added to the middle block.
TODO: The following VectorizationFactor was pulled out of LoopVectorizationCostModel class.
InstructionCost Cost
Cost of the loop with that width.
ElementCount MinProfitableTripCount
The minimum trip count required to make vectorization profitable, e.g. due to runtime checks.
ElementCount Width
Vector width with best cost.
InstructionCost ScalarCost
Cost of the scalar loop.
static VectorizationFactor Disabled()
Width 1 means no vectorization, cost 0 means uncomputed cost.
static LLVM_ABI bool HoistRuntimeChecks