1//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10// and generates target-independent LLVM-IR.
11// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12// of instructions in order to estimate the profitability of vectorization.
13//
14// The loop vectorizer combines consecutive loop iterations into a single
15// 'wide' iteration. After this transformation the index is incremented
16// by the SIMD vector width, and not by one.
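// For example (illustrative only), with a vectorization factor (VF) of 4 the
// scalar loop
//   for (i = 0; i < n; ++i)
//     a[i] = b[i] + 1;
// is conceptually rewritten so that each wide iteration processes four
// elements:
//   for (i = 0; i + 4 <= n; i += 4)
//     a[i..i+3] = b[i..i+3] + <1, 1, 1, 1>;
// with any remaining iterations handled by a scalar epilogue loop.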
17//
18// This pass has four parts:
19// 1. The main loop pass that drives the different parts.
20// 2. LoopVectorizationLegality - A unit that checks for the legality
21// of the vectorization.
22// 3. InnerLoopVectorizer - A unit that performs the actual
23// widening of instructions.
24// 4. LoopVectorizationCostModel - A unit that checks for the profitability
25// of vectorization. It decides on the optimal vector width, which
26// can be one, if vectorization is not profitable.
27//
28// There is a development effort going on to migrate the loop vectorizer to the
29// VPlan infrastructure and to introduce outer loop vectorization support (see
30// docs/VectorizationPlan.rst and
31// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32// purpose, we temporarily introduced the VPlan-native vectorization path: an
33// alternative vectorization path that is natively implemented on top of the
34// VPlan infrastructure. See EnableVPlanNativePath for enabling.
35//
36//===----------------------------------------------------------------------===//
37//
38// The reduction-variable vectorization is based on the paper:
39// D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40//
41// Variable uniformity checks are inspired by:
42// Karrenberg, R. and Hack, S. Whole Function Vectorization.
43//
44// The interleaved access vectorization is based on the paper:
45// Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
46// Data for SIMD
47//
48// Other ideas/concepts are from:
49// A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50//
51// S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
52// Vectorizing Compilers.
53//
54//===----------------------------------------------------------------------===//
55
58#include "VPRecipeBuilder.h"
59#include "VPlan.h"
60#include "VPlanAnalysis.h"
61#include "VPlanCFG.h"
62#include "VPlanHelpers.h"
63#include "VPlanPatternMatch.h"
64#include "VPlanTransforms.h"
65#include "VPlanUtils.h"
66#include "VPlanVerifier.h"
67#include "llvm/ADT/APInt.h"
68#include "llvm/ADT/ArrayRef.h"
69#include "llvm/ADT/DenseMap.h"
71#include "llvm/ADT/Hashing.h"
72#include "llvm/ADT/MapVector.h"
73#include "llvm/ADT/STLExtras.h"
76#include "llvm/ADT/Statistic.h"
77#include "llvm/ADT/StringRef.h"
78#include "llvm/ADT/Twine.h"
79#include "llvm/ADT/TypeSwitch.h"
84#include "llvm/Analysis/CFG.h"
101#include "llvm/IR/Attributes.h"
102#include "llvm/IR/BasicBlock.h"
103#include "llvm/IR/CFG.h"
104#include "llvm/IR/Constant.h"
105#include "llvm/IR/Constants.h"
106#include "llvm/IR/DataLayout.h"
107#include "llvm/IR/DebugInfo.h"
108#include "llvm/IR/DebugLoc.h"
109#include "llvm/IR/DerivedTypes.h"
111#include "llvm/IR/Dominators.h"
112#include "llvm/IR/Function.h"
113#include "llvm/IR/IRBuilder.h"
114#include "llvm/IR/InstrTypes.h"
115#include "llvm/IR/Instruction.h"
116#include "llvm/IR/Instructions.h"
118#include "llvm/IR/Intrinsics.h"
119#include "llvm/IR/MDBuilder.h"
120#include "llvm/IR/Metadata.h"
121#include "llvm/IR/Module.h"
122#include "llvm/IR/Operator.h"
123#include "llvm/IR/PatternMatch.h"
125#include "llvm/IR/Type.h"
126#include "llvm/IR/Use.h"
127#include "llvm/IR/User.h"
128#include "llvm/IR/Value.h"
129#include "llvm/IR/Verifier.h"
130#include "llvm/Support/Casting.h"
132#include "llvm/Support/Debug.h"
147#include <algorithm>
148#include <cassert>
149#include <cmath>
150#include <cstdint>
151#include <functional>
152#include <iterator>
153#include <limits>
154#include <memory>
155#include <string>
156#include <tuple>
157#include <utility>
158
159using namespace llvm;
160using namespace SCEVPatternMatch;
161
162#define LV_NAME "loop-vectorize"
163#define DEBUG_TYPE LV_NAME
164
165#ifndef NDEBUG
166const char VerboseDebug[] = DEBUG_TYPE "-verbose";
167#endif
168
169STATISTIC(LoopsVectorized, "Number of loops vectorized");
170STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
171STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
172STATISTIC(LoopsEarlyExitVectorized, "Number of early exit loops vectorized");
173
175 "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
176 cl::desc("Enable vectorization of epilogue loops."));
177
179 "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
180 cl::desc("When epilogue vectorization is enabled, and a value greater than "
181 "1 is specified, forces the given VF for all applicable epilogue "
182 "loops."));
183
185 "epilogue-vectorization-minimum-VF", cl::Hidden,
186 cl::desc("Only loops with vectorization factor equal to or larger than "
187 "the specified value are considered for epilogue vectorization."));
188
189/// Loops with a known constant trip count below this number are vectorized only
190/// if no scalar iteration overheads are incurred.
192 "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
193 cl::desc("Loops with a constant trip count that is smaller than this "
194 "value are vectorized only if no scalar iteration overheads "
195 "are incurred."));
196
198 "vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
199 cl::desc("The maximum allowed number of runtime memory checks"));
200
201/// Note: This currently only applies to `llvm.masked.load` and
202/// `llvm.masked.store`. TODO: Extend this to cover other operations as needed.
204 "force-target-supports-masked-memory-ops", cl::init(false), cl::Hidden,
205 cl::desc("Assume the target supports masked memory operations (used for "
206 "testing)."));
207
208// Option prefer-predicate-over-epilogue indicates that an epilogue is undesired,
209// that predication is preferred, and this lists all options. I.e., the
210// vectorizer will try to fold the tail-loop (epilogue) into the vector body
211// and predicate the instructions accordingly. If tail-folding fails, there are
212// different fallback strategies depending on these values:
213namespace PreferPredicateTy {
214 enum Option {
215 ScalarEpilogue = 0,
216 PredicateElseScalarEpilogue,
217 PredicateOrDontVectorize
218 };
219} // namespace PreferPredicateTy
220
222 "prefer-predicate-over-epilogue",
225 cl::desc("Tail-folding and predication preferences over creating a scalar "
226 "epilogue loop."),
228 "scalar-epilogue",
229 "Don't tail-predicate loops, create scalar epilogue"),
231 "predicate-else-scalar-epilogue",
232 "prefer tail-folding, create scalar epilogue if tail "
233 "folding fails."),
235 "predicate-dont-vectorize",
236 "prefers tail-folding, don't attempt vectorization if "
237 "tail-folding fails.")));
238
240 "force-tail-folding-style", cl::desc("Force the tail folding style"),
243 clEnumValN(TailFoldingStyle::None, "none", "Disable tail folding"),
246 "Create lane mask for data only, using active.lane.mask intrinsic"),
248 "data-without-lane-mask",
249 "Create lane mask with compare/stepvector"),
251 "Create lane mask using active.lane.mask intrinsic, and use "
252 "it for both data and control flow"),
254 "Use predicated EVL instructions for tail folding. If EVL "
255 "is unsupported, fallback to data-without-lane-mask.")));
256
258 "enable-wide-lane-mask", cl::init(false), cl::Hidden,
259 cl::desc("Enable use of wide lane masks when used for control flow in "
260 "tail-folded loops"));
261
263 "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
264 cl::desc("Maximize bandwidth when selecting vectorization factor which "
265 "will be determined by the smallest type in loop."));
266
268 "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
269 cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
270
271/// An interleave-group may need masking if it resides in a block that needs
272/// predication, or in order to mask away gaps.
274 "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
275 cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
276
278 "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
279 cl::desc("A flag that overrides the target's number of scalar registers."));
280
282 "force-target-num-vector-regs", cl::init(0), cl::Hidden,
283 cl::desc("A flag that overrides the target's number of vector registers."));
284
286 "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
287 cl::desc("A flag that overrides the target's max interleave factor for "
288 "scalar loops."));
289
291 "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
292 cl::desc("A flag that overrides the target's max interleave factor for "
293 "vectorized loops."));
294
296 "force-target-instruction-cost", cl::init(0), cl::Hidden,
297 cl::desc("A flag that overrides the target's expected cost for "
298 "an instruction to a single constant value. Mostly "
299 "useful for getting consistent testing."));
300
302 "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
303 cl::desc(
304 "Pretend that scalable vectors are supported, even if the target does "
305 "not support them. This flag should only be used for testing."));
306
308 "small-loop-cost", cl::init(20), cl::Hidden,
309 cl::desc(
310 "The cost of a loop that is considered 'small' by the interleaver."));
311
313 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
314 cl::desc("Enable the use of the block frequency analysis to access PGO "
315 "heuristics minimizing code growth in cold regions and being more "
316 "aggressive in hot regions."));
317
318// Runtime interleave loops for load/store throughput.
320 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
321 cl::desc(
322 "Enable runtime interleaving until load/store ports are saturated"));
323
324/// The number of stores in a loop that are allowed to need predication.
326 "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
327 cl::desc("Max number of stores to be predicated behind an if."));
328
330 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
331 cl::desc("Count the induction variable only once when interleaving"));
332
334 "enable-cond-stores-vec", cl::init(true), cl::Hidden,
335 cl::desc("Enable if predication of stores during vectorization."));
336
338 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
339 cl::desc("The maximum interleave count to use when interleaving a scalar "
340 "reduction in a nested loop."));
341
342static cl::opt<bool>
343 PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
345 cl::desc("Prefer in-loop vector reductions, "
346 "overriding the targets preference."));
347
349 "force-ordered-reductions", cl::init(false), cl::Hidden,
350 cl::desc("Enable the vectorisation of loops with in-order (strict) "
351 "FP reductions"));
352
354 "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
355 cl::desc(
356 "Prefer predicating a reduction operation over an after loop select."));
357
359 "enable-vplan-native-path", cl::Hidden,
360 cl::desc("Enable VPlan-native vectorization path with "
361 "support for outer loop vectorization."));
362
364 llvm::VerifyEachVPlan("vplan-verify-each",
365#ifdef EXPENSIVE_CHECKS
366 cl::init(true),
367#else
368 cl::init(false),
369#endif
371 cl::desc("Verify VPlans after VPlan transforms."));
372
373#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
375 "vplan-print-after-all", cl::init(false), cl::Hidden,
376 cl::desc("Print VPlans after all VPlan transformations."));
377
379 "vplan-print-after", cl::Hidden,
380 cl::desc("Print VPlans after specified VPlan transformations (regexp)."));
381
383 "vplan-print-vector-region-scope", cl::init(false), cl::Hidden,
384 cl::desc("Limit VPlan printing to vector loop region in "
385 "`-vplan-print-after*` if the plan has one."));
386#endif
387
388// This flag enables the stress testing of the VPlan H-CFG construction in the
389// VPlan-native vectorization path. It must be used in conjunction with
390// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
391// verification of the H-CFGs built.
393 "vplan-build-stress-test", cl::init(false), cl::Hidden,
394 cl::desc(
395 "Build VPlan for every supported loop nest in the function and bail "
396 "out right after the build (stress test the VPlan H-CFG construction "
397 "in the VPlan-native vectorization path)."));
398
400 "interleave-loops", cl::init(true), cl::Hidden,
401 cl::desc("Enable loop interleaving in Loop vectorization passes"));
403 "vectorize-loops", cl::init(true), cl::Hidden,
404 cl::desc("Run the Loop vectorization passes"));
405
407 "force-widen-divrem-via-safe-divisor", cl::Hidden,
408 cl::desc(
409 "Override cost based safe divisor widening for div/rem instructions"));
410
412 "vectorizer-maximize-bandwidth-for-vector-calls", cl::init(true),
414 cl::desc("Try wider VFs if they enable the use of vector variants"));
415
417 "enable-early-exit-vectorization", cl::init(true), cl::Hidden,
418 cl::desc(
419 "Enable vectorization of early exit loops with uncountable exits."));
420
422 "vectorizer-consider-reg-pressure", cl::init(false), cl::Hidden,
423 cl::desc("Discard VFs if their register pressure is too high."));
424
425// Likelihood of bypassing the vectorized loop because there are zero trips left
426// after prolog. See `emitIterationCountCheck`.
427static constexpr uint32_t MinItersBypassWeights[] = {1, 127};
428
429/// A helper function that returns true if the given type is irregular. The
430/// type is irregular if its allocated size doesn't equal the store size of an
431/// element of the corresponding vector type.
432static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
433 // Determine if an array of N elements of type Ty is "bitcast compatible"
434 // with a <N x Ty> vector.
435 // This is only true if there is no padding between the array elements.
436 return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
437}
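// For example (illustrative), on a typical x86-64 data layout x86_fp80 has a
// type size of 80 bits but an alloc size of 128 bits, so it is irregular: an
// array of N x86_fp80 values is not bitcast-compatible with <N x x86_fp80>.
// Types such as i32 or float, whose alloc and type sizes match, are regular.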
438
439/// A version of ScalarEvolution::getSmallConstantTripCount that returns an
440/// ElementCount to include loops whose trip count is a function of vscale.
441static ElementCount getSmallConstantTripCount(ScalarEvolution *SE,
442 const Loop *L) {
443 if (unsigned ExpectedTC = SE->getSmallConstantTripCount(L))
444 return ElementCount::getFixed(ExpectedTC);
445
446 const SCEV *BTC = SE->getBackedgeTakenCount(L);
447 if (isa<SCEVCouldNotCompute>(BTC))
448 return ElementCount::getFixed(0);
449
450 const SCEV *ExitCount = SE->getTripCountFromExitCount(BTC, BTC->getType(), L);
451 if (isa<SCEVVScale>(ExitCount))
452 return ElementCount::getScalable(1);
453
454 const APInt *Scale;
455 if (match(ExitCount, m_scev_Mul(m_scev_APInt(Scale), m_SCEVVScale())))
456 if (cast<SCEVMulExpr>(ExitCount)->hasNoUnsignedWrap())
457 if (Scale->getActiveBits() <= 32)
458 return ElementCount::getScalable(Scale->getZExtValue());
459
460 return ElementCount::getFixed(0);
461}
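// For example (illustrative), a loop whose backedge-taken count is
// (16 * vscale - 1), where the multiply does not wrap unsigned, has a trip
// count of 16 * vscale and is returned as ElementCount::getScalable(16).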
462
463/// Returns "best known" trip count, which is either a valid positive trip count
464/// or std::nullopt when an estimate cannot be made (including when the trip
465/// count would overflow), for the specified loop \p L as defined by the
466/// following procedure:
467/// 1) Returns exact trip count if it is known.
468/// 2) Returns expected trip count according to profile data if any.
469/// 3) Returns upper bound estimate if known, and if \p CanUseConstantMax.
470/// 4) Returns std::nullopt if all of the above failed.
471static std::optional<ElementCount>
472getSmallBestKnownTC(PredicatedScalarEvolution &PSE, Loop *L,
473 bool CanUseConstantMax = true) {
474 // Check if exact trip count is known.
475 if (auto ExpectedTC = getSmallConstantTripCount(PSE.getSE(), L))
476 return ExpectedTC;
477
478 // Check if there is an expected trip count available from profile data.
479 if (LoopVectorizeWithBlockFrequency)
480 if (auto EstimatedTC = getLoopEstimatedTripCount(L))
481 return ElementCount::getFixed(*EstimatedTC);
482
483 if (!CanUseConstantMax)
484 return std::nullopt;
485
486 // Check if upper bound estimate is known.
487 if (unsigned ExpectedTC = PSE.getSmallConstantMaxTripCount())
488 return ElementCount::getFixed(ExpectedTC);
489
490 return std::nullopt;
491}
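// For example (illustrative): a loop running exactly for i = 0..99 hits case
// (1) and yields ElementCount::getFixed(100); a loop with an unknown trip
// count but profile data estimating roughly 8 iterations hits case (2) and
// yields ElementCount::getFixed(8).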
492
493namespace {
494// Forward declare GeneratedRTChecks.
495class GeneratedRTChecks;
496
497using SCEV2ValueTy = DenseMap<const SCEV *, Value *>;
498} // namespace
499
500namespace llvm {
501
503
504/// InnerLoopVectorizer vectorizes loops which contain only one basic
505/// block to a specified vectorization factor (VF).
506/// This class performs the widening of scalars into vectors, or multiple
507/// scalars. This class also implements the following features:
508/// * It inserts an epilogue loop for handling loops that don't have iteration
509/// counts that are known to be a multiple of the vectorization factor.
510/// * It handles the code generation for reduction variables.
511/// * Scalarization (implementation using scalars) of un-vectorizable
512/// instructions.
513/// InnerLoopVectorizer does not perform any vectorization-legality
514/// checks, and relies on the caller to check for the different legality
515/// aspects. The InnerLoopVectorizer relies on the
516/// LoopVectorizationLegality class to provide information about the induction
517/// and reduction variables that were found, for a given vectorization factor.
519public:
523 ElementCount VecWidth, unsigned UnrollFactor,
525 GeneratedRTChecks &RTChecks, VPlan &Plan)
526 : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TTI(TTI), AC(AC),
527 VF(VecWidth), UF(UnrollFactor), Builder(PSE.getSE()->getContext()),
530 Plan.getVectorLoopRegion()->getSinglePredecessor())) {}
531
532 virtual ~InnerLoopVectorizer() = default;
533
534 /// Creates a basic block for the scalar preheader. Both
535 /// EpilogueVectorizerMainLoop and EpilogueVectorizerEpilogueLoop overwrite
536 /// the method to create additional blocks and checks needed for epilogue
537 /// vectorization.
539
540 /// Fix the vectorized code, taking care of header phi's, and more.
542
543 /// Fix the non-induction PHIs in \p Plan.
545
546 /// Returns the original loop trip count.
547 Value *getTripCount() const { return TripCount; }
548
549 /// Used to set the trip count after ILV's construction and after the
550 /// preheader block has been executed. Note that this always holds the trip
551 /// count of the original loop for both main loop and epilogue vectorization.
552 void setTripCount(Value *TC) { TripCount = TC; }
553
554protected:
556
557 /// Create and return a new IR basic block for the scalar preheader whose name
558 /// is prefixed with \p Prefix.
560
561 /// Allow subclasses to override and print debug traces before/after vplan
562 /// execution, when trace information is requested.
563 virtual void printDebugTracesAtStart() {}
564 virtual void printDebugTracesAtEnd() {}
565
566 /// The original loop.
567 Loop *OrigLoop;
568
569 /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
570 /// dynamic knowledge to simplify SCEV expressions and converts them to a
571 /// more usable form.
572 PredicatedScalarEvolution &PSE;
573
574 /// Loop Info.
575 LoopInfo *LI;
576
577 /// Dominator Tree.
578 DominatorTree *DT;
579
580 /// Target Transform Info.
581 const TargetTransformInfo *TTI;
582
583 /// Assumption Cache.
584 AssumptionCache *AC;
585
586 /// The vectorization SIMD factor to use. Each vector will have this many
587 /// vector elements.
588 ElementCount VF;
589
590 /// The vectorization unroll factor to use. Each scalar is vectorized to this
591 /// many different vector instructions.
592 unsigned UF;
593
594 /// The builder that we use.
595 IRBuilder<> Builder;
596
597 // --- Vectorization state ---
598
599 /// Trip count of the original loop.
600 Value *TripCount = nullptr;
601
602 /// The profitability analysis.
603 LoopVectorizationCostModel *Cost;
604
605 /// Structure to hold information about generated runtime checks, responsible
606 /// for cleaning the checks, if vectorization turns out unprofitable.
607 GeneratedRTChecks &RTChecks;
608
609 VPlan &Plan;
610
611 /// The vector preheader block of \p Plan, used as target for check blocks
612 /// introduced during skeleton creation.
614};
615
616/// Encapsulate information regarding vectorization of a loop and its epilogue.
617/// This information is meant to be updated and used across two stages of
618/// epilogue vectorization.
619struct EpilogueLoopVectorizationInfo {
620 ElementCount MainLoopVF = ElementCount::getFixed(0);
621 unsigned MainLoopUF = 0;
622 ElementCount EpilogueVF = ElementCount::getFixed(0);
623 unsigned EpilogueUF = 0;
626 Value *TripCount = nullptr;
629
631 ElementCount EVF, unsigned EUF,
633 : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF),
635 assert(EUF == 1 &&
636 "A high UF for the epilogue loop is likely not beneficial.");
637 }
638};
639
640/// An extension of the inner loop vectorizer that creates a skeleton for a
641/// vectorized loop that has its epilogue (residual) also vectorized.
642/// The idea is to run the VPlan on a given loop twice, firstly to set up the
643/// skeleton and vectorize the main loop, and secondly to complete the skeleton
644/// from the first step and vectorize the epilogue. This is achieved by
645/// deriving two concrete strategy classes from this base class and invoking
646/// them in succession from the loop vectorizer planner.
648public:
658
659 /// Holds and updates state information required to vectorize the main loop
660 /// and its epilogue in two separate passes. This setup helps us avoid
661 /// regenerating and recomputing runtime safety checks. It also helps us to
662 /// shorten the iteration-count-check path length for the cases where the
663 /// iteration count of the loop is so small that the main vector loop is
664 /// completely skipped.
666
667protected:
669};
670
671/// A specialized derived class of inner loop vectorizer that performs
672/// vectorization of *main* loops in the process of vectorizing loops and their
673/// epilogues.
675public:
686 /// Implements the interface for creating a vectorized skeleton using the
687 /// *main loop* strategy (i.e., the first pass of VPlan execution).
689
690protected:
691 /// Introduces a new VPIRBasicBlock for \p CheckIRBB to Plan between the
692 /// vector preheader and its predecessor, also connecting the new block to the
693 /// scalar preheader.
694 void introduceCheckBlockInVPlan(BasicBlock *CheckIRBB);
695
696 // Create a check to see if the main vector loop should be executed
698 unsigned UF) const;
699
700 /// Emits an iteration count bypass check once for the main loop (when \p
701 /// ForEpilogue is false) and once for the epilogue loop (when \p
702 /// ForEpilogue is true).
704 bool ForEpilogue);
705 void printDebugTracesAtStart() override;
706 void printDebugTracesAtEnd() override;
707};
708
709// A specialized derived class of inner loop vectorizer that performs
710// vectorization of *epilogue* loops in the process of vectorizing loops and
711// their epilogues.
713public:
720 GeneratedRTChecks &Checks, VPlan &Plan)
722 Checks, Plan, EPI.EpilogueVF,
723 EPI.EpilogueVF, EPI.EpilogueUF) {}
724 /// Implements the interface for creating a vectorized skeleton using the
725 /// *epilogue loop* strategy (i.e., the second pass of VPlan execution).
727
728protected:
729 void printDebugTracesAtStart() override;
730 void printDebugTracesAtEnd() override;
731};
732} // end namespace llvm
733
734/// Look for a meaningful debug location on the instruction or its operands.
735static DebugLoc getDebugLocFromInstOrOperands(Instruction *I) {
736 if (!I)
737 return DebugLoc::getUnknown();
738
739 DebugLoc Empty;
740 if (I->getDebugLoc() != Empty)
741 return I->getDebugLoc();
742
743 for (Use &Op : I->operands()) {
744 if (Instruction *OpInst = dyn_cast<Instruction>(Op))
745 if (OpInst->getDebugLoc() != Empty)
746 return OpInst->getDebugLoc();
747 }
748
749 return I->getDebugLoc();
750}
751
752/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
753/// is passed, the message relates to that particular instruction.
754#ifndef NDEBUG
755static void debugVectorizationMessage(const StringRef Prefix,
756 const StringRef DebugMsg,
757 Instruction *I) {
758 dbgs() << "LV: " << Prefix << DebugMsg;
759 if (I != nullptr)
760 dbgs() << " " << *I;
761 else
762 dbgs() << '.';
763 dbgs() << '\n';
764}
765#endif
766
767/// Create an analysis remark that explains why vectorization failed
768///
769/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
770/// RemarkName is the identifier for the remark. If \p I is passed it is an
771/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
772/// the location of the remark. If \p DL is passed, use it as debug location for
773/// the remark. \return the remark object that can be streamed to.
774static OptimizationRemarkAnalysis
775createLVAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop,
776 Instruction *I, DebugLoc DL = {}) {
777 BasicBlock *CodeRegion = I ? I->getParent() : TheLoop->getHeader();
778 // If debug location is attached to the instruction, use it. Otherwise if DL
779 // was not provided, use the loop's.
780 if (I && I->getDebugLoc())
781 DL = I->getDebugLoc();
782 else if (!DL)
783 DL = TheLoop->getStartLoc();
784
785 return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
786}
787
788namespace llvm {
789
790/// Return a value for Step multiplied by VF.
791Value *createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF,
792 int64_t Step) {
793 assert(Ty->isIntegerTy() && "Expected an integer step");
794 ElementCount VFxStep = VF.multiplyCoefficientBy(Step);
795 assert(isPowerOf2_64(VF.getKnownMinValue()) && "must pass power-of-2 VF");
796 if (VF.isScalable() && isPowerOf2_64(Step)) {
797 return B.CreateShl(
798 B.CreateVScale(Ty),
799 ConstantInt::get(Ty, Log2_64(VFxStep.getKnownMinValue())), "", true);
800 }
801 return B.CreateElementCount(Ty, VFxStep);
802}
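// For example (illustrative), with VF = <vscale x 4> and Step = 2 the product
// has a known minimum of 8, so the code above emits "vscale << 3" instead of
// a multiply.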
803
804/// Return the runtime value for VF.
805Value *getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF) {
806 return B.CreateElementCount(Ty, VF);
807}
808
809void reportVectorizationFailure(const StringRef DebugMsg,
810 const StringRef OREMsg, const StringRef ORETag,
811 OptimizationRemarkEmitter *ORE, Loop *TheLoop,
812 Instruction *I) {
813 LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
814 LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
815 ORE->emit(
816 createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
817 << "loop not vectorized: " << OREMsg);
818}
819
820/// Reports an informative message: print \p Msg for debugging purposes as well
821/// as an optimization remark. Uses either \p I as location of the remark, or
822/// otherwise \p TheLoop. If \p DL is passed, use it as debug location for the
823/// remark.
824static void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
825 OptimizationRemarkEmitter *ORE,
826 Loop *TheLoop, Instruction *I = nullptr,
827 DebugLoc DL = {}) {
828 LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
829 LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
830 ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop,
831 I, DL)
832 << Msg);
833}
834
835/// Report successful vectorization of the loop. In case an outer loop is
836/// vectorized, prepend "outer" to the vectorization remark.
837static void reportVectorization(OptimizationRemarkEmitter *ORE, Loop *TheLoop,
838 VectorizationFactor VF, unsigned IC) {
839 LLVM_DEBUG(debugVectorizationMessage(
840 "Vectorizing: ", TheLoop->isInnermost() ? "innermost loop" : "outer loop",
841 nullptr));
842 StringRef LoopType = TheLoop->isInnermost() ? "" : "outer ";
843 ORE->emit([&]() {
844 return OptimizationRemark(LV_NAME, "Vectorized", TheLoop->getStartLoc(),
845 TheLoop->getHeader())
846 << "vectorized " << LoopType << "loop (vectorization width: "
847 << ore::NV("VectorizationFactor", VF.Width)
848 << ", interleaved count: " << ore::NV("InterleaveCount", IC) << ")";
849 });
850}
851
852} // end namespace llvm
853
854namespace llvm {
855
856// Loop vectorization cost-model hints how the scalar epilogue loop should be
857// lowered.
858enum ScalarEpilogueLowering {
859
860 // The default: allowing scalar epilogues.
861 CM_ScalarEpilogueAllowed,
862
863 // Vectorization with OptForSize: don't allow epilogues.
864 CM_ScalarEpilogueNotAllowedOptSize,
865
866 // A special case of vectorization with OptForSize: loops with a very small
867 // trip count are considered for vectorization under OptForSize, thereby
868 // making sure the cost of their loop body is dominant, free of runtime
869 // guards and scalar iteration overheads.
870 CM_ScalarEpilogueNotAllowedLowTripLoop,
871
872 // Loop hint predicate indicating an epilogue is undesired.
873 CM_ScalarEpilogueNotNeededUsePredicate,
874
875 // Directive indicating we must either tail fold or not vectorize
876 CM_ScalarEpilogueNotAllowedUsePredicate
877};
878
879/// LoopVectorizationCostModel - estimates the expected speedups due to
880/// vectorization.
881/// In many cases vectorization is not profitable. This can happen because of
882/// a number of reasons. In this class we mainly attempt to predict the
883/// expected speedup/slowdowns due to the supported instruction set. We use the
884/// TargetTransformInfo to query the different backends for the cost of
885/// different operations.
888
889public:
897 std::function<BlockFrequencyInfo &()> GetBFI,
898 const Function *F, const LoopVectorizeHints *Hints,
900 : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
901 TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), GetBFI(GetBFI),
904 if (TTI.supportsScalableVectors() || ForceTargetSupportsScalableVectors)
905 initializeVScaleForTuning();
907 }
908
909 /// \return An upper bound for the vectorization factors (both fixed and
910 /// scalable). If the factors are 0, vectorization and interleaving should be
911 /// avoided up front.
912 FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);
913
914 /// \return True if runtime checks are required for vectorization, and false
915 /// otherwise.
916 bool runtimeChecksRequired();
917
918 /// Setup cost-based decisions for user vectorization factor.
919 /// \return true if the UserVF is a feasible VF to be chosen.
922 return expectedCost(UserVF).isValid();
923 }
924
925 /// \return True if maximizing vector bandwidth is enabled by the target or
926 /// user options, for the given register kind.
927 bool useMaxBandwidth(TargetTransformInfo::RegisterKind RegKind);
928
929 /// \return True if register pressure should be considered for the given VF.
930 bool shouldConsiderRegPressureForVF(ElementCount VF);
931
932 /// \return The size (in bits) of the smallest and widest types in the code
933 /// that needs to be vectorized. We ignore values that remain scalar such as
934 /// 64 bit loop indices.
935 std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
936
937 /// Memory access instruction may be vectorized in more than one way.
938 /// Form of instruction after vectorization depends on cost.
939 /// This function takes cost-based decisions for Load/Store instructions
940 /// and collects them in a map. This decisions map is used for building
941 /// the lists of loop-uniform and loop-scalar instructions.
942 /// The calculated cost is saved with widening decision in order to
943 /// avoid redundant calculations.
944 void setCostBasedWideningDecision(ElementCount VF);
945
946 /// A call may be vectorized in different ways depending on whether we have
947 /// vectorized variants available and whether the target supports masking.
948 /// This function analyzes all calls in the function at the supplied VF,
949 /// makes a decision based on the costs of available options, and stores that
950 /// decision in a map for use in planning and plan execution.
951 void setVectorizedCallDecision(ElementCount VF);
952
953 /// Collect values we want to ignore in the cost model.
954 void collectValuesToIgnore();
955
956 /// Collect all element types in the loop for which widening is needed.
957 void collectElementTypesForWidening();
958
959 /// Split reductions into those that happen in the loop, and those that happen
960 /// outside. In loop reductions are collected into InLoopReductions.
961 void collectInLoopReductions();
962
963 /// Returns true if we should use strict in-order reductions for the given
964 /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
965 /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
966 /// of FP operations.
967 bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) const {
968 return !Hints->allowReordering() && RdxDesc.isOrdered();
969 }
970
971 /// \returns The smallest bitwidth each instruction can be represented with.
972 /// The vector equivalents of these instructions should be truncated to this
973 /// type.
975 return MinBWs;
976 }
977
978 /// \returns True if it is more profitable to scalarize instruction \p I for
979 /// vectorization factor \p VF.
980 bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
981 assert(VF.isVector() &&
982 "Profitable to scalarize relevant only for VF > 1.");
983 assert(
984 TheLoop->isInnermost() &&
985 "cost-model should not be used for outer loops (in VPlan-native path)");
986
987 auto Scalars = InstsToScalarize.find(VF);
988 assert(Scalars != InstsToScalarize.end() &&
989 "VF not yet analyzed for scalarization profitability");
990 return Scalars->second.contains(I);
991 }
992
993 /// Returns true if \p I is known to be uniform after vectorization.
994 bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
995 assert(
996 TheLoop->isInnermost() &&
997 "cost-model should not be used for outer loops (in VPlan-native path)");
998 // Pseudo probe needs to be duplicated for each unrolled iteration and
999 // vector lane so that profiled loop trip count can be accurately
1000 // accumulated instead of being under counted.
1002 return false;
1003
1004 if (VF.isScalar())
1005 return true;
1006
1007 auto UniformsPerVF = Uniforms.find(VF);
1008 assert(UniformsPerVF != Uniforms.end() &&
1009 "VF not yet analyzed for uniformity");
1010 return UniformsPerVF->second.count(I);
1011 }
1012
1013 /// Returns true if \p I is known to be scalar after vectorization.
1014 bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1015 assert(
1016 TheLoop->isInnermost() &&
1017 "cost-model should not be used for outer loops (in VPlan-native path)");
1018 if (VF.isScalar())
1019 return true;
1020
1021 auto ScalarsPerVF = Scalars.find(VF);
1022 assert(ScalarsPerVF != Scalars.end() &&
1023 "Scalar values are not calculated for VF");
1024 return ScalarsPerVF->second.count(I);
1025 }
1026
1027 /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1028 /// for vectorization factor \p VF.
1029 bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1030 // Truncs must truncate at most to their destination type.
1031 if (isa_and_nonnull<TruncInst>(I) && MinBWs.contains(I) &&
1032 I->getType()->getScalarSizeInBits() < MinBWs.lookup(I))
1033 return false;
1034 return VF.isVector() && MinBWs.contains(I) &&
1035 !isProfitableToScalarize(I, VF) &&
1036 !isScalarAfterVectorization(I, VF);
1037 }
1038
1039 /// Decision that was taken during cost calculation for memory instruction.
1040 enum InstWidening {
1041 CM_Unknown,
1042 CM_Widen, // For consecutive accesses with stride +1.
1043 CM_Widen_Reverse, // For consecutive accesses with stride -1.
1044 CM_Interleave,
1045 CM_GatherScatter,
1046 CM_Scalarize,
1047 CM_VectorCall,
1048 CM_IntrinsicCall
1049 };
1050
1051 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1052 /// instruction \p I and vector width \p VF.
1053 void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1054 InstructionCost Cost) {
1055 assert(VF.isVector() && "Expected VF >=2");
1056 WideningDecisions[{I, VF}] = {W, Cost};
1057 }
1058
1059 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1060 /// interleaving group \p Grp and vector width \p VF.
1061 void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1062 ElementCount VF, InstWidening W,
1063 InstructionCost Cost) {
1064 assert(VF.isVector() && "Expected VF >=2");
1065 /// Broadcast this decision to all instructions inside the group.
1066 /// When interleaving, the cost will only be assigned to one instruction, the
1067 /// insert position. For other cases, add the appropriate fraction of the
1068 /// total cost to each instruction. This ensures accurate costs are used,
1069 /// even if the insert position instruction is not used.
1070 InstructionCost InsertPosCost = Cost;
1071 InstructionCost OtherMemberCost = 0;
1072 if (W != CM_Interleave)
1073 OtherMemberCost = InsertPosCost = Cost / Grp->getNumMembers();
1075 for (unsigned Idx = 0; Idx < Grp->getFactor(); ++Idx) {
1076 if (auto *I = Grp->getMember(Idx)) {
1077 if (Grp->getInsertPos() == I)
1078 WideningDecisions[{I, VF}] = {W, InsertPosCost};
1079 else
1080 WideningDecisions[{I, VF}] = {W, OtherMemberCost};
1081 }
1082 }
1083 }
1084
1085 /// Return the cost model decision for the given instruction \p I and vector
1086 /// width \p VF. Return CM_Unknown if this instruction did not pass
1087 /// through the cost modeling.
1088 InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
1089 assert(VF.isVector() && "Expected VF to be a vector VF");
1090 assert(
1091 TheLoop->isInnermost() &&
1092 "cost-model should not be used for outer loops (in VPlan-native path)");
1093
1094 std::pair<Instruction *, ElementCount> InstOnVF(I, VF);
1095 auto Itr = WideningDecisions.find(InstOnVF);
1096 if (Itr == WideningDecisions.end())
1097 return CM_Unknown;
1098 return Itr->second.first;
1099 }
1100
1101 /// Return the vectorization cost for the given instruction \p I and vector
1102 /// width \p VF.
1103 InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1104 assert(VF.isVector() && "Expected VF >=2");
1105 std::pair<Instruction *, ElementCount> InstOnVF(I, VF);
1106 assert(WideningDecisions.contains(InstOnVF) &&
1107 "The cost is not calculated");
1108 return WideningDecisions[InstOnVF].second;
1109 }
1110
1118
1120 Function *Variant, Intrinsic::ID IID,
1121 std::optional<unsigned> MaskPos,
1123 assert(!VF.isScalar() && "Expected vector VF");
1124 CallWideningDecisions[{CI, VF}] = {Kind, Variant, IID, MaskPos, Cost};
1125 }
1126
1128 ElementCount VF) const {
1129 assert(!VF.isScalar() && "Expected vector VF");
1130 auto I = CallWideningDecisions.find({CI, VF});
1131 if (I == CallWideningDecisions.end())
1132 return {CM_Unknown, nullptr, Intrinsic::not_intrinsic, std::nullopt, 0};
1133 return I->second;
1134 }
1135
1136 /// Return True if instruction \p I is an optimizable truncate whose operand
1137 /// is an induction variable. Such a truncate will be removed by adding a new
1138 /// induction variable with the destination type.
1140 // If the instruction is not a truncate, return false.
1141 auto *Trunc = dyn_cast<TruncInst>(I);
1142 if (!Trunc)
1143 return false;
1144
1145 // Get the source and destination types of the truncate.
1146 Type *SrcTy = toVectorTy(Trunc->getSrcTy(), VF);
1147 Type *DestTy = toVectorTy(Trunc->getDestTy(), VF);
1148
1149 // If the truncate is free for the given types, return false. Replacing a
1150 // free truncate with an induction variable would add an induction variable
1151 // update instruction to each iteration of the loop. We exclude from this
1152 // check the primary induction variable since it will need an update
1153 // instruction regardless.
1154 Value *Op = Trunc->getOperand(0);
1155 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1156 return false;
1157
1158 // If the truncated value is not an induction variable, return false.
1159 return Legal->isInductionPhi(Op);
1160 }
1161
1162 /// Collects the instructions to scalarize for each predicated instruction in
1163 /// the loop.
1164 void collectInstsToScalarize(ElementCount VF);
1165
1166 /// Collect values that will not be widened, including Uniforms, Scalars, and
1167 /// Instructions to Scalarize for the given \p VF.
1168 /// The sets depend on CM decision for Load/Store instructions
1169 /// that may be vectorized as interleave, gather-scatter or scalarized.
1170 /// Also make a decision on what to do about call instructions in the loop
1171 /// at that VF -- scalarize, call a known vector routine, or call a
1172 /// vector intrinsic.
1174 // Do the analysis once.
1175 if (VF.isScalar() || Uniforms.contains(VF))
1176 return;
1178 collectLoopUniforms(VF);
1180 collectLoopScalars(VF);
1182 }
1183
1184 /// Returns true if the target machine supports masked store operation
1185 /// for the given \p DataType and kind of access to \p Ptr.
1186 bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment,
1187 unsigned AddressSpace) const {
1188 return Legal->isConsecutivePtr(DataType, Ptr) &&
1190 TTI.isLegalMaskedStore(DataType, Alignment, AddressSpace));
1191 }
1192
1193 /// Returns true if the target machine supports masked load operation
1194 /// for the given \p DataType and kind of access to \p Ptr.
1195 bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment,
1196 unsigned AddressSpace) const {
1197 return Legal->isConsecutivePtr(DataType, Ptr) &&
1199 TTI.isLegalMaskedLoad(DataType, Alignment, AddressSpace));
1200 }
1201
1202 /// Returns true if the target machine can represent \p V as a masked gather
1203 /// or scatter operation.
1205 bool LI = isa<LoadInst>(V);
1206 bool SI = isa<StoreInst>(V);
1207 if (!LI && !SI)
1208 return false;
1209 auto *Ty = getLoadStoreType(V);
1211 if (VF.isVector())
1212 Ty = VectorType::get(Ty, VF);
1213 return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
1214 (SI && TTI.isLegalMaskedScatter(Ty, Align));
1215 }
1216
1217 /// Returns true if the target machine supports all of the reduction
1218 /// variables found for the given VF.
1220 return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1221 const RecurrenceDescriptor &RdxDesc = Reduction.second;
1222 return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1223 }));
1224 }
1225
1226 /// Given costs for both strategies, return true if the scalar predication
1227 /// lowering should be used for div/rem. This incorporates an override
1228 /// option so it is not simply a cost comparison.
1230 InstructionCost SafeDivisorCost) const {
1231 switch (ForceSafeDivisor) {
1232 case cl::BOU_UNSET:
1233 return ScalarCost < SafeDivisorCost;
1234 case cl::BOU_TRUE:
1235 return false;
1236 case cl::BOU_FALSE:
1237 return true;
1238 }
1239 llvm_unreachable("impossible case value");
1240 }
1241
1242 /// Returns true if \p I is an instruction which requires predication and
1243 /// for which our chosen predication strategy is scalarization (i.e. we
1244 /// don't have an alternate strategy such as masking available).
1245 /// \p VF is the vectorization factor that will be used to vectorize \p I.
1246 bool isScalarWithPredication(Instruction *I, ElementCount VF);
1247
1248 /// Returns true if \p I is an instruction that needs to be predicated
1249 /// at runtime. The result is independent of the predication mechanism.
1250 /// Superset of instructions that return true for isScalarWithPredication.
1251 bool isPredicatedInst(Instruction *I) const;
1252
1253 /// A helper function that returns how much we should divide the cost of a
1254 /// predicated block by. Typically this is the reciprocal of the block
1255 /// probability, i.e. if we return X we are assuming the predicated block will
1256 /// execute once for every X iterations of the loop header so the block should
1257 /// only contribute 1/X of its cost to the total cost calculation, but when
1258 /// optimizing for code size it will just be 1 as code size costs don't depend
1259 /// on execution probabilities.
1260 ///
1261 /// Note that if a block wasn't originally predicated but was predicated due
1262 /// to tail folding, the divisor will still be 1 because it will execute for
1263 /// every iteration of the loop header.
1264 inline uint64_t
1265 getPredBlockCostDivisor(TargetTransformInfo::TargetCostKind CostKind,
1266 const BasicBlock *BB);
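 // For example (illustrative), a block predicated so that it is expected to
 // execute on roughly one in four loop iterations gets a divisor of 4 when
 // optimizing for speed, so only a quarter of its cost is counted; with a
 // code-size cost kind the divisor is 1, since the instructions are emitted
 // regardless of how often they execute.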
1267
1268 /// Returns true if an artificially high cost for emulated masked memrefs
1269 /// should be used.
1270 bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);
1271
1272 /// Return the costs for our two available strategies for lowering a
1273 /// div/rem operation which requires speculating at least one lane.
1274 /// First result is for scalarization (will be invalid for scalable
1275 /// vectors); second is for the safe-divisor strategy.
1276 std::pair<InstructionCost, InstructionCost>
1277 getDivRemSpeculationCost(Instruction *I, ElementCount VF);
1278
1279 /// Returns true if \p I is a memory instruction with consecutive memory
1280 /// access that can be widened.
1281 bool memoryInstructionCanBeWidened(Instruction *I, ElementCount VF);
1282
1283 /// Returns true if \p I is a memory instruction in an interleaved-group
1284 /// of memory accesses that can be vectorized with wide vector loads/stores
1285 /// and shuffles.
1286 bool interleavedAccessCanBeWidened(Instruction *I, ElementCount VF) const;
1287
1288 /// Check if \p Instr belongs to any interleaved access group.
1290 return InterleaveInfo.isInterleaved(Instr);
1291 }
1292
1293 /// Get the interleaved access group that \p Instr belongs to.
1296 return InterleaveInfo.getInterleaveGroup(Instr);
1297 }
1298
1299 /// Returns true if we're required to use a scalar epilogue for at least
1300 /// the final iteration of the original loop.
1301 bool requiresScalarEpilogue(bool IsVectorizing) const {
1302 if (!isScalarEpilogueAllowed()) {
1303 LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue\n");
1304 return false;
1305 }
1306 // If we might exit from anywhere but the latch and early exit vectorization
1307 // is disabled, we must run the exiting iteration in scalar form.
1308 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch() &&
1309 !(EnableEarlyExitVectorization && Legal->hasUncountableEarlyExit())) {
1310 LLVM_DEBUG(dbgs() << "LV: Loop requires scalar epilogue: not exiting "
1311 "from latch block\n");
1312 return true;
1313 }
1314 if (IsVectorizing && InterleaveInfo.requiresScalarEpilogue()) {
1315 LLVM_DEBUG(dbgs() << "LV: Loop requires scalar epilogue: "
1316 "interleaved group requires scalar epilogue\n");
1317 return true;
1318 }
1319 LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue\n");
1320 return false;
1321 }
1322
1323 /// Returns true if a scalar epilogue is not allowed due to optsize or a
1324 /// loop hint annotation.
1326 return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1327 }
1328
1329 /// Returns true if tail-folding is preferred over a scalar epilogue.
1331 return ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate ||
1332 ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate;
1333 }
1334
1335 /// Returns the TailFoldingStyle that is best for the current loop.
1337 return ChosenTailFoldingStyle;
1338 }
1339
1340 /// Selects and saves TailFoldingStyle.
1341 /// \param IsScalableVF true if scalable vector factors are enabled.
1342 /// \param UserIC User specific interleave count.
1343 void setTailFoldingStyle(bool IsScalableVF, unsigned UserIC) {
1344 assert(ChosenTailFoldingStyle == TailFoldingStyle::None &&
1345 "Tail folding must not be selected yet.");
1346 if (!Legal->canFoldTailByMasking()) {
1347 ChosenTailFoldingStyle = TailFoldingStyle::None;
1348 return;
1349 }
1350
1351 // Default to TTI preference, but allow command line override.
1352 ChosenTailFoldingStyle = TTI.getPreferredTailFoldingStyle();
1353 if (ForceTailFoldingStyle.getNumOccurrences())
1354 ChosenTailFoldingStyle = ForceTailFoldingStyle.getValue();
1355
1356 if (ChosenTailFoldingStyle != TailFoldingStyle::DataWithEVL)
1357 return;
1358 // Override EVL styles if needed.
1359 // FIXME: Investigate opportunity for fixed vector factor.
1360 bool EVLIsLegal = UserIC <= 1 && IsScalableVF &&
1361 TTI.hasActiveVectorLength() && !EnableVPlanNativePath;
1362 if (EVLIsLegal)
1363 return;
1364 // If for some reason EVL mode is unsupported, fallback to a scalar epilogue
1365 // if it's allowed, or DataWithoutLaneMask otherwise.
1366 if (ScalarEpilogueStatus == CM_ScalarEpilogueAllowed ||
1367 ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate)
1368 ChosenTailFoldingStyle = TailFoldingStyle::None;
1369 else
1370 ChosenTailFoldingStyle = TailFoldingStyle::DataWithoutLaneMask;
1371
1372 LLVM_DEBUG(
1373 dbgs() << "LV: Preference for VP intrinsics indicated. Will "
1374 "not try to generate VP Intrinsics "
1375 << (UserIC > 1
1376 ? "since interleave count specified is greater than 1.\n"
1377 : "due to non-interleaving reasons.\n"));
1378 }
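 // For example (illustrative): requesting the DataWithEVL style on a target
 // without active-vector-length support, or together with a user interleave
 // count greater than 1, is rejected above and falls back to either no tail
 // folding (when a scalar epilogue is allowed) or DataWithoutLaneMask.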
1379
1380 /// Returns true if all loop blocks should be masked to fold tail loop.
1381 bool foldTailByMasking() const {
1382 return ChosenTailFoldingStyle != TailFoldingStyle::None;
1383 }
1384
1385 /// Returns true if the use of wide lane masks is requested and the loop is
1386 /// using tail-folding with a lane mask for control flow.
1389 return false;
1390
1392 }
1393
1394 /// Return maximum safe number of elements to be processed per vector
1395 /// iteration, which do not prevent store-load forwarding and are safe with
1396 /// regard to the memory dependencies. Required for EVL-based VPlans to
1397 /// correctly calculate AVL (application vector length) as min(remaining AVL,
1398 /// MaxSafeElements).
1399 /// TODO: need to consider adjusting cost model to use this value as a
1400 /// vectorization factor for EVL-based vectorization.
1401 std::optional<unsigned> getMaxSafeElements() const { return MaxSafeElements; }
1402
1403 /// Returns true if the instructions in this block requires predication
1404 /// for any reason, e.g. because tail folding now requires a predicate
1405 /// or because the block in the original loop was predicated.
1406 bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const {
1407 return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1408 }
1409
1410 /// Returns true if VP intrinsics with explicit vector length support should
1411 /// be generated in the tail folded loop.
1412 bool foldTailWithEVL() const {
1413 return getTailFoldingStyle() == TailFoldingStyle::DataWithEVL;
1414 }
1415
1416 /// Returns true if the Phi is part of an inloop reduction.
1417 bool isInLoopReduction(PHINode *Phi) const {
1418 return InLoopReductions.contains(Phi);
1419 }
1420
1421 /// Returns the set of in-loop reduction PHIs.
1423 return InLoopReductions;
1424 }
1425
1426 /// Returns true if the predicated reduction select should be used to set the
1427 /// incoming value for the reduction phi.
1428 bool usePredicatedReductionSelect(RecurKind RecurrenceKind) const {
1429 // Force to use predicated reduction select since the EVL of the
1430 // second-to-last iteration might not be VF*UF.
1431 if (foldTailWithEVL())
1432 return true;
1433
1434 // Note: For FindLast recurrences we prefer a predicated select to simplify
1435 // matching in handleFindLastReductions(), rather than handle multiple
1436 // cases.
1438 return true;
1439
1440 return PreferPredicatedReductionSelect ||
1441 TTI.preferPredicatedReductionSelect();
1442 }
1443
1444 /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1445 /// with factor VF. Return the cost of the instruction, including
1446 /// scalarization overhead if it's needed.
1447 InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1448
1449 /// Estimate cost of a call instruction CI if it were vectorized with factor
1450 /// VF. Return the cost of the instruction, including scalarization overhead
1451 /// if it's needed.
1452 InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF) const;
1453
1454 /// Invalidates decisions already taken by the cost model.
1456 WideningDecisions.clear();
1457 CallWideningDecisions.clear();
1458 Uniforms.clear();
1459 Scalars.clear();
1460 }
1461
1462 /// Returns the expected execution cost. The unit of the cost does
1463 /// not matter because we use the 'cost' units to compare different
1464 /// vector widths. The cost that is returned is *not* normalized by
1465 /// the factor width.
1466 InstructionCost expectedCost(ElementCount VF);
1467
1468 bool hasPredStores() const { return NumPredStores > 0; }
1469
1470 /// Returns true if epilogue vectorization is considered profitable, and
1471 /// false otherwise.
1472 /// \p VF is the vectorization factor chosen for the original loop.
1473 /// \p IC is an additional scaling factor applied to VF before
1474 /// comparing to EpilogueVectorizationMinVF.
1475 bool isEpilogueVectorizationProfitable(const ElementCount VF,
1476 const unsigned IC) const;
1477
1478 /// Returns the execution time cost of an instruction for a given vector
1479 /// width. Vector width of one means scalar.
1480 InstructionCost getInstructionCost(Instruction *I, ElementCount VF);
1481
1482 /// Return the cost of instructions in an inloop reduction pattern, if I is
1483 /// part of that pattern.
1484 std::optional<InstructionCost> getReductionPatternCost(Instruction *I,
1485 ElementCount VF,
1486 Type *VectorTy) const;
1487
1488 /// Returns true if \p Op should be considered invariant and if it is
1489 /// trivially hoistable.
1490 bool shouldConsiderInvariant(Value *Op);
1491
1492 /// Return the value of vscale used for tuning the cost model.
1493 std::optional<unsigned> getVScaleForTuning() const { return VScaleForTuning; }
1494
1495private:
1496 unsigned NumPredStores = 0;
1497
1498 /// Used to store the value of vscale used for tuning the cost model. It is
1499 /// initialized during object construction.
1500 std::optional<unsigned> VScaleForTuning;
1501
1502 /// Initializes the value of vscale used for tuning the cost model. If
1503 /// vscale_range.min == vscale_range.max then return vscale_range.max, else
1504 /// return the value returned by the corresponding TTI method.
1505 void initializeVScaleForTuning() {
1506 const Function *Fn = TheLoop->getHeader()->getParent();
1507 if (Fn->hasFnAttribute(Attribute::VScaleRange)) {
1508 auto Attr = Fn->getFnAttribute(Attribute::VScaleRange);
1509 auto Min = Attr.getVScaleRangeMin();
1510 auto Max = Attr.getVScaleRangeMax();
1511 if (Max && Min == Max) {
1512 VScaleForTuning = Max;
1513 return;
1514 }
1515 }
1516
1517 VScaleForTuning = TTI.getVScaleForTuning();
1518 }
1519
1520 /// \return An upper bound for the vectorization factors for both
1521 /// fixed and scalable vectorization, where the minimum-known number of
1522 /// elements is a power-of-2 larger than zero. If scalable vectorization is
1523 /// disabled or unsupported, then the scalable part will be equal to
1524 /// ElementCount::getScalable(0).
1525 FixedScalableVFPair computeFeasibleMaxVF(unsigned MaxTripCount,
1526 ElementCount UserVF, unsigned UserIC,
1527 bool FoldTailByMasking);
1528
1529 /// If \p VF * \p UserIC > MaxTripcount, clamps VF to the next lower VF that
1530 /// results in VF * UserIC <= MaxTripCount.
1531 ElementCount clampVFByMaxTripCount(ElementCount VF, unsigned MaxTripCount,
1532 unsigned UserIC,
1533 bool FoldTailByMasking) const;
1534
1535 /// \return the maximized element count based on the targets vector
1536 /// registers and the loop trip-count, but limited to a maximum safe VF.
1537 /// This is a helper function of computeFeasibleMaxVF.
1538 ElementCount getMaximizedVFForTarget(unsigned MaxTripCount,
1539 unsigned SmallestType,
1540 unsigned WidestType,
1541 ElementCount MaxSafeVF, unsigned UserIC,
1542 bool FoldTailByMasking);
1543
1544 /// Checks if scalable vectorization is supported and enabled. Caches the
1545 /// result to avoid repeated debug dumps for repeated queries.
1546 bool isScalableVectorizationAllowed();
1547
1548 /// \return the maximum legal scalable VF, based on the safe max number
1549 /// of elements.
1550 ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1551
1552 /// Calculate vectorization cost of memory instruction \p I.
1553 InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1554
1555 /// The cost computation for scalarized memory instruction.
1556 InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1557
1558 /// The cost computation for interleaving group of memory instructions.
1559 InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1560
1561 /// The cost computation for Gather/Scatter instruction.
1562 InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1563
1564 /// The cost computation for widening instruction \p I with consecutive
1565 /// memory access.
1566 InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1567
1568 /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1569 /// Load: scalar load + broadcast.
1570 /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1571 /// element)
1572 InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1573
1574 /// Estimate the overhead of scalarizing an instruction. This is a
1575 /// convenience wrapper for the type-based getScalarizationOverhead API.
1577 ElementCount VF) const;
1578
1579 /// Map of scalar integer values to the smallest bitwidth they can be legally
1580 /// represented as. The vector equivalents of these values should be truncated
1581 /// to this type.
1582 MapVector<Instruction *, uint64_t> MinBWs;
1583
1584 /// A type representing the costs for instructions if they were to be
1585 /// scalarized rather than vectorized. The entries are Instruction-Cost
1586 /// pairs.
1587 using ScalarCostsTy = MapVector<Instruction *, InstructionCost>;
1588
1589 /// A set containing all BasicBlocks that are known to be present after
1590 /// vectorization as a predicated block.
1591 DenseMap<ElementCount, SmallPtrSet<BasicBlock *, 4>>
1592 PredicatedBBsAfterVectorization;
1593
1594 /// Records whether it is allowed to have the original scalar loop execute at
1595 /// least once. This may be needed as a fallback loop in case runtime
1596 /// aliasing/dependence checks fail, or to handle the tail/remainder
1597 /// iterations when the trip count is unknown or doesn't divide by the VF,
1598 /// or as a peel-loop to handle gaps in interleave-groups.
1599 /// Under optsize and when the trip count is very small we don't allow any
1600 /// iterations to execute in the scalar loop.
1601 ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1602
1603 /// The finally chosen tail-folding style.
1604 TailFoldingStyle ChosenTailFoldingStyle = TailFoldingStyle::None;
1605
1606 /// true if scalable vectorization is supported and enabled.
1607 std::optional<bool> IsScalableVectorizationAllowed;
1608
1609 /// Maximum safe number of elements to be processed per vector iteration,
1610 /// chosen so that store-load forwarding is not prevented and all memory
1611 /// dependencies are respected. Required for EVL-based vectorization, where
1612 /// this value is used as the upper bound of the safe AVL.
1613 std::optional<unsigned> MaxSafeElements;
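// Illustrative example (assumed numbers): with a safe dependence distance of
// 32 bytes and 4-byte elements, at most 8 elements may be processed per
// vector iteration, so MaxSafeElements would be 8 and an EVL-based plan must
// clamp its AVL to that bound.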
1614
1615 /// A map holding scalar costs for different vectorization factors. The
1616 /// presence of a cost for an instruction in the mapping indicates that the
1617 /// instruction will be scalarized when vectorizing with the associated
1618 /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1619 MapVector<ElementCount, ScalarCostsTy> InstsToScalarize;
1620
1621 /// Holds the instructions known to be uniform after vectorization.
1622 /// The data is collected per VF.
1623 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1624
1625 /// Holds the instructions known to be scalar after vectorization.
1626 /// The data is collected per VF.
1627 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1628
1629 /// Holds the instructions (address computations) that are forced to be
1630 /// scalarized.
1631 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1632
1633 /// PHINodes of the reductions that should be expanded in-loop.
1634 SmallPtrSet<PHINode *, 4> InLoopReductions;
1635
1636 /// A map of in-loop reduction operations and their immediate chain operand.
1637 /// FIXME: This can be removed once reductions can be costed correctly in
1638 /// VPlan. This was added to allow quick lookup of the inloop operations.
1639 DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1640
1641 /// Returns the expected difference in cost from scalarizing the expression
1642 /// feeding a predicated instruction \p PredInst. The instructions to
1643 /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1644 /// non-negative return value implies the expression will be scalarized.
1645 /// Currently, only single-use chains are considered for scalarization.
1646 InstructionCost computePredInstDiscount(Instruction *PredInst,
1647 ScalarCostsTy &ScalarCosts,
1648 ElementCount VF);
1649
1650 /// Collect the instructions that are uniform after vectorization. An
1651 /// instruction is uniform if we represent it with a single scalar value in
1652 /// the vectorized loop corresponding to each vector iteration. Examples of
1653 /// uniform instructions include pointer operands of consecutive or
1654 /// interleaved memory accesses. Note that although uniformity implies an
1655 /// instruction will be scalar, the reverse is not true. In general, a
1656 /// scalarized instruction will be represented by VF scalar values in the
1657 /// vectorized loop, each corresponding to an iteration of the original
1658 /// scalar loop.
1659 void collectLoopUniforms(ElementCount VF);
1660
1661 /// Collect the instructions that are scalar after vectorization. An
1662 /// instruction is scalar if it is known to be uniform or will be scalarized
1663 /// during vectorization. collectLoopScalars should only add non-uniform nodes
1664 /// to the list if they are used by a load/store instruction that is marked as
1665 /// CM_Scalarize. Non-uniform scalarized instructions will be represented by
1666 /// VF values in the vectorized loop, each corresponding to an iteration of
1667 /// the original scalar loop.
1668 void collectLoopScalars(ElementCount VF);
1669
1670 /// Keeps cost model vectorization decision and cost for instructions.
1671 /// Right now it is used for memory instructions only.
1672 using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1673 std::pair<InstWidening, InstructionCost>>;
1674
1675 DecisionList WideningDecisions;
1676
1677 using CallDecisionList =
1678 DenseMap<std::pair<CallInst *, ElementCount>, CallWideningDecision>;
1679
1680 CallDecisionList CallWideningDecisions;
1681
1682 /// Returns true if \p V is expected to be vectorized and it needs to be
1683 /// extracted.
1684 bool needsExtract(Value *V, ElementCount VF) const {
1685 Instruction *I = dyn_cast<Instruction>(V);
1686 if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1687 TheLoop->isLoopInvariant(I) ||
1688 getWideningDecision(I, VF) == CM_Scalarize ||
1689 (isa<CallInst>(I) &&
1690 getCallWideningDecision(cast<CallInst>(I), VF).Kind == CM_Scalarize))
1691 return false;
1692
1693 // Assume we can vectorize V (and hence we need extraction) if the
1694 // scalars are not computed yet. This can happen, because it is called
1695 // via getScalarizationOverhead from setCostBasedWideningDecision, before
1696 // the scalars are collected. That should be a safe assumption in most
1697 // cases, because we check if the operands have vectorizable types
1698 // beforehand in LoopVectorizationLegality.
1699 return !Scalars.contains(VF) || !isScalarAfterVectorization(I, VF);
1700 };
1701
1702 /// Returns a range containing only operands needing to be extracted.
1703 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1704 ElementCount VF) const {
1705
1706 SmallPtrSet<const Value *, 4> UniqueOperands;
1707 SmallVector<Value *, 4> Res;
1708 for (Value *Op : Ops) {
1709 if (isa<Constant>(Op) || !UniqueOperands.insert(Op).second ||
1710 !needsExtract(Op, VF))
1711 continue;
1712 Res.push_back(Op);
1713 }
1714 return Res;
1715 }
1716
1717public:
1718 /// The loop that we evaluate.
1719 Loop *TheLoop;
1720
1721 /// Predicated scalar evolution analysis.
1722 PredicatedScalarEvolution &PSE;
1723
1724 /// Loop Info analysis.
1725 LoopInfo *LI;
1726
1727 /// Vectorization legality.
1728 LoopVectorizationLegality *Legal;
1729
1730 /// Vector target information.
1731 const TargetTransformInfo &TTI;
1732
1733 /// Target Library Info.
1734 const TargetLibraryInfo *TLI;
1735
1736 /// Demanded bits analysis.
1737 DemandedBits *DB;
1738
1739 /// Assumption cache.
1740 AssumptionCache *AC;
1741
1742 /// Interface to emit optimization remarks.
1743 OptimizationRemarkEmitter *ORE;
1744
1745 /// A function to lazily fetch BlockFrequencyInfo. This avoids computing it
1746 /// unless necessary, e.g. when the loop isn't legal to vectorize or when
1747 /// there is no predication.
1748 std::function<BlockFrequencyInfo &()> GetBFI;
1749 /// The BlockFrequencyInfo returned from GetBFI.
1750 BlockFrequencyInfo *BFI = nullptr;
1751 /// Returns the BlockFrequencyInfo for the function if cached, otherwise
1752 /// fetches it via GetBFI. Avoids an indirect call to the std::function.
1753 BlockFrequencyInfo &getBFI() {
1754 if (!BFI)
1755 BFI = &GetBFI();
1756 return *BFI;
1757 }
1758
1759 const Function *TheFunction;
1760
1761 /// Loop Vectorize Hint.
1762 const LoopVectorizeHints *Hints;
1763
1764 /// The interleave access information contains groups of interleaved accesses
1765 /// with the same stride and close to each other.
1766 InterleavedAccessInfo &InterleaveInfo;
1767
1768 /// Values to ignore in the cost model.
1769 SmallPtrSet<const Value *, 16> ValuesToIgnore;
1770
1771 /// Values to ignore in the cost model when VF > 1.
1772 SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1773
1774 /// All element types found in the loop.
1775 SmallPtrSet<Type *, 16> ElementTypesInLoop;
1776
1777 /// The kind of cost that we are calculating
1778 TTI::TargetCostKind CostKind;
1779
1780 /// Whether this loop should be optimized for size based on function attribute
1781 /// or profile information.
1783
1784 /// The highest VF possible for this loop, without using MaxBandwidth.
1786};
1787} // end namespace llvm
1788
1789namespace {
1790/// Helper struct to manage generating runtime checks for vectorization.
1791///
1792 /// The runtime checks are created up-front in temporary blocks, un-linked from
1793 /// the existing IR, to allow better estimation of their cost. After deciding to
1794 /// vectorize, the checks are moved back. If deciding not to vectorize, the
1795 /// temporary blocks are completely removed.
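///
/// A minimal usage sketch (illustrative only; see the member definitions
/// below for the exact signatures):
/// \code
///   GeneratedRTChecks Checks(PSE, DT, LI, TTI, CostKind);
///   Checks.create(L, LAI, UnionPred, VF, IC, ORE); // build in temp blocks
///   InstructionCost C = Checks.getCost();          // estimate their cost
///   if (Checks.hasChecks()) { /* wire the blocks back in when vectorizing */ }
/// \endcode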
1796class GeneratedRTChecks {
1797 /// Basic block which contains the generated SCEV checks, if any.
1798 BasicBlock *SCEVCheckBlock = nullptr;
1799
1800 /// The value representing the result of the generated SCEV checks. If it is
1801 /// nullptr no SCEV checks have been generated.
1802 Value *SCEVCheckCond = nullptr;
1803
1804 /// Basic block which contains the generated memory runtime checks, if any.
1805 BasicBlock *MemCheckBlock = nullptr;
1806
1807 /// The value representing the result of the generated memory runtime checks.
1808 /// If it is nullptr no memory runtime checks have been generated.
1809 Value *MemRuntimeCheckCond = nullptr;
1810
1811 DominatorTree *DT;
1812 LoopInfo *LI;
1813 TargetTransformInfo *TTI;
1814
1815 SCEVExpander SCEVExp;
1816 SCEVExpander MemCheckExp;
1817
1818 bool CostTooHigh = false;
1819
1820 Loop *OuterLoop = nullptr;
1821
1822 PredicatedScalarEvolution &PSE;
1823
1824 /// The kind of cost that we are calculating
1825 TTI::TargetCostKind CostKind;
1826
1827public:
1828 GeneratedRTChecks(PredicatedScalarEvolution &PSE, DominatorTree *DT,
1829 LoopInfo *LI, TargetTransformInfo *TTI,
1830 TTI::TargetCostKind CostKind)
1831 : DT(DT), LI(LI), TTI(TTI),
1832 SCEVExp(*PSE.getSE(), "scev.check", /*PreserveLCSSA=*/false),
1833 MemCheckExp(*PSE.getSE(), "scev.check", /*PreserveLCSSA=*/false),
1834 PSE(PSE), CostKind(CostKind) {}
1835
1836 /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
1837 /// accurately estimate the cost of the runtime checks. The blocks are
1838 /// un-linked from the IR and are added back during vector code generation. If
1839 /// there is no vector code generation, the check blocks are removed
1840 /// completely.
1841 void create(Loop *L, const LoopAccessInfo &LAI,
1842 const SCEVPredicate &UnionPred, ElementCount VF, unsigned IC,
1843 OptimizationRemarkEmitter &ORE) {
1844
1845 // Hard cutoff to limit compile-time increase in case a very large number of
1846 // runtime checks needs to be generated.
1847 // TODO: Skip cutoff if the loop is guaranteed to execute, e.g. due to
1848 // profile info.
1849 CostTooHigh =
1850 LAI.getNumRuntimePointerChecks() > VectorizeMemoryCheckThreshold;
1851 if (CostTooHigh) {
1852 // Mark runtime checks as never succeeding when they exceed the threshold.
1853 MemRuntimeCheckCond = ConstantInt::getTrue(L->getHeader()->getContext());
1854 SCEVCheckCond = ConstantInt::getTrue(L->getHeader()->getContext());
1855 ORE.emit([&]() {
1856 return OptimizationRemarkAnalysisAliasing(
1857 DEBUG_TYPE, "TooManyMemoryRuntimeChecks", L->getStartLoc(),
1858 L->getHeader())
1859 << "loop not vectorized: too many memory checks needed";
1860 });
1861 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
1862 return;
1863 }
1864
1865 BasicBlock *LoopHeader = L->getHeader();
1866 BasicBlock *Preheader = L->getLoopPreheader();
1867
1868 // Use SplitBlock to create blocks for SCEV & memory runtime checks to
1869 // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
1870 // may be used by SCEVExpander. The blocks will be un-linked from their
1871 // predecessors and removed from LI & DT at the end of the function.
1872 if (!UnionPred.isAlwaysTrue()) {
1873 SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
1874 nullptr, "vector.scevcheck");
1875
1876 SCEVCheckCond = SCEVExp.expandCodeForPredicate(
1877 &UnionPred, SCEVCheckBlock->getTerminator());
1878 if (isa<Constant>(SCEVCheckCond)) {
1879 // Clean up directly after expanding the predicate to a constant, to
1880 // avoid further expansions re-using anything left over from SCEVExp.
1881 SCEVExpanderCleaner SCEVCleaner(SCEVExp);
1882 SCEVCleaner.cleanup();
1883 }
1884 }
1885
1886 const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
1887 if (RtPtrChecking.Need) {
1888 auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
1889 MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
1890 "vector.memcheck");
1891
1892 auto DiffChecks = RtPtrChecking.getDiffChecks();
1893 if (DiffChecks) {
1894 Value *RuntimeVF = nullptr;
1895 MemRuntimeCheckCond = addDiffRuntimeChecks(
1896 MemCheckBlock->getTerminator(), *DiffChecks, MemCheckExp,
1897 [VF, &RuntimeVF](IRBuilderBase &B, unsigned Bits) {
1898 if (!RuntimeVF)
1899 RuntimeVF = getRuntimeVF(B, B.getIntNTy(Bits), VF);
1900 return RuntimeVF;
1901 },
1902 IC);
1903 } else {
1904 MemRuntimeCheckCond = addRuntimeChecks(
1905 MemCheckBlock->getTerminator(), L, RtPtrChecking.getChecks(),
1906 MemCheckExp, VF.isScalable());
1907 }
1908 assert(MemRuntimeCheckCond &&
1909 "no RT checks generated although RtPtrChecking "
1910 "claimed checks are required");
1911 }
1912
1913 SCEVExp.eraseDeadInstructions(SCEVCheckCond);
1914
1915 if (!MemCheckBlock && !SCEVCheckBlock)
1916 return;
1917
1918 // Unhook the temporary block with the checks, update various places
1919 // accordingly.
1920 if (SCEVCheckBlock)
1921 SCEVCheckBlock->replaceAllUsesWith(Preheader);
1922 if (MemCheckBlock)
1923 MemCheckBlock->replaceAllUsesWith(Preheader);
1924
1925 if (SCEVCheckBlock) {
1926 SCEVCheckBlock->getTerminator()->moveBefore(
1927 Preheader->getTerminator()->getIterator());
1928 auto *UI = new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
1929 UI->setDebugLoc(DebugLoc::getTemporary());
1930 Preheader->getTerminator()->eraseFromParent();
1931 }
1932 if (MemCheckBlock) {
1933 MemCheckBlock->getTerminator()->moveBefore(
1934 Preheader->getTerminator()->getIterator());
1935 auto *UI = new UnreachableInst(Preheader->getContext(), MemCheckBlock);
1936 UI->setDebugLoc(DebugLoc::getTemporary());
1937 Preheader->getTerminator()->eraseFromParent();
1938 }
1939
1940 DT->changeImmediateDominator(LoopHeader, Preheader);
1941 if (MemCheckBlock) {
1942 DT->eraseNode(MemCheckBlock);
1943 LI->removeBlock(MemCheckBlock);
1944 }
1945 if (SCEVCheckBlock) {
1946 DT->eraseNode(SCEVCheckBlock);
1947 LI->removeBlock(SCEVCheckBlock);
1948 }
1949
1950 // Outer loop is used as part of the later cost calculations.
1951 OuterLoop = L->getParentLoop();
1952 }
1953
1954 InstructionCost getCost() {
1955 if (SCEVCheckBlock || MemCheckBlock)
1956 LLVM_DEBUG(dbgs() << "Calculating cost of runtime checks:\n");
1957
1958 if (CostTooHigh) {
1959 InstructionCost Cost;
1960 Cost.setInvalid();
1961 LLVM_DEBUG(dbgs() << " number of checks exceeded threshold\n");
1962 return Cost;
1963 }
1964
1965 InstructionCost RTCheckCost = 0;
1966 if (SCEVCheckBlock)
1967 for (Instruction &I : *SCEVCheckBlock) {
1968 if (SCEVCheckBlock->getTerminator() == &I)
1969 continue;
1970 InstructionCost C = TTI->getInstructionCost(&I, CostKind);
1971 LLVM_DEBUG(dbgs() << " " << C << " for " << I << "\n");
1972 RTCheckCost += C;
1973 }
1974 if (MemCheckBlock) {
1975 InstructionCost MemCheckCost = 0;
1976 for (Instruction &I : *MemCheckBlock) {
1977 if (MemCheckBlock->getTerminator() == &I)
1978 continue;
1979 InstructionCost C = TTI->getInstructionCost(&I, CostKind);
1980 LLVM_DEBUG(dbgs() << " " << C << " for " << I << "\n");
1981 MemCheckCost += C;
1982 }
1983
1984 // If the runtime memory checks are being created inside an outer loop
1985 // we should find out if these checks are outer loop invariant. If so,
1986 // the checks will likely be hoisted out and so the effective cost will
1987 // reduce according to the outer loop trip count.
1988 if (OuterLoop) {
1989 ScalarEvolution *SE = MemCheckExp.getSE();
1990 // TODO: If profitable, we could refine this further by analysing every
1991 // individual memory check, since there could be a mixture of loop
1992 // variant and invariant checks that mean the final condition is
1993 // variant.
1994 const SCEV *Cond = SE->getSCEV(MemRuntimeCheckCond);
1995 if (SE->isLoopInvariant(Cond, OuterLoop)) {
1996 // It seems reasonable to assume that we can reduce the effective
1997 // cost of the checks even when we know nothing about the trip
1998 // count. Assume that the outer loop executes at least twice.
1999 unsigned BestTripCount = 2;
2000
2001 // Get the best known TC estimate.
2002 if (auto EstimatedTC = getSmallBestKnownTC(
2003 PSE, OuterLoop, /* CanUseConstantMax = */ false))
2004 if (EstimatedTC->isFixed())
2005 BestTripCount = EstimatedTC->getFixedValue();
2006
2007 InstructionCost NewMemCheckCost = MemCheckCost / BestTripCount;
2008
2009 // Let's ensure the cost is always at least 1.
2010 NewMemCheckCost = std::max(NewMemCheckCost.getValue(),
2011 (InstructionCost::CostType)1);
2012
2013 if (BestTripCount > 1)
2014 LLVM_DEBUG(dbgs()
2015 << "We expect runtime memory checks to be hoisted "
2016 << "out of the outer loop. Cost reduced from "
2017 << MemCheckCost << " to " << NewMemCheckCost << '\n');
2018
2019 MemCheckCost = NewMemCheckCost;
2020 }
2021 }
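// Worked example (illustrative): if MemCheckCost is 20 and the estimated
// outer-loop trip count is 10, the hoisted checks are amortized to an
// effective cost of 20 / 10 = 2 (and never less than 1, per the clamp above).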
2022
2023 RTCheckCost += MemCheckCost;
2024 }
2025
2026 if (SCEVCheckBlock || MemCheckBlock)
2027 LLVM_DEBUG(dbgs() << "Total cost of runtime checks: " << RTCheckCost
2028 << "\n");
2029
2030 return RTCheckCost;
2031 }
2032
2033 /// Remove the created SCEV & memory runtime check blocks & instructions, if
2034 /// unused.
2035 ~GeneratedRTChecks() {
2036 SCEVExpanderCleaner SCEVCleaner(SCEVExp);
2037 SCEVExpanderCleaner MemCheckCleaner(MemCheckExp);
2038 bool SCEVChecksUsed = !SCEVCheckBlock || !pred_empty(SCEVCheckBlock);
2039 bool MemChecksUsed = !MemCheckBlock || !pred_empty(MemCheckBlock);
2040 if (SCEVChecksUsed)
2041 SCEVCleaner.markResultUsed();
2042
2043 if (MemChecksUsed) {
2044 MemCheckCleaner.markResultUsed();
2045 } else {
2046 auto &SE = *MemCheckExp.getSE();
2047 // Memory runtime check generation creates compares that use expanded
2048 // values. Remove them before running the SCEVExpanderCleaners.
2049 for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
2050 if (MemCheckExp.isInsertedInstruction(&I))
2051 continue;
2052 SE.forgetValue(&I);
2053 I.eraseFromParent();
2054 }
2055 }
2056 MemCheckCleaner.cleanup();
2057 SCEVCleaner.cleanup();
2058
2059 if (!SCEVChecksUsed)
2060 SCEVCheckBlock->eraseFromParent();
2061 if (!MemChecksUsed)
2062 MemCheckBlock->eraseFromParent();
2063 }
2064
2065 /// Retrieves the SCEVCheckCond and SCEVCheckBlock that were generated as IR
2066 /// outside VPlan.
2067 std::pair<Value *, BasicBlock *> getSCEVChecks() const {
2068 using namespace llvm::PatternMatch;
2069 if (!SCEVCheckCond || match(SCEVCheckCond, m_ZeroInt()))
2070 return {nullptr, nullptr};
2071
2072 return {SCEVCheckCond, SCEVCheckBlock};
2073 }
2074
2075 /// Retrieves the MemCheckCond and MemCheckBlock that were generated as IR
2076 /// outside VPlan.
2077 std::pair<Value *, BasicBlock *> getMemRuntimeChecks() const {
2078 using namespace llvm::PatternMatch;
2079 if (MemRuntimeCheckCond && match(MemRuntimeCheckCond, m_ZeroInt()))
2080 return {nullptr, nullptr};
2081 return {MemRuntimeCheckCond, MemCheckBlock};
2082 }
2083
2084 /// Return true if any runtime checks have been added
2085 bool hasChecks() const {
2086 return getSCEVChecks().first || getMemRuntimeChecks().first;
2087 }
2088};
2089} // namespace
2090
2091 static bool useActiveLaneMask(TailFoldingStyle Style) {
2092 return Style == TailFoldingStyle::Data ||
2093 Style == TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck;
2094 }
2095
2099
2100// Return true if \p OuterLp is an outer loop annotated with hints for explicit
2101// vectorization. The loop needs to be annotated with #pragma omp simd
2102// simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
2103// vector length information is not provided, vectorization is not considered
2104// explicit. Interleave hints are not allowed either. These limitations will be
2105// relaxed in the future.
2106 // Please note that we are currently forced to abuse the pragma 'clang
2107// vectorize' semantics. This pragma provides *auto-vectorization hints*
2108// (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2109// provides *explicit vectorization hints* (LV can bypass legal checks and
2110// assume that vectorization is legal). However, both hints are implemented
2111// using the same metadata (llvm.loop.vectorize, processed by
2112// LoopVectorizeHints). This will be fixed in the future when the native IR
2113// representation for pragma 'omp simd' is introduced.
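// For example (illustrative), an outer loop annotated with
//   #pragma clang loop vectorize(enable) vectorize_width(4)
// preceding the outer 'for' of a loop nest carries the explicit vector
// length required here, while the same pragma without vectorize_width would
// be ignored.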
2114static bool isExplicitVecOuterLoop(Loop *OuterLp,
2115 OptimizationRemarkEmitter *ORE) {
2116 assert(!OuterLp->isInnermost() && "This is not an outer loop");
2117 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2118
2119 // Only outer loops with an explicit vectorization hint are supported.
2120 // Unannotated outer loops are ignored.
2121 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
2122 return false;
2123
2124 Function *Fn = OuterLp->getHeader()->getParent();
2125 if (!Hints.allowVectorization(Fn, OuterLp,
2126 true /*VectorizeOnlyWhenForced*/)) {
2127 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
2128 return false;
2129 }
2130
2131 if (Hints.getInterleave() > 1) {
2132 // TODO: Interleave support is future work.
2133 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
2134 "outer loops.\n");
2135 Hints.emitRemarkWithHints();
2136 return false;
2137 }
2138
2139 return true;
2140}
2141
2142 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
2143 OptimizationRemarkEmitter *ORE,
2144 SmallVectorImpl<Loop *> &V) {
2145 // Collect inner loops and outer loops without irreducible control flow. For
2146 // now, only collect outer loops that have explicit vectorization hints. If we
2147 // are stress testing the VPlan H-CFG construction, we collect the outermost
2148 // loop of every loop nest.
2149 if (L.isInnermost() || VPlanBuildStressTest ||
2150 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
2151 LoopBlocksRPO RPOT(&L);
2152 RPOT.perform(LI);
2153 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
2154 V.push_back(&L);
2155 // TODO: Collect inner loops inside marked outer loops in case
2156 // vectorization fails for the outer loop. Do not invoke
2157 // 'containsIrreducibleCFG' again for inner loops when the outer loop is
2158 // already known to be reducible. We can use an inherited attribute for
2159 // that.
2160 return;
2161 }
2162 }
2163 for (Loop *InnerL : L)
2164 collectSupportedLoops(*InnerL, LI, ORE, V);
2165}
2166
2167//===----------------------------------------------------------------------===//
2168// Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2169// LoopVectorizationCostModel and LoopVectorizationPlanner.
2170//===----------------------------------------------------------------------===//
2171
2172/// FIXME: The newly created binary instructions should contain nsw/nuw
2173/// flags, which can be found from the original scalar operations.
2174Value *
2175 llvm::emitTransformedIndex(IRBuilderBase &B, Value *Index, Value *StartValue,
2176 Value *Step,
2177 InductionDescriptor::InductionKind InductionKind,
2178 const BinaryOperator *InductionBinOp) {
2179 using namespace llvm::PatternMatch;
2180 Type *StepTy = Step->getType();
2181 Value *CastedIndex = StepTy->isIntegerTy()
2182 ? B.CreateSExtOrTrunc(Index, StepTy)
2183 : B.CreateCast(Instruction::SIToFP, Index, StepTy);
2184 if (CastedIndex != Index) {
2185 CastedIndex->setName(CastedIndex->getName() + ".cast");
2186 Index = CastedIndex;
2187 }
2188
2189 // Note: the IR at this point is broken. We cannot use SE to create a new
2190 // SCEV and then expand it, hoping that SCEV's simplification will give us
2191 // more optimal code, because attempting to do so on invalid IR may lead to
2192 // various SCEV crashes. So all we can do is use the builder and rely on
2193 // InstCombine for future simplifications. Here we handle some trivial
2194 // cases only.
2195 auto CreateAdd = [&B](Value *X, Value *Y) {
2196 assert(X->getType() == Y->getType() && "Types don't match!");
2197 if (match(X, m_ZeroInt()))
2198 return Y;
2199 if (match(Y, m_ZeroInt()))
2200 return X;
2201 return B.CreateAdd(X, Y);
2202 };
2203
2204 // We allow X to be a vector type, in which case Y will potentially be
2205 // splatted into a vector with the same element count.
2206 auto CreateMul = [&B](Value *X, Value *Y) {
2207 assert(X->getType()->getScalarType() == Y->getType() &&
2208 "Types don't match!");
2209 if (match(X, m_One()))
2210 return Y;
2211 if (match(Y, m_One()))
2212 return X;
2213 VectorType *XVTy = dyn_cast<VectorType>(X->getType());
2214 if (XVTy && !isa<VectorType>(Y->getType()))
2215 Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
2216 return B.CreateMul(X, Y);
2217 };
2218
2219 switch (InductionKind) {
2220 case InductionDescriptor::IK_IntInduction: {
2221 assert(!isa<VectorType>(Index->getType()) &&
2222 "Vector indices not supported for integer inductions yet");
2223 assert(Index->getType() == StartValue->getType() &&
2224 "Index type does not match StartValue type");
2225 if (isa<ConstantInt>(Step) && cast<ConstantInt>(Step)->isMinusOne())
2226 return B.CreateSub(StartValue, Index);
2227 auto *Offset = CreateMul(Index, Step);
2228 return CreateAdd(StartValue, Offset);
2229 }
2230 case InductionDescriptor::IK_PtrInduction:
2231 return B.CreatePtrAdd(StartValue, CreateMul(Index, Step));
2232 case InductionDescriptor::IK_FpInduction: {
2233 assert(!isa<VectorType>(Index->getType()) &&
2234 "Vector indices not supported for FP inductions yet");
2235 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
2236 assert(InductionBinOp &&
2237 (InductionBinOp->getOpcode() == Instruction::FAdd ||
2238 InductionBinOp->getOpcode() == Instruction::FSub) &&
2239 "Original bin op should be defined for FP induction");
2240
2241 Value *MulExp = B.CreateFMul(Step, Index);
2242 return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
2243 "induction");
2244 }
2245 case InductionDescriptor::IK_NoInduction:
2246 return nullptr;
2247 }
2248 llvm_unreachable("invalid enum");
2249}
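// Worked example (illustrative): for an integer induction with
// StartValue = 10 and Step = 3, emitTransformedIndex maps Index = 4 to
// 10 + 4 * 3 = 22, the value the scalar IV would have on iteration 4.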
2250
2251static std::optional<unsigned> getMaxVScale(const Function &F,
2252 const TargetTransformInfo &TTI) {
2253 if (std::optional<unsigned> MaxVScale = TTI.getMaxVScale())
2254 return MaxVScale;
2255
2256 if (F.hasFnAttribute(Attribute::VScaleRange))
2257 return F.getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();
2258
2259 return std::nullopt;
2260}
2261
2262 /// For the given VF and UF and maximum trip count computed for the loop, return
2263 /// true if the induction variable cannot overflow in the vectorized loop, in
2264 /// which case we know the runtime overflow check always evaluates to false and
2265 /// can be removed.
2266 static bool isIndvarOverflowCheckKnownFalse(
2267 const LoopVectorizationCostModel *Cost,
2268 ElementCount VF, std::optional<unsigned> UF = std::nullopt) {
2269 // Always be conservative if we don't know the exact unroll factor.
2270 unsigned MaxUF = UF ? *UF : Cost->TTI.getMaxInterleaveFactor(VF);
2271
2272 IntegerType *IdxTy = Cost->Legal->getWidestInductionType();
2273 APInt MaxUIntTripCount = IdxTy->getMask();
2274
2275 // We know the runtime overflow check is known false iff the (max) trip-count
2276 // is known and (max) trip-count + (VF * UF) does not overflow in the type of
2277 // the vector loop induction variable.
2278 if (unsigned TC = Cost->PSE.getSmallConstantMaxTripCount()) {
2279 uint64_t MaxVF = VF.getKnownMinValue();
2280 if (VF.isScalable()) {
2281 std::optional<unsigned> MaxVScale =
2282 getMaxVScale(*Cost->TheFunction, Cost->TTI);
2283 if (!MaxVScale)
2284 return false;
2285 MaxVF *= *MaxVScale;
2286 }
2287
2288 return (MaxUIntTripCount - TC).ugt(MaxVF * MaxUF);
2289 }
2290
2291 return false;
2292}
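// Worked example (illustrative): with an i8 widest induction type,
// MaxUIntTripCount is 255. For a max trip count of 250 with VF = 4 and
// UF = 2, (255 - 250) ugt 8 is false, so the overflow check must stay; for
// a max trip count of 100, (255 - 100) ugt 8 holds and the check can be
// removed.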
2293
2294// Return whether we allow using masked interleave-groups (for dealing with
2295// strided loads/stores that reside in predicated blocks, or for dealing
2296// with gaps).
2297 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2298 // If an override option has been passed in for interleaved accesses, use it.
2299 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2300 return EnableMaskedInterleavedMemAccesses;
2301
2302 return TTI.enableMaskedInterleavedAccessVectorization();
2303}
2304
2306 BasicBlock *CheckIRBB) {
2307 // Note: The block with the minimum trip-count check is already connected
2308 // during earlier VPlan construction.
2309 VPBlockBase *ScalarPH = Plan.getScalarPreheader();
2310 VPBlockBase *PreVectorPH = VectorPHVPBB->getSinglePredecessor();
2311 assert(PreVectorPH->getNumSuccessors() == 2 && "Expected 2 successors");
2312 assert(PreVectorPH->getSuccessors()[0] == ScalarPH && "Unexpected successor");
2313 VPIRBasicBlock *CheckVPIRBB = Plan.createVPIRBasicBlock(CheckIRBB);
2314 VPBlockUtils::insertOnEdge(PreVectorPH, VectorPHVPBB, CheckVPIRBB);
2315 PreVectorPH = CheckVPIRBB;
2316 VPBlockUtils::connectBlocks(PreVectorPH, ScalarPH);
2317 PreVectorPH->swapSuccessors();
2318
2319 // We just connected a new block to the scalar preheader. Update all
2320 // VPPhis by adding an incoming value for it, replicating the last value.
2321 unsigned NumPredecessors = ScalarPH->getNumPredecessors();
2322 for (VPRecipeBase &R : cast<VPBasicBlock>(ScalarPH)->phis()) {
2323 assert(isa<VPPhi>(&R) && "Phi expected to be VPPhi");
2324 assert(cast<VPPhi>(&R)->getNumIncoming() == NumPredecessors - 1 &&
2325 "must have incoming values for all operands");
2326 R.addOperand(R.getOperand(NumPredecessors - 2));
2327 }
2328}
2329
2331 BasicBlock *VectorPH, ElementCount VF, unsigned UF) const {
2332 // Generate code to check if the loop's trip count is less than VF * UF, or
2333 // equal to it in case a scalar epilogue is required; this implies that the
2334 // vector trip count is zero. This check also covers the case where adding one
2335 // to the backedge-taken count overflows, leading to an incorrect trip count
2336 // of zero. In this case we will also jump to the scalar loop.
2337 auto P = Cost->requiresScalarEpilogue(VF.isVector()) ? ICmpInst::ICMP_ULE
2338 : ICmpInst::ICMP_ULT;
2339
2340 // Reuse existing vector loop preheader for TC checks.
2341 // Note that a new preheader block is generated for the vector loop.
2342 BasicBlock *const TCCheckBlock = VectorPH;
2343 IRBuilder<InstSimplifyFolder> Builder(
2344 TCCheckBlock->getContext(),
2345 InstSimplifyFolder(TCCheckBlock->getDataLayout()));
2346 Builder.SetInsertPoint(TCCheckBlock->getTerminator());
2347
2348 // If tail is to be folded, vector loop takes care of all iterations.
2349 Value *Count = getTripCount();
2350 Type *CountTy = Count->getType();
2351 Value *CheckMinIters = Builder.getFalse();
2352 auto CreateStep = [&]() -> Value * {
2353 // Create step with max(MinProTripCount, UF * VF).
2354 if (UF * VF.getKnownMinValue() >= MinProfitableTripCount.getKnownMinValue())
2355 return createStepForVF(Builder, CountTy, VF, UF);
2356
2357 Value *MinProfTC =
2358 Builder.CreateElementCount(CountTy, MinProfitableTripCount);
2359 if (!VF.isScalable())
2360 return MinProfTC;
2361 return Builder.CreateBinaryIntrinsic(
2362 Intrinsic::umax, MinProfTC, createStepForVF(Builder, CountTy, VF, UF));
2363 };
2364
2365 TailFoldingStyle Style = Cost->getTailFoldingStyle();
2366 if (Style == TailFoldingStyle::None) {
2367 Value *Step = CreateStep();
2368 ScalarEvolution &SE = *PSE.getSE();
2369 // TODO: Emit unconditional branch to vector preheader instead of
2370 // conditional branch with known condition.
2371 const SCEV *TripCountSCEV = SE.applyLoopGuards(SE.getSCEV(Count), OrigLoop);
2372 // Check if the trip count is < the step.
2373 if (SE.isKnownPredicate(P, TripCountSCEV, SE.getSCEV(Step))) {
2374 // TODO: Ensure step is at most the trip count when determining max VF and
2375 // UF, w/o tail folding.
2376 CheckMinIters = Builder.getTrue();
2377 } else if (!SE.isKnownPredicate(CmpInst::getInversePredicate(P),
2378 TripCountSCEV, SE.getSCEV(Step))) {
2379 // Generate the minimum iteration check only if we cannot prove the
2380 // check is known to be true, or known to be false.
2381 CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
2382 } // else step known to be < trip count, use CheckMinIters preset to false.
2383 }
2384
2385 return CheckMinIters;
2386}
2387
2388/// Replace \p VPBB with a VPIRBasicBlock wrapping \p IRBB. All recipes from \p
2389/// VPBB are moved to the end of the newly created VPIRBasicBlock. All
2390/// predecessors and successors of VPBB, if any, are rewired to the new
2391/// VPIRBasicBlock. If \p VPBB may be unreachable, \p Plan must be passed.
2393 BasicBlock *IRBB,
2394 VPlan *Plan = nullptr) {
2395 if (!Plan)
2396 Plan = VPBB->getPlan();
2397 VPIRBasicBlock *IRVPBB = Plan->createVPIRBasicBlock(IRBB);
2398 auto IP = IRVPBB->begin();
2399 for (auto &R : make_early_inc_range(VPBB->phis()))
2400 R.moveBefore(*IRVPBB, IP);
2401
2402 for (auto &R :
2403 make_early_inc_range(make_range(VPBB->getFirstNonPhi(), VPBB->end())))
2404 R.moveBefore(*IRVPBB, IRVPBB->end());
2405
2406 VPBlockUtils::reassociateBlocks(VPBB, IRVPBB);
2407 // VPBB is now dead and will be cleaned up when the plan gets destroyed.
2408 return IRVPBB;
2409}
2410
2412 BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
2413 assert(VectorPH && "Invalid loop structure");
2414 assert((OrigLoop->getUniqueLatchExitBlock() ||
2415 Cost->requiresScalarEpilogue(VF.isVector())) &&
2416 "loops not exiting via the latch without required epilogue?");
2417
2418 // NOTE: The Plan's scalar preheader VPBB isn't replaced with a VPIRBasicBlock
2419 // wrapping the newly created scalar preheader here at the moment, because the
2420 // Plan's scalar preheader may be unreachable at this point. Instead it is
2421 // replaced in executePlan.
2422 return SplitBlock(VectorPH, VectorPH->getTerminator(), DT, LI, nullptr,
2423 Twine(Prefix) + "scalar.ph");
2424}
2425
2426/// Return the expanded step for \p ID using \p ExpandedSCEVs to look up SCEV
2427/// expansion results.
2429 const SCEV2ValueTy &ExpandedSCEVs) {
2430 const SCEV *Step = ID.getStep();
2431 if (auto *C = dyn_cast<SCEVConstant>(Step))
2432 return C->getValue();
2433 if (auto *U = dyn_cast<SCEVUnknown>(Step))
2434 return U->getValue();
2435 Value *V = ExpandedSCEVs.lookup(Step);
2436 assert(V && "SCEV must be expanded at this point");
2437 return V;
2438}
2439
2440/// Knowing that loop \p L executes a single vector iteration, add instructions
2441/// that will get simplified and thus should not have any cost to \p
2442/// InstsToIgnore.
2445 SmallPtrSetImpl<Instruction *> &InstsToIgnore) {
2446 auto *Cmp = L->getLatchCmpInst();
2447 if (Cmp)
2448 InstsToIgnore.insert(Cmp);
2449 for (const auto &KV : IL) {
2450 // Extract the key by hand so that it can be used in the lambda below. Note
2451 // that captured structured bindings are a C++20 extension.
2452 const PHINode *IV = KV.first;
2453
2454 // Get next iteration value of the induction variable.
2455 Instruction *IVInst =
2456 cast<Instruction>(IV->getIncomingValueForBlock(L->getLoopLatch()));
2457 if (all_of(IVInst->users(),
2458 [&](const User *U) { return U == IV || U == Cmp; }))
2459 InstsToIgnore.insert(IVInst);
2460 }
2461}
2462
2464 // Create a new IR basic block for the scalar preheader.
2465 BasicBlock *ScalarPH = createScalarPreheader("");
2466 return ScalarPH->getSinglePredecessor();
2467}
2468
2469namespace {
2470
2471struct CSEDenseMapInfo {
2472 static bool canHandle(const Instruction *I) {
2473 return isa<InsertElementInst, ExtractElementInst, ShuffleVectorInst,
2474 GetElementPtrInst>(I);
2475 }
2476
2477 static inline Instruction *getEmptyKey() {
2478 return DenseMapInfo<Instruction *>::getEmptyKey();
2479 }
2480
2481 static inline Instruction *getTombstoneKey() {
2482 return DenseMapInfo<Instruction *>::getTombstoneKey();
2483 }
2484
2485 static unsigned getHashValue(const Instruction *I) {
2486 assert(canHandle(I) && "Unknown instruction!");
2487 return hash_combine(I->getOpcode(),
2488 hash_combine_range(I->operand_values()));
2489 }
2490
2491 static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
2492 if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
2493 LHS == getTombstoneKey() || RHS == getTombstoneKey())
2494 return LHS == RHS;
2495 return LHS->isIdenticalTo(RHS);
2496 }
2497};
2498
2499} // end anonymous namespace
2500
2501/// FIXME: This legacy common-subexpression-elimination routine is scheduled for
2502/// removal, in favor of the VPlan-based one.
2503static void legacyCSE(BasicBlock *BB) {
2504 // Perform simple cse.
2505 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
2506 for (Instruction &In : llvm::make_early_inc_range(*BB)) {
2507 if (!CSEDenseMapInfo::canHandle(&In))
2508 continue;
2509
2510 // Check if we can replace this instruction with any of the
2511 // visited instructions.
2512 if (Instruction *V = CSEMap.lookup(&In)) {
2513 In.replaceAllUsesWith(V);
2514 In.eraseFromParent();
2515 continue;
2516 }
2517
2518 CSEMap[&In] = &In;
2519 }
2520}
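// Illustrative effect (not from the source): two identical instructions
//   %g1 = getelementptr inbounds i32, ptr %a, i64 %i
//   %g2 = getelementptr inbounds i32, ptr %a, i64 %i
// hash alike via CSEDenseMapInfo, so the second is replaced by the first and
// erased.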
2521
2522/// This function attempts to return a value that represents the ElementCount
2523/// at runtime. For fixed-width VFs we know this precisely at compile
2524/// time, but for scalable VFs we calculate it based on an estimate of the
2525/// vscale value.
2527 std::optional<unsigned> VScale) {
2528 unsigned EstimatedVF = VF.getKnownMinValue();
2529 if (VF.isScalable())
2530 if (VScale)
2531 EstimatedVF *= *VScale;
2532 assert(EstimatedVF >= 1 && "Estimated VF shouldn't be less than 1");
2533 return EstimatedVF;
2534}
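// Worked example (illustrative): for VF = vscale x 4 with an estimated
// vscale of 2 the returned estimate is 4 * 2 = 8 lanes, while a fixed
// VF = 8 is returned as 8 exactly.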
2535
2537 InstructionCost LoopVectorizationCostModel::getVectorCallCost(CallInst *CI,
2538 ElementCount VF) const {
2539 // We only need to calculate a cost if the VF is scalar; for actual vectors
2540 // we should already have a pre-calculated cost at each VF.
2541 if (!VF.isScalar())
2542 return getCallWideningDecision(CI, VF).Cost;
2543
2544 Type *RetTy = CI->getType();
2545 if (RecurrenceDescriptor::isFMulAddIntrinsic(CI))
2546 if (auto RedCost = getReductionPatternCost(CI, VF, RetTy))
2547 return *RedCost;
2548
2549 SmallVector<Type *, 4> Tys;
2550 for (auto &ArgOp : CI->args())
2551 Tys.push_back(ArgOp->getType());
2552
2553 InstructionCost ScalarCallCost =
2554 TTI.getCallInstrCost(CI->getCalledFunction(), RetTy, Tys, CostKind);
2555
2556 // If this is an intrinsic we may have a lower cost for it.
2557 if (getVectorIntrinsicIDForCall(CI, TLI)) {
2558 InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
2559 return std::min(ScalarCallCost, IntrinsicCost);
2560 }
2561 return ScalarCallCost;
2562}
2563
2565 if (VF.isScalar() || !canVectorizeTy(Ty))
2566 return Ty;
2567 return toVectorizedTy(Ty, VF);
2568}
2569
2570 InstructionCost
2571 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
2572 ElementCount VF) const {
2573 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
2574 assert(ID && "Expected intrinsic call!");
2575 Type *RetTy = maybeVectorizeType(CI->getType(), VF);
2576 FastMathFlags FMF;
2577 if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
2578 FMF = FPMO->getFastMathFlags();
2579
2580 SmallVector<const Value *> Arguments(CI->args());
2581 FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
2582 SmallVector<Type *> ParamTys;
2583 std::transform(FTy->param_begin(), FTy->param_end(),
2584 std::back_inserter(ParamTys),
2585 [&](Type *Ty) { return maybeVectorizeType(Ty, VF); });
2586
2587 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
2590 return TTI.getIntrinsicInstrCost(CostAttrs, CostKind);
2591}
2592
2594 // Fix widened non-induction PHIs by setting up the PHI operands.
2595 fixNonInductionPHIs(State);
2596
2597 // Don't apply optimizations below when no (vector) loop remains, as they all
2598 // require one at the moment.
2599 VPBasicBlock *HeaderVPBB =
2600 vputils::getFirstLoopHeader(*State.Plan, State.VPDT);
2601 if (!HeaderVPBB)
2602 return;
2603
2604 BasicBlock *HeaderBB = State.CFG.VPBB2IRBB[HeaderVPBB];
2605
2606 // Remove redundant induction instructions.
2607 legacyCSE(HeaderBB);
2608}
2609
2610 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
2611 auto Iter = vp_depth_first_shallow(Plan.getEntry());
2612 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
2613 for (VPRecipeBase &P : VPBB->phis()) {
2614 auto *VPPhi = dyn_cast<VPWidenPHIRecipe>(&P);
2615 if (!VPPhi)
2616 continue;
2617 PHINode *NewPhi = cast<PHINode>(State.get(VPPhi));
2618 // Make sure the builder has a valid insert point.
2619 Builder.SetInsertPoint(NewPhi);
2620 for (const auto &[Inc, VPBB] : VPPhi->incoming_values_and_blocks())
2621 NewPhi->addIncoming(State.get(Inc), State.CFG.VPBB2IRBB[VPBB]);
2622 }
2623 }
2624}
2625
2626void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
2627 // We should not collect Scalars more than once per VF. Right now, this
2628 // function is called from collectUniformsAndScalars(), which already does
2629 // this check. Collecting Scalars for VF=1 does not make any sense.
2630 assert(VF.isVector() && !Scalars.contains(VF) &&
2631 "This function should not be visited twice for the same VF");
2632
2633 // This avoids any chances of creating a REPLICATE recipe during planning
2634 // since that would result in generation of scalarized code during execution,
2635 // which is not supported for scalable vectors.
2636 if (VF.isScalable()) {
2637 Scalars[VF].insert_range(Uniforms[VF]);
2638 return;
2639 }
2640
2642
2643 // These sets are used to seed the analysis with pointers used by memory
2644 // accesses that will remain scalar.
2646 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
2647 auto *Latch = TheLoop->getLoopLatch();
2648
2649 // A helper that returns true if the use of Ptr by MemAccess will be scalar.
2650 // The pointer operands of loads and stores will be scalar as long as the
2651 // memory access is not a gather or scatter operation. The value operand of a
2652 // store will remain scalar if the store is scalarized.
2653 auto IsScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
2654 InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
2655 assert(WideningDecision != CM_Unknown &&
2656 "Widening decision should be ready at this moment");
2657 if (auto *Store = dyn_cast<StoreInst>(MemAccess))
2658 if (Ptr == Store->getValueOperand())
2659 return WideningDecision == CM_Scalarize;
2660 assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
2661 "Ptr is neither a value nor a pointer operand");
2662 return WideningDecision != CM_GatherScatter;
2663 };
2664
2665 // A helper that returns true if the given value is a getelementptr
2666 // instruction contained in the loop.
2667 auto IsLoopVaryingGEP = [&](Value *V) {
2668 return isa<GetElementPtrInst>(V) && !TheLoop->isLoopInvariant(V);
2669 };
2670
2671 // A helper that evaluates a memory access's use of a pointer. If the use will
2672 // be a scalar use and the pointer is only used by memory accesses, we place
2673 // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
2674 // PossibleNonScalarPtrs.
2675 auto EvaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
2676 // We only care about loop-varying getelementptr instructions contained in
2677 // the loop.
2678 if (!IsLoopVaryingGEP(Ptr))
2679 return;
2680
2681 // If the pointer has already been identified as scalar (e.g., if it was
2682 // also identified as uniform), there's nothing to do.
2683 auto *I = cast<Instruction>(Ptr);
2684 if (Worklist.count(I))
2685 return;
2686
2687 // If the use of the pointer will be a scalar use, and all users of the
2688 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
2689 // place the pointer in PossibleNonScalarPtrs.
2690 if (IsScalarUse(MemAccess, Ptr) &&
2692 ScalarPtrs.insert(I);
2693 else
2694 PossibleNonScalarPtrs.insert(I);
2695 };
2696
2697 // We seed the scalars analysis with two classes of instructions: (1)
2698 // instructions marked uniform-after-vectorization and (2) bitcast,
2699 // getelementptr and (pointer) phi instructions used by memory accesses
2700 // requiring a scalar use.
2701 //
2702 // (1) Add to the worklist all instructions that have been identified as
2703 // uniform-after-vectorization.
2704 Worklist.insert_range(Uniforms[VF]);
2705
2706 // (2) Add to the worklist all bitcast and getelementptr instructions used by
2707 // memory accesses requiring a scalar use. The pointer operands of loads and
2708 // stores will be scalar unless the operation is a gather or scatter.
2709 // The value operand of a store will remain scalar if the store is scalarized.
2710 for (auto *BB : TheLoop->blocks())
2711 for (auto &I : *BB) {
2712 if (auto *Load = dyn_cast<LoadInst>(&I)) {
2713 EvaluatePtrUse(Load, Load->getPointerOperand());
2714 } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
2715 EvaluatePtrUse(Store, Store->getPointerOperand());
2716 EvaluatePtrUse(Store, Store->getValueOperand());
2717 }
2718 }
2719 for (auto *I : ScalarPtrs)
2720 if (!PossibleNonScalarPtrs.count(I)) {
2721 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
2722 Worklist.insert(I);
2723 }
2724
2725 // Insert the forced scalars.
2726 // FIXME: Currently VPWidenPHIRecipe() often creates a dead vector
2727 // induction variable when the PHI user is scalarized.
2728 auto ForcedScalar = ForcedScalars.find(VF);
2729 if (ForcedScalar != ForcedScalars.end())
2730 for (auto *I : ForcedScalar->second) {
2731 LLVM_DEBUG(dbgs() << "LV: Found (forced) scalar instruction: " << *I << "\n");
2732 Worklist.insert(I);
2733 }
2734
2735 // Expand the worklist by looking through any getelementptr instructions
2736 // we've already identified as scalar. This is similar to the
2737 // expansion step in collectLoopUniforms(); however, here we're only
2738 // expanding to include additional getelementptr instructions.
2739 unsigned Idx = 0;
2740 while (Idx != Worklist.size()) {
2741 Instruction *Dst = Worklist[Idx++];
2742 if (!IsLoopVaryingGEP(Dst->getOperand(0)))
2743 continue;
2744 auto *Src = cast<Instruction>(Dst->getOperand(0));
2745 if (llvm::all_of(Src->users(), [&](User *U) -> bool {
2746 auto *J = cast<Instruction>(U);
2747 return !TheLoop->contains(J) || Worklist.count(J) ||
2748 ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
2749 IsScalarUse(J, Src));
2750 })) {
2751 Worklist.insert(Src);
2752 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
2753 }
2754 }
2755
2756 // An induction variable will remain scalar if all users of the induction
2757 // variable and induction variable update remain scalar.
2758 for (const auto &Induction : Legal->getInductionVars()) {
2759 auto *Ind = Induction.first;
2760 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
2761
2762 // If tail-folding is applied, the primary induction variable will be used
2763 // to feed a vector compare.
2764 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
2765 continue;
2766
2767 // Returns true if \p Indvar is a pointer induction that is used directly by
2768 // load/store instruction \p I.
2769 auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar,
2770 Instruction *I) {
2771 return Induction.second.getKind() ==
2774 Indvar == getLoadStorePointerOperand(I) && IsScalarUse(I, Indvar);
2775 };
2776
2777 // Determine if all users of the induction variable are scalar after
2778 // vectorization.
2779 bool ScalarInd = all_of(Ind->users(), [&](User *U) -> bool {
2780 auto *I = cast<Instruction>(U);
2781 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
2782 IsDirectLoadStoreFromPtrIndvar(Ind, I);
2783 });
2784 if (!ScalarInd)
2785 continue;
2786
2787 // If the induction variable update is a fixed-order recurrence, neither the
2788 // induction variable or its update should be marked scalar after
2789 // vectorization.
2790 auto *IndUpdatePhi = dyn_cast<PHINode>(IndUpdate);
2791 if (IndUpdatePhi && Legal->isFixedOrderRecurrence(IndUpdatePhi))
2792 continue;
2793
2794 // Determine if all users of the induction variable update instruction are
2795 // scalar after vectorization.
2796 bool ScalarIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
2797 auto *I = cast<Instruction>(U);
2798 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
2799 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
2800 });
2801 if (!ScalarIndUpdate)
2802 continue;
2803
2804 // The induction variable and its update instruction will remain scalar.
2805 Worklist.insert(Ind);
2806 Worklist.insert(IndUpdate);
2807 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
2808 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
2809 << "\n");
2810 }
2811
2812 Scalars[VF].insert_range(Worklist);
2813}
2814
2815 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I,
2816 ElementCount VF) {
2817 if (!isPredicatedInst(I))
2818 return false;
2819
2820 // Do we have a non-scalar lowering for this predicated
2821 // instruction? No - it is scalar with predication.
2822 switch(I->getOpcode()) {
2823 default:
2824 return true;
2825 case Instruction::Call:
2826 if (VF.isScalar())
2827 return true;
2828 [[fallthrough]];
2829 case Instruction::Load:
2830 case Instruction::Store: {
2831 auto *Ptr = getLoadStorePointerOperand(I);
2832 auto *Ty = getLoadStoreType(I);
2833 unsigned AS = getLoadStoreAddressSpace(I);
2834 Type *VTy = Ty;
2835 if (VF.isVector())
2836 VTy = VectorType::get(Ty, VF);
2837 const Align Alignment = getLoadStoreAlignment(I);
2838 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment, AS) ||
2839 TTI.isLegalMaskedGather(VTy, Alignment))
2840 : !(isLegalMaskedStore(Ty, Ptr, Alignment, AS) ||
2841 TTI.isLegalMaskedScatter(VTy, Alignment));
2842 }
2843 case Instruction::UDiv:
2844 case Instruction::SDiv:
2845 case Instruction::SRem:
2846 case Instruction::URem: {
2847 // We have the option to use the safe-divisor idiom to avoid predication.
2848 // The cost based decision here will always select safe-divisor for
2849 // scalable vectors as scalarization isn't legal.
2850 const auto [ScalarCost, SafeDivisorCost] = getDivRemSpeculationCost(I, VF);
2851 return isDivRemScalarWithPredication(ScalarCost, SafeDivisorCost);
2852 }
2853 }
2854}
2855
2856// TODO: Fold into LoopVectorizationLegality::isMaskRequired.
2857 bool LoopVectorizationCostModel::isPredicatedInst(Instruction *I) const {
2858 // TODO: We can use the loop preheader as a context point here and get
2859 // context-sensitive reasoning for isSafeToSpeculativelyExecute.
2860 if (isSafeToSpeculativelyExecute(I) ||
2861 (isa<LoadInst, StoreInst, CallInst>(I) && !Legal->isMaskRequired(I)) ||
2862 isa<BranchInst, SwitchInst, PHINode, AllocaInst>(I))
2863 return false;
2864
2865 // If the instruction was executed conditionally in the original scalar loop,
2866 // predication is needed with a mask whose lanes are all possibly inactive.
2867 if (Legal->blockNeedsPredication(I->getParent()))
2868 return true;
2869
2870 // If we're not folding the tail by masking, predication is unnecessary.
2871 if (!foldTailByMasking())
2872 return false;
2873
2874 // All that remain are instructions with side-effects originally executed in
2875 // the loop unconditionally, but now execute under a tail-fold mask (only)
2876 // having at least one active lane (the first). If the side-effects of the
2877 // instruction are invariant, executing it w/o (the tail-folding) mask is safe
2878 // - it will cause the same side-effects as when masked.
2879 switch(I->getOpcode()) {
2880 default:
2881 llvm_unreachable(
2882 "instruction should have been considered by earlier checks");
2883 case Instruction::Call:
2884 // Side-effects of a Call are assumed to be non-invariant, needing a
2885 // (fold-tail) mask.
2886 assert(Legal->isMaskRequired(I) &&
2887 "should have returned earlier for calls not needing a mask");
2888 return true;
2889 case Instruction::Load:
2890 // If the address is loop invariant no predication is needed.
2891 return !Legal->isInvariant(getLoadStorePointerOperand(I));
2892 case Instruction::Store: {
2893 // For stores, we must prove both speculation safety (which follows from
2894 // the same argument as for loads) and that the value being stored is
2895 // correct. The easiest form of the latter is to require that all values
2896 // stored are the same.
2897 return !(Legal->isInvariant(getLoadStorePointerOperand(I)) &&
2898 TheLoop->isLoopInvariant(cast<StoreInst>(I)->getValueOperand()));
2899 }
2900 case Instruction::UDiv:
2901 case Instruction::URem:
2902 // If the divisor is loop-invariant no predication is needed.
2903 return !Legal->isInvariant(I->getOperand(1));
2904 case Instruction::SDiv:
2905 case Instruction::SRem:
2906 // Conservative for now, since masked-off lanes may be poison and could
2907 // trigger signed overflow.
2908 return true;
2909 }
2910}
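// Illustrative consequence (not from the source): under tail folding, a
// store of a loop-varying value to an invariant address stays predicated,
// whereas a udiv by a loop-invariant divisor may safely execute unmasked.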
2911
2915 return 1;
2916 // If the block wasn't originally predicated then return early to avoid
2917 // computing BlockFrequencyInfo unnecessarily.
2918 if (!Legal->blockNeedsPredication(BB))
2919 return 1;
2920
2921 uint64_t HeaderFreq =
2922 getBFI().getBlockFreq(TheLoop->getHeader()).getFrequency();
2923 uint64_t BBFreq = getBFI().getBlockFreq(BB).getFrequency();
2924 assert(HeaderFreq >= BBFreq &&
2925 "Header has smaller block freq than dominated BB?");
2926 return std::round((double)HeaderFreq / BBFreq);
2927}
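// Worked example (illustrative): if the header has a block frequency of
// 1000 and a predicated block a frequency of 250, the divisor is
// round(1000 / 250) = 4, i.e. the block executes on roughly 1 in 4
// iterations.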
2928
2929std::pair<InstructionCost, InstructionCost>
2931 ElementCount VF) {
2932 assert(I->getOpcode() == Instruction::UDiv ||
2933 I->getOpcode() == Instruction::SDiv ||
2934 I->getOpcode() == Instruction::SRem ||
2935 I->getOpcode() == Instruction::URem);
2937
2938 // Scalarization isn't legal for scalable vector types
2939 InstructionCost ScalarizationCost = InstructionCost::getInvalid();
2940 if (!VF.isScalable()) {
2941 // Get the scalarization cost and scale this amount by the probability of
2942 // executing the predicated block. If the instruction is not predicated,
2943 // we fall through to the next case.
2944 ScalarizationCost = 0;
2945
2946 // These instructions have a non-void type, so account for the phi nodes
2947 // that we will create. This cost is likely to be zero. The phi node
2948 // cost, if any, should be scaled by the block probability because it
2949 // models a copy at the end of each predicated block.
2950 ScalarizationCost +=
2951 VF.getFixedValue() * TTI.getCFInstrCost(Instruction::PHI, CostKind);
2952
2953 // The cost of the non-predicated instruction.
2954 ScalarizationCost +=
2955 VF.getFixedValue() *
2956 TTI.getArithmeticInstrCost(I->getOpcode(), I->getType(), CostKind);
2957
2958 // The cost of insertelement and extractelement instructions needed for
2959 // scalarization.
2960 ScalarizationCost += getScalarizationOverhead(I, VF);
2961
2962 // Scale the cost by the probability of executing the predicated blocks.
2963 // This assumes the predicated block for each vector lane is equally
2964 // likely.
2965 ScalarizationCost =
2966 ScalarizationCost / getPredBlockCostDivisor(CostKind, I->getParent());
2967 }
2968
2969 InstructionCost SafeDivisorCost = 0;
2970 auto *VecTy = toVectorTy(I->getType(), VF);
2971 // The cost of the select guard to ensure all lanes are well defined
2972 // after we speculate above any internal control flow.
2973 SafeDivisorCost +=
2974 TTI.getCmpSelInstrCost(Instruction::Select, VecTy,
2975 toVectorTy(Type::getInt1Ty(I->getContext()), VF),
2976 CmpInst::BAD_ICMP_PREDICATE, CostKind);
2977
2978 SmallVector<const Value *, 4> Operands(I->operand_values());
2979 SafeDivisorCost += TTI.getArithmeticInstrCost(
2980 I->getOpcode(), VecTy, CostKind,
2981 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
2982 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
2983 Operands, I);
2984 return {ScalarizationCost, SafeDivisorCost};
2985}
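// Illustrative sketch of the safe-divisor idiom costed above (assumed IR):
//   %d.safe = select <4 x i1> %mask, <4 x i32> %d, <4 x i32> splat (i32 1)
//   %q = udiv <4 x i32> %x, %d.safe
// Masked-off lanes divide by 1, so the unpredicated divide cannot trap.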
2986
2987 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
2988 Instruction *I, ElementCount VF) const {
2989 assert(isAccessInterleaved(I) && "Expecting interleaved access.");
2990 assert(getWideningDecision(I, VF) == CM_Unknown &&
2991 "Decision should not be set yet.");
2992 auto *Group = getInterleavedAccessGroup(I);
2993 assert(Group && "Must have a group.");
2994 unsigned InterleaveFactor = Group->getFactor();
2995
2996 // If the instruction's allocated size doesn't equal its type size, it
2997 // requires padding and will be scalarized.
2998 auto &DL = I->getDataLayout();
2999 auto *ScalarTy = getLoadStoreType(I);
3000 if (hasIrregularType(ScalarTy, DL))
3001 return false;
3002
3003 // For scalable vectors, the interleave factors must be <= 8 since we require
3004 // the (de)interleaveN intrinsics instead of shufflevectors.
3005 if (VF.isScalable() && InterleaveFactor > 8)
3006 return false;
3007
3008 // If the group involves a non-integral pointer, we may not be able to
3009 // losslessly cast all values to a common type.
3010 bool ScalarNI = DL.isNonIntegralPointerType(ScalarTy);
3011 for (unsigned Idx = 0; Idx < InterleaveFactor; Idx++) {
3012 Instruction *Member = Group->getMember(Idx);
3013 if (!Member)
3014 continue;
3015 auto *MemberTy = getLoadStoreType(Member);
3016 bool MemberNI = DL.isNonIntegralPointerType(MemberTy);
3017 // Don't coerce non-integral pointers to integers or vice versa.
3018 if (MemberNI != ScalarNI)
3019 // TODO: Consider adding special nullptr value case here
3020 return false;
3021 if (MemberNI && ScalarNI &&
3022 ScalarTy->getPointerAddressSpace() !=
3023 MemberTy->getPointerAddressSpace())
3024 return false;
3025 }
3026
3027 // Check if masking is required.
3028 // A Group may need masking for one of two reasons: it resides in a block that
3029 // needs predication, or it was decided to use masking to deal with gaps
3030 // (either a gap at the end of a load-access that may result in a speculative
3031 // load, or any gaps in a store-access).
3032 bool PredicatedAccessRequiresMasking =
3033 blockNeedsPredicationForAnyReason(I->getParent()) &&
3034 Legal->isMaskRequired(I);
3035 bool LoadAccessWithGapsRequiresEpilogMasking =
3036 isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
3037 !isScalarEpilogueAllowed();
3038 bool StoreAccessWithGapsRequiresMasking =
3039 isa<StoreInst>(I) && !Group->isFull();
3040 if (!PredicatedAccessRequiresMasking &&
3041 !LoadAccessWithGapsRequiresEpilogMasking &&
3042 !StoreAccessWithGapsRequiresMasking)
3043 return true;
3044
3045 // If masked interleaving is required, we expect that the user/target had
3046 // enabled it, because otherwise it either wouldn't have been created or
3047 // it should have been invalidated by the CostModel.
3048 assert(useMaskedInterleavedAccesses(TTI) &&
3049 "Masked interleave-groups for predicated accesses are not enabled.");
3050
3051 if (Group->isReverse())
3052 return false;
3053
3054 // TODO: Support interleaved access that requires a gap mask for scalable VFs.
3055 bool NeedsMaskForGaps = LoadAccessWithGapsRequiresEpilogMasking ||
3056 StoreAccessWithGapsRequiresMasking;
3057 if (VF.isScalable() && NeedsMaskForGaps)
3058 return false;
3059
3060 auto *Ty = getLoadStoreType(I);
3061 const Align Alignment = getLoadStoreAlignment(I);
3062 unsigned AS = getLoadStoreAddressSpace(I);
3063 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment, AS)
3064 : TTI.isLegalMaskedStore(Ty, Alignment, AS);
3065}
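// Illustrative example (not from the source): a factor-2 store group that
// writes only A[2*i] has a gap at A[2*i+1], so
// StoreAccessWithGapsRequiresMasking holds and widening additionally
// requires legal masked stores.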
3066
3067 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
3068 Instruction *I, ElementCount VF) {
3069 // Get and ensure we have a valid memory instruction.
3070 assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction");
3071
3072 auto *Ptr = getLoadStorePointerOperand(I);
3073 auto *ScalarTy = getLoadStoreType(I);
3074
3075 // In order to be widened, the pointer should be consecutive, first of all.
3076 if (!Legal->isConsecutivePtr(ScalarTy, Ptr))
3077 return false;
3078
3079 // If the instruction is a store located in a predicated block, it will be
3080 // scalarized.
3081 if (isScalarWithPredication(I, VF))
3082 return false;
3083
3084 // If the instruction's allocated size doesn't equal its type size, it
3085 // requires padding and will be scalarized.
3086 auto &DL = I->getDataLayout();
3087 if (hasIrregularType(ScalarTy, DL))
3088 return false;
3089
3090 return true;
3091}
3092
3093void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
3094 // We should not collect Uniforms more than once per VF. Right now,
3095 // this function is called from collectUniformsAndScalars(), which
3096 // already does this check. Collecting Uniforms for VF=1 does not make any
3097 // sense.
3098
3099 assert(VF.isVector() && !Uniforms.contains(VF) &&
3100 "This function should not be visited twice for the same VF");
3101
3102 // Initialize the entry for this VF so we don't analyze it again even if
3103 // no uniform value is found; Uniforms.count(VF) will then return 1.
3104 Uniforms[VF].clear();
3105
3106 // Now we know that the loop is vectorizable!
3107 // Collect instructions inside the loop that will remain uniform after
3108 // vectorization.
3109
3110 // Global values, params and instructions outside of current loop are out of
3111 // scope.
3112 auto IsOutOfScope = [&](Value *V) -> bool {
3113 Instruction *I = dyn_cast<Instruction>(V);
3114 return (!I || !TheLoop->contains(I));
3115 };
3116
3117 // Worklist containing uniform instructions demanding lane 0.
3118 SetVector<Instruction *> Worklist;
3119
3120 // Add uniform instructions demanding lane 0 to the worklist. Instructions
3121 // that require predication must not be considered uniform after
3122 // vectorization, because that would create an erroneous replicating region
3123 // where only a single instance out of VF should be formed.
3124 auto AddToWorklistIfAllowed = [&](Instruction *I) -> void {
3125 if (IsOutOfScope(I)) {
3126 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
3127 << *I << "\n");
3128 return;
3129 }
3130 if (isPredicatedInst(I)) {
3131 LLVM_DEBUG(
3132 dbgs() << "LV: Found not uniform due to requiring predication: " << *I
3133 << "\n");
3134 return;
3135 }
3136 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
3137 Worklist.insert(I);
3138 };
3139
3140 // Start with the conditional branches exiting the loop. If the branch
3141 // condition is an instruction contained in the loop that is only used by the
3142 // branch, it is uniform. Note conditions from uncountable early exits are not
3143 // uniform.
3144 SmallVector<BasicBlock *, 4> Exiting;
3145 TheLoop->getExitingBlocks(Exiting);
3146 for (BasicBlock *E : Exiting) {
3147 if (Legal->hasUncountableEarlyExit() && TheLoop->getLoopLatch() != E)
3148 continue;
3149 auto *Cmp = dyn_cast<Instruction>(E->getTerminator()->getOperand(0));
3150 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
3151 AddToWorklistIfAllowed(Cmp);
3152 }
3153
3154 auto PrevVF = VF.divideCoefficientBy(2);
3155 // Return true if all lanes perform the same memory operation, and we can
3156 // thus choose to execute only one.
3157 auto IsUniformMemOpUse = [&](Instruction *I) {
3158 // If the value was already known to not be uniform for the previous
3159 // (smaller VF), it cannot be uniform for the larger VF.
3160 if (PrevVF.isVector()) {
3161 auto Iter = Uniforms.find(PrevVF);
3162 if (Iter != Uniforms.end() && !Iter->second.contains(I))
3163 return false;
3164 }
3165 if (!Legal->isUniformMemOp(*I, VF))
3166 return false;
3167 if (isa<LoadInst>(I))
3168 // Loading the same address always produces the same result - at least
3169 // assuming aliasing and ordering which have already been checked.
3170 return true;
3171 // Storing the same value on every iteration.
3172 return TheLoop->isLoopInvariant(cast<StoreInst>(I)->getValueOperand());
3173 };
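// Editor's note - a hypothetical loop illustrating the two uniform cases
// handled by IsUniformMemOpUse above (sketch, not from the source):
//
//   for (int i = 0; i < n; i++) {
//     int v = *p;   // uniform load: same address every iteration
//     q[0] = c;     // uniform store: invariant address and stored value
//   }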
3174
3175 auto IsUniformDecision = [&](Instruction *I, ElementCount VF) {
3176 InstWidening WideningDecision = getWideningDecision(I, VF);
3177 assert(WideningDecision != CM_Unknown &&
3178 "Widening decision should be ready at this moment");
3179
3180 if (IsUniformMemOpUse(I))
3181 return true;
3182
3183 return (WideningDecision == CM_Widen ||
3184 WideningDecision == CM_Widen_Reverse ||
3185 WideningDecision == CM_Interleave);
3186 };
3187
3188 // Returns true if Ptr is the pointer operand of a memory access instruction
3189 // I, I is known to not require scalarization, and the pointer is not also
3190 // stored.
3191 auto IsVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
3192 if (isa<StoreInst>(I) && I->getOperand(0) == Ptr)
3193 return false;
3194 return getLoadStorePointerOperand(I) == Ptr &&
3195 (IsUniformDecision(I, VF) || Legal->isInvariant(Ptr));
3196 };
3197
3198 // Holds a list of values which are known to have at least one uniform use.
3199 // Note that there may be other uses which aren't uniform. A "uniform use"
3200 // here is something which only demands lane 0 of the unrolled iterations;
3201 // it does not imply that all lanes produce the same value (e.g. this is not
3202 // the usual meaning of uniform)
3203 SetVector<Value *> HasUniformUse;
3204
3205 // Scan the loop for instructions which are either a) known to have only
3206 // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
3207 for (auto *BB : TheLoop->blocks())
3208 for (auto &I : *BB) {
3209 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
3210 switch (II->getIntrinsicID()) {
3211 case Intrinsic::sideeffect:
3212 case Intrinsic::experimental_noalias_scope_decl:
3213 case Intrinsic::assume:
3214 case Intrinsic::lifetime_start:
3215 case Intrinsic::lifetime_end:
3216 if (TheLoop->hasLoopInvariantOperands(&I))
3217 AddToWorklistIfAllowed(&I);
3218 break;
3219 default:
3220 break;
3221 }
3222 }
3223
3224 if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
3225 if (IsOutOfScope(EVI->getAggregateOperand())) {
3226 AddToWorklistIfAllowed(EVI);
3227 continue;
3228 }
3229 // Only ExtractValue instructions where the aggregate value comes from a
3230 // call are allowed to be non-uniform.
3231 assert(isa<CallInst>(EVI->getAggregateOperand()) &&
3232 "Expected aggregate value to be call return value");
3233 }
3234
3235 // If there's no pointer operand, there's nothing to do.
3236 auto *Ptr = getLoadStorePointerOperand(&I);
3237 if (!Ptr)
3238 continue;
3239
3240 // If the pointer can be proven to be uniform, always add it to the
3241 // worklist.
3242 if (isa<Instruction>(Ptr) && Legal->isUniform(Ptr, VF))
3243 AddToWorklistIfAllowed(cast<Instruction>(Ptr));
3244
3245 if (IsUniformMemOpUse(&I))
3246 AddToWorklistIfAllowed(&I);
3247
3248 if (IsVectorizedMemAccessUse(&I, Ptr))
3249 HasUniformUse.insert(Ptr);
3250 }
3251
3252 // Add to the worklist any operands which have *only* uniform (e.g. lane 0
3253 // demanding) users. Since loops are assumed to be in LCSSA form, this
3254 // disallows uses outside the loop as well.
3255 for (auto *V : HasUniformUse) {
3256 if (IsOutOfScope(V))
3257 continue;
3258 auto *I = cast<Instruction>(V);
3259 bool UsersAreMemAccesses = all_of(I->users(), [&](User *U) -> bool {
3260 auto *UI = cast<Instruction>(U);
3261 return TheLoop->contains(UI) && IsVectorizedMemAccessUse(UI, V);
3262 });
3263 if (UsersAreMemAccesses)
3264 AddToWorklistIfAllowed(I);
3265 }
3266
3267 // Expand Worklist in topological order: whenever a new instruction
3268 // is added, its users should already be inside the Worklist. This ensures
3269 // that a uniform instruction will only be used by uniform instructions.
3270 unsigned Idx = 0;
3271 while (Idx != Worklist.size()) {
3272 Instruction *I = Worklist[Idx++];
3273
3274 for (auto *OV : I->operand_values()) {
3275 // Out-of-scope operands cannot be uniform instructions.
3276 if (IsOutOfScope(OV))
3277 continue;
3278 // First order recurrence Phi's should typically be considered
3279 // non-uniform.
3280 auto *OP = dyn_cast<PHINode>(OV);
3281 if (OP && Legal->isFixedOrderRecurrence(OP))
3282 continue;
3283 // If all the users of the operand are uniform, then add the
3284 // operand into the uniform worklist.
3285 auto *OI = cast<Instruction>(OV);
3286 if (llvm::all_of(OI->users(), [&](User *U) -> bool {
3287 auto *J = cast<Instruction>(U);
3288 return Worklist.count(J) || IsVectorizedMemAccessUse(J, OI);
3289 }))
3290 AddToWorklistIfAllowed(OI);
3291 }
3292 }
3293
3294 // For an instruction to be added into Worklist above, all its users inside
3295 // the loop should also be in Worklist. However, this condition cannot be
3296 // true for phi nodes that form a cyclic dependence. We must process phi
3297 // nodes separately. An induction variable will remain uniform if all users
3298 // of the induction variable and induction variable update remain uniform.
3299 // The code below handles both pointer and non-pointer induction variables.
3300 BasicBlock *Latch = TheLoop->getLoopLatch();
3301 for (const auto &Induction : Legal->getInductionVars()) {
3302 auto *Ind = Induction.first;
3303 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
3304
3305 // Determine if all users of the induction variable are uniform after
3306 // vectorization.
3307 bool UniformInd = all_of(Ind->users(), [&](User *U) -> bool {
3308 auto *I = cast<Instruction>(U);
3309 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
3310 IsVectorizedMemAccessUse(I, Ind);
3311 });
3312 if (!UniformInd)
3313 continue;
3314
3315 // Determine if all users of the induction variable update instruction are
3316 // uniform after vectorization.
3317 bool UniformIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
3318 auto *I = cast<Instruction>(U);
3319 return I == Ind || Worklist.count(I) ||
3320 IsVectorizedMemAccessUse(I, IndUpdate);
3321 });
3322 if (!UniformIndUpdate)
3323 continue;
3324
3325 // The induction variable and its update instruction will remain uniform.
3326 AddToWorklistIfAllowed(Ind);
3327 AddToWorklistIfAllowed(IndUpdate);
3328 }
3329
3330 Uniforms[VF].insert_range(Worklist);
3331}
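// Editor's note - an illustrative example (not from the source) of a value
// that remains uniform after vectorization: a GEP used only as the address
// of a widened consecutive load. Only lane 0 of the GEP is demanded, since
// the vector load is emitted from the lane-0 address:
//
//   %gep = getelementptr i32, ptr %base, i64 %iv   ; uniform (lane 0 only)
//   %v = load i32, ptr %gep                        ; widened to a vector load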
3332
3333 bool LoopVectorizationCostModel::runtimeChecksRequired() {
3334 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
3335
3336 if (Legal->getRuntimePointerChecking()->Need) {
3337 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
3338 "runtime pointer checks needed. Enable vectorization of this "
3339 "loop with '#pragma clang loop vectorize(enable)' when "
3340 "compiling with -Os/-Oz",
3341 "CantVersionLoopWithOptForSize", ORE, TheLoop);
3342 return true;
3343 }
3344
3345 if (!PSE.getPredicate().isAlwaysTrue()) {
3346 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
3347 "runtime SCEV checks needed. Enable vectorization of this "
3348 "loop with '#pragma clang loop vectorize(enable)' when "
3349 "compiling with -Os/-Oz",
3350 "CantVersionLoopWithOptForSize", ORE, TheLoop);
3351 return true;
3352 }
3353
3354 // FIXME: Avoid specializing for stride==1 instead of bailing out.
3355 if (!Legal->getLAI()->getSymbolicStrides().empty()) {
3356 reportVectorizationFailure("Runtime stride check for small trip count",
3357 "runtime stride == 1 checks needed. Enable vectorization of "
3358 "this loop without such check by compiling with -Os/-Oz",
3359 "CantVersionLoopWithOptForSize", ORE, TheLoop);
3360 return true;
3361 }
3362
3363 return false;
3364}
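// Editor's note - as the remarks above suggest, a loop rejected under
// -Os/-Oz because it needs runtime checks can still be vectorized by
// opting in explicitly (illustrative usage, not from the source):
//
//   #pragma clang loop vectorize(enable)
//   for (int i = 0; i < n; i++)
//     a[i] = b[i] + c[i];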
3365
3366bool LoopVectorizationCostModel::isScalableVectorizationAllowed() {
3367 if (IsScalableVectorizationAllowed)
3368 return *IsScalableVectorizationAllowed;
3369
3370 IsScalableVectorizationAllowed = false;
3371 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors)
3372 return false;
3373
3374 if (Hints->isScalableVectorizationDisabled()) {
3375 reportVectorizationInfo("Scalable vectorization is explicitly disabled",
3376 "ScalableVectorizationDisabled", ORE, TheLoop);
3377 return false;
3378 }
3379
3380 LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n");
3381
3382 auto MaxScalableVF = ElementCount::getScalable(
3383 std::numeric_limits<ElementCount::ScalarTy>::max());
3384
3385 // Test that the loop-vectorizer can legalize all operations for this MaxVF.
3386 // FIXME: While for scalable vectors this is currently sufficient, this should
3387 // be replaced by a more detailed mechanism that filters out specific VFs,
3388 // instead of invalidating vectorization for a whole set of VFs based on the
3389 // MaxVF.
3390
3391 // Disable scalable vectorization if the loop contains unsupported reductions.
3392 if (!canVectorizeReductions(MaxScalableVF)) {
3394 "Scalable vectorization not supported for the reduction "
3395 "operations found in this loop.",
3396 "ScalableVFUnfeasible", ORE, TheLoop);
3397 return false;
3398 }
3399
3400 // Disable scalable vectorization if the loop contains any instructions
3401 // with element types not supported for scalable vectors.
3402 if (any_of(ElementTypesInLoop, [&](Type *Ty) {
3403 return !Ty->isVoidTy() &&
3404 !this->TTI.isElementTypeLegalForScalableVector(Ty);
3405 })) {
3406 reportVectorizationInfo("Scalable vectorization is not supported "
3407 "for all element types found in this loop.",
3408 "ScalableVFUnfeasible", ORE, TheLoop);
3409 return false;
3410 }
3411
3412 if (!Legal->isSafeForAnyVectorWidth() && !getMaxVScale(*TheFunction, TTI)) {
3413 reportVectorizationInfo("The target does not provide maximum vscale value "
3414 "for safe distance analysis.",
3415 "ScalableVFUnfeasible", ORE, TheLoop);
3416 return false;
3417 }
3418
3419 IsScalableVectorizationAllowed = true;
3420 return true;
3421}
3422
3423ElementCount
3424LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
3425 if (!isScalableVectorizationAllowed())
3426 return ElementCount::getScalable(0);
3427
3428 auto MaxScalableVF = ElementCount::getScalable(
3429 std::numeric_limits<ElementCount::ScalarTy>::max());
3430 if (Legal->isSafeForAnyVectorWidth())
3431 return MaxScalableVF;
3432
3433 std::optional<unsigned> MaxVScale = getMaxVScale(*TheFunction, TTI);
3434 // Limit MaxScalableVF by the maximum safe dependence distance.
3435 MaxScalableVF = ElementCount::getScalable(MaxSafeElements / *MaxVScale);
3436
3437 if (!MaxScalableVF)
3439 "Max legal vector width too small, scalable vectorization "
3440 "unfeasible.",
3441 "ScalableVFUnfeasible", ORE, TheLoop);
3442
3443 return MaxScalableVF;
3444}
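// Editor's note - a worked example of the clamping above (editor's
// arithmetic with assumed numbers): with MaxSafeElements = 32 and a target
// maximum vscale of 16, MaxScalableVF becomes ElementCount::getScalable(2),
// i.e. <vscale x 2 x ...>, since vscale * 2 <= 16 * 2 = 32 elements can
// never exceed the safe dependence distance.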
3445
3446FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF(
3447 unsigned MaxTripCount, ElementCount UserVF, unsigned UserIC,
3448 bool FoldTailByMasking) {
3449 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
3450 unsigned SmallestType, WidestType;
3451 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
3452
3453 // Get the maximum safe dependence distance in bits computed by LAA.
3454 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
3455 // the memory accesses that is most restrictive (involved in the smallest
3456 // dependence distance).
3457 unsigned MaxSafeElementsPowerOf2 =
3458 bit_floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);
3459 if (!Legal->isSafeForAnyStoreLoadForwardDistances()) {
3460 unsigned SLDist = Legal->getMaxStoreLoadForwardSafeDistanceInBits();
3461 MaxSafeElementsPowerOf2 =
3462 std::min(MaxSafeElementsPowerOf2, SLDist / WidestType);
3463 }
3464 auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElementsPowerOf2);
3465 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElementsPowerOf2);
3466
3467 if (!Legal->isSafeForAnyVectorWidth())
3468 this->MaxSafeElements = MaxSafeElementsPowerOf2;
3469
3470 LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
3471 << ".\n");
3472 LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
3473 << ".\n");
3474
3475 // First analyze the UserVF, fall back if the UserVF should be ignored.
3476 if (UserVF) {
3477 auto MaxSafeUserVF =
3478 UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
3479
3480 if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) {
3481 // If `VF=vscale x N` is safe, then so is `VF=N`
3482 if (UserVF.isScalable())
3483 return FixedScalableVFPair(
3484 ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF);
3485
3486 return UserVF;
3487 }
3488
3489 assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
3490
3491 // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
3492 // is better to ignore the hint and let the compiler choose a suitable VF.
3493 if (!UserVF.isScalable()) {
3494 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
3495 << " is unsafe, clamping to max safe VF="
3496 << MaxSafeFixedVF << ".\n");
3497 ORE->emit([&]() {
3498 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
3499 TheLoop->getStartLoc(),
3500 TheLoop->getHeader())
3501 << "User-specified vectorization factor "
3502 << ore::NV("UserVectorizationFactor", UserVF)
3503 << " is unsafe, clamping to maximum safe vectorization factor "
3504 << ore::NV("VectorizationFactor", MaxSafeFixedVF);
3505 });
3506 return MaxSafeFixedVF;
3507 }
3508
3509 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
3510 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
3511 << " is ignored because scalable vectors are not "
3512 "available.\n");
3513 ORE->emit([&]() {
3514 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
3515 TheLoop->getStartLoc(),
3516 TheLoop->getHeader())
3517 << "User-specified vectorization factor "
3518 << ore::NV("UserVectorizationFactor", UserVF)
3519 << " is ignored because the target does not support scalable "
3520 "vectors. The compiler will pick a more suitable value.";
3521 });
3522 } else {
3523 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
3524 << " is unsafe. Ignoring scalable UserVF.\n");
3525 ORE->emit([&]() {
3526 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
3527 TheLoop->getStartLoc(),
3528 TheLoop->getHeader())
3529 << "User-specified vectorization factor "
3530 << ore::NV("UserVectorizationFactor", UserVF)
3531 << " is unsafe. Ignoring the hint to let the compiler pick a "
3532 "more suitable value.";
3533 });
3534 }
3535 }
3536
3537 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
3538 << " / " << WidestType << " bits.\n");
3539
3540 FixedScalableVFPair Result(ElementCount::getFixed(1),
3541 ElementCount::getScalable(1));
3542 if (auto MaxVF =
3543 getMaximizedVFForTarget(MaxTripCount, SmallestType, WidestType,
3544 MaxSafeFixedVF, UserIC, FoldTailByMasking))
3545 Result.FixedVF = MaxVF;
3546
3547 if (auto MaxVF =
3548 getMaximizedVFForTarget(MaxTripCount, SmallestType, WidestType,
3549 MaxSafeScalableVF, UserIC, FoldTailByMasking))
3550 if (MaxVF.isScalable()) {
3551 Result.ScalableVF = MaxVF;
3552 LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
3553 << "\n");
3554 }
3555
3556 return Result;
3557}
3558
3559FixedScalableVFPair
3560 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
3561 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
3562 // TODO: It may be useful to vectorize anyway, since the checks are still
3563 // likely to be dynamically uniform if the target can skip them.
3564 reportVectorizationFailure(
3565 "Not inserting runtime ptr check for divergent target",
3566 "runtime pointer checks needed. Not enabled for divergent target",
3567 "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
3568 return FixedScalableVFPair::getNone();
3569 }
3570
3571 ScalarEvolution *SE = PSE.getSE();
3572 ElementCount TC = getSmallConstantTripCount(SE, TheLoop);
3573 unsigned MaxTC = PSE.getSmallConstantMaxTripCount();
3574 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
3575 if (TC != ElementCount::getFixed(MaxTC))
3576 LLVM_DEBUG(dbgs() << "LV: Found maximum trip count: " << MaxTC << '\n');
3577 if (TC.isScalar()) {
3578 reportVectorizationFailure("Single iteration (non) loop",
3579 "loop trip count is one, irrelevant for vectorization",
3580 "SingleIterationLoop", ORE, TheLoop);
3581 return FixedScalableVFPair::getNone();
3582 }
3583
3584 // If BTC matches the widest induction type and is -1 then the trip count
3585 // computation will wrap to 0 and the vector trip count will be 0. Do not try
3586 // to vectorize.
3587 const SCEV *BTC = SE->getBackedgeTakenCount(TheLoop);
3588 if (!isa<SCEVCouldNotCompute>(BTC) &&
3589 BTC->getType()->getScalarSizeInBits() >=
3590 Legal->getWidestInductionType()->getScalarSizeInBits() &&
3591 SE->isKnownPredicate(CmpInst::ICMP_EQ, BTC,
3592 SE->getMinusOne(BTC->getType()))) {
3593 reportVectorizationFailure(
3594 "Trip count computation wrapped",
3595 "backedge-taken count is -1, loop trip count wrapped to 0",
3596 "TripCountWrapped", ORE, TheLoop);
3597 return FixedScalableVFPair::getNone();
3598 }
3599
3600 switch (ScalarEpilogueStatus) {
3601 case CM_ScalarEpilogueAllowed:
3602 return computeFeasibleMaxVF(MaxTC, UserVF, UserIC, false);
3603 case CM_ScalarEpilogueNotAllowedUsePredicate:
3604 [[fallthrough]];
3605 case CM_ScalarEpilogueNotNeededUsePredicate:
3606 LLVM_DEBUG(
3607 dbgs() << "LV: vector predicate hint/switch found.\n"
3608 << "LV: Not allowing scalar epilogue, creating predicated "
3609 << "vector loop.\n");
3610 break;
3611 case CM_ScalarEpilogueNotAllowedLowTripLoop:
3612 // fallthrough as a special case of OptForSize
3613 case CM_ScalarEpilogueNotAllowedOptSize:
3614 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
3615 LLVM_DEBUG(
3616 dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
3617 else
3618 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
3619 << "count.\n");
3620
3621 // Bail if runtime checks are required, which are not good when optimising
3622 // for size.
3623 if (runtimeChecksRequired())
3624 return FixedScalableVFPair::getNone();
3625
3626 break;
3627 }
3628
3629 // Now try the tail folding
3630
3631 // Invalidate interleave groups that require an epilogue if we can't mask
3632 // the interleave-group.
3633 if (!useMaskedInterleavedAccesses(TTI)) {
3634 assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
3635 "No decisions should have been taken at this point");
3636 // Note: There is no need to invalidate any cost modeling decisions here, as
3637 // none were taken so far.
3638 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
3639 }
3640
3641 FixedScalableVFPair MaxFactors =
3642 computeFeasibleMaxVF(MaxTC, UserVF, UserIC, true);
3643
3644 // Avoid tail folding if the trip count is known to be a multiple of any VF
3645 // we choose.
3646 std::optional<unsigned> MaxPowerOf2RuntimeVF =
3647 MaxFactors.FixedVF.getFixedValue();
3648 if (MaxFactors.ScalableVF) {
3649 std::optional<unsigned> MaxVScale = getMaxVScale(*TheFunction, TTI);
3650 if (MaxVScale) {
3651 MaxPowerOf2RuntimeVF = std::max<unsigned>(
3652 *MaxPowerOf2RuntimeVF,
3653 *MaxVScale * MaxFactors.ScalableVF.getKnownMinValue());
3654 } else
3655 MaxPowerOf2RuntimeVF = std::nullopt; // Stick with tail-folding for now.
3656 }
3657
3658 auto NoScalarEpilogueNeeded = [this, &UserIC](unsigned MaxVF) {
3659 // Return false if the loop is neither a single-latch-exit loop nor an
3660 // early-exit loop as tail-folding is not supported in that case.
3661 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch() &&
3662 !Legal->hasUncountableEarlyExit())
3663 return false;
3664 unsigned MaxVFtimesIC = UserIC ? MaxVF * UserIC : MaxVF;
3665 ScalarEvolution *SE = PSE.getSE();
3666 // Calling getSymbolicMaxBackedgeTakenCount enables support for loops
3667 // with uncountable exits. For countable loops, the symbolic maximum must
3668 // remain identical to the known back-edge taken count.
3669 const SCEV *BackedgeTakenCount = PSE.getSymbolicMaxBackedgeTakenCount();
3670 assert((Legal->hasUncountableEarlyExit() ||
3671 BackedgeTakenCount == PSE.getBackedgeTakenCount()) &&
3672 "Invalid loop count");
3673 const SCEV *ExitCount = SE->getAddExpr(
3674 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
3675 const SCEV *Rem = SE->getURemExpr(
3676 SE->applyLoopGuards(ExitCount, TheLoop),
3677 SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
3678 return Rem->isZero();
3679 };
3680
3681 if (MaxPowerOf2RuntimeVF > 0u) {
3682 assert((UserVF.isNonZero() || isPowerOf2_32(*MaxPowerOf2RuntimeVF)) &&
3683 "MaxFixedVF must be a power of 2");
3684 if (NoScalarEpilogueNeeded(*MaxPowerOf2RuntimeVF)) {
3685 // Accept MaxFixedVF if we do not have a tail.
3686 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
3687 return MaxFactors;
3688 }
3689 }
3690
3691 auto ExpectedTC = getSmallBestKnownTC(PSE, TheLoop);
3692 if (ExpectedTC && ExpectedTC->isFixed() &&
3693 ExpectedTC->getFixedValue() <=
3694 TTI.getMinTripCountTailFoldingThreshold()) {
3695 if (MaxPowerOf2RuntimeVF > 0u) {
3696 // If we have a low-trip-count, and the fixed-width VF is known to divide
3697 // the trip count but the scalable factor does not, use the fixed-width
3698 // factor in preference to allow the generation of a non-predicated loop.
3699 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedLowTripLoop &&
3700 NoScalarEpilogueNeeded(MaxFactors.FixedVF.getFixedValue())) {
3701 LLVM_DEBUG(dbgs() << "LV: Picking a fixed-width so that no tail will "
3702 "remain for any chosen VF.\n");
3703 MaxFactors.ScalableVF = ElementCount::getScalable(0);
3704 return MaxFactors;
3705 }
3706 }
3707
3709 "The trip count is below the minial threshold value.",
3710 "loop trip count is too low, avoiding vectorization", "LowTripCount",
3711 ORE, TheLoop);
3712 return FixedScalableVFPair::getNone();
3713 }
3714
3715 // If we don't know the precise trip count, or if the trip count that we
3716 // found modulo the vectorization factor is not zero, try to fold the tail
3717 // by masking.
3718 // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
3719 bool ContainsScalableVF = MaxFactors.ScalableVF.isNonZero();
3720 setTailFoldingStyle(ContainsScalableVF, UserIC);
3721 if (foldTailByMasking()) {
3722 if (foldTailWithEVL()) {
3723 LLVM_DEBUG(
3724 dbgs()
3725 << "LV: tail is folded with EVL, forcing unroll factor to be 1. Will "
3726 "try to generate VP Intrinsics with scalable vector "
3727 "factors only.\n");
3728 // Tail folded loop using VP intrinsics restricts the VF to be scalable
3729 // for now.
3730 // TODO: extend it for fixed vectors, if required.
3731 assert(ContainsScalableVF && "Expected scalable vector factor.");
3732
3733 MaxFactors.FixedVF = ElementCount::getFixed(1);
3734 }
3735 return MaxFactors;
3736 }
3737
3738 // If there was a tail-folding hint/switch, but we can't fold the tail by
3739 // masking, fallback to a vectorization with a scalar epilogue.
3740 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
3741 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
3742 "scalar epilogue instead.\n");
3743 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
3744 return MaxFactors;
3745 }
3746
3747 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
3748 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
3749 return FixedScalableVFPair::getNone();
3750 }
3751
3752 if (TC.isZero()) {
3754 "unable to calculate the loop count due to complex control flow",
3755 "UnknownLoopCountComplexCFG", ORE, TheLoop);
3756 return FixedScalableVFPair::getNone();
3757 }
3758
3760 "Cannot optimize for size and vectorize at the same time.",
3761 "cannot optimize for size and vectorize at the same time. "
3762 "Enable vectorization of this loop with '#pragma clang loop "
3763 "vectorize(enable)' when compiling with -Os/-Oz",
3764 "NoTailLoopWithOptForSize", ORE, TheLoop);
3765 return FixedScalableVFPair::getNone();
3766}
3767
3768 bool LoopVectorizationCostModel::shouldConsiderRegPressureForVF(
3769 ElementCount VF) {
3770 if (ConsiderRegPressure.getNumOccurrences())
3771 return ConsiderRegPressure;
3772
3773 // TODO: We should eventually consider register pressure for all targets. The
3774 // TTI hook is temporary whilst target-specific issues are being fixed.
3775 if (TTI.shouldConsiderVectorizationRegPressure())
3776 return true;
3777
3778 if (!useMaxBandwidth(VF.isScalable()
3779 ? TargetTransformInfo::RGK_ScalableVector
3780 : TargetTransformInfo::RGK_FixedWidthVector))
3781 return false;
3782 // Only calculate register pressure for VFs enabled by MaxBandwidth.
3783 return ElementCount::isKnownGT(
3784 VF, VF.isScalable() ? MaxPermissibleVFWithoutMaxBW.ScalableVF
3785 : MaxPermissibleVFWithoutMaxBW.FixedVF);
3786}
3787
3788 bool LoopVectorizationCostModel::useMaxBandwidth(
3789 TargetTransformInfo::RegisterKind RegKind) {
3790 return MaximizeBandwidth || (MaximizeBandwidth.getNumOccurrences() == 0 &&
3791 (TTI.shouldMaximizeVectorBandwidth(RegKind) ||
3792 (UseWiderVFIfCallVariantsPresent &&
3793 Legal->hasVectorCallVariants())));
3794}
3795
3796ElementCount LoopVectorizationCostModel::clampVFByMaxTripCount(
3797 ElementCount VF, unsigned MaxTripCount, unsigned UserIC,
3798 bool FoldTailByMasking) const {
3799 unsigned EstimatedVF = VF.getKnownMinValue();
3800 if (VF.isScalable() && TheFunction->hasFnAttribute(Attribute::VScaleRange)) {
3801 auto Attr = TheFunction->getFnAttribute(Attribute::VScaleRange);
3802 auto Min = Attr.getVScaleRangeMin();
3803 EstimatedVF *= Min;
3804 }
3805
3806 // When a scalar epilogue is required, at least one iteration of the scalar
3807 // loop has to execute. Adjust MaxTripCount accordingly to avoid picking a
3808 // max VF that results in a dead vector loop.
3809 if (MaxTripCount > 0 && requiresScalarEpilogue(true))
3810 MaxTripCount -= 1;
3811
3812 // When the user specifies an interleave count, we need to ensure that
3813 // VF * UserIC <= MaxTripCount to avoid a dead vector loop.
3814 unsigned IC = UserIC > 0 ? UserIC : 1;
3815 unsigned EstimatedVFTimesIC = EstimatedVF * IC;
3816
3817 if (MaxTripCount && MaxTripCount <= EstimatedVFTimesIC &&
3818 (!FoldTailByMasking || isPowerOf2_32(MaxTripCount))) {
3819 // If upper bound loop trip count (TC) is known at compile time there is no
3820 // point in choosing VF greater than TC / IC (as done in the loop below).
3821 // Select maximum power of two which doesn't exceed TC / IC. If VF is
3822 // scalable, we only fall back on a fixed VF when the TC is less than or
3823 // equal to the known number of lanes.
3824 auto ClampedUpperTripCount = llvm::bit_floor(MaxTripCount / IC);
3825 if (ClampedUpperTripCount == 0)
3826 ClampedUpperTripCount = 1;
3827 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not "
3828 "exceeding the constant trip count"
3829 << (UserIC > 0 ? " divided by UserIC" : "") << ": "
3830 << ClampedUpperTripCount << "\n");
3831 return ElementCount::get(ClampedUpperTripCount,
3832 FoldTailByMasking ? VF.isScalable() : false);
3833 }
3834 return VF;
3835}
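// Editor's note - a worked example of the clamping above (assumed numbers):
// MaxTripCount = 17 with a required scalar epilogue leaves 16 usable
// iterations; with UserIC = 2, bit_floor(16 / 2) = 8, so the VF is clamped
// to 8 whenever the estimated VF times IC would otherwise reach or exceed
// the 16 usable iterations and leave the vector loop dead.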
3836
3837ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
3838 unsigned MaxTripCount, unsigned SmallestType, unsigned WidestType,
3839 ElementCount MaxSafeVF, unsigned UserIC, bool FoldTailByMasking) {
3840 bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
3841 const TypeSize WidestRegister = TTI.getRegisterBitWidth(
3842 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
3843 : TargetTransformInfo::RGK_FixedWidthVector);
3844
3845 // Convenience function to return the minimum of two ElementCounts.
3846 auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
3847 assert((LHS.isScalable() == RHS.isScalable()) &&
3848 "Scalable flags must match");
3849 return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
3850 };
3851
3852 // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
3853 // Note that both WidestRegister and WidestType may not be powers of 2.
3854 auto MaxVectorElementCount = ElementCount::get(
3855 llvm::bit_floor(WidestRegister.getKnownMinValue() / WidestType),
3856 ComputeScalableMaxVF);
3857 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
3858 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
3859 << (MaxVectorElementCount * WidestType) << " bits.\n");
3860
3861 if (!MaxVectorElementCount) {
3862 LLVM_DEBUG(dbgs() << "LV: The target has no "
3863 << (ComputeScalableMaxVF ? "scalable" : "fixed")
3864 << " vector registers.\n");
3865 return ElementCount::getFixed(1);
3866 }
3867
3868 ElementCount MaxVF = clampVFByMaxTripCount(
3869 MaxVectorElementCount, MaxTripCount, UserIC, FoldTailByMasking);
3870 // If the MaxVF was already clamped, there's no point in trying to pick a
3871 // larger one.
3872 if (MaxVF != MaxVectorElementCount)
3873 return MaxVF;
3874
3876 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
3877 : TargetTransformInfo::RGK_FixedWidthVector;
3878
3879 if (MaxVF.isScalable())
3880 MaxPermissibleVFWithoutMaxBW.ScalableVF = MaxVF;
3881 else
3882 MaxPermissibleVFWithoutMaxBW.FixedVF = MaxVF;
3883
3884 if (useMaxBandwidth(RegKind)) {
3885 auto MaxVectorElementCountMaxBW = ElementCount::get(
3886 llvm::bit_floor(WidestRegister.getKnownMinValue() / SmallestType),
3887 ComputeScalableMaxVF);
3888 MaxVF = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
3889
3890 if (ElementCount MinVF =
3891 TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
3892 if (ElementCount::isKnownLT(MaxVF, MinVF)) {
3893 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
3894 << ") with target's minimum: " << MinVF << '\n');
3895 MaxVF = MinVF;
3896 }
3897 }
3898
3899 MaxVF =
3900 clampVFByMaxTripCount(MaxVF, MaxTripCount, UserIC, FoldTailByMasking);
3901
3902 if (MaxVectorElementCount != MaxVF) {
3903 // Invalidate any widening decisions we might have made, in case the loop
3904 // requires predication (decided later), but we have already made some
3905 // load/store widening decisions.
3906 invalidateCostModelingDecisions();
3907 }
3908 }
3909 return MaxVF;
3910}
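// Editor's note - a worked example of the computation above (assumed
// numbers): with a 128-bit widest register and WidestType = 32 bits,
// MaxVectorElementCount = bit_floor(128 / 32) = 4 lanes. If the loop also
// touches i8 values (SmallestType = 8) and the target maximizes bandwidth,
// the candidate VF grows to bit_floor(128 / 8) = 16 lanes, with the wider
// types split into multiple registers during legalization.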
3911
3912bool LoopVectorizationPlanner::isMoreProfitable(const VectorizationFactor &A,
3913 const VectorizationFactor &B,
3914 const unsigned MaxTripCount,
3915 bool HasTail,
3916 bool IsEpilogue) const {
3917 InstructionCost CostA = A.Cost;
3918 InstructionCost CostB = B.Cost;
3919
3920 // Improve estimate for the vector width if it is scalable.
3921 unsigned EstimatedWidthA = A.Width.getKnownMinValue();
3922 unsigned EstimatedWidthB = B.Width.getKnownMinValue();
3923 if (std::optional<unsigned> VScale = CM.getVScaleForTuning()) {
3924 if (A.Width.isScalable())
3925 EstimatedWidthA *= *VScale;
3926 if (B.Width.isScalable())
3927 EstimatedWidthB *= *VScale;
3928 }
3929
3930 // When optimizing for size choose whichever is smallest, which will be the
3931 // one with the smallest cost for the whole loop. On a tie pick the larger
3932 // vector width, on the assumption that throughput will be greater.
3933 if (CM.CostKind == TTI::TCK_CodeSize)
3934 return CostA < CostB ||
3935 (CostA == CostB && EstimatedWidthA > EstimatedWidthB);
3936
3937 // Assume vscale may be larger than 1 (or the value being tuned for),
3938 // so that scalable vectorization is slightly favorable over fixed-width
3939 // vectorization.
3940 bool PreferScalable = !TTI.preferFixedOverScalableIfEqualCost(IsEpilogue) &&
3941 A.Width.isScalable() && !B.Width.isScalable();
3942
3943 auto CmpFn = [PreferScalable](const InstructionCost &LHS,
3944 const InstructionCost &RHS) {
3945 return PreferScalable ? LHS <= RHS : LHS < RHS;
3946 };
3947
3948 // To avoid the need for FP division:
3949 // (CostA / EstimatedWidthA) < (CostB / EstimatedWidthB)
3950 // <=> (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA)
3951 if (!MaxTripCount)
3952 return CmpFn(CostA * EstimatedWidthB, CostB * EstimatedWidthA);
3953
3954 auto GetCostForTC = [MaxTripCount, HasTail](unsigned VF,
3955 InstructionCost VectorCost,
3956 InstructionCost ScalarCost) {
3957 // If the trip count is a known (possibly small) constant, the trip count
3958 // will be rounded up to an integer number of iterations under
3959 // FoldTailByMasking. The total cost in that case will be
3960 // VecCost*ceil(TripCount/VF). When not folding the tail, the total
3961 // cost will be VecCost*floor(TC/VF) + ScalarCost*(TC%VF). There will be
3962 // some extra overheads, but for the purpose of comparing the costs of
3963 // different VFs we can use this to compare the total loop-body cost
3964 // expected after vectorization.
3965 if (HasTail)
3966 return VectorCost * (MaxTripCount / VF) +
3967 ScalarCost * (MaxTripCount % VF);
3968 return VectorCost * divideCeil(MaxTripCount, VF);
3969 };
3970
3971 auto RTCostA = GetCostForTC(EstimatedWidthA, CostA, A.ScalarCost);
3972 auto RTCostB = GetCostForTC(EstimatedWidthB, CostB, B.ScalarCost);
3973 return CmpFn(RTCostA, RTCostB);
3974}
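// Editor's note - a worked comparison using GetCostForTC above (assumed
// numbers, HasTail = true): with MaxTripCount = 10 and ScalarCost = 1,
// VF A = 4 with CostA = 8 gives 8 * (10 / 4) + 1 * (10 % 4) = 18, while
// VF B = 2 with CostB = 5 gives 5 * (10 / 2) + 1 * (10 % 2) = 25, so A is
// more profitable even though its per-iteration body is more expensive.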
3975
3976bool LoopVectorizationPlanner::isMoreProfitable(const VectorizationFactor &A,
3977 const VectorizationFactor &B,
3978 bool HasTail,
3979 bool IsEpilogue) const {
3980 const unsigned MaxTripCount = PSE.getSmallConstantMaxTripCount();
3981 return LoopVectorizationPlanner::isMoreProfitable(A, B, MaxTripCount, HasTail,
3982 IsEpilogue);
3983}
3984
3985 void LoopVectorizationPlanner::emitInvalidCostRemarks(
3986 OptimizationRemarkEmitter *ORE) {
3987 using RecipeVFPair = std::pair<VPRecipeBase *, ElementCount>;
3988 SmallVector<RecipeVFPair> InvalidCosts;
3989 for (const auto &Plan : VPlans) {
3990 for (ElementCount VF : Plan->vectorFactors()) {
3991 // The VPlan-based cost model is designed for computing vector costs.
3992 // Querying the VPlan-based cost model with a scalar VF will cause
3993 // errors because we expect the VF to be vector for most of the widen
3994 // recipes.
3995 if (VF.isScalar())
3996 continue;
3997
3998 VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind, CM.PSE,
3999 OrigLoop);
4000 precomputeCosts(*Plan, VF, CostCtx);
4001 auto Iter = vp_depth_first_deep(Plan->getVectorLoopRegion()->getEntry());
4002 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
4003 for (auto &R : *VPBB) {
4004 if (!R.cost(VF, CostCtx).isValid())
4005 InvalidCosts.emplace_back(&R, VF);
4006 }
4007 }
4008 }
4009 }
4010 if (InvalidCosts.empty())
4011 return;
4012
4013 // Emit a report of VFs with invalid costs in the loop.
4014
4015 // Group the remarks per recipe, keeping the recipe order from InvalidCosts.
4016 DenseMap<VPRecipeBase *, unsigned> Numbering;
4017 unsigned I = 0;
4018 for (auto &Pair : InvalidCosts)
4019 if (Numbering.try_emplace(Pair.first, I).second)
4020 ++I;
4021
4022 // Sort the list, first on recipe(number) then on VF.
4023 sort(InvalidCosts, [&Numbering](RecipeVFPair &A, RecipeVFPair &B) {
4024 unsigned NA = Numbering[A.first];
4025 unsigned NB = Numbering[B.first];
4026 if (NA != NB)
4027 return NA < NB;
4028 return ElementCount::isKnownLT(A.second, B.second);
4029 });
4030
4031 // For a list of ordered recipe-VF pairs:
4032 // [(load, VF1), (load, VF2), (store, VF1)]
4033 // group the recipes together to emit separate remarks for:
4034 // load (VF1, VF2)
4035 // store (VF1)
4036 auto Tail = ArrayRef<RecipeVFPair>(InvalidCosts);
4037 auto Subset = ArrayRef<RecipeVFPair>();
4038 do {
4039 if (Subset.empty())
4040 Subset = Tail.take_front(1);
4041
4042 VPRecipeBase *R = Subset.front().first;
4043
4044 unsigned Opcode =
4045 TypeSwitch<const VPRecipeBase *, unsigned>(R)
4046 .Case([](const VPHeaderPHIRecipe *R) { return Instruction::PHI; })
4047 .Case(
4048 [](const VPWidenStoreRecipe *R) { return Instruction::Store; })
4049 .Case([](const VPWidenLoadRecipe *R) { return Instruction::Load; })
4050 .Case<VPWidenCallRecipe, VPWidenIntrinsicRecipe>(
4051 [](const auto *R) { return Instruction::Call; })
4052 .Case<VPInstruction, VPWidenRecipe, VPReplicateRecipe,
4053 VPWidenCastRecipe>(
4054 [](const auto *R) { return R->getOpcode(); })
4055 .Case([](const VPInterleaveRecipe *R) {
4056 return R->getStoredValues().empty() ? Instruction::Load
4057 : Instruction::Store;
4058 })
4059 .Case([](const VPReductionRecipe *R) {
4060 return RecurrenceDescriptor::getOpcode(R->getRecurrenceKind());
4061 });
4062
4063 // If the next recipe is different, or if there are no other pairs,
4064 // emit a remark for the collated subset. e.g.
4065 // [(load, VF1), (load, VF2))]
4066 // to emit:
4067 // remark: invalid costs for 'load' at VF=(VF1, VF2)
4068 if (Subset == Tail || Tail[Subset.size()].first != R) {
4069 std::string OutString;
4070 raw_string_ostream OS(OutString);
4071 assert(!Subset.empty() && "Unexpected empty range");
4072 OS << "Recipe with invalid costs prevented vectorization at VF=(";
4073 for (const auto &Pair : Subset)
4074 OS << (Pair.second == Subset.front().second ? "" : ", ") << Pair.second;
4075 OS << "):";
4076 if (Opcode == Instruction::Call) {
4077 StringRef Name = "";
4078 if (auto *Int = dyn_cast<VPWidenIntrinsicRecipe>(R)) {
4079 Name = Int->getIntrinsicName();
4080 } else {
4081 auto *WidenCall = dyn_cast<VPWidenCallRecipe>(R);
4082 Function *CalledFn =
4083 WidenCall ? WidenCall->getCalledScalarFunction()
4084 : cast<Function>(R->getOperand(R->getNumOperands() - 1)
4085 ->getLiveInIRValue());
4086 Name = CalledFn->getName();
4087 }
4088 OS << " call to " << Name;
4089 } else
4090 OS << " " << Instruction::getOpcodeName(Opcode);
4091 reportVectorizationInfo(OutString, "InvalidCost", ORE, OrigLoop, nullptr,
4092 R->getDebugLoc());
4093 Tail = Tail.drop_front(Subset.size());
4094 Subset = {};
4095 } else
4096 // Grow the subset by one element
4097 Subset = Tail.take_front(Subset.size() + 1);
4098 } while (!Tail.empty());
4099}
4100
4101/// Check if any recipe of \p Plan will generate a vector value, which will be
4102/// assigned a vector register.
4103 static bool willGenerateVectors(VPlan &Plan, ElementCount VF,
4104 const TargetTransformInfo &TTI) {
4105 assert(VF.isVector() && "Checking a scalar VF?");
4106 VPTypeAnalysis TypeInfo(Plan);
4107 DenseSet<VPRecipeBase *> EphemeralRecipes;
4108 collectEphemeralRecipesForVPlan(Plan, EphemeralRecipes);
4109 // Set of already visited types.
4110 DenseSet<Type *> Visited;
4111 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
4112 vp_depth_first_shallow(Plan.getVectorLoopRegion()->getEntry()))) {
4113 for (VPRecipeBase &R : *VPBB) {
4114 if (EphemeralRecipes.contains(&R))
4115 continue;
4116 // Continue early if the recipe is considered to not produce a vector
4117 // result. Note that this includes VPInstruction where some opcodes may
4118 // produce a vector, to preserve existing behavior as VPInstructions model
4119 // aspects not directly mapped to existing IR instructions.
4120 switch (R.getVPRecipeID()) {
4121 case VPRecipeBase::VPDerivedIVSC:
4122 case VPRecipeBase::VPScalarIVStepsSC:
4123 case VPRecipeBase::VPReplicateSC:
4124 case VPRecipeBase::VPInstructionSC:
4125 case VPRecipeBase::VPCanonicalIVPHISC:
4126 case VPRecipeBase::VPCurrentIterationPHISC:
4127 case VPRecipeBase::VPVectorPointerSC:
4128 case VPRecipeBase::VPVectorEndPointerSC:
4129 case VPRecipeBase::VPExpandSCEVSC:
4130 case VPRecipeBase::VPPredInstPHISC:
4131 case VPRecipeBase::VPBranchOnMaskSC:
4132 continue;
4133 case VPRecipeBase::VPReductionSC:
4134 case VPRecipeBase::VPActiveLaneMaskPHISC:
4135 case VPRecipeBase::VPWidenCallSC:
4136 case VPRecipeBase::VPWidenCanonicalIVSC:
4137 case VPRecipeBase::VPWidenCastSC:
4138 case VPRecipeBase::VPWidenGEPSC:
4139 case VPRecipeBase::VPWidenIntrinsicSC:
4140 case VPRecipeBase::VPWidenSC:
4141 case VPRecipeBase::VPBlendSC:
4142 case VPRecipeBase::VPFirstOrderRecurrencePHISC:
4143 case VPRecipeBase::VPHistogramSC:
4144 case VPRecipeBase::VPWidenPHISC:
4145 case VPRecipeBase::VPWidenIntOrFpInductionSC:
4146 case VPRecipeBase::VPWidenPointerInductionSC:
4147 case VPRecipeBase::VPReductionPHISC:
4148 case VPRecipeBase::VPInterleaveEVLSC:
4149 case VPRecipeBase::VPInterleaveSC:
4150 case VPRecipeBase::VPWidenLoadEVLSC:
4151 case VPRecipeBase::VPWidenLoadSC:
4152 case VPRecipeBase::VPWidenStoreEVLSC:
4153 case VPRecipeBase::VPWidenStoreSC:
4154 break;
4155 default:
4156 llvm_unreachable("unhandled recipe");
4157 }
4158
4159 auto WillGenerateTargetVectors = [&TTI, VF](Type *VectorTy) {
4160 unsigned NumLegalParts = TTI.getNumberOfParts(VectorTy);
4161 if (!NumLegalParts)
4162 return false;
4163 if (VF.isScalable()) {
4164 // <vscale x 1 x iN> is assumed to be profitable over iN because
4165 // scalable registers are a distinct register class from scalar
4166 // ones. If we ever find a target which wants to lower scalable
4167 // vectors back to scalars, we'll need to update this code to
4168 // explicitly ask TTI about the register class uses for each part.
4169 return NumLegalParts <= VF.getKnownMinValue();
4170 }
4171 // Two or more elements that share a register are vectorized.
4172 return NumLegalParts < VF.getFixedValue();
4173 };
4174
4175 // If the recipe has no defs and is not a store (e.g., a branch), continue - no value to check.
4176 if (R.getNumDefinedValues() == 0 &&
4177 !isa<VPWidenStoreRecipe, VPWidenStoreEVLRecipe, VPInterleaveBase>(&R))
4178 continue;
4179 // For multi-def recipes (currently only interleaved loads), it suffices
4180 // to check the first def only.
4181 // For stores, check the stored value; for interleaved stores it suffices
4182 // to check the first stored value only. In all cases this is the second
4183 // operand.
4184 VPValue *ToCheck =
4185 R.getNumDefinedValues() >= 1 ? R.getVPValue(0) : R.getOperand(1);
4186 Type *ScalarTy = TypeInfo.inferScalarType(ToCheck);
4187 if (!Visited.insert({ScalarTy}).second)
4188 continue;
4189 Type *WideTy = toVectorizedTy(ScalarTy, VF);
4190 if (any_of(getContainedTypes(WideTy), WillGenerateTargetVectors))
4191 return true;
4192 }
4193 }
4194
4195 return false;
4196}
4197
4198static bool hasReplicatorRegion(VPlan &Plan) {
4199 return any_of(VPBlockUtils::blocksOnly<VPRegionBlock>(vp_depth_first_deep(
4200 Plan.getVectorLoopRegion()->getEntry())),
4201 [](auto *VPRB) { return VPRB->isReplicator(); });
4202}
4203
4204#ifndef NDEBUG
4205VectorizationFactor LoopVectorizationPlanner::selectVectorizationFactor() {
4206 InstructionCost ExpectedCost = CM.expectedCost(ElementCount::getFixed(1));
4207 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
4208 assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
4209 assert(
4210 any_of(VPlans,
4211 [](std::unique_ptr<VPlan> &P) { return P->hasScalarVFOnly(); }) &&
4212 "Expected Scalar VF to be a candidate");
4213
4214 const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost,
4215 ExpectedCost);
4216 VectorizationFactor ChosenFactor = ScalarCost;
4217
4218 bool ForceVectorization = Hints.getForce() == LoopVectorizeHints::FK_Enabled;
4219 if (ForceVectorization &&
4220 (VPlans.size() > 1 || !VPlans[0]->hasScalarVFOnly())) {
4221 // Ignore scalar width, because the user explicitly wants vectorization.
4222 // Initialize cost to max so that VF = 2 is, at least, chosen during cost
4223 // evaluation.
4224 ChosenFactor.Cost = InstructionCost::getMax();
4225 }
4226
4227 for (auto &P : VPlans) {
4228 ArrayRef<ElementCount> VFs(P->vectorFactors().begin(),
4229 P->vectorFactors().end());
4230
4231 SmallVector<VPRegisterUsage, 8> RUs;
4232 if (any_of(VFs, [this](ElementCount VF) {
4233 return CM.shouldConsiderRegPressureForVF(VF);
4234 }))
4235 RUs = calculateRegisterUsageForPlan(*P, VFs, TTI, CM.ValuesToIgnore);
4236
4237 for (unsigned I = 0; I < VFs.size(); I++) {
4238 ElementCount VF = VFs[I];
4239 // The cost for scalar VF=1 is already calculated, so ignore it.
4240 if (VF.isScalar())
4241 continue;
4242
4243 InstructionCost C = CM.expectedCost(VF);
4244
4245 // Add on other costs that are modelled in VPlan, but not in the legacy
4246 // cost model.
4247 VPCostContext CostCtx(CM.TTI, *CM.TLI, *P, CM, CM.CostKind, CM.PSE,
4248 OrigLoop);
4249 VPRegionBlock *VectorRegion = P->getVectorLoopRegion();
4250 assert(VectorRegion && "Expected to have a vector region!");
4251 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
4252 vp_depth_first_shallow(VectorRegion->getEntry()))) {
4253 for (VPRecipeBase &R : *VPBB) {
4254 auto *VPI = dyn_cast<VPInstruction>(&R);
4255 if (!VPI)
4256 continue;
4257 switch (VPI->getOpcode()) {
4258 // Selects are only modelled in the legacy cost model for safe
4259 // divisors.
4260 case Instruction::Select: {
4261 if (auto *WR =
4262 dyn_cast_or_null<VPWidenRecipe>(VPI->getSingleUser())) {
4263 switch (WR->getOpcode()) {
4264 case Instruction::UDiv:
4265 case Instruction::SDiv:
4266 case Instruction::URem:
4267 case Instruction::SRem:
4268 continue;
4269 default:
4270 break;
4271 }
4272 }
4273 C += VPI->cost(VF, CostCtx);
4274 break;
4275 }
4276 case VPInstruction::ActiveLaneMask: {
4277 unsigned Multiplier =
4278 cast<VPConstantInt>(VPI->getOperand(2))->getZExtValue();
4279 C += VPI->cost(VF * Multiplier, CostCtx);
4280 break;
4281 }
4282 case VPInstruction::AnyOf:
4283 case VPInstruction::FirstActiveLane:
4284 C += VPI->cost(VF, CostCtx);
4285 break;
4286 default:
4287 break;
4288 }
4289 }
4290 }
4291
4292 // Add the cost of any spills due to excess register usage
4293 if (CM.shouldConsiderRegPressureForVF(VF))
4294 C += RUs[I].spillCost(CostCtx, ForceTargetNumVectorRegs);
4295
4296 VectorizationFactor Candidate(VF, C, ScalarCost.ScalarCost);
4297 unsigned Width =
4298 estimateElementCount(Candidate.Width, CM.getVScaleForTuning());
4299 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << VF
4300 << " costs: " << (Candidate.Cost / Width));
4301 if (VF.isScalable())
4302 LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of "
4303 << CM.getVScaleForTuning().value_or(1) << ")");
4304 LLVM_DEBUG(dbgs() << ".\n");
4305
4306 if (!ForceVectorization && !willGenerateVectors(*P, VF, TTI)) {
4307 LLVM_DEBUG(
4308 dbgs()
4309 << "LV: Not considering vector loop of width " << VF
4310 << " because it will not generate any vector instructions.\n");
4311 continue;
4312 }
4313
4314 if (CM.OptForSize && !ForceVectorization && hasReplicatorRegion(*P)) {
4315 LLVM_DEBUG(
4316 dbgs()
4317 << "LV: Not considering vector loop of width " << VF
4318 << " because it would cause replicated blocks to be generated,"
4319 << " which isn't allowed when optimizing for size.\n");
4320 continue;
4321 }
4322
4323 if (isMoreProfitable(Candidate, ChosenFactor, P->hasScalarTail()))
4324 ChosenFactor = Candidate;
4325 }
4326 }
4327
4328 if (!EnableCondStoresVectorization && CM.hasPredStores()) {
4330 "There are conditional stores.",
4331 "store that is conditionally executed prevents vectorization",
4332 "ConditionalStore", ORE, OrigLoop);
4333 ChosenFactor = ScalarCost;
4334 }
4335
4336 LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
4337 !isMoreProfitable(ChosenFactor, ScalarCost,
4338 !CM.foldTailByMasking())) dbgs()
4339 << "LV: Vectorization seems to be not beneficial, "
4340 << "but was forced by a user.\n");
4341 return ChosenFactor;
4342}
4343#endif
4344
4345/// Returns true if the VPlan contains a VPReductionPHIRecipe with
4346/// FindLast recurrence kind.
4347static bool hasFindLastReductionPhi(VPlan &Plan) {
4348 return any_of(Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis(),
4349 [](VPRecipeBase &R) {
4350 auto *RedPhi = dyn_cast<VPReductionPHIRecipe>(&R);
4351 return RedPhi &&
4352 RecurrenceDescriptor::isFindLastRecurrenceKind(
4353 RedPhi->getRecurrenceKind());
4354 });
4355}
4356
4357/// Returns true if the VPlan contains header phi recipes that are not currently
4358/// supported for epilogue vectorization.
4359 static bool hasUnsupportedHeaderPhiRecipe(VPlan &Plan) {
4360 return any_of(
4361 Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis(),
4362 [](VPRecipeBase &R) {
4363 if (auto *WidenInd = dyn_cast<VPWidenIntOrFpInductionRecipe>(&R))
4364 return !WidenInd->getPHINode();
4365 auto *RedPhi = dyn_cast<VPReductionPHIRecipe>(&R);
4366 return RedPhi && (RecurrenceDescriptor::isFindLastRecurrenceKind(
4367 RedPhi->getRecurrenceKind()) ||
4368 !RedPhi->getUnderlyingValue());
4369 });
4370}
4371
4372bool LoopVectorizationPlanner::isCandidateForEpilogueVectorization(
4373 ElementCount VF) const {
4374 // Cross iteration phis such as fixed-order recurrences and FMaxNum/FMinNum
4375 // reductions need special handling and are currently unsupported.
4376 if (any_of(OrigLoop->getHeader()->phis(), [&](PHINode &Phi) {
4377 if (!Legal->isReductionVariable(&Phi))
4378 return Legal->isFixedOrderRecurrence(&Phi);
4379 RecurKind Kind =
4380 Legal->getRecurrenceDescriptor(&Phi).getRecurrenceKind();
4381 return RecurrenceDescriptor::isFPMinMaxNumRecurrenceKind(Kind);
4382 }))
4383 return false;
4384
4385 // FindLast reductions and inductions without underlying PHI require special
4386 // handling and are currently not supported for epilogue vectorization.
4387 if (hasUnsupportedHeaderPhiRecipe(getPlanFor(VF)))
4388 return false;
4389
4390 // Phis with uses outside of the loop require special handling and are
4391 // currently unsupported.
4392 for (const auto &Entry : Legal->getInductionVars()) {
4393 // Look for uses of the value of the induction at the last iteration.
4394 Value *PostInc =
4395 Entry.first->getIncomingValueForBlock(OrigLoop->getLoopLatch());
4396 for (User *U : PostInc->users())
4397 if (!OrigLoop->contains(cast<Instruction>(U)))
4398 return false;
4399 // Look for uses of penultimate value of the induction.
4400 for (User *U : Entry.first->users())
4401 if (!OrigLoop->contains(cast<Instruction>(U)))
4402 return false;
4403 }
4404
4405 // Epilogue vectorization code has not been audited to ensure it handles
4406 // non-latch exits properly. It may be fine, but it needs to be audited
4407 // and tested.
4408 // TODO: Add support for loops with an early exit.
4409 if (OrigLoop->getExitingBlock() != OrigLoop->getLoopLatch())
4410 return false;
4411
4412 return true;
4413}
4414
4415 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
4416 const ElementCount VF, const unsigned IC) const {
4417 // FIXME: We need a much better cost-model to take different parameters such
4418 // as register pressure, code size increase and cost of extra branches into
4419 // account. For now we apply a very crude heuristic and only consider loops
4420 // with vectorization factors larger than a certain value.
4421
4422 // Allow the target to opt out.
4423 if (!TTI.preferEpilogueVectorization(VF * IC))
4424 return false;
4425
4426 unsigned MinVFThreshold = EpilogueVectorizationMinVF.getNumOccurrences() > 0
4427 ? EpilogueVectorizationMinVF
4428 : TTI.getEpilogueVectorizationMinVF();
4429 return estimateElementCount(VF * IC, VScaleForTuning) >= MinVFThreshold;
4430}
4431
4432 VectorizationFactor LoopVectorizationPlanner::selectEpilogueVectorizationFactor(
4433 const ElementCount MainLoopVF, unsigned IC) {
4434 VectorizationFactor Result = VectorizationFactor::Disabled();
4435 if (!EnableEpilogueVectorization) {
4436 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n");
4437 return Result;
4438 }
4439
4440 if (!CM.isScalarEpilogueAllowed()) {
4441 LLVM_DEBUG(dbgs() << "LEV: Unable to vectorize epilogue because no "
4442 "epilogue is allowed.\n");
4443 return Result;
4444 }
4445
4446 // Not really a cost consideration, but check for unsupported cases here to
4447 // simplify the logic.
4448 if (!isCandidateForEpilogueVectorization(MainLoopVF)) {
4449 LLVM_DEBUG(dbgs() << "LEV: Unable to vectorize epilogue because the loop "
4450 "is not a supported candidate.\n");
4451 return Result;
4452 }
4453
4454 if (EpilogueVectorizationForceVF > 1) {
4455 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n");
4456 ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF);
4457 if (hasPlanWithVF(ForcedEC))
4458 return {ForcedEC, 0, 0};
4459
4460 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization forced factor is not "
4461 "viable.\n");
4462 return Result;
4463 }
4464
4465 if (OrigLoop->getHeader()->getParent()->hasOptSize()) {
4466 LLVM_DEBUG(
4467 dbgs() << "LEV: Epilogue vectorization skipped due to opt for size.\n");
4468 return Result;
4469 }
4470
4471 if (!CM.isEpilogueVectorizationProfitable(MainLoopVF, IC)) {
4472 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
4473 "this loop\n");
4474 return Result;
4475 }
4476
4477 // If MainLoopVF = vscale x 2, and vscale is expected to be 4, then we know
4478 // the main loop handles 8 lanes per iteration. We could still benefit from
4479 // vectorizing the epilogue loop with VF=4.
4480 ElementCount EstimatedRuntimeVF = ElementCount::getFixed(
4481 estimateElementCount(MainLoopVF, CM.getVScaleForTuning()));
4482
4483 Type *TCType = Legal->getWidestInductionType();
4484 const SCEV *RemainingIterations = nullptr;
4485 unsigned MaxTripCount = 0;
4486 const SCEV *TC = vputils::getSCEVExprForVPValue(
4487 getPlanFor(MainLoopVF).getTripCount(), PSE);
4488 assert(!isa<SCEVCouldNotCompute>(TC) && "Trip count SCEV must be computable");
4489 const SCEV *KnownMinTC;
4490 bool ScalableTC = match(TC, m_scev_c_Mul(m_SCEV(KnownMinTC), m_SCEVVScale()));
4491 bool ScalableRemIter = false;
4492 ScalarEvolution &SE = *PSE.getSE();
4493 // Use versions of TC and VF in which both are either scalable or fixed.
4494 if (ScalableTC == MainLoopVF.isScalable()) {
4495 ScalableRemIter = ScalableTC;
4496 RemainingIterations =
4497 SE.getURemExpr(TC, SE.getElementCount(TCType, MainLoopVF * IC));
4498 } else if (ScalableTC) {
4499 const SCEV *EstimatedTC = SE.getMulExpr(
4500 KnownMinTC,
4501 SE.getConstant(TCType, CM.getVScaleForTuning().value_or(1)));
4502 RemainingIterations = SE.getURemExpr(
4503 EstimatedTC, SE.getElementCount(TCType, MainLoopVF * IC));
4504 } else
4505 RemainingIterations =
4506 SE.getURemExpr(TC, SE.getElementCount(TCType, EstimatedRuntimeVF * IC));
4507
4508 // No iterations left to process in the epilogue.
4509 if (RemainingIterations->isZero())
4510 return Result;
4511
4512 if (MainLoopVF.isFixed()) {
4513 MaxTripCount = MainLoopVF.getFixedValue() * IC - 1;
4514 if (SE.isKnownPredicate(CmpInst::ICMP_ULT, RemainingIterations,
4515 SE.getConstant(TCType, MaxTripCount))) {
4516 MaxTripCount = SE.getUnsignedRangeMax(RemainingIterations).getZExtValue();
4517 }
4518 LLVM_DEBUG(dbgs() << "LEV: Maximum Trip Count for Epilogue: "
4519 << MaxTripCount << "\n");
4520 }
4521
4522 auto SkipVF = [&](const SCEV *VF, const SCEV *RemIter) -> bool {
4523 return SE.isKnownPredicate(CmpInst::ICMP_UGT, VF, RemIter);
4524 };
4525 for (auto &NextVF : ProfitableVFs) {
4526 // Skip candidate VFs without a corresponding VPlan.
4527 if (!hasPlanWithVF(NextVF.Width))
4528 continue;
4529
4530 // Skip candidate VFs with widths >= the (estimated) runtime VF (scalable
4531 // vectors) or > the VF of the main loop (fixed vectors).
4532 if ((!NextVF.Width.isScalable() && MainLoopVF.isScalable() &&
4533 ElementCount::isKnownGE(NextVF.Width, EstimatedRuntimeVF)) ||
4534 (NextVF.Width.isScalable() &&
4535 ElementCount::isKnownGE(NextVF.Width, MainLoopVF)) ||
4536 (!NextVF.Width.isScalable() && !MainLoopVF.isScalable() &&
4537 ElementCount::isKnownGT(NextVF.Width, MainLoopVF)))
4538 continue;
4539
4540 // If NextVF is greater than the number of remaining iterations, the
4541 // epilogue loop would be dead. Skip such factors.
4542 // TODO: We should also consider comparing against a scalable
4543 // RemainingIterations once SCEV is able to evaluate non-canonical
4544 // vscale-based expressions.
4545 if (!ScalableRemIter) {
4546 // Handle the case where NextVF and RemainingIterations are in different
4547 // numerical spaces.
4548 ElementCount EC = NextVF.Width;
4549 if (NextVF.Width.isScalable())
4550 EC = ElementCount::getFixed(
4551 estimateElementCount(NextVF.Width, CM.getVScaleForTuning()));
4552 if (SkipVF(SE.getElementCount(TCType, EC), RemainingIterations))
4553 continue;
4554 }
4555
4556 if (Result.Width.isScalar() ||
4557 isMoreProfitable(NextVF, Result, MaxTripCount, !CM.foldTailByMasking(),
4558 /*IsEpilogue*/ true))
4559 Result = NextVF;
4560 }
4561
4562 if (Result != VectorizationFactor::Disabled())
4563 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
4564 << Result.Width << "\n");
4565 return Result;
4566}
4567
4568std::pair<unsigned, unsigned>
4570 unsigned MinWidth = -1U;
4571 unsigned MaxWidth = 8;
4572 const DataLayout &DL = TheFunction->getDataLayout();
4573 // For in-loop reductions, no element types are added to ElementTypesInLoop
4574 // if there are no loads/stores in the loop. In this case, check through the
4575 // reduction variables to determine the maximum width.
4576 if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) {
4577 for (const auto &PhiDescriptorPair : Legal->getReductionVars()) {
4578 const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second;
4579 // When finding the min width used by the recurrence we need to account
4580 // for casts on the input operands of the recurrence.
4581 MinWidth = std::min(
4582 MinWidth,
4583 std::min(RdxDesc.getMinWidthCastToRecurrenceTypeInBits(),
4584 RdxDesc.getRecurrenceType()->getScalarSizeInBits()));
4585 MaxWidth = std::max(MaxWidth,
4586 RdxDesc.getRecurrenceType()->getScalarSizeInBits());
4587 }
4588 } else {
4589 for (Type *T : ElementTypesInLoop) {
4590 MinWidth = std::min<unsigned>(
4591 MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedValue());
4592 MaxWidth = std::max<unsigned>(
4593 MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedValue());
4594 }
4595 }
4596 return {MinWidth, MaxWidth};
4597}
4598
4599void LoopVectorizationCostModel::collectElementTypesForWidening() {
4600  ElementTypesInLoop.clear();
4601 // For each block.
4602 for (BasicBlock *BB : TheLoop->blocks()) {
4603 // For each instruction in the loop.
4604 for (Instruction &I : *BB) {
4605 Type *T = I.getType();
4606
4607 // Skip ignored values.
4608 if (ValuesToIgnore.count(&I))
4609 continue;
4610
4611 // Only examine Loads, Stores and PHINodes.
4612 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
4613 continue;
4614
4615 // Examine PHI nodes that are reduction variables. Update the type to
4616 // account for the recurrence type.
4617 if (auto *PN = dyn_cast<PHINode>(&I)) {
4618 if (!Legal->isReductionVariable(PN))
4619 continue;
4620 const RecurrenceDescriptor &RdxDesc =
4621 Legal->getRecurrenceDescriptor(PN);
4622        if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
4623            TTI.preferInLoopReduction(RdxDesc.getRecurrenceKind(),
4624 RdxDesc.getRecurrenceType()))
4625 continue;
4626 T = RdxDesc.getRecurrenceType();
4627 }
4628
4629 // Examine the stored values.
4630 if (auto *ST = dyn_cast<StoreInst>(&I))
4631 T = ST->getValueOperand()->getType();
4632
4633 assert(T->isSized() &&
4634 "Expected the load/store/recurrence type to be sized");
4635
4636 ElementTypesInLoop.insert(T);
4637 }
4638 }
4639}
4640
4641unsigned
4642LoopVectorizationPlanner::selectInterleaveCount(VPlan &Plan, ElementCount VF,
4643                                                InstructionCost LoopCost) {
4644 // -- The interleave heuristics --
4645 // We interleave the loop in order to expose ILP and reduce the loop overhead.
4646 // There are many micro-architectural considerations that we can't predict
4647 // at this level. For example, frontend pressure (on decode or fetch) due to
4648 // code size, or the number and capabilities of the execution ports.
4649 //
4650 // We use the following heuristics to select the interleave count:
4651 // 1. If the code has reductions, then we interleave to break the cross
4652 // iteration dependency.
4653 // 2. If the loop is really small, then we interleave to reduce the loop
4654 // overhead.
4655 // 3. We don't interleave if we think that we will spill registers to memory
4656 // due to the increased register pressure.
4657
4658  // Only interleave tail-folded loops if wide lane masks are requested, as
4659  // the overhead of the extra instructions needed to compute the predicate
4660  // likely outweighs the benefit. If a scalar epilogue is not allowed for any
4661  // other reason, do not interleave.
4662 if (!CM.isScalarEpilogueAllowed() &&
4663 !(CM.preferPredicatedLoop() && CM.useWideActiveLaneMask()))
4664 return 1;
4665
4667  if (CM.foldTailWithEVL()) {
4668    LLVM_DEBUG(dbgs() << "LV: Loop requires variable-length step. "
4669 "Unroll factor forced to be 1.\n");
4670 return 1;
4671 }
4672
4673  // The max safe dependence distance already bounds the effective vector
  // width, so do not interleave beyond it.
4674 if (!Legal->isSafeForAnyVectorWidth())
4675 return 1;
4676
4677 // We don't attempt to perform interleaving for loops with uncountable early
4678 // exits because the VPInstruction::AnyOf code cannot currently handle
4679 // multiple parts.
4680 if (Plan.hasEarlyExit())
4681 return 1;
4682
4683 const bool HasReductions =
4684      any_of(Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis(),
4685             IsaPred<VPReductionPHIRecipe>);
4686
4687 // FIXME: implement interleaving for FindLast transform correctly.
4688 if (hasFindLastReductionPhi(Plan))
4689 return 1;
4690
4691 VPRegisterUsage R =
4692 calculateRegisterUsageForPlan(Plan, {VF}, TTI, CM.ValuesToIgnore)[0];
4693
4694 // If we did not calculate the cost for VF (because the user selected the VF)
4695 // then we calculate the cost of VF here.
4696 if (LoopCost == 0) {
4697 if (VF.isScalar())
4698 LoopCost = CM.expectedCost(VF);
4699 else
4700 LoopCost = cost(Plan, VF, &R);
4701 assert(LoopCost.isValid() && "Expected to have chosen a VF with valid cost");
4702
4703 // Loop body is free and there is no need for interleaving.
4704 if (LoopCost == 0)
4705 return 1;
4706 }
4707
4708  // We divide by these counts below, so clamp each class's usage to at least
4709  // one register to avoid dividing by zero.
4710 for (auto &Pair : R.MaxLocalUsers) {
4711 Pair.second = std::max(Pair.second, 1U);
4712 }
4713
4714 // We calculate the interleave count using the following formula.
4715 // Subtract the number of loop invariants from the number of available
4716 // registers. These registers are used by all of the interleaved instances.
4717 // Next, divide the remaining registers by the number of registers that is
4718 // required by the loop, in order to estimate how many parallel instances
4719 // fit without causing spills. All of this is rounded down if necessary to be
4720 // a power of two. We want power of two interleave count to simplify any
4721 // addressing operations or alignment considerations.
4722 // We also want power of two interleave counts to ensure that the induction
4723 // variable of the vector loop wraps to zero, when tail is folded by masking;
4724 // this currently happens when OptForSize, in which case IC is set to 1 above.
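  // As a hedged illustration with assumed numbers (not tied to any target):
  // with 32 registers in a class, 2 of them held by loop invariants and a
  // peak local usage of 6 registers, (32 - 2) / 6 = 5 instances fit, and
  // bit_floor(5) = 4 becomes the candidate interleave count for that class.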
4725 unsigned IC = UINT_MAX;
4726
4727 for (const auto &Pair : R.MaxLocalUsers) {
4728 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(Pair.first);
4729 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
4730 << " registers of "
4731 << TTI.getRegisterClassName(Pair.first)
4732 << " register class\n");
4733 if (VF.isScalar()) {
4734 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
4735 TargetNumRegisters = ForceTargetNumScalarRegs;
4736 } else {
4737 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
4738 TargetNumRegisters = ForceTargetNumVectorRegs;
4739 }
4740 unsigned MaxLocalUsers = Pair.second;
4741 unsigned LoopInvariantRegs = 0;
4742 if (R.LoopInvariantRegs.contains(Pair.first))
4743 LoopInvariantRegs = R.LoopInvariantRegs[Pair.first];
4744
4745 unsigned TmpIC = llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs) /
4746 MaxLocalUsers);
4747 // Don't count the induction variable as interleaved.
4748    if (EnableIndVarRegisterHeur) {
4749      TmpIC = llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs - 1) /
4750 std::max(1U, (MaxLocalUsers - 1)));
4751 }
4752
4753 IC = std::min(IC, TmpIC);
4754 }
4755
4756 // Clamp the interleave ranges to reasonable counts.
4757 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
4758 LLVM_DEBUG(dbgs() << "LV: MaxInterleaveFactor for the target is "
4759 << MaxInterleaveCount << "\n");
4760
4761 // Check if the user has overridden the max.
4762 if (VF.isScalar()) {
4763 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
4764 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
4765 } else {
4766 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
4767 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
4768 }
4769
4770 // Try to get the exact trip count, or an estimate based on profiling data or
4771 // ConstantMax from PSE, failing that.
4772 auto BestKnownTC = getSmallBestKnownTC(PSE, OrigLoop);
4773
4774 // For fixed length VFs treat a scalable trip count as unknown.
4775 if (BestKnownTC && (BestKnownTC->isFixed() || VF.isScalable())) {
4776 // Re-evaluate trip counts and VFs to be in the same numerical space.
4777 unsigned AvailableTC =
4778 estimateElementCount(*BestKnownTC, CM.getVScaleForTuning());
4779 unsigned EstimatedVF = estimateElementCount(VF, CM.getVScaleForTuning());
4780
4781 // At least one iteration must be scalar when this constraint holds. So the
4782 // maximum available iterations for interleaving is one less.
4783 if (CM.requiresScalarEpilogue(VF.isVector()))
4784 --AvailableTC;
4785
4786 unsigned InterleaveCountLB = bit_floor(std::max(
4787 1u, std::min(AvailableTC / (EstimatedVF * 2), MaxInterleaveCount)));
4788
4789 if (getSmallConstantTripCount(PSE.getSE(), OrigLoop).isNonZero()) {
4790 // If the best known trip count is exact, we select between two
4791 // prospective ICs, where
4792 //
4793 // 1) the aggressive IC is capped by the trip count divided by VF
4794 // 2) the conservative IC is capped by the trip count divided by (VF * 2)
4795 //
4796 // The final IC is selected in a way that the epilogue loop trip count is
4797 // minimized while maximizing the IC itself, so that we either run the
4798 // vector loop at least once if it generates a small epilogue loop, or
4799 // else we run the vector loop at least twice.
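      // Illustration with assumed values: for AvailableTC = 20, EstimatedVF = 4
      // and MaxInterleaveCount = 8, the conservative IC is
      // bit_floor(min(20 / 8, 8)) = 2 and the aggressive IC is
      // bit_floor(min(20 / 4, 8)) = 4; both leave a scalar tail of 4
      // iterations (20 % 16 == 20 % 8 == 4), so the aggressive IC of 4 wins.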
4800
4801 unsigned InterleaveCountUB = bit_floor(std::max(
4802 1u, std::min(AvailableTC / EstimatedVF, MaxInterleaveCount)));
4803 MaxInterleaveCount = InterleaveCountLB;
4804
4805 if (InterleaveCountUB != InterleaveCountLB) {
4806 unsigned TailTripCountUB =
4807 (AvailableTC % (EstimatedVF * InterleaveCountUB));
4808 unsigned TailTripCountLB =
4809 (AvailableTC % (EstimatedVF * InterleaveCountLB));
4810        // If both produce the same scalar tail, maximize the IC to do the
4811        // same work in fewer vector loop iterations.
4812 if (TailTripCountUB == TailTripCountLB)
4813 MaxInterleaveCount = InterleaveCountUB;
4814 }
4815 } else {
4816        // If the trip count is only an estimated compile-time constant, cap
4817        // the IC at the trip count divided by VF * 2, so that the vector loop
4818        // runs at least twice. This makes interleaving appear profitable even
4819        // when an epilogue loop is present. Since the exact trip count is not
4820        // known, we choose to be conservative in our IC estimate.
4821 MaxInterleaveCount = InterleaveCountLB;
4822 }
4823 }
4824
4825 assert(MaxInterleaveCount > 0 &&
4826 "Maximum interleave count must be greater than 0");
4827
4828  // Clamp the calculated IC to be between 1 and the max interleave count
4829  // that the target and trip count allow.
4830 if (IC > MaxInterleaveCount)
4831 IC = MaxInterleaveCount;
4832 else
4833 // Make sure IC is greater than 0.
4834 IC = std::max(1u, IC);
4835
4836 assert(IC > 0 && "Interleave count must be greater than 0.");
4837
4838 // Interleave if we vectorized this loop and there is a reduction that could
4839 // benefit from interleaving.
4840 if (VF.isVector() && HasReductions) {
4841 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
4842 return IC;
4843 }
4844
4845 // For any scalar loop that either requires runtime checks or predication we
4846 // are better off leaving this to the unroller. Note that if we've already
4847 // vectorized the loop we will have done the runtime check and so interleaving
4848 // won't require further checks.
4849 bool ScalarInterleavingRequiresPredication =
4850 (VF.isScalar() && any_of(OrigLoop->blocks(), [this](BasicBlock *BB) {
4851 return Legal->blockNeedsPredication(BB);
4852 }));
4853 bool ScalarInterleavingRequiresRuntimePointerCheck =
4854 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
4855
4856 // We want to interleave small loops in order to reduce the loop overhead and
4857 // potentially expose ILP opportunities.
4858 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
4859 << "LV: IC is " << IC << '\n'
4860 << "LV: VF is " << VF << '\n');
4861 const bool AggressivelyInterleave =
4862 TTI.enableAggressiveInterleaving(HasReductions);
4863 if (!ScalarInterleavingRequiresRuntimePointerCheck &&
4864 !ScalarInterleavingRequiresPredication && LoopCost < SmallLoopCost) {
4865 // We assume that the cost overhead is 1 and we use the cost model
4866 // to estimate the cost of the loop and interleave until the cost of the
4867 // loop overhead is about 5% of the cost of the loop.
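    // Illustration with assumed costs: if LoopCost = 4 and SmallLoopCost has
    // its default of 20, then SmallIC = min(IC, bit_floor(20 / 4)) =
    // min(IC, 4), i.e. up to 4 copies keep the loop overhead near 5%.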
4868 unsigned SmallIC = std::min(IC, (unsigned)llvm::bit_floor<uint64_t>(
4869 SmallLoopCost / LoopCost.getValue()));
4870
4871 // Interleave until store/load ports (estimated by max interleave count) are
4872 // saturated.
4873 unsigned NumStores = 0;
4874 unsigned NumLoads = 0;
4875    for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
4876             vp_depth_first_deep(Plan.getVectorLoopRegion()->getEntry()))) {
4877      for (VPRecipeBase &R : *VPBB) {
4878        if (isa<VPWidenLoadRecipe, VPWidenLoadEVLRecipe>(&R)) {
4879 NumLoads++;
4880 continue;
4881 }
4882        if (isa<VPWidenStoreRecipe, VPWidenStoreEVLRecipe>(&R)) {
4883          NumStores++;
4884 continue;
4885 }
4886
4887 if (auto *InterleaveR = dyn_cast<VPInterleaveRecipe>(&R)) {
4888 if (unsigned StoreOps = InterleaveR->getNumStoreOperands())
4889 NumStores += StoreOps;
4890 else
4891 NumLoads += InterleaveR->getNumDefinedValues();
4892 continue;
4893 }
4894 if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R)) {
4895 NumLoads += isa<LoadInst>(RepR->getUnderlyingInstr());
4896 NumStores += isa<StoreInst>(RepR->getUnderlyingInstr());
4897 continue;
4898 }
4899 if (isa<VPHistogramRecipe>(&R)) {
4900 NumLoads++;
4901 NumStores++;
4902 continue;
4903 }
4904 }
4905 }
4906 unsigned StoresIC = IC / (NumStores ? NumStores : 1);
4907 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
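    // Illustration with assumed counts: with IC = 8, 2 store recipes and 1
    // load recipe, StoresIC = 8 / 2 = 4 and LoadsIC = 8 / 1 = 8, so the
    // port-saturation path below would pick max(4, 8) = 8 if it beats SmallIC.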
4908
4909 // There is little point in interleaving for reductions containing selects
4910 // and compares when VF=1 since it may just create more overhead than it's
4911 // worth for loops with small trip counts. This is because we still have to
4912 // do the final reduction after the loop.
4913 bool HasSelectCmpReductions =
4914 HasReductions &&
4915          any_of(Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis(),
4916                 [](VPRecipeBase &R) {
4917 auto *RedR = dyn_cast<VPReductionPHIRecipe>(&R);
4918 return RedR && (RecurrenceDescriptor::isAnyOfRecurrenceKind(
4919 RedR->getRecurrenceKind()) ||
4920 RecurrenceDescriptor::isFindIVRecurrenceKind(
4921 RedR->getRecurrenceKind()));
4922 });
4923 if (HasSelectCmpReductions) {
4924 LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");
4925 return 1;
4926 }
4927
4928 // If we have a scalar reduction (vector reductions are already dealt with
4929 // by this point), we can increase the critical path length if the loop
4930 // we're interleaving is inside another loop. For tree-wise reductions
4931 // set the limit to 2, and for ordered reductions it's best to disable
4932 // interleaving entirely.
4933 if (HasReductions && OrigLoop->getLoopDepth() > 1) {
4934 bool HasOrderedReductions =
4935            any_of(Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis(),
4936                   [](VPRecipeBase &R) {
4937 auto *RedR = dyn_cast<VPReductionPHIRecipe>(&R);
4938
4939 return RedR && RedR->isOrdered();
4940 });
4941 if (HasOrderedReductions) {
4942 LLVM_DEBUG(
4943 dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
4944 return 1;
4945 }
4946
4947 unsigned F = MaxNestedScalarReductionIC;
4948 SmallIC = std::min(SmallIC, F);
4949 StoresIC = std::min(StoresIC, F);
4950 LoadsIC = std::min(LoadsIC, F);
4951 }
4952
4953      if (EnableLoadStoreRuntimeInterleave &&
4954          std::max(StoresIC, LoadsIC) > SmallIC) {
4955 LLVM_DEBUG(
4956 dbgs() << "LV: Interleaving to saturate store or load ports.\n");
4957 return std::max(StoresIC, LoadsIC);
4958 }
4959
4960 // If there are scalar reductions and TTI has enabled aggressive
4961 // interleaving for reductions, we will interleave to expose ILP.
4962 if (VF.isScalar() && AggressivelyInterleave) {
4963 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
4964      // Interleave no less than SmallIC but not as aggressively as the normal
4965      // IC, to satisfy the rare situation when resources are too limited.
4966 return std::max(IC / 2, SmallIC);
4967 }
4968
4969 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
4970 return SmallIC;
4971 }
4972
4973 // Interleave if this is a large loop (small loops are already dealt with by
4974 // this point) that could benefit from interleaving.
4975 if (AggressivelyInterleave) {
4976 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
4977 return IC;
4978 }
4979
4980 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
4981 return 1;
4982}
4983
4984bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I,
4985                                                           ElementCount VF) {
4986 // TODO: Cost model for emulated masked load/store is completely
4987 // broken. This hack guides the cost model to use an artificially
4988 // high enough value to practically disable vectorization with such
4989 // operations, except where previously deployed legality hack allowed
4990 // using very low cost values. This is to avoid regressions coming simply
4991 // from moving "masked load/store" check from legality to cost model.
4992 // Masked Load/Gather emulation was previously never allowed.
4993 // Limited number of Masked Store/Scatter emulation was allowed.
4994  assert(isPredicatedInst(I) &&
4995         "Expecting a scalar emulated instruction");
4996 return isa<LoadInst>(I) ||
4997 (isa<StoreInst>(I) &&
4998 NumPredStores > NumberOfStoresToPredicate);
4999}
5000
5001void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
5002  assert(VF.isVector() && "Expected VF >= 2");
5003
5004 // If we've already collected the instructions to scalarize or the predicated
5005 // BBs after vectorization, there's nothing to do. Collection may already have
5006 // occurred if we have a user-selected VF and are now computing the expected
5007 // cost for interleaving.
5008 if (InstsToScalarize.contains(VF) ||
5009 PredicatedBBsAfterVectorization.contains(VF))
5010 return;
5011
5012  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
5013 // not profitable to scalarize any instructions, the presence of VF in the
5014 // map will indicate that we've analyzed it already.
5015 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
5016
5017 // Find all the instructions that are scalar with predication in the loop and
5018 // determine if it would be better to not if-convert the blocks they are in.
5019 // If so, we also record the instructions to scalarize.
5020 for (BasicBlock *BB : TheLoop->blocks()) {
5021    if (!blockNeedsPredicationForAnyReason(BB))
5022      continue;
5023 for (Instruction &I : *BB)
5024 if (isScalarWithPredication(&I, VF)) {
5025 ScalarCostsTy ScalarCosts;
5026 // Do not apply discount logic for:
5027 // 1. Scalars after vectorization, as there will only be a single copy
5028 // of the instruction.
5029 // 2. Scalable VF, as that would lead to invalid scalarization costs.
5030 // 3. Emulated masked memrefs, if a hacked cost is needed.
5031 if (!isScalarAfterVectorization(&I, VF) && !VF.isScalable() &&
5032            !useEmulatedMaskMemRefHack(&I, VF) &&
5033            computePredInstDiscount(&I, ScalarCosts, VF) >= 0) {
5034 for (const auto &[I, IC] : ScalarCosts)
5035 ScalarCostsVF.insert({I, IC});
5036 // Check if we decided to scalarize a call. If so, update the widening
5037 // decision of the call to CM_Scalarize with the computed scalar cost.
5038 for (const auto &[I, Cost] : ScalarCosts) {
5039 auto *CI = dyn_cast<CallInst>(I);
5040 if (!CI || !CallWideningDecisions.contains({CI, VF}))
5041 continue;
5042 CallWideningDecisions[{CI, VF}].Kind = CM_Scalarize;
5043 CallWideningDecisions[{CI, VF}].Cost = Cost;
5044 }
5045 }
5046 // Remember that BB will remain after vectorization.
5047 PredicatedBBsAfterVectorization[VF].insert(BB);
5048 for (auto *Pred : predecessors(BB)) {
5049 if (Pred->getSingleSuccessor() == BB)
5050 PredicatedBBsAfterVectorization[VF].insert(Pred);
5051 }
5052 }
5053 }
5054}
5055
5056InstructionCost LoopVectorizationCostModel::computePredInstDiscount(
5057 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
5058 assert(!isUniformAfterVectorization(PredInst, VF) &&
5059 "Instruction marked uniform-after-vectorization will be predicated");
5060
5061 // Initialize the discount to zero, meaning that the scalar version and the
5062 // vector version cost the same.
5063 InstructionCost Discount = 0;
5064
5065 // Holds instructions to analyze. The instructions we visit are mapped in
5066 // ScalarCosts. Those instructions are the ones that would be scalarized if
5067 // we find that the scalar version costs less.
5068  SmallVector<Instruction *, 8> Worklist;
5069
5070 // Returns true if the given instruction can be scalarized.
5071 auto CanBeScalarized = [&](Instruction *I) -> bool {
5072 // We only attempt to scalarize instructions forming a single-use chain
5073 // from the original predicated block that would otherwise be vectorized.
5074 // Although not strictly necessary, we give up on instructions we know will
5075 // already be scalar to avoid traversing chains that are unlikely to be
5076 // beneficial.
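    // Illustrative example: for 'x = a + b; store x' in a predicated block,
    // the add forms a single-use chain feeding the predicated store and is a
    // candidate to be scalarized together with it.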
5077 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
5078 isScalarAfterVectorization(I, VF))
5079 return false;
5080
5081 // If the instruction is scalar with predication, it will be analyzed
5082 // separately. We ignore it within the context of PredInst.
5083 if (isScalarWithPredication(I, VF))
5084 return false;
5085
5086 // If any of the instruction's operands are uniform after vectorization,
5087 // the instruction cannot be scalarized. This prevents, for example, a
5088 // masked load from being scalarized.
5089 //
5090 // We assume we will only emit a value for lane zero of an instruction
5091 // marked uniform after vectorization, rather than VF identical values.
5092 // Thus, if we scalarize an instruction that uses a uniform, we would
5093 // create uses of values corresponding to the lanes we aren't emitting code
5094 // for. This behavior can be changed by allowing getScalarValue to clone
5095 // the lane zero values for uniforms rather than asserting.
5096 for (Use &U : I->operands())
5097 if (auto *J = dyn_cast<Instruction>(U.get()))
5098 if (isUniformAfterVectorization(J, VF))
5099 return false;
5100
5101 // Otherwise, we can scalarize the instruction.
5102 return true;
5103 };
5104
5105 // Compute the expected cost discount from scalarizing the entire expression
5106 // feeding the predicated instruction. We currently only consider expressions
5107 // that are single-use instruction chains.
5108 Worklist.push_back(PredInst);
5109 while (!Worklist.empty()) {
5110 Instruction *I = Worklist.pop_back_val();
5111
5112 // If we've already analyzed the instruction, there's nothing to do.
5113 if (ScalarCosts.contains(I))
5114 continue;
5115
5116 // Cannot scalarize fixed-order recurrence phis at the moment.
5117 if (isa<PHINode>(I) && Legal->isFixedOrderRecurrence(cast<PHINode>(I)))
5118 continue;
5119
5120 // Compute the cost of the vector instruction. Note that this cost already
5121 // includes the scalarization overhead of the predicated instruction.
5122 InstructionCost VectorCost = getInstructionCost(I, VF);
5123
5124 // Compute the cost of the scalarized instruction. This cost is the cost of
5125 // the instruction as if it wasn't if-converted and instead remained in the
5126 // predicated block. We will scale this cost by block probability after
5127 // computing the scalarization overhead.
5128 InstructionCost ScalarCost =
5129 VF.getFixedValue() * getInstructionCost(I, ElementCount::getFixed(1));
5130
5131 // Compute the scalarization overhead of needed insertelement instructions
5132 // and phi nodes.
5133 if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) {
5134 Type *WideTy = toVectorizedTy(I->getType(), VF);
5135 for (Type *VectorTy : getContainedTypes(WideTy)) {
5136 ScalarCost += TTI.getScalarizationOverhead(
5138 /*Insert=*/true,
5139 /*Extract=*/false, CostKind);
5140 }
5141 ScalarCost +=
5142 VF.getFixedValue() * TTI.getCFInstrCost(Instruction::PHI, CostKind);
5143 }
5144
5145 // Compute the scalarization overhead of needed extractelement
5146 // instructions. For each of the instruction's operands, if the operand can
5147 // be scalarized, add it to the worklist; otherwise, account for the
5148 // overhead.
5149 for (Use &U : I->operands())
5150 if (auto *J = dyn_cast<Instruction>(U.get())) {
5151 assert(canVectorizeTy(J->getType()) &&
5152 "Instruction has non-scalar type");
5153 if (CanBeScalarized(J))
5154 Worklist.push_back(J);
5155 else if (needsExtract(J, VF)) {
5156 Type *WideTy = toVectorizedTy(J->getType(), VF);
5157 for (Type *VectorTy : getContainedTypes(WideTy)) {
5158 ScalarCost += TTI.getScalarizationOverhead(
5159 cast<VectorType>(VectorTy),
5160 APInt::getAllOnes(VF.getFixedValue()), /*Insert*/ false,
5161 /*Extract*/ true, CostKind);
5162 }
5163 }
5164 }
5165
5166 // Scale the total scalar cost by block probability.
5167 ScalarCost /= getPredBlockCostDivisor(CostKind, I->getParent());
5168
5169 // Compute the discount. A non-negative discount means the vector version
5170 // of the instruction costs more, and scalarizing would be beneficial.
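    // Illustration with assumed costs: if VectorCost = 10 and the
    // probability-scaled ScalarCost = 6, the discount grows by 4, meaning
    // scalarizing this chain is expected to save 4 units of cost.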
5171 Discount += VectorCost - ScalarCost;
5172 ScalarCosts[I] = ScalarCost;
5173 }
5174
5175 return Discount;
5176}
5177
5178InstructionCost LoopVectorizationCostModel::expectedCost(ElementCount VF) {
5179  InstructionCost Cost;
5180
5181 // If the vector loop gets executed exactly once with the given VF, ignore the
5182 // costs of comparison and induction instructions, as they'll get simplified
5183 // away.
5184 SmallPtrSet<Instruction *, 2> ValuesToIgnoreForVF;
5185 auto TC = getSmallConstantTripCount(PSE.getSE(), TheLoop);
5186 if (TC == VF && !foldTailByMasking())
5187    addFullyUnrolledInstructionsToIgnore(TheLoop, Legal->getInductionVars(),
5188                                         ValuesToIgnoreForVF);
5189
5190 // For each block.
5191 for (BasicBlock *BB : TheLoop->blocks()) {
5192 InstructionCost BlockCost;
5193
5194 // For each instruction in the old loop.
5195 for (Instruction &I : *BB) {
5196 // Skip ignored values.
5197 if (ValuesToIgnore.count(&I) || ValuesToIgnoreForVF.count(&I) ||
5198 (VF.isVector() && VecValuesToIgnore.count(&I)))
5199 continue;
5200
5201      InstructionCost C = getInstructionCost(&I, VF);
5202
5203 // Check if we should override the cost.
5204 if (C.isValid() && ForceTargetInstructionCost.getNumOccurrences() > 0) {
5205 // For interleave groups, use ForceTargetInstructionCost once for the
5206 // whole group.
5207 if (VF.isVector() && getWideningDecision(&I, VF) == CM_Interleave) {
5208 if (getInterleavedAccessGroup(&I)->getInsertPos() == &I)
5209            C = InstructionCost(ForceTargetInstructionCost);
5210          else
5211 C = InstructionCost(0);
5212 } else {
5213          C = InstructionCost(ForceTargetInstructionCost);
5214        }
5215 }
5216
5217 BlockCost += C;
5218 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C << " for VF "
5219 << VF << " For instruction: " << I << '\n');
5220 }
5221
5222 // If we are vectorizing a predicated block, it will have been
5223 // if-converted. This means that the block's instructions (aside from
5224 // stores and instructions that may divide by zero) will now be
5225 // unconditionally executed. For the scalar case, we may not always execute
5226 // the predicated block, if it is an if-else block. Thus, scale the block's
5227 // cost by the probability of executing it.
5228 // getPredBlockCostDivisor will return 1 for blocks that are only predicated
5229 // by the header mask when folding the tail.
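    // Illustration: a scalar block with an assumed raw cost of 8 behind a
    // divisor of 2 (i.e. executed roughly every other iteration) contributes
    // an expected cost of 4.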
5230 if (VF.isScalar())
5231 BlockCost /= getPredBlockCostDivisor(CostKind, BB);
5232
5233 Cost += BlockCost;
5234 }
5235
5236 return Cost;
5237}
5238
5239/// Gets the address access SCEV for Ptr, if it should be used for cost modeling
5240/// according to isAddressSCEVForCost.
5241///
5242/// This SCEV can be sent to the Target in order to estimate the address
5243/// calculation cost.
5244static const SCEV *getAddressAccessSCEV(
5245                                         Value *Ptr,
5246                                         PredicatedScalarEvolution &PSE,
5247                                         const Loop *TheLoop) {
5248 const SCEV *Addr = PSE.getSCEV(Ptr);
5249 return vputils::isAddressSCEVForCost(Addr, *PSE.getSE(), TheLoop) ? Addr
5250 : nullptr;
5251}
5252
5253InstructionCost
5254LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
5255 ElementCount VF) {
5256 assert(VF.isVector() &&
5257 "Scalarization cost of instruction implies vectorization.");
5258 if (VF.isScalable())
5259 return InstructionCost::getInvalid();
5260
5261 Type *ValTy = getLoadStoreType(I);
5262 auto *SE = PSE.getSE();
5263
5264 unsigned AS = getLoadStoreAddressSpace(I);
5265  Value *Ptr = getLoadStorePointerOperand(I);
5266  Type *PtrTy = toVectorTy(Ptr->getType(), VF);
5267 // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
5268 // that it is being called from this specific place.
5269
5270 // Figure out whether the access is strided and get the stride value
5271  // if it's known at compile time.
5272 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, PSE, TheLoop);
5273
5274 // Get the cost of the scalar memory instruction and address computation.
5275  InstructionCost Cost = VF.getFixedValue() * TTI.getAddressComputationCost(
5276      PtrTy, SE, PtrSCEV, CostKind);
5277
5278 // Don't pass *I here, since it is scalar but will actually be part of a
5279 // vectorized loop where the user of it is a vectorized instruction.
5280 const Align Alignment = getLoadStoreAlignment(I);
5281 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
5282 Cost += VF.getFixedValue() *
5283 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
5284 AS, CostKind, OpInfo);
5285
5286 // Get the overhead of the extractelement and insertelement instructions
5287 // we might create due to scalarization.
5288  Cost += getScalarizationOverhead(I, VF);
5289
5290 // If we have a predicated load/store, it will need extra i1 extracts and
5291 // conditional branches, but may not be executed for each vector lane. Scale
5292 // the cost by the probability of executing the predicated block.
5293 if (isPredicatedInst(I)) {
5294 Cost /= getPredBlockCostDivisor(CostKind, I->getParent());
5295
5296 // Add the cost of an i1 extract and a branch
5297 auto *VecI1Ty =
5298 VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
5299    Cost += TTI.getScalarizationOverhead(
5300        VecI1Ty, APInt::getAllOnes(VF.getFixedValue()),
5301 /*Insert=*/false, /*Extract=*/true, CostKind);
5302 Cost += TTI.getCFInstrCost(Instruction::CondBr, CostKind);
5303
5304 if (useEmulatedMaskMemRefHack(I, VF))
5305 // Artificially setting to a high enough value to practically disable
5306 // vectorization with such operations.
5307 Cost = 3000000;
5308 }
5309
5310 return Cost;
5311}
5312
5313InstructionCost
5314LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
5315 ElementCount VF) {
5316 Type *ValTy = getLoadStoreType(I);
5317 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5318  Value *Ptr = getLoadStorePointerOperand(I);
5319  unsigned AS = getLoadStoreAddressSpace(I);
5320 int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
5321
5322 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5323 "Stride should be 1 or -1 for consecutive memory access");
5324 const Align Alignment = getLoadStoreAlignment(I);
5325  InstructionCost Cost = 0;
5326  if (Legal->isMaskRequired(I)) {
5327 unsigned IID = I->getOpcode() == Instruction::Load
5328 ? Intrinsic::masked_load
5329 : Intrinsic::masked_store;
5330    Cost += TTI.getMemIntrinsicInstrCost(
5331        MemIntrinsicCostAttributes(IID, VectorTy, Alignment, AS), CostKind);
5332 } else {
5333 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
5334 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
5335 CostKind, OpInfo, I);
5336 }
5337
5338 bool Reverse = ConsecutiveStride < 0;
5339 if (Reverse)
5340    Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy,
5341                               VectorTy, {}, CostKind, 0);
5342 return Cost;
5343}
5344
5345InstructionCost
5346LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
5347 ElementCount VF) {
5348 assert(Legal->isUniformMemOp(*I, VF));
5349
5350 Type *ValTy = getLoadStoreType(I);
5352 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5353 const Align Alignment = getLoadStoreAlignment(I);
5354 unsigned AS = getLoadStoreAddressSpace(I);
5355 if (isa<LoadInst>(I)) {
5356 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5357 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
5358 CostKind) +
5359           TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy,
5360                              VectorTy, {}, CostKind);
5361 }
5362 StoreInst *SI = cast<StoreInst>(I);
5363
5364 bool IsLoopInvariantStoreValue = Legal->isInvariant(SI->getValueOperand());
5365 // TODO: We have existing tests that request the cost of extracting element
5366 // VF.getKnownMinValue() - 1 from a scalable vector. This does not represent
5367 // the actual generated code, which involves extracting the last element of
5368 // a scalable vector where the lane to extract is unknown at compile time.
5369  InstructionCost Cost =
5370      TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5371 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, CostKind);
5372 if (!IsLoopInvariantStoreValue)
5373 Cost += TTI.getIndexedVectorInstrCostFromEnd(Instruction::ExtractElement,
5374 VectorTy, CostKind, 0);
5375 return Cost;
5376}
5377
5378InstructionCost
5379LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
5380 ElementCount VF) {
5381 Type *ValTy = getLoadStoreType(I);
5382 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5383 const Align Alignment = getLoadStoreAlignment(I);
5384  Value *Ptr = getLoadStorePointerOperand(I);
5385  Type *PtrTy = Ptr->getType();
5386
5387 if (!Legal->isUniform(Ptr, VF))
5388 PtrTy = toVectorTy(PtrTy, VF);
5389
5390 unsigned IID = I->getOpcode() == Instruction::Load
5391 ? Intrinsic::masked_gather
5392 : Intrinsic::masked_scatter;
5393 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5394         TTI.getMemIntrinsicInstrCost(
5395             MemIntrinsicCostAttributes(IID, VectorTy, Ptr,
5396 Legal->isMaskRequired(I), Alignment, I),
5397 CostKind);
5398}
5399
5400InstructionCost
5401LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
5402 ElementCount VF) {
5403 const auto *Group = getInterleavedAccessGroup(I);
5404 assert(Group && "Fail to get an interleaved access group.");
5405
5406 Instruction *InsertPos = Group->getInsertPos();
5407 Type *ValTy = getLoadStoreType(InsertPos);
5408 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5409 unsigned AS = getLoadStoreAddressSpace(InsertPos);
5410
5411 unsigned InterleaveFactor = Group->getFactor();
5412 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
5413
5414 // Holds the indices of existing members in the interleaved group.
5415 SmallVector<unsigned, 4> Indices;
5416 for (unsigned IF = 0; IF < InterleaveFactor; IF++)
5417 if (Group->getMember(IF))
5418 Indices.push_back(IF);
5419
5420 // Calculate the cost of the whole interleaved group.
5421 bool UseMaskForGaps =
5422 (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
5423 (isa<StoreInst>(I) && !Group->isFull());
5424  InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
5425      InsertPos->getOpcode(), WideVecTy, Group->getFactor(), Indices,
5426 Group->getAlign(), AS, CostKind, Legal->isMaskRequired(I),
5427 UseMaskForGaps);
5428
5429 if (Group->isReverse()) {
5430 // TODO: Add support for reversed masked interleaved access.
5431 assert(!Legal->isMaskRequired(I) &&
5432 "Reverse masked interleaved access not supported.");
5433 Cost += Group->getNumMembers() *
5434            TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy,
5435                               VectorTy, {}, CostKind, 0);
5436 }
5437 return Cost;
5438}
5439
5440std::optional<InstructionCost>
5441LoopVectorizationCostModel::getReductionPatternCost(Instruction *I,
5442                                                    ElementCount VF,
5443 Type *Ty) const {
5444 using namespace llvm::PatternMatch;
5445 // Early exit for no inloop reductions
5446 if (InLoopReductions.empty() || VF.isScalar() || !isa<VectorType>(Ty))
5447 return std::nullopt;
5448 auto *VectorTy = cast<VectorType>(Ty);
5449
5450 // We are looking for a pattern of, and finding the minimal acceptable cost:
5451 // reduce(mul(ext(A), ext(B))) or
5452 // reduce(mul(A, B)) or
5453 // reduce(ext(A)) or
5454 // reduce(A).
5455 // The basic idea is that we walk down the tree to do that, finding the root
5456 // reduction instruction in InLoopReductionImmediateChains. From there we find
5457 // the pattern of mul/ext and test the cost of the entire pattern vs the cost
5458  // of the components. If the reduction cost is lower, then we return it for
5459  // the reduction instruction and 0 for the other instructions in the pattern.
5460  // If it is not, we return an invalid cost specifying that the original cost
5461  // method should be used.
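  // Illustrative example: a loop computing 'sum += (int)a[i] * (int)b[i]' over
  // i8 inputs forms reduce.add(mul(sext(A), sext(B))). A target with a
  // dot-product-like instruction may report a reduction cost below the summed
  // ext + mul + reduce costs, in which case that cheaper cost is used here.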
5462 Instruction *RetI = I;
5463 if (match(RetI, m_ZExtOrSExt(m_Value()))) {
5464 if (!RetI->hasOneUser())
5465 return std::nullopt;
5466 RetI = RetI->user_back();
5467 }
5468
5469 if (match(RetI, m_OneUse(m_Mul(m_Value(), m_Value()))) &&
5470 RetI->user_back()->getOpcode() == Instruction::Add) {
5471 RetI = RetI->user_back();
5472 }
5473
5474 // Test if the found instruction is a reduction, and if not return an invalid
5475 // cost specifying the parent to use the original cost modelling.
5476 Instruction *LastChain = InLoopReductionImmediateChains.lookup(RetI);
5477 if (!LastChain)
5478 return std::nullopt;
5479
5480 // Find the reduction this chain is a part of and calculate the basic cost of
5481 // the reduction on its own.
5482 Instruction *ReductionPhi = LastChain;
5483 while (!isa<PHINode>(ReductionPhi))
5484 ReductionPhi = InLoopReductionImmediateChains.at(ReductionPhi);
5485
5486 const RecurrenceDescriptor &RdxDesc =
5487 Legal->getRecurrenceDescriptor(cast<PHINode>(ReductionPhi));
5488
5489 InstructionCost BaseCost;
5490 RecurKind RK = RdxDesc.getRecurrenceKind();
5491  if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK)) {
5492    Intrinsic::ID MinMaxID = getMinMaxReductionIntrinsicOp(RK);
5493    BaseCost = TTI.getMinMaxReductionCost(MinMaxID, VectorTy,
5494 RdxDesc.getFastMathFlags(), CostKind);
5495 } else {
5496 BaseCost = TTI.getArithmeticReductionCost(
5497 RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);
5498 }
5499
5500 // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
5501 // normal fmul instruction to the cost of the fadd reduction.
5502 if (RK == RecurKind::FMulAdd)
5503 BaseCost +=
5504 TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind);
5505
5506 // If we're using ordered reductions then we can just return the base cost
5507 // here, since getArithmeticReductionCost calculates the full ordered
5508 // reduction cost when FP reassociation is not allowed.
5509 if (useOrderedReductions(RdxDesc))
5510 return BaseCost;
5511
5512 // Get the operand that was not the reduction chain and match it to one of the
5513 // patterns, returning the better cost if it is found.
5514 Instruction *RedOp = RetI->getOperand(1) == LastChain
5515                           ? dyn_cast<Instruction>(RetI->getOperand(0))
5516                           : dyn_cast<Instruction>(RetI->getOperand(1));
5517
5518 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
5519
5520 Instruction *Op0, *Op1;
5521 if (RedOp && RdxDesc.getOpcode() == Instruction::Add &&
5522 match(RedOp,
5523            m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) &&
5524      match(Op0, m_ZExtOrSExt(m_Value())) &&
5525 Op0->getOpcode() == Op1->getOpcode() &&
5526 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
5527 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
5528 (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {
5529
5530 // Matched reduce.add(ext(mul(ext(A), ext(B)))
5531 // Note that the extend opcodes need to all match, or if A==B they will have
5532 // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
5533 // which is equally fine.
5534 bool IsUnsigned = isa<ZExtInst>(Op0);
5535 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
5536 auto *MulType = VectorType::get(Op0->getType(), VectorTy);
5537
5538 InstructionCost ExtCost =
5539 TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
5540                             TTI::CastContextHint::None, CostKind);
5541    InstructionCost MulCost =
5542 TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind);
5543 InstructionCost Ext2Cost =
5544 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType,
5545                             TTI::CastContextHint::None, CostKind);
5546
5547 InstructionCost RedCost = TTI.getMulAccReductionCost(
5548 IsUnsigned, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), ExtType,
5549 CostKind);
5550
5551 if (RedCost.isValid() &&
5552 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
5553 return I == RetI ? RedCost : 0;
5554 } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) &&
5555 !TheLoop->isLoopInvariant(RedOp)) {
5556 // Matched reduce(ext(A))
5557 bool IsUnsigned = isa<ZExtInst>(RedOp);
5558 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
5559 InstructionCost RedCost = TTI.getExtendedReductionCost(
5560 RdxDesc.getOpcode(), IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
5561 RdxDesc.getFastMathFlags(), CostKind);
5562
5563 InstructionCost ExtCost =
5564 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
5565                             TTI::CastContextHint::None, CostKind);
5566    if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
5567 return I == RetI ? RedCost : 0;
5568 } else if (RedOp && RdxDesc.getOpcode() == Instruction::Add &&
5569 match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) {
5570 if (match(Op0, m_ZExtOrSExt(m_Value())) &&
5571 Op0->getOpcode() == Op1->getOpcode() &&
5572 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
5573 bool IsUnsigned = isa<ZExtInst>(Op0);
5574 Type *Op0Ty = Op0->getOperand(0)->getType();
5575 Type *Op1Ty = Op1->getOperand(0)->getType();
5576 Type *LargestOpTy =
5577 Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty
5578 : Op0Ty;
5579 auto *ExtType = VectorType::get(LargestOpTy, VectorTy);
5580
5581 // Matched reduce.add(mul(ext(A), ext(B))), where the two ext may be of
5582 // different sizes. We take the largest type as the ext to reduce, and add
5583 // the remaining cost as, for example reduce(mul(ext(ext(A)), ext(B))).
5584 InstructionCost ExtCost0 = TTI.getCastInstrCost(
5585 Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy),
5586          TTI::CastContextHint::None, CostKind);
5587      InstructionCost ExtCost1 = TTI.getCastInstrCost(
5588 Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy),
5589          TTI::CastContextHint::None, CostKind);
5590      InstructionCost MulCost =
5591 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
5592
5593 InstructionCost RedCost = TTI.getMulAccReductionCost(
5594 IsUnsigned, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), ExtType,
5595 CostKind);
5596 InstructionCost ExtraExtCost = 0;
5597 if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
5598 Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;
5599 ExtraExtCost = TTI.getCastInstrCost(
5600 ExtraExtOp->getOpcode(), ExtType,
5601 VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy),
5602            TTI::CastContextHint::None, CostKind);
5603      }
5604
5605 if (RedCost.isValid() &&
5606 (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost))
5607 return I == RetI ? RedCost : 0;
5608 } else if (!match(I, m_ZExtOrSExt(m_Value()))) {
5609 // Matched reduce.add(mul())
5610 InstructionCost MulCost =
5611 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
5612
5613 InstructionCost RedCost = TTI.getMulAccReductionCost(
5614 true, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), VectorTy,
5615 CostKind);
5616
5617 if (RedCost.isValid() && RedCost < MulCost + BaseCost)
5618 return I == RetI ? RedCost : 0;
5619 }
5620 }
5621
5622 return I == RetI ? std::optional<InstructionCost>(BaseCost) : std::nullopt;
5623}
5624
5626LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
5627 ElementCount VF) {
5628 // Calculate scalar cost only. Vectorization cost should be ready at this
5629 // moment.
5630 if (VF.isScalar()) {
5631 Type *ValTy = getLoadStoreType(I);
5632    Type *PtrTy = getLoadStorePointerOperand(I)->getType();
5633    const Align Alignment = getLoadStoreAlignment(I);
5634 unsigned AS = getLoadStoreAddressSpace(I);
5635
5636 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
5637 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5638 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, CostKind,
5639 OpInfo, I);
5640 }
5641 return getWideningCost(I, VF);
5642}
5643
5644InstructionCost
5645LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
5646 ElementCount VF) const {
5647
5648 // There is no mechanism yet to create a scalable scalarization loop,
5649 // so this is currently Invalid.
5650 if (VF.isScalable())
5651 return InstructionCost::getInvalid();
5652
5653 if (VF.isScalar())
5654 return 0;
5655
5657 Type *RetTy = toVectorizedTy(I->getType(), VF);
5658 if (!RetTy->isVoidTy() &&
5659      (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) {
5660
5662 if (isa<LoadInst>(I))
5664 else if (isa<StoreInst>(I))
5666
5667 for (Type *VectorTy : getContainedTypes(RetTy)) {
5668        Cost += TTI.getScalarizationOverhead(
5669            cast<VectorType>(VectorTy), APInt::getAllOnes(VF.getFixedValue()),
5670            /*Insert=*/true, /*Extract=*/false, CostKind,
5671 /*ForPoisonSrc=*/true, {}, VIC);
5672 }
5673 }
5674
5675 // Some targets keep addresses scalar.
5676  if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
5677    return Cost;
5678
5678
5679 // Some targets support efficient element stores.
5681 return Cost;
5682
5683 // Collect operands to consider.
5684 CallInst *CI = dyn_cast<CallInst>(I);
5685 Instruction::op_range Ops = CI ? CI->args() : I->operands();
5686
5687 // Skip operands that do not require extraction/scalarization and do not incur
5688 // any overhead.
5689  SmallVector<Type *> Tys;
5690  for (auto *V : filterExtractingOperands(Ops, VF))
5691 Tys.push_back(maybeVectorizeType(V->getType(), VF));
5692
5696 return Cost + TTI.getOperandsScalarizationOverhead(Tys, CostKind, OperandVIC);
5697}
5698
5700 if (VF.isScalar())
5701 return;
5702 NumPredStores = 0;
5703 for (BasicBlock *BB : TheLoop->blocks()) {
5704 // For each instruction in the old loop.
5705 for (Instruction &I : *BB) {
5706      Value *Ptr = getLoadStorePointerOperand(&I);
5707      if (!Ptr)
5708 continue;
5709
5710 // TODO: We should generate better code and update the cost model for
5711 // predicated uniform stores. Today they are treated as any other
5712 // predicated store (see added test cases in
5713 // invariant-store-vectorization.ll).
5714      if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF))
5715        NumPredStores++;
5716
5717 if (Legal->isUniformMemOp(I, VF)) {
5718 auto IsLegalToScalarize = [&]() {
5719 if (!VF.isScalable())
5720 // Scalarization of fixed length vectors "just works".
5721 return true;
5722
5723 // We have dedicated lowering for unpredicated uniform loads and
5724 // stores. Note that even with tail folding we know that at least
5725 // one lane is active (i.e. generalized predication is not possible
5726 // here), and the logic below depends on this fact.
5727 if (!foldTailByMasking())
5728 return true;
5729
5730 // For scalable vectors, a uniform memop load is always
5731 // uniform-by-parts and we know how to scalarize that.
5732 if (isa<LoadInst>(I))
5733 return true;
5734
5735          // A uniform store isn't necessarily uniform-by-parts
5736 // and we can't assume scalarization.
5737 auto &SI = cast<StoreInst>(I);
5738 return TheLoop->isLoopInvariant(SI.getValueOperand());
5739 };
5740
5741 const InstructionCost GatherScatterCost =
5742            isLegalGatherOrScatter(&I, VF) ?
5743            getGatherScatterCost(&I, VF) : InstructionCost::getInvalid();
5744
5745 // Load: Scalar load + broadcast
5746 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
5747 // FIXME: This cost is a significant under-estimate for tail folded
5748 // memory ops.
5749 const InstructionCost ScalarizationCost =
5750 IsLegalToScalarize() ? getUniformMemOpCost(&I, VF)
5751                                 : InstructionCost::getInvalid();
5752
5753        // Choose the better solution for the current VF. Note that Invalid
5754        // costs compare as maximally large. If both are invalid, we get
5755        // scalable invalid, which signals a failure and a vectorization abort.
5756 if (GatherScatterCost < ScalarizationCost)
5757 setWideningDecision(&I, VF, CM_GatherScatter, GatherScatterCost);
5758 else
5759 setWideningDecision(&I, VF, CM_Scalarize, ScalarizationCost);
5760 continue;
5761 }
5762
5763 // We assume that widening is the best solution when possible.
5764 if (memoryInstructionCanBeWidened(&I, VF)) {
5765 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
5766 int ConsecutiveStride = Legal->isConsecutivePtr(
5767            getLoadStoreType(&I), getLoadStorePointerOperand(&I));
5768        assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5769 "Expected consecutive stride.");
5770 InstWidening Decision =
5771 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
5772 setWideningDecision(&I, VF, Decision, Cost);
5773 continue;
5774 }
5775
5776 // Choose between Interleaving, Gather/Scatter or Scalarization.
5777        InstructionCost InterleaveCost = InstructionCost::getInvalid();
5778        unsigned NumAccesses = 1;
5779 if (isAccessInterleaved(&I)) {
5780 const auto *Group = getInterleavedAccessGroup(&I);
5781 assert(Group && "Fail to get an interleaved access group.");
5782
5783 // Make one decision for the whole group.
5784 if (getWideningDecision(&I, VF) != CM_Unknown)
5785 continue;
5786
5787 NumAccesses = Group->getNumMembers();
5788          if (interleavedAccessCanBeWidened(&I, VF))
5789            InterleaveCost = getInterleaveGroupCost(&I, VF);
5790 }
5791
5792 InstructionCost GatherScatterCost =
5793            isLegalGatherOrScatter(&I, VF)
5794            ? getGatherScatterCost(&I, VF) * NumAccesses
5795            : InstructionCost::getInvalid();
5796
5797 InstructionCost ScalarizationCost =
5798 getMemInstScalarizationCost(&I, VF) * NumAccesses;
5799
5800 // Choose better solution for the current VF,
5801 // write down this decision and use it during vectorization.
5802        InstructionCost Cost;
5803        InstWidening Decision;
5804 if (InterleaveCost <= GatherScatterCost &&
5805 InterleaveCost < ScalarizationCost) {
5806 Decision = CM_Interleave;
5807 Cost = InterleaveCost;
5808 } else if (GatherScatterCost < ScalarizationCost) {
5809 Decision = CM_GatherScatter;
5810 Cost = GatherScatterCost;
5811 } else {
5812 Decision = CM_Scalarize;
5813 Cost = ScalarizationCost;
5814 }
5815 // If the instructions belongs to an interleave group, the whole group
5816 // receives the same decision. The whole group receives the cost, but
5817 // the cost will actually be assigned to one instruction.
5818 if (const auto *Group = getInterleavedAccessGroup(&I)) {
5819 if (Decision == CM_Scalarize) {
5820 for (unsigned Idx = 0; Idx < Group->getFactor(); ++Idx) {
5821 if (auto *I = Group->getMember(Idx)) {
5822 setWideningDecision(I, VF, Decision,
5823 getMemInstScalarizationCost(I, VF));
5824 }
5825 }
5826 } else {
5827 setWideningDecision(Group, VF, Decision, Cost);
5828 }
5829 } else
5830 setWideningDecision(&I, VF, Decision, Cost);
5831 }
5832 }
5833
5834 // Make sure that any load of address and any other address computation
5835 // remains scalar unless there is gather/scatter support. This avoids
5836 // inevitable extracts into address registers, and also has the benefit of
5837 // activating LSR more, since that pass can't optimize vectorized
5838 // addresses.
5839 if (TTI.prefersVectorizedAddressing())
5840 return;
5841
5842 // Start with all scalar pointer uses.
5843  SmallPtrSet<Instruction *, 8> AddrDefs;
5844  for (BasicBlock *BB : TheLoop->blocks())
5845 for (Instruction &I : *BB) {
5846 Instruction *PtrDef =
5847        dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
5848    if (PtrDef && TheLoop->contains(PtrDef) &&
5849        getWideningDecision(&I, VF) != CM_GatherScatter)
5850 AddrDefs.insert(PtrDef);
5851 }
5852
5853 // Add all instructions used to generate the addresses.
5854  SmallVector<Instruction *, 4> Worklist;
5855  append_range(Worklist, AddrDefs);
5856 while (!Worklist.empty()) {
5857 Instruction *I = Worklist.pop_back_val();
5858 for (auto &Op : I->operands())
5859 if (auto *InstOp = dyn_cast<Instruction>(Op))
5860 if (TheLoop->contains(InstOp) && !isa<PHINode>(InstOp) &&
5861 AddrDefs.insert(InstOp).second)
5862 Worklist.push_back(InstOp);
5863 }
5864
5865 auto UpdateMemOpUserCost = [this, VF](LoadInst *LI) {
5866 // If there are direct memory op users of the newly scalarized load,
5867 // their cost may have changed because there's no scalarization
5868 // overhead for the operand. Update it.
5869 for (User *U : LI->users()) {
5870        if (!isa<LoadInst, StoreInst>(U))
5871          continue;
5872        if (getWideningDecision(cast<Instruction>(U), VF) != CM_Scalarize)
5873          continue;
5874        setWideningDecision(
5875            cast<Instruction>(U), VF, CM_Scalarize,
5876            getMemInstScalarizationCost(cast<Instruction>(U), VF));
5877 }
5878 };
5879 for (auto *I : AddrDefs) {
5880 if (isa<LoadInst>(I)) {
5881      // Setting the desired widening decision should ideally be handled by
5882      // cost functions, but since this involves the task of finding out
5883 // if the loaded register is involved in an address computation, it is
5884 // instead changed here when we know this is the case.
5885 InstWidening Decision = getWideningDecision(I, VF);
5886 if (!isPredicatedInst(I) &&
5887 (Decision == CM_Widen || Decision == CM_Widen_Reverse ||
5888 (!Legal->isUniformMemOp(*I, VF) && Decision == CM_Scalarize))) {
5889 // Scalarize a widened load of address or update the cost of a scalar
5890 // load of an address.
5891        setWideningDecision(
5892            I, VF, CM_Scalarize,
5893 (VF.getKnownMinValue() *
5894 getMemoryInstructionCost(I, ElementCount::getFixed(1))));
5895 UpdateMemOpUserCost(cast<LoadInst>(I));
5896 } else if (const auto *Group = getInterleavedAccessGroup(I)) {
5897 // Scalarize all members of this interleaved group when any member
5898 // is used as an address. The address-used load skips scalarization
5899 // overhead, other members include it.
5900 for (unsigned Idx = 0; Idx < Group->getFactor(); ++Idx) {
5901 if (Instruction *Member = Group->getMember(Idx)) {
5902            InstructionCost Cost =
5903                AddrDefs.contains(Member)
5904 ? (VF.getKnownMinValue() *
5905 getMemoryInstructionCost(Member,
5906                                                ElementCount::getFixed(1)))
5907                    : getMemInstScalarizationCost(Member, VF);
5908            setWideningDecision(Member, VF, CM_Scalarize, Cost);
5909            UpdateMemOpUserCost(cast<LoadInst>(Member));
5910 }
5911 }
5912 }
5913 } else {
5914 // Cannot scalarize fixed-order recurrence phis at the moment.
5915 if (isa<PHINode>(I) && Legal->isFixedOrderRecurrence(cast<PHINode>(I)))
5916 continue;
5917
5918 // Make sure I gets scalarized and a cost estimate without
5919 // scalarization overhead.
5920 ForcedScalars[VF].insert(I);
5921 }
5922 }
5923}
5924
5925void LoopVectorizationCostModel::setVectorizedCallDecision(ElementCount VF) {
5926  assert(!VF.isScalar() &&
5927 "Trying to set a vectorization decision for a scalar VF");
5928
5929 auto ForcedScalar = ForcedScalars.find(VF);
5930 for (BasicBlock *BB : TheLoop->blocks()) {
5931 // For each instruction in the old loop.
5932 for (Instruction &I : *BB) {
5933      CallInst *CI = dyn_cast<CallInst>(&I);
5934
5935 if (!CI)
5936 continue;
5937
5938      InstructionCost ScalarCost = InstructionCost::getInvalid();
5939      InstructionCost VectorCost = InstructionCost::getInvalid();
5940      InstructionCost IntrinsicCost = InstructionCost::getInvalid();
5941      Function *ScalarFunc = CI->getCalledFunction();
5942 Type *ScalarRetTy = CI->getType();
5943 SmallVector<Type *, 4> Tys, ScalarTys;
5944 for (auto &ArgOp : CI->args())
5945 ScalarTys.push_back(ArgOp->getType());
5946
5947 // Estimate cost of scalarized vector call. The source operands are
5948 // assumed to be vectors, so we need to extract individual elements from
5949 // there, execute VF scalar calls, and then gather the result into the
5950 // vector return value.
5951 if (VF.isFixed()) {
5952 InstructionCost ScalarCallCost =
5953 TTI.getCallInstrCost(ScalarFunc, ScalarRetTy, ScalarTys, CostKind);
5954
5955 // Compute costs of unpacking argument values for the scalar calls and
5956 // packing the return values to a vector.
5957 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
5958 ScalarCost = ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
5959 } else {
5960 // There is no point attempting to calculate the scalar cost for a
5961 // scalable VF as we know it will be Invalid.
5962        assert(!getScalarizationOverhead(CI, VF).isValid() &&
5963               "Unexpected valid cost for scalarizing scalable vectors");
5964 ScalarCost = InstructionCost::getInvalid();
5965 }
5966
5967 // Honor ForcedScalars and UniformAfterVectorization decisions.
5968 // TODO: For calls, it might still be more profitable to widen. Use
5969 // VPlan-based cost model to compare different options.
5970 if (VF.isVector() && ((ForcedScalar != ForcedScalars.end() &&
5971 ForcedScalar->second.contains(CI)) ||
5972 isUniformAfterVectorization(CI, VF))) {
5973 setCallWideningDecision(CI, VF, CM_Scalarize, nullptr,
5974 Intrinsic::not_intrinsic, std::nullopt,
5975 ScalarCost);
5976 continue;
5977 }
5978
5979 bool MaskRequired = Legal->isMaskRequired(CI);
5980 // Compute corresponding vector type for return value and arguments.
5981 Type *RetTy = toVectorizedTy(ScalarRetTy, VF);
5982 for (Type *ScalarTy : ScalarTys)
5983 Tys.push_back(toVectorizedTy(ScalarTy, VF));
5984
5985 // An in-loop reduction using an fmuladd intrinsic is a special case;
5986 // we don't want the normal cost for that intrinsic.
5987      if (RecurrenceDescriptor::isFMulAddIntrinsic(CI))
5988        if (auto RedCost = getReductionPatternCost(CI, VF, RetTy)) {
5989          setCallWideningDecision(CI, VF, CM_IntrinsicCall, nullptr,
5990              getVectorIntrinsicIDForCall(CI, TLI),
5991              std::nullopt, *RedCost);
5992 continue;
5993 }
5994
5995 // Find the cost of vectorizing the call, if we can find a suitable
5996 // vector variant of the function.
5997 VFInfo FuncInfo;
5998 Function *VecFunc = nullptr;
5999 // Search through any available variants for one we can use at this VF.
6000 for (VFInfo &Info : VFDatabase::getMappings(*CI)) {
6001 // Must match requested VF.
6002 if (Info.Shape.VF != VF)
6003 continue;
6004
6005 // Must take a mask argument if one is required
6006 if (MaskRequired && !Info.isMasked())
6007 continue;
6008
6009 // Check that all parameter kinds are supported
6010 bool ParamsOk = true;
6011 for (VFParameter Param : Info.Shape.Parameters) {
6012 switch (Param.ParamKind) {
6013            case VFParamKind::Vector:
6014              break;
6015            case VFParamKind::OMP_Uniform: {
6016              Value *ScalarParam = CI->getArgOperand(Param.ParamPos);
6017 // Make sure the scalar parameter in the loop is invariant.
6018 if (!PSE.getSE()->isLoopInvariant(PSE.getSCEV(ScalarParam),
6019 TheLoop))
6020 ParamsOk = false;
6021 break;
6022 }
6023            case VFParamKind::OMP_Linear: {
6024              Value *ScalarParam = CI->getArgOperand(Param.ParamPos);
6025 // Find the stride for the scalar parameter in this loop and see if
6026 // it matches the stride for the variant.
6027 // TODO: do we need to figure out the cost of an extract to get the
6028 // first lane? Or do we hope that it will be folded away?
6029 ScalarEvolution *SE = PSE.getSE();
6030 if (!match(SE->getSCEV(ScalarParam),
6031                       m_scev_AffineAddRec(
6032                           m_SCEV(), m_scev_SpecificSInt(Param.LinearStepOrPos),
6033                           m_SpecificLoop(TheLoop))))
6034              ParamsOk = false;
6035 break;
6036 }
6037            case VFParamKind::GlobalPredicate:
6038              break;
6039 default:
6040 ParamsOk = false;
6041 break;
6042 }
6043 }
6044
6045 if (!ParamsOk)
6046 continue;
6047
6048 // Found a suitable candidate, stop here.
6049 VecFunc = CI->getModule()->getFunction(Info.VectorName);
6050 FuncInfo = Info;
6051 break;
6052 }
6053
6054 if (TLI && VecFunc && !CI->isNoBuiltin())
6055 VectorCost = TTI.getCallInstrCost(nullptr, RetTy, Tys, CostKind);
6056
6057 // Find the cost of an intrinsic; some targets may have instructions that
6058 // perform the operation without needing an actual call.
6059        Intrinsic::ID IID = getVectorIntrinsicIDForCall(CI, TLI);
6060        if (IID != Intrinsic::not_intrinsic)
6061          IntrinsicCost = getVectorIntrinsicCost(CI, VF);
6062
6063 InstructionCost Cost = ScalarCost;
6064 InstWidening Decision = CM_Scalarize;
6065
6066 if (VectorCost.isValid() && VectorCost <= Cost) {
6067 Cost = VectorCost;
6068 Decision = CM_VectorCall;
6069 }
6070
6071 if (IntrinsicCost.isValid() && IntrinsicCost <= Cost) {
6072          Cost = IntrinsicCost;
6073          Decision = CM_IntrinsicCall;
6074 }
6075
6076 setCallWideningDecision(CI, VF, Decision, VecFunc, IID,
6077                                FuncInfo.getParamIndexForOptionalMask(), Cost);
6078    }
6079 }
6080}
6081
6082bool LoopVectorizationCostModel::shouldConsiderInvariant(Value *Op) {
6083  if (!Legal->isInvariant(Op))
6084 return false;
6085 // Consider Op invariant, if it or its operands aren't predicated
6086 // instruction in the loop. In that case, it is not trivially hoistable.
6087 auto *OpI = dyn_cast<Instruction>(Op);
6088 return !OpI || !TheLoop->contains(OpI) ||
6089 (!isPredicatedInst(OpI) &&
6090 (!isa<PHINode>(OpI) || OpI->getParent() != TheLoop->getHeader()) &&
6091 all_of(OpI->operands(),
6092 [this](Value *Op) { return shouldConsiderInvariant(Op); }));
6093}
6094
6095InstructionCost
6096LoopVectorizationCostModel::getInstructionCost(Instruction *I,
6097                                               ElementCount VF) {
6098 // If we know that this instruction will remain uniform, check the cost of
6099 // the scalar version.
6100  if (isUniformAfterVectorization(I, VF))
6101    VF = ElementCount::getFixed(1);
6102
6103 if (VF.isVector() && isProfitableToScalarize(I, VF))
6104 return InstsToScalarize[VF][I];
6105
6106 // Forced scalars do not have any scalarization overhead.
6107 auto ForcedScalar = ForcedScalars.find(VF);
6108 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
6109 auto InstSet = ForcedScalar->second;
6110 if (InstSet.count(I))
6111      return getInstructionCost(I, ElementCount::getFixed(1)) *
6112             VF.getKnownMinValue();
6113 }
6114
6115 Type *RetTy = I->getType();
6116  if (canTruncateToMinimalBitwidth(I, VF))
6117    RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
6118 auto *SE = PSE.getSE();
6119
6120 Type *VectorTy;
6121 if (isScalarAfterVectorization(I, VF)) {
6122 [[maybe_unused]] auto HasSingleCopyAfterVectorization =
6123 [this](Instruction *I, ElementCount VF) -> bool {
6124 if (VF.isScalar())
6125 return true;
6126
6127 auto Scalarized = InstsToScalarize.find(VF);
6128 assert(Scalarized != InstsToScalarize.end() &&
6129 "VF not yet analyzed for scalarization profitability");
6130 return !Scalarized->second.count(I) &&
6131 llvm::all_of(I->users(), [&](User *U) {
6132 auto *UI = cast<Instruction>(U);
6133 return !Scalarized->second.count(UI);
6134 });
6135 };
6136
6137 // With the exception of GEPs and PHIs, after scalarization there should
6138 // only be one copy of the instruction generated in the loop. This is
6139 // because the VF is either 1, or any instructions that need scalarizing
6140 // have already been dealt with by the time we get here. As a result,
6141 // we don't have to multiply the instruction cost by VF.
6142 assert(I->getOpcode() == Instruction::GetElementPtr ||
6143 I->getOpcode() == Instruction::PHI ||
6144 (I->getOpcode() == Instruction::BitCast &&
6145 I->getType()->isPointerTy()) ||
6146 HasSingleCopyAfterVectorization(I, VF));
6147 VectorTy = RetTy;
6148 } else
6149 VectorTy = toVectorizedTy(RetTy, VF);
6150
6151 if (VF.isVector() && VectorTy->isVectorTy() &&
6152 !TTI.getNumberOfParts(VectorTy))
6153 return InstructionCost::getInvalid();
6154
6155 // TODO: We need to estimate the cost of intrinsic calls.
6156 switch (I->getOpcode()) {
6157 case Instruction::GetElementPtr:
6158 // We mark this instruction as zero-cost because the cost of GEPs in
6159 // vectorized code depends on whether the corresponding memory instruction
6160 // is scalarized or not. Therefore, we handle GEPs with the memory
6161 // instruction cost.
6162 return 0;
6163 case Instruction::UncondBr:
6164 case Instruction::CondBr: {
6165 // In cases of scalarized and predicated instructions, there will be VF
6166 // predicated blocks in the vectorized loop. Each branch around these
6167 // blocks also requires an extract of its vector compare i1 element.
6168 // Note that the conditional branch from the loop latch will be replaced by
6169 // a single branch controlling the loop, so there is no extra overhead from
6170 // scalarization.
6171 bool ScalarPredicatedBB = false;
6172 auto *BI = dyn_cast<CondBrInst>(I);
6173 if (VF.isVector() && BI &&
6174 (PredicatedBBsAfterVectorization[VF].count(BI->getSuccessor(0)) ||
6175 PredicatedBBsAfterVectorization[VF].count(BI->getSuccessor(1))) &&
6176 BI->getParent() != TheLoop->getLoopLatch())
6177 ScalarPredicatedBB = true;
6178
6179 if (ScalarPredicatedBB) {
6180 // Not possible to scalarize scalable vector with predicated instructions.
6181 if (VF.isScalable())
6182 return InstructionCost::getInvalid();
6183 // Return cost for branches around scalarized and predicated blocks.
6184 auto *VecI1Ty =
6185 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
6186 return (TTI.getScalarizationOverhead(
6187 VecI1Ty, APInt::getAllOnes(VF.getFixedValue()),
6188 /*Insert*/ false, /*Extract*/ true, CostKind) +
6189 (TTI.getCFInstrCost(Instruction::CondBr, CostKind) *
6190 VF.getFixedValue()));
6191 }
6192
6193 if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
6194 // The back-edge branch will remain, as will all scalar branches.
6195 return TTI.getCFInstrCost(Instruction::UncondBr, CostKind);
6196
6197 // This branch will be eliminated by if-conversion.
6198 return 0;
6199 // Note: We currently assume zero cost for an unconditional branch inside
6200 // a predicated block since it will become a fall-through, although we
6201 // may decide in the future to call TTI for all branches.
6202 }
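// Editorial example (illustrative, not from the original source): for a fixed
// VF of 4, a branch around a scalarized, predicated block is costed above as
// the extraction of four i1 lanes from the <4 x i1> compare vector plus four
// conditional branches, i.e.
//   getScalarizationOverhead(<4 x i1>) + 4 * getCFInstrCost(CondBr).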
6203 case Instruction::Switch: {
6204 if (VF.isScalar())
6205 return TTI.getCFInstrCost(Instruction::Switch, CostKind);
6206 auto *Switch = cast<SwitchInst>(I);
6207 return Switch->getNumCases() *
6208 TTI.getCmpSelInstrCost(
6209 Instruction::ICmp,
6210 toVectorTy(Switch->getCondition()->getType(), VF),
6211 toVectorTy(Type::getInt1Ty(I->getContext()), VF),
6212 CmpInst::ICMP_EQ, CostKind);
6213 }
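// Editorial example (illustrative): a switch with three cases on an i32
// condition at VF = 4 is costed as three <4 x i32> compares producing
// <4 x i1> masks, mirroring the if-converted lowering of the switch.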
6214 case Instruction::PHI: {
6215 auto *Phi = cast<PHINode>(I);
6216
6217 // First-order recurrences are replaced by vector shuffles inside the loop.
6218 if (VF.isVector() && Legal->isFixedOrderRecurrence(Phi)) {
6219 SmallVector<int> Mask(VF.getKnownMinValue());
6220 std::iota(Mask.begin(), Mask.end(), VF.getKnownMinValue() - 1);
6221 return TTI.getShuffleCost(TargetTransformInfo::SK_Splice,
6222 cast<VectorType>(VectorTy),
6223 cast<VectorType>(VectorTy), Mask, CostKind,
6224 VF.getKnownMinValue() - 1);
6225 }
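// Editorial example (illustrative): for VF = 4 the mask built above is
// <3, 4, 5, 6>, so the recurrence is modelled as a splice combining the last
// lane of the previous iteration's vector with the first three lanes of the
// current one, roughly:
//   %splice = shufflevector <4 x i32> %prev, <4 x i32> %cur,
//                           <4 x i32> <i32 3, i32 4, i32 5, i32 6>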
6226
6227 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
6228 // converted into select instructions. We require N - 1 selects per phi
6229 // node, where N is the number of incoming values.
6230 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) {
6231 Type *ResultTy = Phi->getType();
6232
6233 // All instructions in an Any-of reduction chain are narrowed to bool.
6234 // Check if that is the case for this phi node.
6235 auto *HeaderUser = cast_if_present<PHINode>(
6236 find_singleton<User>(Phi->users(), [this](User *U, bool) -> User * {
6237 auto *Phi = dyn_cast<PHINode>(U);
6238 if (Phi && Phi->getParent() == TheLoop->getHeader())
6239 return Phi;
6240 return nullptr;
6241 }));
6242 if (HeaderUser) {
6243 auto &ReductionVars = Legal->getReductionVars();
6244 auto Iter = ReductionVars.find(HeaderUser);
6245 if (Iter != ReductionVars.end() &&
6246 RecurrenceDescriptor::isAnyOfRecurrenceKind(
6247 Iter->second.getRecurrenceKind()))
6248 ResultTy = Type::getInt1Ty(Phi->getContext());
6249 }
6250 return (Phi->getNumIncomingValues() - 1) *
6251 TTI.getCmpSelInstrCost(
6252 Instruction::Select, toVectorTy(ResultTy, VF),
6253 toVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
6254 CmpInst::BAD_ICMP_PREDICATE, CostKind);
6255 }
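// Editorial example (illustrative): an if-converted phi with three incoming
// values, e.g.
//   %p = phi i32 [ %a, %bb0 ], [ %b, %bb1 ], [ %c, %bb2 ]
// costs two vector selects: one blending %a and %b under the first mask, and
// a second blending that result with %c.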
6256
6257 // When tail folding with EVL, if the phi is part of an out of loop
6258 // reduction then it will be transformed into a wide vp_merge.
6259 if (VF.isVector() && foldTailWithEVL() &&
6260 Legal->getReductionVars().contains(Phi) && !isInLoopReduction(Phi)) {
6261 IntrinsicCostAttributes ICA(
6262 Intrinsic::vp_merge, toVectorTy(Phi->getType(), VF),
6263 {toVectorTy(Type::getInt1Ty(Phi->getContext()), VF)});
6264 return TTI.getIntrinsicInstrCost(ICA, CostKind);
6265 }
6266
6267 return TTI.getCFInstrCost(Instruction::PHI, CostKind);
6268 }
6269 case Instruction::UDiv:
6270 case Instruction::SDiv:
6271 case Instruction::URem:
6272 case Instruction::SRem:
6273 if (VF.isVector() && isPredicatedInst(I)) {
6274 const auto [ScalarCost, SafeDivisorCost] = getDivRemSpeculationCost(I, VF);
6275 return isDivRemScalarWithPredication(ScalarCost, SafeDivisorCost) ?
6276 ScalarCost : SafeDivisorCost;
6277 }
6278 // We've proven all lanes safe to speculate, fall through.
6279 [[fallthrough]];
6280 case Instruction::Add:
6281 case Instruction::Sub: {
6282 auto Info = Legal->getHistogramInfo(I);
6283 if (Info && VF.isVector()) {
6284 const HistogramInfo *HGram = Info.value();
6285 // Assume that a non-constant update value (or a constant != 1) requires
6286 // a multiply, and add that into the cost.
6287 InstructionCost MulCost = TargetTransformInfo::TCC_Free;
6288 ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1));
6289 if (!RHS || RHS->getZExtValue() != 1)
6290 MulCost =
6291 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
6292
6293 // Find the cost of the histogram operation itself.
6294 Type *PtrTy = VectorType::get(HGram->Load->getPointerOperandType(), VF);
6295 Type *ScalarTy = I->getType();
6296 Type *MaskTy = VectorType::get(Type::getInt1Ty(I->getContext()), VF);
6297 IntrinsicCostAttributes ICA(Intrinsic::experimental_vector_histogram_add,
6298 Type::getVoidTy(I->getContext()),
6299 {PtrTy, ScalarTy, MaskTy});
6300
6301 // Add the costs together with the add/sub operation.
6302 return TTI.getIntrinsicInstrCost(ICA, CostKind) + MulCost +
6303 TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, CostKind);
6304 }
6305 [[fallthrough]];
6306 }
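// Editorial example (illustrative, not from the original source): the
// histogram path above covers updates like
//   for (i = 0; i < n; i++) buckets[indices[i]]++;
// whose vector form performs the whole gather-update-scatter via
//   llvm.experimental.vector.histogram.add(<4 x ptr> %bucket.ptrs, i32 1,
//                                          <4 x i1> %mask)
// so the returned cost is the intrinsic plus an optional multiply plus the
// add/sub itself.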
6307 case Instruction::FAdd:
6308 case Instruction::FSub:
6309 case Instruction::Mul:
6310 case Instruction::FMul:
6311 case Instruction::FDiv:
6312 case Instruction::FRem:
6313 case Instruction::Shl:
6314 case Instruction::LShr:
6315 case Instruction::AShr:
6316 case Instruction::And:
6317 case Instruction::Or:
6318 case Instruction::Xor: {
6319 // If we're speculating on the stride being 1, the multiplication may
6320 // fold away. We can generalize this for all operations using the notion
6321 // of neutral elements. (TODO)
6322 if (I->getOpcode() == Instruction::Mul &&
6323 ((TheLoop->isLoopInvariant(I->getOperand(0)) &&
6324 PSE.getSCEV(I->getOperand(0))->isOne()) ||
6325 (TheLoop->isLoopInvariant(I->getOperand(1)) &&
6326 PSE.getSCEV(I->getOperand(1))->isOne())))
6327 return 0;
6328
6329 // Detect reduction patterns
6330 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy))
6331 return *RedCost;
6332
6333 // Certain instructions can be cheaper to vectorize if they have a constant
6334 // second vector operand. One example of this are shifts on x86.
6335 Value *Op2 = I->getOperand(1);
6336 if (!isa<Constant>(Op2) && TheLoop->isLoopInvariant(Op2) &&
6337 PSE.getSE()->isSCEVable(Op2->getType()) &&
6338 isa<SCEVConstant>(PSE.getSCEV(Op2))) {
6339 Op2 = cast<SCEVConstant>(PSE.getSCEV(Op2))->getValue();
6340 }
6341 auto Op2Info = TTI.getOperandInfo(Op2);
6342 if (Op2Info.Kind == TargetTransformInfo::OK_AnyValue &&
6343 Legal->isInvariant(Op2))
6344 Op2Info.Kind = TargetTransformInfo::OK_UniformValue;
6345
6346 SmallVector<const Value *, 4> Operands(I->operand_values());
6347 return TTI.getArithmeticInstrCost(
6348 I->getOpcode(), VectorTy, CostKind,
6349 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6350 Op2Info, Operands, I, TLI);
6351 }
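// Editorial example (illustrative): resolving an invariant operand to its
// SCEV constant above lets targets such as X86 price
//   shl <4 x i32> %x, splat (i32 3)
// as a cheap shift-by-immediate rather than a general vector shift by a
// non-constant amount.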
6352 case Instruction::FNeg: {
6353 return TTI.getArithmeticInstrCost(
6354 I->getOpcode(), VectorTy, CostKind,
6355 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6356 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6357 I->getOperand(0), I);
6358 }
6359 case Instruction::Select: {
6360 SelectInst *SI = cast<SelectInst>(I);
6361 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
6362 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
6363
6364 const Value *Op0, *Op1;
6365 using namespace llvm::PatternMatch;
6366 if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
6367 match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
6368 // select x, y, false --> x & y
6369 // select x, true, y --> x | y
6370 const auto [Op1VK, Op1VP] = TTI::getOperandInfo(Op0);
6371 const auto [Op2VK, Op2VP] = TTI::getOperandInfo(Op1);
6372 assert(Op0->getType()->getScalarSizeInBits() == 1 &&
6373 Op1->getType()->getScalarSizeInBits() == 1);
6374
6375 return TTI.getArithmeticInstrCost(
6376 match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And,
6377 VectorTy, CostKind, {Op1VK, Op1VP}, {Op2VK, Op2VP}, {Op0, Op1}, I);
6378 }
6379
6380 Type *CondTy = SI->getCondition()->getType();
6381 if (!ScalarCond)
6382 CondTy = VectorType::get(CondTy, VF);
6383
6384 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
6385 if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
6386 Pred = Cmp->getPredicate();
6387 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred,
6388 CostKind, {TTI::OK_AnyValue, TTI::OP_None},
6389 {TTI::OK_AnyValue, TTI::OP_None}, I);
6390 }
6391 case Instruction::ICmp:
6392 case Instruction::FCmp: {
6393 Type *ValTy = I->getOperand(0)->getType();
6394
6395 if (canTruncateToMinimalBitwidth(I, VF)) {
6396 [[maybe_unused]] Instruction *Op0AsInstruction =
6397 dyn_cast<Instruction>(I->getOperand(0));
6398 assert((!canTruncateToMinimalBitwidth(Op0AsInstruction, VF) ||
6399 MinBWs[I] == MinBWs[Op0AsInstruction]) &&
6400 "if both the operand and the compare are marked for "
6401 "truncation, they must have the same bitwidth");
6402 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[I]);
6403 }
6404
6405 VectorTy = toVectorTy(ValTy, VF);
6406 return TTI.getCmpSelInstrCost(
6407 I->getOpcode(), VectorTy, CmpInst::makeCmpResultType(VectorTy),
6408 cast<CmpInst>(I)->getPredicate(), CostKind,
6409 {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None}, I);
6410 }
6411 case Instruction::Store:
6412 case Instruction::Load: {
6413 ElementCount Width = VF;
6414 if (Width.isVector()) {
6415 InstWidening Decision = getWideningDecision(I, Width);
6416 assert(Decision != CM_Unknown &&
6417 "CM decision should be taken at this point");
6418 if (getWideningCost(I, VF) == InstructionCost::getInvalid())
6419 return InstructionCost::getInvalid();
6420 if (Decision == CM_Scalarize)
6421 Width = ElementCount::getFixed(1);
6422 }
6423 VectorTy = toVectorTy(getLoadStoreType(I), Width);
6424 return getMemoryInstructionCost(I, VF);
6425 }
6426 case Instruction::BitCast:
6427 if (I->getType()->isPointerTy())
6428 return 0;
6429 [[fallthrough]];
6430 case Instruction::ZExt:
6431 case Instruction::SExt:
6432 case Instruction::FPToUI:
6433 case Instruction::FPToSI:
6434 case Instruction::FPExt:
6435 case Instruction::PtrToInt:
6436 case Instruction::IntToPtr:
6437 case Instruction::SIToFP:
6438 case Instruction::UIToFP:
6439 case Instruction::Trunc:
6440 case Instruction::FPTrunc: {
6441 // Computes the CastContextHint from a Load/Store instruction.
6442 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
6443 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
6444 "Expected a load or a store!");
6445
6446 if (VF.isScalar() || !TheLoop->contains(I))
6447 return TTI::CastContextHint::Normal;
6448
6449 switch (getWideningDecision(I, VF)) {
6450 case LoopVectorizationCostModel::CM_GatherScatter:
6451 return TTI::CastContextHint::GatherScatter;
6452 case LoopVectorizationCostModel::CM_Interleave:
6453 return TTI::CastContextHint::Interleave;
6454 case LoopVectorizationCostModel::CM_Scalarize:
6455 case LoopVectorizationCostModel::CM_Widen:
6456 return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
6457 : TTI::CastContextHint::Normal;
6458 case LoopVectorizationCostModel::CM_Widen_Reverse:
6459 return TTI::CastContextHint::Reversed;
6460 case LoopVectorizationCostModel::CM_Unknown:
6461 llvm_unreachable("Instr did not go through cost modelling?");
6462 case LoopVectorizationCostModel::CM_VectorCall:
6463 case LoopVectorizationCostModel::CM_IntrinsicCall:
6464 llvm_unreachable_internal("Instr has invalid widening decision");
6465 }
6466
6467 llvm_unreachable("Unhandled case!");
6468 };
6469
6470 unsigned Opcode = I->getOpcode();
6471 TTI::CastContextHint CCH = TTI::CastContextHint::None;
6472 // For Trunc, the context is the only user, which must be a StoreInst.
6473 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
6474 if (I->hasOneUse())
6475 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
6476 CCH = ComputeCCH(Store);
6477 }
6478 // For Z/Sext, the context is the operand, which must be a LoadInst.
6479 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
6480 Opcode == Instruction::FPExt) {
6481 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
6482 CCH = ComputeCCH(Load);
6483 }
6484
6485 // We optimize the truncation of induction variables having constant
6486 // integer steps. The cost of these truncations is the same as the scalar
6487 // operation.
6488 if (isOptimizableIVTruncate(I, VF)) {
6489 auto *Trunc = cast<TruncInst>(I);
6490 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
6491 Trunc->getSrcTy(), CCH, CostKind, Trunc);
6492 }
6493
6494 // Detect reduction patterns
6495 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy))
6496 return *RedCost;
6497
6498 Type *SrcScalarTy = I->getOperand(0)->getType();
6499 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
6500 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
6501 SrcScalarTy =
6502 IntegerType::get(SrcScalarTy->getContext(), MinBWs[Op0AsInstruction]);
6503 Type *SrcVecTy =
6504 VectorTy->isVectorTy() ? toVectorTy(SrcScalarTy, VF) : SrcScalarTy;
6505
6506 if (canTruncateToMinimalBitwidth(I, VF)) {
6507 // If the result type is <= the source type, there will be no extend
6508 // after truncating the users to the minimal required bitwidth.
6509 if (VectorTy->getScalarSizeInBits() <= SrcVecTy->getScalarSizeInBits() &&
6510 (I->getOpcode() == Instruction::ZExt ||
6511 I->getOpcode() == Instruction::SExt))
6512 return 0;
6513 }
6514
6515 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
6516 }
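// Editorial example (illustrative, not from the original source): for
//   %l = load i8, ptr %p        ; widening decision: CM_Widen_Reverse
//   %e = sext i8 %l to i32
// the lambda above yields CastContextHint::Reversed, letting the target
// price the extend as part of a reversed vector load sequence.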
6517 case Instruction::Call:
6518 return getVectorCallCost(cast<CallInst>(I), VF);
6519 case Instruction::ExtractValue:
6520 return TTI.getInstructionCost(I, CostKind);
6521 case Instruction::Alloca:
6522 // We cannot easily widen alloca to a scalable alloca, as
6523 // the result would need to be a vector of pointers.
6524 if (VF.isScalable())
6525 return InstructionCost::getInvalid();
6526 return TTI.getArithmeticInstrCost(Instruction::Mul, RetTy, CostKind);
6527 default:
6528 // This opcode is unknown. Assume that it is the same as 'mul'.
6529 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
6530 } // end of switch.
6531}
6532
6533void LoopVectorizationCostModel::collectValuesToIgnore() {
6534 // Ignore ephemeral values.
6535 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
6536
6537 SmallVector<Value *, 4> DeadInterleavePointerOps;
6538 SmallVector<Value *, 4> DeadOps;
6539
6540 // If a scalar epilogue is required, users outside the loop won't use
6541 // live-outs from the vector loop but from the scalar epilogue. Ignore them if
6542 // that is the case.
6543 bool RequiresScalarEpilogue = requiresScalarEpilogue(true);
6544 auto IsLiveOutDead = [this, RequiresScalarEpilogue](User *U) {
6545 return RequiresScalarEpilogue &&
6546 !TheLoop->contains(cast<Instruction>(U)->getParent());
6547 };
6548
6549 LoopBlocksDFS DFS(TheLoop);
6550 DFS.perform(LI);
6551 for (BasicBlock *BB : reverse(make_range(DFS.beginRPO(), DFS.endRPO())))
6552 for (Instruction &I : reverse(*BB)) {
6553 if (VecValuesToIgnore.contains(&I) || ValuesToIgnore.contains(&I))
6554 continue;
6555
6556 // Add instructions that would be trivially dead and are only used by
6557 // values already ignored to DeadOps to seed worklist.
6558 if (wouldInstructionBeTriviallyDead(&I, TLI) &&
6559 all_of(I.users(), [this, IsLiveOutDead](User *U) {
6560 return VecValuesToIgnore.contains(U) ||
6561 ValuesToIgnore.contains(U) || IsLiveOutDead(U);
6562 }))
6563 DeadOps.push_back(&I);
6564
6565 // For interleave groups, we only create a pointer for the start of the
6566 // interleave group. Queue up addresses of group members except the insert
6567 // position for further processing.
6568 if (isAccessInterleaved(&I)) {
6569 auto *Group = getInterleavedAccessGroup(&I);
6570 if (Group->getInsertPos() == &I)
6571 continue;
6572 Value *PointerOp = getLoadStorePointerOperand(&I);
6573 DeadInterleavePointerOps.push_back(PointerOp);
6574 }
6575
6576 // Queue branches for analysis. They are dead, if their successors only
6577 // contain dead instructions.
6578 if (isa<CondBrInst>(&I))
6579 DeadOps.push_back(&I);
6580 }
6581
6582 // Mark ops feeding interleave group members as free, if they are only used
6583 // by other dead computations.
6584 for (unsigned I = 0; I != DeadInterleavePointerOps.size(); ++I) {
6585 auto *Op = dyn_cast<Instruction>(DeadInterleavePointerOps[I]);
6586 if (!Op || !TheLoop->contains(Op) || any_of(Op->users(), [this](User *U) {
6587 Instruction *UI = cast<Instruction>(U);
6588 return !VecValuesToIgnore.contains(U) &&
6589 (!isAccessInterleaved(UI) ||
6590 getInterleavedAccessGroup(UI)->getInsertPos() == UI);
6591 }))
6592 continue;
6593 VecValuesToIgnore.insert(Op);
6594 append_range(DeadInterleavePointerOps, Op->operands());
6595 }
6596
6597 // Mark ops that would be trivially dead and are only used by ignored
6598 // instructions as free.
6599 BasicBlock *Header = TheLoop->getHeader();
6600
6601 // Returns true if the block contains only dead instructions. Such blocks will
6602 // be removed by VPlan-to-VPlan transforms and won't be considered by the
6603 // VPlan-based cost model, so skip them in the legacy cost-model as well.
6604 auto IsEmptyBlock = [this](BasicBlock *BB) {
6605 return all_of(*BB, [this](Instruction &I) {
6606 return ValuesToIgnore.contains(&I) || VecValuesToIgnore.contains(&I) ||
6607 isa<UncondBrInst>(&I);
6608 });
6609 };
6610 for (unsigned I = 0; I != DeadOps.size(); ++I) {
6611 auto *Op = dyn_cast<Instruction>(DeadOps[I]);
6612
6613 // Check if the branch should be considered dead.
6614 if (auto *Br = dyn_cast_or_null<CondBrInst>(Op)) {
6615 BasicBlock *ThenBB = Br->getSuccessor(0);
6616 BasicBlock *ElseBB = Br->getSuccessor(1);
6617 // Don't consider branches leaving the loop for simplification.
6618 if (!TheLoop->contains(ThenBB) || !TheLoop->contains(ElseBB))
6619 continue;
6620 bool ThenEmpty = IsEmptyBlock(ThenBB);
6621 bool ElseEmpty = IsEmptyBlock(ElseBB);
6622 if ((ThenEmpty && ElseEmpty) ||
6623 (ThenEmpty && ThenBB->getSingleSuccessor() == ElseBB &&
6624 ElseBB->phis().empty()) ||
6625 (ElseEmpty && ElseBB->getSingleSuccessor() == ThenBB &&
6626 ThenBB->phis().empty())) {
6627 VecValuesToIgnore.insert(Br);
6628 DeadOps.push_back(Br->getCondition());
6629 }
6630 continue;
6631 }
6632
6633 // Skip any op that shouldn't be considered dead.
6634 if (!Op || !TheLoop->contains(Op) ||
6635 (isa<PHINode>(Op) && Op->getParent() == Header) ||
6636 !wouldInstructionBeTriviallyDead(Op, TLI) ||
6637 any_of(Op->users(), [this, IsLiveOutDead](User *U) {
6638 return !VecValuesToIgnore.contains(U) &&
6639 !ValuesToIgnore.contains(U) && !IsLiveOutDead(U);
6640 }))
6641 continue;
6642
6643 // If all of Op's users are in ValuesToIgnore, add it to ValuesToIgnore
6644 // which applies for both scalar and vector versions. Otherwise it is only
6645 // dead in vector versions, so only add it to VecValuesToIgnore.
6646 if (all_of(Op->users(),
6647 [this](User *U) { return ValuesToIgnore.contains(U); }))
6648 ValuesToIgnore.insert(Op);
6649
6650 VecValuesToIgnore.insert(Op);
6651 append_range(DeadOps, Op->operands());
6652 }
6653
6654 // Ignore type-promoting instructions we identified during reduction
6655 // detection.
6656 for (const auto &Reduction : Legal->getReductionVars()) {
6657 const RecurrenceDescriptor &RedDes = Reduction.second;
6658 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
6659 VecValuesToIgnore.insert_range(Casts);
6660 }
6661 // Ignore type-casting instructions we identified during induction
6662 // detection.
6663 for (const auto &Induction : Legal->getInductionVars()) {
6664 const InductionDescriptor &IndDes = Induction.second;
6665 VecValuesToIgnore.insert_range(IndDes.getCastInsts());
6666 }
6667}
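// Editorial note (illustrative): for an interleave group covering A[2*i] and
// A[2*i+1], only the insert position's address is generated, so the GEP
// feeding the other member is queued above and, when it has no other users,
// marked free for costing purposes.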
6668
6669void LoopVectorizationCostModel::collectInLoopReductions() {
6670 // Avoid duplicating work finding in-loop reductions.
6671 if (!InLoopReductions.empty())
6672 return;
6673
6674 for (const auto &Reduction : Legal->getReductionVars()) {
6675 PHINode *Phi = Reduction.first;
6676 const RecurrenceDescriptor &RdxDesc = Reduction.second;
6677
6678 // Multi-use reductions (e.g., used in FindLastIV patterns) are handled
6679 // separately and should not be considered for in-loop reductions.
6680 if (RdxDesc.hasUsesOutsideReductionChain())
6681 continue;
6682
6683 // We don't collect reductions that are type promoted (yet).
6684 if (RdxDesc.getRecurrenceType() != Phi->getType())
6685 continue;
6686
6687 // In-loop AnyOf and FindIV reductions are not yet supported.
6688 RecurKind Kind = RdxDesc.getRecurrenceKind();
6689 if (RecurrenceDescriptor::isAnyOfRecurrenceKind(Kind) ||
6690 RecurrenceDescriptor::isFindIVRecurrenceKind(Kind))
6692 continue;
6693
6694 // If the target would prefer this reduction to happen "in-loop", then we
6695 // want to record it as such.
6696 if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
6697 !TTI.preferInLoopReduction(Kind, Phi->getType()))
6698 continue;
6699
6700 // Check that we can correctly put the reductions into the loop, by
6701 // finding the chain of operations that leads from the phi to the loop
6702 // exit value.
6703 SmallVector<Instruction *, 4> ReductionOperations =
6704 RdxDesc.getReductionOpChain(Phi, TheLoop);
6705 bool InLoop = !ReductionOperations.empty();
6706
6707 if (InLoop) {
6708 InLoopReductions.insert(Phi);
6709 // Add the elements to InLoopReductionImmediateChains for cost modelling.
6710 Instruction *LastChain = Phi;
6711 for (auto *I : ReductionOperations) {
6712 InLoopReductionImmediateChains[I] = LastChain;
6713 LastChain = I;
6714 }
6715 }
6716 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
6717 << " reduction for phi: " << *Phi << "\n");
6718 }
6719}
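// Editorial example (illustrative, not from the original source): for
//   %red = phi i32 [ 0, %ph ], [ %add, %loop ]
//   %add = add i32 %red, %x
// the chain {%add} is recorded and InLoopReductionImmediateChains maps %add
// back to %red, so the cost model can treat the add as an in-loop reduction
// step rather than a widened vector add.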
6720
6721// This function will select a scalable VF if the target supports scalable
6722// vectors and a fixed one otherwise.
6723// TODO: we could return a pair of values that specify the max VF and
6724// min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
6725// `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment
6726// doesn't have a cost model that can choose which plan to execute if
6727// more than one is generated.
6728static ElementCount determineVPlanVF(const TargetTransformInfo &TTI,
6729 LoopVectorizationCostModel &CM) {
6730 unsigned WidestType;
6731 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
6732
6733 TargetTransformInfo::RegisterKind RegKind =
6734 TTI.enableScalableVectorization()
6735 ? TargetTransformInfo::RGK_ScalableVector
6736 : TargetTransformInfo::RGK_FixedWidthVector;
6737
6738 TypeSize RegSize = TTI.getRegisterBitWidth(RegKind);
6739 unsigned N = RegSize.getKnownMinValue() / WidestType;
6740 return ElementCount::get(N, RegSize.isScalable());
6741}
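// Editorial example (illustrative): with 128-bit vector registers and a
// widest element type of 32 bits, this returns a VF of 4, or vscale x 4 when
// the target enables scalable vectorization and reports a scalable register
// width.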
6742
6743VectorizationFactor
6744LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
6745 ElementCount VF = UserVF;
6746 // Outer loop handling: They may require CFG and instruction level
6747 // transformations before even evaluating whether vectorization is profitable.
6748 // Since we cannot modify the incoming IR, we need to build VPlan upfront in
6749 // the vectorization pipeline.
6750 if (!OrigLoop->isInnermost()) {
6751 // If the user doesn't provide a vectorization factor, determine a
6752 // reasonable one.
6753 if (UserVF.isZero()) {
6754 VF = determineVPlanVF(TTI, CM);
6755 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
6756
6757 // Make sure we have a VF > 1 for stress testing.
6758 if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
6759 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
6760 << "overriding computed VF.\n");
6761 VF = ElementCount::getFixed(4);
6762 }
6763 } else if (UserVF.isScalable() && !TTI.supportsScalableVectors() &&
6764 !ForceTargetSupportsScalableVectors) {
6765 LLVM_DEBUG(dbgs() << "LV: Not vectorizing. Scalable VF requested, but "
6766 << "not supported by the target.\n");
6768 "Scalable vectorization requested but not supported by the target",
6769 "the scalable user-specified vectorization width for outer-loop "
6770 "vectorization cannot be used because the target does not support "
6771 "scalable vectors.",
6772 "ScalableVFUnfeasible", ORE, OrigLoop);
6773 return VectorizationFactor::Disabled();
6774 }
6775 assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
6777 "VF needs to be a power of two");
6778 LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
6779 << "VF " << VF << " to build VPlans.\n");
6780 buildVPlans(VF, VF);
6781
6782 if (VPlans.empty())
6783 return VectorizationFactor::Disabled();
6784
6785 // For VPlan build stress testing, we bail out after VPlan construction.
6786 if (VPlanBuildStressTest)
6787 return VectorizationFactor::Disabled();
6788
6789 return {VF, 0 /*Cost*/, 0 /* ScalarCost */};
6790 }
6791
6792 LLVM_DEBUG(
6793 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
6794 "VPlan-native path.\n");
6795 return VectorizationFactor::Disabled();
6796}
6797
6798void LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
6799 assert(OrigLoop->isInnermost() && "Inner loop expected.");
6800 CM.collectValuesToIgnore();
6801 CM.collectElementTypesForWidening();
6802
6803 FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
6804 if (!MaxFactors) // Cases that should not be vectorized or interleaved.
6805 return;
6806
6807 // Invalidate interleave groups if all blocks of loop will be predicated.
6808 if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) &&
6809 !useMaskedInterleavedAccesses(TTI)) {
6810 LLVM_DEBUG(
6811 dbgs()
6812 << "LV: Invalidate all interleaved groups due to fold-tail by masking "
6813 "which requires masked-interleaved support.\n");
6814 if (CM.InterleaveInfo.invalidateGroups())
6815 // Invalidating interleave groups also requires invalidating all decisions
6816 // based on them, which includes widening decisions and uniform and scalar
6817 // values.
6818 CM.invalidateCostModelingDecisions();
6819 }
6820
6821 if (CM.foldTailByMasking())
6822 Legal->prepareToFoldTailByMasking();
6823
6824 ElementCount MaxUserVF =
6825 UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
6826 if (UserVF) {
6827 if (!ElementCount::isKnownLE(UserVF, MaxUserVF)) {
6829 "UserVF ignored because it may be larger than the maximal safe VF",
6830 "InvalidUserVF", ORE, OrigLoop);
6831 } else {
6833 "VF needs to be a power of two");
6834 // Collect the instructions (and their associated costs) that will be more
6835 // profitable to scalarize.
6836 CM.collectInLoopReductions();
6837 if (CM.selectUserVectorizationFactor(UserVF)) {
6838 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
6839 buildVPlansWithVPRecipes(UserVF, UserVF);
6840 LLVM_DEBUG(printPlans(dbgs()));
6841 return;
6842 }
6843 reportVectorizationInfo("UserVF ignored because of invalid costs.",
6844 "InvalidCost", ORE, OrigLoop);
6845 }
6846 }
6847
6848 // Collect the Vectorization Factor Candidates.
6849 SmallVector<ElementCount> VFCandidates;
6850 for (auto VF = ElementCount::getFixed(1);
6851 ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
6852 VFCandidates.push_back(VF);
6853 for (auto VF = ElementCount::getScalable(1);
6854 ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
6855 VFCandidates.push_back(VF);
6856
6857 CM.collectInLoopReductions();
6858 for (const auto &VF : VFCandidates) {
6859 // Collect Uniform and Scalar instructions after vectorization with VF.
6860 CM.collectNonVectorizedAndSetWideningDecisions(VF);
6861 }
6862
6863 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
6864 buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);
6865
6866 LLVM_DEBUG(printPlans(dbgs()));
6867}
6868
6869InstructionCost VPCostContext::getLegacyCost(Instruction *UI,
6870 ElementCount VF) const {
6871 InstructionCost Cost = CM.getInstructionCost(UI, VF);
6872 if (Cost.isValid() && ForceTargetInstructionCost.getNumOccurrences())
6873 return InstructionCost(ForceTargetInstructionCost.getValue());
6874 return Cost;
6875}
6876
6877bool VPCostContext::isLegacyUniformAfterVectorization(Instruction *I,
6878 ElementCount VF) const {
6879 return CM.isUniformAfterVectorization(I, VF);
6880}
6881
6882bool VPCostContext::skipCostComputation(Instruction *UI, bool IsVector) const {
6883 return CM.ValuesToIgnore.contains(UI) ||
6884 (IsVector && CM.VecValuesToIgnore.contains(UI)) ||
6885 SkipCostComputation.contains(UI);
6886}
6887
6888unsigned VPCostContext::getPredBlockCostDivisor(BasicBlock *BB) const {
6889 return CM.getPredBlockCostDivisor(CostKind, BB);
6890}
6891
6892InstructionCost
6893LoopVectorizationPlanner::precomputeCosts(VPlan &Plan, ElementCount VF,
6894 VPCostContext &CostCtx) const {
6895 InstructionCost Cost;
6896 // Cost modeling for inductions is inaccurate in the legacy cost model
6897 // compared to the recipes that are generated. To match here initially during
6898 // VPlan cost model bring up directly use the induction costs from the legacy
6899 // cost model. Note that we do this as pre-processing; the VPlan may not have
6900 // any recipes associated with the original induction increment instruction
6901 // and may replace truncates with VPWidenIntOrFpInductionRecipe. We precompute
6902 // the cost of induction phis and increments (both that are represented by
6903 // recipes and those that are not), to avoid distinguishing between them here,
6904 // and skip all recipes that represent induction phis and increments (the
6905 // former case) later on, if they exist, to avoid counting them twice.
6906 // Similarly we pre-compute the cost of any optimized truncates.
6907 // TODO: Switch to more accurate costing based on VPlan.
6908 for (const auto &[IV, IndDesc] : Legal->getInductionVars()) {
6909 Instruction *IVInc = cast<Instruction>(
6910 IV->getIncomingValueForBlock(OrigLoop->getLoopLatch()));
6911 SmallVector<Instruction *> IVInsts = {IVInc};
6912 for (unsigned I = 0; I != IVInsts.size(); I++) {
6913 for (Value *Op : IVInsts[I]->operands()) {
6914 auto *OpI = dyn_cast<Instruction>(Op);
6915 if (Op == IV || !OpI || !OrigLoop->contains(OpI) || !Op->hasOneUse())
6916 continue;
6917 IVInsts.push_back(OpI);
6918 }
6919 }
6920 IVInsts.push_back(IV);
6921 for (User *U : IV->users()) {
6922 auto *CI = cast<Instruction>(U);
6923 if (!CostCtx.CM.isOptimizableIVTruncate(CI, VF))
6924 continue;
6925 IVInsts.push_back(CI);
6926 }
6927
6928 // If the vector loop gets executed exactly once with the given VF, ignore
6929 // the costs of comparison and induction instructions, as they'll get
6930 // simplified away.
6931 // TODO: Remove this code after stepping away from the legacy cost model and
6932 // adding code to simplify VPlans before calculating their costs.
6933 auto TC = getSmallConstantTripCount(PSE.getSE(), OrigLoop);
6934 if (TC == VF && !CM.foldTailByMasking())
6935 addFullyUnrolledInstructionsToIgnore(OrigLoop, Legal->getInductionVars(),
6936 CostCtx.SkipCostComputation);
6937
6938 for (Instruction *IVInst : IVInsts) {
6939 if (CostCtx.skipCostComputation(IVInst, VF.isVector()))
6940 continue;
6941 InstructionCost InductionCost = CostCtx.getLegacyCost(IVInst, VF);
6942 LLVM_DEBUG({
6943 dbgs() << "Cost of " << InductionCost << " for VF " << VF
6944 << ": induction instruction " << *IVInst << "\n";
6945 });
6946 Cost += InductionCost;
6947 CostCtx.SkipCostComputation.insert(IVInst);
6948 }
6949 }
6950
6951 /// Compute the cost of all exiting conditions of the loop using the legacy
6952 /// cost model. This is to match the legacy behavior, which adds the cost of
6953 /// all exit conditions. Note that this over-estimates the cost, as there will
6954 /// be a single condition to control the vector loop.
6955 SmallVector<BasicBlock *> Exiting;
6956 CM.TheLoop->getExitingBlocks(Exiting);
6957 SetVector<Instruction *> ExitInstrs;
6958 // Collect all exit conditions.
6959 for (BasicBlock *EB : Exiting) {
6960 auto *Term = dyn_cast<CondBrInst>(EB->getTerminator());
6961 if (!Term || CostCtx.skipCostComputation(Term, VF.isVector()))
6962 continue;
6963 if (auto *CondI = dyn_cast<Instruction>(Term->getOperand(0))) {
6964 ExitInstrs.insert(CondI);
6965 }
6966 }
6967 // Compute the cost of all instructions only feeding the exit conditions.
6968 for (unsigned I = 0; I != ExitInstrs.size(); ++I) {
6969 Instruction *CondI = ExitInstrs[I];
6970 if (!OrigLoop->contains(CondI) ||
6971 !CostCtx.SkipCostComputation.insert(CondI).second)
6972 continue;
6973 InstructionCost CondICost = CostCtx.getLegacyCost(CondI, VF);
6974 LLVM_DEBUG({
6975 dbgs() << "Cost of " << CondICost << " for VF " << VF
6976 << ": exit condition instruction " << *CondI << "\n";
6977 });
6978 Cost += CondICost;
6979 for (Value *Op : CondI->operands()) {
6980 auto *OpI = dyn_cast<Instruction>(Op);
6981 if (!OpI || CostCtx.skipCostComputation(OpI, VF.isVector()) ||
6982 any_of(OpI->users(), [&ExitInstrs](User *U) {
6983 return !ExitInstrs.contains(cast<Instruction>(U));
6984 }))
6985 continue;
6986 ExitInstrs.insert(OpI);
6987 }
6988 }
6989
6990 // Pre-compute the costs for branches except for the backedge, as the number
6991 // of replicate regions in a VPlan may not directly match the number of
6992 // branches, which would lead to different decisions.
6993 // TODO: Compute cost of branches for each replicate region in the VPlan,
6994 // which is more accurate than the legacy cost model.
6995 for (BasicBlock *BB : OrigLoop->blocks()) {
6996 if (CostCtx.skipCostComputation(BB->getTerminator(), VF.isVector()))
6997 continue;
6998 CostCtx.SkipCostComputation.insert(BB->getTerminator());
6999 if (BB == OrigLoop->getLoopLatch())
7000 continue;
7001 auto BranchCost = CostCtx.getLegacyCost(BB->getTerminator(), VF);
7002 Cost += BranchCost;
7003 }
7004
7005 // Don't apply special costs when instruction cost is forced to make sure the
7006 // forced cost is used for each recipe.
7007 if (ForceTargetInstructionCost.getNumOccurrences())
7008 return Cost;
7009
7010 // Pre-compute costs for instructions that are forced-scalar or profitable to
7011 // scalarize. Their costs will be computed separately in the legacy cost
7012 // model.
7013 for (Instruction *ForcedScalar : CM.ForcedScalars[VF]) {
7014 if (CostCtx.skipCostComputation(ForcedScalar, VF.isVector()))
7015 continue;
7016 CostCtx.SkipCostComputation.insert(ForcedScalar);
7017 InstructionCost ForcedCost = CostCtx.getLegacyCost(ForcedScalar, VF);
7018 LLVM_DEBUG({
7019 dbgs() << "Cost of " << ForcedCost << " for VF " << VF
7020 << ": forced scalar " << *ForcedScalar << "\n";
7021 });
7022 Cost += ForcedCost;
7023 }
7024 for (const auto &[Scalarized, ScalarCost] : CM.InstsToScalarize[VF]) {
7025 if (CostCtx.skipCostComputation(Scalarized, VF.isVector()))
7026 continue;
7027 CostCtx.SkipCostComputation.insert(Scalarized);
7028 LLVM_DEBUG({
7029 dbgs() << "Cost of " << ScalarCost << " for VF " << VF
7030 << ": profitable to scalarize " << *Scalarized << "\n";
7031 });
7032 Cost += ScalarCost;
7033 }
7034
7035 return Cost;
7036}
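// Editorial example (illustrative, not from the original source): for a
// counted loop `for (i = 0; i < n; ++i)`, the induction phi %i, its
// increment, and the latch compare are costed above with the legacy model
// and recorded in SkipCostComputation, so the later recipe walk in
// VPlan::cost does not count them a second time.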
7037
7038InstructionCost LoopVectorizationPlanner::cost(VPlan &Plan, ElementCount VF,
7039 VPRegisterUsage *RU) const {
7040 VPCostContext CostCtx(CM.TTI, *CM.TLI, Plan, CM, CM.CostKind, PSE, OrigLoop);
7041 InstructionCost Cost = precomputeCosts(Plan, VF, CostCtx);
7042
7043 // Now compute and add the VPlan-based cost.
7044 Cost += Plan.cost(VF, CostCtx);
7045
7046 // Add the cost of spills due to excess register usage
7047 if (CM.shouldConsiderRegPressureForVF(VF))
7048 Cost += RU->spillCost(CostCtx, ForceTargetNumVectorRegs);
7049
7050#ifndef NDEBUG
7051 unsigned EstimatedWidth = estimateElementCount(VF, CM.getVScaleForTuning());
7052 LLVM_DEBUG(dbgs() << "Cost for VF " << VF << ": " << Cost
7053 << " (Estimated cost per lane: ");
7054 if (Cost.isValid()) {
7055 double CostPerLane = double(Cost.getValue()) / EstimatedWidth;
7056 LLVM_DEBUG(dbgs() << format("%.1f", CostPerLane));
7057 } else /* No point dividing an invalid cost - it will still be invalid */
7058 LLVM_DEBUG(dbgs() << "Invalid");
7059 LLVM_DEBUG(dbgs() << ")\n");
7060#endif
7061 return Cost;
7062}
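// Editorial example (illustrative): a total cost of 8 at VF = vscale x 4
// with a tuning value of vscale = 2 is printed above as an estimated width
// of 8 lanes and a per-lane cost of 8 / (4 * 2) = 1.0.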
7063
7064#ifndef NDEBUG
7065/// Return true if the original loop \p TheLoop contains any instructions that do
7066/// not have corresponding recipes in \p Plan and are not marked to be ignored
7067/// in \p CostCtx. This means the VPlan contains simplification that the legacy
7068/// cost-model did not account for.
7069static bool planContainsAdditionalSimplifications(VPlan &Plan,
7070 VPCostContext &CostCtx,
7071 Loop *TheLoop,
7072 ElementCount VF) {
7073 using namespace VPlanPatternMatch;
7074 // First collect all instructions for the recipes in Plan.
7075 auto GetInstructionForCost = [](const VPRecipeBase *R) -> Instruction * {
7076 if (auto *S = dyn_cast<VPSingleDefRecipe>(R))
7077 return dyn_cast_or_null<Instruction>(S->getUnderlyingValue());
7078 if (auto *WidenMem = dyn_cast<VPWidenMemoryRecipe>(R))
7079 return &WidenMem->getIngredient();
7080 return nullptr;
7081 };
7082
7083 // Check if a select for a safe divisor was hoisted to the pre-header. If so,
7084 // the select doesn't need to be considered for the vector loop cost; go with
7085 // the more accurate VPlan-based cost model.
7086 for (VPRecipeBase &R : *Plan.getVectorPreheader()) {
7087 auto *VPI = dyn_cast<VPInstruction>(&R);
7088 if (!VPI || VPI->getOpcode() != Instruction::Select)
7089 continue;
7090
7091 if (auto *WR = dyn_cast_or_null<VPWidenRecipe>(VPI->getSingleUser())) {
7092 switch (WR->getOpcode()) {
7093 case Instruction::UDiv:
7094 case Instruction::SDiv:
7095 case Instruction::URem:
7096 case Instruction::SRem:
7097 return true;
7098 default:
7099 break;
7100 }
7101 }
7102 }
7103
7104 DenseSet<Instruction *> SeenInstrs;
7105 auto Iter = vp_depth_first_deep(Plan.getVectorLoopRegion()->getEntry());
7106 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
7107 for (VPRecipeBase &R : *VPBB) {
7108 if (auto *IR = dyn_cast<VPInterleaveRecipe>(&R)) {
7109 auto *IG = IR->getInterleaveGroup();
7110 unsigned NumMembers = IG->getNumMembers();
7111 for (unsigned I = 0; I != NumMembers; ++I) {
7112 if (Instruction *M = IG->getMember(I))
7113 SeenInstrs.insert(M);
7114 }
7115 continue;
7116 }
7117 // Unused FOR splices are removed by VPlan transforms, so the VPlan-based
7118 // cost model won't cost it whilst the legacy will.
7119 if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R)) {
7120 if (none_of(FOR->users(),
7121 match_fn(m_VPInstruction<
7122 VPInstruction::FirstOrderRecurrenceSplice>())))
7123 return true;
7124 }
7125 // The VPlan-based cost model is more accurate for partial reductions and
7126 // comparing against the legacy cost isn't desirable.
7127 if (auto *VPR = dyn_cast<VPReductionRecipe>(&R))
7128 if (VPR->isPartialReduction())
7129 return true;
7130
7131 // The VPlan-based cost model can analyze if recipes are scalar
7132 // recursively, but the legacy cost model cannot.
7133 if (auto *WidenMemR = dyn_cast<VPWidenMemoryRecipe>(&R)) {
7134 auto *AddrI = dyn_cast<Instruction>(
7135 getLoadStorePointerOperand(&WidenMemR->getIngredient()));
7136 if (AddrI && vputils::isSingleScalar(WidenMemR->getAddr()) !=
7137 CostCtx.isLegacyUniformAfterVectorization(AddrI, VF))
7138 return true;
7139
7140 if (WidenMemR->isReverse()) {
7141 // If the stored value of a reverse store is invariant, LICM will
7142 // hoist the reverse operation to the preheader. In this case, the
7143 // result of the VPlan-based cost model will diverge from that of
7144 // the legacy model.
7145 if (auto *StoreR = dyn_cast<VPWidenStoreRecipe>(WidenMemR))
7146 if (StoreR->getStoredValue()->isDefinedOutsideLoopRegions())
7147 return true;
7148
7149 if (auto *StoreR = dyn_cast<VPWidenStoreEVLRecipe>(WidenMemR))
7150 if (StoreR->getStoredValue()->isDefinedOutsideLoopRegions())
7151 return true;
7152 }
7153 }
7154
7155 // The legacy cost model costs non-header phis with a scalar VF as a phi,
7156 // but scalar unrolled VPlans will have VPBlendRecipes which emit selects.
7157 if (isa<VPBlendRecipe>(&R) &&
7158 vputils::onlyFirstLaneUsed(R.getVPSingleValue()))
7159 return true;
7160
7161 // The legacy cost model won't calculate the cost of the LogicalAnd which
7162 // will be replaced with vp_merge.
7163 if (match(&R, m_VPInstruction<VPInstruction::LogicalAnd>()))
7164 return true;
7165
7166 /// If a VPlan transform folded a recipe to one producing a single-scalar,
7167 /// but the original instruction wasn't uniform-after-vectorization in the
7168 /// legacy cost model, the legacy cost overestimates the actual cost.
7169 if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R)) {
7170 if (RepR->isSingleScalar() &&
7171 !CostCtx.isLegacyUniformAfterVectorization(
7172 RepR->getUnderlyingInstr(), VF))
7173 return true;
7174 }
7175 if (Instruction *UI = GetInstructionForCost(&R)) {
7176 // If we adjusted the predicate of the recipe, the cost in the legacy
7177 // cost model may be different.
7178 CmpPredicate Pred;
7179 if (match(&R, m_Cmp(Pred, m_VPValue(), m_VPValue())) &&
7180 cast<VPRecipeWithIRFlags>(R).getPredicate() !=
7181 cast<CmpInst>(UI)->getPredicate())
7182 return true;
7183
7184 // Recipes with underlying instructions being moved out of the loop
7185 // region by LICM may cause discrepancies between the legacy cost model
7186 // and the VPlan-based cost model.
7187 if (!VPBB->getEnclosingLoopRegion())
7188 return true;
7189
7190 SeenInstrs.insert(UI);
7191 }
7192 }
7193 }
7194
7195 // If a reverse recipe has been sunk to the middle block (e.g., for a load
7196 // whose result is only used as a live-out), VPlan avoids the per-iteration
7197 // reverse shuffle cost that the legacy model accounts for.
7198 if (any_of(*Plan.getMiddleBlock(), [](const VPRecipeBase &R) {
7199 return match(&R, m_VPInstruction<VPInstruction::Reverse>());
7200 }))
7201 return true;
7202
7203 // Return true if the loop contains any instructions that are not also part of
7204 // the VPlan or are skipped for VPlan-based cost computations. This indicates
7205 // that the VPlan contains extra simplifications.
7206 return any_of(TheLoop->blocks(), [&SeenInstrs, &CostCtx,
7207 TheLoop](BasicBlock *BB) {
7208 return any_of(*BB, [&SeenInstrs, &CostCtx, TheLoop, BB](Instruction &I) {
7209 // Skip induction phis when checking for simplifications, as they may not
7210 // be lowered directly to a corresponding PHI recipe.
7211 if (isa<PHINode>(&I) && BB == TheLoop->getHeader() &&
7212 CostCtx.CM.Legal->isInductionPhi(cast<PHINode>(&I)))
7213 return false;
7214 return !SeenInstrs.contains(&I) && !CostCtx.skipCostComputation(&I, true);
7215 });
7216 });
7217}
7218#endif
7219
7220VectorizationFactor LoopVectorizationPlanner::computeBestVF() {
7221 if (VPlans.empty())
7222 return VectorizationFactor::Disabled();
7223 // If there is a single VPlan with a single VF, return it directly.
7224 VPlan &FirstPlan = *VPlans[0];
7225 if (VPlans.size() == 1 && size(FirstPlan.vectorFactors()) == 1)
7226 return {*FirstPlan.vectorFactors().begin(), 0, 0};
7227
7228 LLVM_DEBUG(dbgs() << "LV: Computing best VF using cost kind: "
7229 << (CM.CostKind == TTI::TCK_RecipThroughput
7230 ? "Reciprocal Throughput\n"
7231 : CM.CostKind == TTI::TCK_Latency
7232 ? "Instruction Latency\n"
7233 : CM.CostKind == TTI::TCK_CodeSize ? "Code Size\n"
7234 : CM.CostKind == TTI::TCK_SizeAndLatency
7235 ? "Code Size and Latency\n"
7236 : "Unknown\n"));
7237
7238 ElementCount ScalarVF = ElementCount::getFixed(1);
7239 assert(hasPlanWithVF(ScalarVF) &&
7240 "More than a single plan/VF w/o any plan having scalar VF");
7241
7242 // TODO: Compute scalar cost using VPlan-based cost model.
7243 InstructionCost ScalarCost = CM.expectedCost(ScalarVF);
7244 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ScalarCost << ".\n");
7245 VectorizationFactor ScalarFactor(ScalarVF, ScalarCost, ScalarCost);
7246 VectorizationFactor BestFactor = ScalarFactor;
7247
7248 bool ForceVectorization = Hints.getForce() == LoopVectorizeHints::FK_Enabled;
7249 if (ForceVectorization) {
7250 // Ignore scalar width, because the user explicitly wants vectorization.
7251 // Initialize cost to max so that VF = 2 is, at least, chosen during cost
7252 // evaluation.
7253 BestFactor.Cost = InstructionCost::getMax();
7254 }
7255
7256 for (auto &P : VPlans) {
7257 ArrayRef<ElementCount> VFs(P->vectorFactors().begin(),
7258 P->vectorFactors().end());
7259
7260 SmallVector<VPRegisterUsage, 8> RUs;
7261 bool ConsiderRegPressure = any_of(VFs, [this](ElementCount VF) {
7262 return CM.shouldConsiderRegPressureForVF(VF);
7263 });
7264 if (ConsiderRegPressure)
7265 RUs = calculateRegisterUsageForPlan(*P, VFs, TTI, CM.ValuesToIgnore);
7266
7267 for (unsigned I = 0; I < VFs.size(); I++) {
7268 ElementCount VF = VFs[I];
7269 if (VF.isScalar())
7270 continue;
7271 if (!ForceVectorization && !willGenerateVectors(*P, VF, TTI)) {
7272 LLVM_DEBUG(
7273 dbgs()
7274 << "LV: Not considering vector loop of width " << VF
7275 << " because it will not generate any vector instructions.\n");
7276 continue;
7277 }
7278 if (CM.OptForSize && !ForceVectorization && hasReplicatorRegion(*P)) {
7279 LLVM_DEBUG(
7280 dbgs()
7281 << "LV: Not considering vector loop of width " << VF
7282 << " because it would cause replicated blocks to be generated,"
7283 << " which isn't allowed when optimizing for size.\n");
7284 continue;
7285 }
7286
7287 InstructionCost Cost =
7288 cost(*P, VF, ConsiderRegPressure ? &RUs[I] : nullptr);
7289 VectorizationFactor CurrentFactor(VF, Cost, ScalarCost);
7290
7291 if (isMoreProfitable(CurrentFactor, BestFactor, P->hasScalarTail()))
7292 BestFactor = CurrentFactor;
7293
7294 // If profitable add it to ProfitableVF list.
7295 if (isMoreProfitable(CurrentFactor, ScalarFactor, P->hasScalarTail()))
7296 ProfitableVFs.push_back(CurrentFactor);
7297 }
7298 }
7299
7300#ifndef NDEBUG
7301 // Select the optimal vectorization factor according to the legacy cost-model.
7302 // This is now only used to verify the decisions by the new VPlan-based
7303 // cost-model and will be retired once the VPlan-based cost-model is
7304 // stabilized.
7305 VectorizationFactor LegacyVF = selectVectorizationFactor();
7306 VPlan &BestPlan = getPlanFor(BestFactor.Width);
7307
7308 // Pre-compute the cost and use it to check if BestPlan contains any
7309 // simplifications not accounted for in the legacy cost model. If that's the
7310 // case, don't trigger the assertion, as the extra simplifications may cause a
7311 // different VF to be picked by the VPlan-based cost model.
7312 VPCostContext CostCtx(CM.TTI, *CM.TLI, BestPlan, CM, CM.CostKind, CM.PSE,
7313 OrigLoop);
7314 precomputeCosts(BestPlan, BestFactor.Width, CostCtx);
7315 // Verify that the VPlan-based and legacy cost models agree, except for
7316 // * VPlans with early exits,
7317 // * VPlans with additional VPlan simplifications,
7318 // * EVL-based VPlans with gather/scatters (the VPlan-based cost model uses
7319 // vp_scatter/vp_gather).
7320 // The legacy cost model doesn't properly model costs for such loops.
7321 bool UsesEVLGatherScatter =
7322 any_of(VPBlockUtils::blocksOnly<VPBasicBlock>(vp_depth_first_deep(
7323 BestPlan.getVectorLoopRegion()->getEntry())),
7324 [](VPBasicBlock *VPBB) {
7325 return any_of(*VPBB, [](VPRecipeBase &R) {
7326 return isa<VPWidenLoadEVLRecipe, VPWidenStoreEVLRecipe>(&R) &&
7327 !cast<VPWidenMemoryRecipe>(&R)->isConsecutive();
7328 });
7329 });
7330 assert(
7331 (BestFactor.Width == LegacyVF.Width || BestPlan.hasEarlyExit() ||
7332 !Legal->getLAI()->getSymbolicStrides().empty() || UsesEVLGatherScatter ||
7333 planContainsAdditionalSimplifications(
7334 getPlanFor(BestFactor.Width), CostCtx, OrigLoop, BestFactor.Width) ||
7335 planContainsAdditionalSimplifications(
7336 getPlanFor(LegacyVF.Width), CostCtx, OrigLoop, LegacyVF.Width)) &&
7337 " VPlan cost model and legacy cost model disagreed");
7338 assert((BestFactor.Width.isScalar() || BestFactor.ScalarCost > 0) &&
7339 "when vectorizing, the scalar cost must be computed.");
7340#endif
7341
7342 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << BestFactor.Width << ".\n");
7343 return BestFactor;
7344}
7345
7346// If \p EpiResumePhiR is the resume VPPhi for a reduction when vectorizing
7347// the epilogue loop, fix the reduction's scalar PHI node by adding the
7348// incoming value from the main vector loop.
7349static void fixReductionScalarResumeWhenVectorizingEpilog(
7350 VPPhi *EpiResumePhiR, PHINode &EpiResumePhi, BasicBlock *BypassBlock) {
7351 using namespace VPlanPatternMatch;
7352 // Get the VPInstruction computing the reduction result in the middle block.
7353 // The first operand may not be from the middle block if it is not connected
7354 // to the scalar preheader. In that case, there's nothing to fix.
7355 VPValue *Incoming = EpiResumePhiR->getOperand(0);
7356 match(Incoming, VPlanPatternMatch::m_ZExtOrSExt(
7357 VPlanPatternMatch::m_VPValue(Incoming)));
7358 auto *EpiRedResult = dyn_cast<VPInstruction>(Incoming);
7359 if (!EpiRedResult)
7360 return;
7361
7362 VPValue *BackedgeVal;
7363 bool IsFindIV = false;
7364 if (EpiRedResult->getOpcode() == VPInstruction::ComputeAnyOfResult ||
7365 EpiRedResult->getOpcode() == VPInstruction::ComputeReductionResult)
7366 BackedgeVal = EpiRedResult->getOperand(EpiRedResult->getNumOperands() - 1);
7367 else if (matchFindIVResult(EpiRedResult, m_VPValue(BackedgeVal), m_VPValue()))
7368 IsFindIV = true;
7369 else
7370 return;
7371
7372 auto *EpiRedHeaderPhi = cast_if_present<VPReductionPHIRecipe>(
7373 BackedgeVal->getDefiningRecipe());
7374 if (!EpiRedHeaderPhi) {
7375 match(BackedgeVal,
7377 VPlanPatternMatch::m_VPValue(BackedgeVal),
7379 EpiRedHeaderPhi = cast<VPReductionPHIRecipe>(
7380 BackedgeVal->getDefiningRecipe());
7381 }
7382
7383 Value *MainResumeValue;
7384 if (auto *VPI = dyn_cast<VPInstruction>(EpiRedHeaderPhi->getStartValue())) {
7385 assert((VPI->getOpcode() == VPInstruction::Broadcast ||
7386 VPI->getOpcode() == VPInstruction::ReductionStartVector) &&
7387 "unexpected start recipe");
7388 MainResumeValue = VPI->getOperand(0)->getUnderlyingValue();
7389 } else
7390 MainResumeValue = EpiRedHeaderPhi->getStartValue()->getUnderlyingValue();
7391 if (EpiRedResult->getOpcode() == VPInstruction::ComputeAnyOfResult) {
7392 [[maybe_unused]] Value *StartV =
7393 EpiRedResult->getOperand(0)->getLiveInIRValue();
7394 auto *Cmp = cast<ICmpInst>(MainResumeValue);
7395 assert(Cmp->getPredicate() == CmpInst::ICMP_NE &&
7396 "AnyOf expected to start with ICMP_NE");
7397 assert(Cmp->getOperand(1) == StartV &&
7398 "AnyOf expected to start by comparing main resume value to original "
7399 "start value");
7400 MainResumeValue = Cmp->getOperand(0);
7401 } else if (IsFindIV) {
7402 MainResumeValue = cast<SelectInst>(MainResumeValue)->getFalseValue();
7403 }
7404 PHINode *MainResumePhi = cast<PHINode>(MainResumeValue);
7405
7406 // When fixing reductions in the epilogue loop we should already have
7407 // created a bc.merge.rdx Phi after the main vector body. Ensure that we carry
7408 // over the incoming values correctly.
7409 EpiResumePhi.setIncomingValueForBlock(
7410 BypassBlock, MainResumePhi->getIncomingValueForBlock(BypassBlock));
7411}
7412
7413DenseMap<const SCEV *, Value *> LoopVectorizationPlanner::executePlan(
7414 ElementCount BestVF, unsigned BestUF, VPlan &BestVPlan,
7415 InnerLoopVectorizer &ILV, DominatorTree *DT, bool VectorizingEpilogue) {
7416 assert(BestVPlan.hasVF(BestVF) &&
7417 "Trying to execute plan with unsupported VF");
7418 assert(BestVPlan.hasUF(BestUF) &&
7419 "Trying to execute plan with unsupported UF");
7420 if (BestVPlan.hasEarlyExit())
7421 ++LoopsEarlyExitVectorized;
7422 // TODO: Move to VPlan transform stage once the transition to the VPlan-based
7423 // cost model is complete for better cost estimates.
7424 RUN_VPLAN_PASS(VPlanTransforms::unrollByUF, BestVPlan, BestUF);
7428 bool HasBranchWeights =
7429 hasBranchWeightMD(*OrigLoop->getLoopLatch()->getTerminator());
7430 if (HasBranchWeights) {
7431 std::optional<unsigned> VScale = CM.getVScaleForTuning();
7432 RUN_VPLAN_PASS(VPlanTransforms::addBranchWeightToMiddleTerminator,
7433 BestVPlan, BestVF, VScale);
7434 }
7435
7436 // Checks are the same for all VPlans, added to BestVPlan only for
7437 // compactness.
7438 attachRuntimeChecks(BestVPlan, ILV.RTChecks, HasBranchWeights);
7439
7440 // Retrieving VectorPH now when it's easier while VPlan still has Regions.
7441 VPBasicBlock *VectorPH = cast<VPBasicBlock>(BestVPlan.getVectorPreheader());
7442
7443 VPlanTransforms::optimizeForVFAndUF(BestVPlan, BestVF, BestUF, PSE);
7446 if (BestVPlan.getEntry()->getSingleSuccessor() ==
7447 BestVPlan.getScalarPreheader()) {
7448 // TODO: The vector loop would be dead, should not even try to vectorize.
7449 ORE->emit([&]() {
7450 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationDead",
7451 OrigLoop->getStartLoc(),
7452 OrigLoop->getHeader())
7453 << "Created vector loop never executes due to insufficient trip "
7454 "count.";
7455 });
7457 }
7458
7460
7462 // Convert the exit condition to AVLNext == 0 for EVL tail folded loops.
7463 RUN_VPLAN_PASS(VPlanTransforms::canonicalizeEVLLoops, BestVPlan);
7464 // Regions are dissolved after optimizing for VF and UF, which completely
7465 // removes unneeded loop regions first.
7466 RUN_VPLAN_PASS(VPlanTransforms::dissolveLoopRegions, BestVPlan);
7467 // Expand BranchOnTwoConds after dissolution, when latch has direct access to
7468 // its successors.
7470 // Convert loops with variable-length stepping after regions are dissolved.
7474 BestVPlan, VectorPH, CM.foldTailByMasking(),
7475 CM.requiresScalarEpilogue(BestVF.isVector()), &BestVPlan.getVFxUF());
7476 VPlanTransforms::materializeFactors(BestVPlan, VectorPH, BestVF);
7477 VPlanTransforms::cse(BestVPlan);
7479
7480 // 0. Generate SCEV-dependent code in the entry, including TripCount, before
7481 // making any changes to the CFG.
7482 DenseMap<const SCEV *, Value *> ExpandedSCEVs =
7483 VPlanTransforms::expandSCEVs(BestVPlan, *PSE.getSE());
7484 if (!ILV.getTripCount()) {
7485 ILV.setTripCount(BestVPlan.getTripCount()->getLiveInIRValue());
7486 } else {
7487 assert(VectorizingEpilogue && "should only re-use the existing trip "
7488 "count during epilogue vectorization");
7489 }
7490
7491 // Perform the actual loop transformation.
7492 VPTransformState State(&TTI, BestVF, LI, DT, ILV.AC, ILV.Builder, &BestVPlan,
7493 OrigLoop->getParentLoop(),
7494 Legal->getWidestInductionType());
7495
7496#ifdef EXPENSIVE_CHECKS
7497 assert(DT->verify(DominatorTree::VerificationLevel::Fast));
7498#endif
7499
7500 // 1. Set up the skeleton for vectorization, including vector pre-header and
7501 // middle block. The vector loop is created during VPlan execution.
7502 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
7504 State.CFG.PrevBB->getSingleSuccessor(), &BestVPlan);
7506
7507 assert(verifyVPlanIsValid(BestVPlan) && "final VPlan is invalid");
7508
7509 // After vectorization, the exit blocks of the original loop will have
7510 // additional predecessors. Invalidate SCEVs for the exit phis in case SE
7511 // looked through single-entry phis.
7512 ScalarEvolution &SE = *PSE.getSE();
7513 for (VPIRBasicBlock *Exit : BestVPlan.getExitBlocks()) {
7514 if (!Exit->hasPredecessors())
7515 continue;
7516 for (VPRecipeBase &PhiR : Exit->phis())
7517 SE.forgetLcssaPhiWithNewPredecessor(OrigLoop,
7518 &cast<VPIRPhi>(PhiR).getIRPhi());
7519 }
7520 // Forget the original loop and block dispositions.
7521 SE.forgetLoop(OrigLoop);
7522 SE.forgetBlockAndLoopDispositions();
7523
7524 ILV.printDebugTracesAtStart();
7525
7526 //===------------------------------------------------===//
7527 //
7528 // Notice: any optimization or new instruction that go
7529 // into the code below should also be implemented in
7530 // the cost-model.
7531 //
7532 //===------------------------------------------------===//
7533
7534 // Retrieve loop information before executing the plan, which may remove the
7535 // original loop, if it becomes unreachable.
7536 MDNode *LID = OrigLoop->getLoopID();
7537 unsigned OrigLoopInvocationWeight = 0;
7538 std::optional<unsigned> OrigAverageTripCount =
7539 getLoopEstimatedTripCount(OrigLoop, &OrigLoopInvocationWeight);
7540
7541 BestVPlan.execute(&State);
7542
7543 // 2.6. Maintain Loop Hints
7544 // Keep all loop hints from the original loop on the vector loop (we'll
7545 // replace the vectorizer-specific hints below).
7546 VPBasicBlock *HeaderVPBB = vputils::getFirstLoopHeader(BestVPlan, State.VPDT);
7547 // Add metadata to disable runtime unrolling a scalar loop when there
7548 // are no runtime checks about strides and memory. A scalar loop that is
7549 // rarely used is not worth unrolling.
7550 bool DisableRuntimeUnroll = !ILV.RTChecks.hasChecks() && !BestVF.isScalar();
7552 HeaderVPBB ? LI->getLoopFor(State.CFG.VPBB2IRBB.lookup(HeaderVPBB))
7553 : nullptr,
7554 HeaderVPBB, BestVPlan, VectorizingEpilogue, LID, OrigAverageTripCount,
7555 OrigLoopInvocationWeight,
7556 estimateElementCount(BestVF * BestUF, CM.getVScaleForTuning()),
7557 DisableRuntimeUnroll);
7558
7559 // 3. Fix the vectorized code: take care of header phi's, live-outs,
7560 // predication, updating analyses.
7561 ILV.fixVectorizedLoop(State);
7562
7563 ILV.printDebugTracesAtEnd();
7564
7565 return ExpandedSCEVs;
7566}
7567
7568//===--------------------------------------------------------------------===//
7569// EpilogueVectorizerMainLoop
7570//===--------------------------------------------------------------------===//
7571
7572/// This function is partially responsible for generating the control flow
7573/// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
7574BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
7575 BasicBlock *ScalarPH = createScalarPreheader("");
7576 BasicBlock *VectorPH = ScalarPH->getSinglePredecessor();
7577
7578 // Generate the code to check the minimum iteration count of the vector
7579 // epilogue (see below).
7580 EPI.EpilogueIterationCountCheck =
7581 emitIterationCountCheck(VectorPH, ScalarPH, true);
7582 EPI.EpilogueIterationCountCheck->setName("iter.check");
7583
7584 VectorPH = cast<CondBrInst>(EPI.EpilogueIterationCountCheck->getTerminator())
7585 ->getSuccessor(1);
7586 // Generate the iteration count check for the main loop, *after* the check
7587 // for the epilogue loop, so that the path-length is shorter for the case
7588 // that goes directly through the vector epilogue. The longer-path length for
7589 // the main loop is compensated for, by the gain from vectorizing the larger
7590 // trip count. Note: the branch will get updated later on when we vectorize
7591 // the epilogue.
7592 EPI.MainLoopIterationCountCheck =
7593 emitIterationCountCheck(VectorPH, ScalarPH, false);
7594
7595 return cast<CondBrInst>(EPI.MainLoopIterationCountCheck->getTerminator())
7596 ->getSuccessor(1);
7597}
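// Editorial sketch (illustrative, not from the original source) of the
// control flow produced above, before the epilogue loop itself is vectorized:
//
//   iter.check ----------------------------> scalar.ph   (too few iterations)
//        |
//   vector.main.loop.iter.check -----------> scalar.ph   (skip main loop)
//        |
//   vector.ph  (main vector loop follows)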
7598
7599void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
7600 LLVM_DEBUG({
7601 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
7602 << "Main Loop VF:" << EPI.MainLoopVF
7603 << ", Main Loop UF:" << EPI.MainLoopUF
7604 << ", Epilogue Loop VF:" << EPI.EpilogueVF
7605 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7606 });
7607}
7608
7609 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
7610   LLVM_DEBUG({
7611     dbgs() << "intermediate fn:\n"
7612 << *OrigLoop->getHeader()->getParent() << "\n";
7613 });
7614}
7615
7616 BasicBlock *EpilogueVectorizerMainLoop::emitIterationCountCheck(
7617     BasicBlock *VectorPH, BasicBlock *Bypass, bool ForEpilogue) {
7618   assert(Bypass && "Expected valid bypass basic block.");
7619   Value *Count = getTripCount();
7621   Value *CheckMinIters = createIterationCountCheck(
7622 VectorPH, ForEpilogue ? EPI.EpilogueVF : EPI.MainLoopVF,
7623 ForEpilogue ? EPI.EpilogueUF : EPI.MainLoopUF);
7624
7625 BasicBlock *const TCCheckBlock = VectorPH;
7626 if (!ForEpilogue)
7627 TCCheckBlock->setName("vector.main.loop.iter.check");
7628
7629 // Create new preheader for vector loop.
7630 VectorPH = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
7631 static_cast<DominatorTree *>(nullptr), LI, nullptr,
7632 "vector.ph");
7633 if (ForEpilogue) {
7634 // Save the trip count so we don't have to regenerate it in the
7635 // vec.epilog.iter.check. This is safe to do because the trip count
7636 // generated here dominates the vector epilog iter check.
7637 EPI.TripCount = Count;
7638 } else {
7640 }
7641
7642 CondBrInst &BI = *CondBrInst::Create(CheckMinIters, Bypass, VectorPH);
7643 if (hasBranchWeightMD(*OrigLoop->getLoopLatch()->getTerminator()))
7644 setBranchWeights(BI, MinItersBypassWeights, /*IsExpected=*/false);
7645 ReplaceInstWithInst(TCCheckBlock->getTerminator(), &BI);
7646
7647 // When vectorizing the main loop, its trip-count check is placed in a new
7648 // block, whereas the overall trip-count check is placed in the VPlan entry
7649 // block. When vectorizing the epilogue loop, its trip-count check is placed
7650 // in the VPlan entry block.
7651 if (!ForEpilogue)
7652 introduceCheckBlockInVPlan(TCCheckBlock);
7653 return TCCheckBlock;
7654}
7655
7656//===--------------------------------------------------------------------===//
7657// EpilogueVectorizerEpilogueLoop
7658//===--------------------------------------------------------------------===//
7659
7660 /// This function creates a new scalar preheader, using the previous one as
7661 /// entry block to the epilogue VPlan. The minimum iteration check is
7662 /// represented in VPlan.
7663 BasicBlock *EpilogueVectorizerEpilogueLoop::createVectorizedLoopSkeleton() {
7664   BasicBlock *NewScalarPH = createScalarPreheader("vec.epilog.");
7665 BasicBlock *OriginalScalarPH = NewScalarPH->getSinglePredecessor();
7666 OriginalScalarPH->setName("vec.epilog.iter.check");
7667 VPIRBasicBlock *NewEntry = Plan.createVPIRBasicBlock(OriginalScalarPH);
7668 VPBasicBlock *OldEntry = Plan.getEntry();
7669 for (auto &R : make_early_inc_range(*OldEntry)) {
7670     // Skip moving VPIRInstructions (including VPIRPhis), which are unmovable
7671     // by definition.
7672 if (isa<VPIRInstruction>(&R))
7673 continue;
7674 R.moveBefore(*NewEntry, NewEntry->end());
7675 }
7676
7677 VPBlockUtils::reassociateBlocks(OldEntry, NewEntry);
7678 Plan.setEntry(NewEntry);
7679 // OldEntry is now dead and will be cleaned up when the plan gets destroyed.
7680
7681 return OriginalScalarPH;
7682}
7683
7684 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
7685   LLVM_DEBUG({
7686 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
7687 << "Epilogue Loop VF:" << EPI.EpilogueVF
7688 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7689 });
7690}
7691
7692 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
7693   LLVM_DEBUG({
7694     dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n";
7695 });
7696}
7697
7698VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(VPInstruction *VPI,
7699 VFRange &Range) {
7700 assert((VPI->getOpcode() == Instruction::Load ||
7701 VPI->getOpcode() == Instruction::Store) &&
7702          "Must be called with either a load or store");
7703   auto *I = VPI->getUnderlyingInstr();
7704
7705   auto WillWiden = [&](ElementCount VF) -> bool {
7706     LoopVectorizationCostModel::InstWidening Decision =
7707         CM.getWideningDecision(I, VF);
7708     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
7709            "CM decision should be taken at this point.");
7710     if (Decision == LoopVectorizationCostModel::CM_Interleave)
7711       return true;
7712     if (CM.isScalarAfterVectorization(I, VF) ||
7713         CM.isProfitableToScalarize(I, VF))
7714       return false;
7715     return Decision != LoopVectorizationCostModel::CM_Scalarize;
7716   };
7717
7718   if (!LoopVectorizationPlanner::getDecisionAndClampRange(WillWiden, Range))
7719     return nullptr;
7720
7721 // If a mask is not required, drop it - use unmasked version for safe loads.
7722 // TODO: Determine if mask is needed in VPlan.
7723 VPValue *Mask = Legal->isMaskRequired(I) ? VPI->getMask() : nullptr;
7724
7725 // Determine if the pointer operand of the access is either consecutive or
7726 // reverse consecutive.
7727   LoopVectorizationCostModel::InstWidening Decision =
7728       CM.getWideningDecision(I, Range.Start);
7729   bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse;
7730   bool Consecutive =
7731       Reverse || Decision == LoopVectorizationCostModel::CM_Widen;
7732
7733 VPValue *Ptr = VPI->getOpcode() == Instruction::Load ? VPI->getOperand(0)
7734 : VPI->getOperand(1);
7735   if (Consecutive) {
7736     auto *GEP = dyn_cast<GetElementPtrInst>(
7737         Ptr->getUnderlyingValue()->stripPointerCasts());
7738     VPSingleDefRecipe *VectorPtr;
7739 if (Reverse) {
7740 // When folding the tail, we may compute an address that we don't in the
7741 // original scalar loop: drop the GEP no-wrap flags in this case.
7742 // Otherwise preserve existing flags without no-unsigned-wrap, as we will
7743 // emit negative indices.
7744 GEPNoWrapFlags Flags =
7745           CM.foldTailByMasking() || !GEP
7746               ? GEPNoWrapFlags::none()
7747               : GEP->getNoWrapFlags().withoutNoUnsignedWrap();
7748 VectorPtr = new VPVectorEndPointerRecipe(
7749 Ptr, &Plan.getVF(), getLoadStoreType(I),
7750 /*Stride*/ -1, Flags, VPI->getDebugLoc());
7751 } else {
7752 VectorPtr = new VPVectorPointerRecipe(Ptr, getLoadStoreType(I),
7753                                             GEP ? GEP->getNoWrapFlags()
7754                                                 : GEPNoWrapFlags::none(),
7755                                             VPI->getDebugLoc());
7756 }
7757 Builder.insert(VectorPtr);
7758 Ptr = VectorPtr;
7759 }
7760
7761 if (VPI->getOpcode() == Instruction::Load) {
7762 auto *Load = cast<LoadInst>(I);
7763 auto *LoadR = new VPWidenLoadRecipe(*Load, Ptr, Mask, Consecutive, Reverse,
7764 *VPI, Load->getDebugLoc());
7765 if (Reverse) {
7766 Builder.insert(LoadR);
7767 return new VPInstruction(VPInstruction::Reverse, LoadR, {}, {},
7768 LoadR->getDebugLoc());
7769 }
7770 return LoadR;
7771 }
7772
7773 StoreInst *Store = cast<StoreInst>(I);
7774 VPValue *StoredVal = VPI->getOperand(0);
7775 if (Reverse)
7776 StoredVal = Builder.createNaryOp(VPInstruction::Reverse, StoredVal,
7777 Store->getDebugLoc());
7778 return new VPWidenStoreRecipe(*Store, Ptr, StoredVal, Mask, Consecutive,
7779 Reverse, *VPI, Store->getDebugLoc());
7780}
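//
// For example (illustrative), a reverse consecutive access like
//   for (i = n - 1; i >= 0; --i) sum += a[i];
// takes the Reverse path above: a VPVectorEndPointerRecipe (Stride -1)
// addresses the end of the accessed range, and the loaded value is reversed
// with a VPInstruction::Reverse before use.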
7781
7782 VPWidenIntOrFpInductionRecipe *
7783 VPRecipeBuilder::tryToOptimizeInductionTruncate(VPInstruction *VPI,
7784 VFRange &Range) {
7785 auto *I = cast<TruncInst>(VPI->getUnderlyingInstr());
7786 // Optimize the special case where the source is a constant integer
7787 // induction variable. Notice that we can only optimize the 'trunc' case
7788 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
7789 // (c) other casts depend on pointer size.
7790
7791 // Determine whether \p K is a truncation based on an induction variable that
7792 // can be optimized.
7793 auto IsOptimizableIVTruncate =
7794 [&](Instruction *K) -> std::function<bool(ElementCount)> {
7795 return [=](ElementCount VF) -> bool {
7796 return CM.isOptimizableIVTruncate(K, VF);
7797 };
7798 };
7799
7800   if (!LoopVectorizationPlanner::getDecisionAndClampRange(
7801           IsOptimizableIVTruncate(I), Range))
7802     return nullptr;
7803
7804   auto *WidenIV = cast<VPWidenIntOrFpInductionRecipe>(
7805       VPI->getOperand(0)->getDefiningRecipe());
7806 PHINode *Phi = WidenIV->getPHINode();
7807 VPIRValue *Start = WidenIV->getStartValue();
7808 const InductionDescriptor &IndDesc = WidenIV->getInductionDescriptor();
7809
7810 // It is always safe to copy over the NoWrap and FastMath flags. In
7811 // particular, when folding tail by masking, the masked-off lanes are never
7812 // used, so it is safe.
7813 VPIRFlags Flags = vputils::getFlagsFromIndDesc(IndDesc);
7814   VPValue *Step =
7815       vputils::getOrCreateVPValueForSCEVExpr(Plan, IndDesc.getStep());
7816   return new VPWidenIntOrFpInductionRecipe(
7817 Phi, Start, Step, &Plan.getVF(), IndDesc, I, Flags, VPI->getDebugLoc());
7818}
7819
7820VPSingleDefRecipe *VPRecipeBuilder::tryToWidenCall(VPInstruction *VPI,
7821 VFRange &Range) {
7822   CallInst *CI = cast<CallInst>(VPI->getUnderlyingInstr());
7823   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
7824       [this, CI](ElementCount VF) {
7825 return CM.isScalarWithPredication(CI, VF);
7826 },
7827 Range);
7828
7829 if (IsPredicated)
7830 return nullptr;
7831
7832   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
7833   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
7834 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
7835 ID == Intrinsic::pseudoprobe ||
7836 ID == Intrinsic::experimental_noalias_scope_decl))
7837 return nullptr;
7838
7839   SmallVector<VPValue *, 4> Ops(VPI->op_begin(),
7840                                 VPI->op_begin() + CI->arg_size());
7841
7842   // Is it beneficial to perform the intrinsic call rather than the lib call?
7843   bool ShouldUseVectorIntrinsic =
7844       ID && LoopVectorizationPlanner::getDecisionAndClampRange(
7845                 [&](ElementCount VF) -> bool {
7846                   return CM.getCallWideningDecision(CI, VF).Kind ==
7847                          LoopVectorizationCostModel::CM_IntrinsicCall;
7848                 },
7849                 Range);
7850 if (ShouldUseVectorIntrinsic)
7851 return new VPWidenIntrinsicRecipe(*CI, ID, Ops, CI->getType(), *VPI, *VPI,
7852 VPI->getDebugLoc());
7853
7854 Function *Variant = nullptr;
7855 std::optional<unsigned> MaskPos;
7856   // Is it better to call a vectorized version of the function than to
7857   // scalarize the call?
7858 auto ShouldUseVectorCall = LoopVectorizationPlanner::getDecisionAndClampRange(
7859 [&](ElementCount VF) -> bool {
7860 // The following case may be scalarized depending on the VF.
7861 // The flag shows whether we can use a usual Call for vectorized
7862 // version of the instruction.
7863
7864 // If we've found a variant at a previous VF, then stop looking. A
7865 // vectorized variant of a function expects input in a certain shape
7866 // -- basically the number of input registers, the number of lanes
7867 // per register, and whether there's a mask required.
7868 // We store a pointer to the variant in the VPWidenCallRecipe, so
7869 // once we have an appropriate variant it's only valid for that VF.
7870 // This will force a different vplan to be generated for each VF that
7871 // finds a valid variant.
7872 if (Variant)
7873 return false;
7874 LoopVectorizationCostModel::CallWideningDecision Decision =
7875             CM.getCallWideningDecision(CI, VF);
7876         if (Decision.Kind == LoopVectorizationCostModel::CM_VectorCall) {
7877           Variant = Decision.Variant;
7878 MaskPos = Decision.MaskPos;
7879 return true;
7880 }
7881
7882 return false;
7883 },
7884 Range);
7885 if (ShouldUseVectorCall) {
7886 if (MaskPos.has_value()) {
7887 // We have 2 cases that would require a mask:
7888 // 1) The call needs to be predicated, either due to a conditional
7889 // in the scalar loop or use of an active lane mask with
7890 // tail-folding, and we use the appropriate mask for the block.
7891 // 2) No mask is required for the call instruction, but the only
7892 // available vector variant at this VF requires a mask, so we
7893 // synthesize an all-true mask.
7894 VPValue *Mask = VPI->isMasked() ? VPI->getMask() : Plan.getTrue();
7895
7896 Ops.insert(Ops.begin() + *MaskPos, Mask);
7897 }
7898
7899 Ops.push_back(VPI->getOperand(VPI->getNumOperandsWithoutMask() - 1));
7900 return new VPWidenCallRecipe(CI, Variant, Ops, *VPI, *VPI,
7901 VPI->getDebugLoc());
7902 }
7903
7904 return nullptr;
7905}
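//
// As a concrete illustration (example names, not taken from this file): a
// call to powf may be widened to the intrinsic llvm.pow.v4f32 when the cost
// model chose CM_IntrinsicCall, or to a vector library variant such as
// _ZGVnN4vv_powf when CM_VectorCall found one; otherwise nullptr is
// returned here and the call is later scalarized via handleReplication.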
7906
7907 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
7908   assert(!isa<BranchInst, PHINode, LoadInst, StoreInst>(I) &&
7909          "Instruction should have been handled earlier");
7910 // Instruction should be widened, unless it is scalar after vectorization,
7911 // scalarization is profitable or it is predicated.
7912 auto WillScalarize = [this, I](ElementCount VF) -> bool {
7913 return CM.isScalarAfterVectorization(I, VF) ||
7914 CM.isProfitableToScalarize(I, VF) ||
7915 CM.isScalarWithPredication(I, VF);
7916   };
7917   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
7918                                                              Range);
7919}
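// Note on the clamping helper used throughout: getDecisionAndClampRange
// evaluates its predicate at Range.Start and clamps Range.End at the first
// VF that disagrees. E.g. (illustrative) for Range = [4, 32) where
// WillScalarize is false at VF=4 and VF=8 but true at VF=16, the range is
// clamped to [4, 16) and shouldWiden returns true for that entire range.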
7920
7921VPWidenRecipe *VPRecipeBuilder::tryToWiden(VPInstruction *VPI) {
7922 auto *I = VPI->getUnderlyingInstr();
7923 switch (VPI->getOpcode()) {
7924 default:
7925 return nullptr;
7926 case Instruction::SDiv:
7927 case Instruction::UDiv:
7928 case Instruction::SRem:
7929 case Instruction::URem: {
7930 // If not provably safe, use a select to form a safe divisor before widening the
7931 // div/rem operation itself. Otherwise fall through to general handling below.
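    // As an illustrative sketch: with block mask %m, a predicated
    //   %r = udiv %a, %b
    // becomes
    //   %safe = select %m, %b, i32 1
    //   %r    = udiv %a, %safe
    // so masked-off lanes divide by 1 rather than a possibly-zero divisor.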
7932     if (CM.isPredicatedInst(I)) {
7933       SmallVector<VPValue *> Ops(VPI->operandsWithoutMask());
7934       VPValue *Mask = VPI->getMask();
7935 VPValue *One = Plan.getConstantInt(I->getType(), 1u);
7936 auto *SafeRHS =
7937 Builder.createSelect(Mask, Ops[1], One, VPI->getDebugLoc());
7938 Ops[1] = SafeRHS;
7939 return new VPWidenRecipe(*I, Ops, *VPI, *VPI, VPI->getDebugLoc());
7940 }
7941 [[fallthrough]];
7942 }
7943 case Instruction::Add:
7944 case Instruction::And:
7945 case Instruction::AShr:
7946 case Instruction::FAdd:
7947 case Instruction::FCmp:
7948 case Instruction::FDiv:
7949 case Instruction::FMul:
7950 case Instruction::FNeg:
7951 case Instruction::FRem:
7952 case Instruction::FSub:
7953 case Instruction::ICmp:
7954 case Instruction::LShr:
7955 case Instruction::Mul:
7956 case Instruction::Or:
7957 case Instruction::Select:
7958 case Instruction::Shl:
7959 case Instruction::Sub:
7960 case Instruction::Xor:
7961 case Instruction::Freeze:
7962 return new VPWidenRecipe(*I, VPI->operandsWithoutMask(), *VPI, *VPI,
7963 VPI->getDebugLoc());
7964   case Instruction::ExtractValue: {
7965     SmallVector<VPValue *> NewOps(VPI->operandsWithoutMask());
7966     auto *EVI = cast<ExtractValueInst>(I);
7967 assert(EVI->getNumIndices() == 1 && "Expected one extractvalue index");
7968 unsigned Idx = EVI->getIndices()[0];
7969 NewOps.push_back(Plan.getConstantInt(32, Idx));
7970 return new VPWidenRecipe(*I, NewOps, *VPI, *VPI, VPI->getDebugLoc());
7971 }
7972 };
7973}
7974
7975VPHistogramRecipe *VPRecipeBuilder::tryToWidenHistogram(const HistogramInfo *HI,
7976 VPInstruction *VPI) {
7977 // FIXME: Support other operations.
7978 unsigned Opcode = HI->Update->getOpcode();
7979 assert((Opcode == Instruction::Add || Opcode == Instruction::Sub) &&
7980 "Histogram update operation must be an Add or Sub");
7981
7982   SmallVector<VPValue *, 3> HGramOps;
7983   // Bucket address.
7984 HGramOps.push_back(VPI->getOperand(1));
7985 // Increment value.
7986 HGramOps.push_back(getVPValueOrAddLiveIn(HI->Update->getOperand(1)));
7987
7988 // In case of predicated execution (due to tail-folding, or conditional
7989 // execution, or both), pass the relevant mask.
7990 if (Legal->isMaskRequired(HI->Store))
7991 HGramOps.push_back(VPI->getMask());
7992
7993 return new VPHistogramRecipe(Opcode, HGramOps, VPI->getDebugLoc());
7994}
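//
// The shape this handles is, illustratively,
//   for (...) buckets[indices[i]] += 1;
// where several lanes of a vector of indices may collide within one vector
// iteration, so a plain widened load/add/store would lose updates.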
7995
7996 VPReplicateRecipe *VPRecipeBuilder::handleReplication(VPInstruction *VPI,
7997                                                       VFRange &Range) {
7998   auto *I = VPI->getUnderlyingInstr();
7999   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
8000       [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
8001 Range);
8002
8003 bool IsPredicated = CM.isPredicatedInst(I);
8004
8005 // Even if the instruction is not marked as uniform, there are certain
8006 // intrinsic calls that can be effectively treated as such, so we check for
8007 // them here. Conservatively, we only do this for scalable vectors, since
8008 // for fixed-width VFs we can always fall back on full scalarization.
8009 if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
8010 switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
8011 case Intrinsic::assume:
8012 case Intrinsic::lifetime_start:
8013 case Intrinsic::lifetime_end:
8014 // For scalable vectors if one of the operands is variant then we still
8015 // want to mark as uniform, which will generate one instruction for just
8016 // the first lane of the vector. We can't scalarize the call in the same
8017 // way as for fixed-width vectors because we don't know how many lanes
8018 // there are.
8019 //
8020 // The reasons for doing it this way for scalable vectors are:
8021 // 1. For the assume intrinsic generating the instruction for the first
8022     //    lane is still better than not generating any at all. For
8023 // example, the input may be a splat across all lanes.
8024 // 2. For the lifetime start/end intrinsics the pointer operand only
8025 // does anything useful when the input comes from a stack object,
8026 // which suggests it should always be uniform. For non-stack objects
8027 // the effect is to poison the object, which still allows us to
8028 // remove the call.
8029 IsUniform = true;
8030 break;
8031 default:
8032 break;
8033 }
8034 }
8035 VPValue *BlockInMask = nullptr;
8036 if (!IsPredicated) {
8037 // Finalize the recipe for Instr, first if it is not predicated.
8038 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
8039 } else {
8040 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8041 // Instructions marked for predication are replicated and a mask operand is
8042 // added initially. Masked replicate recipes will later be placed under an
8043 // if-then construct to prevent side-effects. Generate recipes to compute
8044 // the block mask for this region.
8045 BlockInMask = VPI->getMask();
8046 }
8047
8048 // Note that there is some custom logic to mark some intrinsics as uniform
8049 // manually above for scalable vectors, which this assert needs to account for
8050 // as well.
8051 assert((Range.Start.isScalar() || !IsUniform || !IsPredicated ||
8052 (Range.Start.isScalable() && isa<IntrinsicInst>(I))) &&
8053 "Should not predicate a uniform recipe");
8054 auto *Recipe =
8055 new VPReplicateRecipe(I, VPI->operandsWithoutMask(), IsUniform,
8056 BlockInMask, *VPI, *VPI, VPI->getDebugLoc());
8057 return Recipe;
8058}
8059
8060 VPRecipeBase *
8061 VPRecipeBuilder::tryToCreateWidenNonPhiRecipe(VPSingleDefRecipe *R,
8062                                               VFRange &Range) {
8063   assert(!R->isPhi() && "phis must be handled earlier");
8064 // First, check for specific widening recipes that deal with optimizing
8065 // truncates, calls and memory operations.
8066
8067 VPRecipeBase *Recipe;
8068 auto *VPI = cast<VPInstruction>(R);
8069 if (VPI->getOpcode() == Instruction::Trunc &&
8070 (Recipe = tryToOptimizeInductionTruncate(VPI, Range)))
8071 return Recipe;
8072
8073   // All widen recipes below deal only with VF > 1.
8074   if (LoopVectorizationPlanner::getDecisionAndClampRange(
8075           [&](ElementCount VF) { return VF.isScalar(); }, Range))
8076 return nullptr;
8077
8078 if (VPI->getOpcode() == Instruction::Call)
8079 return tryToWidenCall(VPI, Range);
8080
8081 Instruction *Instr = R->getUnderlyingInstr();
8082 if (VPI->getOpcode() == Instruction::Store)
8083 if (auto HistInfo = Legal->getHistogramInfo(cast<StoreInst>(Instr)))
8084 return tryToWidenHistogram(*HistInfo, VPI);
8085
8086 if (VPI->getOpcode() == Instruction::Load ||
8087 VPI->getOpcode() == Instruction::Store)
8088 return tryToWidenMemory(VPI, Range);
8089
8090 if (!shouldWiden(Instr, Range))
8091 return nullptr;
8092
8093 if (VPI->getOpcode() == Instruction::GetElementPtr)
8094 return new VPWidenGEPRecipe(cast<GetElementPtrInst>(Instr),
8095 VPI->operandsWithoutMask(), *VPI,
8096 VPI->getDebugLoc());
8097
8098 if (Instruction::isCast(VPI->getOpcode())) {
8099 auto *CI = cast<CastInst>(Instr);
8100 auto *CastR = cast<VPInstructionWithType>(VPI);
8101 return new VPWidenCastRecipe(CI->getOpcode(), VPI->getOperand(0),
8102 CastR->getResultType(), CI, *VPI, *VPI,
8103 VPI->getDebugLoc());
8104 }
8105
8106 return tryToWiden(VPI);
8107}
8108
8109void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8110 ElementCount MaxVF) {
8111 if (ElementCount::isKnownGT(MinVF, MaxVF))
8112 return;
8113
8114 assert(OrigLoop->isInnermost() && "Inner loop expected.");
8115
8116   const LoopAccessInfo *LAI = Legal->getLAI();
8117   LoopVersioning LVer(*LAI, LAI->getRuntimePointerChecking()->getChecks(),
8118                       OrigLoop, LI, DT, PSE.getSE());
8119   if (!LAI->getRuntimePointerChecking()->getChecks().empty() &&
8120       !LAI->getRuntimePointerChecking()->getDiffChecks()) {
8121     // Only use noalias metadata when using memory checks guaranteeing no
8122 // overlap across all iterations.
8123 LVer.prepareNoAliasMetadata();
8124 }
8125
8126 // Create initial base VPlan0, to serve as common starting point for all
8127 // candidates built later for specific VF ranges.
8128 auto VPlan0 = VPlanTransforms::buildVPlan0(
8129 OrigLoop, *LI, Legal->getWidestInductionType(),
8130 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()), PSE, &LVer);
8131
8132   // Create recipes for header phis.
8133   VPlanTransforms::createHeaderPhiRecipes(
8134       *VPlan0, PSE, *OrigLoop, Legal->getInductionVars(),
8135 Legal->getReductionVars(), Legal->getFixedOrderRecurrences(),
8136 CM.getInLoopReductions(), Hints.allowReordering());
8137
8139 // If we're vectorizing a loop with an uncountable exit, make sure that the
8140 // recipes are safe to handle.
8141 // TODO: Remove this once we can properly check the VPlan itself for both
8142 // the presence of an uncountable exit and the presence of stores in
8143 // the loop inside handleEarlyExits itself.
8145 if (Legal->hasUncountableEarlyExit())
8146 EEStyle = Legal->hasUncountableExitWithSideEffects()
8149
8150 VPlanTransforms::handleEarlyExits(*VPlan0, EEStyle);
8153 if (CM.foldTailByMasking())
8156 *VPlan0);
8157
8158 auto MaxVFTimes2 = MaxVF * 2;
8159 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFTimes2);) {
8160 VFRange SubRange = {VF, MaxVFTimes2};
8161 if (auto Plan = tryToBuildVPlanWithVPRecipes(
8162 std::unique_ptr<VPlan>(VPlan0->duplicate()), SubRange, &LVer)) {
8163 // Now optimize the initial VPlan.
8164 VPlanTransforms::hoistPredicatedLoads(*Plan, PSE, OrigLoop);
8165       VPlanTransforms::sinkPredicatedStores(*Plan, PSE, OrigLoop);
8166       VPlanTransforms::truncateToMinimalBitwidths(*Plan,
8167                                                   CM.getMinimalBitwidths());
8168       VPlanTransforms::optimize(*Plan);
8169       // TODO: try to put addExplicitVectorLength close to addActiveLaneMask
8170       if (CM.foldTailWithEVL()) {
8171         VPlanTransforms::addExplicitVectorLength(*Plan,
8172             CM.getMaxSafeElements());
8174       }
8175
8176 if (auto P = VPlanTransforms::narrowInterleaveGroups(*Plan, TTI))
8177 VPlans.push_back(std::move(P));
8178
8179 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
8180 VPlans.push_back(std::move(Plan));
8181 }
8182 VF = SubRange.End;
8183 }
8184}
8185
8186VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(
8187 VPlanPtr Plan, VFRange &Range, LoopVersioning *LVer) {
8188
8189 using namespace llvm::VPlanPatternMatch;
8190 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
8191
8192 // ---------------------------------------------------------------------------
8193 // Build initial VPlan: Scan the body of the loop in a topological order to
8194 // visit each basic block after having visited its predecessor basic blocks.
8195 // ---------------------------------------------------------------------------
8196
8197   bool RequiresScalarEpilogueCheck =
8198       LoopVectorizationPlanner::getDecisionAndClampRange(
8199           [this](ElementCount VF) {
8200 return !CM.requiresScalarEpilogue(VF.isVector());
8201 },
8202 Range);
8203 // Update the branch in the middle block if a scalar epilogue is required.
8204 VPBasicBlock *MiddleVPBB = Plan->getMiddleBlock();
8205 if (!RequiresScalarEpilogueCheck && MiddleVPBB->getNumSuccessors() == 2) {
8206 auto *BranchOnCond = cast<VPInstruction>(MiddleVPBB->getTerminator());
8207 assert(MiddleVPBB->getSuccessors()[1] == Plan->getScalarPreheader() &&
8208 "second successor must be scalar preheader");
8209 BranchOnCond->setOperand(0, Plan->getFalse());
8210 }
8211
8212 // Don't use getDecisionAndClampRange here, because we don't know the UF
8213   // so it is better to be conservative here, rather than to split
8214 // it up into different VPlans.
8215 // TODO: Consider using getDecisionAndClampRange here to split up VPlans.
8216 bool IVUpdateMayOverflow = false;
8217 for (ElementCount VF : Range)
8218 IVUpdateMayOverflow |= !isIndvarOverflowCheckKnownFalse(&CM, VF);
8219
8220 TailFoldingStyle Style = CM.getTailFoldingStyle();
8221 // Use NUW for the induction increment if we proved that it won't overflow in
8222   // the vector loop or when not folding the tail. In the latter case, we know
8223 // that the canonical induction increment will not overflow as the vector trip
8224 // count is >= increment and a multiple of the increment.
8225 VPRegionBlock *LoopRegion = Plan->getVectorLoopRegion();
8226 bool HasNUW = !IVUpdateMayOverflow || Style == TailFoldingStyle::None;
8227 if (!HasNUW) {
8228 auto *IVInc =
8229 LoopRegion->getExitingBasicBlock()->getTerminator()->getOperand(0);
8230 assert(match(IVInc,
8231 m_VPInstruction<Instruction::Add>(
8232 m_Specific(LoopRegion->getCanonicalIV()), m_VPValue())) &&
8233 "Did not find the canonical IV increment");
8234 cast<VPRecipeWithIRFlags>(IVInc)->dropPoisonGeneratingFlags();
8235 }
8236
8237 // ---------------------------------------------------------------------------
8238 // Pre-construction: record ingredients whose recipes we'll need to further
8239 // process after constructing the initial VPlan.
8240 // ---------------------------------------------------------------------------
8241
8242 // For each interleave group which is relevant for this (possibly trimmed)
8243 // Range, add it to the set of groups to be later applied to the VPlan and add
8244 // placeholders for its members' Recipes which we'll be replacing with a
8245 // single VPInterleaveRecipe.
8246 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
8247 auto ApplyIG = [IG, this](ElementCount VF) -> bool {
8248 bool Result = (VF.isVector() && // Query is illegal for VF == 1
8249                      CM.getWideningDecision(IG->getInsertPos(), VF) ==
8250                          LoopVectorizationCostModel::CM_Interleave);
8251       // For scalable vectors, the interleave factors must be <= 8 since we
8252 // require the (de)interleaveN intrinsics instead of shufflevectors.
8253 assert((!Result || !VF.isScalable() || IG->getFactor() <= 8) &&
8254 "Unsupported interleave factor for scalable vectors");
8255 return Result;
8256 };
8257 if (!getDecisionAndClampRange(ApplyIG, Range))
8258 continue;
8259 InterleaveGroups.insert(IG);
8260 }
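  // For example (illustrative), the two stores in
  //   a[2*i] = x; a[2*i + 1] = y;
  // form a single interleave group with factor 2 whose members are later
  // replaced by one wide store of the interleaved values.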
8261
8262 // ---------------------------------------------------------------------------
8263 // Construct wide recipes and apply predication for original scalar
8264 // VPInstructions in the loop.
8265 // ---------------------------------------------------------------------------
8266 VPRecipeBuilder RecipeBuilder(*Plan, TLI, Legal, CM, Builder);
8267
8268 // Scan the body of the loop in a topological order to visit each basic block
8269 // after having visited its predecessor basic blocks.
8270 VPBasicBlock *HeaderVPBB = LoopRegion->getEntryBasicBlock();
8271 ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>> RPOT(
8272 HeaderVPBB);
8273
8274 VPBasicBlock::iterator MBIP = MiddleVPBB->getFirstNonPhi();
8275
8276 // Collect blocks that need predication for in-loop reduction recipes.
8277 DenseSet<BasicBlock *> BlocksNeedingPredication;
8278 for (BasicBlock *BB : OrigLoop->blocks())
8279 if (CM.blockNeedsPredicationForAnyReason(BB))
8280 BlocksNeedingPredication.insert(BB);
8281
8282 VPlanTransforms::createInLoopReductionRecipes(*Plan, BlocksNeedingPredication,
8283 Range.Start);
8284
8285 // Now process all other blocks and instructions.
8286 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) {
8287 // Convert input VPInstructions to widened recipes.
8288 for (VPRecipeBase &R : make_early_inc_range(
8289 make_range(VPBB->getFirstNonPhi(), VPBB->end()))) {
8290       // Skip recipes that do not need transforming.
8291       if (!isa<VPInstruction>(&R))
8292         continue;
8293 auto *VPI = cast<VPInstruction>(&R);
8294 if (!VPI->getUnderlyingValue())
8295 continue;
8296
8297 // TODO: Gradually replace uses of underlying instruction by analyses on
8298 // VPlan. Migrate code relying on the underlying instruction from VPlan0
8299       // to construct recipes below to not use the underlying instruction.
8300       Instruction *Instr = VPI->getUnderlyingInstr();
8301       Builder.setInsertPoint(VPI);
8302
8303 // The stores with invariant address inside the loop will be deleted, and
8304 // in the exit block, a uniform store recipe will be created for the final
8305 // invariant store of the reduction.
8306 StoreInst *SI;
8307 if ((SI = dyn_cast<StoreInst>(Instr)) &&
8308 Legal->isInvariantAddressOfReduction(SI->getPointerOperand())) {
8309 // Only create recipe for the final invariant store of the reduction.
8310 if (Legal->isInvariantStoreOfReduction(SI)) {
8311 auto *Recipe = new VPReplicateRecipe(
8312 SI, VPI->operandsWithoutMask(), true /* IsUniform */,
8313 nullptr /*Mask*/, *VPI, *VPI, VPI->getDebugLoc());
8314 Recipe->insertBefore(*MiddleVPBB, MBIP);
8315 }
8316 R.eraseFromParent();
8317 continue;
8318 }
8319
8320 VPRecipeBase *Recipe =
8321 RecipeBuilder.tryToCreateWidenNonPhiRecipe(VPI, Range);
8322 if (!Recipe)
8323 Recipe =
8324 RecipeBuilder.handleReplication(cast<VPInstruction>(VPI), Range);
8325
8326 RecipeBuilder.setRecipe(Instr, Recipe);
8327 if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) && isa<TruncInst>(Instr)) {
8328 // Optimized a truncate to VPWidenIntOrFpInductionRecipe. It needs to be
8329 // moved to the phi section in the header.
8330 Recipe->insertBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi());
8331 } else {
8332 Builder.insert(Recipe);
8333 }
8334 if (Recipe->getNumDefinedValues() == 1) {
8335 VPI->replaceAllUsesWith(Recipe->getVPSingleValue());
8336 } else {
8337 assert(Recipe->getNumDefinedValues() == 0 &&
8338 "Unexpected multidef recipe");
8339 }
8340 R.eraseFromParent();
8341 }
8342 }
8343
8344 assert(isa<VPRegionBlock>(LoopRegion) &&
8345 !LoopRegion->getEntryBasicBlock()->empty() &&
8346 "entry block must be set to a VPRegionBlock having a non-empty entry "
8347 "VPBasicBlock");
8348
8349 // TODO: We can't call runPass on these transforms yet, due to verifier
8350 // failures.
8352
8353 // ---------------------------------------------------------------------------
8354 // Transform initial VPlan: Apply previously taken decisions, in order, to
8355 // bring the VPlan to its final state.
8356 // ---------------------------------------------------------------------------
8357
8358 addReductionResultComputation(Plan, RecipeBuilder, Range.Start);
8359
8360 // Optimize FindIV reductions to use sentinel-based approach when possible.
8362 *OrigLoop);
8364 CM.foldTailByMasking());
8365
8366 // Apply mandatory transformation to handle reductions with multiple in-loop
8367 // uses if possible, bail out otherwise.
8369 OrigLoop))
8370 return nullptr;
8371 // Apply mandatory transformation to handle FP maxnum/minnum reduction with
8372   // NaNs if possible, bail out otherwise.
8373   if (!VPlanTransforms::handleMaxMinNumReductions(*Plan))
8374     return nullptr;
8375
8376 // Create whole-vector selects for find-last recurrences.
8378 return nullptr;
8379
8380 // Create partial reduction recipes for scaled reductions and transform
8381 // recipes to abstract recipes if it is legal and beneficial and clamp the
8382 // range for better cost estimation.
8383 // TODO: Enable following transform when the EVL-version of extended-reduction
8384 // and mulacc-reduction are implemented.
8385 if (!CM.foldTailWithEVL()) {
8386 VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind, CM.PSE,
8387 OrigLoop);
8389 Range);
8391 Range);
8392 }
8393
8394 for (ElementCount VF : Range)
8395 Plan->addVF(VF);
8396 Plan->setName("Initial VPlan");
8397
8398 // Interleave memory: for each Interleave Group we marked earlier as relevant
8399 // for this VPlan, replace the Recipes widening its memory instructions with a
8400   // single VPInterleaveRecipe at its insertion point.
8401   VPlanTransforms::createInterleaveGroups(*Plan,
8402       InterleaveGroups, RecipeBuilder, CM.isScalarEpilogueAllowed());
8403
8404   // Replace VPValues for known constant strides.
8405   VPlanTransforms::replaceSymbolicStrides(*Plan, PSE,
8406       Legal->getLAI()->getSymbolicStrides());
8407
8408 auto BlockNeedsPredication = [this](BasicBlock *BB) {
8409 return Legal->blockNeedsPredication(BB);
8410   };
8411   VPlanTransforms::dropPoisonGeneratingRecipes(*Plan,
8412       BlockNeedsPredication);
8413
8414 // Sink users of fixed-order recurrence past the recipe defining the previous
8415   // value and introduce FirstOrderRecurrenceSplice VPInstructions.
8416   if (!VPlanTransforms::adjustFixedOrderRecurrences(*Plan,
8417           Builder))
8418 return nullptr;
8419
8420 if (useActiveLaneMask(Style)) {
8421 // TODO: Move checks to VPlanTransforms::addActiveLaneMask once
8422 // TailFoldingStyle is visible there.
8423 bool ForControlFlow = useActiveLaneMaskForControlFlow(Style);
8424 VPlanTransforms::addActiveLaneMask(*Plan, ForControlFlow);
8425 }
8426
8427 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
8428 return Plan;
8429}
8430
8431VPlanPtr LoopVectorizationPlanner::tryToBuildVPlan(VFRange &Range) {
8432 // Outer loop handling: They may require CFG and instruction level
8433 // transformations before even evaluating whether vectorization is profitable.
8434 // Since we cannot modify the incoming IR, we need to build VPlan upfront in
8435 // the vectorization pipeline.
8436 assert(!OrigLoop->isInnermost());
8437 assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
8438
8439 auto Plan = VPlanTransforms::buildVPlan0(
8440 OrigLoop, *LI, Legal->getWidestInductionType(),
8441 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()), PSE);
8442
8443   VPlanTransforms::createHeaderPhiRecipes(
8444       *Plan, PSE, *OrigLoop, Legal->getInductionVars(),
8445 MapVector<PHINode *, RecurrenceDescriptor>(),
8446 SmallPtrSet<const PHINode *, 1>(), SmallPtrSet<PHINode *, 1>(),
8447 /*AllowReordering=*/false);
8450 VPlanTransforms::addMiddleCheck(*Plan, /*TailFolded*/ false);
8451
8453
8454 for (ElementCount VF : Range)
8455 Plan->addVF(VF);
8456
8458 return nullptr;
8459
8460 // Optimize induction live-out users to use precomputed end values.
8462 /*FoldTail=*/false);
8463
8464 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
8465 return Plan;
8466}
8467
8468void LoopVectorizationPlanner::addReductionResultComputation(
8469 VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder, ElementCount MinVF) {
8470 using namespace VPlanPatternMatch;
8471 VPTypeAnalysis TypeInfo(*Plan);
8472 VPRegionBlock *VectorLoopRegion = Plan->getVectorLoopRegion();
8473   VPBasicBlock *MiddleVPBB = Plan->getMiddleBlock();
8474   SmallVector<VPRecipeBase *> ToDelete;
8475   VPBasicBlock *LatchVPBB = VectorLoopRegion->getExitingBasicBlock();
8476 Builder.setInsertPoint(&*std::prev(std::prev(LatchVPBB->end())));
8477 VPBasicBlock::iterator IP = MiddleVPBB->getFirstNonPhi();
8478 for (VPRecipeBase &R :
8479 Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
8480 VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
8481 // TODO: Remove check for constant incoming value once removeDeadRecipes is
8482 // used on VPlan0.
8483 if (!PhiR || isa<VPIRValue>(PhiR->getOperand(1)))
8484 continue;
8485
8486 RecurKind RecurrenceKind = PhiR->getRecurrenceKind();
8487     const RecurrenceDescriptor &RdxDesc = Legal->getRecurrenceDescriptor(
8488         cast<PHINode>(PhiR->getUnderlyingInstr()));
8489     Type *PhiTy = TypeInfo.inferScalarType(PhiR);
8490 // If tail is folded by masking, introduce selects between the phi
8491 // and the users outside the vector region of each reduction, at the
8492 // beginning of the dedicated latch block.
8493 auto *OrigExitingVPV = PhiR->getBackedgeValue();
8494 auto *NewExitingVPV = PhiR->getBackedgeValue();
8495 // Don't output selects for partial reductions because they have an output
8496 // with fewer lanes than the VF. So the operands of the select would have
8497 // different numbers of lanes. Partial reductions mask the input instead.
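    // E.g. (illustrative): with header mask %mask, backedge value %rdx.next
    // and phi %rdx.phi, the select created below is
    //   %sel = select %mask, %rdx.next, %rdx.phi
    // so masked-off lanes keep the previous partial result; %sel then feeds
    // ComputeReductionResult in the middle block.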
8498 auto *RR = dyn_cast<VPReductionRecipe>(OrigExitingVPV->getDefiningRecipe());
8499 if (!PhiR->isInLoop() && CM.foldTailByMasking() &&
8500 (!RR || !RR->isPartialReduction())) {
8501 VPValue *Cond = vputils::findHeaderMask(*Plan);
8502 NewExitingVPV =
8503 Builder.createSelect(Cond, OrigExitingVPV, PhiR, {}, "", *PhiR);
8504 OrigExitingVPV->replaceUsesWithIf(NewExitingVPV, [](VPUser &U, unsigned) {
8505 using namespace VPlanPatternMatch;
8506 return match(
8507 &U, m_CombineOr(
8508 m_VPInstruction<VPInstruction::ComputeAnyOfResult>(),
8509 m_VPInstruction<VPInstruction::ComputeReductionResult>()));
8510 });
8511
8512 if (CM.usePredicatedReductionSelect(RecurrenceKind))
8513 PhiR->setOperand(1, NewExitingVPV);
8514 }
8515
8516 // We want code in the middle block to appear to execute on the location of
8517 // the scalar loop's latch terminator because: (a) it is all compiler
8518 // generated, (b) these instructions are always executed after evaluating
8519 // the latch conditional branch, and (c) other passes may add new
8520 // predecessors which terminate on this line. This is the easiest way to
8521 // ensure we don't accidentally cause an extra step back into the loop while
8522 // debugging.
8523 DebugLoc ExitDL = OrigLoop->getLoopLatch()->getTerminator()->getDebugLoc();
8524
8525 // TODO: At the moment ComputeReductionResult also drives creation of the
8526 // bc.merge.rdx phi nodes, hence it needs to be created unconditionally here
8527 // even for in-loop reductions, until the reduction resume value handling is
8528 // also modeled in VPlan.
8529 VPInstruction *FinalReductionResult;
8530 VPBuilder::InsertPointGuard Guard(Builder);
8531 Builder.setInsertPoint(MiddleVPBB, IP);
8532 // For AnyOf reductions, find the select among PhiR's users. This is used
8533 // both to find NewVal for ComputeAnyOfResult and to adjust the reduction.
8534 VPRecipeBase *AnyOfSelect = nullptr;
8535 if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RecurrenceKind)) {
8536 AnyOfSelect = cast<VPRecipeBase>(*find_if(PhiR->users(), [](VPUser *U) {
8537 return match(U, m_Select(m_VPValue(), m_VPValue(), m_VPValue()));
8538 }));
8539 }
8540 if (AnyOfSelect) {
8541 VPValue *Start = PhiR->getStartValue();
8542 // NewVal is the non-phi operand of the select.
8543 VPValue *NewVal = AnyOfSelect->getOperand(1) == PhiR
8544 ? AnyOfSelect->getOperand(2)
8545 : AnyOfSelect->getOperand(1);
8546 FinalReductionResult =
8547 Builder.createNaryOp(VPInstruction::ComputeAnyOfResult,
8548 {Start, NewVal, NewExitingVPV}, ExitDL);
8549 } else {
8550 VPIRFlags Flags(RecurrenceKind, PhiR->isOrdered(), PhiR->isInLoop(),
8551 PhiR->getFastMathFlags());
8552 FinalReductionResult =
8553 Builder.createNaryOp(VPInstruction::ComputeReductionResult,
8554 {NewExitingVPV}, Flags, ExitDL);
8555 }
8556 // If the vector reduction can be performed in a smaller type, we truncate
8557 // then extend the loop exit value to enable InstCombine to evaluate the
8558 // entire expression in the smaller type.
8559     if (MinVF.isVector() && PhiTy != RdxDesc.getRecurrenceType() &&
8560         !RecurrenceDescriptor::isAnyOfRecurrenceKind(RecurrenceKind)) {
8561       assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
8562       assert(!RecurrenceDescriptor::isMinMaxRecurrenceKind(RecurrenceKind) &&
8563              "Unexpected truncated min-max recurrence!");
8564 Type *RdxTy = RdxDesc.getRecurrenceType();
8565 VPWidenCastRecipe *Trunc;
8566 Instruction::CastOps ExtendOpc =
8567 RdxDesc.isSigned() ? Instruction::SExt : Instruction::ZExt;
8568 VPWidenCastRecipe *Extnd;
8569 {
8570 VPBuilder::InsertPointGuard Guard(Builder);
8571 Builder.setInsertPoint(
8572 NewExitingVPV->getDefiningRecipe()->getParent(),
8573 std::next(NewExitingVPV->getDefiningRecipe()->getIterator()));
8574 Trunc =
8575 Builder.createWidenCast(Instruction::Trunc, NewExitingVPV, RdxTy);
8576 Extnd = Builder.createWidenCast(ExtendOpc, Trunc, PhiTy);
8577 }
8578 if (PhiR->getOperand(1) == NewExitingVPV)
8579 PhiR->setOperand(1, Extnd->getVPSingleValue());
8580
8581 // Update ComputeReductionResult with the truncated exiting value and
8582 // extend its result. Operand 0 provides the values to be reduced.
8583 FinalReductionResult->setOperand(0, Trunc);
8584 FinalReductionResult =
8585 Builder.createScalarCast(ExtendOpc, FinalReductionResult, PhiTy, {});
8586 }
8587
8588 // Update all users outside the vector region. Also replace redundant
8589 // extracts.
8590 for (auto *U : to_vector(OrigExitingVPV->users())) {
8591 auto *Parent = cast<VPRecipeBase>(U)->getParent();
8592 if (FinalReductionResult == U || Parent->getParent())
8593 continue;
8594       // Skip FindIV reduction chain recipes (ComputeReductionResult, icmp).
8595       if (RecurrenceDescriptor::isFindIVRecurrenceKind(RecurrenceKind) &&
8596           match(U, m_CombineOr(
8597 m_VPInstruction<VPInstruction::ComputeReductionResult>(),
8598 m_VPInstruction<Instruction::ICmp>())))
8599 continue;
8600 U->replaceUsesOfWith(OrigExitingVPV, FinalReductionResult);
8601
8602 // Look through ExtractLastPart.
8604 U = cast<VPInstruction>(U)->getSingleUser();
8605
8608 cast<VPInstruction>(U)->replaceAllUsesWith(FinalReductionResult);
8609 }
8610
8611 // Adjust AnyOf reductions; replace the reduction phi for the selected value
8612 // with a boolean reduction phi node to check if the condition is true in
8613 // any iteration. The final value is selected by the final
8614 // ComputeReductionResult.
8615 if (AnyOfSelect) {
8616 VPValue *Cmp = AnyOfSelect->getOperand(0);
8617 // If the compare is checking the reduction PHI node, adjust it to check
8618 // the start value.
8619 if (VPRecipeBase *CmpR = Cmp->getDefiningRecipe())
8620 CmpR->replaceUsesOfWith(PhiR, PhiR->getStartValue());
8621 Builder.setInsertPoint(AnyOfSelect);
8622
8623 // If the true value of the select is the reduction phi, the new value is
8624 // selected if the negated condition is true in any iteration.
8625 if (AnyOfSelect->getOperand(1) == PhiR)
8626 Cmp = Builder.createNot(Cmp);
8627 VPValue *Or = Builder.createOr(PhiR, Cmp);
8628 AnyOfSelect->getVPSingleValue()->replaceAllUsesWith(Or);
8629 // Delete AnyOfSelect now that it has invalid types.
8630 ToDelete.push_back(AnyOfSelect);
8631
8632 // Convert the reduction phi to operate on bools.
8633 PhiR->setOperand(0, Plan->getFalse());
8634 continue;
8635 }
8636
8637 RecurKind RK = PhiR->getRecurrenceKind();
8642 VPBuilder PHBuilder(Plan->getVectorPreheader());
8643 VPValue *Iden = Plan->getOrAddLiveIn(
8644 getRecurrenceIdentity(RK, PhiTy, PhiR->getFastMathFlags()));
8645 auto *ScaleFactorVPV = Plan->getConstantInt(32, 1);
8646       VPValue *StartV = PHBuilder.createNaryOp(
8647           VPInstruction::ReductionStartVector,
8648           {PhiR->getStartValue(), Iden, ScaleFactorVPV}, *PhiR);
8649 PhiR->setOperand(0, StartV);
8650 }
8651 }
8652 for (VPRecipeBase *R : ToDelete)
8653 R->eraseFromParent();
8654
8656}
8657
8658void LoopVectorizationPlanner::attachRuntimeChecks(
8659 VPlan &Plan, GeneratedRTChecks &RTChecks, bool HasBranchWeights) const {
8660 const auto &[SCEVCheckCond, SCEVCheckBlock] = RTChecks.getSCEVChecks();
8661 if (SCEVCheckBlock && SCEVCheckBlock->hasNPredecessors(0)) {
8662 assert((!CM.OptForSize ||
8663 CM.Hints->getForce() == LoopVectorizeHints::FK_Enabled) &&
8664 "Cannot SCEV check stride or overflow when optimizing for size");
8665 VPlanTransforms::attachCheckBlock(Plan, SCEVCheckCond, SCEVCheckBlock,
8666 HasBranchWeights);
8667 }
8668 const auto &[MemCheckCond, MemCheckBlock] = RTChecks.getMemRuntimeChecks();
8669 if (MemCheckBlock && MemCheckBlock->hasNPredecessors(0)) {
8670 // VPlan-native path does not do any analysis for runtime checks
8671 // currently.
8672 assert((!EnableVPlanNativePath || OrigLoop->isInnermost()) &&
8673 "Runtime checks are not supported for outer loops yet");
8674
8675 if (CM.OptForSize) {
8676 assert(
8677 CM.Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
8678 "Cannot emit memory checks when optimizing for size, unless forced "
8679 "to vectorize.");
8680 ORE->emit([&]() {
8681 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
8682 OrigLoop->getStartLoc(),
8683 OrigLoop->getHeader())
8684 << "Code-size may be reduced by not forcing "
8685 "vectorization, or by source-code modifications "
8686 "eliminating the need for runtime checks "
8687 "(e.g., adding 'restrict').";
8688 });
8689 }
8690 VPlanTransforms::attachCheckBlock(Plan, MemCheckCond, MemCheckBlock,
8691 HasBranchWeights);
8692 }
8693}
8694
8695 void LoopVectorizationPlanner::addMinimumIterationCheck(
8696     VPlan &Plan, ElementCount VF, unsigned UF,
8697     ElementCount MinProfitableTripCount) const {
8698 const uint32_t *BranchWeights =
8699 hasBranchWeightMD(*OrigLoop->getLoopLatch()->getTerminator())
8700           ? MinItersBypassWeights
8701           : nullptr;
8702   VPlanTransforms::addMinimumIterationCheck(
8703       Plan, VF, UF, MinProfitableTripCount,
8704 CM.requiresScalarEpilogue(VF.isVector()), CM.foldTailByMasking(),
8705 OrigLoop, BranchWeights,
8706 OrigLoop->getLoopPredecessor()->getTerminator()->getDebugLoc(), PSE);
8707}
8708
8709// Determine how to lower the scalar epilogue, which depends on 1) optimising
8710// for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
8711// predication, and 4) a TTI hook that analyses whether the loop is suitable
8712 // for predication.
8713 static ScalarEpilogueLowering getScalarEpilogueLowering(
8714     Function *F, Loop *L, LoopVectorizeHints &Hints, bool OptForSize,
8715     TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
8716     LoopVectorizationLegality &LVL, InterleavedAccessInfo *IAI) {
8717   // 1) OptSize takes precedence over all other options, i.e. if this is set,
8718 // don't look at hints or options, and don't request a scalar epilogue.
8719 if (F->hasOptSize() ||
8720       (OptForSize && Hints.getForce() != LoopVectorizeHints::FK_Enabled))
8721     return CM_ScalarEpilogueNotAllowedOptSize;
8722
8723 // 2) If set, obey the directives
8724   if (PreferPredicateOverEpilogue.getNumOccurrences()) {
8725     switch (PreferPredicateOverEpilogue) {
8726     case PreferPredicateTy::ScalarEpilogue:
8727       return CM_ScalarEpilogueAllowed;
8728     case PreferPredicateTy::PredicateElseScalarEpilogue:
8729       return CM_ScalarEpilogueNotNeededUsePredicate;
8730     case PreferPredicateTy::PredicateOrDontVectorize:
8731       return CM_ScalarEpilogueNotAllowedUsePredicate;
8732     };
8733 }
8734
8735 // 3) If set, obey the hints
8736   switch (Hints.getPredicate()) {
8737   case LoopVectorizeHints::FK_Enabled:
8738     return CM_ScalarEpilogueNotNeededUsePredicate;
8739   case LoopVectorizeHints::FK_Disabled:
8740     return CM_ScalarEpilogueAllowed;
8741   };
8742
8743 // 4) if the TTI hook indicates this is profitable, request predication.
8744 TailFoldingInfo TFI(TLI, &LVL, IAI);
8745   if (TTI->preferPredicateOverEpilogue(&TFI))
8746     return CM_ScalarEpilogueNotNeededUsePredicate;
8747
8748   return CM_ScalarEpilogueAllowed;
8749 }
8750
8751// Process the loop in the VPlan-native vectorization path. This path builds
8752 // VPlan upfront in the vectorization pipeline, which allows applying
8753 // VPlan-to-VPlan transformations from the very beginning without modifying the
8754 // input LLVM IR.
8755 static bool processLoopInVPlanNativePath(
8756     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
8757     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
8758     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
8759     OptimizationRemarkEmitter *ORE,
8760     std::function<BlockFrequencyInfo &()> GetBFI, bool OptForSize,
8761 LoopVectorizeHints &Hints, LoopVectorizationRequirements &Requirements) {
8762
8763   if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
8764     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
8765 return false;
8766 }
8767 assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
8768 Function *F = L->getHeader()->getParent();
8769 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
8770
8771   ScalarEpilogueLowering SEL =
8772       getScalarEpilogueLowering(F, L, Hints, OptForSize, TTI, TLI, *LVL, &IAI);
8773
8774 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE,
8775 GetBFI, F, &Hints, IAI, OptForSize);
8776 // Use the planner for outer loop vectorization.
8777 // TODO: CM is not used at this point inside the planner. Turn CM into an
8778 // optional argument if we don't need it in the future.
8779 LoopVectorizationPlanner LVP(L, LI, DT, TLI, *TTI, LVL, CM, IAI, PSE, Hints,
8780 ORE);
8781
8782 // Get user vectorization factor.
8783 ElementCount UserVF = Hints.getWidth();
8784
8785   CM.collectElementTypesForWidening();
8786
8787 // Plan how to best vectorize, return the best VF and its cost.
8788 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
8789
8790 // If we are stress testing VPlan builds, do not attempt to generate vector
8791 // code. Masked vector code generation support will follow soon.
8792   // Also, do not attempt to vectorize if no vector code will be produced.
8793   if (VPlanBuildStressTest || VectorizationFactor::Disabled() == VF)
8794     return false;
8795
8796 VPlan &BestPlan = LVP.getPlanFor(VF.Width);
8797
8798 {
8799 GeneratedRTChecks Checks(PSE, DT, LI, TTI, CM.CostKind);
8800 InnerLoopVectorizer LB(L, PSE, LI, DT, TTI, AC, VF.Width, /*UF=*/1, &CM,
8801 Checks, BestPlan);
8802 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
8803 << L->getHeader()->getParent()->getName() << "\"\n");
8804     LVP.addMinimumIterationCheck(BestPlan, VF.Width, /*UF=*/1,
8805                                  VF.MinProfitableTripCount);
8806
8807 LVP.executePlan(VF.Width, /*UF=*/1, BestPlan, LB, DT, false);
8808 }
8809
8810 reportVectorization(ORE, L, VF, 1);
8811
8812 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
8813 return true;
8814}
8815
8816// Emit a remark if there are stores to floats that required a floating point
8817// extension. If the vectorized loop was generated with floating point there
8818// will be a performance penalty from the conversion overhead and the change in
8819 // the vector width.
8820 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
8821   SmallVector<Instruction *, 4> Worklist;
8822   for (BasicBlock *BB : L->getBlocks()) {
8823 for (Instruction &Inst : *BB) {
8824 if (auto *S = dyn_cast<StoreInst>(&Inst)) {
8825 if (S->getValueOperand()->getType()->isFloatTy())
8826 Worklist.push_back(S);
8827 }
8828 }
8829 }
8830
8831   // Traverse the floating point stores upwards, searching for floating
8832   // point conversions.
8833   SmallPtrSet<const Instruction *, 4> Visited;
8834   SmallPtrSet<const Instruction *, 4> EmittedRemark;
8835   while (!Worklist.empty()) {
8836 auto *I = Worklist.pop_back_val();
8837 if (!L->contains(I))
8838 continue;
8839 if (!Visited.insert(I).second)
8840 continue;
8841
8842 // Emit a remark if the floating point store required a floating
8843 // point conversion.
8844 // TODO: More work could be done to identify the root cause such as a
8845 // constant or a function return type and point the user to it.
8846 if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
8847 ORE->emit([&]() {
8848 return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
8849 I->getDebugLoc(), L->getHeader())
8850 << "floating point conversion changes vector width. "
8851 << "Mixed floating point precision requires an up/down "
8852 << "cast that will negatively impact performance.";
8853 });
8854
8855 for (Use &Op : I->operands())
8856 if (auto *OpI = dyn_cast<Instruction>(Op))
8857 Worklist.push_back(OpI);
8858 }
8859}
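//
// A typical pattern that triggers this remark is, illustratively,
//   float *A; ... A[i] = (float)((double)A[i] * 1.5);
// where the fpext to double forces vectorization at the wider type and
// halves the number of lanes compared to staying in float.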
8860
8861/// For loops with uncountable early exits, find the cost of doing work when
8862/// exiting the loop early, such as calculating the final exit values of
8863/// variables used outside the loop.
8864/// TODO: This is currently overly pessimistic because the loop may not take
8865/// the early exit, but better to keep this conservative for now. In future,
8866 /// it might be possible to relax this by using branch probabilities.
8867 static InstructionCost calculateEarlyExitCost(VPCostContext &CostCtx,
8868                                               VPlan &Plan, ElementCount VF) {
8869 InstructionCost Cost = 0;
8870 for (auto *ExitVPBB : Plan.getExitBlocks()) {
8871 for (auto *PredVPBB : ExitVPBB->getPredecessors()) {
8872 // If the predecessor is not the middle.block, then it must be the
8873 // vector.early.exit block, which may contain work to calculate the exit
8874 // values of variables used outside the loop.
8875 if (PredVPBB != Plan.getMiddleBlock()) {
8876 LLVM_DEBUG(dbgs() << "Calculating cost of work in exit block "
8877 << PredVPBB->getName() << ":\n");
8878 Cost += PredVPBB->cost(VF, CostCtx);
8879 }
8880 }
8881 }
8882 return Cost;
8883}
8884
8885/// This function determines whether or not it's still profitable to vectorize
8886/// the loop given the extra work we have to do outside of the loop:
8887/// 1. Perform the runtime checks before entering the loop to ensure it's safe
8888/// to vectorize.
8889/// 2. In the case of loops with uncountable early exits, we may have to do
8890/// extra work when exiting the loop early, such as calculating the final
8891/// exit values of variables used outside the loop.
8892/// 3. The middle block.
8893static bool isOutsideLoopWorkProfitable(GeneratedRTChecks &Checks,
8894                                         VectorizationFactor &VF, Loop *L,
8895                                         PredicatedScalarEvolution &PSE,
8896                                         VPCostContext &CostCtx, VPlan &Plan,
8897                                         ScalarEpilogueLowering SEL,
8898                                         std::optional<unsigned> VScale) {
8899 InstructionCost RtC = Checks.getCost();
8900 if (!RtC.isValid())
8901 return false;
8902
8903   // When interleaving only, the scalar and vector costs will be equal, which
8904   // in turn would lead to a divide by 0. Fall back to a hard threshold.
8905 if (VF.Width.isScalar()) {
8906 // TODO: Should we rename VectorizeMemoryCheckThreshold?
8908 LLVM_DEBUG(
8909 dbgs()
8910 << "LV: Interleaving only is not profitable due to runtime checks\n");
8911 return false;
8912 }
8913 return true;
8914 }
8915
8916 // The scalar cost should only be 0 when vectorizing with a user specified
8917 // VF/IC. In those cases, runtime checks should always be generated.
8918 uint64_t ScalarC = VF.ScalarCost.getValue();
8919 if (ScalarC == 0)
8920 return true;
8921
8922 InstructionCost TotalCost = RtC;
8923 // Add on the cost of any work required in the vector early exit block, if
8924 // one exists.
8925 TotalCost += calculateEarlyExitCost(CostCtx, Plan, VF.Width);
8926 TotalCost += Plan.getMiddleBlock()->cost(VF.Width, CostCtx);
8927
8928 // First, compute the minimum iteration count required so that the vector
8929 // loop outperforms the scalar loop.
8930 // The total cost of the scalar loop is
8931 // ScalarC * TC
8932 // where
8933 // * TC is the actual trip count of the loop.
8934 // * ScalarC is the cost of a single scalar iteration.
8935 //
8936 // The total cost of the vector loop is
8937 // TotalCost + VecC * (TC / VF) + EpiC
8938 // where
8939   //  * TotalCost is the sum of the costs of
8940 // - the generated runtime checks, i.e. RtC
8941 // - performing any additional work in the vector.early.exit block for
8942 // loops with uncountable early exits.
8943 // - the middle block, if ExpectedTC <= VF.Width.
8944 // * VecC is the cost of a single vector iteration.
8945 // * TC is the actual trip count of the loop
8946 // * VF is the vectorization factor
8947   //  * EpiC is the cost of the generated epilogue, including the cost
8948   //    of the remaining scalar operations.
8949 //
8950 // Vectorization is profitable once the total vector cost is less than the
8951 // total scalar cost:
8952 // TotalCost + VecC * (TC / VF) + EpiC < ScalarC * TC
8953 //
8954 // Now we can compute the minimum required trip count TC as
8955 // VF * (TotalCost + EpiC) / (ScalarC * VF - VecC) < TC
8956 //
8957 // For now we assume the epilogue cost EpiC = 0 for simplicity. Note that
8958 // the computations are performed on doubles, not integers and the result
8959 // is rounded up, hence we get an upper estimate of the TC.
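  // Worked example with made-up costs: if ScalarC = 4, VecC = 20, IntVF = 8
  // and RtC = TotalCost = 28, then Div = 4 * 8 - 20 = 12 and
  //   MinTC1 = ceil(28 * 8 / 12) = 19.
  // The second bound below, which limits RtC to 1/10 of the scalar cost,
  // gives MinTC2 = ceil(28 * 10 / 4) = 70, so MinTC = max(19, 70) = 70,
  // rounded up to 72, the next multiple of the VF.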
8960 unsigned IntVF = estimateElementCount(VF.Width, VScale);
8961 uint64_t Div = ScalarC * IntVF - VF.Cost.getValue();
8962 uint64_t MinTC1 =
8963 Div == 0 ? 0 : divideCeil(TotalCost.getValue() * IntVF, Div);
8964
8965 // Second, compute a minimum iteration count so that the cost of the
8966 // runtime checks is only a fraction of the total scalar loop cost. This
8967 // adds a loop-dependent bound on the overhead incurred if the runtime
8968 // checks fail. In case the runtime checks fail, the cost is RtC + ScalarC
8969 // * TC. To bound the runtime check to be a fraction 1/X of the scalar
8970 // cost, compute
8971 // RtC < ScalarC * TC * (1 / X) ==> RtC * X / ScalarC < TC
8972 uint64_t MinTC2 = divideCeil(RtC.getValue() * 10, ScalarC);
8973
8974 // Now pick the larger minimum. If it is not a multiple of VF and a scalar
8975 // epilogue is allowed, choose the next closest multiple of VF. This should
8976 // partly compensate for ignoring the epilogue cost.
8977 uint64_t MinTC = std::max(MinTC1, MinTC2);
8978 if (SEL == CM_ScalarEpilogueAllowed)
8979     MinTC = alignTo(MinTC, IntVF);
8980   VF.MinProfitableTripCount = ElementCount::getFixed(MinTC);
8981
8982 LLVM_DEBUG(
8983 dbgs() << "LV: Minimum required TC for runtime checks to be profitable:"
8984 << VF.MinProfitableTripCount << "\n");
8985
8986 // Skip vectorization if the expected trip count is less than the minimum
8987 // required trip count.
8988 if (auto ExpectedTC = getSmallBestKnownTC(PSE, L)) {
8989 if (ElementCount::isKnownLT(*ExpectedTC, VF.MinProfitableTripCount)) {
8990 LLVM_DEBUG(dbgs() << "LV: Vectorization is not beneficial: expected "
8991 "trip count < minimum profitable VF ("
8992 << *ExpectedTC << " < " << VF.MinProfitableTripCount
8993 << ")\n");
8994
8995 return false;
8996 }
8997 }
8998 return true;
8999}
9000
9001 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
9002     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
9003                                !EnableLoopInterleaving),
9004       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
9005                               !EnableLoopVectorization) {}
9006
9007/// Prepare \p MainPlan for vectorizing the main vector loop during epilogue
9008/// vectorization. Remove ResumePhis from \p MainPlan for inductions that
9009/// don't have a corresponding wide induction in \p EpiPlan.
9010static void preparePlanForMainVectorLoop(VPlan &MainPlan, VPlan &EpiPlan) {
9011 // Collect PHI nodes of widened phis in the VPlan for the epilogue. Those
9012 // will need their resume-values computed in the main vector loop. Others
9013 // can be removed from the main VPlan.
9014 SmallPtrSet<PHINode *, 2> EpiWidenedPhis;
9015   for (VPRecipeBase &R :
9016        EpiPlan.getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
9017     if (isa<VPCanonicalIVPHIRecipe>(&R))
9018       continue;
9019 EpiWidenedPhis.insert(
9020 cast<PHINode>(R.getVPSingleValue()->getUnderlyingValue()));
9021 }
9022 for (VPRecipeBase &R :
9023 make_early_inc_range(MainPlan.getScalarHeader()->phis())) {
9024 auto *VPIRInst = cast<VPIRPhi>(&R);
9025 if (EpiWidenedPhis.contains(&VPIRInst->getIRPhi()))
9026 continue;
9027 // There is no corresponding wide induction in the epilogue plan that would
9028 // need a resume value. Remove the VPIRInst wrapping the scalar header phi
9029 // together with the corresponding ResumePhi. The resume values for the
9030 // scalar loop will be created during execution of EpiPlan.
9031 VPRecipeBase *ResumePhi = VPIRInst->getOperand(0)->getDefiningRecipe();
9032 VPIRInst->eraseFromParent();
9033 ResumePhi->eraseFromParent();
9034   }
9035   VPlanTransforms::runPass(VPlanTransforms::removeDeadRecipes, MainPlan);
9036
9037 using namespace VPlanPatternMatch;
9038 // When vectorizing the epilogue, FindFirstIV & FindLastIV reductions can
9039 // introduce multiple uses of undef/poison. If the reduction start value may
9040 // be undef or poison it needs to be frozen and the frozen start has to be
9041 // used when computing the reduction result. We also need to use the frozen
9042 // value in the resume phi generated by the main vector loop, as this is also
9043 // used to compute the reduction result after the epilogue vector loop.
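  // E.g. (illustrative): a poison start value used once by the main loop's
  // reduction result and again by the epilogue's resume computation would be
  // two independent uses of poison that may observe different values;
  // freezing pins both uses to a single concrete value.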
9044 auto AddFreezeForFindLastIVReductions = [](VPlan &Plan,
9045 bool UpdateResumePhis) {
9046 VPBuilder Builder(Plan.getEntry());
9047 for (VPRecipeBase &R : *Plan.getMiddleBlock()) {
9048 auto *VPI = dyn_cast<VPInstruction>(&R);
9049 if (!VPI)
9050 continue;
9051 VPValue *OrigStart;
9052 if (!matchFindIVResult(VPI, m_VPValue(), m_VPValue(OrigStart)))
9053         continue;
9054       if (isGuaranteedNotToBeUndefOrPoison(OrigStart->getLiveInIRValue()))
9055         continue;
9056 VPInstruction *Freeze =
9057 Builder.createNaryOp(Instruction::Freeze, {OrigStart}, {}, "fr");
9058 VPI->setOperand(2, Freeze);
9059 if (UpdateResumePhis)
9060 OrigStart->replaceUsesWithIf(Freeze, [Freeze](VPUser &U, unsigned) {
9061 return Freeze != &U && isa<VPPhi>(&U);
9062 });
9063 }
9064 };
9065 AddFreezeForFindLastIVReductions(MainPlan, true);
9066 AddFreezeForFindLastIVReductions(EpiPlan, false);
9067
9068 VPValue *VectorTC = nullptr;
9069   auto *Term =
9070       MainPlan.getVectorLoopRegion()->getExitingBasicBlock()->getTerminator();
9071   [[maybe_unused]] bool MatchedTC =
9072 match(Term, m_BranchOnCount(m_VPValue(), m_VPValue(VectorTC)));
9073 assert(MatchedTC && "must match vector trip count");
9074
9075 // If there is a suitable resume value for the canonical induction in the
9076 // scalar (which will become vector) epilogue loop, use it and move it to the
9077 // beginning of the scalar preheader. Otherwise create it below.
9078 VPBasicBlock *MainScalarPH = MainPlan.getScalarPreheader();
9079 auto ResumePhiIter =
9080 find_if(MainScalarPH->phis(), [VectorTC](VPRecipeBase &R) {
9081 return match(&R, m_VPInstruction<Instruction::PHI>(m_Specific(VectorTC),
9082 m_ZeroInt()));
9083 });
9084 VPPhi *ResumePhi = nullptr;
9085 if (ResumePhiIter == MainScalarPH->phis().end()) {
9086 using namespace llvm::VPlanPatternMatch;
9087 assert(
9088     match(MainPlan.getVectorLoopRegion()->getCanonicalIV()->getStartValue(),
9089           m_ZeroInt()) &&
9090 "canonical IV must start at 0");
9091 Type *Ty = VPTypeAnalysis(MainPlan).inferScalarType(VectorTC);
9092 VPBuilder ScalarPHBuilder(MainScalarPH, MainScalarPH->begin());
9093 ResumePhi = ScalarPHBuilder.createScalarPhi(
9094 {VectorTC, MainPlan.getZero(Ty)}, {}, "vec.epilog.resume.val");
9095 } else {
9096 ResumePhi = cast<VPPhi>(&*ResumePhiIter);
9097 if (MainScalarPH->begin() == MainScalarPH->end())
9098 ResumePhi->moveBefore(*MainScalarPH, MainScalarPH->end());
9099 else if (&*MainScalarPH->begin() != ResumePhi)
9100 ResumePhi->moveBefore(*MainScalarPH, MainScalarPH->begin());
9101 }
9102 // Add a user to make sure the resume phi won't get removed.
9103 VPBuilder(MainScalarPH)
9104     .createNaryOp(VPInstruction::ResumeForEpilogue, ResumePhi);
9105}
9106
9107/// Prepare \p Plan for vectorizing the epilogue loop. That is, re-use expanded
9108/// SCEVs from \p ExpandedSCEVs and set resume values for header recipes. Some
9109/// reductions require creating new instructions to compute the resume values.
9110/// They are collected in a vector and returned. They must be moved to the
9111 /// preheader of the vector epilogue loop, after being created by the execution
9112 /// of \p Plan.
9113 static SmallVector<Instruction *> preparePlanForEpilogueVectorLoop(
9114     VPlan &Plan, Loop *L, const SCEV2ValueTy &ExpandedSCEVs,
9115     EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel &CM,
9116     ScalarEvolution &SE) {
9117 VPRegionBlock *VectorLoop = Plan.getVectorLoopRegion();
9118 VPBasicBlock *Header = VectorLoop->getEntryBasicBlock();
9119 Header->setName("vec.epilog.vector.body");
9120
9121 VPCanonicalIVPHIRecipe *IV = VectorLoop->getCanonicalIV();
9122 // When vectorizing the epilogue loop, the canonical induction needs to be
9123 // adjusted by the value after the main vector loop. Find the resume value
9124 // created during execution of the main VPlan. It must be the first phi in the
9125 // loop preheader. Use the value to increment the canonical IV, and update all
9126 // users in the loop region to use the adjusted value.
9127 // FIXME: Improve modeling for canonical IV start values in the epilogue
9128 // loop.
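// Illustrative sketch (block and value names hypothetical): if the main
// vector loop left off after 96 iterations, the resume value feeds an add so
// users in the epilogue see iteration numbers starting at 96, while the
// canonical IV phi itself still starts at 0:
//
//   vec.epilog.vector.body:
//     %index = phi i64 [ 0, %vec.epilog.ph ], [ %index.next, ... ]
//     %index.adj = add i64 %index, %vec.epilog.resume.val  ; 96 on entry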
9129 using namespace llvm::PatternMatch;
9130 PHINode *EPResumeVal = &*L->getLoopPreheader()->phis().begin();
9131 for (Value *Inc : EPResumeVal->incoming_values()) {
9132 if (match(Inc, m_SpecificInt(0)))
9133 continue;
9134 assert(!EPI.VectorTripCount &&
9135 "Must only have a single non-zero incoming value");
9136 EPI.VectorTripCount = Inc;
9137 }
9138 // If we didn't find a non-zero vector trip count, all incoming values
9139 // must be zero, which also means the vector trip count is zero. Pick the
9140 // first zero as vector trip count.
9141 // TODO: We should not choose VF * UF so the main vector loop is known to
9142 // be dead.
9143 if (!EPI.VectorTripCount) {
9144 assert(EPResumeVal->getNumIncomingValues() > 0 &&
9145 all_of(EPResumeVal->incoming_values(),
9146 [](Value *Inc) { return match(Inc, m_SpecificInt(0)); }) &&
9147 "all incoming values must be 0");
9148 EPI.VectorTripCount = EPResumeVal->getOperand(0);
9149 }
9150 VPValue *VPV = Plan.getOrAddLiveIn(EPResumeVal);
9151 assert(all_of(IV->users(),
9152 [](const VPUser *U) {
9153 return isa<VPScalarIVStepsRecipe>(U) ||
9154 isa<VPDerivedIVRecipe>(U) ||
9155 cast<VPRecipeBase>(U)->isScalarCast() ||
9156 cast<VPInstruction>(U)->getOpcode() ==
9157 Instruction::Add;
9158 }) &&
9159 "the canonical IV should only be used by its increment or "
9160 "ScalarIVSteps when resetting the start value");
9161 VPBuilder Builder(Header, Header->getFirstNonPhi());
9162 VPInstruction *Add = Builder.createAdd(IV, VPV);
9163 IV->replaceAllUsesWith(Add);
9164 Add->setOperand(0, IV);
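  // Note the ordering above: replaceAllUsesWith also rewires the operand of
  // the freshly created add (which would make it use itself), so the
  // canonical IV is re-installed as its first operand afterwards.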
9165
9166   DenseMap<Value *, Value *> ToFrozen;
9167   SmallVector<Instruction *> InstsToMove;
9168 // Ensure that the start values for all header phi recipes are updated before
9169 // vectorizing the epilogue loop. Skip the canonical IV, which has been
9170 // handled above.
9171 for (VPRecipeBase &R : drop_begin(Header->phis())) {
9172 Value *ResumeV = nullptr;
9173 // TODO: Move setting of resume values to prepareToExecute.
9174 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) {
9175 // Find the reduction result by searching users of the phi or its backedge
9176 // value.
9177 auto IsReductionResult = [](VPRecipeBase *R) {
9178 auto *VPI = dyn_cast<VPInstruction>(R);
9179 if (!VPI)
9180         return false;
9181       return VPI->getOpcode() == VPInstruction::ComputeAnyOfResult ||
9182              VPI->getOpcode() == VPInstruction::ComputeReductionResult;
9183     };
9184 auto *RdxResult = cast<VPInstruction>(
9185 vputils::findRecipe(ReductionPhi->getBackedgeValue(), IsReductionResult));
9186 assert(RdxResult && "expected to find reduction result");
9187
9188 ResumeV = cast<PHINode>(ReductionPhi->getUnderlyingInstr())
9189 ->getIncomingValueForBlock(L->getLoopPreheader());
9190
9191 // Check for FindIV pattern by looking for icmp user of RdxResult.
9192 // The pattern is: select(icmp ne RdxResult, Sentinel), RdxResult, Start
9193 using namespace VPlanPatternMatch;
9194 VPValue *SentinelVPV = nullptr;
9195 bool IsFindIV = any_of(RdxResult->users(), [&](VPUser *U) {
9196 return match(U, VPlanPatternMatch::m_SpecificICmp(
9197 ICmpInst::ICMP_NE, m_Specific(RdxResult),
9198 m_VPValue(SentinelVPV)));
9199 });
9200
9201 if (RdxResult->getOpcode() == VPInstruction::ComputeAnyOfResult) {
9202 Value *StartV = RdxResult->getOperand(0)->getLiveInIRValue();
9203 // VPReductionPHIRecipes for AnyOf reductions expect a boolean as
9204 // start value; compare the final value from the main vector loop
9205 // to the start value.
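        // E.g. (sketch, names hypothetical): if the main loop produced
        // %rdx.main from a start value %start, the epilogue phi wants an i1,
        //   %resume = icmp ne i32 %rdx.main, %start
        // i.e. "true" iff the main vector loop already found a match.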
9206 BasicBlock *PBB = cast<Instruction>(ResumeV)->getParent();
9207 IRBuilder<> Builder(PBB, PBB->getFirstNonPHIIt());
9208 ResumeV = Builder.CreateICmpNE(ResumeV, StartV);
9209 if (auto *I = dyn_cast<Instruction>(ResumeV))
9210 InstsToMove.push_back(I);
9211 } else if (IsFindIV) {
9212 assert(SentinelVPV && "expected to find icmp using RdxResult");
9213
9214 // Get the frozen start value from the main loop.
9215 Value *FrozenStartV = cast<PHINode>(ResumeV)->getIncomingValueForBlock(
9216     EPI.MainLoopIterationCountCheck);
9217 if (auto *FreezeI = dyn_cast<FreezeInst>(FrozenStartV))
9218 ToFrozen[FreezeI->getOperand(0)] = FrozenStartV;
9219
9220 // Adjust resume: select(icmp eq ResumeV, FrozenStartV), Sentinel,
9221 // ResumeV
9222 BasicBlock *ResumeBB = cast<Instruction>(ResumeV)->getParent();
9223 IRBuilder<> Builder(ResumeBB, ResumeBB->getFirstNonPHIIt());
9224 Value *Cmp = Builder.CreateICmpEQ(ResumeV, FrozenStartV);
9225 if (auto *I = dyn_cast<Instruction>(Cmp))
9226 InstsToMove.push_back(I);
9227 ResumeV =
9228 Builder.CreateSelect(Cmp, SentinelVPV->getLiveInIRValue(), ResumeV);
9229 if (auto *I = dyn_cast<Instruction>(ResumeV))
9230 InstsToMove.push_back(I);
9231 } else {
9232 VPValue *StartVal = Plan.getOrAddLiveIn(ResumeV);
9233 auto *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
9234 if (auto *VPI = dyn_cast<VPInstruction>(PhiR->getStartValue())) {
9236 "unexpected start value");
9237 // Partial sub-reductions always start at 0 and account for the
9238 // reduction start value in a final subtraction. Update it to use the
9239 // resume value from the main vector loop.
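      // Sketch: a sub-reduction r = S - sum(a[i]) is emitted as a 0-based
      // partial sum plus a final "sub S, %sum". With the main loop already
      // contributing R.main = S - sum(a[0..MainTC)), the epilogue's final
      // subtraction must become "sub R.main, %sum.epi" rather than reusing
      // the original start S; that is what the operand update below does.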
9240 if (PhiR->getVFScaleFactor() > 1 &&
9241 PhiR->getRecurrenceKind() == RecurKind::Sub) {
9242 auto *Sub = cast<VPInstruction>(RdxResult->getSingleUser());
9243 assert(Sub->getOpcode() == Instruction::Sub && "Unexpected opcode");
9244 assert(isa<VPIRValue>(Sub->getOperand(0)) &&
9245 "Expected operand to match the original start value of the "
9246 "reduction");
9249 "Expected start value for partial sub-reduction to start at "
9250 "zero");
9251 Sub->setOperand(0, StartVal);
9252 } else
9253 VPI->setOperand(0, StartVal);
9254 continue;
9255 }
9256 }
9257 } else {
9258 // Retrieve the induction resume values for wide inductions from
9259 // their original phi nodes in the scalar loop.
9260 PHINode *IndPhi = cast<VPWidenInductionRecipe>(&R)->getPHINode();
9261 // Hook up to the PHINode generated by a ResumePhi recipe of main
9262 // loop VPlan, which feeds the scalar loop.
9263 ResumeV = IndPhi->getIncomingValueForBlock(L->getLoopPreheader());
9264 }
9265 assert(ResumeV && "Must have a resume value");
9266 VPValue *StartVal = Plan.getOrAddLiveIn(ResumeV);
9267 cast<VPHeaderPHIRecipe>(&R)->setStartValue(StartVal);
9268 }
9269
9270 // For some VPValues in the epilogue plan we must re-use the generated IR
9271 // values from the main plan. Replace them with live-in VPValues.
9272 // TODO: This is a workaround needed for epilogue vectorization and it
9273 // should be removed once induction resume value creation is done
9274 // directly in VPlan.
9275 for (auto &R : make_early_inc_range(*Plan.getEntry())) {
9276 // Re-use frozen values from the main plan for Freeze VPInstructions in the
9277 // epilogue plan. This ensures all users use the same frozen value.
9278 auto *VPI = dyn_cast<VPInstruction>(&R);
9279 if (VPI && VPI->getOpcode() == Instruction::Freeze) {
9280       VPI->replaceAllUsesWith(Plan.getOrAddLiveIn(
9281           ToFrozen.lookup(VPI->getOperand(0)->getLiveInIRValue())));
9282 continue;
9283 }
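    // Sketch: if the main plan materialized "%fr = freeze i32 %start", the
    // epilogue's own Freeze is replaced by the live-in %fr above, so the main
    // loop, the epilogue loop and the resume phis between them all observe
    // one consistent frozen value.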
9284
9285 // Re-use the trip count and steps expanded for the main loop, as
9286 // skeleton creation needs it as a value that dominates both the scalar
9287 // and vector epilogue loops.
9288 auto *ExpandR = dyn_cast<VPExpandSCEVRecipe>(&R);
9289 if (!ExpandR)
9290 continue;
9291 VPValue *ExpandedVal =
9292 Plan.getOrAddLiveIn(ExpandedSCEVs.lookup(ExpandR->getSCEV()));
9293 ExpandR->replaceAllUsesWith(ExpandedVal);
9294 if (Plan.getTripCount() == ExpandR)
9295 Plan.resetTripCount(ExpandedVal);
9296 ExpandR->eraseFromParent();
9297 }
9298
9299 auto VScale = CM.getVScaleForTuning();
9300 unsigned MainLoopStep =
9301 estimateElementCount(EPI.MainLoopVF * EPI.MainLoopUF, VScale);
9302 unsigned EpilogueLoopStep =
9303 estimateElementCount(EPI.EpilogueVF * EPI.EpilogueUF, VScale);
9304   VPlanTransforms::addMinimumVectorEpilogueIterationCheck(
9305       Plan, EPI.TripCount, EPI.VectorTripCount,
9306       CM.requiresScalarEpilogue(EPI.EpilogueVF.isVector()), EPI.EpilogueVF,
9307       EPI.EpilogueUF, MainLoopStep, EpilogueLoopStep, SE);
9308
9309 return InstsToMove;
9310}
9311
9312// Generate bypass values from the additional bypass block. Note that when the
9313 // vectorized epilogue is skipped due to the iteration count check, the
9314// resume value for the induction variable comes from the trip count of the
9315// main vector loop, passed as the second argument.
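// E.g. for an induction i = start + k * step, if the epilogue vector loop is
// bypassed after the main loop ran MainVectorTripCount iterations, the scalar
// loop must resume at start + MainVectorTripCount * step; emitTransformedIndex
// below computes exactly that transformed index (sketch; the primary
// induction can use MainVectorTripCount directly).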
9316 static Value *createInductionAdditionalBypassValues(
9317     PHINode *OrigPhi, const InductionDescriptor &II, IRBuilder<> &BypassBuilder,
9318 const SCEV2ValueTy &ExpandedSCEVs, Value *MainVectorTripCount,
9319 Instruction *OldInduction) {
9320 Value *Step = getExpandedStep(II, ExpandedSCEVs);
9321 // For the primary induction the additional bypass end value is known.
9322 // Otherwise it is computed.
9323 Value *EndValueFromAdditionalBypass = MainVectorTripCount;
9324 if (OrigPhi != OldInduction) {
9325 auto *BinOp = II.getInductionBinOp();
9326 // Fast-math-flags propagate from the original induction instruction.
9327     if (isa_and_nonnull<FPMathOperator>(BinOp))
9328       BypassBuilder.setFastMathFlags(BinOp->getFastMathFlags());
9329
9330 // Compute the end value for the additional bypass.
9331 EndValueFromAdditionalBypass =
9332 emitTransformedIndex(BypassBuilder, MainVectorTripCount,
9333 II.getStartValue(), Step, II.getKind(), BinOp);
9334 EndValueFromAdditionalBypass->setName("ind.end");
9335 }
9336 return EndValueFromAdditionalBypass;
9337}
9338
9339 static void fixScalarResumeValuesFromBypass(BasicBlock *BypassBlock, Loop *L,
9340                                             VPlan &BestEpiPlan,
9341                                             LoopVectorizationLegality &LVL,
9342                                             const SCEV2ValueTy &ExpandedSCEVs,
9343 Value *MainVectorTripCount) {
9344 // Fix reduction resume values from the additional bypass block.
9345 BasicBlock *PH = L->getLoopPreheader();
9346 for (auto *Pred : predecessors(PH)) {
9347 for (PHINode &Phi : PH->phis()) {
9348 if (Phi.getBasicBlockIndex(Pred) != -1)
9349 continue;
9350 Phi.addIncoming(Phi.getIncomingValueForBlock(BypassBlock), Pred);
9351 }
9352 }
9353 auto *ScalarPH = cast<VPIRBasicBlock>(BestEpiPlan.getScalarPreheader());
9354 if (ScalarPH->hasPredecessors()) {
9355 // If ScalarPH has predecessors, we may need to update its reduction
9356 // resume values.
9357 for (const auto &[R, IRPhi] :
9358 zip(ScalarPH->phis(), ScalarPH->getIRBasicBlock()->phis())) {
9359       fixReductionScalarResumeWhenVectorizingEpilog(cast<VPPhi>(&R), IRPhi,
9360                                                     BypassBlock);
9361 }
9362 }
9363
9364 // Fix induction resume values from the additional bypass block.
9365 IRBuilder<> BypassBuilder(BypassBlock, BypassBlock->getFirstInsertionPt());
9366 for (const auto &[IVPhi, II] : LVL.getInductionVars()) {
9367     Value *V = createInductionAdditionalBypassValues(
9368         IVPhi, II, BypassBuilder, ExpandedSCEVs, MainVectorTripCount,
9369 LVL.getPrimaryInduction());
9370 // TODO: Directly add as extra operand to the VPResumePHI recipe.
9371 if (auto *Inc = dyn_cast<PHINode>(IVPhi->getIncomingValueForBlock(PH))) {
9372 if (Inc->getBasicBlockIndex(BypassBlock) != -1)
9373 Inc->setIncomingValueForBlock(BypassBlock, V);
9374 } else {
9375 // If the resume value in the scalar preheader was simplified (e.g., when
9376 // narrowInterleaveGroups optimized away the resume PHIs), create a new
9377 // PHI to merge the bypass value with the original value.
9378 Value *OrigVal = IVPhi->getIncomingValueForBlock(PH);
9379 PHINode *NewPhi =
9380 PHINode::Create(IVPhi->getType(), pred_size(PH), "bc.resume.val",
9381 PH->getFirstNonPHIIt());
9382 for (auto *Pred : predecessors(PH)) {
9383 if (Pred == BypassBlock)
9384 NewPhi->addIncoming(V, Pred);
9385 else
9386 NewPhi->addIncoming(OrigVal, Pred);
9387 }
9388 IVPhi->setIncomingValueForBlock(PH, NewPhi);
9389 }
9390 }
9391}
9392
9393/// Connect the epilogue vector loop generated for \p EpiPlan to the main vector
9394 /// loop, after both plans have executed, updating branches from the iteration
9395 /// and runtime checks of the main loop, as well as updating various phis. \p
9396 /// InstsToMove contains instructions that need to be moved to the preheader of
9397 /// the epilogue vector loop.
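/// Rough sketch of the resulting control flow (simplified, runtime-check
/// blocks omitted): the main iteration-count check may branch directly to
/// vec.epilog.ph, the epilogue iteration-count check may branch directly to
/// scalar.ph, and otherwise control falls from the main loop's middle block
/// through vec.epilog.iter.check into the epilogue vector loop.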
9398 static void connectEpilogueVectorLoop(
9399     VPlan &EpiPlan, Loop *L, EpilogueLoopVectorizationInfo &EPI,
9400     DominatorTree *DT, LoopVectorizationLegality &LVL,
9401     DenseMap<const SCEV *, Value *> &ExpandedSCEVs, GeneratedRTChecks &Checks,
9402 ArrayRef<Instruction *> InstsToMove) {
9403 BasicBlock *VecEpilogueIterationCountCheck =
9404 cast<VPIRBasicBlock>(EpiPlan.getEntry())->getIRBasicBlock();
9405
9406 BasicBlock *VecEpiloguePreHeader =
9407 cast<CondBrInst>(VecEpilogueIterationCountCheck->getTerminator())
9408 ->getSuccessor(1);
9409 // Adjust the control flow taking the state info from the main loop
9410 // vectorization into account.
9412 "expected this to be saved from the previous pass.");
9413 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
9414   EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
9415       VecEpilogueIterationCountCheck, VecEpiloguePreHeader);
9416
9417   DTU.applyUpdates({{DominatorTree::Delete, EPI.MainLoopIterationCountCheck,
9418                      VecEpilogueIterationCountCheck},
9419                     {DominatorTree::Insert, EPI.MainLoopIterationCountCheck,
9420                      VecEpiloguePreHeader}});
9421
9422 BasicBlock *ScalarPH =
9423 cast<VPIRBasicBlock>(EpiPlan.getScalarPreheader())->getIRBasicBlock();
9424   EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
9425       VecEpilogueIterationCountCheck, ScalarPH);
9426 DTU.applyUpdates(
9427       {{DominatorTree::Delete, EPI.EpilogueIterationCountCheck,
9428         VecEpilogueIterationCountCheck},
9429        {DominatorTree::Insert, EPI.EpilogueIterationCountCheck, ScalarPH}});
9430
9431 // Adjust the terminators of runtime check blocks and phis using them.
9432 BasicBlock *SCEVCheckBlock = Checks.getSCEVChecks().second;
9433 BasicBlock *MemCheckBlock = Checks.getMemRuntimeChecks().second;
9434 if (SCEVCheckBlock) {
9435 SCEVCheckBlock->getTerminator()->replaceUsesOfWith(
9436 VecEpilogueIterationCountCheck, ScalarPH);
9437 DTU.applyUpdates({{DominatorTree::Delete, SCEVCheckBlock,
9438 VecEpilogueIterationCountCheck},
9439 {DominatorTree::Insert, SCEVCheckBlock, ScalarPH}});
9440 }
9441 if (MemCheckBlock) {
9442 MemCheckBlock->getTerminator()->replaceUsesOfWith(
9443 VecEpilogueIterationCountCheck, ScalarPH);
9444 DTU.applyUpdates(
9445 {{DominatorTree::Delete, MemCheckBlock, VecEpilogueIterationCountCheck},
9446 {DominatorTree::Insert, MemCheckBlock, ScalarPH}});
9447 }
9448
9449 // The vec.epilog.iter.check block may contain Phi nodes from inductions
9450 // or reductions which merge control-flow from the latch block and the
9451 // middle block. Update the incoming values here and move the Phi into the
9452 // preheader.
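  // Sketch (names hypothetical): a reduction resume phi such as
  //   %bc.merge.rdx = phi [ %rdx, %middle.block ], [ %init, %iter.check ]
  // sitting in vec.epilog.iter.check is moved into vec.epilog.ph here;
  // incoming values routed via the removed check blocks are dropped below,
  // which is only needed for reduction phis.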
9453 SmallVector<PHINode *, 4> PhisInBlock(
9454 llvm::make_pointer_range(VecEpilogueIterationCountCheck->phis()));
9455
9456 for (PHINode *Phi : PhisInBlock) {
9457 Phi->moveBefore(VecEpiloguePreHeader->getFirstNonPHIIt());
9458 Phi->replaceIncomingBlockWith(
9459 VecEpilogueIterationCountCheck->getSinglePredecessor(),
9460 VecEpilogueIterationCountCheck);
9461
9462 // If the phi doesn't have an incoming value from the
9463 // EpilogueIterationCountCheck, we are done. Otherwise remove the
9464 // incoming value and also those from other check blocks. This is needed
9465 // for reduction phis only.
9466 if (none_of(Phi->blocks(), [&](BasicBlock *IncB) {
9467 return EPI.EpilogueIterationCountCheck == IncB;
9468 }))
9469 continue;
9470 Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck);
9471 if (SCEVCheckBlock)
9472 Phi->removeIncomingValue(SCEVCheckBlock);
9473 if (MemCheckBlock)
9474 Phi->removeIncomingValue(MemCheckBlock);
9475 }
9476
9477 auto IP = VecEpiloguePreHeader->getFirstNonPHIIt();
9478 for (auto *I : InstsToMove)
9479 I->moveBefore(IP);
9480
9481 // VecEpilogueIterationCountCheck conditionally skips over the epilogue loop
9482 // after executing the main loop. We need to update the resume values of
9483 // inductions and reductions during epilogue vectorization.
9484 fixScalarResumeValuesFromBypass(VecEpilogueIterationCountCheck, L, EpiPlan,
9485 LVL, ExpandedSCEVs, EPI.VectorTripCount);
9486}
9487
9488 bool LoopVectorizePass::processLoop(Loop *L) {
9489   assert((EnableVPlanNativePath || L->isInnermost()) &&
9490 "VPlan-native path is not enabled. Only process inner loops.");
9491
9492 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in '"
9493 << L->getHeader()->getParent()->getName() << "' from "
9494 << L->getLocStr() << "\n");
9495
9496 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI);
9497
9498 LLVM_DEBUG(
9499 dbgs() << "LV: Loop hints:"
9500 << " force="
9502 ? "disabled"
9504 ? "enabled"
9505 : "?"))
9506 << " width=" << Hints.getWidth()
9507 << " interleave=" << Hints.getInterleave() << "\n");
9508
9509 // Function containing loop
9510 Function *F = L->getHeader()->getParent();
9511
9512 // Looking at the diagnostic output is the only way to determine if a loop
9513 // was vectorized (other than looking at the IR or machine code), so it
9514 // is important to generate an optimization remark for each loop. Most of
9515 // these messages are generated as OptimizationRemarkAnalysis. Remarks
9516 // generated as OptimizationRemark and OptimizationRemarkMissed are
9517 // less verbose reporting vectorized loops and unvectorized loops that may
9518 // benefit from vectorization, respectively.
9519
9520 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
9521 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
9522 return false;
9523 }
9524
9525 PredicatedScalarEvolution PSE(*SE, *L);
9526
9527 // Query this against the original loop and save it here because the profile
9528 // of the original loop header may change as the transformation happens.
9529 bool OptForSize = llvm::shouldOptimizeForSize(
9530 L->getHeader(), PSI,
9531 PSI && PSI->hasProfileSummary() ? &GetBFI() : nullptr,
9532       PGSOQueryType::IRPass);
9533
9534 // Check if it is legal to vectorize the loop.
9535 LoopVectorizationRequirements Requirements;
9536 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, F, *LAIs, LI, ORE,
9537 &Requirements, &Hints, DB, AC,
9538 /*AllowRuntimeSCEVChecks=*/!OptForSize, AA);
9539   if (!LVL.canVectorize(EnableVPlanNativePath)) {
9540     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
9541 Hints.emitRemarkWithHints();
9542 return false;
9543 }
9544
9545 if (LVL.hasUncountableEarlyExit()) {
9546     if (!EnableEarlyExitVectorization) {
9547       reportVectorizationFailure("Auto-vectorization of loops with uncountable "
9548 "early exit is not enabled",
9549 "UncountableEarlyExitLoopsDisabled", ORE, L);
9550 return false;
9551 }
9552 }
9553
9554 if (!LVL.getPotentiallyFaultingLoads().empty()) {
9555 reportVectorizationFailure("Auto-vectorization of loops with potentially "
9556 "faulting load is not supported",
9557 "PotentiallyFaultingLoadsNotSupported", ORE, L);
9558 return false;
9559 }
9560
9561 // Entrance to the VPlan-native vectorization path. Outer loops are processed
9562 // here. They may require CFG and instruction level transformations before
9563 // even evaluating whether vectorization is profitable. Since we cannot modify
9564 // the incoming IR, we need to build VPlan upfront in the vectorization
9565 // pipeline.
9566 if (!L->isInnermost())
9567 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
9568 ORE, GetBFI, OptForSize, Hints,
9569 Requirements);
9570
9571 assert(L->isInnermost() && "Inner loop expected.");
9572
9573 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
9574 bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
9575
9576 // If an override option has been passed in for interleaved accesses, use it.
9577 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
9578 UseInterleaved = EnableInterleavedMemAccesses;
9579
9580 // Analyze interleaved memory accesses.
9581 if (UseInterleaved)
9582     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
9583
9584 if (LVL.hasUncountableEarlyExit()) {
9585 BasicBlock *LoopLatch = L->getLoopLatch();
9586 if (IAI.requiresScalarEpilogue() ||
9587         any_of(LVL.getCountableExitingBlocks(),
9588                [LoopLatch](BasicBlock *BB) { return BB != LoopLatch; })) {
9589 reportVectorizationFailure("Auto-vectorization of early exit loops "
9590 "requiring a scalar epilogue is unsupported",
9591 "UncountableEarlyExitUnsupported", ORE, L);
9592 return false;
9593 }
9594 }
9595
9596 // Check the function attributes and profiles to find out if this function
9597 // should be optimized for size.
9598   ScalarEpilogueLowering SEL =
9599       getScalarEpilogueLowering(F, L, Hints, OptForSize, TTI, TLI, LVL, &IAI);
9600
9601 // Check the loop for a trip count threshold: vectorize loops with a tiny trip
9602 // count by optimizing for size, to minimize overheads.
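  // For example, a loop with a known trip count of 8 falls below the
  // vectorizer-min-trip-count threshold (16 by default, see
  // TinyTripCountVectorThreshold) and is only worth vectorizing if that adds
  // no scalar iteration overhead such as runtime checks or a scalar epilogue.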
9603 auto ExpectedTC = getSmallBestKnownTC(PSE, L);
9604 if (ExpectedTC && ExpectedTC->isFixed() &&
9605 ExpectedTC->getFixedValue() < TinyTripCountVectorThreshold) {
9606 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
9607 << "This loop is worth vectorizing only if no scalar "
9608 << "iteration overheads are incurred.");
9609     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
9610       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
9611 else {
9612 LLVM_DEBUG(dbgs() << "\n");
9613 // Predicate tail-folded loops are efficient even when the loop
9614 // iteration count is low. However, setting the epilogue policy to
9615 // `CM_ScalarEpilogueNotAllowedLowTripLoop` prevents vectorizing loops
9616 // with runtime checks. It's more effective to let
9617 // `isOutsideLoopWorkProfitable` determine if vectorization is
9618 // beneficial for the loop.
9619       if (SEL != CM_ScalarEpilogueNotNeededUsePredicate)
9620         SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
9621     }
9622 }
9623
9624 // Check the function attributes to see if implicit floats or vectors are
9625 // allowed.
9626 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
9628 "Can't vectorize when the NoImplicitFloat attribute is used",
9629 "loop not vectorized due to NoImplicitFloat attribute",
9630 "NoImplicitFloat", ORE, L);
9631 Hints.emitRemarkWithHints();
9632 return false;
9633 }
9634
9635 // Check if the target supports potentially unsafe FP vectorization.
9636 // FIXME: Add a check for the type of safety issue (denormal, signaling)
9637 // for the target we're vectorizing for, to make sure none of the
9638 // additional fp-math flags can help.
9639 if (Hints.isPotentiallyUnsafe() &&
9640 TTI->isFPVectorizationPotentiallyUnsafe()) {
9642 "Potentially unsafe FP op prevents vectorization",
9643 "loop not vectorized due to unsafe FP support.",
9644 "UnsafeFP", ORE, L);
9645 Hints.emitRemarkWithHints();
9646 return false;
9647 }
9648
9649 bool AllowOrderedReductions;
9650 // If the flag is set, use that instead and override the TTI behaviour.
9651 if (ForceOrderedReductions.getNumOccurrences() > 0)
9652 AllowOrderedReductions = ForceOrderedReductions;
9653 else
9654 AllowOrderedReductions = TTI->enableOrderedReductions();
9655 if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
9656 ORE->emit([&]() {
9657 auto *ExactFPMathInst = Requirements.getExactFPInst();
9658 return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
9659 ExactFPMathInst->getDebugLoc(),
9660 ExactFPMathInst->getParent())
9661 << "loop not vectorized: cannot prove it is safe to reorder "
9662 "floating-point operations";
9663 });
9664 LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
9665 "reorder floating-point operations\n");
9666 Hints.emitRemarkWithHints();
9667 return false;
9668 }
9669
9670 // Use the cost model.
9671 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
9672 GetBFI, F, &Hints, IAI, OptForSize);
9673 // Use the planner for vectorization.
9674 LoopVectorizationPlanner LVP(L, LI, DT, TLI, *TTI, &LVL, CM, IAI, PSE, Hints,
9675 ORE);
9676
9677 // Get user vectorization factor and interleave count.
9678 ElementCount UserVF = Hints.getWidth();
9679 unsigned UserIC = Hints.getInterleave();
9680 if (UserIC > 1 && !LVL.isSafeForAnyVectorWidth())
9681 UserIC = 1;
9682
9683 // Plan how to best vectorize.
9684 LVP.plan(UserVF, UserIC);
9685   VectorizationFactor VF = LVP.computeBestVF();
9686   unsigned IC = 1;
9687
9688 if (ORE->allowExtraAnalysis(LV_NAME))
9689     LVP.emitInvalidCostRemarks(ORE);
9690
9691 GeneratedRTChecks Checks(PSE, DT, LI, TTI, CM.CostKind);
9692 if (LVP.hasPlanWithVF(VF.Width)) {
9693 // Select the interleave count.
9694 IC = LVP.selectInterleaveCount(LVP.getPlanFor(VF.Width), VF.Width, VF.Cost);
9695
9696 unsigned SelectedIC = std::max(IC, UserIC);
9697 // Optimistically generate runtime checks if they are needed. Drop them if
9698 // they turn out to not be profitable.
9699 if (VF.Width.isVector() || SelectedIC > 1) {
9700 Checks.create(L, *LVL.getLAI(), PSE.getPredicate(), VF.Width, SelectedIC,
9701 *ORE);
9702
9703 // Bail out early if either the SCEV or memory runtime checks are known to
9704 // fail. In that case, the vector loop would never execute.
9705 using namespace llvm::PatternMatch;
9706 if (Checks.getSCEVChecks().first &&
9707 match(Checks.getSCEVChecks().first, m_One()))
9708 return false;
9709 if (Checks.getMemRuntimeChecks().first &&
9710 match(Checks.getMemRuntimeChecks().first, m_One()))
9711 return false;
9712 }
9713
9714 // Check if it is profitable to vectorize with runtime checks.
9715 bool ForceVectorization =
9716       Hints.getForce() == LoopVectorizeHints::FK_Enabled;
9717   VPCostContext CostCtx(CM.TTI, *CM.TLI, LVP.getPlanFor(VF.Width), CM,
9718 CM.CostKind, CM.PSE, L);
9719 if (!ForceVectorization &&
9720 !isOutsideLoopWorkProfitable(Checks, VF, L, PSE, CostCtx,
9721 LVP.getPlanFor(VF.Width), SEL,
9722 CM.getVScaleForTuning())) {
9723 ORE->emit([&]() {
9724         return OptimizationRemarkAnalysisAliasing(
9725             DEBUG_TYPE, "CantReorderMemOps", L->getStartLoc(),
9726 L->getHeader())
9727 << "loop not vectorized: cannot prove it is safe to reorder "
9728 "memory operations";
9729 });
9730 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
9731 Hints.emitRemarkWithHints();
9732 return false;
9733 }
9734 }
9735
9736 // Identify the diagnostic messages that should be produced.
9737 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
9738 bool VectorizeLoop = true, InterleaveLoop = true;
9739 if (VF.Width.isScalar()) {
9740 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
9741 VecDiagMsg = {
9742 "VectorizationNotBeneficial",
9743 "the cost-model indicates that vectorization is not beneficial"};
9744 VectorizeLoop = false;
9745 }
9746
9747 if (UserIC == 1 && Hints.getInterleave() > 1) {
9749 "UserIC should only be ignored due to unsafe dependencies");
9750 LLVM_DEBUG(dbgs() << "LV: Ignoring user-specified interleave count.\n");
9751 IntDiagMsg = {"InterleavingUnsafe",
9752 "Ignoring user-specified interleave count due to possibly "
9753 "unsafe dependencies in the loop."};
9754 InterleaveLoop = false;
9755 } else if (!LVP.hasPlanWithVF(VF.Width) && UserIC > 1) {
9756 // Tell the user interleaving was avoided up-front, despite being explicitly
9757 // requested.
9758 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
9759 "interleaving should be avoided up front\n");
9760 IntDiagMsg = {"InterleavingAvoided",
9761 "Ignoring UserIC, because interleaving was avoided up front"};
9762 InterleaveLoop = false;
9763 } else if (IC == 1 && UserIC <= 1) {
9764 // Tell the user interleaving is not beneficial.
9765 LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
9766 IntDiagMsg = {
9767 "InterleavingNotBeneficial",
9768 "the cost-model indicates that interleaving is not beneficial"};
9769 InterleaveLoop = false;
9770 if (UserIC == 1) {
9771 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
9772 IntDiagMsg.second +=
9773 " and is explicitly disabled or interleave count is set to 1";
9774 }
9775 } else if (IC > 1 && UserIC == 1) {
9776     // Tell the user interleaving is beneficial, but it is explicitly disabled.
9777 LLVM_DEBUG(dbgs() << "LV: Interleaving is beneficial but is explicitly "
9778 "disabled.\n");
9779 IntDiagMsg = {"InterleavingBeneficialButDisabled",
9780 "the cost-model indicates that interleaving is beneficial "
9781 "but is explicitly disabled or interleave count is set to 1"};
9782 InterleaveLoop = false;
9783 }
9784
9785 // If there is a histogram in the loop, do not just interleave without
9786 // vectorizing. The order of operations will be incorrect without the
9787 // histogram intrinsics, which are only used for recipes with VF > 1.
9788 if (!VectorizeLoop && InterleaveLoop && LVL.hasHistograms()) {
9789 LLVM_DEBUG(dbgs() << "LV: Not interleaving without vectorization due "
9790 << "to histogram operations.\n");
9791 IntDiagMsg = {
9792 "HistogramPreventsScalarInterleaving",
9793 "Unable to interleave without vectorization due to constraints on "
9794 "the order of histogram operations"};
9795 InterleaveLoop = false;
9796 }
9797
9798 // Override IC if user provided an interleave count.
9799 IC = UserIC > 0 ? UserIC : IC;
9800
9801 // Emit diagnostic messages, if any.
9802 const char *VAPassName = Hints.vectorizeAnalysisPassName();
9803 if (!VectorizeLoop && !InterleaveLoop) {
9804     // Do not vectorize or interleave the loop.
9805 ORE->emit([&]() {
9806 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
9807 L->getStartLoc(), L->getHeader())
9808 << VecDiagMsg.second;
9809 });
9810 ORE->emit([&]() {
9811 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
9812 L->getStartLoc(), L->getHeader())
9813 << IntDiagMsg.second;
9814 });
9815 return false;
9816 }
9817
9818 if (!VectorizeLoop && InterleaveLoop) {
9819 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
9820 ORE->emit([&]() {
9821 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
9822 L->getStartLoc(), L->getHeader())
9823 << VecDiagMsg.second;
9824 });
9825 } else if (VectorizeLoop && !InterleaveLoop) {
9826 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
9827 << ") in " << L->getLocStr() << '\n');
9828 ORE->emit([&]() {
9829 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
9830 L->getStartLoc(), L->getHeader())
9831 << IntDiagMsg.second;
9832 });
9833 } else if (VectorizeLoop && InterleaveLoop) {
9834 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
9835 << ") in " << L->getLocStr() << '\n');
9836 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
9837 }
9838
9839 // Report the vectorization decision.
9840 if (VF.Width.isScalar()) {
9841 using namespace ore;
9842 assert(IC > 1);
9843 ORE->emit([&]() {
9844 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
9845 L->getHeader())
9846 << "interleaved loop (interleaved count: "
9847 << NV("InterleaveCount", IC) << ")";
9848 });
9849 } else {
9850 // Report the vectorization decision.
9851 reportVectorization(ORE, L, VF, IC);
9852 }
9853 if (ORE->allowExtraAnalysis(LV_NAME))
9854     checkMixedPrecision(L, ORE);
9855
9856 // If we decided that it is *legal* to interleave or vectorize the loop, then
9857 // do it.
9858
9859 VPlan &BestPlan = LVP.getPlanFor(VF.Width);
9860 // Consider vectorizing the epilogue too if it's profitable.
9861 VectorizationFactor EpilogueVF =
9862       LVP.selectEpilogueVectorizationFactor(VF.Width, IC);
9863   if (EpilogueVF.Width.isVector()) {
9864 std::unique_ptr<VPlan> BestMainPlan(BestPlan.duplicate());
9865
9866 // The first pass vectorizes the main loop and creates a scalar epilogue
9867 // to be vectorized by executing the plan (potentially with a different
9868 // factor) again shortly afterwards.
9869 VPlan &BestEpiPlan = LVP.getPlanFor(EpilogueVF.Width);
9870 BestEpiPlan.getMiddleBlock()->setName("vec.epilog.middle.block");
9871 BestEpiPlan.getVectorPreheader()->setName("vec.epilog.ph");
9872 preparePlanForMainVectorLoop(*BestMainPlan, BestEpiPlan);
9873 EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1,
9874 BestEpiPlan);
9875 EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TTI, AC, EPI, &CM,
9876 Checks, *BestMainPlan);
9877 auto ExpandedSCEVs = LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF,
9878 *BestMainPlan, MainILV, DT, false);
9879 ++LoopsVectorized;
9880
9881 // Second pass vectorizes the epilogue and adjusts the control flow
9882 // edges from the first pass.
9883 EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TTI, AC, EPI, &CM,
9884 Checks, BestEpiPlan);
9885     SmallVector<Instruction *> InstsToMove = preparePlanForEpilogueVectorLoop(
9886         BestEpiPlan, L, ExpandedSCEVs, EPI, CM, *PSE.getSE());
9887 LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV, DT,
9888 true);
9889 connectEpilogueVectorLoop(BestEpiPlan, L, EPI, DT, LVL, ExpandedSCEVs,
9890 Checks, InstsToMove);
9891 ++LoopsEpilogueVectorized;
9892 } else {
9893 InnerLoopVectorizer LB(L, PSE, LI, DT, TTI, AC, VF.Width, IC, &CM, Checks,
9894 BestPlan);
9895 // TODO: Move to general VPlan pipeline once epilogue loops are also
9896 // supported.
9897     VPlanTransforms::runPass(VPlanTransforms::materializeConstantVectorTripCount,
9898                              BestPlan, VF.Width, IC, PSE);
9899 LVP.addMinimumIterationCheck(BestPlan, VF.Width, IC,
9900                                  VF.MinProfitableTripCount);
9901
9902 LVP.executePlan(VF.Width, IC, BestPlan, LB, DT, false);
9903 ++LoopsVectorized;
9904 }
9905
9906 assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
9907 "DT not preserved correctly");
9908 assert(!verifyFunction(*F, &dbgs()));
9909
9910 return true;
9911}
9912
9914 LoopVectorizeResult LoopVectorizePass::runImpl(Function &F) {
9915 // Don't attempt if
9916 // 1. the target claims to have no vector registers, and
9917 // 2. interleaving won't help ILP.
9918 //
9919 // The second condition is necessary because, even if the target has no
9920 // vector registers, loop vectorization may still enable scalar
9921 // interleaving.
9922 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
9923 TTI->getMaxInterleaveFactor(ElementCount::getFixed(1)) < 2)
9924 return LoopVectorizeResult(false, false);
9925
9926 bool Changed = false, CFGChanged = false;
9927
9928 // The vectorizer requires loops to be in simplified form.
9929 // Since simplification may add new inner loops, it has to run before the
9930 // legality and profitability checks. This means running the loop vectorizer
9931 // will simplify all loops, regardless of whether anything ends up being
9932 // vectorized.
9933 for (const auto &L : *LI)
9934 Changed |= CFGChanged |=
9935 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
9936
9937 // Build up a worklist of inner-loops to vectorize. This is necessary as
9938 // the act of vectorizing or partially unrolling a loop creates new loops
9939 // and can invalidate iterators across the loops.
9940 SmallVector<Loop *, 8> Worklist;
9941
9942 for (Loop *L : *LI)
9943 collectSupportedLoops(*L, LI, ORE, Worklist);
9944
9945 LoopsAnalyzed += Worklist.size();
9946
9947 // Now walk the identified inner loops.
9948 while (!Worklist.empty()) {
9949 Loop *L = Worklist.pop_back_val();
9950
9951 // For the inner loops we actually process, form LCSSA to simplify the
9952 // transform.
9953 Changed |= formLCSSARecursively(*L, *DT, LI, SE);
9954
9955 Changed |= CFGChanged |= processLoop(L);
9956
9957 if (Changed) {
9958 LAIs->clear();
9959
9960#ifndef NDEBUG
9961 if (VerifySCEV)
9962 SE->verify();
9963#endif
9964 }
9965 }
9966
9967 // Process each loop nest in the function.
9968 return LoopVectorizeResult(Changed, CFGChanged);
9969}
9970
9971 PreservedAnalyses LoopVectorizePass::run(Function &F,
9972                                          FunctionAnalysisManager &AM) {
9973   LI = &AM.getResult<LoopAnalysis>(F);
9974 // There are no loops in the function. Return before computing other
9975 // expensive analyses.
9976 if (LI->empty())
9977 return PreservedAnalyses::all();
9978   SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
9979   TTI = &AM.getResult<TargetIRAnalysis>(F);
9980   DT = &AM.getResult<DominatorTreeAnalysis>(F);
9981   TLI = &AM.getResult<TargetLibraryAnalysis>(F);
9982   AC = &AM.getResult<AssumptionAnalysis>(F);
9983   DB = &AM.getResult<DemandedBitsAnalysis>(F);
9984   ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
9985   LAIs = &AM.getResult<LoopAccessAnalysis>(F);
9986   AA = &AM.getResult<AAManager>(F);
9987
9988 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
9989 PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
9990 GetBFI = [&AM, &F]() -> BlockFrequencyInfo & {
9991     return AM.getResult<BlockFrequencyAnalysis>(F);
9992   };
9993 LoopVectorizeResult Result = runImpl(F);
9994 if (!Result.MadeAnyChange)
9995 return PreservedAnalyses::all();
9996   PreservedAnalyses PA;
9997
9998 if (isAssignmentTrackingEnabled(*F.getParent())) {
9999 for (auto &BB : F)
10000       RemoveRedundantDbgInstrs(&BB);
10001   }
10002
10003 PA.preserve<LoopAnalysis>();
10004   PA.preserve<DominatorTreeAnalysis>();
10005   PA.preserve<ScalarEvolutionAnalysis>();
10006   PA.preserve<LoopAccessAnalysis>();
10007
10008 if (Result.MadeCFGChange) {
10009 // Making CFG changes likely means a loop got vectorized. Indicate that
10010 // extra simplification passes should be run.
10011 // TODO: MadeCFGChanges is not a perfect proxy. Extra passes should only
10012 // be run if runtime checks have been added.
10013     AM.getResult<ShouldRunExtraVectorPasses>(F);
10014     PA.preserve<ShouldRunExtraVectorPasses>();
10015   } else {
10016     PA.preserveSet<CFGAnalyses>();
10017   }
10018 return PA;
10019}
10020
10021 void LoopVectorizePass::printPipeline(
10022     raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
10023 static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
10024 OS, MapClassName2PassName);
10025
10026 OS << '<';
10027 OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
10028 OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
10029 OS << '>';
10030}
for(const MachineOperand &MO :llvm::drop_begin(OldMI.operands(), Desc.getNumOperands()))
static unsigned getIntrinsicID(const SDNode *N)
unsigned RegSize
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
aarch64 promote const
AMDGPU Lower Kernel Arguments
This file implements a class to represent arbitrary precision integral constant values and operations...
@ PostInc
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static bool isEqual(const Function &Caller, const Function &Callee)
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This is the interface for LLVM's primary stateless and local alias analysis.
#define X(NUM, ENUM, NAME)
Definition ELF.h:849
static bool IsEmptyBlock(MachineBasicBlock *MBB)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
static cl::opt< IntrinsicCostStrategy > IntrinsicCost("intrinsic-cost-strategy", cl::desc("Costing strategy for intrinsic instructions"), cl::init(IntrinsicCostStrategy::InstructionCost), cl::values(clEnumValN(IntrinsicCostStrategy::InstructionCost, "instruction-cost", "Use TargetTransformInfo::getInstructionCost"), clEnumValN(IntrinsicCostStrategy::IntrinsicCost, "intrinsic-cost", "Use TargetTransformInfo::getIntrinsicInstrCost"), clEnumValN(IntrinsicCostStrategy::TypeBasedIntrinsicCost, "type-based-intrinsic-cost", "Calculate the intrinsic cost based only on argument types")))
static InstructionCost getCost(Instruction &Inst, TTI::TargetCostKind CostKind, TargetTransformInfo &TTI)
Definition CostModel.cpp:73
This file defines DenseMapInfo traits for DenseMap.
This file defines the DenseMap class.
#define DEBUG_TYPE
This is the interface for a simple mod/ref and alias analysis over globals.
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This defines the Use class.
static bool hasNoUnsignedWrap(BinaryOperator &I)
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Legalize the Machine IR a function s Machine IR
Definition Legalizer.cpp:81
static cl::opt< unsigned, true > VectorizationFactor("force-vector-width", cl::Hidden, cl::desc("Sets the SIMD width. Zero is autoselect."), cl::location(VectorizerParams::VectorizationFactor))
This header provides classes for managing per-loop analyses.
static cl::opt< bool > WidenIV("loop-flatten-widen-iv", cl::Hidden, cl::init(true), cl::desc("Widen the loop induction variables, if possible, so " "overflow checks won't reject flattening"))
static const char * VerboseDebug
#define LV_NAME
This file defines the LoopVectorizationLegality class.
This file provides a LoopVectorizationPlanner class.
static void collectSupportedLoops(Loop &L, LoopInfo *LI, OptimizationRemarkEmitter *ORE, SmallVectorImpl< Loop * > &V)
static cl::opt< unsigned > EpilogueVectorizationMinVF("epilogue-vectorization-minimum-VF", cl::Hidden, cl::desc("Only loops with vectorization factor equal to or larger than " "the specified value are considered for epilogue vectorization."))
static cl::opt< unsigned > EpilogueVectorizationForceVF("epilogue-vectorization-force-VF", cl::init(1), cl::Hidden, cl::desc("When epilogue vectorization is enabled, and a value greater than " "1 is specified, forces the given VF for all applicable epilogue " "loops."))
static Type * maybeVectorizeType(Type *Ty, ElementCount VF)
static ElementCount determineVPlanVF(const TargetTransformInfo &TTI, LoopVectorizationCostModel &CM)
static ElementCount getSmallConstantTripCount(ScalarEvolution *SE, const Loop *L)
A version of ScalarEvolution::getSmallConstantTripCount that returns an ElementCount to include loops...
static bool hasUnsupportedHeaderPhiRecipe(VPlan &Plan)
Returns true if the VPlan contains header phi recipes that are not currently supported for epilogue v...
static cl::opt< unsigned > VectorizeMemoryCheckThreshold("vectorize-memory-check-threshold", cl::init(128), cl::Hidden, cl::desc("The maximum allowed number of runtime memory checks"))
static void preparePlanForMainVectorLoop(VPlan &MainPlan, VPlan &EpiPlan)
Prepare MainPlan for vectorizing the main vector loop during epilogue vectorization.
static cl::opt< unsigned > TinyTripCountVectorThreshold("vectorizer-min-trip-count", cl::init(16), cl::Hidden, cl::desc("Loops with a constant trip count that is smaller than this " "value are vectorized only if no scalar iteration overheads " "are incurred."))
Loops with a known constant trip count below this number are vectorized only if no scalar iteration o...
static void debugVectorizationMessage(const StringRef Prefix, const StringRef DebugMsg, Instruction *I)
Write a DebugMsg about vectorization to the debug output stream.
static cl::opt< bool > EnableCondStoresVectorization("enable-cond-stores-vec", cl::init(true), cl::Hidden, cl::desc("Enable if predication of stores during vectorization."))
static void legacyCSE(BasicBlock *BB)
FIXME: This legacy common-subexpression-elimination routine is scheduled for removal,...
static VPIRBasicBlock * replaceVPBBWithIRVPBB(VPBasicBlock *VPBB, BasicBlock *IRBB, VPlan *Plan=nullptr)
Replace VPBB with a VPIRBasicBlock wrapping IRBB.
static DebugLoc getDebugLocFromInstOrOperands(Instruction *I)
Look for a meaningful debug location on the instruction or its operands.
static Value * createInductionAdditionalBypassValues(PHINode *OrigPhi, const InductionDescriptor &II, IRBuilder<> &BypassBuilder, const SCEV2ValueTy &ExpandedSCEVs, Value *MainVectorTripCount, Instruction *OldInduction)
static void fixReductionScalarResumeWhenVectorizingEpilog(VPPhi *EpiResumePhiR, PHINode &EpiResumePhi, BasicBlock *BypassBlock)
static cl::opt< bool > ForceTargetSupportsScalableVectors("force-target-supports-scalable-vectors", cl::init(false), cl::Hidden, cl::desc("Pretend that scalable vectors are supported, even if the target does " "not support them. This flag should only be used for testing."))
static bool useActiveLaneMaskForControlFlow(TailFoldingStyle Style)
static cl::opt< bool > EnableEarlyExitVectorization("enable-early-exit-vectorization", cl::init(true), cl::Hidden, cl::desc("Enable vectorization of early exit loops with uncountable exits."))
static bool processLoopInVPlanNativePath(Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, std::function< BlockFrequencyInfo &()> GetBFI, bool OptForSize, LoopVectorizeHints &Hints, LoopVectorizationRequirements &Requirements)
static cl::opt< bool > ConsiderRegPressure("vectorizer-consider-reg-pressure", cl::init(false), cl::Hidden, cl::desc("Discard VFs if their register pressure is too high."))
static unsigned estimateElementCount(ElementCount VF, std::optional< unsigned > VScale)
This function attempts to return a value that represents the ElementCount at runtime.
static constexpr uint32_t MinItersBypassWeights[]
static cl::opt< unsigned > ForceTargetNumScalarRegs("force-target-num-scalar-regs", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's number of scalar registers."))
static cl::opt< bool > UseWiderVFIfCallVariantsPresent("vectorizer-maximize-bandwidth-for-vector-calls", cl::init(true), cl::Hidden, cl::desc("Try wider VFs if they enable the use of vector variants"))
static std::optional< unsigned > getMaxVScale(const Function &F, const TargetTransformInfo &TTI)
static cl::opt< unsigned > SmallLoopCost("small-loop-cost", cl::init(20), cl::Hidden, cl::desc("The cost of a loop that is considered 'small' by the interleaver."))
static void connectEpilogueVectorLoop(VPlan &EpiPlan, Loop *L, EpilogueLoopVectorizationInfo &EPI, DominatorTree *DT, LoopVectorizationLegality &LVL, DenseMap< const SCEV *, Value * > &ExpandedSCEVs, GeneratedRTChecks &Checks, ArrayRef< Instruction * > InstsToMove)
Connect the epilogue vector loop generated for EpiPlan to the main vector.
static bool planContainsAdditionalSimplifications(VPlan &Plan, VPCostContext &CostCtx, Loop *TheLoop, ElementCount VF)
Return true if the original loop \ TheLoop contains any instructions that do not have corresponding r...
static cl::opt< unsigned > ForceTargetNumVectorRegs("force-target-num-vector-regs", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's number of vector registers."))
static bool isExplicitVecOuterLoop(Loop *OuterLp, OptimizationRemarkEmitter *ORE)
static cl::opt< bool > EnableIndVarRegisterHeur("enable-ind-var-reg-heur", cl::init(true), cl::Hidden, cl::desc("Count the induction variable only once when interleaving"))
static cl::opt< TailFoldingStyle > ForceTailFoldingStyle("force-tail-folding-style", cl::desc("Force the tail folding style"), cl::init(TailFoldingStyle::None), cl::values(clEnumValN(TailFoldingStyle::None, "none", "Disable tail folding"), clEnumValN(TailFoldingStyle::Data, "data", "Create lane mask for data only, using active.lane.mask intrinsic"), clEnumValN(TailFoldingStyle::DataWithoutLaneMask, "data-without-lane-mask", "Create lane mask with compare/stepvector"), clEnumValN(TailFoldingStyle::DataAndControlFlow, "data-and-control", "Create lane mask using active.lane.mask intrinsic, and use " "it for both data and control flow"), clEnumValN(TailFoldingStyle::DataWithEVL, "data-with-evl", "Use predicated EVL instructions for tail folding. If EVL " "is unsupported, fallback to data-without-lane-mask.")))
static ScalarEpilogueLowering getScalarEpilogueLowering(Function *F, Loop *L, LoopVectorizeHints &Hints, bool OptForSize, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, LoopVectorizationLegality &LVL, InterleavedAccessInfo *IAI)
static cl::opt< bool > EnableEpilogueVectorization("enable-epilogue-vectorization", cl::init(true), cl::Hidden, cl::desc("Enable vectorization of epilogue loops."))
static cl::opt< bool > PreferPredicatedReductionSelect("prefer-predicated-reduction-select", cl::init(false), cl::Hidden, cl::desc("Prefer predicating a reduction operation over an after loop select."))
static cl::opt< bool > PreferInLoopReductions("prefer-inloop-reductions", cl::init(false), cl::Hidden, cl::desc("Prefer in-loop vector reductions, " "overriding the targets preference."))
static SmallVector< Instruction * > preparePlanForEpilogueVectorLoop(VPlan &Plan, Loop *L, const SCEV2ValueTy &ExpandedSCEVs, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel &CM, ScalarEvolution &SE)
Prepare Plan for vectorizing the epilogue loop.
static const SCEV * getAddressAccessSCEV(Value *Ptr, PredicatedScalarEvolution &PSE, const Loop *TheLoop)
Gets the address access SCEV for Ptr, if it should be used for cost modeling according to isAddressSC...
static cl::opt< bool > EnableLoadStoreRuntimeInterleave("enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, cl::desc("Enable runtime interleaving until load/store ports are saturated"))
static cl::opt< bool > VPlanBuildStressTest("vplan-build-stress-test", cl::init(false), cl::Hidden, cl::desc("Build VPlan for every supported loop nest in the function and bail " "out right after the build (stress test the VPlan H-CFG construction " "in the VPlan-native vectorization path)."))
static bool hasIrregularType(Type *Ty, const DataLayout &DL)
A helper function that returns true if the given type is irregular.
static cl::opt< bool > LoopVectorizeWithBlockFrequency("loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, cl::desc("Enable the use of the block frequency analysis to access PGO " "heuristics minimizing code growth in cold regions and being more " "aggressive in hot regions."))
static std::optional< ElementCount > getSmallBestKnownTC(PredicatedScalarEvolution &PSE, Loop *L, bool CanUseConstantMax=true)
Returns "best known" trip count, which is either a valid positive trip count or std::nullopt when an ...
static Value * getExpandedStep(const InductionDescriptor &ID, const SCEV2ValueTy &ExpandedSCEVs)
Return the expanded step for ID using ExpandedSCEVs to look up SCEV expansion results.
static bool useActiveLaneMask(TailFoldingStyle Style)
static bool hasReplicatorRegion(VPlan &Plan)
static bool isIndvarOverflowCheckKnownFalse(const LoopVectorizationCostModel *Cost, ElementCount VF, std::optional< unsigned > UF=std::nullopt)
For the given VF and UF and maximum trip count computed for the loop, return whether the induction va...
static void addFullyUnrolledInstructionsToIgnore(Loop *L, const LoopVectorizationLegality::InductionList &IL, SmallPtrSetImpl< Instruction * > &InstsToIgnore)
Knowing that loop L executes a single vector iteration, add instructions that will get simplified and...
static cl::opt< PreferPredicateTy::Option > PreferPredicateOverEpilogue("prefer-predicate-over-epilogue", cl::init(PreferPredicateTy::ScalarEpilogue), cl::Hidden, cl::desc("Tail-folding and predication preferences over creating a scalar " "epilogue loop."), cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue, "scalar-epilogue", "Don't tail-predicate loops, create scalar epilogue"), clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue, "predicate-else-scalar-epilogue", "prefer tail-folding, create scalar epilogue if tail " "folding fails."), clEnumValN(PreferPredicateTy::PredicateOrDontVectorize, "predicate-dont-vectorize", "prefers tail-folding, don't attempt vectorization if " "tail-folding fails.")))
static bool hasFindLastReductionPhi(VPlan &Plan)
Returns true if the VPlan contains a VPReductionPHIRecipe with FindLast recurrence kind.
static cl::opt< bool > EnableInterleavedMemAccesses("enable-interleaved-mem-accesses", cl::init(false), cl::Hidden, cl::desc("Enable vectorization on interleaved memory accesses in a loop"))
static cl::opt< bool > EnableMaskedInterleavedMemAccesses("enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden, cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"))
An interleave-group may need masking if it resides in a block that needs predication,...
static cl::opt< bool > ForceOrderedReductions("force-ordered-reductions", cl::init(false), cl::Hidden, cl::desc("Enable the vectorisation of loops with in-order (strict) " "FP reductions"))
static cl::opt< cl::boolOrDefault > ForceSafeDivisor("force-widen-divrem-via-safe-divisor", cl::Hidden, cl::desc("Override cost based safe divisor widening for div/rem instructions"))
static InstructionCost calculateEarlyExitCost(VPCostContext &CostCtx, VPlan &Plan, ElementCount VF)
For loops with uncountable early exits, find the cost of doing work when exiting the loop early,...
static cl::opt< unsigned > ForceTargetMaxVectorInterleaveFactor("force-target-max-vector-interleave", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's max interleave factor for " "vectorized loops."))
static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI)
cl::opt< unsigned > NumberOfStoresToPredicate("vectorize-num-stores-pred", cl::init(1), cl::Hidden, cl::desc("Max number of stores to be predicated behind an if."))
The number of stores in a loop that are allowed to need predication.
static cl::opt< unsigned > MaxNestedScalarReductionIC("max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, cl::desc("The maximum interleave count to use when interleaving a scalar " "reduction in a nested loop."))
static cl::opt< unsigned > ForceTargetMaxScalarInterleaveFactor("force-target-max-scalar-interleave", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's max interleave factor for " "scalar loops."))
static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE)
static bool willGenerateVectors(VPlan &Plan, ElementCount VF, const TargetTransformInfo &TTI)
Check if any recipe of Plan will generate a vector value, which will be assigned a vector register.
static cl::opt< bool > ForceTargetSupportsMaskedMemoryOps("force-target-supports-masked-memory-ops", cl::init(false), cl::Hidden, cl::desc("Assume the target supports masked memory operations (used for " "testing)."))
Note: This currently only applies to llvm.masked.load and llvm.masked.store.
static bool isOutsideLoopWorkProfitable(GeneratedRTChecks &Checks, VectorizationFactor &VF, Loop *L, PredicatedScalarEvolution &PSE, VPCostContext &CostCtx, VPlan &Plan, ScalarEpilogueLowering SEL, std::optional< unsigned > VScale)
This function determines whether or not it's still profitable to vectorize the loop given the extra w...
static void fixScalarResumeValuesFromBypass(BasicBlock *BypassBlock, Loop *L, VPlan &BestEpiPlan, LoopVectorizationLegality &LVL, const SCEV2ValueTy &ExpandedSCEVs, Value *MainVectorTripCount)
static cl::opt< bool > MaximizeBandwidth("vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden, cl::desc("Maximize bandwidth when selecting vectorization factor which " "will be determined by the smallest type in loop."))
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop, Instruction *I, DebugLoc DL={})
Create an analysis remark that explains why vectorization failed.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
This file implements a map that provides insertion order iteration.
This file contains the declarations for metadata subclasses.
#define T
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
#define P(N)
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static BinaryOperator * CreateMul(Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore, Value *FlagsOp)
static BinaryOperator * CreateAdd(Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore, Value *FlagsOp)
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static InstructionCost getScalarizationOverhead(const TargetTransformInfo &TTI, Type *ScalarTy, VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={})
This is similar to TargetTransformInfo::getScalarizationOverhead, but if ScalarTy is a FixedVectorTyp...
This file contains some templates that are useful if you are working with the STL at all.
#define OP(OPC)
Definition Instruction.h:46
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
#define DEBUG_WITH_TYPE(TYPE,...)
DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.
Definition Debug.h:72
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
This pass exposes codegen information to IR-level passes.
LocallyHashedType DenseMapInfo< LocallyHashedType >::Empty
This file implements the TypeSwitch template, which mimics a switch() statement whose cases are type names.
This file contains the declarations of different VPlan-related auxiliary helpers.
This file provides utility VPlan to VPlan transformations.
#define RUN_VPLAN_PASS(PASS,...)
#define RUN_VPLAN_PASS_NO_VERIFY(PASS,...)
This file declares the class VPlanVerifier, which contains utility functions to check the consistency and invariants of a VPlan.
This file contains the declarations of the Vectorization Plan base classes:
static const char PassName[]
Value * RHS
Value * LHS
static const uint32_t IV[8]
Definition blake3_impl.h:83
A manager for alias analyses.
Class for arbitrary precision integers.
Definition APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:235
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1555
unsigned getActiveBits() const
Compute the number of active bits in the value.
Definition APInt.h:1527
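A minimal sketch tying these APInt helpers together (values chosen for illustration):

#include "llvm/ADT/APInt.h"
#include <cassert>
using namespace llvm;

void apIntDemo() {
  APInt Mask = APInt::getAllOnes(8);  // 8-bit value with all bits set: 0xFF
  assert(Mask.getActiveBits() == 8);  // highest set bit is bit 7
  assert(Mask.getZExtValue() == 255); // zero-extended to uint64_t
}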
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
A function analysis which provides an AssumptionCache.
A cache of @llvm.assume calls within a function.
LLVM_ABI unsigned getVScaleRangeMin() const
Returns the minimum value for the vscale_range attribute.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:518
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI instruction.
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
LLVM_ABI const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well formed.
Definition BasicBlock.h:233
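A short sketch using several of these BasicBlock accessors together (the function name is illustrative):

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

void scanBlock(BasicBlock &BB) {
  for (PHINode &Phi : BB.phis())       // PHIs always lead the block
    (void)Phi.getNumIncomingValues();
  auto IP = BB.getFirstInsertionPt();  // first legal non-PHI insert point
  (void)IP;
  if (const Instruction *Term = BB.getTerminator())
    (void)Term->getOpcode();           // e.g. br, switch, ret
}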
BinaryOps getOpcode() const
Definition InstrTypes.h:374
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequencies.
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
bool isNoBuiltin() const
Return true if the call should not be treated as a call to a builtin.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signature does not match the call signature.
Value * getArgOperand(unsigned i) const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition InstrTypes.h:986
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, OGT -> OLE, OLT -> UGE, etc.
Definition InstrTypes.h:789
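For example, inverting an unsigned comparison predicate (a minimal sketch):

#include "llvm/IR/InstrTypes.h"
using namespace llvm;

CmpInst::Predicate invertULT() {
  CmpInst::Predicate P = CmpInst::ICMP_ULT; // unsigned less than
  return CmpInst::getInversePredicate(P);   // ICMP_UGE: unsigned greater or equal
}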
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign information.
Conditional Branch instruction.
static CondBrInst * Create(Value *Cond, BasicBlock *IfTrue, BasicBlock *IfFalse, InsertPosition InsertBefore=nullptr)
BasicBlock * getSuccessor(unsigned i) const
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
A debug info location.
Definition DebugLoc.h:123
static DebugLoc getTemporary()
Definition DebugLoc.h:160
static DebugLoc getUnknown()
Definition DebugLoc.h:161
An analysis that produces DemandedBits for a function.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exists.
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:256
iterator end()
Definition DenseMap.h:81
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition DenseMap.h:169
void insert_range(Range &&R)
Inserts range of 'std::pair<KeyT, ValueT>' values into the map.
Definition DenseMap.h:294
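A minimal sketch of the DenseMap operations listed above (key/value types chosen for illustration):

#include "llvm/ADT/DenseMap.h"
using namespace llvm;

void denseMapDemo() {
  DenseMap<int, unsigned> Widths;
  Widths.try_emplace(1, 32);  // constructs the value only if key 1 is new
  if (Widths.contains(1))
    (void)Widths.lookup(1);   // 32; a missing key yields a default unsigned
  if (Widths.find(2) == Widths.end()) {
    // key 2 was never inserted
  }
}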
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
Analysis pass which computes a DominatorTree.
Definition Dominators.h:278
void changeImmediateDominator(DomTreeNodeBase< NodeT > *N, DomTreeNodeBase< NodeT > *NewIDom)
changeImmediateDominator - This method is used to update the dominator tree information when a node's immediate dominator changes.
void eraseNode(NodeT *BB)
eraseNode - Removes a node from the dominator tree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
constexpr bool isVector() const
One or more elements.
Definition TypeSize.h:324
static constexpr ElementCount getScalable(ScalarTy MinVal)
Definition TypeSize.h:312
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
Definition TypeSize.h:315
constexpr bool isScalar() const
Exactly one element.
Definition TypeSize.h:320
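A minimal sketch of constructing and querying ElementCounts:

#include "llvm/Support/TypeSize.h"
#include <cassert>
using namespace llvm;

void elementCountDemo() {
  ElementCount Fixed = ElementCount::getFixed(4);   // exactly 4 elements
  ElementCount Scal = ElementCount::getScalable(2); // 2 * vscale elements
  assert(Fixed.isVector() && Scal.isScalable());
  ElementCount One = ElementCount::get(1, /*Scalable=*/false);
  assert(One.isScalar());                           // exactly one element
}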
EpilogueVectorizerEpilogueLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, GeneratedRTChecks &Checks, VPlan &Plan)
BasicBlock * createVectorizedLoopSkeleton() final
Implements the interface for creating a vectorized skeleton using the epilogue loop strategy (i.e., the second pass of vplan execution).
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace information is requested.
A specialized derived class of inner loop vectorizer that performs vectorization of main loops in the...
void introduceCheckBlockInVPlan(BasicBlock *CheckIRBB)
Introduces a new VPIRBasicBlock for CheckIRBB to Plan between the vector preheader and its predecessor.
BasicBlock * emitIterationCountCheck(BasicBlock *VectorPH, BasicBlock *Bypass, bool ForEpilogue)
Emits an iteration count bypass check once for the main loop (when ForEpilogue is false) and once for the epilogue loop (when ForEpilogue is true).
Value * createIterationCountCheck(BasicBlock *VectorPH, ElementCount VF, unsigned UF) const
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace information is requested.
EpilogueVectorizerMainLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, GeneratedRTChecks &Check, VPlan &Plan)
BasicBlock * createVectorizedLoopSkeleton() final
Implements the interface for creating a vectorized skeleton using the main loop strategy (i.e., the first pass of vplan execution).
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:23
Class to represent function types.
param_iterator param_begin() const
param_iterator param_end() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:211
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition Function.cpp:763
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:728
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags none()
void applyUpdates(ArrayRef< UpdateT > Updates)
Submit updates to all available trees.
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Definition IRBuilder.h:345
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2811
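A small sketch of driving an IRBuilder with fast-math flags (function and value names are illustrative):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

Value *buildFastFAdd(IRBuilder<> &B, Value *X, Value *Y) {
  FastMathFlags FMF;
  FMF.setFast();           // enable the full fast-math set
  B.setFastMathFlags(FMF); // applies to FP ops created from here on
  return B.CreateFAdd(X, Y, "sum");
}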
A struct for saving information about induction variables.
const SCEV * getStep() const
ArrayRef< Instruction * > getCastInsts() const
Returns an ArrayRef to the type cast instructions in the induction update chain, that are redundant when guarded with a runtime SCEV overflow check.
InductionKind
This enum represents the kinds of inductions that we support.
@ IK_NoInduction
Not an induction variable.
@ IK_FpInduction
Floating point induction variable.
@ IK_PtrInduction
Pointer induction var. Step = C.
@ IK_IntInduction
Integer induction variable. Step = C.
InnerLoopAndEpilogueVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, GeneratedRTChecks &Checks, VPlan &Plan, ElementCount VecWidth, ElementCount MinProfitableTripCount, unsigned UnrollFactor)
EpilogueLoopVectorizationInfo & EPI
Holds and updates state information required to vectorize the main loop and its epilogue in two separate passes.
InnerLoopVectorizer vectorizes loops which contain only one basic block to a specified vectorization factor (VF).
virtual void printDebugTracesAtStart()
Allow subclasses to override and print debug traces before/after vplan execution, when trace information is requested.
Value * TripCount
Trip count of the original loop.
const TargetTransformInfo * TTI
Target Transform Info.
LoopVectorizationCostModel * Cost
The profitability analysis.
Value * getTripCount() const
Returns the original loop trip count.
friend class LoopVectorizationPlanner
InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, ElementCount VecWidth, unsigned UnrollFactor, LoopVectorizationCostModel *CM, GeneratedRTChecks &RTChecks, VPlan &Plan)
PredicatedScalarEvolution & PSE
A wrapper around ScalarEvolution used to add runtime SCEV checks.
LoopInfo * LI
Loop Info.
DominatorTree * DT
Dominator Tree.
void setTripCount(Value *TC)
Used to set the trip count after ILV's construction and after the preheader block has been executed.
void fixVectorizedLoop(VPTransformState &State)
Fix the vectorized code, taking care of header phi's, and more.
virtual BasicBlock * createVectorizedLoopSkeleton()
Creates a basic block for the scalar preheader.
virtual void printDebugTracesAtEnd()
AssumptionCache * AC
Assumption Cache.
IRBuilder Builder
The builder that we use.
void fixNonInductionPHIs(VPTransformState &State)
Fix the non-induction PHIs in Plan.
VPBasicBlock * VectorPHVPBB
The vector preheader block of Plan, used as target for check blocks introduced during skeleton creation.
unsigned UF
The vectorization unroll factor to use.
GeneratedRTChecks & RTChecks
Structure to hold information about generated runtime checks, responsible for cleaning the checks, if vectorization turns out unprofitable.
virtual ~InnerLoopVectorizer()=default
ElementCount VF
The vectorization SIMD factor to use.
Loop * OrigLoop
The original loop.
BasicBlock * createScalarPreheader(StringRef Prefix)
Create and return a new IR basic block for the scalar preheader whose name is prefixed with Prefix.
InstSimplifyFolder - Use InstructionSimplify to fold operations to existing values.
static InstructionCost getInvalid(CostType Val=0)
static InstructionCost getMax()
CostType getValue() const
This function is intended to be used as sparingly as possible, since the class provides the full range of operator support required for arithmetic and comparisons.
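A minimal sketch showing why getValue() is rarely needed: InstructionCost supports comparison directly, and invalid costs propagate:

#include "llvm/Support/InstructionCost.h"
using namespace llvm;

InstructionCost cheaperOf(InstructionCost A, InstructionCost B) {
  if (!A.isValid() || !B.isValid())
    return InstructionCost::getInvalid(); // "cannot be costed" propagates
  return A < B ? A : B;                   // compare without unwrapping
}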
bool isCast() const
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not have a module.
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos lives in, right before MovePos.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other instructions.
const char * getOpcodeName() const
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:354
LLVM_ABI APInt getMask() const
For example, this is 0xFF for an 8 bit integer, 0xFFFF for i16, etc.
Definition Type.cpp:378
The group of interleaved loads/stores sharing the same stride and close to each other.
uint32_t getFactor() const
InstTy * getMember(uint32_t Index) const
Get the member with the given index Index.
InstTy * getInsertPos() const
uint32_t getNumMembers() const
Drive the analysis of interleaved memory accesses in the loop.
bool requiresScalarEpilogue() const
Returns true if an interleaved group that may access memory out-of-bounds requires a scalar epilogue iteration for correctness.
LLVM_ABI void analyzeInterleaving(bool EnableMaskedInterleavedGroup)
Analyze the interleaved accesses and collect them in interleave groups.
An instruction for reading from memory.
Type * getPointerOperandType() const
This analysis provides dependence information for the memory accesses of a loop.
Drive the analysis of memory accesses in the loop.
const RuntimePointerChecking * getRuntimePointerChecking() const
unsigned getNumRuntimePointerChecks() const
Number of memchecks required to prove independence of otherwise may-alias pointers.
Analysis pass that exposes the LoopInfo for a function.
Definition LoopInfo.h:569
bool contains(const LoopT *L) const
Return true if the specified loop is contained within this loop.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
void getExitingBlocks(SmallVectorImpl< BlockT * > &ExitingBlocks) const
Return all blocks inside the loop that have successors outside of the loop.
BlockT * getHeader() const
iterator_range< block_iterator > blocks() const
ArrayRef< BlockT * > getBlocks() const
Get a list of the basic blocks which make up this loop.
Store the result of a depth first search within basic blocks contained by a single loop.
RPOIterator beginRPO() const
Reverse iterate over the cached postorder blocks.
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
RPOIterator endRPO() const
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse post-order traversal of the loop blocks.
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
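A minimal sketch of a reverse post-order walk over a loop body using this wrapper:

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
using namespace llvm;

void visitLoopRPO(Loop *L, LoopInfo *LI) {
  LoopBlocksRPO RPOT(L); // reverse post-order view of L's blocks
  RPOT.perform(LI);      // run the DFS and cache the ordering
  for (BasicBlock *BB : RPOT)
    (void)BB;            // each block is visited after its loop predecessors
}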
void removeBlock(BlockT *BB)
This method completely removes BB from all data structures, including all of the Loop objects it is nested in and our mapping from BasicBlocks to loops.
LoopVectorizationCostModel - estimates the expected speedups due to vectorization.
SmallPtrSet< Type *, 16 > ElementTypesInLoop
All element types found in the loop.
bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment, unsigned AddressSpace) const
Returns true if the target machine supports masked load operation for the given DataType and kind of access to Ptr.
void collectElementTypesForWidening()
Collect all element types in the loop for which widening is needed.
bool canVectorizeReductions(ElementCount VF) const
Returns true if the target machine supports all of the reduction variables found for the given VF.
bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment, unsigned AddressSpace) const
Returns true if the target machine supports masked store operation for the given DataType and kind of access to Ptr.
bool isEpilogueVectorizationProfitable(const ElementCount VF, const unsigned IC) const
Returns true if epilogue vectorization is considered profitable, and false otherwise.
bool useWideActiveLaneMask() const
Returns true if the use of wide lane masks is requested and the loop is using tail-folding with a lane mask.
bool isPredicatedInst(Instruction *I) const
Returns true if I is an instruction that needs to be predicated at runtime.
void collectValuesToIgnore()
Collect values we want to ignore in the cost model.
BlockFrequencyInfo * BFI
The BlockFrequencyInfo returned from GetBFI.
void collectInLoopReductions()
Split reductions into those that happen in the loop, and those that happen outside.
BlockFrequencyInfo & getBFI()
Returns the BlockFrequencyInfo for the function if cached, otherwise fetches it via GetBFI.
std::pair< unsigned, unsigned > getSmallestAndWidestTypes()
bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be uniform after vectorization.
bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF)
Returns true if an artificially high cost for emulated masked memrefs should be used.
void collectNonVectorizedAndSetWideningDecisions(ElementCount VF)
Collect values that will not be widened, including Uniforms, Scalars, and Instructions to Scalarize for the given VF.
PredicatedScalarEvolution & PSE
Predicated scalar evolution analysis.
const LoopVectorizeHints * Hints
Loop Vectorize Hint.
std::optional< unsigned > getMaxSafeElements() const
Return maximum safe number of elements to be processed per vector iteration, which do not prevent store-load forwarding.
const TargetTransformInfo & TTI
Vector target information.
LoopVectorizationLegality * Legal
Vectorization legality.
uint64_t getPredBlockCostDivisor(TargetTransformInfo::TargetCostKind CostKind, const BasicBlock *BB)
A helper function that returns how much we should divide the cost of a predicated block by.
std::optional< InstructionCost > getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy) const
Return the cost of instructions in an inloop reduction pattern, if I is part of that pattern.
InstructionCost getInstructionCost(Instruction *I, ElementCount VF)
Returns the execution time cost of an instruction for a given vector width.
DemandedBits * DB
Demanded bits analysis.
bool interleavedAccessCanBeWidened(Instruction *I, ElementCount VF) const
Returns true if I is a memory instruction in an interleaved-group of memory accesses that can be vectorized.
const TargetLibraryInfo * TLI
Target Library Info.
bool memoryInstructionCanBeWidened(Instruction *I, ElementCount VF)
Returns true if I is a memory instruction with consecutive memory access that can be widened.
const InterleaveGroup< Instruction > * getInterleavedAccessGroup(Instruction *Instr) const
Get the interleaved access group that Instr belongs to.
InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const
Estimate cost of an intrinsic call instruction CI if it were vectorized with factor VF.
bool OptForSize
Whether this loop should be optimized for size based on function attribute or profile information.
bool useMaxBandwidth(TargetTransformInfo::RegisterKind RegKind)
bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be scalar after vectorization.
bool isOptimizableIVTruncate(Instruction *I, ElementCount VF)
Return True if instruction I is an optimizable truncate whose operand is an induction variable.
FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC)
bool shouldConsiderRegPressureForVF(ElementCount VF)
Loop * TheLoop
The loop that we evaluate.
TTI::TargetCostKind CostKind
The kind of cost that we are calculating.
InterleavedAccessInfo & InterleaveInfo
The interleave access information contains groups of interleaved accesses with the same stride and close to each other.
SmallPtrSet< const Value *, 16 > ValuesToIgnore
Values to ignore in the cost model.
void setVectorizedCallDecision(ElementCount VF)
A call may be vectorized in different ways depending on whether we have vectorized variants available and whether the target supports masking.
void invalidateCostModelingDecisions()
Invalidates decisions already taken by the cost model.
bool isAccessInterleaved(Instruction *Instr) const
Check if Instr belongs to any interleaved access group.
bool selectUserVectorizationFactor(ElementCount UserVF)
Setup cost-based decisions for user vectorization factor.
std::optional< unsigned > getVScaleForTuning() const
Return the value of vscale used for tuning the cost model.
void setTailFoldingStyle(bool IsScalableVF, unsigned UserIC)
Selects and saves TailFoldingStyle.
OptimizationRemarkEmitter * ORE
Interface to emit optimization remarks.
bool preferPredicatedLoop() const
Returns true if tail-folding is preferred over a scalar epilogue.
LoopInfo * LI
Loop Info analysis.
bool requiresScalarEpilogue(bool IsVectorizing) const
Returns true if we're required to use a scalar epilogue for at least the final iteration of the original loop.
SmallPtrSet< const Value *, 16 > VecValuesToIgnore
Values to ignore in the cost model when VF > 1.
bool isInLoopReduction(PHINode *Phi) const
Returns true if the Phi is part of an inloop reduction.
bool isProfitableToScalarize(Instruction *I, ElementCount VF) const
void setWideningDecision(const InterleaveGroup< Instruction > *Grp, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for interleaving group Grp and vector width VF.
const MapVector< Instruction *, uint64_t > & getMinimalBitwidths() const
CallWideningDecision getCallWideningDecision(CallInst *CI, ElementCount VF) const
bool isLegalGatherOrScatter(Value *V, ElementCount VF)
Returns true if the target machine can represent V as a masked gather or scatter operation.
bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const
bool shouldConsiderInvariant(Value *Op)
Returns true if Op should be considered invariant and if it is trivially hoistable.
bool foldTailByMasking() const
Returns true if all loop blocks should be masked to fold tail loop.
bool foldTailWithEVL() const
Returns true if VP intrinsics with explicit vector length support should be generated in the tail folded loop.
bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const
Returns true if the instructions in this block require predication for any reason, e.g. because tail folding now requires a predicate, or because the block in the original loop was predicated.
void setCallWideningDecision(CallInst *CI, ElementCount VF, InstWidening Kind, Function *Variant, Intrinsic::ID IID, std::optional< unsigned > MaskPos, InstructionCost Cost)
AssumptionCache * AC
Assumption cache.
void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for instruction I and vector width VF.
InstWidening
Decision that was taken during cost calculation for memory instruction.
bool usePredicatedReductionSelect(RecurKind RecurrenceKind) const
Returns true if the predicated reduction select should be used to set the incoming value for the reduction phi.
std::pair< InstructionCost, InstructionCost > getDivRemSpeculationCost(Instruction *I, ElementCount VF)
Return the costs for our two available strategies for lowering a div/rem operation which requires speculating at least one lane.
InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF) const
Estimate cost of a call instruction CI if it were vectorized with factor VF.
bool isScalarWithPredication(Instruction *I, ElementCount VF)
Returns true if I is an instruction which requires predication and for which our chosen predication strategy is scalarization.
bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) const
Returns true if we should use strict in-order reductions for the given RdxDesc.
bool isDivRemScalarWithPredication(InstructionCost ScalarCost, InstructionCost SafeDivisorCost) const
Given costs for both strategies, return true if the scalar predication lowering should be used for di...
std::function< BlockFrequencyInfo &()> GetBFI
A function to lazily fetch BlockFrequencyInfo.
LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, LoopVectorizationLegality *Legal, const TargetTransformInfo &TTI, const TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, std::function< BlockFrequencyInfo &()> GetBFI, const Function *F, const LoopVectorizeHints *Hints, InterleavedAccessInfo &IAI, bool OptForSize)
InstructionCost expectedCost(ElementCount VF)
Returns the expected execution cost.
void setCostBasedWideningDecision(ElementCount VF)
Memory access instruction may be vectorized in more than one way.
InstWidening getWideningDecision(Instruction *I, ElementCount VF) const
Return the cost model decision for the given instruction I and vector width VF.
FixedScalableVFPair MaxPermissibleVFWithoutMaxBW
The highest VF possible for this loop, without using MaxBandwidth.
const SmallPtrSetImpl< PHINode * > & getInLoopReductions() const
Returns the set of in-loop reduction PHIs.
bool isScalarEpilogueAllowed() const
Returns true if a scalar epilogue is not allowed due to optsize or a loop hint annotation.
InstructionCost getWideningCost(Instruction *I, ElementCount VF)
Return the vectorization cost for the given instruction I and vector width VF.
TailFoldingStyle getTailFoldingStyle() const
Returns the TailFoldingStyle that is best for the current loop.
void collectInstsToScalarize(ElementCount VF)
Collects the instructions to scalarize for each predicated instruction in the loop.
LoopVectorizationLegality checks if it is legal to vectorize a loop, and to what vectorization factor.
MapVector< PHINode *, InductionDescriptor > InductionList
InductionList saves induction variables and maps them to the induction descriptor.
const SmallPtrSetImpl< const Instruction * > & getPotentiallyFaultingLoads() const
Returns potentially faulting loads.
bool canVectorize(bool UseVPlanNativePath)
Returns true if it is legal to vectorize this loop.
bool canVectorizeFPMath(bool EnableStrictReductions)
Returns true if it is legal to vectorize the FP math operations in this loop.
PHINode * getPrimaryInduction()
Returns the primary induction variable.
const SmallVector< BasicBlock *, 4 > & getCountableExitingBlocks() const
Returns all exiting blocks with a countable exit, i.e. an exit whose exit count can be computed.
const InductionList & getInductionVars() const
Returns the induction variables found in the loop.
bool hasUncountableEarlyExit() const
Returns true if the loop has uncountable early exits, i.e. exiting blocks for which an exact exit count cannot be computed.
bool hasHistograms() const
Returns true if there are any known histogram operations in the loop.
const LoopAccessInfo * getLAI() const
Planner drives the vectorization process after having passed Legality checks.
VectorizationFactor selectEpilogueVectorizationFactor(const ElementCount MainLoopVF, unsigned IC)
VPlan & getPlanFor(ElementCount VF) const
Return the VPlan for VF.
Definition VPlan.cpp:1638
VectorizationFactor planInVPlanNativePath(ElementCount UserVF)
Use the VPlan-native path to plan how to best vectorize, return the best VF and its cost.
void updateLoopMetadataAndProfileInfo(Loop *VectorLoop, VPBasicBlock *HeaderVPBB, const VPlan &Plan, bool VectorizingEpilogue, MDNode *OrigLoopID, std::optional< unsigned > OrigAverageTripCount, unsigned OrigLoopInvocationWeight, unsigned EstimatedVFxUF, bool DisableRuntimeUnroll)
Update loop metadata and profile info for both the scalar remainder loop and VectorLoop.
Definition VPlan.cpp:1689
void buildVPlans(ElementCount MinVF, ElementCount MaxVF)
Build VPlans for power-of-2 VF's between MinVF and MaxVF inclusive, according to the information gathered by Legal when it checked if it is legal to vectorize the loop.
Definition VPlan.cpp:1622
VectorizationFactor computeBestVF()
Compute and return the most profitable vectorization factor.
DenseMap< const SCEV *, Value * > executePlan(ElementCount VF, unsigned UF, VPlan &BestPlan, InnerLoopVectorizer &LB, DominatorTree *DT, bool VectorizingEpilogue)
Generate the IR code for the vectorized loop captured in VPlan BestPlan according to the best selected VF and UF.
unsigned selectInterleaveCount(VPlan &Plan, ElementCount VF, InstructionCost LoopCost)
void emitInvalidCostRemarks(OptimizationRemarkEmitter *ORE)
Emit remarks for recipes with invalid costs in the available VPlans.
static bool getDecisionAndClampRange(const std::function< bool(ElementCount)> &Predicate, VFRange &Range)
Test a Predicate on a Range of VF's.
Definition VPlan.cpp:1603
void printPlans(raw_ostream &O)
Definition VPlan.cpp:1783
void plan(ElementCount UserVF, unsigned UserIC)
Build VPlans for the specified UserVF and UserIC if they are non-zero, or all applicable candidate VFs otherwise.
void addMinimumIterationCheck(VPlan &Plan, ElementCount VF, unsigned UF, ElementCount MinProfitableTripCount) const
Create a check to Plan to see if the vector loop should be executed based on its trip count.
bool hasPlanWithVF(ElementCount VF) const
Look through the existing plans and return true if we have one with vectorization factor VF.
This holds vectorization requirements that must be verified late in the process.
Utility class for getting and setting loop vectorizer hints in the form of loop metadata.
bool allowVectorization(Function *F, Loop *L, bool VectorizeOnlyWhenForced) const
bool allowReordering() const
When hints enabling vectorization are provided, we allow the vectorizer to change the order of operations given by the scalar loop.
void emitRemarkWithHints() const
Dumps all the hint information.
const char * vectorizeAnalysisPassName() const
If hints are provided that force vectorization, use the AlwaysPrint pass name to force the frontend to print the diagnostic.
This class emits a version of the loop where run-time checks ensure that may-alias pointers can't overlap.
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
bool hasLoopInvariantOperands(const Instruction *I) const
Return true if all the operands of the specified instruction are loop invariant.
Definition LoopInfo.cpp:73
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
Definition LoopInfo.cpp:653
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
Definition LoopInfo.cpp:67
Metadata node.
Definition Metadata.h:1080
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition MapVector.h:124
Function * getFunction(StringRef Name) const
Look up the specified function in the module symbol table.
Definition Module.cpp:235
Diagnostic information for optimization analysis remarks related to pointer aliasing.
Diagnostic information for optimization analysis remarks related to floating-point non-commutativity.
Diagnostic information for optimization analysis remarks.
The optimization diagnostic interface.
LLVM_ABI void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for missed-optimization remarks.
Diagnostic information for applied optimization remarks.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
void setIncomingValueForBlock(const BasicBlock *BB, Value *V)
Set every incoming value(s) for block BB to V.
Value * getIncomingValueForBlock(const BasicBlock *BB) const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will have (use 0 if you really have no idea).
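A minimal sketch of creating a resume-style phi with its two incoming edges reserved up front (all names and the surrounding CFG are illustrative):

#include "llvm/IR/Instructions.h"
using namespace llvm;

PHINode *makeResumePhi(Type *Ty, BasicBlock *Header, BasicBlock *Preheader,
                       BasicBlock *Latch, Value *Start, Value *Next) {
  PHINode *Phi = PHINode::Create(Ty, /*NumReservedValues=*/2, "resume",
                                 Header->begin()); // insert at block start
  Phi->addIncoming(Start, Preheader); // value on loop entry
  Phi->addIncoming(Next, Latch);      // value carried around the backedge
  return Phi;
}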
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of existing predicates.
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
LLVM_ABI const SCEVPredicate & getPredicate() const
LLVM_ABI unsigned getSmallConstantMaxTripCount()
Returns the upper bound of the loop trip count as a normal unsigned value, or 0 if the trip count is unknown.
LLVM_ABI const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
The RecurrenceDescriptor is used to identify recurrence variables in a loop.
static bool isFMulAddIntrinsic(Instruction *I)
Returns true if the instruction is a call to the llvm.fmuladd intrinsic.
FastMathFlags getFastMathFlags() const
static LLVM_ABI unsigned getOpcode(RecurKind Kind)
Returns the opcode corresponding to the RecurrenceKind.
Type * getRecurrenceType() const
Returns the type of the recurrence.
bool hasUsesOutsideReductionChain() const
Returns true if the reduction PHI has any uses outside the reduction chain.
const SmallPtrSet< Instruction *, 8 > & getCastInsts() const
Returns a reference to the instructions used for type-promoting the recurrence.
static bool isFindLastRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,y) is an increasing loop induction variable.
unsigned getMinWidthCastToRecurrenceTypeInBits() const
Returns the minimum width used by the recurrence in bits.
LLVM_ABI SmallVector< Instruction *, 4 > getReductionOpChain(PHINode *Phi, Loop *L) const
Attempts to find a chain of operations from Phi to LoopExitInst that can be treated as a set of reduction instructions.
static bool isAnyOfRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,y) is loop invariant.
bool isSigned() const
Returns true if all source operands of the recurrence are SExtInsts.
RecurKind getRecurrenceKind() const
bool isOrdered() const
Expose an ordered FP reduction to the instance users.
static bool isFindIVRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,y) is an increasing or decreasing loop induction variable.
static bool isMinMaxRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is any min/max kind.
std::optional< ArrayRef< PointerDiffInfo > > getDiffChecks() const
const SmallVectorImpl< RuntimePointerCheck > & getChecks() const
Returns the checks that generateChecks created.
This class uses information about analyzed scalars to rewrite expressions in canonical form.
ScalarEvolution * getSE()
bool isInsertedInstruction(Instruction *I) const
Return true if the specified instruction was inserted by the code rewriter.
LLVM_ABI Value * expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc)
Generates a code sequence that evaluates this predicate.
void eraseDeadInstructions(Value *Root)
Remove inserted instructions that are dead.
virtual bool isAlwaysTrue() const =0
Returns true if the predicate is always true.
This class represents an analyzed expression in the program.
LLVM_ABI bool isZero() const
Return true if the expression is a constant zero.
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
The main scalar evolution driver.
LLVM_ABI const SCEV * getURemExpr(SCEVUse LHS, SCEVUse RHS)
Represents an unsigned remainder expression based on unsigned division.
LLVM_ABI const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...
LLVM_ABI const SCEV * getConstant(ConstantInt *V)
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
LLVM_ABI const SCEV * getTripCountFromExitCount(const SCEV *ExitCount)
A version of getTripCountFromExitCount below which always picks an evaluation type which can not result in overflow.
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
LLVM_ABI void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may affect ScalarEvolution's ability to compute a trip count.
LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
LLVM_ABI const SCEV * getElementCount(Type *Ty, ElementCount EC, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
LLVM_ABI void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may affect its value, or any of the values that use it.
LLVM_ABI void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
const SCEV * getMinusOne(Type *Ty)
Return a SCEV for the constant -1 of a specific type.
LLVM_ABI void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be in LCSSA form.
LLVM_ABI const SCEV * getMulExpr(SmallVectorImpl< SCEVUse > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
LLVM_ABI unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
APInt getUnsignedRangeMax(const SCEV *S)
Determine the max of the unsigned range for a particular SCEV.
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< SCEVUse > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
LLVM_ABI const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS, and RHS.
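A minimal sketch using a couple of these ScalarEvolution queries (the helper name is illustrative):

#include "llvm/Analysis/ScalarEvolution.h"
using namespace llvm;

// Returns true if V provably does not vary within loop L.
bool isInvariantInLoop(ScalarEvolution &SE, Value *V, const Loop *L) {
  const SCEV *S = SE.getSCEV(V); // canonical SCEV expression for V
  return SE.isLoopInvariant(S, L);
}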
This class represents the LLVM 'select' instruction.
A vector that has set insertion semantics.
Definition SetVector.h:57
size_type size() const
Determine the number of elements in the SetVector.
Definition SetVector.h:103
void insert_range(Range &&R)
Definition SetVector.h:176
size_type count(const_arg_type key) const
Count the number of elements of a given key in the SetVector.
Definition SetVector.h:262
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
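A minimal sketch of SetVector's set-plus-order behavior:

#include "llvm/ADT/SetVector.h"
using namespace llvm;

void setVectorDemo() {
  SetVector<int> Worklist;
  Worklist.insert(3); // true: newly inserted
  Worklist.insert(3); // false: duplicate, dropped
  Worklist.insert(1);
  for (int V : Worklist)
    (void)V;          // visits 3 then 1: insertion order, not sorted order
}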
A templated base class for SmallPtrSet which provides the typesafe interface that is common across all SmallPtrSet instances.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:339
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition StringRef.h:55
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
LLVM_ABI std::optional< unsigned > getVScaleForTuning() const
LLVM_ABI bool supportsEfficientVectorElementLoadStore() const
If target has efficient vector element load/store instructions, it can return true here so that inser...
VectorInstrContext
Represents a hint about the context in which an insert/extract is used.
@ None
The insert/extract is not used with a load/store.
@ Load
The value being inserted comes from a load (InsertElement only).
@ Store
The extracted value is stored (ExtractElement only).
LLVM_ABI bool prefersVectorizedAddressing() const
Return true if target doesn't mind addresses in vectors.
LLVM_ABI TypeSize getRegisterBitWidth(RegisterKind K) const
LLVM_ABI InstructionCost getOperandsScalarizationOverhead(ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const
Estimate the overhead of scalarizing operands with the given types.
LLVM_ABI bool preferFixedOverScalableIfEqualCost(bool IsEpilogue) const
LLVM_ABI InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, OperandValueInfo OpdInfo={OK_AnyValue, OP_None}, const Instruction *I=nullptr) const
LLVM_ABI InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, bool UseMaskForCond=false, bool UseMaskForGaps=false) const
LLVM_ABI InstructionCost getShuffleCost(ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy, ArrayRef< int > Mask={}, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, int Index=0, VectorType *SubTp=nullptr, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const
static LLVM_ABI OperandValueInfo getOperandInfo(const Value *V)
Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
LLVM_ABI bool isElementTypeLegalForScalableVector(Type *Ty) const
LLVM_ABI ElementCount getMinimumVF(unsigned ElemWidth, bool IsScalable) const
TargetCostKind
The kind of cost model.
@ TCK_RecipThroughput
Reciprocal throughput.
@ TCK_CodeSize
Instruction code size.
@ TCK_SizeAndLatency
The weighted sum of size and latency.
@ TCK_Latency
The latency of instruction.
LLVM_ABI InstructionCost getMemIntrinsicInstrCost(const MemIntrinsicCostAttributes &MICA, TTI::TargetCostKind CostKind) const
LLVM_ABI InstructionCost getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE, const SCEV *Ptr, TTI::TargetCostKind CostKind) const
LLVM_ABI bool supportsScalableVectors() const
@ TCC_Free
Expected to fold away in lowering.
LLVM_ABI InstructionCost getInstructionCost(const User *U, ArrayRef< const Value * > Operands, TargetCostKind CostKind) const
Estimate the cost of a given IR user when lowered.
LLVM_ABI InstructionCost getIndexedVectorInstrCostFromEnd(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) const
LLVM_ABI InstructionCost getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={}, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None) const
Estimate the overhead of scalarizing an instruction.
@ SK_Splice
Concatenates elements from the first input vector with elements of the second input vector.
@ SK_Broadcast
Broadcast element 0 to all other elements.
@ SK_Reverse
Reverse the order of the vector.
LLVM_ABI InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency, const Instruction *I=nullptr) const
CastContextHint
Represents a hint about the context in which a cast is used.
@ Reversed
The cast is used with a reversed load/store.
@ Masked
The cast is used with a masked load/store.
@ Normal
The cast is used with a normal load/store.
@ Interleave
The cast is used with an interleaved load/store.
@ GatherScatter
The cast is used with a gather/scatter.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
Definition Twine.h:82
This class implements a switch-like dispatch statement for a value of 'T' using dyn_cast functionality.
Definition TypeSwitch.h:89
TypeSwitch< T, ResultT > & Case(CallableT &&caseFn)
Add a case on the given type.
Definition TypeSwitch.h:98
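A minimal sketch of TypeSwitch dispatching on an instruction's dynamic type:

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

StringRef classify(Instruction *I) {
  return TypeSwitch<Instruction *, StringRef>(I)
      .Case<LoadInst>([](LoadInst *) { return "load"; })
      .Case<StoreInst>([](StoreInst *) { return "store"; })
      .Default([](Instruction *) { return "other"; });
}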
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
LLVM_ABI unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:290
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:286
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:370
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:130
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:236
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:310
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:186
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:257
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:141
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
op_range operands()
Definition User.h:267
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:25
Value * getOperand(unsigned i) const
Definition User.h:207
static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)
Retrieve all the VFInfo instances associated to the CallInst CI.
Definition VectorUtils.h:74
VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph.
Definition VPlan.h:4255
RecipeListTy::iterator iterator
Instruction iterators...
Definition VPlan.h:4282
iterator end()
Definition VPlan.h:4292
iterator begin()
Recipe iterator methods.
Definition VPlan.h:4290
iterator_range< iterator > phis()
Returns an iterator range over the PHI-like recipes in the block.
Definition VPlan.h:4343
InstructionCost cost(ElementCount VF, VPCostContext &Ctx) override
Return the cost of this VPBasicBlock.
Definition VPlan.cpp:784
iterator getFirstNonPhi()
Return the position of the first non-phi node recipe in the block.
Definition VPlan.cpp:232
VPRecipeBase * getTerminator()
If the block has multiple successors, return the branch recipe terminating the block.
Definition VPlan.cpp:644
bool empty() const
Definition VPlan.h:4301
VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
Definition VPlan.h:98
const VPBasicBlock * getExitingBasicBlock() const
Definition VPlan.cpp:202
void setName(const Twine &newName)
Definition VPlan.h:183
size_t getNumSuccessors() const
Definition VPlan.h:241
void swapSuccessors()
Swap successors of the block. The block must have exactly 2 successors.
Definition VPlan.h:341
size_t getNumPredecessors() const
Definition VPlan.h:242
VPlan * getPlan()
Definition VPlan.cpp:177
const VPBasicBlock * getEntryBasicBlock() const
Definition VPlan.cpp:182
VPBlockBase * getSingleSuccessor() const
Definition VPlan.h:231
const VPBlocksTy & getSuccessors() const
Definition VPlan.h:215
static auto blocksOnly(const T &Range)
Return an iterator range over Range which only includes BlockTy blocks.
Definition VPlanUtils.h:266
static void insertOnEdge(VPBlockBase *From, VPBlockBase *To, VPBlockBase *BlockPtr)
Inserts BlockPtr on the edge between From and To.
Definition VPlanUtils.h:287
static void connectBlocks(VPBlockBase *From, VPBlockBase *To, unsigned PredIdx=-1u, unsigned SuccIdx=-1u)
Connect VPBlockBases From and To bi-directionally.
Definition VPlanUtils.h:218
static void reassociateBlocks(VPBlockBase *Old, VPBlockBase *New)
Reassociate all the blocks connected to Old so that they now point to New.
Definition VPlanUtils.h:244
VPlan-based builder utility analogous to IRBuilder.
VPPhi * createScalarPhi(ArrayRef< VPValue * > IncomingValues, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="", const VPIRFlags &Flags={})
VPInstruction * createNaryOp(unsigned Opcode, ArrayRef< VPValue * > Operands, Instruction *Inst=nullptr, const VPIRFlags &Flags={}, const VPIRMetadata &MD={}, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="")
Create an N-ary operation with Opcode, Operands and set Inst as its underlying Instruction.
Canonical scalar induction phi of the vector loop.
Definition VPlan.h:3817
VPIRValue * getStartValue() const
Returns the start value of the canonical induction.
Definition VPlan.h:3839
unsigned getNumDefinedValues() const
Returns the number of values defined by the VPDef.
Definition VPlanValue.h:466
VPValue * getVPSingleValue()
Returns the only VPValue defined by the VPDef.
Definition VPlanValue.h:439
A pure virtual base class for all recipes modeling header phis, including phis for first order recurrences.
Definition VPlan.h:2292
virtual VPValue * getBackedgeValue()
Returns the incoming value from the loop backedge.
Definition VPlan.h:2334
VPValue * getStartValue()
Returns the start value of the phi, if one is set.
Definition VPlan.h:2323
A recipe representing a sequence of load -> update -> store as part of a histogram operation.
Definition VPlan.h:2034
A special type of VPBasicBlock that wraps an existing IR basic block.
Definition VPlan.h:4408
LLVM_ABI_FOR_TEST FastMathFlags getFastMathFlags() const
This is a concrete Recipe that models a single VPlan-level instruction.
Definition VPlan.h:1211
unsigned getNumOperandsWithoutMask() const
Returns the number of operands, excluding the mask if the VPInstruction is masked.
Definition VPlan.h:1442
iterator_range< operand_iterator > operandsWithoutMask()
Returns an iterator range over the operands excluding the mask operand if present.
Definition VPlan.h:1462
@ ComputeAnyOfResult
Compute the final result of an AnyOf reduction with select(cmp(),x,y), where one of (x,y) is loop invariant.
Definition VPlan.h:1258
@ ResumeForEpilogue
Explicit user for the resume phi of the canonical induction in the main VPlan, used by the epilogue vectorizer.
Definition VPlan.h:1316
@ ReductionStartVector
Start vector for reductions with 3 operands: the original start value, the identity value for the reduction, and an integer indicating the scaling factor of the VF.
Definition VPlan.h:1307
unsigned getOpcode() const
Definition VPlan.h:1391
VPValue * getMask() const
Returns the mask for the VPInstruction.
Definition VPlan.h:1456
bool isMasked() const
Returns true if the VPInstruction has a mask operand.
Definition VPlan.h:1432
VPInterleaveRecipe is a recipe for transforming an interleave group of load or stores into one wide load/store and shuffles.
Definition VPlan.h:2956
detail::zippy< llvm::detail::zip_first, VPUser::const_operand_range, const_incoming_blocks_range > incoming_values_and_blocks() const
Returns an iterator range over pairs of incoming values and corresponding incoming blocks.
Definition VPlan.h:1619
VPRecipeBase is a base class modeling a sequence of one or more output IR instructions.
Definition VPlan.h:406
DebugLoc getDebugLoc() const
Returns the debug location of the recipe.
Definition VPlan.h:555
void moveBefore(VPBasicBlock &BB, iplist< VPRecipeBase >::iterator I)
Unlink this recipe and insert into BB before I.
void insertBefore(VPRecipeBase *InsertPos)
Insert an unlinked recipe into a basic block immediately before the specified recipe.
iplist< VPRecipeBase >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Helper class to create VPRecipies from IR instructions.
VPRecipeBase * tryToCreateWidenNonPhiRecipe(VPSingleDefRecipe *R, VFRange &Range)
Create and return a widened recipe for a non-phi recipe R if one can be created within the given VF Range.
VPValue * getVPValueOrAddLiveIn(Value *V)
VPReplicateRecipe * handleReplication(VPInstruction *VPI, VFRange &Range)
Build a VPReplicateRecipe for VPI.
bool isOrdered() const
Returns true, if the phi is part of an ordered reduction.
Definition VPlan.h:2747
unsigned getVFScaleFactor() const
Get the factor that the VF of this recipe's output should be scaled by, or 1 if it isn't scaled.
Definition VPlan.h:2726
bool isInLoop() const
Returns true if the phi is part of an in-loop reduction.
Definition VPlan.h:2750
RecurKind getRecurrenceKind() const
Returns the recurrence kind of the reduction.
Definition VPlan.h:2744
A recipe to represent inloop, ordered or partial reduction operations.
Definition VPlan.h:3049
VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks which form a Single-Entry-Single-Exiting subgraph of the output IR CFG.
Definition VPlan.h:4443
const VPBlockBase * getEntry() const
Definition VPlan.h:4479
VPCanonicalIVPHIRecipe * getCanonicalIV()
Returns the canonical induction recipe of the region.
Definition VPlan.h:4541
VPReplicateRecipe replicates a given instruction producing multiple scalar copies of the original scalar type, one per lane, instead of producing a single copy of widened type for all lanes.
Definition VPlan.h:3203
VPSingleDef is a base class for recipes modeling a sequence of one or more output IR instructions that define a single result VPValue.
Definition VPlan.h:607
Instruction * getUnderlyingInstr()
Returns the underlying instruction.
Definition VPlan.h:675
An analysis for type-inference for VPValues.
Type * inferScalarType(const VPValue *V)
Infer the type of V. Returns the scalar type of V.
This class augments VPValue with operands which provide the inverse def-use edges from VPValue's users to their defs.
Definition VPlanValue.h:297
void setOperand(unsigned I, VPValue *New)
Definition VPlanValue.h:341
operand_iterator op_begin()
Definition VPlanValue.h:361
VPValue * getOperand(unsigned N) const
Definition VPlanValue.h:336
This is the base class of the VPlan Def/Use graph, used for modeling the data flow into, within and out of the VPlan.
Definition VPlanValue.h:46
Value * getLiveInIRValue() const
Return the underlying IR value for a VPIRValue.
Definition VPlan.cpp:137
VPRecipeBase * getDefiningRecipe()
Returns the recipe defining this VPValue or nullptr if it is not defined by a recipe, i.e. it is a live-in.
Definition VPlan.cpp:127
Value * getUnderlyingValue() const
Return the underlying Value attached to this VPValue.
Definition VPlanValue.h:71
void replaceAllUsesWith(VPValue *New)
Definition VPlan.cpp:1434
void replaceUsesWithIf(VPValue *New, llvm::function_ref< bool(VPUser &U, unsigned Idx)> ShouldReplace)
Go through the uses list for this VPValue and make each use point to New if the callback ShouldReplace returns true for the given use.
Definition VPlan.cpp:1440
user_range users()
Definition VPlanValue.h:150
A recipe to compute a pointer to the last element of each part of a widened memory access for widened...
Definition VPlan.h:2140
VPWidenCastRecipe is a recipe to create vector cast instructions.
Definition VPlan.h:1826
A recipe for handling GEP instructions.
Definition VPlan.h:2076
A recipe for handling phi nodes of integer and floating-point inductions, producing their vector values.
Definition VPlan.h:2440
A recipe for widened phis.
Definition VPlan.h:2576
VPWidenRecipe is a recipe for producing a widened instruction using the opcode and operands of the re...
Definition VPlan.h:1770
VPlan models a candidate for vectorization, encoding various decisions taken to produce efficient output IR, including which branches, basic-blocks and output IR instructions to generate.
Definition VPlan.h:4573
bool hasVF(ElementCount VF) const
Definition VPlan.h:4786
VPBasicBlock * getEntry()
Definition VPlan.h:4665
VPValue * getTripCount() const
The trip count of the original loop.
Definition VPlan.h:4723
iterator_range< SmallSetVector< ElementCount, 2 >::iterator > vectorFactors() const
Returns an iterator range over all VFs of the plan.
Definition VPlan.h:4793
VPSymbolicValue & getVFxUF()
Returns VF * UF of the vector loop region.
Definition VPlan.h:4762
bool hasUF(unsigned UF) const
Definition VPlan.h:4804
ArrayRef< VPIRBasicBlock * > getExitBlocks() const
Return an ArrayRef containing VPIRBasicBlocks wrapping the exit blocks of the original scalar loop.
Definition VPlan.h:4713
VPIRValue * getOrAddLiveIn(Value *V)
Gets the live-in VPIRValue for V or adds a new live-in (if none exists yet) for V.
Definition VPlan.h:4829
VPIRValue * getZero(Type *Ty)
Return a VPIRValue wrapping the null value of type Ty.
Definition VPlan.h:4855
LLVM_ABI_FOR_TEST VPRegionBlock * getVectorLoopRegion()
Returns the VPRegionBlock of the vector loop.
Definition VPlan.cpp:1064
bool hasEarlyExit() const
Returns true if the VPlan is based on a loop with an early exit.
Definition VPlan.h:4950
InstructionCost cost(ElementCount VF, VPCostContext &Ctx)
Return the cost of this plan.
Definition VPlan.cpp:1046
void resetTripCount(VPValue *NewTripCount)
Resets the trip count for the VPlan.
Definition VPlan.h:4737
VPBasicBlock * getMiddleBlock()
Returns the 'middle' block of the plan, that is the block that selects whether to execute the scalar ...
Definition VPlan.h:4690
VPBasicBlock * getScalarPreheader() const
Return the VPBasicBlock for the preheader of the scalar loop.
Definition VPlan.h:4704
void execute(VPTransformState *State)
Generate the IR code for this VPlan.
Definition VPlan.cpp:928
VPIRBasicBlock * getScalarHeader() const
Return the VPIRBasicBlock wrapping the header of the scalar loop.
Definition VPlan.h:4709
VPBasicBlock * getVectorPreheader()
Returns the preheader of the vector loop region, if one exists, or null otherwise.
Definition VPlan.h:4670
VPSymbolicValue & getVF()
Returns the VF of the vector loop region.
Definition VPlan.h:4755
LLVM_ABI_FOR_TEST VPlan * duplicate()
Clone the current VPlan, update all VPValues of the new VPlan and cloned recipes to refer to the clon...
Definition VPlan.cpp:1212
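As a usage sketch of these accessors (largestFixedVF is a hypothetical helper, not part of the API):
// Return the largest fixed vectorization factor the plan was built for,
// or VF=1 if the plan only contains scalable factors.
ElementCount largestFixedVF(VPlan &Plan) {
  ElementCount Best = ElementCount::getFixed(1);
  for (ElementCount VF : Plan.vectorFactors())
    if (VF.isFixed() && ElementCount::isKnownGT(VF, Best))
      Best = VF;
  return Best;
}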
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition Value.cpp:166
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:397
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:553
iterator_range< user_iterator > users()
Definition Value.h:427
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:713
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
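A minimal sketch of the common replace-and-erase idiom built from the Value API above (foldAway is a hypothetical helper; OldI is assumed to have a valid replacement NewV):
// Rewire all users of OldI to NewV, carry the name over if NewV is
// unnamed, then delete the now-dead instruction.
void foldAway(Instruction *OldI, Value *NewV) {
  if (NewV->getName().empty())
    NewV->setName(OldI->getName());
  OldI->replaceAllUsesWith(NewV);
  OldI->eraseFromParent();
}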
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:202
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition DenseSet.h:175
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:230
constexpr bool isNonZero() const
Definition TypeSize.h:155
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:216
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr LeafTy multiplyCoefficientBy(ScalarTy RHS) const
Definition TypeSize.h:256
constexpr bool isFixed() const
Returns true if the quantity is not scaled by vscale.
Definition TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
constexpr bool isZero() const
Definition TypeSize.h:153
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:223
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
Definition TypeSize.h:252
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:237
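A short sketch of the fixed/scalable semantics above, assuming an IRBuilderBase &Builder is available; the isKnown* predicates only return true when the relation holds for every possible vscale:
void elementCountDemo(IRBuilderBase &Builder) {
  ElementCount Fixed8 = ElementCount::getFixed(8);
  ElementCount Scal4 = ElementCount::getScalable(4); // 4 x vscale
  // Not "known": for vscale > 2 the scalable count exceeds 8.
  assert(!ElementCount::isKnownLE(Scal4, Fixed8));
  ElementCount Half = Scal4.divideCoefficientBy(2); // 2 x vscale
  (void)Half;
  // VectorType::get is the primary way to build vector types; this one
  // is <vscale x 4 x i32>.
  VectorType *VecTy = VectorType::get(Builder.getInt32Ty(), Scal4);
  (void)VecTy;
}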
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
IteratorT end() const
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
Changed
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition CallingConv.h:76
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:190
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
auto match_fn(const Pattern &P)
A match functor that can be used as a UnaryPredicate in functional algorithms like all_of.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< CmpInst > m_Cmp()
Matches any compare instruction and ignore it.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
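For example, a hedged sketch composing these matchers (isDoubling is a hypothetical helper; the binding overload m_Value(X) and the integer-literal overload of m_SpecificInt come from the same PatternMatch namespace):
// Match a one-use "mul X, 2" and capture X.
bool isDoubling(llvm::Value *V, llvm::Value *&X) {
  using namespace llvm::PatternMatch;
  return match(V, m_OneUse(m_Mul(m_Value(X), m_SpecificInt(2))));
}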
class_match< const SCEVVScale > m_SCEVVScale()
bind_cst_ty m_scev_APInt(const APInt *&C)
Match an SCEV constant and bind it to an APInt.
specificloop_ty m_SpecificLoop(const Loop *L)
cst_pred_ty< is_specific_signed_cst > m_scev_SpecificSInt(int64_t V)
Match an SCEV constant with a plain signed integer (sign-extended value will be matched)
SCEVAffineAddRec_match< Op0_t, Op1_t, class_match< const Loop > > m_scev_AffineAddRec(const Op0_t &Op0, const Op1_t &Op1)
bind_ty< const SCEVMulExpr > m_scev_Mul(const SCEVMulExpr *&V)
bool match(const SCEV *S, const Pattern &P)
SCEVBinaryExpr_match< SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagAnyWrap, true > m_scev_c_Mul(const Op0_t &Op0, const Op1_t &Op1)
class_match< const SCEV > m_SCEV()
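A sketch using only the SCEV matchers listed above, with an illustrative helper name; it recognizes an affine add recurrence starting at zero and binds the constant step:
bool startsAtZeroWithConstStep(const llvm::SCEV *S, const llvm::APInt *&Step) {
  using namespace llvm::SCEVPatternMatch;
  // Matches {0,+,Step}<L> for some loop L; Step is bound on success.
  return match(S,
               m_scev_AffineAddRec(m_scev_SpecificSInt(0), m_scev_APInt(Step)));
}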
AllRecipe_match< Instruction::Select, Op0_t, Op1_t, Op2_t > m_Select(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2)
int_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
bool matchFindIVResult(VPInstruction *VPI, Op0_t ReducedIV, Op1_t Start)
Match FindIV result pattern: select(icmp ne ComputeReductionResult(ReducedIV), Sentinel),...
match_combine_or< AllRecipe_match< Instruction::ZExt, Op0_t >, AllRecipe_match< Instruction::SExt, Op0_t > > m_ZExtOrSExt(const Op0_t &Op0)
VPInstruction_match< VPInstruction::ExtractLastLane, Op0_t > m_ExtractLastLane(const Op0_t &Op0)
VPInstruction_match< VPInstruction::BranchOnCount > m_BranchOnCount()
VPInstruction_match< VPInstruction::ExtractLastPart, Op0_t > m_ExtractLastPart(const Op0_t &Op0)
bool match(Val *V, const Pattern &P)
class_match< VPValue > m_VPValue()
Match an arbitrary VPValue and ignore it.
VPInstruction_match< VPInstruction::ExtractLane, Op0_t, Op1_t > m_ExtractLane(const Op0_t &Op0, const Op1_t &Op1)
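Similarly, a minimal sketch with the VPlan matchers above (isLastLaneExtract is a hypothetical helper):
// True if V is produced by an ExtractLastLane of any operand.
bool isLastLaneExtract(VPValue *V) {
  using namespace llvm::VPlanPatternMatch;
  return match(V, m_ExtractLastLane(m_VPValue()));
}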
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
Add a small namespace to avoid name clashes with the classes used in the streaming interface.
DiagnosticInfoOptimizationBase::Argument NV
NodeAddr< InstrNode * > Instr
Definition RDFGraph.h:389
NodeAddr< PhiNode * > Phi
Definition RDFGraph.h:390
friend class Instruction
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
bool isSingleScalar(const VPValue *VPV)
Returns true if VPV is a single scalar, either because it produces the same value for all lanes or on...
VPValue * getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr)
Get or create a VPValue that corresponds to the expansion of Expr.
VPBasicBlock * getFirstLoopHeader(VPlan &Plan, VPDominatorTree &VPDT)
Returns the header block of the first, top-level loop, or null if none exist.
bool isAddressSCEVForCost(const SCEV *Addr, ScalarEvolution &SE, const Loop *L)
Returns true if Addr is an address SCEV that can be passed to TTI::getAddressComputationCost,...
bool onlyFirstLaneUsed(const VPValue *Def)
Returns true if only the first lane of Def is used.
VPIRFlags getFlagsFromIndDesc(const InductionDescriptor &ID)
Extracts and returns NoWrap and FastMath flags from the induction binop in ID.
Definition VPlanUtils.h:94
VPRecipeBase * findRecipe(VPValue *Start, PredT Pred)
Search Start's users for a recipe satisfying Pred, looking through recipes with definitions.
Definition VPlanUtils.h:111
VPSingleDefRecipe * findHeaderMask(VPlan &Plan)
Collect the header mask with the pattern: (ICMP_ULE, WideCanonicalIV, backedge-taken-count) TODO: Int...
const SCEV * getSCEVExprForVPValue(const VPValue *V, PredicatedScalarEvolution &PSE, const Loop *L=nullptr)
Return the SCEV expression for V.
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
LLVM_ABI bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE, AssumptionCache *AC, MemorySSAUpdater *MSSAU, bool PreserveLCSSA)
Simplify each loop in a loop nest recursively.
LLVM_ABI void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Offset
Definition DWP.cpp:532
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iterable types.
Definition STLExtras.h:831
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ABI Value * addRuntimeChecks(Instruction *Loc, Loop *TheLoop, const SmallVectorImpl< RuntimePointerCheck > &PointerChecks, SCEVExpander &Expander, bool HoistRuntimeChecks=false)
Add code that checks at runtime if the accessed arrays in PointerChecks overlap.
auto cast_if_present(const Y &Val)
cast_if_present<X> - Functionally identical to cast, except that a null value is accepted.
Definition Casting.h:683
LLVM_ABI bool RemoveRedundantDbgInstrs(BasicBlock *BB)
Try to remove redundant dbg.value instructions from given basic block.
LLVM_ABI_FOR_TEST cl::opt< bool > VerifyEachVPlan
LLVM_ABI std::optional< unsigned > getLoopEstimatedTripCount(Loop *L, unsigned *EstimatedLoopInvocationWeight=nullptr)
Return either:
static void reportVectorization(OptimizationRemarkEmitter *ORE, Loop *TheLoop, VectorizationFactor VF, unsigned IC)
Report successful vectorization of the loop.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
unsigned getLoadStoreAddressSpace(const Value *I)
A helper function that returns the address space of the pointer operand of load or store instruction.
LLVM_ABI Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)
Returns the min/max intrinsic used when expanding a min/max reduction.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1669
LLVM_ABI Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
InstructionCost Cost
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
Value * getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF)
Return the runtime value for VF.
LLVM_ABI bool formLCSSARecursively(Loop &L, const DominatorTree &DT, const LoopInfo *LI, ScalarEvolution *SE)
Put a loop nest into LCSSA form.
Definition LCSSA.cpp:449
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2208
LLVM_ABI bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:634
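make_early_inc_range is the standard idiom for mutating a range while walking it; a small sketch (deadCodeSweep is a hypothetical helper):
// Erase unused, trivially dead instructions; the early-increment iterator
// stays valid across eraseFromParent.
void deadCodeSweep(BasicBlock *BB) {
  for (Instruction &I : make_early_inc_range(*BB))
    if (I.use_empty() && wouldInstructionBeTriviallyDead(&I))
      I.eraseFromParent();
}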
auto pred_size(const MachineBasicBlock *BB)
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64-bit edition).
Definition MathExtras.h:284
Align getLoadStoreAlignment(const Value *I)
A helper function that returns the alignment of load or store instruction.
iterator_range< df_iterator< VPBlockShallowTraversalWrapper< VPBlockBase * > > > vp_depth_first_shallow(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order.
Definition VPlanCFG.h:253
LLVM_ABI bool VerifySCEV
LLVM_ABI_FOR_TEST cl::opt< bool > VPlanPrintAfterAll
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
iterator_range< df_iterator< VPBlockDeepTraversalWrapper< VPBlockBase * > > > vp_depth_first_deep(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order while traversing t...
Definition VPlanCFG.h:280
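A usage sketch of the traversals above (countBasicBlocks is illustrative): the deep variant descends into VPRegionBlocks, the shallow one stays at one level.
unsigned countBasicBlocks(VPlan &Plan) {
  unsigned Count = 0;
  // Visit all blocks, including those nested inside loop regions.
  for (VPBlockBase *Block : vp_depth_first_deep(Plan.getEntry()))
    Count += isa<VPBasicBlock>(Block);
  return Count;
}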
SmallVector< VPRegisterUsage, 8 > calculateRegisterUsageForPlan(VPlan &Plan, ArrayRef< ElementCount > VFs, const TargetTransformInfo &TTI, const SmallPtrSetImpl< const Value * > &ValuesToIgnore)
Estimate the register usage for Plan and vectorization factors in VFs by calculating the highest numb...
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:337
LLVM_ABI void setBranchWeights(Instruction &I, ArrayRef< uint32_t > Weights, bool IsExpected, bool ElideAllZero=false)
Create a new branch_weights metadata node and add or overwrite a prof metadata reference to instructi...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1746
void collectEphemeralRecipesForVPlan(VPlan &Plan, DenseSet< VPRecipeBase * > &EphRecipes)
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
bool containsIrreducibleCFG(RPOTraversalT &RPOTraversal, const LoopInfoT &LI)
Return true if the control flow in RPOTraversal is irreducible.
Definition CFG.h:149
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1636
LLVM_ABI_FOR_TEST cl::opt< bool > EnableWideActiveLaneMask
UncountableExitStyle
Different methods of handling early exits.
Definition VPlan.h:83
@ ReadOnly
No side effects to worry about, so we can process any uncountable exits in the loop and branch either...
Definition VPlan.h:88
@ MaskedHandleExitInScalarLoop
All memory operations other than the load(s) required to determine whether an uncountable exit occurr...
Definition VPlan.h:93
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1753
LLVM_ABI cl::opt< bool > EnableLoopVectorization
LLVM_ABI_FOR_TEST cl::list< std::string > VPlanPrintAfterPasses
LLVM_ABI bool wouldInstructionBeTriviallyDead(const Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction would have no side effects if it was not used.
Definition Local.cpp:422
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
SmallVector< ValueTypeFromRangeType< R >, Size > to_vector(R &&Range)
Given a range of type R, iterate the entire range and return a SmallVector with elements of the vecto...
Type * toVectorizedTy(Type *Ty, ElementCount EC)
A helper for converting to vectorized types.
LLVM_ABI void llvm_unreachable_internal(const char *msg=nullptr, const char *file=nullptr, unsigned line=0)
This function calls abort(), and prints the optional message to stderr.
T * find_singleton(R &&Range, Predicate P, bool AllowRepeats=false)
Return the single value in Range that satisfies P(<member of Range> *, AllowRepeats)->T * returning n...
Definition STLExtras.h:1837
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
cl::opt< unsigned > ForceTargetInstructionCost
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
format_object< Ts... > format(const char *Fmt, const Ts &... Vals)
These are helper functions used to produce formatted output.
Definition Format.h:129
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition MathExtras.h:394
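The integer helpers above combine naturally; a sketch under hypothetical names (Bytes and RegBytes are assumed inputs):
// Bytes rounded up to whole power-of-two registers of RegBytes each;
// Log2_64(RegBytes) would give the equivalent shift amount.
uint64_t numRegsFor(uint64_t Bytes, uint64_t RegBytes) {
  assert(isPowerOf2_64(RegBytes) && "expected a power-of-two register size");
  return divideCeil(Bytes, RegBytes); // e.g. divideCeil(10, 4) == 3
}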
bool canVectorizeTy(Type *Ty)
Returns true if Ty is a valid vector element type, void, or an unpacked literal struct where all elem...
TargetTransformInfo TTI
static void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I=nullptr, DebugLoc DL={})
Reports an informative message: print Msg for debugging purposes as well as an optimization remark.
LLVM_ABI bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
RecurKind
These are the kinds of recurrences that we support.
@ Or
Bitwise or logical OR of integers.
@ FMulAdd
Sum of float products with llvm.fmuladd(a * b + sum).
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
LLVM_ABI Value * getRecurrenceIdentity(RecurKind K, Type *Tp, FastMathFlags FMF)
Given information about a recurrence kind, return the identity for the @llvm.vector....
LLVM_ABI BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="")
Split the specified block at the specified instruction.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
LLVM_ABI void reportVectorizationFailure(const StringRef DebugMsg, const StringRef OREMsg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I=nullptr)
Reports a vectorization failure: print DebugMsg for debugging purposes along with the corresponding o...
DWARFExpression::Operation Op
ScalarEpilogueLowering
@ CM_ScalarEpilogueNotAllowedLowTripLoop
@ CM_ScalarEpilogueNotNeededUsePredicate
@ CM_ScalarEpilogueNotAllowedOptSize
@ CM_ScalarEpilogueAllowed
@ CM_ScalarEpilogueNotAllowedUsePredicate
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
ArrayRef(const T &OneElt) -> ArrayRef< T >
Value * createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF, int64_t Step)
Return a value for Step multiplied by VF.
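createStepForVF hides the fixed-versus-scalable distinction; a hypothetical sketch of advancing an induction variable by VF * UF:
// For a scalable VF this emits vscale-based math; for a fixed VF it folds
// to a constant. The step is computed in IV's type.
Value *bumpIV(IRBuilderBase &B, Value *IV, ElementCount VF, unsigned UF) {
  Value *Step = createStepForVF(B, IV->getType(), VF, UF);
  return B.CreateAdd(IV, Step, "iv.next");
}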
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1772
Value * emitTransformedIndex(IRBuilderBase &B, Value *Index, Value *StartValue, Value *Step, InductionDescriptor::InductionKind InductionKind, const BinaryOperator *InductionBinOp)
Compute the transformed value of Index at offset StartValue using step StepValue.
auto predecessors(const MachineBasicBlock *BB)
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition iterator.h:368
cl::opt< bool > EnableVPlanNativePath
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
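The getLoadStore* helpers accept either a LoadInst or a StoreInst and factor out the usual dual-path boilerplate; a small sketch (describeAccess is illustrative):
void describeAccess(const Instruction *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && "memory access expected");
  Type *AccessTy = getLoadStoreType(I);
  const Value *Ptr = getLoadStorePointerOperand(I);
  Align Alignment = getLoadStoreAlignment(I);
  unsigned AddrSpace = getLoadStoreAddressSpace(I);
  (void)AccessTy; (void)Ptr; (void)Alignment; (void)AddrSpace;
}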
ArrayRef< Type * > getContainedTypes(Type *const &Ty)
Returns the types contained in Ty.
LLVM_ABI Value * addDiffRuntimeChecks(Instruction *Loc, ArrayRef< PointerDiffInfo > Checks, SCEVExpander &Expander, function_ref< Value *(IRBuilderBase &, unsigned)> GetVF, unsigned IC)
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
@ None
Don't use tail folding.
@ DataWithEVL
Use predicated EVL instructions for tail-folding.
@ DataAndControlFlow
Use predicate to control both data and control flow.
@ DataWithoutLaneMask
Same as Data, but avoids using the get.active.lane.mask intrinsic to calculate the mask and instead i...
@ Data
Use predicate only to mask operations on data in the loop.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI bool hasBranchWeightMD(const Instruction &I)
Checks if an instructions has Branch Weight Metadata.
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
Definition Hashing.h:592
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
Definition bit.h:330
Type * toVectorTy(Type *Scalar, ElementCount EC)
A helper function for converting Scalar types to vector types.
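A final sketch of the type-widening helpers (widenForVF is a hypothetical name): toVectorTy widens a scalar, while toVectorizedTy also handles literal structs fieldwise.
Type *widenForVF(Type *Ty, ElementCount VF) {
  // canVectorizeTy also accepts void, which is excluded here for simplicity.
  assert(canVectorizeTy(Ty) && !Ty->isVoidTy() && "expected a widenable type");
  return toVectorizedTy(Ty, VF); // i32 with fixed VF=4 -> <4 x i32>
}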
std::unique_ptr< VPlan > VPlanPtr
Definition VPlan.h:78
constexpr detail::IsaCheckPredicate< Types... > IsaPred
Function object wrapper for the llvm::isa type check.
Definition Casting.h:866
LLVM_ABI_FOR_TEST bool verifyVPlanIsValid(const VPlan &Plan)
Verify invariants for general VPlans.
LLVM_ABI MapVector< Instruction *, uint64_t > computeMinimumValueSizes(ArrayRef< BasicBlock * > Blocks, DemandedBits &DB, const TargetTransformInfo *TTI=nullptr)
Compute a map of integer instructions to their minimum legal type size.
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Definition Hashing.h:466
LLVM_ABI_FOR_TEST cl::opt< bool > VPlanPrintVectorRegionScope
LLVM_ABI cl::opt< bool > EnableLoopInterleaving
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI void collectEphemeralValues(const Loop *L, AssumptionCache *AC, SmallPtrSetImpl< const Value * > &EphValues)
Collect a loop's ephemeral values (those used only by an assume or similar intrinsics in the loop).
An information struct used to provide DenseMap with the various necessary components for a given valu...
Encapsulate information regarding vectorization of a loop and its epilogue.
EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF, ElementCount EVF, unsigned EUF, VPlan &EpiloguePlan)
A class that represents two vectorization factors (initialized with 0 by default).
static FixedScalableVFPair getNone()
This holds details about a histogram operation – a load -> update -> store sequence where each lane i...
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
TargetLibraryInfo * TLI
LLVM_ABI LoopVectorizeResult runImpl(Function &F)
LLVM_ABI bool processLoop(Loop *L)
ProfileSummaryInfo * PSI
LoopAccessInfoManager * LAIs
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI LoopVectorizePass(LoopVectorizeOptions Opts={})
ScalarEvolution * SE
AssumptionCache * AC
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
OptimizationRemarkEmitter * ORE
std::function< BlockFrequencyInfo &()> GetBFI
TargetTransformInfo * TTI
Storage for information about made changes.
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70
A marker analysis to determine if extra passes should be run after loop vectorization.
static LLVM_ABI AnalysisKey Key
Holds the VFShape for a specific scalar to vector function mapping.
std::optional< unsigned > getParamIndexForOptionalMask() const
Instruction Set Architecture.
Encapsulates information needed to describe a parameter.
A range of powers-of-2 vectorization factors with fixed start and adjustable end.
ElementCount End
Struct to hold various analysis needed for cost computations.
unsigned getPredBlockCostDivisor(BasicBlock *BB) const
LoopVectorizationCostModel & CM
bool isLegacyUniformAfterVectorization(Instruction *I, ElementCount VF) const
Return true if I is considered uniform-after-vectorization in the legacy cost model for VF.
bool skipCostComputation(Instruction *UI, bool IsVector) const
Return true if the cost for UI shouldn't be computed, e.g.
InstructionCost getLegacyCost(Instruction *UI, ElementCount VF) const
Return the cost for UI with VF using the legacy cost model as fallback until computing the cost of al...
TargetTransformInfo::TargetCostKind CostKind
SmallPtrSet< Instruction *, 8 > SkipCostComputation
A struct that represents some properties of the register usage of a loop.
InstructionCost spillCost(VPCostContext &Ctx, unsigned OverrideMaxNumRegs=0) const
Calculate the estimated cost of any spills due to using more registers than the number available for ...
VPTransformState holds information passed down when "executing" a VPlan, needed for generating the ou...
A recipe for widening load operations, using the address to load from and an optional mask.
Definition VPlan.h:3605
A recipe for widening store operations, using the stored value, the address to store to and an option...
Definition VPlan.h:3688
static LLVM_ABI_FOR_TEST bool tryToConvertVPInstructionsToVPRecipes(VPlan &Plan, const TargetLibraryInfo &TLI)
Replaces the VPInstructions in Plan with corresponding widen recipes.
static void materializeBroadcasts(VPlan &Plan)
Add explicit broadcasts for live-ins and VPValues defined in Plan's entry block if they are used as v...
static void materializePacksAndUnpacks(VPlan &Plan)
Add explicit Build[Struct]Vector recipes to Pack multiple scalar values into vectors and Unpack recip...
static LLVM_ABI_FOR_TEST std::unique_ptr< VPlan > buildVPlan0(Loop *TheLoop, LoopInfo &LI, Type *InductionTy, DebugLoc IVDL, PredicatedScalarEvolution &PSE, LoopVersioning *LVer=nullptr)
Create a base VPlan0, serving as the common starting point for all later candidates.
static void introduceMasksAndLinearize(VPlan &Plan)
Predicate and linearize the control-flow in the only loop region of Plan.
static void materializeFactors(VPlan &Plan, VPBasicBlock *VectorPH, ElementCount VF)
Materialize UF, VF and VFxUF to be computed explicitly using VPInstructions.
static void createInLoopReductionRecipes(VPlan &Plan, const DenseSet< BasicBlock * > &BlocksNeedingPredication, ElementCount MinVF)
Create VPReductionRecipes for in-loop reductions.
static void foldTailByMasking(VPlan &Plan)
Adapts the vector loop region for tail folding by introducing a header mask and conditionally executi...
static void materializeBackedgeTakenCount(VPlan &Plan, VPBasicBlock *VectorPH)
Materialize the backedge-taken count to be computed explicitly using VPInstructions.
static void addActiveLaneMask(VPlan &Plan, bool UseActiveLaneMaskForControlFlow)
Replace (ICMP_ULE, wide canonical IV, backedge-taken-count) checks with an (active-lane-mask recipe,...
static bool handleMultiUseReductions(VPlan &Plan, OptimizationRemarkEmitter *ORE, Loop *TheLoop)
Try to legalize reductions with multiple in-loop uses.
static void dropPoisonGeneratingRecipes(VPlan &Plan, const std::function< bool(BasicBlock *)> &BlockNeedsPredication)
Drop poison flags from recipes that may generate a poison value that is used after vectorization,...
static void convertToVariableLengthStep(VPlan &Plan)
Transform loops with variable-length stepping after region dissolution.
static void createInterleaveGroups(VPlan &Plan, const SmallPtrSetImpl< const InterleaveGroup< Instruction > * > &InterleaveGroups, VPRecipeBuilder &RecipeBuilder, const bool &ScalarEpilogueAllowed)
static void addBranchWeightToMiddleTerminator(VPlan &Plan, ElementCount VF, std::optional< unsigned > VScaleForTuning)
Add branch weight metadata, if the Plan's middle block is terminated by a BranchOnCond recipe.
static std::unique_ptr< VPlan > narrowInterleaveGroups(VPlan &Plan, const TargetTransformInfo &TTI)
Try to find a single VF among Plan's VFs for which all interleave groups (with known minimum VF eleme...
static bool handleFindLastReductions(VPlan &Plan)
Check if Plan contains any FindLast reductions.
static void unrollByUF(VPlan &Plan, unsigned UF)
Explicitly unroll Plan by UF.
static DenseMap< const SCEV *, Value * > expandSCEVs(VPlan &Plan, ScalarEvolution &SE)
Expand VPExpandSCEVRecipes in Plan's entry block.
static void convertToConcreteRecipes(VPlan &Plan)
Lower abstract recipes to concrete ones that can be codegen'd.
static LLVM_ABI_FOR_TEST void handleEarlyExits(VPlan &Plan, UncountableExitStyle Style)
Update Plan to account for all early exits.
static void expandBranchOnTwoConds(VPlan &Plan)
Expand BranchOnTwoConds instructions into explicit CFG with BranchOnCond instructions.
static void hoistPredicatedLoads(VPlan &Plan, PredicatedScalarEvolution &PSE, const Loop *L)
Hoist predicated loads from the same address to the loop entry block, if they are guaranteed to execu...
static void optimizeFindIVReductions(VPlan &Plan, PredicatedScalarEvolution &PSE, Loop &L)
Optimize FindLast reductions selecting IVs (or expressions of IVs) by converting them to FindIV reduc...
static void convertToAbstractRecipes(VPlan &Plan, VPCostContext &Ctx, VFRange &Range)
This function converts initial recipes to the abstract recipes and clamps Range based on cost model f...
static void materializeConstantVectorTripCount(VPlan &Plan, ElementCount BestVF, unsigned BestUF, PredicatedScalarEvolution &PSE)
static void addExitUsersForFirstOrderRecurrences(VPlan &Plan, VFRange &Range)
Handle users in the exit block for first order reductions in the original exit block.
static void createHeaderPhiRecipes(VPlan &Plan, PredicatedScalarEvolution &PSE, Loop &OrigLoop, const MapVector< PHINode *, InductionDescriptor > &Inductions, const MapVector< PHINode *, RecurrenceDescriptor > &Reductions, const SmallPtrSetImpl< const PHINode * > &FixedOrderRecurrences, const SmallPtrSetImpl< PHINode * > &InLoopReductions, bool AllowReordering)
Replace VPPhi recipes in Plan's header with corresponding VPHeaderPHIRecipe subclasses for inductions...
static void addExplicitVectorLength(VPlan &Plan, const std::optional< unsigned > &MaxEVLSafeElements)
Add a VPCurrentIterationPHIRecipe and related recipes to Plan and replaces all uses except the canoni...
static void optimizeEVLMasks(VPlan &Plan)
Optimize recipes which use an EVL-based header mask to VP intrinsics, for example:
static void replaceSymbolicStrides(VPlan &Plan, PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &StridesMap)
Replace symbolic strides from StridesMap in Plan with constants when possible.
static bool handleMaxMinNumReductions(VPlan &Plan)
Check if Plan contains any FMaxNum or FMinNum reductions.
static void removeBranchOnConst(VPlan &Plan)
Remove BranchOnCond recipes with true or false conditions together with removing dead edges to their ...
static LLVM_ABI_FOR_TEST void createLoopRegions(VPlan &Plan)
Replace loops in Plan's flat CFG with VPRegionBlocks, turning Plan's flat CFG into a hierarchical CFG...
static void removeDeadRecipes(VPlan &Plan)
Remove dead recipes from Plan.
static void attachCheckBlock(VPlan &Plan, Value *Cond, BasicBlock *CheckBlock, bool AddBranchWeights)
Wrap runtime check block CheckBlock in a VPIRBB and Cond in a VPValue and connect the block to Plan,...
static void simplifyRecipes(VPlan &Plan)
Perform instcombine-like simplifications on recipes in Plan.
static void sinkPredicatedStores(VPlan &Plan, PredicatedScalarEvolution &PSE, const Loop *L)
Sink predicated stores to the same address with complementary predicates (P and NOT P) to an uncondit...
static void addMinimumIterationCheck(VPlan &Plan, ElementCount VF, unsigned UF, ElementCount MinProfitableTripCount, bool RequiresScalarEpilogue, bool TailFolded, Loop *OrigLoop, const uint32_t *MinItersBypassWeights, DebugLoc DL, PredicatedScalarEvolution &PSE)
static void replicateByVF(VPlan &Plan, ElementCount VF)
Replace each replicating VPReplicateRecipe and VPInstruction outside of any replicate region in Plan ...
static void clearReductionWrapFlags(VPlan &Plan)
Clear NSW/NUW flags from reduction instructions if necessary.
static void optimizeInductionLiveOutUsers(VPlan &Plan, PredicatedScalarEvolution &PSE, bool FoldTail)
If there's a single exit block, optimize its phi recipes that use exiting IV values by feeding them p...
static void createPartialReductions(VPlan &Plan, VPCostContext &CostCtx, VFRange &Range)
Detect and create partial reduction recipes for scaled reductions in Plan.
static void cse(VPlan &Plan)
Perform common-subexpression-elimination on Plan.
static void materializeVectorTripCount(VPlan &Plan, VPBasicBlock *VectorPHVPBB, bool TailByMasking, bool RequiresScalarEpilogue, VPValue *Step)
Materialize vector trip count computations to a set of VPInstructions.
static LLVM_ABI_FOR_TEST void optimize(VPlan &Plan)
Apply VPlan-to-VPlan optimizations to Plan, including induction recipe optimizations,...
static void dissolveLoopRegions(VPlan &Plan)
Replace loop regions with explicit CFG.
static void truncateToMinimalBitwidths(VPlan &Plan, const MapVector< Instruction *, uint64_t > &MinBWs)
Insert truncates and extends for any truncated recipe.
static bool adjustFixedOrderRecurrences(VPlan &Plan, VPBuilder &Builder)
Try to have all users of fixed-order recurrences appear after the recipe defining their previous valu...
static void optimizeForVFAndUF(VPlan &Plan, ElementCount BestVF, unsigned BestUF, PredicatedScalarEvolution &PSE)
Optimize Plan based on BestVF and BestUF.
static void addMinimumVectorEpilogueIterationCheck(VPlan &Plan, Value *TripCount, Value *VectorTripCount, bool RequiresScalarEpilogue, ElementCount EpilogueVF, unsigned EpilogueUF, unsigned MainLoopStep, unsigned EpilogueLoopStep, ScalarEvolution &SE)
Add a check to Plan to see if the epilogue vector loop should be executed.
static void convertEVLExitCond(VPlan &Plan)
Replaces the exit condition from (branch-on-cond eq CanonicalIVInc, VectorTripCount) to (branch-on-co...
static LLVM_ABI_FOR_TEST void addMiddleCheck(VPlan &Plan, bool TailFolded)
If a check is needed to guard executing the scalar epilogue loop, it will be added to the middle bloc...
TODO: The following VectorizationFactor was pulled out of the LoopVectorizationCostModel class.
InstructionCost Cost
Cost of the loop with that width.
ElementCount MinProfitableTripCount
The minimum trip count required to make vectorization profitable, e.g.
ElementCount Width
Vector width with best cost.
InstructionCost ScalarCost
Cost of the scalar loop.
static VectorizationFactor Disabled()
Width 1 means no vectorization, cost 0 means uncomputed cost.
static LLVM_ABI bool HoistRuntimeChecks