1//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10// and generates target-independent LLVM-IR.
11// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12// of instructions in order to estimate the profitability of vectorization.
13//
14// The loop vectorizer combines consecutive loop iterations into a single
15// 'wide' iteration. After this transformation the index is incremented
16// by the SIMD vector width, and not by one.
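// As an illustrative sketch (editor's example, not code from this pass), a
// scalar loop such as
//
//   for (int i = 0; i < n; ++i)
//     a[i] = b[i] + c[i];
//
// is conceptually rewritten for a vectorization factor (VF) of 4 into
//
//   for (int i = 0; i + 4 <= n; i += 4)
//     a[i..i+3] = b[i..i+3] + c[i..i+3]; // one wide SIMD add
//
// with the remaining n % 4 iterations handled by a scalar epilogue or by
// tail folding (see below).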
17//
18// This pass has four parts:
19// 1. The main loop pass that drives the different parts.
20// 2. LoopVectorizationLegality - A unit that checks for the legality
21// of the vectorization.
22// 3. InnerLoopVectorizer - A unit that performs the actual
23// widening of instructions.
24// 4. LoopVectorizationCostModel - A unit that checks for the profitability
25// of vectorization. It decides on the optimal vector width, which
26// can be one, if vectorization is not profitable.
27//
28// There is a development effort going on to migrate the loop vectorizer to the
29// VPlan infrastructure and to introduce outer loop vectorization support (see
30// docs/VectorizationPlan.rst and
31// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32// purpose, we temporarily introduced the VPlan-native vectorization path: an
33// alternative vectorization path that is natively implemented on top of the
34// VPlan infrastructure. See EnableVPlanNativePath for enabling.
35//
36//===----------------------------------------------------------------------===//
37//
38// The reduction-variable vectorization is based on the paper:
39// D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40//
41// Variable uniformity checks are inspired by:
42// Karrenberg, R. and Hack, S. Whole Function Vectorization.
43//
44// The interleaved access vectorization is based on the paper:
45// Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
46// Data for SIMD
47//
48// Other ideas/concepts are from:
49// A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50//
51// S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
52// Vectorizing Compilers.
53//
54//===----------------------------------------------------------------------===//
55
58#include "VPRecipeBuilder.h"
59#include "VPlan.h"
60#include "VPlanAnalysis.h"
61#include "VPlanCFG.h"
62#include "VPlanHelpers.h"
63#include "VPlanPatternMatch.h"
64#include "VPlanTransforms.h"
65#include "VPlanUtils.h"
66#include "VPlanVerifier.h"
67#include "llvm/ADT/APInt.h"
68#include "llvm/ADT/ArrayRef.h"
69#include "llvm/ADT/DenseMap.h"
71#include "llvm/ADT/Hashing.h"
72#include "llvm/ADT/MapVector.h"
73#include "llvm/ADT/STLExtras.h"
76#include "llvm/ADT/Statistic.h"
77#include "llvm/ADT/StringRef.h"
78#include "llvm/ADT/Twine.h"
79#include "llvm/ADT/TypeSwitch.h"
84#include "llvm/Analysis/CFG.h"
101#include "llvm/IR/Attributes.h"
102#include "llvm/IR/BasicBlock.h"
103#include "llvm/IR/CFG.h"
104#include "llvm/IR/Constant.h"
105#include "llvm/IR/Constants.h"
106#include "llvm/IR/DataLayout.h"
107#include "llvm/IR/DebugInfo.h"
108#include "llvm/IR/DebugLoc.h"
109#include "llvm/IR/DerivedTypes.h"
111#include "llvm/IR/Dominators.h"
112#include "llvm/IR/Function.h"
113#include "llvm/IR/IRBuilder.h"
114#include "llvm/IR/InstrTypes.h"
115#include "llvm/IR/Instruction.h"
116#include "llvm/IR/Instructions.h"
118#include "llvm/IR/Intrinsics.h"
119#include "llvm/IR/MDBuilder.h"
120#include "llvm/IR/Metadata.h"
121#include "llvm/IR/Module.h"
122#include "llvm/IR/Operator.h"
123#include "llvm/IR/PatternMatch.h"
125#include "llvm/IR/Type.h"
126#include "llvm/IR/Use.h"
127#include "llvm/IR/User.h"
128#include "llvm/IR/Value.h"
129#include "llvm/IR/Verifier.h"
130#include "llvm/Support/Casting.h"
132#include "llvm/Support/Debug.h"
147#include <algorithm>
148#include <cassert>
149#include <cmath>
150#include <cstdint>
151#include <functional>
152#include <iterator>
153#include <limits>
154#include <memory>
155#include <string>
156#include <tuple>
157#include <utility>
158
159using namespace llvm;
160using namespace SCEVPatternMatch;
161
162#define LV_NAME "loop-vectorize"
163#define DEBUG_TYPE LV_NAME
164
165#ifndef NDEBUG
166const char VerboseDebug[] = DEBUG_TYPE "-verbose";
167#endif
168
169STATISTIC(LoopsVectorized, "Number of loops vectorized");
170STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
171STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
172STATISTIC(LoopsEarlyExitVectorized, "Number of early exit loops vectorized");
173
175 "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
176 cl::desc("Enable vectorization of epilogue loops."));
177
179 "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
180 cl::desc("When epilogue vectorization is enabled, and a value greater than "
181 "1 is specified, forces the given VF for all applicable epilogue "
182 "loops."));
183
185 "epilogue-vectorization-minimum-VF", cl::Hidden,
186 cl::desc("Only loops with vectorization factor equal to or larger than "
187 "the specified value are considered for epilogue vectorization."));
188
189/// Loops with a known constant trip count below this number are vectorized only
190/// if no scalar iteration overheads are incurred.
192 "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
193 cl::desc("Loops with a constant trip count that is smaller than this "
194 "value are vectorized only if no scalar iteration overheads "
195 "are incurred."));
196
198 "vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
199 cl::desc("The maximum allowed number of runtime memory checks"));
200
201/// Option tail-folding-policy indicates whether an epilogue is undesired and
202/// tail folding is preferred instead: the vectorizer will try to fold the
203/// tail loop (epilogue) into the vector body and predicate the instructions
204/// accordingly. If tail-folding fails, these values select the fallback
205/// strategy (see the tail-folding sketch below):
207
209 "tail-folding-policy", cl::init(TailFoldingPolicyTy::None), cl::Hidden,
210 cl::desc("Tail-folding preferences over creating an epilogue loop."),
212 clEnumValN(TailFoldingPolicyTy::None, "dont-fold-tail",
213 "Don't tail-fold loops."),
215 "prefer tail-folding, otherwise create an epilogue when "
216 "appropriate."),
218 "always tail-fold, don't attempt vectorization if "
219 "tail-folding fails.")));
220
222 "force-tail-folding-style", cl::desc("Force the tail folding style"),
225 clEnumValN(TailFoldingStyle::None, "none", "Disable tail folding"),
228 "Create lane mask for data only, using active.lane.mask intrinsic"),
230 "data-without-lane-mask",
231 "Create lane mask with compare/stepvector"),
233 "Create lane mask using active.lane.mask intrinsic, and use "
234 "it for both data and control flow"),
236 "Use predicated EVL instructions for tail folding. If EVL "
237 "is unsupported, fallback to data-without-lane-mask.")));
238
240 "enable-wide-lane-mask", cl::init(false), cl::Hidden,
241 cl::desc("Enable use of wide lane masks when used for control flow in "
242 "tail-folded loops"));
243
245 "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
246 cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
247
248/// An interleave-group may need masking if it resides in a block that needs
249/// predication, or in order to mask away gaps.
251 "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
252 cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
253
255 "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
256 cl::desc("A flag that overrides the target's number of scalar registers."));
257
259 "force-target-num-vector-regs", cl::init(0), cl::Hidden,
260 cl::desc("A flag that overrides the target's number of vector registers."));
261
263 "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
264 cl::desc("A flag that overrides the target's max interleave factor for "
265 "scalar loops."));
266
268 "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
269 cl::desc("A flag that overrides the target's max interleave factor for "
270 "vectorized loops."));
271
273 "force-target-instruction-cost", cl::init(0), cl::Hidden,
274 cl::desc("A flag that overrides the target's expected cost for "
275 "an instruction to a single constant value. Mostly "
276 "useful for getting consistent testing."));
277
279 "small-loop-cost", cl::init(20), cl::Hidden,
280 cl::desc(
281 "The cost of a loop that is considered 'small' by the interleaver."));
282
284 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
285 cl::desc("Enable the use of the block frequency analysis to access PGO "
286 "heuristics minimizing code growth in cold regions and being more "
287 "aggressive in hot regions."));
288
289// Runtime interleave loops for load/store throughput.
291 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
292 cl::desc(
293 "Enable runtime interleaving until load/store ports are saturated"));
294
295/// The number of stores in a loop that are allowed to need predication.
297 "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
298 cl::desc("Max number of stores to be predicated behind an if."));
299
301 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
302 cl::desc("Count the induction variable only once when interleaving"));
303
305 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
306 cl::desc("The maximum interleave count to use when interleaving a scalar "
307 "reduction in a nested loop."));
308
310 "force-ordered-reductions", cl::init(false), cl::Hidden,
311 cl::desc("Enable the vectorisation of loops with in-order (strict) "
312 "FP reductions"));
313
315 "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
316 cl::desc(
317 "Prefer predicating a reduction operation over an after loop select."));
318
320 "enable-vplan-native-path", cl::Hidden,
321 cl::desc("Enable VPlan-native vectorization path with "
322 "support for outer loop vectorization."));
323
325 llvm::VerifyEachVPlan("vplan-verify-each",
326#ifdef EXPENSIVE_CHECKS
327 cl::init(true),
328#else
329 cl::init(false),
330#endif
332 cl::desc("Verify VPlans after VPlan transforms."));
333
334#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
336 "vplan-print-after-all", cl::init(false), cl::Hidden,
337 cl::desc("Print VPlans after all VPlan transformations."));
338
340 "vplan-print-after", cl::Hidden,
341 cl::desc("Print VPlans after specified VPlan transformations (regexp)."));
342
344 "vplan-print-vector-region-scope", cl::init(false), cl::Hidden,
345 cl::desc("Limit VPlan printing to vector loop region in "
346 "`-vplan-print-after*` if the plan has one."));
347#endif
348
349// This flag enables the stress testing of the VPlan H-CFG construction in the
350// VPlan-native vectorization path. It must be used in conjunction with
351// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
352// verification of the H-CFGs built.
354 "vplan-build-stress-test", cl::init(false), cl::Hidden,
355 cl::desc(
356 "Build VPlan for every supported loop nest in the function and bail "
357 "out right after the build (stress test the VPlan H-CFG construction "
358 "in the VPlan-native vectorization path)."));
359
361 "interleave-loops", cl::init(true), cl::Hidden,
362 cl::desc("Enable loop interleaving in Loop vectorization passes"));
364 "vectorize-loops", cl::init(true), cl::Hidden,
365 cl::desc("Run the Loop vectorization passes"));
366
368 "force-widen-divrem-via-safe-divisor", cl::Hidden,
369 cl::desc(
370 "Override cost based safe divisor widening for div/rem instructions"));
371
373 "enable-early-exit-vectorization", cl::init(true), cl::Hidden,
374 cl::desc(
375 "Enable vectorization of early exit loops with uncountable exits."));
376
377// Likelihood of bypassing the vectorized loop because there are zero trips left
378// after prolog. See `emitIterationCountCheck`.
379static constexpr uint32_t MinItersBypassWeights[] = {1, 127};
380
381/// A helper function that returns true if the given type is irregular. The
382/// type is irregular if its allocated size doesn't equal the store size of an
383/// element of the corresponding vector type.
384static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
385 // Determine if an array of N elements of type Ty is "bitcast compatible"
386 // with a <N x Ty> vector.
387 // This is only true if there is no padding between the array elements.
388 return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
389}
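// For example, with a typical x86-64 data layout x86_fp80 has a type size of
// 80 bits but an alloc size of 128 bits, so an array of x86_fp80 is not
// bitcast-compatible with <N x x86_fp80> and the type is irregular, whereas
// i32 (32-bit size, 32-bit alloc size) is regular.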
390
391/// A version of ScalarEvolution::getSmallConstantTripCount that returns an
392/// ElementCount to include loops whose trip count is a function of vscale.
393static ElementCount getSmallConstantTripCount(ScalarEvolution *SE,
394 const Loop *L) {
395 if (unsigned ExpectedTC = SE->getSmallConstantTripCount(L))
396 return ElementCount::getFixed(ExpectedTC);
397
398 const SCEV *BTC = SE->getBackedgeTakenCount(L);
399 if (isa<SCEVCouldNotCompute>(BTC))
400 return ElementCount::getFixed(0);
401
402 const SCEV *ExitCount = SE->getTripCountFromExitCount(BTC, BTC->getType(), L);
403 if (isa<SCEVVScale>(ExitCount))
404 return ElementCount::getScalable(1);
405
406 const APInt *Scale;
407 if (match(ExitCount, m_scev_Mul(m_scev_APInt(Scale), m_SCEVVScale())))
408 if (cast<SCEVMulExpr>(ExitCount)->hasNoUnsignedWrap())
409 if (Scale->getActiveBits() <= 32)
410 return ElementCount::getScalable(Scale->getZExtValue());
411
412 return ElementCount::getFixed(0);
413}
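// For example (editor's sketch): a loop whose backedge-taken count is
// 4 * vscale - 1 has trip count 4 * vscale and yields
// ElementCount::getScalable(4), while a plain constant trip count of 100
// yields ElementCount::getFixed(100).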
414
415/// Get the maximum trip count for \p L from the SCEV unsigned range, excluding
416/// zero from the range. Only valid when not folding the tail, as the minimum
417/// iteration count check guards against a zero trip count. Returns 0 if
418/// unknown.
419static unsigned getMaxTCFromNonZeroRange(PredicatedScalarEvolution &PSE,
420 Loop *L) {
421 const SCEV *BTC = PSE.getBackedgeTakenCount();
422 if (isa<SCEVCouldNotCompute>(BTC))
423 return 0;
424 ScalarEvolution *SE = PSE.getSE();
425 const SCEV *TripCount = SE->getTripCountFromExitCount(BTC, BTC->getType(), L);
426 ConstantRange TCRange = SE->getUnsignedRange(TripCount);
427 APInt MaxTCFromRange = TCRange.getUnsignedMax();
428 if (!MaxTCFromRange.isZero() && MaxTCFromRange.getActiveBits() <= 32)
429 return MaxTCFromRange.getZExtValue();
430 return 0;
431}
432
433/// Returns "best known" trip count, which is either a valid positive trip count
434/// or std::nullopt when an estimate cannot be made (including when the trip
435/// count would overflow), for the specified loop \p L as defined by the
436/// following procedure:
437/// 1) Returns exact trip count if it is known.
438/// 2) Returns expected trip count according to profile data if any.
439/// 3) Returns upper bound estimate if known, and if \p CanUseConstantMax.
440/// 4) Returns the maximum trip count from the SCEV range excluding zero,
441/// if \p CanUseConstantMax and \p CanExcludeZeroTrips.
442/// 5) Returns std::nullopt if all of the above failed.
443static std::optional<ElementCount>
444getSmallBestKnownTC(PredicatedScalarEvolution &PSE, Loop *L,
445 bool CanUseConstantMax = true,
446 bool CanExcludeZeroTrips = false) {
447 // Check if exact trip count is known.
448 if (auto ExpectedTC = getSmallConstantTripCount(PSE.getSE(), L))
449 return ExpectedTC;
450
451 // Check if there is an expected trip count available from profile data.
452 if (L->getHeader()->getParent()->hasProfileData())
453 if (auto EstimatedTC = getLoopEstimatedTripCount(L))
454 return ElementCount::getFixed(*EstimatedTC);
455
456 if (!CanUseConstantMax)
457 return std::nullopt;
458
459 // Check if upper bound estimate is known.
460 if (unsigned ExpectedTC = PSE.getSmallConstantMaxTripCount())
461 return ElementCount::getFixed(ExpectedTC);
462
463 // Get the maximum trip count from the SCEV range excluding zero. This is
464 // only safe when not folding the tail, as the minimum iteration count check
465 // prevents entering the vector loop with a zero trip count.
466 if (CanUseConstantMax && CanExcludeZeroTrips)
467 if (unsigned RefinedTC = getMaxTCFromNonZeroRange(PSE, L))
468 return ElementCount::getFixed(RefinedTC);
469
470 return std::nullopt;
471}
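// Worked example (editor's sketch): for `for (i = 0; i < 1024; ++i)` step 1
// returns a fixed count of 1024; for a loop with unknown bounds whose profile
// data estimates ~100 iterations, step 2 returns 100; if only a SCEV-derived
// upper bound (say 8) is known, step 3 returns 8; otherwise the result is
// std::nullopt.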
472
473namespace {
474// Forward declare GeneratedRTChecks.
475class GeneratedRTChecks;
476
477using SCEV2ValueTy = DenseMap<const SCEV *, Value *>;
478} // namespace
479
480namespace llvm {
481
483
484/// InnerLoopVectorizer vectorizes loops which contain only one basic
485/// block to a specified vectorization factor (VF).
486/// This class performs the widening of scalars into vectors, or multiple
487/// scalars. This class also implements the following features:
488/// * It inserts an epilogue loop for handling loops that don't have iteration
489/// counts that are known to be a multiple of the vectorization factor.
490/// * It handles the code generation for reduction variables.
491/// * Scalarization (implementation using scalars) of un-vectorizable
492/// instructions.
493/// InnerLoopVectorizer does not perform any vectorization-legality
494/// checks, and relies on the caller to check for the different legality
495/// aspects. The InnerLoopVectorizer relies on the
496/// LoopVectorizationLegality class to provide information about the induction
497/// and reduction variables that were found to a given vectorization factor.
498class InnerLoopVectorizer {
499public:
503 ElementCount VecWidth, unsigned UnrollFactor,
505 GeneratedRTChecks &RTChecks, VPlan &Plan)
506 : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TTI(TTI), AC(AC),
507 VF(VecWidth), UF(UnrollFactor), Builder(PSE.getSE()->getContext()),
510 Plan.getVectorLoopRegion()->getSinglePredecessor())) {}
511
512 virtual ~InnerLoopVectorizer() = default;
513
514 /// Creates a basic block for the scalar preheader. Both
515 /// EpilogueVectorizerMainLoop and EpilogueVectorizerEpilogueLoop overwrite
516 /// the method to create additional blocks and checks needed for epilogue
517 /// vectorization.
519
520 /// Fix the vectorized code, taking care of header phi's, and more.
522
523 /// Fix the non-induction PHIs in \p Plan.
525
526protected:
528
529 /// Create and return a new IR basic block for the scalar preheader whose name
530 /// is prefixed with \p Prefix.
532
533 /// Allow subclasses to override and print debug traces before/after vplan
534 /// execution, when trace information is requested.
535 virtual void printDebugTracesAtStart() {}
536 virtual void printDebugTracesAtEnd() {}
537
538 /// The original loop.
540
541 /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
542 /// dynamic knowledge to simplify SCEV expressions and converts them to a
543 /// more usable form.
545
546 /// Loop Info.
548
549 /// Dominator Tree.
551
552 /// Target Transform Info.
554
555 /// Assumption Cache.
557
558 /// The vectorization SIMD factor to use. Each vector will have this many
559 /// vector elements.
561
562 /// The vectorization unroll factor to use. Each scalar is vectorized to this
563 /// many different vector instructions.
564 unsigned UF;
565
566 /// The builder that we use
568
569 // --- Vectorization state ---
570
571 /// The profitablity analysis.
573
574 /// Structure to hold information about generated runtime checks, responsible
575 /// for cleaning the checks, if vectorization turns out unprofitable.
576 GeneratedRTChecks &RTChecks;
577
578 VPlan &Plan;
579
580 /// The vector preheader block of \p Plan, used as target for check blocks
581 /// introduced during skeleton creation.
583};
584
585/// Encapsulate information regarding vectorization of a loop and its epilogue.
586/// This information is meant to be updated and used across two stages of
587/// epilogue vectorization.
588struct EpilogueLoopVectorizationInfo {
589 ElementCount MainLoopVF = ElementCount::getFixed(0);
590 unsigned MainLoopUF = 0;
591 ElementCount EpilogueVF = ElementCount::getFixed(0);
592 unsigned EpilogueUF = 0;
597
598 EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
599 ElementCount EVF, unsigned EUF,
601 : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF),
603 assert(EUF == 1 &&
604 "A high UF for the epilogue loop is likely not beneficial.");
605 }
606};
607
608/// An extension of the inner loop vectorizer that creates a skeleton for a
609/// vectorized loop that has its epilogue (residual) also vectorized.
610/// The idea is to run the VPlan on a given loop twice: first to set up the
611/// skeleton and vectorize the main loop, and second to complete the skeleton
612/// from the first step and vectorize the epilogue. This is achieved by
613/// deriving two concrete strategy classes from this base class and invoking
614/// them in succession from the loop vectorizer planner.
615class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
616public:
626
627 /// Holds and updates state information required to vectorize the main loop
628 /// and its epilogue in two separate passes. This setup helps us avoid
629 /// regenerating and recomputing runtime safety checks. It also helps us to
630 /// shorten the iteration-count-check path length for the cases where the
631 /// iteration count of the loop is so small that the main vector loop is
632 /// completely skipped.
634
635protected:
637};
638
639/// A specialized derived class of inner loop vectorizer that performs
640/// vectorization of *main* loops in the process of vectorizing loops and their
641/// epilogues.
642class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
643public:
654
655protected:
656 void printDebugTracesAtStart() override;
657 void printDebugTracesAtEnd() override;
658};
659
660// A specialized derived class of inner loop vectorizer that performs
661// vectorization of *epilogue* loops in the process of vectorizing loops and
662// their epilogues.
663class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
664public:
671 GeneratedRTChecks &Checks, VPlan &Plan)
673 Checks, Plan, EPI.EpilogueVF,
674 EPI.EpilogueVF, EPI.EpilogueUF) {}
675 /// Implements the interface for creating a vectorized skeleton using the
676 /// *epilogue loop* strategy (i.e., the second pass of VPlan execution).
678
679protected:
680 void printDebugTracesAtStart() override;
681 void printDebugTracesAtEnd() override;
682};
683} // end namespace llvm
684
685/// Look for a meaningful debug location on the instruction or its operands.
686static DebugLoc getDebugLocFromInstOrOperands(Instruction *I) {
687 if (!I)
688 return DebugLoc::getUnknown();
689
690 DebugLoc Empty;
691 if (I->getDebugLoc() != Empty)
692 return I->getDebugLoc();
693
694 for (Use &Op : I->operands()) {
695 if (Instruction *OpInst = dyn_cast<Instruction>(Op))
696 if (OpInst->getDebugLoc() != Empty)
697 return OpInst->getDebugLoc();
698 }
699
700 return I->getDebugLoc();
701}
702
703/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
704/// is passed, the message relates to that particular instruction.
705#ifndef NDEBUG
706static void debugVectorizationMessage(const StringRef Prefix,
707 const StringRef DebugMsg,
708 Instruction *I) {
709 dbgs() << "LV: " << Prefix << DebugMsg;
710 if (I != nullptr)
711 dbgs() << " " << *I;
712 else
713 dbgs() << '.';
714 dbgs() << '\n';
715}
716#endif
717
718/// Create an analysis remark that explains why vectorization failed
719///
720/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
721/// RemarkName is the identifier for the remark. If \p I is passed it is an
722/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
723/// the location of the remark. If \p DL is passed, use it as debug location for
724/// the remark. \return the remark object that can be streamed to.
725static OptimizationRemarkAnalysis
726createLVAnalysis(const char *PassName, StringRef RemarkName,
727 const Loop *TheLoop, Instruction *I, DebugLoc DL = {}) {
728 BasicBlock *CodeRegion = I ? I->getParent() : TheLoop->getHeader();
729 // If debug location is attached to the instruction, use it. Otherwise if DL
730 // was not provided, use the loop's.
731 if (I && I->getDebugLoc())
732 DL = I->getDebugLoc();
733 else if (!DL)
734 DL = TheLoop->getStartLoc();
735
736 return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
737}
738
739namespace llvm {
740
741/// Return the runtime value for VF.
742Value *getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF) {
743 return B.CreateElementCount(Ty, VF);
744}
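// For a fixed VF of 4 this materializes the constant 4; for a scalable VF of
// <vscale x 4> it emits the equivalent of 4 * @llvm.vscale(), so callers can
// treat fixed and scalable factors uniformly.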
745
746void reportVectorizationFailure(const StringRef DebugMsg,
747 const StringRef OREMsg, const StringRef ORETag,
748 OptimizationRemarkEmitter *ORE, Loop *TheLoop,
749 Instruction *I) {
750 LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
751 LoopVectorizeHints Hints(TheLoop, false /* doesn't matter */, *ORE);
752 ORE->emit(
753 createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
754 << "loop not vectorized: " << OREMsg);
755}
756
757void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
758 OptimizationRemarkEmitter *ORE,
759 const Loop *TheLoop, Instruction *I, DebugLoc DL) {
761 LoopVectorizeHints Hints(TheLoop, false /* doesn't matter */, *ORE);
762 ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop,
763 I, DL)
764 << Msg);
765}
766
767/// Report successful vectorization of the loop. In case an outer loop is
768/// vectorized, prepend "outer" to the vectorization remark.
769static void reportVectorization(OptimizationRemarkEmitter *ORE, Loop *TheLoop,
770 VectorizationFactor VF, unsigned IC) {
772 "Vectorizing: ", TheLoop->isInnermost() ? "innermost loop" : "outer loop",
773 nullptr));
774 StringRef LoopType = TheLoop->isInnermost() ? "" : "outer ";
775 ORE->emit([&]() {
776 return OptimizationRemark(LV_NAME, "Vectorized", TheLoop->getStartLoc(),
777 TheLoop->getHeader())
778 << "vectorized " << LoopType << "loop (vectorization width: "
779 << ore::NV("VectorizationFactor", VF.Width)
780 << ", interleaved count: " << ore::NV("InterleaveCount", IC) << ")";
781 });
782}
783
784} // end namespace llvm
785
786namespace llvm {
787
788// Loop vectorization cost-model hints how the epilogue/tail loop should be
789// lowered.
791
792 // The default: allowing epilogues.
794
795 // Vectorization with OptForSize: don't allow epilogues.
797
798 // A special case of vectorization with OptForSize: loops with a very small
799 // trip count are considered for vectorization under OptForSize, thereby
800 // making sure the cost of their loop body is dominant, free of runtime
801 // guards and scalar iteration overheads.
803
804 // Loop hint indicating an epilogue is undesired, apply tail folding.
806
807 // Directive indicating we must either fold the epilogue/tail or not vectorize
809};
810
811/// LoopVectorizationCostModel - estimates the expected speedups due to
812/// vectorization.
813/// In many cases vectorization is not profitable. This can happen because of
814/// a number of reasons. In this class we mainly attempt to predict the
815/// expected speedup/slowdowns due to the supported instruction set. We use the
816/// TargetTransformInfo to query the different backends for the cost of
817/// different operations.
818class LoopVectorizationCostModel {
820
821public:
835
836 /// \return An upper bound for the vectorization factors (both fixed and
837 /// scalable). If the factors are 0, vectorization and interleaving should be
838 /// avoided up front.
839 FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);
840
841 /// Memory access instruction may be vectorized in more than one way.
842 /// Form of instruction after vectorization depends on cost.
843 /// This function takes cost-based decisions for Load/Store instructions
844 /// and collects them in a map. This decisions map is used for building
845 /// the lists of loop-uniform and loop-scalar instructions.
846 /// The calculated cost is saved with widening decision in order to
847 /// avoid redundant calculations.
848 void setCostBasedWideningDecision(ElementCount VF);
849
850 /// A call may be vectorized in different ways depending on whether we have
851 /// vectorized variants available and whether the target supports masking.
852 /// This function analyzes all calls in the function at the supplied VF,
853 /// makes a decision based on the costs of available options, and stores that
854 /// decision in a map for use in planning and plan execution.
855 void setVectorizedCallDecision(ElementCount VF);
856
857 /// Collect values we want to ignore in the cost model.
858 void collectValuesToIgnore();
859
860 /// \returns True if it is more profitable to scalarize instruction \p I for
861 /// vectorization factor \p VF.
863 assert(VF.isVector() &&
864 "Profitable to scalarize relevant only for VF > 1.");
865 assert(
866 TheLoop->isInnermost() &&
867 "cost-model should not be used for outer loops (in VPlan-native path)");
868
869 auto Scalars = InstsToScalarize.find(VF);
870 assert(Scalars != InstsToScalarize.end() &&
871 "VF not yet analyzed for scalarization profitability");
872 return Scalars->second.contains(I);
873 }
874
875 /// Returns true if \p I is known to be uniform after vectorization.
877 assert(
878 TheLoop->isInnermost() &&
879 "cost-model should not be used for outer loops (in VPlan-native path)");
880
881 // If VF is scalar, then all instructions are trivially uniform.
882 if (VF.isScalar())
883 return true;
884
885 // Pseudo probes must be duplicated per vector lane so that the
886 // profiled loop trip count is not undercounted.
888 return false;
889
890 auto UniformsPerVF = Uniforms.find(VF);
891 assert(UniformsPerVF != Uniforms.end() &&
892 "VF not yet analyzed for uniformity");
893 return UniformsPerVF->second.count(I);
894 }
895
896 /// Returns true if \p I is known to be scalar after vectorization.
898 assert(
899 TheLoop->isInnermost() &&
900 "cost-model should not be used for outer loops (in VPlan-native path)");
901 if (VF.isScalar())
902 return true;
903
904 auto ScalarsPerVF = Scalars.find(VF);
905 assert(ScalarsPerVF != Scalars.end() &&
906 "Scalar values are not calculated for VF");
907 return ScalarsPerVF->second.count(I);
908 }
909
910 /// \returns True if instruction \p I can be truncated to a smaller bitwidth
911 /// for vectorization factor \p VF.
913 const auto &MinBWs = Config.getMinimalBitwidths();
914 // Truncs must truncate at most to their destination type.
915 if (isa_and_nonnull<TruncInst>(I) && MinBWs.contains(I) &&
916 I->getType()->getScalarSizeInBits() < MinBWs.lookup(I))
917 return false;
918 return VF.isVector() && MinBWs.contains(I) &&
921 }
922
923 /// Decision that was taken during cost calculation for memory instruction.
924 enum InstWidening {
925 CM_Unknown,
926 CM_Widen, // For consecutive accesses with stride +1.
927 CM_Widen_Reverse, // For consecutive accesses with stride -1.
928 CM_Interleave,
929 CM_GatherScatter,
930 CM_Scalarize,
931 CM_VectorCall,
932 CM_IntrinsicCall
933 };
934
935 /// Save vectorization decision \p W and \p Cost taken by the cost model for
936 /// instruction \p I and vector width \p VF.
939 assert(VF.isVector() && "Expected VF >=2");
940 WideningDecisions[{I, VF}] = {W, Cost};
941 }
942
943 /// Save vectorization decision \p W and \p Cost taken by the cost model for
944 /// interleaving group \p Grp and vector width \p VF.
948 assert(VF.isVector() && "Expected VF >=2");
949 /// Broadcast this decision to all instructions inside the group.
950 /// When interleaving, the cost will only be assigned to one instruction: the
951 /// insert position. For other cases, add the appropriate fraction of the
952 /// total cost to each instruction. This ensures accurate costs are used,
953 /// even if the insert position instruction is not used.
954 InstructionCost InsertPosCost = Cost;
955 InstructionCost OtherMemberCost = 0;
956 if (W != CM_Interleave)
957 OtherMemberCost = InsertPosCost = Cost / Grp->getNumMembers();
959 for (auto *I : Grp->members()) {
960 if (Grp->getInsertPos() == I)
961 WideningDecisions[{I, VF}] = {W, InsertPosCost};
962 else
963 WideningDecisions[{I, VF}] = {W, OtherMemberCost};
964 }
965 }
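// Worked example (editor's sketch): for a 4-member group with total cost 8,
// CM_Interleave charges the full 8 to the insert-position member and 0 to the
// others (the wide access is emitted once, at the insert position); for any
// other decision each member is charged 8 / 4 = 2, so the group total stays
// accurate even if the insert-position instruction is unused.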
966
967 /// Return the cost model decision for the given instruction \p I and vector
968 /// width \p VF. Return CM_Unknown if this instruction did not pass
969 /// through the cost modeling.
971 assert(VF.isVector() && "Expected VF to be a vector VF");
972 assert(
973 TheLoop->isInnermost() &&
974 "cost-model should not be used for outer loops (in VPlan-native path)");
975
976 std::pair<Instruction *, ElementCount> InstOnVF(I, VF);
977 auto Itr = WideningDecisions.find(InstOnVF);
978 if (Itr == WideningDecisions.end())
979 return CM_Unknown;
980 return Itr->second.first;
981 }
982
983 /// Return the vectorization cost for the given instruction \p I and vector
984 /// width \p VF.
986 assert(VF.isVector() && "Expected VF >=2");
987 std::pair<Instruction *, ElementCount> InstOnVF(I, VF);
988 assert(WideningDecisions.contains(InstOnVF) &&
989 "The cost is not calculated");
990 return WideningDecisions[InstOnVF].second;
991 }
992
993 struct CallWideningDecision {
994 InstWidening Kind;
995 Function *Variant;
996 Intrinsic::ID IID;
997 std::optional<unsigned> MaskPos;
998 InstructionCost Cost;
999 };
1000
1001 void setCallWideningDecision(CallInst *CI, ElementCount VF, InstWidening Kind,
1002 Function *Variant, Intrinsic::ID IID,
1003 std::optional<unsigned> MaskPos,
1005 assert(!VF.isScalar() && "Expected vector VF");
1006 CallWideningDecisions[{CI, VF}] = {Kind, Variant, IID, MaskPos, Cost};
1007 }
1008
1009 CallWideningDecision getCallWideningDecision(CallInst *CI,
1010 ElementCount VF) const {
1011 assert(!VF.isScalar() && "Expected vector VF");
1012 auto I = CallWideningDecisions.find({CI, VF});
1013 if (I == CallWideningDecisions.end())
1014 return {CM_Unknown, nullptr, Intrinsic::not_intrinsic, std::nullopt, 0};
1015 return I->second;
1016 }
1017
1018 /// Return True if instruction \p I is an optimizable truncate whose operand
1019 /// is an induction variable. Such a truncate will be removed by adding a new
1020 /// induction variable with the destination type.
1021 bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1022 // If the instruction is not a truncate, return false.
1023 auto *Trunc = dyn_cast<TruncInst>(I);
1024 if (!Trunc)
1025 return false;
1026
1027 // Get the source and destination types of the truncate.
1028 Type *SrcTy = toVectorTy(Trunc->getSrcTy(), VF);
1029 Type *DestTy = toVectorTy(Trunc->getDestTy(), VF);
1030
1031 // If the truncate is free for the given types, return false. Replacing a
1032 // free truncate with an induction variable would add an induction variable
1033 // update instruction to each iteration of the loop. We exclude from this
1034 // check the primary induction variable since it will need an update
1035 // instruction regardless.
1036 Value *Op = Trunc->getOperand(0);
1037 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1038 return false;
1039
1040 // If the truncated value is not an induction variable, return false.
1041 return Legal->isInductionPhi(Op);
1042 }
1043
1044 /// Collects the instructions to scalarize for each predicated instruction in
1045 /// the loop.
1046 void collectInstsToScalarize(ElementCount VF);
1047
1048 /// Collect values that will not be widened, including Uniforms, Scalars, and
1049 /// Instructions to Scalarize for the given \p VF.
1050 /// The sets depend on CM decision for Load/Store instructions
1051 /// that may be vectorized as interleave, gather-scatter or scalarized.
1052 /// Also make a decision on what to do about call instructions in the loop
1053 /// at that VF -- scalarize, call a known vector routine, or call a
1054 /// vector intrinsic.
1056 // Do the analysis once.
1057 if (VF.isScalar() || Uniforms.contains(VF))
1058 return;
1060 collectLoopUniforms(VF);
1062 collectLoopScalars(VF);
1064 }
1065
1066 /// Given costs for both strategies, return true if the scalar predication
1067 /// lowering should be used for div/rem. This incorporates an override
1068 /// option so it is not simply a cost comparison.
1070 InstructionCost SafeDivisorCost) const {
1071 switch (ForceSafeDivisor) {
1072 case cl::BOU_UNSET:
1073 return ScalarCost < SafeDivisorCost;
1074 case cl::BOU_TRUE:
1075 return false;
1076 case cl::BOU_FALSE:
1077 return true;
1078 }
1079 llvm_unreachable("impossible case value");
1080 }
1081
1082 /// Returns true if \p I is an instruction which requires predication and
1083 /// for which our chosen predication strategy is scalarization (i.e. we
1084 /// don't have an alternate strategy such as masking available).
1085 /// \p VF is the vectorization factor that will be used to vectorize \p I.
1086 bool isScalarWithPredication(Instruction *I, ElementCount VF);
1087
1088 /// Wrapper function for LoopVectorizationLegality::isMaskRequired,
1089 /// that passes the Instruction \p I and if we fold tail.
1090 bool isMaskRequired(Instruction *I) const;
1091
1092 /// Returns true if \p I is an instruction that needs to be predicated
1093 /// at runtime. The result is independent of the predication mechanism.
1094 /// Superset of instructions that return true for isScalarWithPredication.
1095 bool isPredicatedInst(Instruction *I) const;
1096
1097 /// A helper function that returns how much we should divide the cost of a
1098 /// predicated block by. Typically this is the reciprocal of the block
1099 /// probability, i.e. if we return X we are assuming the predicated block will
1100 /// execute once for every X iterations of the loop header, so the block
1101 /// should only contribute 1/X of its cost to the total cost calculation.
1102 /// When optimizing for code size the divisor is just 1, as code-size costs
1103 /// don't depend on execution probabilities.
1104 ///
1105 /// Note that if a block wasn't originally predicated but was predicated due
1106 /// to tail folding, the divisor will still be 1 because it will execute for
1107 /// every iteration of the loop header.
1108 inline uint64_t
1109 getPredBlockCostDivisor(TargetTransformInfo::TargetCostKind CostKind,
1110 const BasicBlock *BB);
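// Worked example (editor's sketch): if a predicated block has cost 20 and is
// expected to execute once per 4 iterations of the loop header, the divisor
// is 4 and the block contributes 20 / 4 = 5 to the loop cost; under optsize
// the divisor is 1 and the full 20 is counted.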
1111
1112 /// Returns true if an artificially high cost for emulated masked memrefs
1113 /// should be used.
1114 bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);
1115
1116 /// Return the costs for our two available strategies for lowering a
1117 /// div/rem operation which requires speculating at least one lane.
1118 /// First result is for scalarization (will be invalid for scalable
1119 /// vectors); second is for the safe-divisor strategy.
1120 std::pair<InstructionCost, InstructionCost>
1121 getDivRemSpeculationCost(Instruction *I, ElementCount VF);
1122
1123 /// Returns true if \p I is a memory instruction with consecutive memory
1124 /// access that can be widened.
1125 bool memoryInstructionCanBeWidened(Instruction *I, ElementCount VF);
1126
1127 /// Returns true if \p I is a memory instruction in an interleaved-group
1128 /// of memory accesses that can be vectorized with wide vector loads/stores
1129 /// and shuffles.
1130 bool interleavedAccessCanBeWidened(Instruction *I, ElementCount VF) const;
1131
1132 /// Check if \p Instr belongs to any interleaved access group.
1133 bool isAccessInterleaved(Instruction *Instr) const {
1134 return InterleaveInfo.isInterleaved(Instr);
1135 }
1136
1137 /// Get the interleaved access group that \p Instr belongs to.
1138 const InterleaveGroup<Instruction> *
1139 getInterleavedAccessGroup(Instruction *Instr) const {
1140 return InterleaveInfo.getInterleaveGroup(Instr);
1141 }
1142
1143 /// Returns true if we're required to use a scalar epilogue for at least
1144 /// the final iteration of the original loop.
1145 bool requiresScalarEpilogue(bool IsVectorizing) const {
1146 if (!isEpilogueAllowed()) {
1147 LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue\n");
1148 return false;
1149 }
1150 // If we might exit from anywhere but the latch and early exit vectorization
1151 // is disabled, we must run the exiting iteration in scalar form.
1152 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch() &&
1153 !(EnableEarlyExitVectorization && Legal->hasUncountableEarlyExit())) {
1154 LLVM_DEBUG(dbgs() << "LV: Loop requires scalar epilogue: not exiting "
1155 "from latch block\n");
1156 return true;
1157 }
1158 if (IsVectorizing && InterleaveInfo.requiresScalarEpilogue()) {
1159 LLVM_DEBUG(dbgs() << "LV: Loop requires scalar epilogue: "
1160 "interleaved group requires scalar epilogue\n");
1161 return true;
1162 }
1163 LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue\n");
1164 return false;
1165 }
1166
1167 /// Returns true if an epilogue is allowed (e.g., not prevented by
1168 /// optsize or a loop hint annotation).
1169 bool isEpilogueAllowed() const {
1170 return EpilogueLoweringStatus == CM_EpilogueAllowed;
1171 }
1172
1173 /// Returns true if tail-folding is preferred over an epilogue.
1175 return EpilogueLoweringStatus == CM_EpilogueNotNeededFoldTail ||
1176 EpilogueLoweringStatus == CM_EpilogueNotAllowedFoldTail;
1177 }
1178
1179 /// Returns the TailFoldingStyle that is best for the current loop.
1180 TailFoldingStyle getTailFoldingStyle() const {
1181 return ChosenTailFoldingStyle;
1182 }
1183
1184 /// Selects and saves TailFoldingStyle.
1185 /// \param IsScalableVF true if scalable vector factors enabled.
1186 /// \param UserIC User specific interleave count.
1187 void setTailFoldingStyle(bool IsScalableVF, unsigned UserIC) {
1188 assert(ChosenTailFoldingStyle == TailFoldingStyle::None &&
1189 "Tail folding must not be selected yet.");
1190 if (!Legal->canFoldTailByMasking()) {
1191 ChosenTailFoldingStyle = TailFoldingStyle::None;
1192 return;
1193 }
1194
1195 // Default to TTI preference, but allow command line override.
1196 ChosenTailFoldingStyle = TTI.getPreferredTailFoldingStyle();
1197 if (ForceTailFoldingStyle.getNumOccurrences())
1198 ChosenTailFoldingStyle = ForceTailFoldingStyle.getValue();
1199
1200 if (ChosenTailFoldingStyle != TailFoldingStyle::DataWithEVL)
1201 return;
1202 // Override EVL styles if needed.
1203 // FIXME: Investigate opportunity for fixed vector factor.
1204 bool EVLIsLegal = UserIC <= 1 && IsScalableVF &&
1205 TTI.hasActiveVectorLength() && !EnableVPlanNativePath;
1206 if (EVLIsLegal)
1207 return;
1208 // If for some reason EVL mode is unsupported, fallback to an epilogue
1209 // if it's allowed, or DataWithoutLaneMask otherwise.
1210 if (EpilogueLoweringStatus == CM_EpilogueAllowed ||
1211 EpilogueLoweringStatus == CM_EpilogueNotNeededFoldTail)
1212 ChosenTailFoldingStyle = TailFoldingStyle::None;
1213 else
1214 ChosenTailFoldingStyle = TailFoldingStyle::DataWithoutLaneMask;
1215
1216 LLVM_DEBUG(
1217 dbgs() << "LV: Preference for VP intrinsics indicated. Will "
1218 "not try to generate VP Intrinsics "
1219 << (UserIC > 1
1220 ? "since interleave count specified is greater than 1.\n"
1221 : "due to non-interleaving reasons.\n"));
1222 }
1223
1224 /// Returns true if all loop blocks should be masked to fold tail loop.
1225 bool foldTailByMasking() const {
1226 return getTailFoldingStyle() != TailFoldingStyle::None;
1227 }
1228
1229 /// Returns true if the use of wide lane masks is requested and the loop is
1230 /// using tail-folding with a lane mask for control flow.
1233 return false;
1234
1236 }
1237
1238 /// Returns true if the instructions in this block requires predication
1239 /// for any reason, e.g. because tail folding now requires a predicate
1240 /// or because the block in the original loop was predicated.
1242 return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1243 }
1244
1245 /// Returns true if VP intrinsics with explicit vector length support should
1246 /// be generated in the tail folded loop.
1247 bool foldTailWithEVL() const {
1248 return getTailFoldingStyle() == TailFoldingStyle::DataWithEVL;
1249 }
1250
1251 /// Returns true if the predicated reduction select should be used to set the
1252 /// incoming value for the reduction phi.
1253 bool usePredicatedReductionSelect(RecurKind RecurrenceKind) const {
1254 // Force to use predicated reduction select since the EVL of the
1255 // second-to-last iteration might not be VF*UF.
1256 if (foldTailWithEVL())
1257 return true;
1258
1259 // Note: For FindLast recurrences we prefer a predicated select to simplify
1260 // matching in handleFindLastReductions(), rather than handle multiple
1261 // cases.
1263 return true;
1264
1265 return PreferPredicatedReductionSelect ||
1266 TTI.preferPredicatedReductionSelect();
1267 }
1268
1269 /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1270 /// with factor VF. Return the cost of the instruction, including
1271 /// scalarization overhead if it's needed.
1272 InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1273
1274 /// Estimate cost of a call instruction CI if it were vectorized with factor
1275 /// VF. Return the cost of the instruction, including scalarization overhead
1276 /// if it's needed.
1277 InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF) const;
1278
1279 /// Invalidates decisions already taken by the cost model.
1280 void invalidateCostModelingDecisions() {
1281 WideningDecisions.clear();
1282 CallWideningDecisions.clear();
1283 Uniforms.clear();
1284 Scalars.clear();
1285 }
1286
1287 /// Returns the expected execution cost. The unit of the cost does
1288 /// not matter because we use the 'cost' units to compare different
1289 /// vector widths. The cost that is returned is *not* normalized by
1290 /// the factor width.
1291 InstructionCost expectedCost(ElementCount VF);
1292
1293 /// Returns true if epilogue vectorization is considered profitable, and
1294 /// false otherwise.
1295 /// \p VF is the vectorization factor chosen for the original loop.
1296 /// \p IC is an additional scaling factor (the interleave count) applied to VF before
1297 /// comparing to EpilogueVectorizationMinVF.
1298 bool isEpilogueVectorizationProfitable(const ElementCount VF,
1299 const unsigned IC) const;
1300
1301 /// Returns the execution time cost of an instruction for a given vector
1302 /// width. Vector width of one means scalar.
1303 InstructionCost getInstructionCost(Instruction *I, ElementCount VF);
1304
1305 /// Return the cost of instructions in an inloop reduction pattern, if I is
1306 /// part of that pattern.
1307 std::optional<InstructionCost> getReductionPatternCost(Instruction *I,
1308 ElementCount VF,
1309 Type *VectorTy) const;
1310
1311 /// Returns true if \p Op should be considered invariant and if it is
1312 /// trivially hoistable.
1313 bool shouldConsiderInvariant(Value *Op);
1314
1315private:
1316 unsigned NumPredStores = 0;
1317
1318 /// VF selection state independent of cost-modeling decisions.
1319 VFSelectionContext &Config;
1320
1321 /// Calculate vectorization cost of memory instruction \p I.
1322 InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1323
1324 /// The cost computation for scalarized memory instruction.
1325 InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1326
1327 /// The cost computation for interleaving group of memory instructions.
1328 InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1329
1330 /// The cost computation for Gather/Scatter instruction.
1331 InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1332
1333 /// The cost computation for widening instruction \p I with consecutive
1334 /// memory access.
1335 InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1336
1337 /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1338 /// Load: scalar load + broadcast.
1339 /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1340 /// element)
1341 InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1342
1343 /// Estimate the overhead of scalarizing an instruction. This is a
1344 /// convenience wrapper for the type-based getScalarizationOverhead API.
1345 InstructionCost getScalarizationOverhead(Instruction *I,
1346 ElementCount VF) const;
1347
1348 /// A type representing the costs for instructions if they were to be
1349 /// scalarized rather than vectorized. The entries are Instruction-Cost
1350 /// pairs.
1351 using ScalarCostsTy = MapVector<Instruction *, InstructionCost>;
1352
1353 /// A set containing all BasicBlocks that are known to be present after
1354 /// vectorization as a predicated block.
1355 DenseMap<ElementCount, SmallPtrSet<BasicBlock *, 4>>
1356 PredicatedBBsAfterVectorization;
1357
1358 /// Records whether it is allowed to have the original scalar loop execute at
1359 /// least once. This may be needed as a fallback loop in case runtime
1360 /// aliasing/dependence checks fail, or to handle the tail/remainder
1361 /// iterations when the trip count is unknown or doesn't divide by the VF,
1362 /// or as a peel-loop to handle gaps in interleave-groups.
1363 /// Under optsize and when the trip count is very small we don't allow any
1364 /// iterations to execute in the scalar loop.
1365 EpilogueLowering EpilogueLoweringStatus = CM_EpilogueAllowed;
1366
1367 /// Control finally chosen tail folding style.
1368 TailFoldingStyle ChosenTailFoldingStyle = TailFoldingStyle::None;
1369
1370 /// A map holding scalar costs for different vectorization factors. The
1371 /// presence of a cost for an instruction in the mapping indicates that the
1372 /// instruction will be scalarized when vectorizing with the associated
1373 /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1374 DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1375
1376 /// Holds the instructions known to be uniform after vectorization.
1377 /// The data is collected per VF.
1378 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1379
1380 /// Holds the instructions known to be scalar after vectorization.
1381 /// The data is collected per VF.
1382 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1383
1384 /// Holds the instructions (address computations) that are forced to be
1385 /// scalarized.
1387
1388 /// Returns the expected difference in cost from scalarizing the expression
1389 /// feeding a predicated instruction \p PredInst. The instructions to
1390 /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1391 /// non-negative return value implies the expression will be scalarized.
1392 /// Currently, only single-use chains are considered for scalarization.
1393 InstructionCost computePredInstDiscount(Instruction *PredInst,
1394 ScalarCostsTy &ScalarCosts,
1395 ElementCount VF);
1396
1397 /// Collect the instructions that are uniform after vectorization. An
1398 /// instruction is uniform if we represent it with a single scalar value in
1399 /// the vectorized loop corresponding to each vector iteration. Examples of
1400 /// uniform instructions include pointer operands of consecutive or
1401 /// interleaved memory accesses. Note that although uniformity implies an
1402 /// instruction will be scalar, the reverse is not true. In general, a
1403 /// scalarized instruction will be represented by VF scalar values in the
1404 /// vectorized loop, each corresponding to an iteration of the original
1405 /// scalar loop.
1406 void collectLoopUniforms(ElementCount VF);
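// For example (editor's sketch): in a loop computing a[i] = b[i] + 1, the
// address computation feeding a consecutive wide load is uniform -- one
// scalar GEP serves the whole vector iteration -- whereas a scalarized
// predicated store is replicated, producing VF scalar instances per vector
// iteration.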
1407
1408 /// Collect the instructions that are scalar after vectorization. An
1409 /// instruction is scalar if it is known to be uniform or will be scalarized
1410 /// during vectorization. collectLoopScalars should only add non-uniform nodes
1411 /// to the list if they are used by a load/store instruction that is marked as
1412 /// CM_Scalarize. Non-uniform scalarized instructions will be represented by
1413 /// VF values in the vectorized loop, each corresponding to an iteration of
1414 /// the original scalar loop.
1415 void collectLoopScalars(ElementCount VF);
1416
1417 /// Keeps cost model vectorization decision and cost for instructions.
1418 /// Right now it is used for memory instructions only.
1420 std::pair<InstWidening, InstructionCost>>;
1421
1422 DecisionList WideningDecisions;
1423
1424 using CallDecisionList =
1425 DenseMap<std::pair<CallInst *, ElementCount>, CallWideningDecision>;
1426
1427 CallDecisionList CallWideningDecisions;
1428
1429 /// Returns true if \p V is expected to be vectorized and it needs to be
1430 /// extracted.
1431 bool needsExtract(Value *V, ElementCount VF) const {
1433 if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1434 TheLoop->isLoopInvariant(I) ||
1435 getWideningDecision(I, VF) == CM_Scalarize ||
1436 (isa<CallInst>(I) &&
1437 getCallWideningDecision(cast<CallInst>(I), VF).Kind == CM_Scalarize))
1438 return false;
1439
1440 // Assume we can vectorize V (and hence we need extraction) if the
1441 // scalars are not computed yet. This can happen, because it is called
1442 // via getScalarizationOverhead from setCostBasedWideningDecision, before
1443 // the scalars are collected. That should be a safe assumption in most
1444 // cases, because we check if the operands have vectorizable types
1445 // beforehand in LoopVectorizationLegality.
1446 return !Scalars.contains(VF) || !isScalarAfterVectorization(I, VF);
1447 };
1448
1449 /// Returns a range containing only operands needing to be extracted.
1450 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1451 ElementCount VF) const {
1452
1453 SmallPtrSet<const Value *, 4> UniqueOperands;
1454 SmallVector<Value *, 4> Res;
1455 for (Value *Op : Ops) {
1456 if (isa<Constant>(Op) || !UniqueOperands.insert(Op).second ||
1457 !needsExtract(Op, VF))
1458 continue;
1459 Res.push_back(Op);
1460 }
1461 return Res;
1462 }
1463
1464public:
1465 /// The loop that we evaluate.
1467
1468 /// Predicated scalar evolution analysis.
1470
1471 /// Loop Info analysis.
1473
1474 /// Vectorization legality.
1476
1477 /// Vector target information.
1479
1480 /// Target Library Info.
1482
1483 /// Assumption cache.
1485
1486 /// Interface to emit optimization remarks.
1488
1489 /// A function to lazily fetch BlockFrequencyInfo. This avoids computing it
1490 /// unless necessary, e.g. when the loop isn't legal to vectorize or when
1491 /// there is no predication.
1492 std::function<BlockFrequencyInfo &()> GetBFI;
1493 /// The BlockFrequencyInfo returned from GetBFI.
1495 /// Returns the BlockFrequencyInfo for the function if cached, otherwise
1496 /// fetches it via GetBFI. Avoids an indirect call to the std::function.
1498 if (!BFI)
1499 BFI = &GetBFI();
1500 return *BFI;
1501 }
1502
1504
1505 /// Loop Vectorize Hint.
1507
1508 /// The interleave access information contains groups of interleaved accesses
1509 /// with the same stride and close to each other.
1511
1512 /// Values to ignore in the cost model.
1514
1515 /// Values to ignore in the cost model when VF > 1.
1517};
1518} // end namespace llvm
1519
1520namespace {
1521/// Helper struct to manage generating runtime checks for vectorization.
1522///
1523/// The runtime checks are created up-front in temporary blocks, un-linked
1524/// from the existing IR, to allow better estimation of their cost. After deciding to
1525/// vectorize, the checks are moved back. If deciding not to vectorize, the
1526/// temporary blocks are completely removed.
1527class GeneratedRTChecks {
1528 /// Basic block which contains the generated SCEV checks, if any.
1529 BasicBlock *SCEVCheckBlock = nullptr;
1530
1531 /// The value representing the result of the generated SCEV checks. If it is
1532 /// nullptr no SCEV checks have been generated.
1533 Value *SCEVCheckCond = nullptr;
1534
1535 /// Basic block which contains the generated memory runtime checks, if any.
1536 BasicBlock *MemCheckBlock = nullptr;
1537
1538 /// The value representing the result of the generated memory runtime checks.
1539 /// If it is nullptr no memory runtime checks have been generated.
1540 Value *MemRuntimeCheckCond = nullptr;
1541
1542 DominatorTree *DT;
1543 LoopInfo *LI;
1544 TargetTransformInfo *TTI;
1545
1546 SCEVExpander SCEVExp;
1547 SCEVExpander MemCheckExp;
1548
1549 bool CostTooHigh = false;
1550
1551 Loop *OuterLoop = nullptr;
1552
1553 PredicatedScalarEvolution &PSE;
1554
1555 /// The kind of cost that we are calculating
1556 TargetTransformInfo::TargetCostKind CostKind;
1557
1558public:
1559 GeneratedRTChecks(PredicatedScalarEvolution &PSE, DominatorTree *DT,
1560 LoopInfo *LI, TargetTransformInfo *TTI,
1561 TargetTransformInfo::TargetCostKind CostKind)
1562 : DT(DT), LI(LI), TTI(TTI),
1563 SCEVExp(*PSE.getSE(), "scev.check", /*PreserveLCSSA=*/false),
1564 MemCheckExp(*PSE.getSE(), "scev.check", /*PreserveLCSSA=*/false),
1565 PSE(PSE), CostKind(CostKind) {}
1566
1567 /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
1568 /// accurately estimate the cost of the runtime checks. The blocks are
1569 /// un-linked from the IR and are added back during vector code generation. If
1570 /// there is no vector code generation, the check blocks are removed
1571 /// completely.
1572 void create(Loop *L, const LoopAccessInfo &LAI,
1573 const SCEVPredicate &UnionPred, ElementCount VF, unsigned IC,
1574 OptimizationRemarkEmitter &ORE) {
1575
1576 // Hard cutoff to limit compile-time increase in case a very large number of
1577 // runtime checks need to be generated.
1578 // TODO: Skip cutoff if the loop is guaranteed to execute, e.g. due to
1579 // profile info.
1580 CostTooHigh =
1581 LAI.getNumRuntimePointerChecks() > VectorizeMemoryCheckThreshold;
1582 if (CostTooHigh) {
1583 // Mark runtime checks as never succeeding when they exceed the threshold.
1584 MemRuntimeCheckCond = ConstantInt::getTrue(L->getHeader()->getContext());
1585 SCEVCheckCond = ConstantInt::getTrue(L->getHeader()->getContext());
1586 ORE.emit([&]() {
1587 return OptimizationRemarkAnalysisAliasing(
1588 DEBUG_TYPE, "TooManyMemoryRuntimeChecks", L->getStartLoc(),
1589 L->getHeader())
1590 << "loop not vectorized: too many memory checks needed";
1591 });
1592 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
1593 return;
1594 }
1595
1596 BasicBlock *LoopHeader = L->getHeader();
1597 BasicBlock *Preheader = L->getLoopPreheader();
1598
1599 // Use SplitBlock to create blocks for SCEV & memory runtime checks to
1600 // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
1601 // may be used by SCEVExpander. The blocks will be un-linked from their
1602 // predecessors and removed from LI & DT at the end of the function.
1603 if (!UnionPred.isAlwaysTrue()) {
1604 SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
1605 nullptr, "vector.scevcheck");
1606
1607 SCEVCheckCond = SCEVExp.expandCodeForPredicate(
1608 &UnionPred, SCEVCheckBlock->getTerminator());
1609 if (isa<Constant>(SCEVCheckCond)) {
1610 // Clean up directly after expanding the predicate to a constant, to
1611 // avoid further expansions re-using anything left over from SCEVExp.
1612 SCEVExpanderCleaner SCEVCleaner(SCEVExp);
1613 SCEVCleaner.cleanup();
1614 }
1615 }
1616
1617 const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
1618 if (RtPtrChecking.Need) {
1619 auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
1620 MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
1621 "vector.memcheck");
1622
1623 auto DiffChecks = RtPtrChecking.getDiffChecks();
1624 if (DiffChecks) {
1625 Value *RuntimeVF = nullptr;
1626 MemRuntimeCheckCond = addDiffRuntimeChecks(
1627 MemCheckBlock->getTerminator(), *DiffChecks, MemCheckExp,
1628 [VF, &RuntimeVF](IRBuilderBase &B, unsigned Bits) {
1629 if (!RuntimeVF)
1630 RuntimeVF = getRuntimeVF(B, B.getIntNTy(Bits), VF);
1631 return RuntimeVF;
1632 },
1633 IC);
1634 } else {
1635 MemRuntimeCheckCond = addRuntimeChecks(
1636 MemCheckBlock->getTerminator(), L, RtPtrChecking.getChecks(),
1637 MemCheckExp, VectorizerParams::HoistRuntimeChecks);
1638 }
1639 assert(MemRuntimeCheckCond &&
1640 "no RT checks generated although RtPtrChecking "
1641 "claimed checks are required");
1642 }
1643
1644 SCEVExp.eraseDeadInstructions(SCEVCheckCond);
1645
1646 if (!MemCheckBlock && !SCEVCheckBlock)
1647 return;
1648
1649 // Unhook the temporary blocks containing the checks and update the
1650 // various affected places accordingly.
1651 if (SCEVCheckBlock)
1652 SCEVCheckBlock->replaceAllUsesWith(Preheader);
1653 if (MemCheckBlock)
1654 MemCheckBlock->replaceAllUsesWith(Preheader);
1655
1656 if (SCEVCheckBlock) {
1657 SCEVCheckBlock->getTerminator()->moveBefore(
1658 Preheader->getTerminator()->getIterator());
1659 auto *UI = new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
1660 UI->setDebugLoc(DebugLoc::getTemporary());
1661 Preheader->getTerminator()->eraseFromParent();
1662 }
1663 if (MemCheckBlock) {
1664 MemCheckBlock->getTerminator()->moveBefore(
1665 Preheader->getTerminator()->getIterator());
1666 auto *UI = new UnreachableInst(Preheader->getContext(), MemCheckBlock);
1667 UI->setDebugLoc(DebugLoc::getTemporary());
1668 Preheader->getTerminator()->eraseFromParent();
1669 }
1670
1671 DT->changeImmediateDominator(LoopHeader, Preheader);
1672 if (MemCheckBlock) {
1673 DT->eraseNode(MemCheckBlock);
1674 LI->removeBlock(MemCheckBlock);
1675 }
1676 if (SCEVCheckBlock) {
1677 DT->eraseNode(SCEVCheckBlock);
1678 LI->removeBlock(SCEVCheckBlock);
1679 }
1680
1681 // Outer loop is used as part of the later cost calculations.
1682 OuterLoop = L->getParentLoop();
1683 }
1684
1685 InstructionCost getCost() {
1686 if (SCEVCheckBlock || MemCheckBlock)
1687 LLVM_DEBUG(dbgs() << "Calculating cost of runtime checks:\n");
1688
1689 if (CostTooHigh) {
1690 InstructionCost Cost;
1691 Cost.setInvalid();
1692 LLVM_DEBUG(dbgs() << " number of checks exceeded threshold\n");
1693 return Cost;
1694 }
1695
1696 InstructionCost RTCheckCost = 0;
1697 if (SCEVCheckBlock)
1698 for (Instruction &I : *SCEVCheckBlock) {
1699 if (SCEVCheckBlock->getTerminator() == &I)
1700 continue;
1701 InstructionCost C = TTI->getInstructionCost(&I, CostKind);
1702 LLVM_DEBUG(dbgs() << " " << C << " for " << I << "\n");
1703 RTCheckCost += C;
1704 }
1705 if (MemCheckBlock) {
1706 InstructionCost MemCheckCost = 0;
1707 for (Instruction &I : *MemCheckBlock) {
1708 if (MemCheckBlock->getTerminator() == &I)
1709 continue;
1710 InstructionCost C = TTI->getInstructionCost(&I, CostKind);
1711 LLVM_DEBUG(dbgs() << " " << C << " for " << I << "\n");
1712 MemCheckCost += C;
1713 }
1714
1715 // If the runtime memory checks are being created inside an outer loop
1716 // we should find out if these checks are outer loop invariant. If so,
1717 // the checks will likely be hoisted out and so the effective cost will
1718 // reduce according to the outer loop trip count.
1719 if (OuterLoop) {
1720 ScalarEvolution *SE = MemCheckExp.getSE();
1721 // TODO: If profitable, we could refine this further by analysing every
1722 // individual memory check, since there could be a mixture of loop
1723 // variant and invariant checks that mean the final condition is
1724 // variant.
1725 const SCEV *Cond = SE->getSCEV(MemRuntimeCheckCond);
1726 if (SE->isLoopInvariant(Cond, OuterLoop)) {
1727 // It seems reasonable to assume that we can reduce the effective
1728 // cost of the checks even when we know nothing about the trip
1729 // count. Assume that the outer loop executes at least twice.
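 // For example, with a hypothetical MemCheckCost of 8 and an estimated
 // outer-loop trip count of 4, the discounted cost below would be
 // max(8 / 4, 1) = 2.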
1730 unsigned BestTripCount = 2;
1731
1732 // Get the best known TC estimate.
1733 if (auto EstimatedTC = getSmallBestKnownTC(
1734 PSE, OuterLoop, /* CanUseConstantMax = */ false))
1735 if (EstimatedTC->isFixed())
1736 BestTripCount = EstimatedTC->getFixedValue();
1737
1738 InstructionCost NewMemCheckCost = MemCheckCost / BestTripCount;
1739
1740 // Let's ensure the cost is always at least 1.
1741 NewMemCheckCost = std::max(NewMemCheckCost.getValue(),
1742 (InstructionCost::CostType)1);
1743
1744 if (BestTripCount > 1)
1746 << "We expect runtime memory checks to be hoisted "
1747 << "out of the outer loop. Cost reduced from "
1748 << MemCheckCost << " to " << NewMemCheckCost << '\n');
1749
1750 MemCheckCost = NewMemCheckCost;
1751 }
1752 }
1753
1754 RTCheckCost += MemCheckCost;
1755 }
1756
1757 if (SCEVCheckBlock || MemCheckBlock)
1758 LLVM_DEBUG(dbgs() << "Total cost of runtime checks: " << RTCheckCost
1759 << "\n");
1760
1761 return RTCheckCost;
1762 }
1763
1764 /// Remove the created SCEV & memory runtime check blocks & instructions, if
1765 /// unused.
1766 ~GeneratedRTChecks() {
1767 SCEVExpanderCleaner SCEVCleaner(SCEVExp);
1768 SCEVExpanderCleaner MemCheckCleaner(MemCheckExp);
1769 bool SCEVChecksUsed = !SCEVCheckBlock || !pred_empty(SCEVCheckBlock);
1770 bool MemChecksUsed = !MemCheckBlock || !pred_empty(MemCheckBlock);
1771 if (SCEVChecksUsed)
1772 SCEVCleaner.markResultUsed();
1773
1774 if (MemChecksUsed) {
1775 MemCheckCleaner.markResultUsed();
1776 } else {
1777 auto &SE = *MemCheckExp.getSE();
1778 // Memory runtime check generation creates compares that use expanded
1779 // values. Remove them before running the SCEVExpanderCleaners.
1780 for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
1781 if (MemCheckExp.isInsertedInstruction(&I))
1782 continue;
1783 SE.forgetValue(&I);
1784 I.eraseFromParent();
1785 }
1786 }
1787 MemCheckCleaner.cleanup();
1788 SCEVCleaner.cleanup();
1789
1790 if (!SCEVChecksUsed)
1791 SCEVCheckBlock->eraseFromParent();
1792 if (!MemChecksUsed)
1793 MemCheckBlock->eraseFromParent();
1794 }
1795
1796 /// Retrieves the SCEVCheckCond and SCEVCheckBlock that were generated as IR
1797 /// outside VPlan.
1798 std::pair<Value *, BasicBlock *> getSCEVChecks() const {
1799 using namespace llvm::PatternMatch;
1800 if (!SCEVCheckCond || match(SCEVCheckCond, m_ZeroInt()))
1801 return {nullptr, nullptr};
1802
1803 return {SCEVCheckCond, SCEVCheckBlock};
1804 }
1805
1806 /// Retrieves the MemCheckCond and MemCheckBlock that were generated as IR
1807 /// outside VPlan.
1808 std::pair<Value *, BasicBlock *> getMemRuntimeChecks() const {
1809 using namespace llvm::PatternMatch;
1810 if (MemRuntimeCheckCond && match(MemRuntimeCheckCond, m_ZeroInt()))
1811 return {nullptr, nullptr};
1812 return {MemRuntimeCheckCond, MemCheckBlock};
1813 }
1814
1815 /// Return true if any runtime checks have been added.
1816 bool hasChecks() const {
1817 return getSCEVChecks().first || getMemRuntimeChecks().first;
1818 }
1819};
1820} // namespace
1821
1822static bool useActiveLaneMask(TailFoldingStyle Style) {
1823 return Style == TailFoldingStyle::Data ||
1824 Style == TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck;
1825}
1826
1830
1831// Return true if \p OuterLp is an outer loop annotated with hints for explicit
1832// vectorization. The loop needs to be annotated with #pragma omp simd
1833// simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
1834// vector length information is not provided, vectorization is not considered
1835// explicit. Interleave hints are not allowed either. These limitations will be
1836// relaxed in the future.
1837// Please note that we are currently forced to abuse the pragma 'clang
1838// vectorize' semantics. This pragma provides *auto-vectorization hints*
1839// (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
1840// provides *explicit vectorization hints* (LV can bypass legal checks and
1841// assume that vectorization is legal). However, both hints are implemented
1842// using the same metadata (llvm.loop.vectorize, processed by
1843// LoopVectorizeHints). This will be fixed in the future when the native IR
1844// representation for pragma 'omp simd' is introduced.
1845static bool isExplicitVecOuterLoop(Loop *OuterLp,
1846 OptimizationRemarkEmitter *ORE) {
1847 assert(!OuterLp->isInnermost() && "This is not an outer loop");
1848 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
1849
1850 // Only outer loops with an explicit vectorization hint are supported.
1851 // Unannotated outer loops are ignored.
1852 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
1853 return false;
1854
1855 Function *Fn = OuterLp->getHeader()->getParent();
1856 if (!Hints.allowVectorization(Fn, OuterLp,
1857 true /*VectorizeOnlyWhenForced*/)) {
1858 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
1859 return false;
1860 }
1861
1862 if (Hints.getInterleave() > 1) {
1863 // TODO: Interleave support is future work.
1864 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
1865 "outer loops.\n");
1866 Hints.emitRemarkWithHints();
1867 return false;
1868 }
1869
1870 return true;
1871}
1872
1873static void collectSupportedLoops(Loop &L, LoopInfo *LI,
1874 OptimizationRemarkEmitter *ORE,
1875 SmallVectorImpl<Loop *> &V) {
1876 // Collect inner loops and outer loops without irreducible control flow. For
1877 // now, only collect outer loops that have explicit vectorization hints. If we
1878 // are stress testing the VPlan H-CFG construction, we collect the outermost
1879 // loop of every loop nest.
1880 if (L.isInnermost() || VPlanBuildStressTest ||
1881 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
1882 LoopBlocksRPO RPOT(&L);
1883 RPOT.perform(LI);
1884 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
1885 V.push_back(&L);
1886 // TODO: Collect inner loops inside marked outer loops in case
1887 // vectorization fails for the outer loop. Do not invoke
1888 // 'containsIrreducibleCFG' again for inner loops when the outer loop is
1889 // already known to be reducible. We can use an inherited attribute for
1890 // that.
1891 return;
1892 }
1893 }
1894 for (Loop *InnerL : L)
1895 collectSupportedLoops(*InnerL, LI, ORE, V);
1896}
1897
1898//===----------------------------------------------------------------------===//
1899// Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
1900// LoopVectorizationCostModel and LoopVectorizationPlanner.
1901//===----------------------------------------------------------------------===//
1902
1903/// For the given VF and UF and maximum trip count computed for the loop, return
1904/// whether the induction variable might overflow in the vectorized loop. If not,
1905/// then we know a runtime overflow check always evaluates to false and can be
1906/// removed.
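/// For example, with a hypothetical i8 induction variable, a maximum trip
/// count of 200 and VF * UF = 8, (255 - 200) = 55 > 8 holds, so the wide
/// induction increment cannot wrap and the overflow check is known false.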
1907static bool isIndvarOverflowCheckKnownFalse(
1908 const LoopVectorizationCostModel *Cost,
1909 ElementCount VF, std::optional<unsigned> UF = std::nullopt) {
1910 // Always be conservative if we don't know the exact unroll factor.
1911 unsigned MaxUF = UF ? *UF : Cost->TTI.getMaxInterleaveFactor(VF);
1912
1913 IntegerType *IdxTy = Cost->Legal->getWidestInductionType();
1914 APInt MaxUIntTripCount = IdxTy->getMask();
1915
1916 // The runtime overflow check is known false iff the (max) trip-count
1917 // is known and (max) trip-count + (VF * UF) does not overflow in the type of
1918 // the vector loop induction variable.
1919 if (unsigned TC = Cost->PSE.getSmallConstantMaxTripCount()) {
1920 uint64_t MaxVF = VF.getKnownMinValue();
1921 if (VF.isScalable()) {
1922 std::optional<unsigned> MaxVScale =
1923 getMaxVScale(*Cost->TheFunction, Cost->TTI);
1924 if (!MaxVScale)
1925 return false;
1926 MaxVF *= *MaxVScale;
1927 }
1928
1929 return (MaxUIntTripCount - TC).ugt(MaxVF * MaxUF);
1930 }
1931
1932 return false;
1933}
1934
1935// Return whether we allow using masked interleave-groups (for dealing with
1936// strided loads/stores that reside in predicated blocks, or for dealing
1937// with gaps).
1938static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
1939 // If an override option has been passed in for interleaved accesses, use it.
1940 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
1941 return EnableMaskedInterleavedMemAccesses;
1942
1943 return TTI.enableMaskedInterleavedAccessVectorization();
1944}
1945
1946/// Replace \p VPBB with a VPIRBasicBlock wrapping \p IRBB. All recipes from \p
1947/// VPBB are moved to the end of the newly created VPIRBasicBlock. All
1948/// predecessors and successors of VPBB, if any, are rewired to the new
1949/// VPIRBasicBlock. If \p VPBB may be unreachable, \p Plan must be passed.
1950static VPIRBasicBlock *replaceVPBBWithIRVPBB(VPBasicBlock *VPBB,
1951 BasicBlock *IRBB,
1952 VPlan *Plan = nullptr) {
1953 if (!Plan)
1954 Plan = VPBB->getPlan();
1955 VPIRBasicBlock *IRVPBB = Plan->createVPIRBasicBlock(IRBB);
1956 auto IP = IRVPBB->begin();
1957 for (auto &R : make_early_inc_range(VPBB->phis()))
1958 R.moveBefore(*IRVPBB, IP);
1959
1960 for (auto &R :
1961 make_early_inc_range(make_range(VPBB->getFirstNonPhi(), VPBB->end())))
1962 R.moveBefore(*IRVPBB, IRVPBB->end());
1963
1964 VPBlockUtils::reassociateBlocks(VPBB, IRVPBB);
1965 // VPBB is now dead and will be cleaned up when the plan gets destroyed.
1966 return IRVPBB;
1967}
1968
1969BasicBlock *InnerLoopVectorizer::createScalarPreheader(StringRef Prefix) {
1970 BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
1971 assert(VectorPH && "Invalid loop structure");
1972 assert((OrigLoop->getUniqueLatchExitBlock() ||
1973 Cost->requiresScalarEpilogue(VF.isVector())) &&
1974 "loops not exiting via the latch without required epilogue?");
1975
1976 // NOTE: The Plan's scalar preheader VPBB isn't replaced with a VPIRBasicBlock
1977 // wrapping the newly created scalar preheader here at the moment, because the
1978 // Plan's scalar preheader may be unreachable at this point. Instead it is
1979 // replaced in executePlan.
1980 return SplitBlock(VectorPH, VectorPH->getTerminator(), DT, LI, nullptr,
1981 Twine(Prefix) + "scalar.ph");
1982}
1983
1984/// Knowing that loop \p L executes a single vector iteration, add instructions
1985/// that will get simplified and thus should not have any cost to \p
1986/// InstsToIgnore.
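/// For example, if the only users of the increment %iv.next = add i64 %iv, 1
/// are the induction phi and the latch compare, both the increment and the
/// compare fold away once the loop runs a single vector iteration, so both
/// are added to \p InstsToIgnore.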
1987static void addFullyUnrolledInstructionsToIgnore(
1988 Loop *L, const LoopVectorizationLegality::InductionList &IL,
1989 SmallPtrSetImpl<Instruction *> &InstsToIgnore) {
1990 auto *Cmp = L->getLatchCmpInst();
1991 if (Cmp)
1992 InstsToIgnore.insert(Cmp);
1993 for (const auto &KV : IL) {
1994 // Extract the key by hand so that it can be used in the lambda below. Note
1995 // that captured structured bindings are a C++20 extension.
1996 const PHINode *IV = KV.first;
1997
1998 // Get next iteration value of the induction variable.
1999 Instruction *IVInst =
2000 cast<Instruction>(IV->getIncomingValueForBlock(L->getLoopLatch()));
2001 if (all_of(IVInst->users(),
2002 [&](const User *U) { return U == IV || U == Cmp; }))
2003 InstsToIgnore.insert(IVInst);
2004 }
2005}
2006
2007BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
2008 // Create a new IR basic block for the scalar preheader.
2009 BasicBlock *ScalarPH = createScalarPreheader("");
2010 return ScalarPH->getSinglePredecessor();
2011}
2012
2013namespace {
2014
2015struct CSEDenseMapInfo {
2016 static bool canHandle(const Instruction *I) {
2017 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
2018 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
2019 }
2020
2021 static inline Instruction *getEmptyKey() {
2022 return DenseMapInfo<Instruction *>::getEmptyKey();
2023 }
2024
2025 static inline Instruction *getTombstoneKey() {
2026 return DenseMapInfo<Instruction *>::getTombstoneKey();
2027 }
2028
2029 static unsigned getHashValue(const Instruction *I) {
2030 assert(canHandle(I) && "Unknown instruction!");
2031 return hash_combine(I->getOpcode(),
2032 hash_combine_range(I->operand_values()));
2033 }
2034
2035 static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
2036 if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
2037 LHS == getTombstoneKey() || RHS == getTombstoneKey())
2038 return LHS == RHS;
2039 return LHS->isIdenticalTo(RHS);
2040 }
2041};
2042
2043} // end anonymous namespace
2044
2045/// FIXME: This legacy common-subexpression-elimination routine is scheduled for
2046/// removal, in favor of the VPlan-based one.
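/// For example, two identical getelementptr instructions in \p BB, say
///   %g1 = getelementptr inbounds i32, ptr %p, i64 %i
///   %g2 = getelementptr inbounds i32, ptr %p, i64 %i
/// hash to the same slot, and all uses of %g2 are replaced with %g1.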
2047static void legacyCSE(BasicBlock *BB) {
2048 // Perform simple cse.
2049 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
2050 for (Instruction &In : llvm::make_early_inc_range(*BB)) {
2051 if (!CSEDenseMapInfo::canHandle(&In))
2052 continue;
2053
2054 // Check if we can replace this instruction with any of the
2055 // visited instructions.
2056 if (Instruction *V = CSEMap.lookup(&In)) {
2057 In.replaceAllUsesWith(V);
2058 In.eraseFromParent();
2059 continue;
2060 }
2061
2062 CSEMap[&In] = &In;
2063 }
2064}
2065
2066/// This function attempts to return a value that represents the ElementCount
2067/// at runtime. For fixed-width VFs we know this precisely at compile
2068/// time, but for scalable VFs we calculate it based on an estimate of the
2069/// vscale value.
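/// For example, a scalable VF of vscale x 4 with an estimated vscale of 2
/// yields an estimated element count of 8, while a fixed VF of 4 yields 4.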
2070static unsigned estimateElementCount(ElementCount VF,
2071 std::optional<unsigned> VScale) {
2072 unsigned EstimatedVF = VF.getKnownMinValue();
2073 if (VF.isScalable())
2074 if (VScale)
2075 EstimatedVF *= *VScale;
2076 assert(EstimatedVF >= 1 && "Estimated VF shouldn't be less than 1");
2077 return EstimatedVF;
2078}
2079
2080InstructionCost
2081LoopVectorizationCostModel::getVectorCallCost(CallInst *CI,
2082 ElementCount VF) const {
2083 // We only need to calculate a cost if the VF is scalar; for actual vectors
2084 // we should already have a pre-calculated cost at each VF.
2085 if (!VF.isScalar())
2086 return getCallWideningDecision(CI, VF).Cost;
2087
2088 Type *RetTy = CI->getType();
2089 if (RecurrenceDescriptor::isFMulAddIntrinsic(CI))
2090 if (auto RedCost = getReductionPatternCost(CI, VF, RetTy))
2091 return *RedCost;
2092
2093 SmallVector<Type *, 4> Tys;
2094 for (auto &ArgOp : CI->args())
2095 Tys.push_back(ArgOp->getType());
2096
2097 InstructionCost ScalarCallCost = TTI.getCallInstrCost(
2098 CI->getCalledFunction(), RetTy, Tys, Config.CostKind);
2099
2100 // If this is an intrinsic we may have a lower cost for it.
2101 if (getVectorIntrinsicIDForCall(CI, TLI)) {
2102 InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
2103 return std::min(ScalarCallCost, IntrinsicCost);
2104 }
2105 return ScalarCallCost;
2106}
2107
2108static Type *maybeVectorizeType(Type *Ty, ElementCount VF) {
2109 if (VF.isScalar() || !canVectorizeTy(Ty))
2110 return Ty;
2111 return toVectorizedTy(Ty, VF);
2112}
2113
2114InstructionCost
2115LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
2116 ElementCount VF) const {
2117 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
2118 assert(ID && "Expected intrinsic call!");
2119 Type *RetTy = maybeVectorizeType(CI->getType(), VF);
2120 FastMathFlags FMF;
2121 if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
2122 FMF = FPMO->getFastMathFlags();
2123
2124 SmallVector<const Value *> Arguments(CI->args());
2125 FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
2126 SmallVector<Type *> ParamTys;
2127 std::transform(FTy->param_begin(), FTy->param_end(),
2128 std::back_inserter(ParamTys),
2129 [&](Type *Ty) { return maybeVectorizeType(Ty, VF); });
2130
2131 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
2132 dyn_cast<IntrinsicInst>(CI));
2133
2134 return TTI.getIntrinsicInstrCost(CostAttrs, Config.CostKind);
2135}
2136
2137void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
2138 // Fix widened non-induction PHIs by setting up the PHI operands.
2139 fixNonInductionPHIs(State);
2140
2141 // Don't apply optimizations below when no (vector) loop remains, as they all
2142 // require one at the moment.
2143 VPBasicBlock *HeaderVPBB =
2144 vputils::getFirstLoopHeader(*State.Plan, State.VPDT);
2145 if (!HeaderVPBB)
2146 return;
2147
2148 BasicBlock *HeaderBB = State.CFG.VPBB2IRBB[HeaderVPBB];
2149
2150 // Remove redundant induction instructions.
2151 legacyCSE(HeaderBB);
2152}
2153
2154void InnerLoopVectorizer::fixNonInductionPHIs(VPlan &Plan, VPTransformState &State) {
2155 auto Iter = vp_depth_first_shallow(Plan.getEntry());
2156 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
2157 for (VPRecipeBase &P : VPBB->phis()) {
2158 auto *VPPhi = dyn_cast<VPWidenPHIRecipe>(&P);
2159 if (!VPPhi)
2160 continue;
2161 PHINode *NewPhi = cast<PHINode>(State.get(VPPhi));
2162 // Make sure the builder has a valid insert point.
2163 Builder.SetInsertPoint(NewPhi);
2164 for (const auto &[Inc, VPBB] : VPPhi->incoming_values_and_blocks())
2165 NewPhi->addIncoming(State.get(Inc), State.CFG.VPBB2IRBB[VPBB]);
2166 }
2167 }
2168}
2169
2170void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
2171 // We should not collect Scalars more than once per VF. Right now, this
2172 // function is called from collectUniformsAndScalars(), which already does
2173 // this check. Collecting Scalars for VF=1 does not make any sense.
2174 assert(VF.isVector() && !Scalars.contains(VF) &&
2175 "This function should not be visited twice for the same VF");
2176
2177 // This avoids any chances of creating a REPLICATE recipe during planning
2178 // since that would result in generation of scalarized code during execution,
2179 // which is not supported for scalable vectors.
2180 if (VF.isScalable()) {
2181 Scalars[VF].insert_range(Uniforms[VF]);
2182 return;
2183 }
2184
2185 SmallSetVector<Instruction *, 8> Worklist;
2186
2187 // These sets are used to seed the analysis with pointers used by memory
2188 // accesses that will remain scalar.
2189 SmallSetVector<Instruction *, 8> ScalarPtrs;
2190 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
2191 auto *Latch = TheLoop->getLoopLatch();
2192
2193 // A helper that returns true if the use of Ptr by MemAccess will be scalar.
2194 // The pointer operands of loads and stores will be scalar as long as the
2195 // memory access is not a gather or scatter operation. The value operand of a
2196 // store will remain scalar if the store is scalarized.
2197 auto IsScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
2198 InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
2199 assert(WideningDecision != CM_Unknown &&
2200 "Widening decision should be ready at this moment");
2201 if (auto *Store = dyn_cast<StoreInst>(MemAccess))
2202 if (Ptr == Store->getValueOperand())
2203 return WideningDecision == CM_Scalarize;
2204 assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
2205 "Ptr is neither a value or pointer operand");
2206 return WideningDecision != CM_GatherScatter;
2207 };
2208
2209 // A helper that returns true if the given value is a getelementptr
2210 // instruction contained in the loop.
2211 auto IsLoopVaryingGEP = [&](Value *V) {
2212 return isa<GetElementPtrInst>(V) && !TheLoop->isLoopInvariant(V);
2213 };
2214
2215 // A helper that evaluates a memory access's use of a pointer. If the use will
2216 // be a scalar use and the pointer is only used by memory accesses, we place
2217 // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
2218 // PossibleNonScalarPtrs.
2219 auto EvaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
2220 // We only care about bitcast and getelementptr instructions contained in
2221 // the loop.
2222 if (!IsLoopVaryingGEP(Ptr))
2223 return;
2224
2225 // If the pointer has already been identified as scalar (e.g., if it was
2226 // also identified as uniform), there's nothing to do.
2227 auto *I = cast<Instruction>(Ptr);
2228 if (Worklist.count(I))
2229 return;
2230
2231 // If the use of the pointer will be a scalar use, and all users of the
2232 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
2233 // place the pointer in PossibleNonScalarPtrs.
2234 if (IsScalarUse(MemAccess, Ptr) &&
2235 all_of(I->users(), IsaPred<LoadInst, StoreInst>))
2236 ScalarPtrs.insert(I);
2237 else
2238 PossibleNonScalarPtrs.insert(I);
2239 };
2240
2241 // We seed the scalars analysis with two classes of instructions: (1)
2242 // instructions marked uniform-after-vectorization and (2) bitcast,
2243 // getelementptr and (pointer) phi instructions used by memory accesses
2244 // requiring a scalar use.
2245 //
2246 // (1) Add to the worklist all instructions that have been identified as
2247 // uniform-after-vectorization.
2248 Worklist.insert_range(Uniforms[VF]);
2249
2250 // (2) Add to the worklist all bitcast and getelementptr instructions used by
2251 // memory accesses requiring a scalar use. The pointer operands of loads and
2252 // stores will be scalar unless the operation is a gather or scatter.
2253 // The value operand of a store will remain scalar if the store is scalarized.
2254 for (auto *BB : TheLoop->blocks())
2255 for (auto &I : *BB) {
2256 if (auto *Load = dyn_cast<LoadInst>(&I)) {
2257 EvaluatePtrUse(Load, Load->getPointerOperand());
2258 } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
2259 EvaluatePtrUse(Store, Store->getPointerOperand());
2260 EvaluatePtrUse(Store, Store->getValueOperand());
2261 }
2262 }
2263 for (auto *I : ScalarPtrs)
2264 if (!PossibleNonScalarPtrs.count(I)) {
2265 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
2266 Worklist.insert(I);
2267 }
2268
2269 // Insert the forced scalars.
2270 // FIXME: Currently VPWidenPHIRecipe() often creates a dead vector
2271 // induction variable when the PHI user is scalarized.
2272 auto ForcedScalar = ForcedScalars.find(VF);
2273 if (ForcedScalar != ForcedScalars.end())
2274 for (auto *I : ForcedScalar->second) {
2275 LLVM_DEBUG(dbgs() << "LV: Found (forced) scalar instruction: " << *I << "\n");
2276 Worklist.insert(I);
2277 }
2278
2279 // Expand the worklist by looking through any bitcasts and getelementptr
2280 // instructions we've already identified as scalar. This is similar to the
2281 // expansion step in collectLoopUniforms(); however, here we're only
2282 // expanding to include additional bitcasts and getelementptr instructions.
2283 unsigned Idx = 0;
2284 while (Idx != Worklist.size()) {
2285 Instruction *Dst = Worklist[Idx++];
2286 if (!IsLoopVaryingGEP(Dst->getOperand(0)))
2287 continue;
2288 auto *Src = cast<Instruction>(Dst->getOperand(0));
2289 if (llvm::all_of(Src->users(), [&](User *U) -> bool {
2290 auto *J = cast<Instruction>(U);
2291 return !TheLoop->contains(J) || Worklist.count(J) ||
2292 ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
2293 IsScalarUse(J, Src));
2294 })) {
2295 Worklist.insert(Src);
2296 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
2297 }
2298 }
2299
2300 // An induction variable will remain scalar if all users of the induction
2301 // variable and induction variable update remain scalar.
2302 for (const auto &Induction : Legal->getInductionVars()) {
2303 auto *Ind = Induction.first;
2304 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
2305
2306 // If tail-folding is applied, the primary induction variable will be used
2307 // to feed a vector compare.
2308 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
2309 continue;
2310
2311 // Returns true if \p Indvar is a pointer induction that is used directly by
2312 // load/store instruction \p I.
2313 auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar,
2314 Instruction *I) {
2315 return Induction.second.getKind() ==
2316 InductionDescriptor::IK_PtrInduction &&
2317 (isa<LoadInst>(I) || isa<StoreInst>(I)) &&
2318 Indvar == getLoadStorePointerOperand(I) && IsScalarUse(I, Indvar);
2319 };
2320
2321 // Determine if all users of the induction variable are scalar after
2322 // vectorization.
2323 bool ScalarInd = all_of(Ind->users(), [&](User *U) -> bool {
2324 auto *I = cast<Instruction>(U);
2325 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
2326 IsDirectLoadStoreFromPtrIndvar(Ind, I);
2327 });
2328 if (!ScalarInd)
2329 continue;
2330
2331 // If the induction variable update is a fixed-order recurrence, neither the
2332 // induction variable nor its update should be marked scalar after
2333 // vectorization.
2334 auto *IndUpdatePhi = dyn_cast<PHINode>(IndUpdate);
2335 if (IndUpdatePhi && Legal->isFixedOrderRecurrence(IndUpdatePhi))
2336 continue;
2337
2338 // Determine if all users of the induction variable update instruction are
2339 // scalar after vectorization.
2340 bool ScalarIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
2341 auto *I = cast<Instruction>(U);
2342 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
2343 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
2344 });
2345 if (!ScalarIndUpdate)
2346 continue;
2347
2348 // The induction variable and its update instruction will remain scalar.
2349 Worklist.insert(Ind);
2350 Worklist.insert(IndUpdate);
2351 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
2352 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
2353 << "\n");
2354 }
2355
2356 Scalars[VF].insert_range(Worklist);
2357}
2358
2359bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I,
2360 ElementCount VF) {
2361 if (!isPredicatedInst(I))
2362 return false;
2363
2364 // Do we have a non-scalar lowering for this predicated instruction?
2365 // If not, it is scalar with predication.
2366 switch (I->getOpcode()) {
2367 default:
2368 return true;
2369 case Instruction::Call:
2370 if (VF.isScalar())
2371 return true;
2373 case Instruction::Load:
2374 case Instruction::Store: {
2375 return !Config.isLegalMaskedLoadOrStore(I, VF) &&
2376 !Config.isLegalGatherOrScatter(I, VF);
2377 }
2378 case Instruction::UDiv:
2379 case Instruction::SDiv:
2380 case Instruction::SRem:
2381 case Instruction::URem: {
2382 // We have the option to use the safe-divisor idiom to avoid predication.
2383 // The cost based decision here will always select safe-divisor for
2384 // scalable vectors as scalarization isn't legal.
2385 const auto [ScalarCost, SafeDivisorCost] = getDivRemSpeculationCost(I, VF);
2386 return isDivRemScalarWithPredication(ScalarCost, SafeDivisorCost);
2387 }
2388 }
2389}
2390
2391bool LoopVectorizationCostModel::isMaskRequired(Instruction *I) const {
2392 return Legal->isMaskRequired(I, foldTailByMasking());
2393}
2394
2395// TODO: Fold into LoopVectorizationLegality::isMaskRequired.
2396bool LoopVectorizationCostModel::isPredicatedInst(Instruction *I) const {
2397 // TODO: We can use the loop-preheader as context point here and get
2398 // context sensitive reasoning for isSafeToSpeculativelyExecute.
2399 if (isSafeToSpeculativelyExecute(I) ||
2400 (isa<LoadInst, StoreInst, CallInst>(I) && !isMaskRequired(I)) ||
2401 isa<BranchInst, SwitchInst, PHINode, AllocaInst>(I))
2402 return false;
2403
2404 // If the instruction was executed conditionally in the original scalar loop,
2405 // predication is needed with a mask whose lanes are all possibly inactive.
2406 if (Legal->blockNeedsPredication(I->getParent()))
2407 return true;
2408
2409 // If we're not folding the tail by masking, predication is unnecessary.
2410 if (!foldTailByMasking())
2411 return false;
2412
2413 // All that remain are instructions with side-effects originally executed in
2414 // the loop unconditionally, but now execute under a tail-fold mask (only)
2415 // having at least one active lane (the first). If the side-effects of the
2416 // instruction are invariant, executing it w/o (the tail-folding) mask is safe
2417 // - it will cause the same side-effects as when masked.
2418 switch (I->getOpcode()) {
2419 default:
2421 "instruction should have been considered by earlier checks");
2422 case Instruction::Call:
2423 // Side-effects of a Call are assumed to be non-invariant, needing a
2424 // (fold-tail) mask.
2426 "should have returned earlier for calls not needing a mask");
2427 return true;
2428 case Instruction::Load:
2429 // If the address is loop invariant no predication is needed.
2430 return !Legal->isInvariant(getLoadStorePointerOperand(I));
2431 case Instruction::Store: {
2432 // For stores, we need to prove both speculation safety (which follows
2433 // from the same argument as loads) and that the value being stored is
2434 // correct. The easiest form of the latter is to require that all values
2435 // stored are the same.
2436 return !(Legal->isInvariant(getLoadStorePointerOperand(I)) &&
2437 TheLoop->isLoopInvariant(cast<StoreInst>(I)->getValueOperand()));
2438 }
2439 case Instruction::UDiv:
2440 case Instruction::URem:
2441 // If the divisor is loop-invariant no predication is needed.
2442 return !Legal->isInvariant(I->getOperand(1));
2443 case Instruction::SDiv:
2444 case Instruction::SRem:
2445 // Conservative for now, since masked-off lanes may be poison and could
2446 // trigger signed overflow.
2447 return true;
2448 }
2449}
2450
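 // For example, if the loop header has a block frequency of 100 and the
 // predicated block a frequency of 25, the divisor below evaluates to 4,
 // i.e. the block is assumed to execute on every fourth iteration.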
2451unsigned LoopVectorizationCostModel::getPredBlockCostDivisor(
2452 TTI::TargetCostKind CostKind, BasicBlock *BB) const {
2453 if (CostKind == TTI::TCK_CodeSize)
2454 return 1;
2455 // If the block wasn't originally predicated then return early to avoid
2456 // computing BlockFrequencyInfo unnecessarily.
2457 if (!Legal->blockNeedsPredication(BB))
2458 return 1;
2459
2460 uint64_t HeaderFreq =
2461 getBFI().getBlockFreq(TheLoop->getHeader()).getFrequency();
2462 uint64_t BBFreq = getBFI().getBlockFreq(BB).getFrequency();
2463 assert(HeaderFreq >= BBFreq &&
2464 "Header has smaller block freq than dominated BB?");
2465 return std::round((double)HeaderFreq / BBFreq);
2466}
2467
2468std::pair<InstructionCost, InstructionCost>
2470 ElementCount VF) {
2471 assert(I->getOpcode() == Instruction::UDiv ||
2472 I->getOpcode() == Instruction::SDiv ||
2473 I->getOpcode() == Instruction::SRem ||
2474 I->getOpcode() == Instruction::URem);
2475 assert(!isSafeToSpeculativelyExecute(I));
2476
2477 // Scalarization isn't legal for scalable vector types
2478 InstructionCost ScalarizationCost = InstructionCost::getInvalid();
2479 if (!VF.isScalable()) {
2480 // Get the scalarization cost and scale this amount by the probability of
2481 // executing the predicated block. If the instruction is not predicated,
2482 // we fall through to the next case.
2483 ScalarizationCost = 0;
2484
2485 // These instructions have a non-void type, so account for the phi nodes
2486 // that we will create. This cost is likely to be zero. The phi node
2487 // cost, if any, should be scaled by the block probability because it
2488 // models a copy at the end of each predicated block.
2489 ScalarizationCost += VF.getFixedValue() *
2490 TTI.getCFInstrCost(Instruction::PHI, Config.CostKind);
2491
2492 // The cost of the non-predicated instruction.
2493 ScalarizationCost +=
2494 VF.getFixedValue() * TTI.getArithmeticInstrCost(
2495 I->getOpcode(), I->getType(), Config.CostKind);
2496
2497 // The cost of insertelement and extractelement instructions needed for
2498 // scalarization.
2499 ScalarizationCost += getScalarizationOverhead(I, VF);
2500
2501 // Scale the cost by the probability of executing the predicated blocks.
2502 // This assumes the predicated block for each vector lane is equally
2503 // likely.
2504 ScalarizationCost =
2505 ScalarizationCost /
2506 getPredBlockCostDivisor(Config.CostKind, I->getParent());
2507 }
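 // For example, with VF = 4, a hypothetical per-lane divide cost of 20, a
 // phi cost of 0, a scalarization overhead of 8 and a predicated-block
 // divisor of 2, the scalarization cost is (4 * 0 + 4 * 20 + 8) / 2 = 44.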
2508
2509 InstructionCost SafeDivisorCost = 0;
2510 auto *VecTy = toVectorTy(I->getType(), VF);
2511 // The cost of the select guard to ensure all lanes are well defined
2512 // after we speculate above any internal control flow.
2513 SafeDivisorCost +=
2514 TTI.getCmpSelInstrCost(Instruction::Select, VecTy,
2515 toVectorTy(Type::getInt1Ty(I->getContext()), VF),
2516 CmpInst::BAD_ICMP_PREDICATE, Config.CostKind);
2517
2518 SmallVector<const Value *, 4> Operands(I->operand_values());
2519 SafeDivisorCost += TTI.getArithmeticInstrCost(
2520 I->getOpcode(), VecTy, Config.CostKind,
2521 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
2522 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
2523 Operands, I);
2524 return {ScalarizationCost, SafeDivisorCost};
2525}
2526
2527bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
2528 Instruction *I, ElementCount VF) const {
2529 assert(isAccessInterleaved(I) && "Expecting interleaved access.");
2531 "Decision should not be set yet.");
2532 auto *Group = getInterleavedAccessGroup(I);
2533 assert(Group && "Must have a group.");
2534 unsigned InterleaveFactor = Group->getFactor();
2535
2536 // If the instruction's allocated size doesn't equal its type size, it
2537 // requires padding and will be scalarized.
2538 auto &DL = I->getDataLayout();
2539 auto *ScalarTy = getLoadStoreType(I);
2540 if (hasIrregularType(ScalarTy, DL))
2541 return false;
2542
2543 // For scalable vectors, the interleave factors must be <= 8 since we require
2544 // the (de)interleaveN intrinsics instead of shufflevectors.
2545 if (VF.isScalable() && InterleaveFactor > 8)
2546 return false;
2547
2548 // If the group involves a non-integral pointer, we may not be able to
2549 // losslessly cast all values to a common type.
2550 bool ScalarNI = DL.isNonIntegralPointerType(ScalarTy);
2551 for (Instruction *Member : Group->members()) {
2552 auto *MemberTy = getLoadStoreType(Member);
2553 bool MemberNI = DL.isNonIntegralPointerType(MemberTy);
2554 // Don't coerce non-integral pointers to integers or vice versa.
2555 if (MemberNI != ScalarNI)
2556 // TODO: Consider adding special nullptr value case here
2557 return false;
2558 if (MemberNI && ScalarNI &&
2559 ScalarTy->getPointerAddressSpace() !=
2560 MemberTy->getPointerAddressSpace())
2561 return false;
2562 }
2563
2564 // Check if masking is required.
2565 // A Group may need masking for one of two reasons: it resides in a block that
2566 // needs predication, or it was decided to use masking to deal with gaps
2567 // (either a gap at the end of a load-access that may result in a speculative
2568 // load, or any gaps in a store-access).
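 // For example, a group for a stride-2 store that writes only the even
 // elements has a gap in every odd element, so the wide store must be
 // masked to avoid writing the skipped lanes.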
2569 bool PredicatedAccessRequiresMasking =
2570 Legal->blockNeedsPredication(I->getParent()) && isMaskRequired(I);
2571 bool LoadAccessWithGapsRequiresEpilogMasking =
2572 isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
2573 !isScalarEpilogueAllowed();
2574 bool StoreAccessWithGapsRequiresMasking =
2575 isa<StoreInst>(I) && !Group->isFull();
2576 if (!PredicatedAccessRequiresMasking &&
2577 !LoadAccessWithGapsRequiresEpilogMasking &&
2578 !StoreAccessWithGapsRequiresMasking)
2579 return true;
2580
2581 // If masked interleaving is required, we expect that the user/target had
2582 // enabled it, because otherwise it either wouldn't have been created or
2583 // it should have been invalidated by the CostModel.
2585 "Masked interleave-groups for predicated accesses are not enabled.");
2586
2587 if (Group->isReverse())
2588 return false;
2589
2590 // TODO: Support interleaved access that requires a gap mask for scalable VFs.
2591 bool NeedsMaskForGaps = LoadAccessWithGapsRequiresEpilogMasking ||
2592 StoreAccessWithGapsRequiresMasking;
2593 if (VF.isScalable() && NeedsMaskForGaps)
2594 return false;
2595
2596 auto *Ty = getLoadStoreType(I);
2597 const Align Alignment = getLoadStoreAlignment(I);
2598 unsigned AS = getLoadStoreAddressSpace(I);
2599 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment, AS)
2600 : TTI.isLegalMaskedStore(Ty, Alignment, AS);
2601}
2602
2603bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
2604 Instruction *I, ElementCount VF) {
2605 // Get and ensure we have a valid memory instruction.
2606 assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction");
2607
2608 auto *Ptr = getLoadStorePointerOperand(I);
2609 auto *ScalarTy = getLoadStoreType(I);
2610
2611 // In order to be widened, the pointer should be consecutive, first of all.
2612 if (!Legal->isConsecutivePtr(ScalarTy, Ptr))
2613 return false;
2614
2615 // If the instruction is a store located in a predicated block, it will be
2616 // scalarized.
2617 if (isScalarWithPredication(I, VF))
2618 return false;
2619
2620 // If the instruction's allocated size doesn't equal its type size, it
2621 // requires padding and will be scalarized.
2622 auto &DL = I->getDataLayout();
2623 if (hasIrregularType(ScalarTy, DL))
2624 return false;
2625
2626 return true;
2627}
2628
2629void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
2630 // We should not collect Uniforms more than once per VF. Right now,
2631 // this function is called from collectUniformsAndScalars(), which
2632 // already does this check. Collecting Uniforms for VF=1 does not make any
2633 // sense.
2634
2635 assert(VF.isVector() && !Uniforms.contains(VF) &&
2636 "This function should not be visited twice for the same VF");
2637
2638 // Create an entry for this VF so that it is analyzed only once; even if
2639 // no uniform value is found, Uniforms.count(VF) will return 1 afterwards.
2640 Uniforms[VF].clear();
2641
2642 // Now we know that the loop is vectorizable!
2643 // Collect instructions inside the loop that will remain uniform after
2644 // vectorization.
2645
2646 // Global values, params and instructions outside of current loop are out of
2647 // scope.
2648 auto IsOutOfScope = [&](Value *V) -> bool {
2649 auto *I = dyn_cast<Instruction>(V);
2650 return (!I || !TheLoop->contains(I));
2651 };
2652
2653 // Worklist containing uniform instructions demanding lane 0.
2654 SetVector<Instruction *> Worklist;
2655
2656 // Add uniform instructions demanding lane 0 to the worklist. Instructions
2657 // that require predication must not be considered uniform after
2658 // vectorization, because that would create an erroneous replicating region
2659 // where only a single instance out of VF should be formed.
2660 auto AddToWorklistIfAllowed = [&](Instruction *I) -> void {
2661 if (IsOutOfScope(I)) {
2662 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
2663 << *I << "\n");
2664 return;
2665 }
2666 if (isPredicatedInst(I)) {
2667 LLVM_DEBUG(
2668 dbgs() << "LV: Found not uniform due to requiring predication: " << *I
2669 << "\n");
2670 return;
2671 }
2672 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
2673 Worklist.insert(I);
2674 };
2675
2676 // Start with the conditional branches exiting the loop. If the branch
2677 // condition is an instruction contained in the loop that is only used by the
2678 // branch, it is uniform. Note conditions from uncountable early exits are not
2679 // uniform.
2680 SmallVector<BasicBlock *> Exiting;
2681 TheLoop->getExitingBlocks(Exiting);
2682 for (BasicBlock *E : Exiting) {
2683 if (Legal->hasUncountableEarlyExit() && TheLoop->getLoopLatch() != E)
2684 continue;
2685 auto *Cmp = dyn_cast<Instruction>(E->getTerminator()->getOperand(0));
2686 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
2687 AddToWorklistIfAllowed(Cmp);
2688 }
2689
2690 auto PrevVF = VF.divideCoefficientBy(2);
2691 // Return true if all lanes perform the same memory operation, and we can
2692 // thus choose to execute only one.
2693 auto IsUniformMemOpUse = [&](Instruction *I) {
2694 // If the value was already known to not be uniform for the previous
2695 // (smaller VF), it cannot be uniform for the larger VF.
2696 if (PrevVF.isVector()) {
2697 auto Iter = Uniforms.find(PrevVF);
2698 if (Iter != Uniforms.end() && !Iter->second.contains(I))
2699 return false;
2700 }
2701 if (!Legal->isUniformMemOp(*I, VF))
2702 return false;
2703 if (isa<LoadInst>(I))
2704 // Loading the same address always produces the same result - at least
2705 // assuming aliasing and ordering which have already been checked.
2706 return true;
2707 // Storing the same value on every iteration.
2708 return TheLoop->isLoopInvariant(cast<StoreInst>(I)->getValueOperand());
2709 };
2710
2711 auto IsUniformDecision = [&](Instruction *I, ElementCount VF) {
2712 InstWidening WideningDecision = getWideningDecision(I, VF);
2713 assert(WideningDecision != CM_Unknown &&
2714 "Widening decision should be ready at this moment");
2715
2716 if (IsUniformMemOpUse(I))
2717 return true;
2718
2719 return (WideningDecision == CM_Widen ||
2720 WideningDecision == CM_Widen_Reverse ||
2721 WideningDecision == CM_Interleave);
2722 };
2723
2724 // Returns true if Ptr is the pointer operand of a memory access instruction
2725 // I, I is known to not require scalarization, and the pointer is not also
2726 // stored.
2727 auto IsVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
2728 if (isa<StoreInst>(I) && I->getOperand(0) == Ptr)
2729 return false;
2730 return getLoadStorePointerOperand(I) == Ptr &&
2731 (IsUniformDecision(I, VF) || Legal->isInvariant(Ptr));
2732 };
2733
2734 // Holds a list of values which are known to have at least one uniform use.
2735 // Note that there may be other uses which aren't uniform. A "uniform use"
2736 // here is something which only demands lane 0 of the unrolled iterations;
2737 // it does not imply that all lanes produce the same value (e.g. this is not
2738 // the usual meaning of uniform)
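 // For example, the pointer operand of a consecutive widened load has a
 // uniform use: only its lane-0 value is needed to form the wide load, even
 // though other users of the same pointer may demand all lanes.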
2739 SetVector<Value *> HasUniformUse;
2740
2741 // Scan the loop for instructions which are either a) known to have only
2742 // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
2743 for (auto *BB : TheLoop->blocks())
2744 for (auto &I : *BB) {
2745 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
2746 switch (II->getIntrinsicID()) {
2747 case Intrinsic::sideeffect:
2748 case Intrinsic::experimental_noalias_scope_decl:
2749 case Intrinsic::assume:
2750 case Intrinsic::lifetime_start:
2751 case Intrinsic::lifetime_end:
2752 if (TheLoop->hasLoopInvariantOperands(&I))
2753 AddToWorklistIfAllowed(&I);
2754 break;
2755 default:
2756 break;
2757 }
2758 }
2759
2760 if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
2761 if (IsOutOfScope(EVI->getAggregateOperand())) {
2762 AddToWorklistIfAllowed(EVI);
2763 continue;
2764 }
2765 // Only ExtractValue instructions where the aggregate value comes from a
2766 // call are allowed to be non-uniform.
2767 assert(isa<CallInst>(EVI->getAggregateOperand()) &&
2768 "Expected aggregate value to be call return value");
2769 }
2770
2771 // If there's no pointer operand, there's nothing to do.
2772 auto *Ptr = getLoadStorePointerOperand(&I);
2773 if (!Ptr)
2774 continue;
2775
2776 // If the pointer can be proven to be uniform, always add it to the
2777 // worklist.
2778 if (isa<Instruction>(Ptr) && Legal->isUniform(Ptr, VF))
2779 AddToWorklistIfAllowed(cast<Instruction>(Ptr));
2780
2781 if (IsUniformMemOpUse(&I))
2782 AddToWorklistIfAllowed(&I);
2783
2784 if (IsVectorizedMemAccessUse(&I, Ptr))
2785 HasUniformUse.insert(Ptr);
2786 }
2787
2788 // Add to the worklist any operands which have *only* uniform (e.g. lane 0
2789 // demanding) users. Since loops are assumed to be in LCSSA form, this
2790 // disallows uses outside the loop as well.
2791 for (auto *V : HasUniformUse) {
2792 if (IsOutOfScope(V))
2793 continue;
2794 auto *I = cast<Instruction>(V);
2795 bool UsersAreMemAccesses = all_of(I->users(), [&](User *U) -> bool {
2796 auto *UI = cast<Instruction>(U);
2797 return TheLoop->contains(UI) && IsVectorizedMemAccessUse(UI, V);
2798 });
2799 if (UsersAreMemAccesses)
2800 AddToWorklistIfAllowed(I);
2801 }
2802
2803 // Expand Worklist in topological order: whenever a new instruction
2804 // is added, its users should already be in the Worklist. This ensures
2805 // a uniform instruction will only be used by uniform instructions.
2806 unsigned Idx = 0;
2807 while (Idx != Worklist.size()) {
2808 Instruction *I = Worklist[Idx++];
2809
2810 for (auto *OV : I->operand_values()) {
2811 // IsOutOfScope operands cannot be uniform instructions.
2812 if (IsOutOfScope(OV))
2813 continue;
2814 // Fixed-order recurrence phis should typically be considered
2815 // non-uniform.
2816 auto *OP = dyn_cast<PHINode>(OV);
2817 if (OP && Legal->isFixedOrderRecurrence(OP))
2818 continue;
2819 // If all the users of the operand are uniform, then add the
2820 // operand into the uniform worklist.
2821 auto *OI = cast<Instruction>(OV);
2822 if (llvm::all_of(OI->users(), [&](User *U) -> bool {
2823 auto *J = cast<Instruction>(U);
2824 return Worklist.count(J) || IsVectorizedMemAccessUse(J, OI);
2825 }))
2826 AddToWorklistIfAllowed(OI);
2827 }
2828 }
2829
2830 // For an instruction to be added into Worklist above, all its users inside
2831 // the loop should also be in Worklist. However, this condition cannot be
2832 // true for phi nodes that form a cyclic dependence. We must process phi
2833 // nodes separately. An induction variable will remain uniform if all users
2834 // of the induction variable and induction variable update remain uniform.
2835 // The code below handles both pointer and non-pointer induction variables.
2836 BasicBlock *Latch = TheLoop->getLoopLatch();
2837 for (const auto &Induction : Legal->getInductionVars()) {
2838 auto *Ind = Induction.first;
2839 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
2840
2841 // Determine if all users of the induction variable are uniform after
2842 // vectorization.
2843 bool UniformInd = all_of(Ind->users(), [&](User *U) -> bool {
2844 auto *I = cast<Instruction>(U);
2845 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
2846 IsVectorizedMemAccessUse(I, Ind);
2847 });
2848 if (!UniformInd)
2849 continue;
2850
2851 // Determine if all users of the induction variable update instruction are
2852 // uniform after vectorization.
2853 bool UniformIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
2854 auto *I = cast<Instruction>(U);
2855 return I == Ind || Worklist.count(I) ||
2856 IsVectorizedMemAccessUse(I, IndUpdate);
2857 });
2858 if (!UniformIndUpdate)
2859 continue;
2860
2861 // The induction variable and its update instruction will remain uniform.
2862 AddToWorklistIfAllowed(Ind);
2863 AddToWorklistIfAllowed(IndUpdate);
2864 }
2865
2866 Uniforms[VF].insert_range(Worklist);
2867}
2868
2869FixedScalableVFPair
2870LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
2871 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
2872 // TODO: It may still be useful to vectorize, since the branch on the
2873 // check is likely to be dynamically uniform if the target can skip it.
2874 reportVectorizationFailure(
2875 "Not inserting runtime ptr check for divergent target",
2876 "runtime pointer checks needed. Not enabled for divergent target",
2877 "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
2878 return FixedScalableVFPair::getNone();
2879 }
2880
2881 ScalarEvolution *SE = PSE.getSE();
2882 ElementCount TC = getSmallConstantTripCount(SE, TheLoop);
2883 unsigned MaxTC = PSE.getSmallConstantMaxTripCount();
2884 if (!MaxTC && EpilogueLoweringStatus == CM_EpilogueAllowed)
2886 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
2887 if (TC != ElementCount::getFixed(MaxTC))
2888 LLVM_DEBUG(dbgs() << "LV: Found maximum trip count: " << MaxTC << '\n');
2889 if (TC.isScalar()) {
2890 reportVectorizationFailure("Single iteration (non) loop",
2891 "loop trip count is one, irrelevant for vectorization",
2892 "SingleIterationLoop", ORE, TheLoop);
2893 return FixedScalableVFPair::getNone();
2894 }
2895
2896 // If BTC matches the widest induction type and is -1 then the trip count
2897 // computation will wrap to 0 and the vector trip count will be 0. Do not try
2898 // to vectorize.
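 // For example, with an i32 widest induction type and a backedge-taken
 // count of 0xFFFFFFFF (-1), the trip count BTC + 1 wraps to 0, leaving no
 // valid vector trip count.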
2899 const SCEV *BTC = SE->getBackedgeTakenCount(TheLoop);
2900 if (!isa<SCEVCouldNotCompute>(BTC) &&
2901 BTC->getType()->getScalarSizeInBits() >=
2902 Legal->getWidestInductionType()->getScalarSizeInBits() &&
2903 SE->isKnownPredicate(CmpInst::ICMP_EQ, BTC,
2904 SE->getMinusOne(BTC->getType()))) {
2905 reportVectorizationFailure(
2906 "Trip count computation wrapped",
2907 "backedge-taken count is -1, loop trip count wrapped to 0",
2908 "TripCountWrapped", ORE, TheLoop);
2909 return FixedScalableVFPair::getNone();
2910 }
2911
2912 assert(WideningDecisions.empty() && CallWideningDecisions.empty() &&
2913 Uniforms.empty() && Scalars.empty() &&
2914 "No cost-modeling decisions should have been taken at this point");
2915
2916 switch (EpilogueLoweringStatus) {
2917 case CM_EpilogueAllowed:
2918 return Config.computeFeasibleMaxVF(MaxTC, UserVF, UserIC, false,
2919 requiresScalarEpilogue(true));
2920 case CM_EpilogueNotAllowedFoldTail:
2921 [[fallthrough]];
2922 case CM_EpilogueNotNeededFoldTail:
2923 LLVM_DEBUG(dbgs() << "LV: tail-folding hint/switch found.\n"
2924 << "LV: Not allowing epilogue, creating tail-folded "
2925 << "vector loop.\n");
2926 break;
2927 case CM_EpilogueNotAllowedLowTripLoop:
2928 // fallthrough as a special case of OptForSize
2929 case CM_EpilogueNotAllowedOptSize:
2930 if (EpilogueLoweringStatus == CM_EpilogueNotAllowedOptSize)
2931 LLVM_DEBUG(dbgs() << "LV: Not allowing epilogue due to -Os/-Oz.\n");
2932 else
2933 LLVM_DEBUG(dbgs() << "LV: Not allowing epilogue due to low trip "
2934 << "count.\n");
2935
2936 // Bail if runtime checks are required, which are not good when optimising
2937 // for size.
2938 if (Config.runtimeChecksRequired())
2939 return FixedScalableVFPair::getNone();
2940
2941 break;
2942 }
2943
2944 // Now try to fold the tail by masking.
2945
2946 // Invalidate interleave groups that require an epilogue if we can't mask
2947 // the interleave-group.
2948 if (!useMaskedInterleavedAccesses(TTI)) {
2949 // Note: There is no need to invalidate any cost modeling decisions here, as
2950 // none were taken so far (see assertion above).
2951 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
2952 }
2953
2954 FixedScalableVFPair MaxFactors = Config.computeFeasibleMaxVF(
2955 MaxTC, UserVF, UserIC, true, requiresScalarEpilogue(true));
2956
2957 // Avoid tail folding if the trip count is known to be a multiple of any VF
2958 // we choose.
2959 std::optional<unsigned> MaxPowerOf2RuntimeVF =
2960 MaxFactors.FixedVF.getFixedValue();
2961 if (MaxFactors.ScalableVF) {
2962 std::optional<unsigned> MaxVScale = getMaxVScale(*TheFunction, TTI);
2963 if (MaxVScale) {
2964 MaxPowerOf2RuntimeVF = std::max<unsigned>(
2965 *MaxPowerOf2RuntimeVF,
2966 *MaxVScale * MaxFactors.ScalableVF.getKnownMinValue());
2967 } else
2968 MaxPowerOf2RuntimeVF = std::nullopt; // Stick with tail-folding for now.
2969 }
2970
2971 auto NoScalarEpilogueNeeded = [this, &UserIC](unsigned MaxVF) {
2972 // Return false if the loop is neither a single-latch-exit loop nor an
2973 // early-exit loop as tail-folding is not supported in that case.
2974 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch() &&
2975 !Legal->hasUncountableEarlyExit())
2976 return false;
2977 unsigned MaxVFtimesIC = UserIC ? MaxVF * UserIC : MaxVF;
2978 ScalarEvolution *SE = PSE.getSE();
2979 // Calling getSymbolicMaxBackedgeTakenCount enables support for loops
2980 // with uncountable exits. For countable loops, the symbolic maximum must
2981 // remain identical to the known back-edge taken count.
2982 const SCEV *BackedgeTakenCount = PSE.getSymbolicMaxBackedgeTakenCount();
2983 assert((Legal->hasUncountableEarlyExit() ||
2984 BackedgeTakenCount == PSE.getBackedgeTakenCount()) &&
2985 "Invalid loop count");
2986 const SCEV *ExitCount = SE->getAddExpr(
2987 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
2988 const SCEV *Rem = SE->getURemExpr(
2989 SE->applyLoopGuards(ExitCount, TheLoop),
2990 SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
2991 return Rem->isZero();
2992 };
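 // For example, with a known trip count of 64, MaxVF = 8 and UserIC = 2,
 // the remainder 64 % (8 * 2) is zero, so no scalar epilogue is needed.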
2993
2994 if (MaxPowerOf2RuntimeVF > 0u) {
2995 assert((UserVF.isNonZero() || isPowerOf2_32(*MaxPowerOf2RuntimeVF)) &&
2996 "MaxFixedVF must be a power of 2");
2997 if (NoScalarEpilogueNeeded(*MaxPowerOf2RuntimeVF)) {
2998 // Accept MaxFixedVF if we do not have a tail.
2999 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
3000 return MaxFactors;
3001 }
3002 }
3003
3004 auto ExpectedTC = getSmallBestKnownTC(PSE, TheLoop);
3005 if (ExpectedTC && ExpectedTC->isFixed() &&
3006 ExpectedTC->getFixedValue() <=
3007 TTI.getMinTripCountTailFoldingThreshold()) {
3008 if (MaxPowerOf2RuntimeVF > 0u) {
3009 // If we have a low-trip-count, and the fixed-width VF is known to divide
3010 // the trip count but the scalable factor does not, use the fixed-width
3011 // factor in preference to allow the generation of a non-predicated loop.
3012 if (EpilogueLoweringStatus == CM_EpilogueNotAllowedLowTripLoop &&
3013 NoScalarEpilogueNeeded(MaxFactors.FixedVF.getFixedValue())) {
3014 LLVM_DEBUG(dbgs() << "LV: Picking a fixed-width so that no tail will "
3015 "remain for any chosen VF.\n");
3016 MaxFactors.ScalableVF = ElementCount::getScalable(0);
3017 return MaxFactors;
3018 }
3019 }
3020
3022 "The trip count is below the minial threshold value.",
3023 "loop trip count is too low, avoiding vectorization", "LowTripCount",
3024 ORE, TheLoop);
3025 return FixedScalableVFPair::getNone();
3026 }
3027
3028 // If we don't know the precise trip count, or if the trip count that we
3029 // found modulo the vectorization factor is not zero, try to fold the tail
3030 // by masking.
3031 // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
3032 bool ContainsScalableVF = MaxFactors.ScalableVF.isNonZero();
3033 setTailFoldingStyle(ContainsScalableVF, UserIC);
3034 if (foldTailByMasking()) {
3035 if (foldTailWithEVL()) {
3036 LLVM_DEBUG(
3037 dbgs()
3038 << "LV: tail is folded with EVL, forcing unroll factor to be 1. Will "
3039 "try to generate VP Intrinsics with scalable vector "
3040 "factors only.\n");
3041 // Tail folded loop using VP intrinsics restricts the VF to be scalable
3042 // for now.
3043 // TODO: extend it for fixed vectors, if required.
3044 assert(ContainsScalableVF && "Expected scalable vector factor.");
3045
3046 MaxFactors.FixedVF = ElementCount::getFixed(1);
3047 }
3048 return MaxFactors;
3049 }
3050
3051 // If there was a tail-folding hint/switch, but we can't fold the tail by
3052 // masking, fallback to a vectorization with an epilogue.
3053 if (EpilogueLoweringStatus == CM_EpilogueNotNeededFoldTail) {
3054 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with an "
3055 "epilogue instead.\n");
3056 EpilogueLoweringStatus = CM_EpilogueAllowed;
3057 return MaxFactors;
3058 }
3059
3060 if (EpilogueLoweringStatus == CM_EpilogueNotAllowedFoldTail) {
3061 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
3062 return FixedScalableVFPair::getNone();
3063 }
3064
3065 if (TC.isZero()) {
3067 "unable to calculate the loop count due to complex control flow",
3068 "UnknownLoopCountComplexCFG", ORE, TheLoop);
3069 return FixedScalableVFPair::getNone();
3070 }
3071
3073 "Cannot optimize for size and vectorize at the same time.",
3074 "cannot optimize for size and vectorize at the same time. "
3075 "Enable vectorization of this loop with '#pragma clang loop "
3076 "vectorize(enable)' when compiling with -Os/-Oz",
3077 "NoTailLoopWithOptForSize", ORE, TheLoop);
3078 return FixedScalableVFPair::getNone();
3079}
3080
3081bool LoopVectorizationPlanner::isMoreProfitable(const VectorizationFactor &A,
3082 const VectorizationFactor &B,
3083 const unsigned MaxTripCount,
3084 bool HasTail,
3085 bool IsEpilogue) const {
3086 InstructionCost CostA = A.Cost;
3087 InstructionCost CostB = B.Cost;
3088
3089 // When there is a hint to always prefer scalable vectors, honour that hint.
3090 if (Hints.isScalableVectorizationAlwaysPreferred())
3091 if (A.Width.isScalable() && CostA.isValid() && !B.Width.isScalable() &&
3092 !B.Width.isScalar())
3093 return true;
3094
3095 // Improve estimate for the vector width if it is scalable.
3096 unsigned EstimatedWidthA = A.Width.getKnownMinValue();
3097 unsigned EstimatedWidthB = B.Width.getKnownMinValue();
3098 if (std::optional<unsigned> VScale = Config.getVScaleForTuning()) {
3099 if (A.Width.isScalable())
3100 EstimatedWidthA *= *VScale;
3101 if (B.Width.isScalable())
3102 EstimatedWidthB *= *VScale;
3103 }
3104
3105 // When optimizing for size, choose whichever is smallest, which will be
3106 // the one with the smallest cost for the whole loop. On a tie, pick the
3107 // larger vector width, on the assumption that throughput will be greater.
3108 if (Config.CostKind == TTI::TCK_CodeSize)
3109 return CostA < CostB ||
3110 (CostA == CostB && EstimatedWidthA > EstimatedWidthB);
3111
3112 // Assume vscale may be larger than 1 (or the value being tuned for),
3113 // so that scalable vectorization is slightly favorable over fixed-width
3114 // vectorization.
3115 bool PreferScalable = !TTI.preferFixedOverScalableIfEqualCost(IsEpilogue) &&
3116 A.Width.isScalable() && !B.Width.isScalable();
3117
3118 auto CmpFn = [PreferScalable](const InstructionCost &LHS,
3119 const InstructionCost &RHS) {
3120 return PreferScalable ? LHS <= RHS : LHS < RHS;
3121 };
3122
3123 // To avoid the need for FP division:
3124 // (CostA / EstimatedWidthA) < (CostB / EstimatedWidthB)
3125 // <=> (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA)
3126 bool LowerCostWithoutTC =
3127 CmpFn(CostA * EstimatedWidthB, CostB * EstimatedWidthA);
3128 if (!MaxTripCount)
3129 return LowerCostWithoutTC;
3130
3131 auto GetCostForTC = [MaxTripCount, HasTail](unsigned VF,
3132 InstructionCost VectorCost,
3133 InstructionCost ScalarCost) {
3134 // If the trip count is a known (possibly small) constant, the trip count
3135 // will be rounded up to an integer number of iterations under
3136 // FoldTailByMasking. The total cost in that case will be
3137 // VecCost*ceil(TripCount/VF). When not folding the tail, the total
3138 // cost will be VecCost*floor(TC/VF) + ScalarCost*(TC%VF). There will be
3139 // some extra overheads, but for the purpose of comparing the costs of
3140 // different VFs we can use this to compare the total loop-body cost
3141 // expected after vectorization.
3142 if (HasTail)
3143 return VectorCost * (MaxTripCount / VF) +
3144 ScalarCost * (MaxTripCount % VF);
3145 return VectorCost * divideCeil(MaxTripCount, VF);
3146 };
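// Worked example (hypothetical costs): with MaxTripCount = 10, VF = 4,
// VectorCost = 8 and ScalarCost = 3, a loop with a scalar tail costs
// 8 * floor(10 / 4) + 3 * (10 % 4) = 22, while a tail-folded loop costs
// 8 * ceil(10 / 4) = 24.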
3147
3148 auto RTCostA = GetCostForTC(EstimatedWidthA, CostA, A.ScalarCost);
3149 auto RTCostB = GetCostForTC(EstimatedWidthB, CostB, B.ScalarCost);
3150 bool LowerCostWithTC = CmpFn(RTCostA, RTCostB);
3151 LLVM_DEBUG(if (LowerCostWithTC != LowerCostWithoutTC) {
3152 dbgs() << "LV: VF " << (LowerCostWithTC ? A.Width : B.Width)
3153 << " has lower cost than VF "
3154 << (LowerCostWithTC ? B.Width : A.Width)
3155 << " when taking the cost of the remaining scalar loop iterations "
3156 "into consideration for a maximum trip count of "
3157 << MaxTripCount << ".\n";
3158 });
3159 return LowerCostWithTC;
3160}
3161
3162bool LoopVectorizationPlanner::isMoreProfitable(const VectorizationFactor &A,
3163 const VectorizationFactor &B,
3164 bool HasTail,
3165 bool IsEpilogue) const {
3166 const unsigned MaxTripCount = PSE.getSmallConstantMaxTripCount();
3167 return LoopVectorizationPlanner::isMoreProfitable(A, B, MaxTripCount, HasTail,
3168 IsEpilogue);
3169}
3170
3171 void LoopVectorizationPlanner::emitInvalidCostRemarks(
3172 OptimizationRemarkEmitter *ORE) {
3173 using RecipeVFPair = std::pair<VPRecipeBase *, ElementCount>;
3174 SmallVector<RecipeVFPair> InvalidCosts;
3175 for (const auto &Plan : VPlans) {
3176 for (ElementCount VF : Plan->vectorFactors()) {
3177 // The VPlan-based cost model is designed for computing vector cost.
3178 // Querying the VPlan-based cost model with a scalar VF will cause some
3179 // errors because we expect the VF to be a vector for most of the widen
3180 // recipes.
3181 if (VF.isScalar())
3182 continue;
3183
3184 VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, Config.CostKind, CM.PSE,
3185 OrigLoop);
3186 precomputeCosts(*Plan, VF, CostCtx);
3187 auto Iter = vp_depth_first_deep(Plan->getVectorLoopRegion()->getEntry());
3188 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
3189 for (auto &R : *VPBB) {
3190 if (!R.cost(VF, CostCtx).isValid())
3191 InvalidCosts.emplace_back(&R, VF);
3192 }
3193 }
3194 }
3195 }
3196 if (InvalidCosts.empty())
3197 return;
3198
3199 // Emit a report of VFs with invalid costs in the loop.
3200
3201 // Group the remarks per recipe, keeping the recipe order from InvalidCosts.
3202 DenseMap<VPRecipeBase *, unsigned> Numbering;
3203 unsigned I = 0;
3204 for (auto &Pair : InvalidCosts)
3205 if (Numbering.try_emplace(Pair.first, I).second)
3206 ++I;
3207
3208 // Sort the list, first on recipe(number) then on VF.
3209 sort(InvalidCosts, [&Numbering](RecipeVFPair &A, RecipeVFPair &B) {
3210 unsigned NA = Numbering[A.first];
3211 unsigned NB = Numbering[B.first];
3212 if (NA != NB)
3213 return NA < NB;
3214 return ElementCount::isKnownLT(A.second, B.second);
3215 });
3216
3217 // For a list of ordered recipe-VF pairs:
3218 // [(load, VF1), (load, VF2), (store, VF1)]
3219 // group the recipes together to emit separate remarks for:
3220 // load (VF1, VF2)
3221 // store (VF1)
3222 auto Tail = ArrayRef<RecipeVFPair>(InvalidCosts);
3223 auto Subset = ArrayRef<RecipeVFPair>();
3224 do {
3225 if (Subset.empty())
3226 Subset = Tail.take_front(1);
3227
3228 VPRecipeBase *R = Subset.front().first;
3229
3230 unsigned Opcode =
3231 TypeSwitch<const VPRecipeBase *, unsigned>(R)
3232 .Case([](const VPHeaderPHIRecipe *R) { return Instruction::PHI; })
3233 .Case(
3234 [](const VPWidenStoreRecipe *R) { return Instruction::Store; })
3235 .Case([](const VPWidenLoadRecipe *R) { return Instruction::Load; })
3236 .Case<VPWidenCallRecipe, VPWidenIntrinsicRecipe>(
3237 [](const auto *R) { return Instruction::Call; })
3238 .Case<VPInstruction, VPWidenRecipe, VPReplicateRecipe,
3239 VPWidenCastRecipe>(
3240 [](const auto *R) { return R->getOpcode(); })
3241 .Case([](const VPInterleaveRecipe *R) {
3242 return R->getStoredValues().empty() ? Instruction::Load
3243 : Instruction::Store;
3244 })
3245 .Case([](const VPReductionRecipe *R) {
3246 return RecurrenceDescriptor::getOpcode(R->getRecurrenceKind());
3247 });
3248
3249 // If the next recipe is different, or if there are no other pairs,
3250 // emit a remark for the collated subset. e.g.
3251 // [(load, VF1), (load, VF2))]
3252 // to emit:
3253 // remark: invalid costs for 'load' at VF=(VF1, VF2)
3254 if (Subset == Tail || Tail[Subset.size()].first != R) {
3255 std::string OutString;
3256 raw_string_ostream OS(OutString);
3257 assert(!Subset.empty() && "Unexpected empty range");
3258 OS << "Recipe with invalid costs prevented vectorization at VF=(";
3259 for (const auto &Pair : Subset)
3260 OS << (Pair.second == Subset.front().second ? "" : ", ") << Pair.second;
3261 OS << "):";
3262 if (Opcode == Instruction::Call) {
3263 StringRef Name = "";
3264 if (auto *Int = dyn_cast<VPWidenIntrinsicRecipe>(R)) {
3265 Name = Int->getIntrinsicName();
3266 } else {
3267 auto *WidenCall = dyn_cast<VPWidenCallRecipe>(R);
3268 Function *CalledFn =
3269 WidenCall ? WidenCall->getCalledScalarFunction()
3270 : cast<Function>(R->getOperand(R->getNumOperands() - 1)
3271 ->getLiveInIRValue());
3272 Name = CalledFn->getName();
3273 }
3274 OS << " call to " << Name;
3275 } else
3276 OS << " " << Instruction::getOpcodeName(Opcode);
3277 reportVectorizationInfo(OutString, "InvalidCost", ORE, OrigLoop, nullptr,
3278 R->getDebugLoc());
3279 Tail = Tail.drop_front(Subset.size());
3280 Subset = {};
3281 } else
3282 // Grow the subset by one element
3283 Subset = Tail.take_front(Subset.size() + 1);
3284 } while (!Tail.empty());
3285}
3286
3287/// Check if any recipe of \p Plan will generate a vector value, which will be
3288/// assigned a vector register.
3289 static bool willGenerateVectors(VPlan &Plan, ElementCount VF,
3290 const TargetTransformInfo &TTI) {
3291 assert(VF.isVector() && "Checking a scalar VF?");
3292 VPTypeAnalysis TypeInfo(Plan);
3293 DenseSet<VPRecipeBase *> EphemeralRecipes;
3294 collectEphemeralRecipesForVPlan(Plan, EphemeralRecipes);
3295 // Set of already visited types.
3296 DenseSet<Type *> Visited;
3297 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
3298 vp_depth_first_shallow(Plan.getVectorLoopRegion()->getEntry()))) {
3299 for (VPRecipeBase &R : *VPBB) {
3300 if (EphemeralRecipes.contains(&R))
3301 continue;
3302 // Continue early if the recipe is considered to not produce a vector
3303 // result. Note that this includes VPInstruction where some opcodes may
3304 // produce a vector, to preserve existing behavior as VPInstructions model
3305 // aspects not directly mapped to existing IR instructions.
3306 switch (R.getVPRecipeID()) {
3307 case VPRecipeBase::VPDerivedIVSC:
3308 case VPRecipeBase::VPScalarIVStepsSC:
3309 case VPRecipeBase::VPReplicateSC:
3310 case VPRecipeBase::VPInstructionSC:
3311 case VPRecipeBase::VPCurrentIterationPHISC:
3312 case VPRecipeBase::VPVectorPointerSC:
3313 case VPRecipeBase::VPVectorEndPointerSC:
3314 case VPRecipeBase::VPExpandSCEVSC:
3315 case VPRecipeBase::VPPredInstPHISC:
3316 case VPRecipeBase::VPBranchOnMaskSC:
3317 continue;
3318 case VPRecipeBase::VPReductionSC:
3319 case VPRecipeBase::VPActiveLaneMaskPHISC:
3320 case VPRecipeBase::VPWidenCallSC:
3321 case VPRecipeBase::VPWidenCanonicalIVSC:
3322 case VPRecipeBase::VPWidenCastSC:
3323 case VPRecipeBase::VPWidenGEPSC:
3324 case VPRecipeBase::VPWidenIntrinsicSC:
3325 case VPRecipeBase::VPWidenSC:
3326 case VPRecipeBase::VPBlendSC:
3327 case VPRecipeBase::VPFirstOrderRecurrencePHISC:
3328 case VPRecipeBase::VPHistogramSC:
3329 case VPRecipeBase::VPWidenPHISC:
3330 case VPRecipeBase::VPWidenIntOrFpInductionSC:
3331 case VPRecipeBase::VPWidenPointerInductionSC:
3332 case VPRecipeBase::VPReductionPHISC:
3333 case VPRecipeBase::VPInterleaveEVLSC:
3334 case VPRecipeBase::VPInterleaveSC:
3335 case VPRecipeBase::VPWidenLoadEVLSC:
3336 case VPRecipeBase::VPWidenLoadSC:
3337 case VPRecipeBase::VPWidenStoreEVLSC:
3338 case VPRecipeBase::VPWidenStoreSC:
3339 break;
3340 default:
3341 llvm_unreachable("unhandled recipe");
3342 }
3343
3344 auto WillGenerateTargetVectors = [&TTI, VF](Type *VectorTy) {
3345 unsigned NumLegalParts = TTI.getNumberOfParts(VectorTy);
3346 if (!NumLegalParts)
3347 return false;
3348 if (VF.isScalable()) {
3349 // <vscale x 1 x iN> is assumed to be profitable over iN because
3350 // scalable registers are a distinct register class from scalar
3351 // ones. If we ever find a target which wants to lower scalable
3352 // vectors back to scalars, we'll need to update this code to
3353 // explicitly ask TTI about the register class uses for each part.
3354 return NumLegalParts <= VF.getKnownMinValue();
3355 }
3356 // Two or more elements that share a register are vectorized.
3357 return NumLegalParts < VF.getFixedValue();
3358 };
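// For example (hypothetical target): for a fixed VF = 8 with i32 elements on
// a target with 128-bit vector registers, <8 x i32> legalizes to 2 parts;
// 2 < 8, so the recipe is considered to produce genuine vector values.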
3359
3360 // If the recipe has no def and is not a store (e.g., a branch), continue - no value to check.
3361 if (R.getNumDefinedValues() == 0 &&
3362 !isa<VPWidenStoreRecipe, VPWidenStoreEVLRecipe, VPInterleaveBase>(&R))
3363 continue;
3364 // For multi-def recipes (currently only interleaved loads), it suffices
3365 // to check the first def only.
3366 // For stores, check their stored value; for interleaved stores it
3367 // suffices to check the first stored value only. In all cases this is
3368 // the second operand.
3369 VPValue *ToCheck =
3370 R.getNumDefinedValues() >= 1 ? R.getVPValue(0) : R.getOperand(1);
3371 Type *ScalarTy = TypeInfo.inferScalarType(ToCheck);
3372 if (!Visited.insert({ScalarTy}).second)
3373 continue;
3374 Type *WideTy = toVectorizedTy(ScalarTy, VF);
3375 if (any_of(getContainedTypes(WideTy), WillGenerateTargetVectors))
3376 return true;
3377 }
3378 }
3379
3380 return false;
3381}
3382
3383static bool hasReplicatorRegion(VPlan &Plan) {
3384 return any_of(VPBlockUtils::blocksOnly<VPRegionBlock>(vp_depth_first_shallow(
3385 Plan.getVectorLoopRegion()->getEntry())),
3386 [](auto *VPRB) { return VPRB->isReplicator(); });
3387}
3388
3389/// Returns true if the VPlan contains a VPReductionPHIRecipe with
3390/// FindLast recurrence kind.
3391static bool hasFindLastReductionPhi(VPlan &Plan) {
3392 return any_of(Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis(),
3393 [](VPRecipeBase &R) {
3394 auto *RedPhi = dyn_cast<VPReductionPHIRecipe>(&R);
3395 return RedPhi &&
3396 RecurrenceDescriptor::isFindLastRecurrenceKind(
3397 RedPhi->getRecurrenceKind());
3398 });
3399}
3400
3401/// Returns true if the VPlan contains header phi recipes that are not currently
3402/// supported for epilogue vectorization.
3403 static bool hasUnsupportedHeaderPhiRecipe(VPlan &Plan) {
3404 return any_of(
3405 Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis(),
3406 [](VPRecipeBase &R) {
3407 switch (R.getVPRecipeID()) {
3408 case VPRecipeBase::VPFirstOrderRecurrencePHISC:
3409 // TODO: Add support for fixed-order recurrences.
3410 return true;
3411 case VPRecipeBase::VPWidenIntOrFpInductionSC:
3412 return !cast<VPWidenIntOrFpInductionRecipe>(&R)->getPHINode();
3413 case VPRecipeBase::VPReductionPHISC: {
3414 auto *RedPhi = cast<VPReductionPHIRecipe>(&R);
3415 // TODO: Support FMinNum/FMaxNum, FindLast reductions, and reductions
3416 // without underlying values.
3417 RecurKind Kind = RedPhi->getRecurrenceKind();
3418 if (RecurrenceDescriptor::isFPMinMaxNumRecurrenceKind(Kind) ||
3419 RecurrenceDescriptor::isFindLastRecurrenceKind(Kind) ||
3420 !RedPhi->getUnderlyingValue())
3421 return true;
3422 // TODO: Add support for FindIV reductions with sunk expressions: the
3423 // resume value from the main loop is in expression domain (e.g.,
3424 // mul(ReducedIV, 3)), but the epilogue tracks raw IV values. A sunk
3425 // expression is identified by a non-VPInstruction user of
3426 // ComputeReductionResult.
3427 if (RecurrenceDescriptor::isFindIVRecurrenceKind(Kind)) {
3428 auto *RdxResult = vputils::findComputeReductionResult(RedPhi);
3429 assert(RdxResult &&
3430 "FindIV reduction must have ComputeReductionResult");
3431 return any_of(RdxResult->users(),
3432 std::not_fn(IsaPred<VPInstruction>));
3433 }
3434 return false;
3435 }
3436 default:
3437 return false;
3438 };
3439 });
3440}
3441
3442bool LoopVectorizationPlanner::isCandidateForEpilogueVectorization(
3443 VPlan &MainPlan) const {
3444 // Bail out if the plan contains header phi recipes not yet supported
3445 // for epilogue vectorization.
3446 if (hasUnsupportedHeaderPhiRecipe(MainPlan))
3447 return false;
3448
3449 // Epilogue vectorization code has not been audited to ensure it handles
3450 // non-latch exits properly. It may be fine, but it needs to be audited
3451 // and tested.
3452 // TODO: Add support for loops with an early exit.
3453 if (OrigLoop->getExitingBlock() != OrigLoop->getLoopLatch())
3454 return false;
3455
3456 return true;
3457}
3458
3459 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
3460 const ElementCount VF, const unsigned IC) const {
3461 // FIXME: We need a much better cost-model to take different parameters such
3462 // as register pressure, code size increase and cost of extra branches into
3463 // account. For now we apply a very crude heuristic and only consider loops
3464 // with vectorization factors larger than a certain value.
3465
3466 // Allow the target to opt out.
3467 if (!TTI.preferEpilogueVectorization(VF * IC))
3468 return false;
3469
3470 unsigned MinVFThreshold = EpilogueVectorizationMinVF.getNumOccurrences() > 0
3471 ? EpilogueVectorizationMinVF
3472 : TTI.getEpilogueVectorizationMinVF();
3473 return estimateElementCount(VF * IC, Config.getVScaleForTuning()) >=
3474 MinVFThreshold;
3475}
3476
3478 VPlan &MainPlan, ElementCount MainLoopVF, unsigned IC) {
3479 if (!EnableEpilogueVectorization) {
3480 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n");
3481 return nullptr;
3482 }
3483
3484 if (!CM.isEpilogueAllowed()) {
3485 LLVM_DEBUG(dbgs() << "LEV: Unable to vectorize epilogue because no "
3486 "epilogue is allowed.\n");
3487 return nullptr;
3488 }
3489
3490 // Not really a cost consideration, but check for unsupported cases here to
3491 // simplify the logic.
3492 if (!isCandidateForEpilogueVectorization(MainPlan)) {
3493 LLVM_DEBUG(dbgs() << "LEV: Unable to vectorize epilogue because the loop "
3494 "is not a supported candidate.\n");
3495 return nullptr;
3496 }
3497
3498 if (EpilogueVectorizationForceVF.getNumOccurrences() > 0) {
3499 if (EpilogueVectorizationForceVF >=
3500 IC * estimateElementCount(MainLoopVF, Config.getVScaleForTuning())) {
3501 // Note that the main loop leaves IC * MainLoopVF iterations iff a scalar
3502 // epilogue is required, but then the epilogue loop also requires a scalar
3503 // epilogue.
3504 LLVM_DEBUG(dbgs() << "LEV: Forced epilogue VF results in dead epilogue "
3505 "vector loop, skipping vectorizing epilogue.\n");
3506 return nullptr;
3507 }
3508
3509 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n");
3510 ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF);
3511 if (hasPlanWithVF(ForcedEC)) {
3512 std::unique_ptr<VPlan> Clone(getPlanFor(ForcedEC).duplicate());
3513 Clone->setVF(ForcedEC);
3514 return Clone;
3515 }
3516
3517 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization forced factor is not "
3518 "viable.\n");
3519 return nullptr;
3520 }
3521
3522 if (OrigLoop->getHeader()->getParent()->hasOptSize()) {
3523 LLVM_DEBUG(
3524 dbgs() << "LEV: Epilogue vectorization skipped due to opt for size.\n");
3525 return nullptr;
3526 }
3527
3528 if (!CM.isEpilogueVectorizationProfitable(MainLoopVF, IC)) {
3529 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
3530 "this loop\n");
3531 return nullptr;
3532 }
3533
3534 // Check if a plan's vector loop processes fewer iterations than VF (e.g.
3535 // when interleave groups have been narrowed by narrowInterleaveGroups) and
3536 // return the adjusted, effective VF.
3537 using namespace VPlanPatternMatch;
3538 auto GetEffectiveVF = [](VPlan &Plan, ElementCount VF) -> ElementCount {
3539 auto *Exiting = Plan.getVectorLoopRegion()->getExitingBasicBlock();
3540 if (match(&Exiting->back(),
3541 m_BranchOnCount(m_Add(m_CanonicalIV(), m_Specific(&Plan.getUF())),
3542 m_VPValue())))
3543 return ElementCount::get(1, VF.isScalable());
3544 return VF;
3545 };
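// For illustration: after narrowInterleaveGroups, a plan may step its
// canonical IV by UF rather than VF * UF, i.e. it processes one element per
// part; the match above detects this and reports an effective VF of 1
// (times vscale for scalable plans).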
3546
3547 // Check if the main loop processes fewer than MainLoopVF elements per
3548 // iteration (e.g. due to narrowing interleave groups). Adjust MainLoopVF
3549 // as needed.
3550 MainLoopVF = GetEffectiveVF(MainPlan, MainLoopVF);
3551
3552 // If MainLoopVF = vscale x 2, and vscale is expected to be 4, then we know
3553 // the main loop handles 8 lanes per iteration. We could still benefit from
3554 // vectorizing the epilogue loop with VF=4.
3555 ElementCount EstimatedRuntimeVF = ElementCount::getFixed(
3556 estimateElementCount(MainLoopVF, Config.getVScaleForTuning()));
3557
3558 Type *TCType = Legal->getWidestInductionType();
3559 const SCEV *RemainingIterations = nullptr;
3560 unsigned MaxTripCount = 0;
3561 const SCEV *TC = vputils::getSCEVExprForVPValue(MainPlan.getTripCount(), PSE);
3562 assert(!isa<SCEVCouldNotCompute>(TC) && "Trip count SCEV must be computable");
3563 const SCEV *KnownMinTC;
3564 bool ScalableTC = match(TC, m_scev_c_Mul(m_SCEV(KnownMinTC), m_SCEVVScale()));
3565 bool ScalableRemIter = false;
3566 ScalarEvolution &SE = *PSE.getSE();
3567 // Use versions of TC and VF in which both are either scalable or fixed.
3568 if (ScalableTC == MainLoopVF.isScalable()) {
3569 ScalableRemIter = ScalableTC;
3570 RemainingIterations =
3571 SE.getURemExpr(TC, SE.getElementCount(TCType, MainLoopVF * IC));
3572 } else if (ScalableTC) {
3573 const SCEV *EstimatedTC = SE.getMulExpr(
3574 KnownMinTC,
3575 SE.getConstant(TCType, Config.getVScaleForTuning().value_or(1)));
3576 RemainingIterations = SE.getURemExpr(
3577 EstimatedTC, SE.getElementCount(TCType, MainLoopVF * IC));
3578 } else
3579 RemainingIterations =
3580 SE.getURemExpr(TC, SE.getElementCount(TCType, EstimatedRuntimeVF * IC));
3581
3582 // No iterations left to process in the epilogue.
3583 if (RemainingIterations->isZero())
3584 return nullptr;
3585
3586 if (MainLoopVF.isFixed()) {
3587 MaxTripCount = MainLoopVF.getFixedValue() * IC - 1;
3588 if (SE.isKnownPredicate(CmpInst::ICMP_ULT, RemainingIterations,
3589 SE.getConstant(TCType, MaxTripCount))) {
3590 MaxTripCount = SE.getUnsignedRangeMax(RemainingIterations).getZExtValue();
3591 }
3592 LLVM_DEBUG(dbgs() << "LEV: Maximum Trip Count for Epilogue: "
3593 << MaxTripCount << "\n");
3594 }
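// For example (hypothetical values): with a fixed MainLoopVF = 4 and IC = 2,
// at most 4 * 2 - 1 = 7 iterations can remain for the epilogue, unless SCEV
// proves the remainder is known to be even smaller.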
3595
3596 auto SkipVF = [&](const SCEV *VF, const SCEV *RemIter) -> bool {
3597 return SE.isKnownPredicate(CmpInst::ICMP_UGT, VF, RemIter);
3598 };
3599 VectorizationFactor Result = VectorizationFactor::Disabled();
3600 VPlan *BestPlan = nullptr;
3601 for (auto &NextVF : ProfitableVFs) {
3602 // Skip candidate VFs without a corresponding VPlan.
3603 if (!hasPlanWithVF(NextVF.Width))
3604 continue;
3605
3606 VPlan &CurrentPlan = getPlanFor(NextVF.Width);
3607 ElementCount EffectiveVF = GetEffectiveVF(CurrentPlan, NextVF.Width);
3608 // Skip candidate VFs with widths >= the (estimated) runtime VF (scalable
3609 // vectors) or > the VF of the main loop (fixed vectors).
3610 if ((!EffectiveVF.isScalable() && MainLoopVF.isScalable() &&
3611 ElementCount::isKnownGE(EffectiveVF, EstimatedRuntimeVF)) ||
3612 (EffectiveVF.isScalable() &&
3613 ElementCount::isKnownGE(EffectiveVF, MainLoopVF)) ||
3614 (!EffectiveVF.isScalable() && !MainLoopVF.isScalable() &&
3615 ElementCount::isKnownGT(EffectiveVF, MainLoopVF)))
3616 continue;
3617
3618 // If EffectiveVF is greater than the number of remaining iterations, the
3619 // epilogue loop would be dead. Skip such factors. If the epilogue plan
3620 // also has narrowed interleave groups, use the effective VF since
3621 // the epilogue step will be reduced to its IC.
3622 // TODO: We should also consider comparing against a scalable
3623 // RemainingIterations once SCEV is able to evaluate non-canonical
3624 // vscale-based expressions.
3625 if (!ScalableRemIter) {
3626 // Handle the case where EffectiveVF and RemainingIterations are in
3627 // different numerical spaces.
3628 if (EffectiveVF.isScalable())
3629 EffectiveVF = ElementCount::getFixed(
3630 estimateElementCount(EffectiveVF, Config.getVScaleForTuning()));
3631 if (SkipVF(SE.getElementCount(TCType, EffectiveVF), RemainingIterations))
3632 continue;
3633 }
3634
3635 if (Result.Width.isScalar() ||
3636 isMoreProfitable(NextVF, Result, MaxTripCount, !CM.foldTailByMasking(),
3637 /*IsEpilogue*/ true)) {
3638 Result = NextVF;
3639 BestPlan = &CurrentPlan;
3640 }
3641 }
3642
3643 if (!BestPlan)
3644 return nullptr;
3645
3646 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
3647 << Result.Width << "\n");
3648 std::unique_ptr<VPlan> Clone(BestPlan->duplicate());
3649 Clone->setVF(Result.Width);
3650 return Clone;
3651}
3652
3653unsigned
3654 LoopVectorizationPlanner::selectInterleaveCount(VPlan &Plan, ElementCount VF,
3655 InstructionCost LoopCost) {
3656 // -- The interleave heuristics --
3657 // We interleave the loop in order to expose ILP and reduce the loop overhead.
3658 // There are many micro-architectural considerations that we can't predict
3659 // at this level. For example, frontend pressure (on decode or fetch) due to
3660 // code size, or the number and capabilities of the execution ports.
3661 //
3662 // We use the following heuristics to select the interleave count:
3663 // 1. If the code has reductions, then we interleave to break the cross
3664 // iteration dependency.
3665 // 2. If the loop is really small, then we interleave to reduce the loop
3666 // overhead.
3667 // 3. We don't interleave if we think that we will spill registers to memory
3668 // due to the increased register pressure.
3669
3670 // Only interleave tail-folded loops if wide lane masks are requested, as
3671 // the overhead of multiple instructions to calculate the predicate likely
3672 // outweighs the benefit. If an epilogue is not allowed for any other
3673 // reason, do not interleave.
3674 if (!CM.isEpilogueAllowed() &&
3675 !(CM.preferTailFoldedLoop() && CM.useWideActiveLaneMask()))
3676 return 1;
3677
3679 if (CM.foldTailWithEVL()) {
3680 LLVM_DEBUG(dbgs() << "LV: Loop requires variable-length step. "
3681 "Unroll factor forced to be 1.\n");
3682 return 1;
3683 }
3684
3685 // The maximum safe dependence distance already constrains VF * IC, so do
3686 // not interleave when the loop is not safe for any vector width.
3686 if (!Legal->isSafeForAnyVectorWidth())
3686 if (!Legal->isSafeForAnyVectorWidth())
3687 return 1;
3688
3689 // We don't attempt to perform interleaving for loops with uncountable early
3690 // exits because the VPInstruction::AnyOf code cannot currently handle
3691 // multiple parts.
3692 if (Plan.hasEarlyExit())
3693 return 1;
3694
3695 const bool HasReductions =
3696 any_of(Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis(),
3697 IsaPred<VPReductionPHIRecipe>);
3698
3699 // FIXME: implement interleaving for FindLast transform correctly.
3700 if (hasFindLastReductionPhi(Plan))
3701 return 1;
3702
3703 VPRegisterUsage R =
3704 calculateRegisterUsageForPlan(Plan, {VF}, TTI, CM.ValuesToIgnore)[0];
3705
3706 // If we did not calculate the cost for VF (because the user selected the VF)
3707 // then we calculate the cost of VF here.
3708 if (LoopCost == 0) {
3709 if (VF.isScalar())
3710 LoopCost = CM.expectedCost(VF);
3711 else
3712 LoopCost = cost(Plan, VF, &R);
3713 assert(LoopCost.isValid() && "Expected to have chosen a VF with valid cost");
3714
3715 // Loop body is free and there is no need for interleaving.
3716 if (LoopCost == 0)
3717 return 1;
3718 }
3719
3720 // We divide by these counts below, so clamp each to at least one
3721 // register-using instruction to avoid dividing by zero.
3722 for (auto &Pair : R.MaxLocalUsers) {
3723 Pair.second = std::max(Pair.second, 1U);
3724 }
3725
3726 // We calculate the interleave count using the following formula.
3727 // Subtract the number of loop invariants from the number of available
3728 // registers. These registers are used by all of the interleaved instances.
3729 // Next, divide the remaining registers by the number of registers that is
3730 // required by the loop, in order to estimate how many parallel instances
3731 // fit without causing spills. All of this is rounded down if necessary to be
3732 // a power of two. We want power of two interleave count to simplify any
3733 // addressing operations or alignment considerations.
3734 // We also want power of two interleave counts to ensure that the induction
3735 // variable of the vector loop wraps to zero, when tail is folded by masking;
3736 // this currently happens when OptForSize, in which case IC is set to 1 above.
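// Worked example (hypothetical register file): with 32 vector registers,
// 2 loop-invariant values and 10 registers of peak loop usage, the estimate
// below yields bit_floor((32 - 2) / 10) = bit_floor(3) = 2 interleaved copies.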
3737 unsigned IC = UINT_MAX;
3738
3739 for (const auto &Pair : R.MaxLocalUsers) {
3740 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(Pair.first);
3741 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
3742 << " registers of "
3743 << TTI.getRegisterClassName(Pair.first)
3744 << " register class\n");
3745 if (VF.isScalar()) {
3746 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
3747 TargetNumRegisters = ForceTargetNumScalarRegs;
3748 } else {
3749 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
3750 TargetNumRegisters = ForceTargetNumVectorRegs;
3751 }
3752 unsigned MaxLocalUsers = Pair.second;
3753 unsigned LoopInvariantRegs = 0;
3754 if (R.LoopInvariantRegs.contains(Pair.first))
3755 LoopInvariantRegs = R.LoopInvariantRegs[Pair.first];
3756
3757 unsigned TmpIC = llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs) /
3758 MaxLocalUsers);
3759 // Don't count the induction variable as interleaved.
3760 if (EnableIndVarRegisterHeur) {
3761 TmpIC = llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs - 1) /
3762 std::max(1U, (MaxLocalUsers - 1)));
3763 }
3764
3765 IC = std::min(IC, TmpIC);
3766 }
3767
3768 // Clamp the interleave ranges to reasonable counts.
3769 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
3770 LLVM_DEBUG(dbgs() << "LV: MaxInterleaveFactor for the target is "
3771 << MaxInterleaveCount << "\n");
3772
3773 // Check if the user has overridden the max.
3774 if (VF.isScalar()) {
3775 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
3776 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
3777 } else {
3778 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
3779 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
3780 }
3781
3782 // Try to get the exact trip count, or, failing that, an estimate based on
3783 // profiling data or ConstantMax from PSE.
3784 auto BestKnownTC =
3785 getSmallBestKnownTC(PSE, OrigLoop,
3786 /*CanUseConstantMax=*/true,
3787 /*CanExcludeZeroTrips=*/CM.isEpilogueAllowed());
3788
3789 // For fixed length VFs treat a scalable trip count as unknown.
3790 if (BestKnownTC && (BestKnownTC->isFixed() || VF.isScalable())) {
3791 // Re-evaluate trip counts and VFs to be in the same numerical space.
3792 unsigned AvailableTC =
3793 estimateElementCount(*BestKnownTC, Config.getVScaleForTuning());
3794 unsigned EstimatedVF =
3795 estimateElementCount(VF, Config.getVScaleForTuning());
3796
3797 // When a scalar epilogue is required, at least one iteration must remain
3798 // scalar, so the maximum iterations available for interleaving is one less.
3799 if (CM.requiresScalarEpilogue(VF.isVector()))
3800 --AvailableTC;
3801
3802 unsigned InterleaveCountLB = bit_floor(std::max(
3803 1u, std::min(AvailableTC / (EstimatedVF * 2), MaxInterleaveCount)));
3804
3805 if (getSmallConstantTripCount(PSE.getSE(), OrigLoop).isNonZero()) {
3806 // If the best known trip count is exact, we select between two
3807 // prospective ICs, where
3808 //
3809 // 1) the aggressive IC is capped by the trip count divided by VF
3810 // 2) the conservative IC is capped by the trip count divided by (VF * 2)
3811 //
3812 // The final IC is selected in a way that the epilogue loop trip count is
3813 // minimized while maximizing the IC itself, so that we either run the
3814 // vector loop at least once if it generates a small epilogue loop, or
3815 // else we run the vector loop at least twice.
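// Worked example (hypothetical counts): with AvailableTC = 20 and
// EstimatedVF = 4, InterleaveCountUB = bit_floor(20 / 4) = 4 and
// InterleaveCountLB = bit_floor(20 / 8) = 2; both leave a scalar tail of
// 20 % 16 == 20 % 8 == 4 iterations, so the larger count is chosen.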
3816
3817 unsigned InterleaveCountUB = bit_floor(std::max(
3818 1u, std::min(AvailableTC / EstimatedVF, MaxInterleaveCount)));
3819 MaxInterleaveCount = InterleaveCountLB;
3820
3821 if (InterleaveCountUB != InterleaveCountLB) {
3822 unsigned TailTripCountUB =
3823 (AvailableTC % (EstimatedVF * InterleaveCountUB));
3824 unsigned TailTripCountLB =
3825 (AvailableTC % (EstimatedVF * InterleaveCountLB));
3826 // If both produce the same scalar tail, maximize the IC to do the same
3827 // work in fewer vector loop iterations.
3828 if (TailTripCountUB == TailTripCountLB)
3829 MaxInterleaveCount = InterleaveCountUB;
3830 }
3831 } else {
3832 // If the trip count is only an estimated compile-time constant, cap the
3833 // IC by the trip count divided by VF * 2, such that the vector loop runs
3834 // at least twice to make interleaving seem profitable when there is an
3835 // epilogue loop present. Since the exact trip count is not known we
3836 // choose to be conservative in our IC estimate.
3837 MaxInterleaveCount = InterleaveCountLB;
3838 }
3839 }
3840
3841 assert(MaxInterleaveCount > 0 &&
3842 "Maximum interleave count must be greater than 0");
3843
3844 // Clamp the calculated IC to be between 1 and the max interleave count
3845 // that the target and trip count allow.
3846 if (IC > MaxInterleaveCount)
3847 IC = MaxInterleaveCount;
3848 else
3849 // Make sure IC is greater than 0.
3850 IC = std::max(1u, IC);
3851
3852 assert(IC > 0 && "Interleave count must be greater than 0.");
3853
3854 // Interleave if we vectorized this loop and there is a reduction that could
3855 // benefit from interleaving.
3856 if (VF.isVector() && HasReductions) {
3857 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
3858 return IC;
3859 }
3860
3861 // For any scalar loop that either requires runtime checks or tail-folding we
3862 // are better off leaving this to the unroller. Note that if we've already
3863 // vectorized the loop we will have done the runtime check and so interleaving
3864 // won't require further checks.
3865 bool ScalarInterleavingRequiresPredication =
3866 (VF.isScalar() && any_of(OrigLoop->blocks(), [this](BasicBlock *BB) {
3867 return Legal->blockNeedsPredication(BB);
3868 }));
3869 bool ScalarInterleavingRequiresRuntimePointerCheck =
3870 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
3871
3872 // We want to interleave small loops in order to reduce the loop overhead and
3873 // potentially expose ILP opportunities.
3874 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
3875 << "LV: IC is " << IC << '\n'
3876 << "LV: VF is " << VF << '\n');
3877 const bool AggressivelyInterleave =
3878 TTI.enableAggressiveInterleaving(HasReductions);
3879 if (!ScalarInterleavingRequiresRuntimePointerCheck &&
3880 !ScalarInterleavingRequiresPredication && LoopCost < SmallLoopCost) {
3881 // We assume that the cost overhead is 1 and we use the cost model
3882 // to estimate the cost of the loop and interleave until the cost of the
3883 // loop overhead is about 5% of the cost of the loop.
3884 unsigned SmallIC = std::min(IC, (unsigned)llvm::bit_floor<uint64_t>(
3885 SmallLoopCost / LoopCost.getValue()));
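// For example (hypothetical numbers): with SmallLoopCost = 20 and
// LoopCost = 4, SmallIC = min(IC, bit_floor(20 / 4)) = min(IC, 4), keeping
// the assumed overhead of 1 around 5% of the loop-body cost.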
3886
3887 // Interleave until store/load ports (estimated by max interleave count) are
3888 // saturated.
3889 unsigned NumStores = 0;
3890 unsigned NumLoads = 0;
3891 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
3892 vp_depth_first_deep(Plan.getVectorLoopRegion()->getEntry()))) {
3893 for (VPRecipeBase &R : *VPBB) {
3894 if (isa<VPWidenLoadRecipe, VPWidenLoadEVLRecipe>(&R)) {
3895 NumLoads++;
3896 continue;
3897 }
3898 if (isa<VPWidenStoreRecipe, VPWidenStoreEVLRecipe>(&R)) {
3899 NumStores++;
3900 continue;
3901 }
3902
3903 if (auto *InterleaveR = dyn_cast<VPInterleaveRecipe>(&R)) {
3904 if (unsigned StoreOps = InterleaveR->getNumStoreOperands())
3905 NumStores += StoreOps;
3906 else
3907 NumLoads += InterleaveR->getNumDefinedValues();
3908 continue;
3909 }
3910 if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R)) {
3911 NumLoads += isa<LoadInst>(RepR->getUnderlyingInstr());
3912 NumStores += isa<StoreInst>(RepR->getUnderlyingInstr());
3913 continue;
3914 }
3915 if (isa<VPHistogramRecipe>(&R)) {
3916 NumLoads++;
3917 NumStores++;
3918 continue;
3919 }
3920 }
3921 }
3922 unsigned StoresIC = IC / (NumStores ? NumStores : 1);
3923 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
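// For example (hypothetical counts): with IC = 8, 2 stores and 4 loads per
// iteration, StoresIC = 4 and LoadsIC = 2; interleaving by more than these
// would oversubscribe the corresponding ports.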
3924
3925 // There is little point in interleaving for reductions containing selects
3926 // and compares when VF=1 since it may just create more overhead than it's
3927 // worth for loops with small trip counts. This is because we still have to
3928 // do the final reduction after the loop.
3929 bool HasSelectCmpReductions =
3930 HasReductions &&
3931 any_of(Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis(),
3932 [](VPRecipeBase &R) {
3933 auto *RedR = dyn_cast<VPReductionPHIRecipe>(&R);
3934 return RedR && (RecurrenceDescriptor::isAnyOfRecurrenceKind(
3935 RedR->getRecurrenceKind()) ||
3936 RecurrenceDescriptor::isFindIVRecurrenceKind(
3937 RedR->getRecurrenceKind()));
3938 });
3939 if (HasSelectCmpReductions) {
3940 LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");
3941 return 1;
3942 }
3943
3944 // If we have a scalar reduction (vector reductions are already dealt with
3945 // by this point), interleaving can increase the critical path length if
3946 // the loop we're interleaving is inside another loop. For tree-wise
3947 // reductions set the limit to 2, and for ordered reductions it's best to
3948 // disable interleaving entirely.
3949 if (HasReductions && OrigLoop->getLoopDepth() > 1) {
3950 bool HasOrderedReductions =
3951 any_of(Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis(),
3952 [](VPRecipeBase &R) {
3953 auto *RedR = dyn_cast<VPReductionPHIRecipe>(&R);
3954
3955 return RedR && RedR->isOrdered();
3956 });
3957 if (HasOrderedReductions) {
3958 LLVM_DEBUG(
3959 dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
3960 return 1;
3961 }
3962
3963 unsigned F = MaxNestedScalarReductionIC;
3964 SmallIC = std::min(SmallIC, F);
3965 StoresIC = std::min(StoresIC, F);
3966 LoadsIC = std::min(LoadsIC, F);
3967 }
3968
3969 if (EnableLoadStoreRuntimeInterleave &&
3970 std::max(StoresIC, LoadsIC) > SmallIC) {
3971 LLVM_DEBUG(
3972 dbgs() << "LV: Interleaving to saturate store or load ports.\n");
3973 return std::max(StoresIC, LoadsIC);
3974 }
3975
3976 // If there are scalar reductions and TTI has enabled aggressive
3977 // interleaving for reductions, we will interleave to expose ILP.
3978 if (VF.isScalar() && AggressivelyInterleave) {
3979 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
3980 // Interleave no less than SmallIC but not as aggressive as the normal IC
3981 // to satisfy the rare situation when resources are too limited.
3982 return std::max(IC / 2, SmallIC);
3983 }
3984
3985 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
3986 return SmallIC;
3987 }
3988
3989 // Interleave if this is a large loop (small loops are already dealt with by
3990 // this point) that could benefit from interleaving.
3991 if (AggressivelyInterleave) {
3992 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
3993 return IC;
3994 }
3995
3996 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
3997 return 1;
3998}
3999
4000 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I,
4001 ElementCount VF) {
4002 // TODO: Cost model for emulated masked load/store is completely
4003 // broken. This hack guides the cost model to use an artificially
4004 // high enough value to practically disable vectorization with such
4005 // operations, except where previously deployed legality hack allowed
4006 // using very low cost values. This is to avoid regressions coming simply
4007 // from moving "masked load/store" check from legality to cost model.
4008 // Masked Load/Gather emulation was previously never allowed.
4009 // Limited number of Masked Store/Scatter emulation was allowed.
4010 assert(isPredicatedInst(I) &&
4011 "Expecting a scalar emulated instruction");
4012 return isa<LoadInst>(I) ||
4013 (isa<StoreInst>(I) &&
4014 NumPredStores > NumberOfStoresToPredicate);
4015}
4016
4017 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
4018 assert(VF.isVector() && "Expected VF >= 2");
4019
4020 // If we've already collected the instructions to scalarize or the predicated
4021 // BBs after vectorization, there's nothing to do. Collection may already have
4022 // occurred if we have a user-selected VF and are now computing the expected
4023 // cost for interleaving.
4024 if (InstsToScalarize.contains(VF) ||
4025 PredicatedBBsAfterVectorization.contains(VF))
4026 return;
4027
4028 // Initialize a mapping for VF in InstsToScalarize. If we find that it's
4029 // not profitable to scalarize any instructions, the presence of VF in the
4030 // map will indicate that we've analyzed it already.
4031 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
4032
4033 // Find all the instructions that are scalar with predication in the loop and
4034 // determine if it would be better to not if-convert the blocks they are in.
4035 // If so, we also record the instructions to scalarize.
4036 for (BasicBlock *BB : TheLoop->blocks()) {
4037 if (!blockNeedsPredicationForAnyReason(BB))
4038 continue;
4039 for (Instruction &I : *BB)
4040 if (isScalarWithPredication(&I, VF)) {
4041 ScalarCostsTy ScalarCosts;
4042 // Do not apply discount logic for:
4043 // 1. Scalars after vectorization, as there will only be a single copy
4044 // of the instruction.
4045 // 2. Scalable VF, as that would lead to invalid scalarization costs.
4046 // 3. Emulated masked memrefs, if a hacked cost is needed.
4047 if (!isScalarAfterVectorization(&I, VF) && !VF.isScalable() &&
4048 !useEmulatedMaskMemRefHack(&I, VF) &&
4049 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) {
4050 for (const auto &[I, IC] : ScalarCosts)
4051 ScalarCostsVF.insert({I, IC});
4052 // Check if we decided to scalarize a call. If so, update the widening
4053 // decision of the call to CM_Scalarize with the computed scalar cost.
4054 for (const auto &[I, Cost] : ScalarCosts) {
4055 auto *CI = dyn_cast<CallInst>(I);
4056 if (!CI || !CallWideningDecisions.contains({CI, VF}))
4057 continue;
4058 CallWideningDecisions[{CI, VF}].Kind = CM_Scalarize;
4059 CallWideningDecisions[{CI, VF}].Cost = Cost;
4060 }
4061 }
4062 // Remember that BB will remain after vectorization.
4063 PredicatedBBsAfterVectorization[VF].insert(BB);
4064 for (auto *Pred : predecessors(BB)) {
4065 if (Pred->getSingleSuccessor() == BB)
4066 PredicatedBBsAfterVectorization[VF].insert(Pred);
4067 }
4068 }
4069 }
4070}
4071
4072InstructionCost LoopVectorizationCostModel::computePredInstDiscount(
4073 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
4074 assert(!isUniformAfterVectorization(PredInst, VF) &&
4075 "Instruction marked uniform-after-vectorization will be predicated");
4076
4077 // Initialize the discount to zero, meaning that the scalar version and the
4078 // vector version cost the same.
4079 InstructionCost Discount = 0;
4080
4081 // Holds instructions to analyze. The instructions we visit are mapped in
4082 // ScalarCosts. Those instructions are the ones that would be scalarized if
4083 // we find that the scalar version costs less.
4084 SmallVector<Instruction *, 8> Worklist;
4085
4086 // Returns true if the given instruction can be scalarized.
4087 auto CanBeScalarized = [&](Instruction *I) -> bool {
4088 // We only attempt to scalarize instructions forming a single-use chain
4089 // from the original predicated block that would otherwise be vectorized.
4090 // Although not strictly necessary, we give up on instructions we know will
4091 // already be scalar to avoid traversing chains that are unlikely to be
4092 // beneficial.
4093 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
4095 return false;
4096
4097 // If the instruction is scalar with predication, it will be analyzed
4098 // separately. We ignore it within the context of PredInst.
4099 if (isScalarWithPredication(I, VF))
4100 return false;
4101
4102 // If any of the instruction's operands are uniform after vectorization,
4103 // the instruction cannot be scalarized. This prevents, for example, a
4104 // masked load from being scalarized.
4105 //
4106 // We assume we will only emit a value for lane zero of an instruction
4107 // marked uniform after vectorization, rather than VF identical values.
4108 // Thus, if we scalarize an instruction that uses a uniform, we would
4109 // create uses of values corresponding to the lanes we aren't emitting code
4110 // for. This behavior can be changed by allowing getScalarValue to clone
4111 // the lane zero values for uniforms rather than asserting.
4112 for (Use &U : I->operands())
4113 if (auto *J = dyn_cast<Instruction>(U.get()))
4114 if (isUniformAfterVectorization(J, VF))
4115 return false;
4116
4117 // Otherwise, we can scalarize the instruction.
4118 return true;
4119 };
4120
4121 // Compute the expected cost discount from scalarizing the entire expression
4122 // feeding the predicated instruction. We currently only consider expressions
4123 // that are single-use instruction chains.
4124 Worklist.push_back(PredInst);
4125 while (!Worklist.empty()) {
4126 Instruction *I = Worklist.pop_back_val();
4127
4128 // If we've already analyzed the instruction, there's nothing to do.
4129 if (ScalarCosts.contains(I))
4130 continue;
4131
4132 // Cannot scalarize fixed-order recurrence phis at the moment.
4133 if (isa<PHINode>(I) && Legal->isFixedOrderRecurrence(cast<PHINode>(I)))
4134 continue;
4135
4136 // Compute the cost of the vector instruction. Note that this cost already
4137 // includes the scalarization overhead of the predicated instruction.
4138 InstructionCost VectorCost = getInstructionCost(I, VF);
4139
4140 // Compute the cost of the scalarized instruction. This cost is the cost of
4141 // the instruction as if it wasn't if-converted and instead remained in the
4142 // predicated block. We will scale this cost by block probability after
4143 // computing the scalarization overhead.
4144 InstructionCost ScalarCost =
4145 VF.getFixedValue() * getInstructionCost(I, ElementCount::getFixed(1));
4146
4147 // Compute the scalarization overhead of needed insertelement instructions
4148 // and phi nodes.
4149 if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) {
4150 Type *WideTy = toVectorizedTy(I->getType(), VF);
4151 for (Type *VectorTy : getContainedTypes(WideTy)) {
4152 ScalarCost += TTI.getScalarizationOverhead(
4153 cast<VectorType>(VectorTy), APInt::getAllOnes(VF.getFixedValue()),
4154 /*Insert=*/true,
4155 /*Extract=*/false, Config.CostKind);
4156 }
4157 ScalarCost += VF.getFixedValue() *
4158 TTI.getCFInstrCost(Instruction::PHI, Config.CostKind);
4159 }
4160
4161 // Compute the scalarization overhead of needed extractelement
4162 // instructions. For each of the instruction's operands, if the operand can
4163 // be scalarized, add it to the worklist; otherwise, account for the
4164 // overhead.
4165 for (Use &U : I->operands())
4166 if (auto *J = dyn_cast<Instruction>(U.get())) {
4167 assert(canVectorizeTy(J->getType()) &&
4168 "Instruction has non-scalar type");
4169 if (CanBeScalarized(J))
4170 Worklist.push_back(J);
4171 else if (needsExtract(J, VF)) {
4172 Type *WideTy = toVectorizedTy(J->getType(), VF);
4173 for (Type *VectorTy : getContainedTypes(WideTy)) {
4174 ScalarCost += TTI.getScalarizationOverhead(
4175 cast<VectorType>(VectorTy),
4176 APInt::getAllOnes(VF.getFixedValue()), /*Insert*/ false,
4177 /*Extract*/ true, Config.CostKind);
4178 }
4179 }
4180 }
4181
4182 // Scale the total scalar cost by block probability.
4183 ScalarCost /= getPredBlockCostDivisor(Config.CostKind, I->getParent());
4184
4185 // Compute the discount. A non-negative discount means the vector version
4186 // of the instruction costs more, and scalarizing would be beneficial.
4187 Discount += VectorCost - ScalarCost;
4188 ScalarCosts[I] = ScalarCost;
4189 }
4190
4191 return Discount;
4192}
4193
4194 InstructionCost LoopVectorizationCostModel::expectedCost(ElementCount VF) {
4195 InstructionCost Cost;
4196 assert(VF.isScalar() && "must only be called for scalar VFs");
4197
4198 // For each block.
4199 for (BasicBlock *BB : TheLoop->blocks()) {
4200 InstructionCost BlockCost;
4201
4202 // For each instruction in the old loop.
4203 for (Instruction &I : *BB) {
4204 // Skip ignored values.
4205 if (ValuesToIgnore.count(&I) ||
4206 (VF.isVector() && VecValuesToIgnore.count(&I)))
4207 continue;
4208
4209 InstructionCost C = getInstructionCost(&I, VF);
4210
4211 // Check if we should override the cost.
4212 if (C.isValid() && ForceTargetInstructionCost.getNumOccurrences() > 0)
4213 C = InstructionCost(ForceTargetInstructionCost);
4214
4215 BlockCost += C;
4216 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C << " for VF "
4217 << VF << " For instruction: " << I << '\n');
4218 }
4219
4220 // In the scalar loop, we may not always execute the predicated block, if it
4221 // is an if-else block. Thus, scale the block's cost by the probability of
4222 // executing it. getPredBlockCostDivisor will return 1 for blocks that are
4223 // only predicated by the header mask when folding the tail.
4224 Cost += BlockCost / getPredBlockCostDivisor(Config.CostKind, BB);
4225 }
4226
4227 return Cost;
4228}
4229
4230/// Gets the address access SCEV for Ptr, if it should be used for cost modeling
4231/// according to isAddressSCEVForCost.
4232///
4233/// This SCEV can be sent to the Target in order to estimate the address
4234/// calculation cost.
4235 static const SCEV *getAddressAccessSCEV(
4236 Value *Ptr,
4237 PredicatedScalarEvolution &PSE,
4238 const Loop *TheLoop) {
4239 const SCEV *Addr = PSE.getSCEV(Ptr);
4240 return vputils::isAddressSCEVForCost(Addr, *PSE.getSE(), TheLoop) ? Addr
4241 : nullptr;
4242}
4243
4244 InstructionCost
4245 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
4246 ElementCount VF) {
4247 assert(VF.isVector() &&
4248 "Scalarization cost of instruction implies vectorization.");
4249 if (VF.isScalable())
4250 return InstructionCost::getInvalid();
4251
4252 Type *ValTy = getLoadStoreType(I);
4253 auto *SE = PSE.getSE();
4254
4255 unsigned AS = getLoadStoreAddressSpace(I);
4256 Value *Ptr = getLoadStorePointerOperand(I);
4257 Type *PtrTy = toVectorTy(Ptr->getType(), VF);
4258 // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
4259 // that it is being called from this specific place.
4260
4261 // Figure out whether the access is strided and get the stride value
4262 // if it's known at compile time.
4263 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, PSE, TheLoop);
4264
4265 // Get the cost of the scalar memory instruction and address computation.
4266 InstructionCost Cost =
4267 VF.getFixedValue() *
4268 TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV, Config.CostKind);
4269
4270 // Don't pass *I here, since it is scalar but will actually be part of a
4271 // vectorized loop where the user of it is a vectorized instruction.
4272 const Align Alignment = getLoadStoreAlignment(I);
4273 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
4274 Cost += VF.getFixedValue() *
4275 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
4276 AS, Config.CostKind, OpInfo);
4277
4278 // Get the overhead of the extractelement and insertelement instructions
4279 // we might create due to scalarization.
4280 Cost += getScalarizationOverhead(I, VF);
4281
4282 // If we have a predicated load/store, it will need extra i1 extracts and
4283 // conditional branches, but may not be executed for each vector lane. Scale
4284 // the cost by the probability of executing the predicated block.
4285 if (isPredicatedInst(I)) {
4286 Cost /= getPredBlockCostDivisor(Config.CostKind, I->getParent());
4287
4288 // Add the cost of an i1 extract and a branch
4289 auto *VecI1Ty =
4290 VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
4291 Cost += TTI.getScalarizationOverhead(
4292 VecI1Ty, APInt::getAllOnes(VF.getFixedValue()),
4293 /*Insert=*/false, /*Extract=*/true, Config.CostKind);
4294 Cost += TTI.getCFInstrCost(Instruction::CondBr, Config.CostKind);
4295
4296 if (useEmulatedMaskMemRefHack(I, VF))
4297 // Artificially setting to a high enough value to practically disable
4298 // vectorization with such operations.
4299 Cost = 3000000;
4300 }
4301
4302 return Cost;
4303}
4304
4305 InstructionCost
4306 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
4307 ElementCount VF) {
4308 Type *ValTy = getLoadStoreType(I);
4309 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
4311 unsigned AS = getLoadStoreAddressSpace(I);
4312 int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
4313
4314 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
4315 "Stride should be 1 or -1 for consecutive memory access");
4316 const Align Alignment = getLoadStoreAlignment(I);
4317 InstructionCost Cost = 0;
4318 if (isMaskRequired(I)) {
4319 unsigned IID = I->getOpcode() == Instruction::Load
4320 ? Intrinsic::masked_load
4321 : Intrinsic::masked_store;
4322 Cost += TTI.getMemIntrinsicInstrCost(
4323 MemIntrinsicCostAttributes(IID, VectorTy, Alignment, AS),
4324 Config.CostKind);
4325 } else {
4326 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
4327 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
4328 Config.CostKind, OpInfo, I);
4329 }
4330
4331 bool Reverse = ConsecutiveStride < 0;
4332 if (Reverse)
4333 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy,
4334 VectorTy, {}, Config.CostKind, 0);
4335 return Cost;
4336}
4337
4338 InstructionCost
4339 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
4340 ElementCount VF) {
4341 assert(Legal->isUniformMemOp(*I, VF));
4342
4343 Type *ValTy = getLoadStoreType(I);
4344 Type *PtrTy = getLoadStorePointerOperand(I)->getType();
4345 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
4346 const Align Alignment = getLoadStoreAlignment(I);
4347 unsigned AS = getLoadStoreAddressSpace(I);
4348 if (isa<LoadInst>(I)) {
4349 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr,
4350 Config.CostKind) +
4351 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
4352 Config.CostKind) +
4353 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy,
4354 VectorTy, {}, Config.CostKind);
4355 }
4356 StoreInst *SI = cast<StoreInst>(I);
4357
4358 bool IsLoopInvariantStoreValue = Legal->isInvariant(SI->getValueOperand());
4359 // TODO: We have existing tests that request the cost of extracting element
4360 // VF.getKnownMinValue() - 1 from a scalable vector. This does not represent
4361 // the actual generated code, which involves extracting the last element of
4362 // a scalable vector where the lane to extract is unknown at compile time.
4363 InstructionCost Cost =
4364 TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, Config.CostKind) +
4365 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
4366 Config.CostKind);
4367 if (!IsLoopInvariantStoreValue)
4368 Cost += TTI.getIndexedVectorInstrCostFromEnd(Instruction::ExtractElement,
4369 VectorTy, Config.CostKind, 0);
4370 return Cost;
4371}
4372
4373 InstructionCost
4374 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
4375 ElementCount VF) {
4376 Type *ValTy = getLoadStoreType(I);
4377 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
4378 const Align Alignment = getLoadStoreAlignment(I);
4379 Value *Ptr = getLoadStorePointerOperand(I);
4380 Type *PtrTy = Ptr->getType();
4381
4382 if (!Legal->isUniform(Ptr, VF))
4383 PtrTy = toVectorTy(PtrTy, VF);
4384
4385 unsigned IID = I->getOpcode() == Instruction::Load
4386 ? Intrinsic::masked_gather
4387 : Intrinsic::masked_scatter;
4388 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr,
4389 Config.CostKind) +
4390 TTI.getMemIntrinsicInstrCost(
4391 MemIntrinsicCostAttributes(IID, VectorTy, Ptr, isMaskRequired(I),
4392 Alignment, I),
4393 Config.CostKind);
4394}
4395
4396 InstructionCost
4397 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
4398 ElementCount VF) {
4399 const auto *Group = getInterleavedAccessGroup(I);
4400 assert(Group && "Fail to get an interleaved access group.");
4401
4402 Instruction *InsertPos = Group->getInsertPos();
4403 Type *ValTy = getLoadStoreType(InsertPos);
4404 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
4405 unsigned AS = getLoadStoreAddressSpace(InsertPos);
4406
4407 unsigned InterleaveFactor = Group->getFactor();
4408 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
4409
4410 // Holds the indices of existing members in the interleaved group.
4411 SmallVector<unsigned, 4> Indices;
4412 for (unsigned IF = 0; IF < InterleaveFactor; IF++)
4413 if (Group->getMember(IF))
4414 Indices.push_back(IF);
4415
4416 // Calculate the cost of the whole interleaved group.
4417 bool UseMaskForGaps =
4418 (Group->requiresScalarEpilogue() && !isEpilogueAllowed()) ||
4419 (isa<StoreInst>(I) && !Group->isFull());
4420 InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
4421 InsertPos->getOpcode(), WideVecTy, Group->getFactor(), Indices,
4422 Group->getAlign(), AS, Config.CostKind, isMaskRequired(I),
4423 UseMaskForGaps);
4424
4425 if (Group->isReverse()) {
4426 // TODO: Add support for reversed masked interleaved access.
4427 assert(!isMaskRequired(I) &&
4428 "Reverse masked interleaved access not supported.");
4429 Cost += Group->getNumMembers() *
4430 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy,
4431 VectorTy, {}, Config.CostKind, 0);
4432 }
4433 return Cost;
4434}
4435
4436std::optional<InstructionCost>
4437 LoopVectorizationCostModel::getReductionPatternCost(Instruction *I,
4438 ElementCount VF,
4439 Type *Ty) const {
4440 using namespace llvm::PatternMatch;
4441 // Early exit for no inloop reductions
4442 if (Config.getInLoopReductions().empty() || VF.isScalar() ||
4443 !isa<VectorType>(Ty))
4444 return std::nullopt;
4445 auto *VectorTy = cast<VectorType>(Ty);
4446
4447 // We are looking for one of the following patterns, and finding its minimal acceptable cost:
4448 // reduce(mul(ext(A), ext(B))) or
4449 // reduce(mul(A, B)) or
4450 // reduce(ext(A)) or
4451 // reduce(A).
4452 // The basic idea is that we walk down the tree to do that, finding the root
4453 // reduction instruction in InLoopReductionImmediateChains. From there we find
4454 // the pattern of mul/ext and test the cost of the entire pattern vs the cost
4455 // of the components. If the reduction cost is lower then we return it for the
4456 // reduction instruction and 0 for the other instructions in the pattern. If
4457 // it is not we return an invalid cost specifying the original cost method
4458 // should be used.
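// For example (hypothetical IR): for
//   %m = mul i32 %ea, %eb   ; %ea/%eb are sext of narrow loads
//   %r = add i32 %m, %phi   ; in-loop reduction chain
// the code below compares the cost of a fused multiply-accumulate reduction
// against the summed costs of the extends, the mul and a plain add
// reduction, and keeps whichever is cheaper.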
4459 Instruction *RetI = I;
4460 if (match(RetI, m_ZExtOrSExt(m_Value()))) {
4461 if (!RetI->hasOneUser())
4462 return std::nullopt;
4463 RetI = RetI->user_back();
4464 }
4465
4466 if (match(RetI, m_OneUse(m_Mul(m_Value(), m_Value()))) &&
4467 RetI->user_back()->getOpcode() == Instruction::Add) {
4468 RetI = RetI->user_back();
4469 }
4470
4471 // Test if the found instruction is a reduction, and if not return an invalid
4472 // cost specifying the parent to use the original cost modelling.
4473 Instruction *LastChain = Config.getInLoopReductionImmediateChain(RetI);
4474 if (!LastChain)
4475 return std::nullopt;
4476
4477 // Find the reduction this chain is a part of and calculate the basic cost of
4478 // the reduction on its own.
4479 Instruction *ReductionPhi = LastChain;
4480 while (!isa<PHINode>(ReductionPhi))
4481 ReductionPhi = Config.getInLoopReductionImmediateChain(ReductionPhi);
4482
4483 const RecurrenceDescriptor &RdxDesc =
4484 Legal->getRecurrenceDescriptor(cast<PHINode>(ReductionPhi));
4485
4486 InstructionCost BaseCost;
4487 RecurKind RK = RdxDesc.getRecurrenceKind();
4489 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK)) {
4490 BaseCost = TTI.getMinMaxReductionCost(
4491 MinMaxID, VectorTy, RdxDesc.getFastMathFlags(), Config.CostKind);
4492 } else {
4493 BaseCost = TTI.getArithmeticReductionCost(RdxDesc.getOpcode(), VectorTy,
4494 RdxDesc.getFastMathFlags(),
4495 Config.CostKind);
4496 }
4497
4498 // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
4499 // normal fmul instruction to the cost of the fadd reduction.
4500 if (RK == RecurKind::FMulAdd)
4501 BaseCost += TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy,
4502 Config.CostKind);
4503
4504 // If we're using ordered reductions then we can just return the base cost
4505 // here, since getArithmeticReductionCost calculates the full ordered
4506 // reduction cost when FP reassociation is not allowed.
4507 if (Config.useOrderedReductions(RdxDesc))
4508 return BaseCost;
4509
4510 // Get the operand that was not the reduction chain and match it to one of the
4511 // patterns, returning the better cost if it is found.
4512 Instruction *RedOp = RetI->getOperand(1) == LastChain
4513 ? dyn_cast<Instruction>(RetI->getOperand(0))
4514 : dyn_cast<Instruction>(RetI->getOperand(1));
4515
4516 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
4517
4518 Instruction *Op0, *Op1;
4519 if (RedOp && RdxDesc.getOpcode() == Instruction::Add &&
4520 match(RedOp,
4521 m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) &&
4522 match(Op0, m_ZExtOrSExt(m_Value())) &&
4523 Op0->getOpcode() == Op1->getOpcode() &&
4524 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
4525 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
4526 (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {
4527
4528 // Matched reduce.add(ext(mul(ext(A), ext(B)))
4529 // Note that the extend opcodes need to all match, or if A==B they will have
4530 // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
4531 // which is equally fine.
4532 bool IsUnsigned = isa<ZExtInst>(Op0);
4533 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
4534 auto *MulType = VectorType::get(Op0->getType(), VectorTy);
4535
4536 InstructionCost ExtCost =
4537 TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
4538 TTI::CastContextHint::None, Config.CostKind, Op0);
4539 InstructionCost MulCost =
4540 TTI.getArithmeticInstrCost(Instruction::Mul, MulType, Config.CostKind);
4541 InstructionCost Ext2Cost = TTI.getCastInstrCost(
4542 RedOp->getOpcode(), VectorTy, MulType, TTI::CastContextHint::None,
4543 Config.CostKind, RedOp);
4544
4545 InstructionCost RedCost = TTI.getMulAccReductionCost(
4546 IsUnsigned, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), ExtType,
4547 Config.CostKind);
4548
4549 if (RedCost.isValid() &&
4550 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
4551 return I == RetI ? RedCost : 0;
4552 } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) &&
4553 !TheLoop->isLoopInvariant(RedOp)) {
4554 // Matched reduce(ext(A))
4555 bool IsUnsigned = isa<ZExtInst>(RedOp);
4556 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
4557 InstructionCost RedCost = TTI.getExtendedReductionCost(
4558 RdxDesc.getOpcode(), IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
4559 RdxDesc.getFastMathFlags(), Config.CostKind);
4560
4561 InstructionCost ExtCost = TTI.getCastInstrCost(
4562 RedOp->getOpcode(), VectorTy, ExtType, TTI::CastContextHint::None,
4563 Config.CostKind, RedOp);
4564 if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
4565 return I == RetI ? RedCost : 0;
4566 } else if (RedOp && RdxDesc.getOpcode() == Instruction::Add &&
4567 match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) {
4568 if (match(Op0, m_ZExtOrSExt(m_Value())) &&
4569 Op0->getOpcode() == Op1->getOpcode() &&
4570 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
4571 bool IsUnsigned = isa<ZExtInst>(Op0);
4572 Type *Op0Ty = Op0->getOperand(0)->getType();
4573 Type *Op1Ty = Op1->getOperand(0)->getType();
4574 Type *LargestOpTy =
4575 Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty
4576 : Op0Ty;
4577 auto *ExtType = VectorType::get(LargestOpTy, VectorTy);
4578
4579 // Matched reduce.add(mul(ext(A), ext(B))), where the two exts may have
4580 // different sizes. We take the largest type as the ext to reduce and add
4581 // the cost of the extra extend, e.g. reduce(mul(ext(ext(A)), ext(B))).
4582 InstructionCost ExtCost0 = TTI.getCastInstrCost(
4583 Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy),
4584 TTI::CastContextHint::None, Config.CostKind, Op0);
4585 InstructionCost ExtCost1 = TTI.getCastInstrCost(
4586 Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy),
4587 TTI::CastContextHint::None, Config.CostKind, Op1);
4588 InstructionCost MulCost = TTI.getArithmeticInstrCost(
4589 Instruction::Mul, VectorTy, Config.CostKind);
4590
4591 InstructionCost RedCost = TTI.getMulAccReductionCost(
4592 IsUnsigned, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), ExtType,
4593 Config.CostKind);
4594 InstructionCost ExtraExtCost = 0;
4595 if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
4596 Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;
4597 ExtraExtCost = TTI.getCastInstrCost(
4598 ExtraExtOp->getOpcode(), ExtType,
4599 VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy),
4600 TTI::CastContextHint::None, Config.CostKind, ExtraExtOp);
4601 }
4602
4603 if (RedCost.isValid() &&
4604 (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost))
4605 return I == RetI ? RedCost : 0;
4606 } else if (!match(I, m_ZExtOrSExt(m_Value()))) {
4607 // Matched reduce.add(mul())
4608 InstructionCost MulCost = TTI.getArithmeticInstrCost(
4609 Instruction::Mul, VectorTy, Config.CostKind);
4610
4611 InstructionCost RedCost = TTI.getMulAccReductionCost(
4612 true, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), VectorTy,
4613 Config.CostKind);
4614
4615 if (RedCost.isValid() && RedCost < MulCost + BaseCost)
4616 return I == RetI ? RedCost : 0;
4617 }
4618 }
4619
4620 return I == RetI ? std::optional<InstructionCost>(BaseCost) : std::nullopt;
4621}
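//
// Illustrative sketch (not taken from this file): the extended-multiply
// patterns matched above arise from source loops such as
//
//   //   int sum = 0;                       // a, b: signed char arrays
//   //   for (unsigned i = 0; i < n; ++i)
//   //     sum += (int)a[i] * (int)b[i];
//
// which the vectorizer sees as reduce.add(mul(sext(A), sext(B))). On targets
// with dot-product style instructions (e.g. AArch64 sdot/udot),
// TTI.getMulAccReductionCost() is then much cheaper than the separate
// ext + mul + reduce.add costs it is compared against.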
4622
4623InstructionCost
4624LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
4625 ElementCount VF) {
4626 // Calculate scalar cost only. Vectorization cost should be ready at this
4627 // moment.
4628 if (VF.isScalar()) {
4629 Type *ValTy = getLoadStoreType(I);
4630 Type *PtrTy = getLoadStorePointerOperand(I)->getType();
4631 const Align Alignment = getLoadStoreAlignment(I);
4632 unsigned AS = getLoadStoreAddressSpace(I);
4633
4634 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
4635 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr,
4636 Config.CostKind) +
4637 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
4638 Config.CostKind, OpInfo, I);
4639 }
4640 return getWideningCost(I, VF);
4641}
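//
// Usage note (illustrative): for a scalar VF the cost above is simply the sum
// of an address-computation cost and the memory operation itself; for vector
// VFs the widening decision, and with it the cost, was already recorded while
// setting cost-based widening decisions, so getWideningCost() is a lookup.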
4642
4643InstructionCost
4644LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
4645 ElementCount VF) const {
4646
4647 // There is no mechanism yet to create a scalable scalarization loop,
4648 // so this is currently Invalid.
4649 if (VF.isScalable())
4650 return InstructionCost::getInvalid();
4651
4652 if (VF.isScalar())
4653 return 0;
4654
4655 InstructionCost Cost = 0;
4656 Type *RetTy = toVectorizedTy(I->getType(), VF);
4657 if (!RetTy->isVoidTy() &&
4658 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) {
4659
4661 if (isa<LoadInst>(I))
4663 else if (isa<StoreInst>(I))
4665
4666 for (Type *VectorTy : getContainedTypes(RetTy)) {
4667 Cost += TTI.getScalarizationOverhead(
4669 /*Insert=*/true, /*Extract=*/false, Config.CostKind,
4670 /*ForPoisonSrc=*/true, {}, VIC);
4671 }
4672 }
4673
4674 // Some targets keep addresses scalar.
4675 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
4676 return Cost;
4677
4678 // Some targets support efficient element stores.
4679 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
4680 return Cost;
4681
4682 // Collect operands to consider.
4683 CallInst *CI = dyn_cast<CallInst>(I);
4684 Instruction::op_range Ops = CI ? CI->args() : I->operands();
4685
4686 // Skip operands that do not require extraction/scalarization and do not incur
4687 // any overhead.
4688 SmallVector<Type *> Tys;
4689 for (auto *V : filterExtractingOperands(Ops, VF))
4690 Tys.push_back(maybeVectorizeType(V->getType(), VF));
4691
4695 return Cost +
4696 TTI.getOperandsScalarizationOverhead(Tys, Config.CostKind, OperandVIC);
4697}
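//
// Illustrative example (assumes a fixed VF of 4 and no efficient vector
// element load/store support): scalarizing a call such as
//
//   //   %r = call float @expf(float %x)
//
// needs 4 lane extracts of %x (operand scalarization) plus 4 inserts to
// rebuild the <4 x float> result; those insert/extract components are what
// this function estimates. The 4 scalar calls themselves are costed by the
// caller (see the call-widening logic below).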
4698
4699void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
4700 if (VF.isScalar())
4701 return;
4702 NumPredStores = 0;
4703 for (BasicBlock *BB : TheLoop->blocks()) {
4704 // For each instruction in the old loop.
4705 for (Instruction &I : *BB) {
4706 Value *Ptr = getLoadStorePointerOperand(&I);
4707 if (!Ptr)
4708 continue;
4709
4710 // TODO: We should generate better code and update the cost model for
4711 // predicated uniform stores. Today they are treated as any other
4712 // predicated store (see added test cases in
4713 // invariant-store-vectorization.ll).
4714 if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF))
4715 NumPredStores++;
4716
4717 if (Legal->isUniformMemOp(I, VF)) {
4718 auto IsLegalToScalarize = [&]() {
4719 if (!VF.isScalable())
4720 // Scalarization of fixed length vectors "just works".
4721 return true;
4722
4723 // We have dedicated lowering for unpredicated uniform loads and
4724 // stores. Note that even with tail folding we know that at least
4725 // one lane is active (i.e. generalized predication is not possible
4726 // here), and the logic below depends on this fact.
4727 if (!foldTailByMasking())
4728 return true;
4729
4730 // For scalable vectors, a uniform memop load is always
4731 // uniform-by-parts and we know how to scalarize that.
4732 if (isa<LoadInst>(I))
4733 return true;
4734
4735 // A uniform store isn't necessarily uniform-by-parts,
4736 // so we can't assume scalarization.
4737 auto &SI = cast<StoreInst>(I);
4738 return TheLoop->isLoopInvariant(SI.getValueOperand());
4739 };
4740
4741 const InstructionCost GatherScatterCost =
4742 Config.isLegalGatherOrScatter(&I, VF)
4743 ? getGatherScatterCost(&I, VF)
4744 : InstructionCost::getInvalid();
4745
4746 // Load: Scalar load + broadcast
4747 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
4748 // FIXME: This cost is a significant under-estimate for tail folded
4749 // memory ops.
4750 const InstructionCost ScalarizationCost =
4751 IsLegalToScalarize() ? getUniformMemOpCost(&I, VF)
4752 : InstructionCost::getInvalid();
4753
4754 // Choose the better solution for the current VF. Note that Invalid
4755 // costs compare as maximally large; if both are invalid, scalarization
4756 // with an invalid cost is chosen, signalling failure and a vectorization abort.
4757 if (GatherScatterCost < ScalarizationCost)
4758 setWideningDecision(&I, VF, CM_GatherScatter, GatherScatterCost);
4759 else
4760 setWideningDecision(&I, VF, CM_Scalarize, ScalarizationCost);
4761 continue;
4762 }
4763
4764 // We assume that widening is the best solution when possible.
4765 if (memoryInstructionCanBeWidened(&I, VF)) {
4766 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
4767 int ConsecutiveStride = Legal->isConsecutivePtr(
4768 getLoadStoreType(&I), getLoadStorePointerOperand(&I));
4769 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
4770 "Expected consecutive stride.");
4771 InstWidening Decision =
4772 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
4773 setWideningDecision(&I, VF, Decision, Cost);
4774 continue;
4775 }
4776
4777 // Choose between Interleaving, Gather/Scatter or Scalarization.
4778 InstructionCost InterleaveCost = InstructionCost::getInvalid();
4779 unsigned NumAccesses = 1;
4780 if (isAccessInterleaved(&I)) {
4781 const auto *Group = getInterleavedAccessGroup(&I);
4782 assert(Group && "Failed to get an interleaved access group.");
4783
4784 // Make one decision for the whole group.
4785 if (getWideningDecision(&I, VF) != CM_Unknown)
4786 continue;
4787
4788 NumAccesses = Group->getNumMembers();
4789 if (interleavedAccessCanBeWidened(&I, VF))
4790 InterleaveCost = getInterleaveGroupCost(&I, VF);
4791 }
4792
4793 InstructionCost GatherScatterCost =
4794 Config.isLegalGatherOrScatter(&I, VF)
4795 ? getGatherScatterCost(&I, VF) * NumAccesses
4796 : InstructionCost::getInvalid();
4797
4798 InstructionCost ScalarizationCost =
4799 getMemInstScalarizationCost(&I, VF) * NumAccesses;
4800
4801 // Choose better solution for the current VF,
4802 // write down this decision and use it during vectorization.
4803 InstructionCost Cost;
4804 InstWidening Decision;
4805 if (InterleaveCost <= GatherScatterCost &&
4806 InterleaveCost < ScalarizationCost) {
4807 Decision = CM_Interleave;
4808 Cost = InterleaveCost;
4809 } else if (GatherScatterCost < ScalarizationCost) {
4810 Decision = CM_GatherScatter;
4811 Cost = GatherScatterCost;
4812 } else {
4813 Decision = CM_Scalarize;
4814 Cost = ScalarizationCost;
4815 }
4816 // If the instruction belongs to an interleave group, the whole group
4817 // receives the same decision. The cost is computed for the group as a
4818 // whole, but it will actually be assigned to a single instruction.
4819 if (const auto *Group = getInterleavedAccessGroup(&I)) {
4820 if (Decision == CM_Scalarize) {
4821 for (Instruction *I : Group->members())
4822 setWideningDecision(I, VF, Decision,
4823 getMemInstScalarizationCost(I, VF));
4824 } else {
4825 setWideningDecision(Group, VF, Decision, Cost);
4826 }
4827 } else
4828 setWideningDecision(&I, VF, Decision, Cost);
4829 }
4830 }
4831
4832 // Make sure that any load of address and any other address computation
4833 // remains scalar unless there is gather/scatter support. This avoids
4834 // inevitable extracts into address registers, and also has the benefit of
4835 // activating LSR more, since that pass can't optimize vectorized
4836 // addresses.
4837 if (TTI.prefersVectorizedAddressing())
4838 return;
4839
4840 // Start with all scalar pointer uses.
4841 SmallPtrSet<Instruction *, 8> AddrDefs;
4842 for (BasicBlock *BB : TheLoop->blocks())
4843 for (Instruction &I : *BB) {
4844 Instruction *PtrDef =
4845 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
4846 if (PtrDef && TheLoop->contains(PtrDef) &&
4847 getWideningDecision(&I, VF) != CM_GatherScatter)
4848 AddrDefs.insert(PtrDef);
4849 }
4850
4851 // Add all instructions used to generate the addresses.
4852 SmallVector<Instruction *, 4> Worklist;
4853 append_range(Worklist, AddrDefs);
4854 while (!Worklist.empty()) {
4855 Instruction *I = Worklist.pop_back_val();
4856 for (auto &Op : I->operands())
4857 if (auto *InstOp = dyn_cast<Instruction>(Op))
4858 if (TheLoop->contains(InstOp) && !isa<PHINode>(InstOp) &&
4859 AddrDefs.insert(InstOp).second)
4860 Worklist.push_back(InstOp);
4861 }
4862
4863 auto UpdateMemOpUserCost = [this, VF](LoadInst *LI) {
4864 // If there are direct memory op users of the newly scalarized load,
4865 // their cost may have changed because there's no scalarization
4866 // overhead for the operand. Update it.
4867 for (User *U : LI->users()) {
4868 if (!isa<LoadInst, StoreInst>(U))
4869 continue;
4870 if (getWideningDecision(cast<Instruction>(U), VF) != CM_Scalarize)
4871 continue;
4872 setWideningDecision(
4873 cast<Instruction>(U), VF, CM_Scalarize,
4874 getMemInstScalarizationCost(cast<Instruction>(U), VF));
4875 }
4876 };
4877 for (auto *I : AddrDefs) {
4878 if (isa<LoadInst>(I)) {
4879 // Setting the desired widening decision should ideally be handled by
4880 // the cost functions, but since this involves finding out whether the
4881 // loaded register is involved in an address computation, it is
4882 // instead changed here when we know this is the case.
4883 InstWidening Decision = getWideningDecision(I, VF);
4884 if (!isPredicatedInst(I) &&
4885 (Decision == CM_Widen || Decision == CM_Widen_Reverse ||
4886 (!Legal->isUniformMemOp(*I, VF) && Decision == CM_Scalarize))) {
4887 // Scalarize a widened load of address or update the cost of a scalar
4888 // load of an address.
4889 setWideningDecision(
4890 I, VF, CM_Scalarize,
4891 (VF.getKnownMinValue() *
4892 getMemoryInstructionCost(I, ElementCount::getFixed(1))));
4893 UpdateMemOpUserCost(cast<LoadInst>(I));
4894 } else if (const auto *Group = getInterleavedAccessGroup(I)) {
4895 // Scalarize all members of this interleaved group when any member
4896 // is used as an address. The address-used load skips scalarization
4897 // overhead, other members include it.
4898 for (Instruction *Member : Group->members()) {
4899 InstructionCost Cost = AddrDefs.contains(Member)
4900 ? (VF.getKnownMinValue() *
4901 getMemoryInstructionCost(
4902 Member, ElementCount::getFixed(1)))
4903 : getMemInstScalarizationCost(Member, VF);
4904 setWideningDecision(Member, VF, CM_Scalarize, Cost);
4905 UpdateMemOpUserCost(cast<LoadInst>(Member));
4906 }
4907 }
4908 } else {
4909 // Cannot scalarize fixed-order recurrence phis at the moment.
4910 if (isa<PHINode>(I) && Legal->isFixedOrderRecurrence(cast<PHINode>(I)))
4911 continue;
4912
4913 // Make sure I gets scalarized and a cost estimate without
4914 // scalarization overhead.
4915 ForcedScalars[VF].insert(I);
4916 }
4917 }
4918}
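//
// Summary sketch (illustrative): for a consecutive access the widening
// decision above is CM_Widen (or CM_Widen_Reverse for stride -1); an access
// in an interleave group prefers CM_Interleave when it beats a gather or
// scatter; otherwise the cheaper of CM_GatherScatter and CM_Scalarize is
// recorded. The chosen cost is later returned by getWideningCost() when the
// instruction itself is costed.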
4919
4920void LoopVectorizationCostModel::setVectorizedCallDecision(ElementCount VF) {
4921 assert(!VF.isScalar() &&
4922 "Trying to set a vectorization decision for a scalar VF");
4923
4924 auto ForcedScalar = ForcedScalars.find(VF);
4925 for (BasicBlock *BB : TheLoop->blocks()) {
4926 // For each instruction in the old loop.
4927 for (Instruction &I : *BB) {
4928 CallInst *CI = dyn_cast<CallInst>(&I);
4929
4930 if (!CI)
4931 continue;
4932
4933 InstructionCost ScalarCost = InstructionCost::getInvalid();
4934 InstructionCost VectorCost = InstructionCost::getInvalid();
4935 InstructionCost IntrinsicCost = InstructionCost::getInvalid();
4936 Function *ScalarFunc = CI->getCalledFunction();
4937 Type *ScalarRetTy = CI->getType();
4938 SmallVector<Type *, 4> Tys, ScalarTys;
4939 for (auto &ArgOp : CI->args())
4940 ScalarTys.push_back(ArgOp->getType());
4941
4942 // Estimate cost of scalarized vector call. The source operands are
4943 // assumed to be vectors, so we need to extract individual elements from
4944 // there, execute VF scalar calls, and then gather the result into the
4945 // vector return value.
4946 if (VF.isFixed()) {
4947 InstructionCost ScalarCallCost = TTI.getCallInstrCost(
4948 ScalarFunc, ScalarRetTy, ScalarTys, Config.CostKind);
4949
4950 // Compute costs of unpacking argument values for the scalar calls and
4951 // packing the return values to a vector.
4952 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
4953 ScalarCost = ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
4954 } else {
4955 // There is no point attempting to calculate the scalar cost for a
4956 // scalable VF as we know it will be Invalid.
4957 assert(!getScalarizationOverhead(CI, VF).isValid() &&
4958 "Unexpected valid cost for scalarizing scalable vectors");
4959 ScalarCost = InstructionCost::getInvalid();
4960 }
4961
4962 // Honor ForcedScalars and UniformAfterVectorization decisions.
4963 // TODO: For calls, it might still be more profitable to widen. Use
4964 // VPlan-based cost model to compare different options.
4965 if (VF.isVector() && ((ForcedScalar != ForcedScalars.end() &&
4966 ForcedScalar->second.contains(CI)) ||
4967 isUniformAfterVectorization(CI, VF))) {
4968 setCallWideningDecision(CI, VF, CM_Scalarize, nullptr,
4969 Intrinsic::not_intrinsic, std::nullopt,
4970 ScalarCost);
4971 continue;
4972 }
4973
4974 bool MaskRequired = isMaskRequired(CI);
4975 // Compute corresponding vector type for return value and arguments.
4976 Type *RetTy = toVectorizedTy(ScalarRetTy, VF);
4977 for (Type *ScalarTy : ScalarTys)
4978 Tys.push_back(toVectorizedTy(ScalarTy, VF));
4979
4980 // An in-loop reduction using an fmuladd intrinsic is a special case;
4981 // we don't want the normal cost for that intrinsic.
4982 if (RecurrenceDescriptor::isFMulAddIntrinsic(CI))
4983 if (auto RedCost = getReductionPatternCost(CI, VF, RetTy)) {
4984 setCallWideningDecision(CI, VF, CM_IntrinsicCall, nullptr,
4985 getVectorIntrinsicIDForCall(CI, TLI),
4986 std::nullopt, *RedCost);
4987 continue;
4988 }
4989
4990 // Find the cost of vectorizing the call, if we can find a suitable
4991 // vector variant of the function.
4992 VFInfo FuncInfo;
4993 Function *VecFunc = nullptr;
4994 // Search through any available variants for one we can use at this VF.
4995 for (VFInfo &Info : VFDatabase::getMappings(*CI)) {
4996 // Must match requested VF.
4997 if (Info.Shape.VF != VF)
4998 continue;
4999
5000 // Must take a mask argument if one is required
5001 if (MaskRequired && !Info.isMasked())
5002 continue;
5003
5004 // Check that all parameter kinds are supported
5005 bool ParamsOk = true;
5006 for (VFParameter Param : Info.Shape.Parameters) {
5007 switch (Param.ParamKind) {
5008 case VFParamKind::Vector:
5009 break;
5010 case VFParamKind::OMP_Uniform: {
5011 Value *ScalarParam = CI->getArgOperand(Param.ParamPos);
5012 // Make sure the scalar parameter in the loop is invariant.
5013 if (!PSE.getSE()->isLoopInvariant(PSE.getSCEV(ScalarParam),
5014 TheLoop))
5015 ParamsOk = false;
5016 break;
5017 }
5018 case VFParamKind::OMP_Linear: {
5019 Value *ScalarParam = CI->getArgOperand(Param.ParamPos);
5020 // Find the stride for the scalar parameter in this loop and see if
5021 // it matches the stride for the variant.
5022 // TODO: do we need to figure out the cost of an extract to get the
5023 // first lane? Or do we hope that it will be folded away?
5024 ScalarEvolution *SE = PSE.getSE();
5025 if (!match(SE->getSCEV(ScalarParam),
5027 m_SCEV(), m_scev_SpecificSInt(Param.LinearStepOrPos),
5029 ParamsOk = false;
5030 break;
5031 }
5032 case VFParamKind::GlobalPredicate:
5033 break;
5034 default:
5035 ParamsOk = false;
5036 break;
5037 }
5038 }
5039
5040 if (!ParamsOk)
5041 continue;
5042
5043 // Found a suitable candidate, stop here.
5044 VecFunc = CI->getModule()->getFunction(Info.VectorName);
5045 FuncInfo = Info;
5046 break;
5047 }
5048
5049 if (TLI && VecFunc && !CI->isNoBuiltin())
5050 VectorCost = TTI.getCallInstrCost(nullptr, RetTy, Tys, Config.CostKind);
5051
5052 // Find the cost of an intrinsic; some targets may have instructions that
5053 // perform the operation without needing an actual call.
5054 Intrinsic::ID IID = getVectorIntrinsicIDForCall(CI, TLI);
5055 if (IID != Intrinsic::not_intrinsic)
5056 IntrinsicCost = getVectorIntrinsicCost(CI, VF);
5057
5058 InstructionCost Cost = ScalarCost;
5059 InstWidening Decision = CM_Scalarize;
5060
5061 if (VectorCost.isValid() && VectorCost <= Cost) {
5062 Cost = VectorCost;
5063 Decision = CM_VectorCall;
5064 }
5065
5066 if (IntrinsicCost.isValid() && IntrinsicCost <= Cost) {
5067 Cost = IntrinsicCost;
5068 Decision = CM_IntrinsicCall;
5069 }
5070
5071 setCallWideningDecision(CI, VF, Decision, VecFunc, IID,
5072 FuncInfo.getParamIndexForOptionalMask(), Cost);
5073 }
5074 }
5075}
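//
// Illustrative example (variant name is an assumption, not from this file):
// for a loop calling sinf, VFDatabase::getMappings may expose a mapping such
// as _ZGVnN4v_sinf (VF = 4, unmasked) declared via the
// "vector-function-abi-variant" attribute. At VF = 4 the code above compares
// the scalarized cost, the vector-call cost (CM_VectorCall), and any
// intrinsic lowering (CM_IntrinsicCall), and records the cheapest via
// setCallWideningDecision().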
5076
5077bool LoopVectorizationCostModel::shouldConsiderInvariant(Value *Op) {
5078 if (!Legal->isInvariant(Op))
5079 return false;
5080 // Consider Op invariant only if neither it nor its operands are predicated
5081 // instructions in the loop; a predicated instruction is not trivially hoistable.
5082 auto *OpI = dyn_cast<Instruction>(Op);
5083 return !OpI || !TheLoop->contains(OpI) ||
5084 (!isPredicatedInst(OpI) &&
5085 (!isa<PHINode>(OpI) || OpI->getParent() != TheLoop->getHeader()) &&
5086 all_of(OpI->operands(),
5087 [this](Value *Op) { return shouldConsiderInvariant(Op); }));
5088}
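//
// Usage note: this predicate feeds operand-info classification below; an
// operand that is invariant and not computed by predicated in-loop
// instructions may be treated as OK_UniformValue when costing, e.g., the
// divisor of a division or the shift amount of a shift.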
5089
5090InstructionCost
5091LoopVectorizationCostModel::getInstructionCost(Instruction *I,
5092 ElementCount VF) {
5093 // If we know that this instruction will remain uniform, check the cost of
5094 // the scalar version.
5095 if (isUniformAfterVectorization(I, VF))
5096 VF = ElementCount::getFixed(1);
5097
5098 if (VF.isVector() && isProfitableToScalarize(I, VF))
5099 return InstsToScalarize[VF][I];
5100
5101 // Forced scalars do not have any scalarization overhead.
5102 auto ForcedScalar = ForcedScalars.find(VF);
5103 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
5104 auto InstSet = ForcedScalar->second;
5105 if (InstSet.count(I))
5106 return getInstructionCost(I, ElementCount::getFixed(1)) *
5107 VF.getKnownMinValue();
5108 }
5109
5110 const auto &MinBWs = Config.getMinimalBitwidths();
5111 uint64_t InstrMinBWs = MinBWs.lookup(I);
5112 Type *RetTy = I->getType();
5113 if (canTruncateToMinimalBitwidth(I, VF))
5114 RetTy = IntegerType::get(RetTy->getContext(), InstrMinBWs);
5115 auto *SE = PSE.getSE();
5116
5117 Type *VectorTy;
5118 if (isScalarAfterVectorization(I, VF)) {
5119 [[maybe_unused]] auto HasSingleCopyAfterVectorization =
5120 [this](Instruction *I, ElementCount VF) -> bool {
5121 if (VF.isScalar())
5122 return true;
5123
5124 auto Scalarized = InstsToScalarize.find(VF);
5125 assert(Scalarized != InstsToScalarize.end() &&
5126 "VF not yet analyzed for scalarization profitability");
5127 return !Scalarized->second.count(I) &&
5128 llvm::all_of(I->users(), [&](User *U) {
5129 auto *UI = cast<Instruction>(U);
5130 return !Scalarized->second.count(UI);
5131 });
5132 };
5133
5134 // With the exception of GEPs and PHIs, after scalarization there should
5135 // only be one copy of the instruction generated in the loop. This is
5136 // because the VF is either 1, or any instructions that need scalarizing
5137 // have already been dealt with by the time we get here. As a result,
5138 // it means we don't have to multiply the instruction cost by VF.
5139 assert(I->getOpcode() == Instruction::GetElementPtr ||
5140 I->getOpcode() == Instruction::PHI ||
5141 (I->getOpcode() == Instruction::BitCast &&
5142 I->getType()->isPointerTy()) ||
5143 HasSingleCopyAfterVectorization(I, VF));
5144 VectorTy = RetTy;
5145 } else
5146 VectorTy = toVectorizedTy(RetTy, VF);
5147
5148 if (VF.isVector() && VectorTy->isVectorTy() &&
5149 !TTI.getNumberOfParts(VectorTy))
5150 return InstructionCost::getInvalid();
5151
5152 // TODO: We need to estimate the cost of intrinsic calls.
5153 switch (I->getOpcode()) {
5154 case Instruction::GetElementPtr:
5155 // We mark this instruction as zero-cost because the cost of GEPs in
5156 // vectorized code depends on whether the corresponding memory instruction
5157 // is scalarized or not. Therefore, we handle GEPs with the memory
5158 // instruction cost.
5159 return 0;
5160 case Instruction::UncondBr:
5161 case Instruction::CondBr: {
5162 // In cases of scalarized and predicated instructions, there will be VF
5163 // predicated blocks in the vectorized loop. Each branch around these
5164 // blocks requires also an extract of its vector compare i1 element.
5165 // Note that the conditional branch from the loop latch will be replaced by
5166 // a single branch controlling the loop, so there is no extra overhead from
5167 // scalarization.
5168 bool ScalarPredicatedBB = false;
5169 auto *BI = dyn_cast<CondBrInst>(I);
5170 if (VF.isVector() && BI &&
5171 (PredicatedBBsAfterVectorization[VF].count(BI->getSuccessor(0)) ||
5172 PredicatedBBsAfterVectorization[VF].count(BI->getSuccessor(1))) &&
5173 BI->getParent() != TheLoop->getLoopLatch())
5174 ScalarPredicatedBB = true;
5175
5176 if (ScalarPredicatedBB) {
5177 // Not possible to scalarize scalable vector with predicated instructions.
5178 if (VF.isScalable())
5179 return InstructionCost::getInvalid();
5180 // Return cost for branches around scalarized and predicated blocks.
5181 auto *VecI1Ty =
5182 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
5183 return (TTI.getScalarizationOverhead(
5184 VecI1Ty, APInt::getAllOnes(VF.getFixedValue()),
5185 /*Insert*/ false, /*Extract*/ true, Config.CostKind) +
5186 (TTI.getCFInstrCost(Instruction::CondBr, Config.CostKind) *
5187 VF.getFixedValue()));
5188 }
5189
5190 if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
5191 // The back-edge branch will remain, as will all scalar branches.
5192 return TTI.getCFInstrCost(Instruction::UncondBr, Config.CostKind);
5193
5194 // This branch will be eliminated by if-conversion.
5195 return 0;
5196 // Note: We currently assume zero cost for an unconditional branch inside
5197 // a predicated block since it will become a fall-through, although we
5198 // may decide in the future to call TTI for all branches.
5199 }
5200 case Instruction::Switch: {
5201 if (VF.isScalar())
5202 return TTI.getCFInstrCost(Instruction::Switch, Config.CostKind);
5203 auto *Switch = cast<SwitchInst>(I);
5204 return Switch->getNumCases() *
5205 TTI.getCmpSelInstrCost(
5206 Instruction::ICmp,
5207 toVectorTy(Switch->getCondition()->getType(), VF),
5208 toVectorTy(Type::getInt1Ty(I->getContext()), VF),
5209 CmpInst::ICMP_EQ, Config.CostKind);
5210 }
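// Worked example for the switch cost above (illustrative numbers): a switch
// on an i32 with 3 cases at VF = 4 is modelled as 3 vector compares,
// 3 * getCmpSelInstrCost(ICmp, <4 x i32>, <4 x i1>, ICMP_EQ); if one such
// compare costs 1, the switch costs 3.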
5211 case Instruction::PHI: {
5212 auto *Phi = cast<PHINode>(I);
5213
5214 // First-order recurrences are replaced by vector shuffles inside the loop.
5215 if (VF.isVector() && Legal->isFixedOrderRecurrence(Phi)) {
5216 return TTI.getShuffleCost(
5217 TargetTransformInfo::SK_Splice, cast<VectorType>(VectorTy),
5218 cast<VectorType>(VectorTy), {}, Config.CostKind, -1);
5219 }
5220
5221 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
5222 // converted into select instructions. We require N - 1 selects per phi
5223 // node, where N is the number of incoming values.
5224 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) {
5225 Type *ResultTy = Phi->getType();
5226
5227 // All instructions in an Any-of reduction chain are narrowed to bool.
5228 // Check if that is the case for this phi node.
5229 auto *HeaderUser = cast_if_present<PHINode>(
5230 find_singleton<User>(Phi->users(), [this](User *U, bool) -> User * {
5231 auto *Phi = dyn_cast<PHINode>(U);
5232 if (Phi && Phi->getParent() == TheLoop->getHeader())
5233 return Phi;
5234 return nullptr;
5235 }));
5236 if (HeaderUser) {
5237 auto &ReductionVars = Legal->getReductionVars();
5238 auto Iter = ReductionVars.find(HeaderUser);
5239 if (Iter != ReductionVars.end() &&
5240 RecurrenceDescriptor::isAnyOfRecurrenceKind(
5241 Iter->second.getRecurrenceKind()))
5242 ResultTy = Type::getInt1Ty(Phi->getContext());
5243 }
5244 return (Phi->getNumIncomingValues() - 1) *
5245 TTI.getCmpSelInstrCost(
5246 Instruction::Select, toVectorTy(ResultTy, VF),
5247 toVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
5248 CmpInst::BAD_ICMP_PREDICATE, Config.CostKind);
5249 }
5250
5251 // When tail folding with EVL, if the phi is part of an out of loop
5252 // reduction then it will be transformed into a wide vp_merge.
5253 if (VF.isVector() && foldTailWithEVL() &&
5254 Legal->getReductionVars().contains(Phi) &&
5255 !Config.isInLoopReduction(Phi)) {
5256 IntrinsicCostAttributes ICA(
5257 Intrinsic::vp_merge, toVectorTy(Phi->getType(), VF),
5258 {toVectorTy(Type::getInt1Ty(Phi->getContext()), VF)});
5259 return TTI.getIntrinsicInstrCost(ICA, Config.CostKind);
5260 }
5261
5262 return TTI.getCFInstrCost(Instruction::PHI, Config.CostKind);
5263 }
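// Illustrative example for the non-header phi case above: a phi merging two
// if-converted paths,
//
//   //   %p = phi i32 [ %a, %then ], [ %b, %else ]
//
// becomes N - 1 vector selects for N incoming values, so a three-input phi
// at VF = 4 is costed as 2 selects producing <4 x i32>.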
5264 case Instruction::UDiv:
5265 case Instruction::SDiv:
5266 case Instruction::URem:
5267 case Instruction::SRem:
5268 if (VF.isVector() && isPredicatedInst(I)) {
5269 const auto [ScalarCost, SafeDivisorCost] = getDivRemSpeculationCost(I, VF);
5270 return isDivRemScalarWithPredication(ScalarCost, SafeDivisorCost) ?
5271 ScalarCost : SafeDivisorCost;
5272 }
5273 // We've proven all lanes safe to speculate, fall through.
5274 [[fallthrough]];
5275 case Instruction::Add:
5276 case Instruction::Sub: {
5277 auto Info = Legal->getHistogramInfo(I);
5278 if (Info && VF.isVector()) {
5279 const HistogramInfo *HGram = Info.value();
5280 // Assume that a non-constant update value (or a constant != 1) requires
5281 // a multiply, and add that into the cost.
5282 InstructionCost MulCost = TTI::TCC_Free;
5283 ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1));
5284 if (!RHS || RHS->getZExtValue() != 1)
5285 MulCost = TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy,
5286 Config.CostKind);
5287
5288 // Find the cost of the histogram operation itself.
5289 Type *PtrTy = VectorType::get(HGram->Load->getPointerOperandType(), VF);
5290 Type *ScalarTy = I->getType();
5291 Type *MaskTy = VectorType::get(Type::getInt1Ty(I->getContext()), VF);
5292 IntrinsicCostAttributes ICA(Intrinsic::experimental_vector_histogram_add,
5293 Type::getVoidTy(I->getContext()),
5294 {PtrTy, ScalarTy, MaskTy});
5295
5296 // Add the costs together with the add/sub operation.
5297 return TTI.getIntrinsicInstrCost(ICA, Config.CostKind) + MulCost +
5298 TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy,
5299 Config.CostKind);
5300 }
5301 [[fallthrough]];
5302 }
5303 case Instruction::FAdd:
5304 case Instruction::FSub:
5305 case Instruction::Mul:
5306 case Instruction::FMul:
5307 case Instruction::FDiv:
5308 case Instruction::FRem:
5309 case Instruction::Shl:
5310 case Instruction::LShr:
5311 case Instruction::AShr:
5312 case Instruction::And:
5313 case Instruction::Or:
5314 case Instruction::Xor: {
5315 // If we're speculating on the stride being 1, the multiplication may
5316 // fold away. We can generalize this for all operations using the notion
5317 // of neutral elements. (TODO)
5318 if (I->getOpcode() == Instruction::Mul &&
5319 ((TheLoop->isLoopInvariant(I->getOperand(0)) &&
5320 PSE.getSCEV(I->getOperand(0))->isOne()) ||
5321 (TheLoop->isLoopInvariant(I->getOperand(1)) &&
5322 PSE.getSCEV(I->getOperand(1))->isOne())))
5323 return 0;
5324
5325 // Detect reduction patterns
5326 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy))
5327 return *RedCost;
5328
5329 // Certain instructions can be cheaper to vectorize if they have a constant
5330 // second vector operand. One example of this are shifts on x86.
5331 Value *Op2 = I->getOperand(1);
5332 if (!isa<Constant>(Op2) && TheLoop->isLoopInvariant(Op2) &&
5333 PSE.getSE()->isSCEVable(Op2->getType()) &&
5334 isa<SCEVConstant>(PSE.getSCEV(Op2))) {
5335 Op2 = cast<SCEVConstant>(PSE.getSCEV(Op2))->getValue();
5336 }
5337 auto Op2Info = TTI.getOperandInfo(Op2);
5338 if (Op2Info.Kind == TargetTransformInfo::OK_AnyValue &&
5339 shouldConsiderInvariant(Op2))
5340 Op2Info.Kind = TargetTransformInfo::OK_UniformValue;
5341
5342 SmallVector<const Value *, 4> Operands(I->operand_values());
5343 return TTI.getArithmeticInstrCost(
5344 I->getOpcode(), VectorTy, Config.CostKind,
5345 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
5346 Op2Info, Operands, I, TLI);
5347 }
5348 case Instruction::FNeg: {
5349 return TTI.getArithmeticInstrCost(
5350 I->getOpcode(), VectorTy, Config.CostKind,
5351 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
5352 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
5353 I->getOperand(0), I);
5354 }
5355 case Instruction::Select: {
5356 auto *SI = cast<SelectInst>(I);
5357 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
5358 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
5359
5360 const Value *Op0, *Op1;
5361 using namespace llvm::PatternMatch;
5362 if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
5363 match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
5364 // select x, y, false --> x & y
5365 // select x, true, y --> x | y
5366 const auto [Op1VK, Op1VP] = TTI::getOperandInfo(Op0);
5367 const auto [Op2VK, Op2VP] = TTI::getOperandInfo(Op1);
5368 assert(Op0->getType()->getScalarSizeInBits() == 1 &&
5369 Op1->getType()->getScalarSizeInBits() == 1);
5370
5371 return TTI.getArithmeticInstrCost(
5372 match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And,
5373 VectorTy, Config.CostKind, {Op1VK, Op1VP}, {Op2VK, Op2VP}, {Op0, Op1},
5374 I);
5375 }
5376
5377 Type *CondTy = SI->getCondition()->getType();
5378 if (!ScalarCond)
5379 CondTy = VectorType::get(CondTy, VF);
5380
5381 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
5382 if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
5383 Pred = Cmp->getPredicate();
5384 return TTI.getCmpSelInstrCost(
5385 I->getOpcode(), VectorTy, CondTy, Pred, Config.CostKind,
5386 {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None}, I);
5387 }
5388 case Instruction::ICmp:
5389 case Instruction::FCmp: {
5390 Type *ValTy = I->getOperand(0)->getType();
5391
5392 if (canTruncateToMinimalBitwidth(I, VF)) {
5393 [[maybe_unused]] Instruction *Op0AsInstruction =
5394 dyn_cast<Instruction>(I->getOperand(0));
5395 assert((!canTruncateToMinimalBitwidth(Op0AsInstruction, VF) ||
5396 InstrMinBWs == MinBWs.lookup(Op0AsInstruction)) &&
5397 "if both the operand and the compare are marked for "
5398 "truncation, they must have the same bitwidth");
5399 ValTy = IntegerType::get(ValTy->getContext(), InstrMinBWs);
5400 }
5401
5402 VectorTy = toVectorTy(ValTy, VF);
5403 return TTI.getCmpSelInstrCost(
5404 I->getOpcode(), VectorTy, CmpInst::makeCmpResultType(VectorTy),
5405 cast<CmpInst>(I)->getPredicate(), Config.CostKind,
5406 {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None}, I);
5407 }
5408 case Instruction::Store:
5409 case Instruction::Load: {
5410 ElementCount Width = VF;
5411 if (Width.isVector()) {
5412 InstWidening Decision = getWideningDecision(I, Width);
5413 assert(Decision != CM_Unknown &&
5414 "CM decision should be taken at this point");
5415 if (getWideningCost(I, Width) == InstructionCost::getInvalid())
5416 return InstructionCost::getInvalid();
5417 if (Decision == CM_Scalarize)
5418 Width = ElementCount::getFixed(1);
5419 }
5420 VectorTy = toVectorTy(getLoadStoreType(I), Width);
5421 return getMemoryInstructionCost(I, VF);
5422 }
5423 case Instruction::BitCast:
5424 if (I->getType()->isPointerTy())
5425 return 0;
5426 [[fallthrough]];
5427 case Instruction::ZExt:
5428 case Instruction::SExt:
5429 case Instruction::FPToUI:
5430 case Instruction::FPToSI:
5431 case Instruction::FPExt:
5432 case Instruction::PtrToInt:
5433 case Instruction::IntToPtr:
5434 case Instruction::SIToFP:
5435 case Instruction::UIToFP:
5436 case Instruction::Trunc:
5437 case Instruction::FPTrunc: {
5438 // Computes the CastContextHint from a Load/Store instruction.
5439 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
5440 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5441 "Expected a load or a store!");
5442
5443 if (VF.isScalar() || !TheLoop->contains(I))
5444 return TTI::CastContextHint::Normal;
5445
5446 switch (getWideningDecision(I, VF)) {
5447 case CM_GatherScatter:
5448 return TTI::CastContextHint::GatherScatter;
5449 case CM_Interleave:
5450 return TTI::CastContextHint::Interleave;
5451 case CM_Scalarize:
5452 case CM_Widen:
5453 return isMaskRequired(I) ? TTI::CastContextHint::Masked
5454 : TTI::CastContextHint::Normal;
5455 case CM_Widen_Reverse:
5456 return TTI::CastContextHint::Reversed;
5457 case CM_Unknown:
5458 llvm_unreachable("Instr did not go through cost modelling?");
5459 case CM_VectorCall:
5460 case CM_IntrinsicCall:
5461 llvm_unreachable_internal("Instr has invalid widening decision");
5462 }
5463
5464 llvm_unreachable("Unhandled case!");
5465 };
5466
5467 unsigned Opcode = I->getOpcode();
5468 TTI::CastContextHint CCH = TTI::CastContextHint::None;
5469 // For Trunc, the context is the only user, which must be a StoreInst.
5470 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
5471 if (I->hasOneUse())
5472 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
5473 CCH = ComputeCCH(Store);
5474 }
5475 // For Z/Sext, the context is the operand, which must be a LoadInst.
5476 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
5477 Opcode == Instruction::FPExt) {
5478 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
5479 CCH = ComputeCCH(Load);
5480 }
5481
5482 // We optimize the truncation of induction variables having constant
5483 // integer steps. The cost of these truncations is the same as the scalar
5484 // operation.
5485 if (isOptimizableIVTruncate(I, VF)) {
5486 auto *Trunc = cast<TruncInst>(I);
5487 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
5488 Trunc->getSrcTy(), CCH, Config.CostKind,
5489 Trunc);
5490 }
5491
5492 // Detect reduction patterns
5493 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy))
5494 return *RedCost;
5495
5496 Type *SrcScalarTy = I->getOperand(0)->getType();
5497 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
5498 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
5499 SrcScalarTy = IntegerType::get(SrcScalarTy->getContext(),
5500 MinBWs.lookup(Op0AsInstruction));
5501 Type *SrcVecTy =
5502 VectorTy->isVectorTy() ? toVectorTy(SrcScalarTy, VF) : SrcScalarTy;
5503
5504 if (canTruncateToMinimalBitwidth(I, VF)) {
5505 // If the result type is <= the source type, there will be no extend
5506 // after truncating the users to the minimal required bitwidth.
5507 if (VectorTy->getScalarSizeInBits() <= SrcVecTy->getScalarSizeInBits() &&
5508 (I->getOpcode() == Instruction::ZExt ||
5509 I->getOpcode() == Instruction::SExt))
5510 return 0;
5511 }
5512
5513 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH,
5514 Config.CostKind, I);
5515 }
5516 case Instruction::Call:
5517 return getVectorCallCost(cast<CallInst>(I), VF);
5518 case Instruction::ExtractValue:
5519 return TTI.getInstructionCost(I, Config.CostKind);
5520 case Instruction::Alloca:
5521 // We cannot easily widen alloca to a scalable alloca, as
5522 // the result would need to be a vector of pointers.
5523 if (VF.isScalable())
5524 return InstructionCost::getInvalid();
5525 return TTI.getArithmeticInstrCost(Instruction::Mul, RetTy, Config.CostKind);
5526 default:
5527 // This opcode is unknown. Assume that it is the same as 'mul'.
5528 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy,
5529 Config.CostKind);
5530 } // end of switch.
5531}
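//
// Usage sketch (illustrative): callers query this hook per instruction and
// VF, e.g.
//
//   //   InstructionCost C =
//   //       CM.getInstructionCost(&I, ElementCount::getFixed(4));
//
// and sum the results over the loop body; an Invalid cost for any
// instruction rules out that VF.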
5532
5533void LoopVectorizationCostModel::collectValuesToIgnore() {
5534 // Ignore ephemeral values.
5535 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
5536
5537 SmallVector<Value *, 4> DeadInterleavePointerOps;
5538 SmallVector<Value *, 4> DeadOps;
5539
5540 // If a scalar epilogue is required, users outside the loop won't use
5541 // live-outs from the vector loop but from the scalar epilogue. Ignore them if
5542 // that is the case.
5543 bool RequiresScalarEpilogue = requiresScalarEpilogue(true);
5544 auto IsLiveOutDead = [this, RequiresScalarEpilogue](User *U) {
5545 return RequiresScalarEpilogue &&
5546 !TheLoop->contains(cast<Instruction>(U)->getParent());
5547 };
5548
5549 LoopBlocksDFS DFS(TheLoop);
5550 DFS.perform(LI);
5551 for (BasicBlock *BB : reverse(make_range(DFS.beginRPO(), DFS.endRPO())))
5552 for (Instruction &I : reverse(*BB)) {
5553 if (VecValuesToIgnore.contains(&I) || ValuesToIgnore.contains(&I))
5554 continue;
5555
5556 // Add instructions that would be trivially dead and are only used by
5557 // values already ignored to DeadOps to seed worklist.
5558 if (wouldInstructionBeTriviallyDead(&I, TLI) &&
5559 all_of(I.users(), [this, IsLiveOutDead](User *U) {
5560 return VecValuesToIgnore.contains(U) ||
5561 ValuesToIgnore.contains(U) || IsLiveOutDead(U);
5562 }))
5563 DeadOps.push_back(&I);
5564
5565 // For interleave groups, we only create a pointer for the start of the
5566 // interleave group. Queue up addresses of group members except the insert
5567 // position for further processing.
5568 if (isAccessInterleaved(&I)) {
5569 auto *Group = getInterleavedAccessGroup(&I);
5570 if (Group->getInsertPos() == &I)
5571 continue;
5572 Value *PointerOp = getLoadStorePointerOperand(&I);
5573 DeadInterleavePointerOps.push_back(PointerOp);
5574 }
5575
5576 // Queue branches for analysis. They are dead, if their successors only
5577 // contain dead instructions.
5578 if (isa<CondBrInst>(&I))
5579 DeadOps.push_back(&I);
5580 }
5581
5582 // Mark ops feeding interleave group members as free, if they are only used
5583 // by other dead computations.
5584 for (unsigned I = 0; I != DeadInterleavePointerOps.size(); ++I) {
5585 auto *Op = dyn_cast<Instruction>(DeadInterleavePointerOps[I]);
5586 if (!Op || !TheLoop->contains(Op) || any_of(Op->users(), [this](User *U) {
5587 Instruction *UI = cast<Instruction>(U);
5588 return !VecValuesToIgnore.contains(U) &&
5589 (!isAccessInterleaved(UI) ||
5590 getInterleavedAccessGroup(UI)->getInsertPos() == UI);
5591 }))
5592 continue;
5593 VecValuesToIgnore.insert(Op);
5594 append_range(DeadInterleavePointerOps, Op->operands());
5595 }
5596
5597 // Mark ops that would be trivially dead and are only used by ignored
5598 // instructions as free.
5599 BasicBlock *Header = TheLoop->getHeader();
5600
5601 // Returns true if the block contains only dead instructions. Such blocks will
5602 // be removed by VPlan-to-VPlan transforms and won't be considered by the
5603 // VPlan-based cost model, so skip them in the legacy cost-model as well.
5604 auto IsEmptyBlock = [this](BasicBlock *BB) {
5605 return all_of(*BB, [this](Instruction &I) {
5606 return ValuesToIgnore.contains(&I) || VecValuesToIgnore.contains(&I) ||
5608 });
5609 };
5610 for (unsigned I = 0; I != DeadOps.size(); ++I) {
5611 auto *Op = dyn_cast<Instruction>(DeadOps[I]);
5612
5613 // Check if the branch should be considered dead.
5614 if (auto *Br = dyn_cast_or_null<CondBrInst>(Op)) {
5615 BasicBlock *ThenBB = Br->getSuccessor(0);
5616 BasicBlock *ElseBB = Br->getSuccessor(1);
5617 // Don't consider branches leaving the loop for simplification.
5618 if (!TheLoop->contains(ThenBB) || !TheLoop->contains(ElseBB))
5619 continue;
5620 bool ThenEmpty = IsEmptyBlock(ThenBB);
5621 bool ElseEmpty = IsEmptyBlock(ElseBB);
5622 if ((ThenEmpty && ElseEmpty) ||
5623 (ThenEmpty && ThenBB->getSingleSuccessor() == ElseBB &&
5624 ElseBB->phis().empty()) ||
5625 (ElseEmpty && ElseBB->getSingleSuccessor() == ThenBB &&
5626 ThenBB->phis().empty())) {
5627 VecValuesToIgnore.insert(Br);
5628 DeadOps.push_back(Br->getCondition());
5629 }
5630 continue;
5631 }
5632
5633 // Skip any op that shouldn't be considered dead.
5634 if (!Op || !TheLoop->contains(Op) ||
5635 (isa<PHINode>(Op) && Op->getParent() == Header) ||
5636 !wouldInstructionBeTriviallyDead(Op, TLI) ||
5637 any_of(Op->users(), [this, IsLiveOutDead](User *U) {
5638 return !VecValuesToIgnore.contains(U) &&
5639 !ValuesToIgnore.contains(U) && !IsLiveOutDead(U);
5640 }))
5641 continue;
5642
5643 // If all of Op's users are in ValuesToIgnore, add it to ValuesToIgnore
5644 // which applies for both scalar and vector versions. Otherwise it is only
5645 // dead in vector versions, so only add it to VecValuesToIgnore.
5646 if (all_of(Op->users(),
5647 [this](User *U) { return ValuesToIgnore.contains(U); }))
5648 ValuesToIgnore.insert(Op);
5649
5650 VecValuesToIgnore.insert(Op);
5651 append_range(DeadOps, Op->operands());
5652 }
5653
5654 // Ignore type-promoting instructions we identified during reduction
5655 // detection.
5656 for (const auto &Reduction : Legal->getReductionVars()) {
5657 const RecurrenceDescriptor &RedDes = Reduction.second;
5658 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
5659 VecValuesToIgnore.insert_range(Casts);
5660 }
5661 // Ignore type-casting instructions we identified during induction
5662 // detection.
5663 for (const auto &Induction : Legal->getInductionVars()) {
5664 const InductionDescriptor &IndDes = Induction.second;
5665 VecValuesToIgnore.insert_range(IndDes.getCastInsts());
5666 }
5667}
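//
// Illustrative example: for an interleave group covering a[2*i] and
// a[2*i+1], only the group's insert position needs an address, so the GEP
// feeding the other member's pointer operand lands in VecValuesToIgnore
// above and is treated as free when costing the vector loop.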
5668
5669// This function will select a scalable VF if the target supports scalable
5670// vectors and a fixed one otherwise.
5671// TODO: we could return a pair of values that specify the max VF and
5672// min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
5673// `buildVPlans(VF, VF)`. We cannot do this yet because VPlan currently
5674// lacks a cost model that can choose which plan to execute when
5675// more than one is generated.
5676static ElementCount determineVPlanVF(const TargetTransformInfo &TTI,
5677 VFSelectionContext &Config) {
5678 unsigned WidestType = Config.getSmallestAndWidestTypes().second;
5679
5680 TargetTransformInfo::RegisterKind RegKind =
5681 TTI.enableScalableVectorization()
5682 ? TargetTransformInfo::RGK_ScalableVector
5683 : TargetTransformInfo::RGK_FixedWidthVector;
5684
5685 TypeSize RegSize = TTI.getRegisterBitWidth(RegKind);
5686 unsigned N = RegSize.getKnownMinValue() / WidestType;
5687 return ElementCount::get(N, RegSize.isScalable());
5688}
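// Worked example (illustrative): with 128-bit vector registers and a widest
// loop type of 32 bits, N = 128 / 32 = 4, so this returns a fixed VF of 4;
// if the target enables scalable vectorization with the same known-minimum
// register size, it returns "vscale x 4" instead.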
5689
5690VectorizationFactor
5691LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
5692 ElementCount VF = UserVF;
5693 // Outer loop handling: They may require CFG and instruction level
5694 // transformations before even evaluating whether vectorization is profitable.
5695 // Since we cannot modify the incoming IR, we need to build VPlan upfront in
5696 // the vectorization pipeline.
5697 if (!OrigLoop->isInnermost()) {
5698 // If the user doesn't provide a vectorization factor, determine a
5699 // reasonable one.
5700 if (UserVF.isZero()) {
5701 VF = determineVPlanVF(TTI, Config);
5702 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
5703
5704 // Make sure we have a VF > 1 for stress testing.
5705 if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
5706 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
5707 << "overriding computed VF.\n");
5708 VF = ElementCount::getFixed(4);
5709 }
5710 } else if (UserVF.isScalable() && !Config.supportsScalableVectors()) {
5711 LLVM_DEBUG(dbgs() << "LV: Not vectorizing. Scalable VF requested, but "
5712 << "not supported by the target.\n");
5713 reportVectorizationFailure(
5714 "Scalable vectorization requested but not supported by the target",
5715 "the scalable user-specified vectorization width for outer-loop "
5716 "vectorization cannot be used because the target does not support "
5717 "scalable vectors.",
5718 "ScalableVFUnfeasible", ORE, OrigLoop);
5719 return VectorizationFactor::Disabled();
5720 }
5721 assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
5722 assert(isPowerOf2_32(VF.getKnownMinValue()) &&
5723 "VF needs to be a power of two");
5724 LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
5725 << "VF " << VF << " to build VPlans.\n");
5726 buildVPlans(VF, VF);
5727
5728 if (VPlans.empty())
5729 return VectorizationFactor::Disabled();
5730
5731 // For VPlan build stress testing, we bail out after VPlan construction.
5732 if (VPlanBuildStressTest)
5733 return VectorizationFactor::Disabled();
5734
5735 return {VF, 0 /*Cost*/, 0 /* ScalarCost */};
5736 }
5737
5738 LLVM_DEBUG(
5739 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
5740 "VPlan-native path.\n");
5741 return VectorizationFactor::Disabled();
5742}
5743
5744void LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
5745 assert(OrigLoop->isInnermost() && "Inner loop expected.");
5746 CM.collectValuesToIgnore();
5747 Config.collectElementTypesForWidening(&CM.ValuesToIgnore);
5748
5749 FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
5750 if (!MaxFactors) // Cases that should not be vectorized nor interleaved.
5751 return;
5752
5753 // Compute the minimal bitwidths required for integer operations in the loop
5754 // for later use by the cost model.
5755 Config.computeMinimalBitwidths();
5756
5757 // Invalidate interleave groups if all blocks of loop will be predicated.
5758 if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) &&
5759 !useMaskedInterleavedAccesses(TTI)) {
5760 LLVM_DEBUG(
5761 dbgs()
5762 << "LV: Invalidate all interleaved groups due to fold-tail by masking "
5763 "which requires masked-interleaved support.\n");
5764 if (CM.InterleaveInfo.invalidateGroups())
5765 // Invalidating interleave groups also requires invalidating all decisions
5766 // based on them, which includes widening decisions and uniform and scalar
5767 // values.
5768 CM.invalidateCostModelingDecisions();
5769 }
5770
5771 if (CM.foldTailByMasking())
5772 Legal->prepareToFoldTailByMasking();
5773
5774 ElementCount MaxUserVF =
5775 UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
5776 if (UserVF) {
5777 if (!ElementCount::isKnownLE(UserVF, MaxUserVF)) {
5778 reportVectorizationInfo(
5779 "UserVF ignored because it may be larger than the maximal safe VF",
5780 "InvalidUserVF", ORE, OrigLoop);
5781 } else {
5782 assert(isPowerOf2_32(UserVF.getKnownMinValue()) &&
5783 "VF needs to be a power of two");
5784 // Collect the instructions (and their associated costs) that will be more
5785 // profitable to scalarize.
5786 Config.collectInLoopReductions();
5787 CM.collectNonVectorizedAndSetWideningDecisions(UserVF);
5788 ElementCount EpilogueUserVF =
5789 ElementCount::getFixed(EpilogueVectorizationForceVF);
5790 if (EpilogueUserVF.isVector() &&
5791 ElementCount::isKnownLT(EpilogueUserVF, UserVF)) {
5792 CM.collectNonVectorizedAndSetWideningDecisions(EpilogueUserVF);
5793 buildVPlansWithVPRecipes(EpilogueUserVF, EpilogueUserVF);
5794 }
5795 buildVPlansWithVPRecipes(UserVF, UserVF);
5796 if (!VPlans.empty() && VPlans.back()->getSingleVF() == UserVF) {
5797 // For scalar VF, skip VPlan cost check as VPlan cost is designed for
5798 // vector VFs only.
5799 if (UserVF.isScalar() ||
5800 cost(*VPlans.back(), UserVF, /*RU=*/nullptr).isValid()) {
5801 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
5803 return;
5804 }
5805 }
5806 VPlans.clear();
5807 reportVectorizationInfo("UserVF ignored because of invalid costs.",
5808 "InvalidCost", ORE, OrigLoop);
5809 }
5810 }
5811
5812 // Collect the Vectorization Factor Candidates.
5813 SmallVector<ElementCount> VFCandidates;
5814 for (auto VF = ElementCount::getFixed(1);
5815 ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
5816 VFCandidates.push_back(VF);
5817 for (auto VF = ElementCount::getScalable(1);
5818 ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
5819 VFCandidates.push_back(VF);
5820
5821 Config.collectInLoopReductions();
5822 for (const auto &VF : VFCandidates) {
5823 // Collect Uniform and Scalar instructions after vectorization with VF.
5824 CM.collectNonVectorizedAndSetWideningDecisions(VF);
5825 }
5826
5827 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
5828 buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);
5829
5830 LLVM_DEBUG(printPlans(dbgs()));
5831}
5832
5833InstructionCost VPCostContext::getLegacyCost(Instruction *UI,
5834 ElementCount VF) const {
5835 InstructionCost Cost = CM.getInstructionCost(UI, VF);
5836 if (Cost.isValid() && ForceTargetInstructionCost.getNumOccurrences())
5837 return InstructionCost(ForceTargetInstructionCost);
5838 return Cost;
5839}
5840
5841bool VPCostContext::skipCostComputation(Instruction *UI, bool IsVector) const {
5842 return CM.ValuesToIgnore.contains(UI) ||
5843 (IsVector && CM.VecValuesToIgnore.contains(UI)) ||
5844 SkipCostComputation.contains(UI);
5845}
5846
5847unsigned VPCostContext::getPredBlockCostDivisor(BasicBlock *BB) const {
5848 return CM.getPredBlockCostDivisor(CostKind, BB);
5849}
5850
5851InstructionCost
5852LoopVectorizationPlanner::precomputeCosts(VPlan &Plan, ElementCount VF,
5853 VPCostContext &CostCtx) const {
5854 InstructionCost Cost;
5855 // Cost modeling for inductions is inaccurate in the legacy cost model
5856 // compared to the recipes that are generated. To match it here initially during
5857 // VPlan cost-model bring-up, directly use the induction costs from the legacy
5858 // cost model. Note that we do this as pre-processing; the VPlan may not have
5859 // any recipes associated with the original induction increment instruction
5860 // and may replace truncates with VPWidenIntOrFpInductionRecipe. We precompute
5861 // the cost of induction phis and increments (both that are represented by
5862 // recipes and those that are not), to avoid distinguishing between them here,
5863 // and skip all recipes that represent induction phis and increments (the
5864 // former case) later on, if they exist, to avoid counting them twice.
5865 // Similarly we pre-compute the cost of any optimized truncates.
5866 // TODO: Switch to more accurate costing based on VPlan.
5867 for (const auto &[IV, IndDesc] : Legal->getInductionVars()) {
5868 Instruction *IVInc = cast<Instruction>(
5869 IV->getIncomingValueForBlock(OrigLoop->getLoopLatch()));
5870 SmallVector<Instruction *> IVInsts = {IVInc};
5871 for (unsigned I = 0; I != IVInsts.size(); I++) {
5872 for (Value *Op : IVInsts[I]->operands()) {
5873 auto *OpI = dyn_cast<Instruction>(Op);
5874 if (Op == IV || !OpI || !OrigLoop->contains(OpI) || !Op->hasOneUse())
5875 continue;
5876 IVInsts.push_back(OpI);
5877 }
5878 }
5879 IVInsts.push_back(IV);
5880 for (User *U : IV->users()) {
5881 auto *CI = cast<Instruction>(U);
5882 if (!CostCtx.CM.isOptimizableIVTruncate(CI, VF))
5883 continue;
5884 IVInsts.push_back(CI);
5885 }
5886
5887 // If the vector loop gets executed exactly once with the given VF, ignore
5888 // the costs of comparison and induction instructions, as they'll get
5889 // simplified away.
5890 // TODO: Remove this code after stepping away from the legacy cost model and
5891 // adding code to simplify VPlans before calculating their costs.
5892 auto TC = getSmallConstantTripCount(PSE.getSE(), OrigLoop);
5893 if (TC == VF && !CM.foldTailByMasking())
5894 addFullyUnrolledInstructionsToIgnore(OrigLoop, Legal->getInductionVars(),
5895 CostCtx.SkipCostComputation);
5896
5897 for (Instruction *IVInst : IVInsts) {
5898 if (CostCtx.skipCostComputation(IVInst, VF.isVector()))
5899 continue;
5900 InstructionCost InductionCost = CostCtx.getLegacyCost(IVInst, VF);
5901 LLVM_DEBUG({
5902 dbgs() << "Cost of " << InductionCost << " for VF " << VF
5903 << ": induction instruction " << *IVInst << "\n";
5904 });
5905 Cost += InductionCost;
5906 CostCtx.SkipCostComputation.insert(IVInst);
5907 }
5908 }
5909
5910 /// Compute the cost of all exiting conditions of the loop using the legacy
5911 /// cost model. This is to match the legacy behavior, which adds the cost of
5912 /// all exit conditions. Note that this over-estimates the cost, as there will
5913 /// be a single condition to control the vector loop.
5914 SmallVector<BasicBlock *> Exiting;
5915 CM.TheLoop->getExitingBlocks(Exiting);
5916 SetVector<Instruction *> ExitInstrs;
5917 // Collect all exit conditions.
5918 for (BasicBlock *EB : Exiting) {
5919 auto *Term = dyn_cast<CondBrInst>(EB->getTerminator());
5920 if (!Term || CostCtx.skipCostComputation(Term, VF.isVector()))
5921 continue;
5922 if (auto *CondI = dyn_cast<Instruction>(Term->getOperand(0))) {
5923 ExitInstrs.insert(CondI);
5924 }
5925 }
5926 // Compute the cost of all instructions only feeding the exit conditions.
5927 for (unsigned I = 0; I != ExitInstrs.size(); ++I) {
5928 Instruction *CondI = ExitInstrs[I];
5929 if (!OrigLoop->contains(CondI) ||
5930 !CostCtx.SkipCostComputation.insert(CondI).second)
5931 continue;
5932 InstructionCost CondICost = CostCtx.getLegacyCost(CondI, VF);
5933 LLVM_DEBUG({
5934 dbgs() << "Cost of " << CondICost << " for VF " << VF
5935 << ": exit condition instruction " << *CondI << "\n";
5936 });
5937 Cost += CondICost;
5938 for (Value *Op : CondI->operands()) {
5939 auto *OpI = dyn_cast<Instruction>(Op);
5940 if (!OpI || CostCtx.skipCostComputation(OpI, VF.isVector()) ||
5941 any_of(OpI->users(), [&ExitInstrs](User *U) {
5942 return !ExitInstrs.contains(cast<Instruction>(U));
5943 }))
5944 continue;
5945 ExitInstrs.insert(OpI);
5946 }
5947 }
5948
5949 // Pre-compute the costs for branches except for the backedge, as the number
5950 // of replicate regions in a VPlan may not directly match the number of
5951 // branches, which would lead to different decisions.
5952 // TODO: Compute cost of branches for each replicate region in the VPlan,
5953 // which is more accurate than the legacy cost model.
5954 for (BasicBlock *BB : OrigLoop->blocks()) {
5955 if (CostCtx.skipCostComputation(BB->getTerminator(), VF.isVector()))
5956 continue;
5957 CostCtx.SkipCostComputation.insert(BB->getTerminator());
5958 if (BB == OrigLoop->getLoopLatch())
5959 continue;
5960 auto BranchCost = CostCtx.getLegacyCost(BB->getTerminator(), VF);
5961 Cost += BranchCost;
5962 }
5963
5964 // Don't apply special costs when instruction cost is forced to make sure the
5965 // forced cost is used for each recipe.
5966 if (ForceTargetInstructionCost.getNumOccurrences())
5967 return Cost;
5968
5969 // Pre-compute costs for instructions that are forced-scalar or profitable to
5970 // scalarize. For most such instructions, their scalarization costs are
5971 // accounted for here using the legacy cost model. However, some opcodes
5972 // are excluded from these precomputed scalarization costs and are instead
5973 // modeled later by the VPlan cost model (see UseVPlanCostModel below).
5974 for (Instruction *ForcedScalar : CM.ForcedScalars[VF]) {
5975 if (CostCtx.skipCostComputation(ForcedScalar, VF.isVector()))
5976 continue;
5977 CostCtx.SkipCostComputation.insert(ForcedScalar);
5978 InstructionCost ForcedCost = CostCtx.getLegacyCost(ForcedScalar, VF);
5979 LLVM_DEBUG({
5980 dbgs() << "Cost of " << ForcedCost << " for VF " << VF
5981 << ": forced scalar " << *ForcedScalar << "\n";
5982 });
5983 Cost += ForcedCost;
5984 }
5985
5986 auto UseVPlanCostModel = [](Instruction *I) -> bool {
5987 switch (I->getOpcode()) {
5988 case Instruction::SDiv:
5989 case Instruction::UDiv:
5990 case Instruction::SRem:
5991 case Instruction::URem:
5992 return true;
5993 default:
5994 return false;
5995 }
5996 };
5997 for (const auto &[Scalarized, ScalarCost] : CM.InstsToScalarize[VF]) {
5998 if (UseVPlanCostModel(Scalarized) ||
5999 CostCtx.skipCostComputation(Scalarized, VF.isVector()))
6000 continue;
6001 CostCtx.SkipCostComputation.insert(Scalarized);
6002 LLVM_DEBUG({
6003 dbgs() << "Cost of " << ScalarCost << " for VF " << VF
6004 << ": profitable to scalarize " << *Scalarized << "\n";
6005 });
6006 Cost += ScalarCost;
6007 }
6008
6009 return Cost;
6010}
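//
// Example of the double counting this avoids (illustrative): in a loop
// "for (i = 0; i != n; ++i)", the IV increment and the exit compare are
// costed once here and added to SkipCostComputation, so the VPlan recipe
// walk in cost() below does not charge them again.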
6011
6012InstructionCost LoopVectorizationPlanner::cost(VPlan &Plan, ElementCount VF,
6013 VPRegisterUsage *RU) const {
6014 VPCostContext CostCtx(CM.TTI, *CM.TLI, Plan, CM, Config.CostKind, PSE,
6015 OrigLoop);
6016 InstructionCost Cost = precomputeCosts(Plan, VF, CostCtx);
6017
6018 // Now compute and add the VPlan-based cost.
6019 Cost += Plan.cost(VF, CostCtx);
6020
6021 // Add the cost of spills due to excess register usage
6022 if (RU && Config.shouldConsiderRegPressureForVF(VF))
6023 Cost += RU->spillCost(CM.TTI, Config.CostKind, ForceTargetNumVectorRegs);
6024
6025#ifndef NDEBUG
6026 unsigned EstimatedWidth =
6027 estimateElementCount(VF, Config.getVScaleForTuning());
6028 LLVM_DEBUG(dbgs() << "Cost for VF " << VF << ": " << Cost
6029 << " (Estimated cost per lane: ");
6030 if (Cost.isValid()) {
6031 double CostPerLane = double(Cost.getValue()) / EstimatedWidth;
6032 LLVM_DEBUG(dbgs() << format("%.1f", CostPerLane));
6033 } else /* No point dividing an invalid cost - it will still be invalid */
6034 LLVM_DEBUG(dbgs() << "Invalid");
6035 LLVM_DEBUG(dbgs() << ")\n");
6036#endif
6037 return Cost;
6038}
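// Worked example for the debug output above (illustrative): at
// VF = vscale x 4 with a tuning vscale of 2 the estimated width is 8 lanes,
// so a total cost of 24 prints as an estimated cost per lane of 3.0.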
6039
6040std::pair<VectorizationFactor, VPlan *>
6042 if (VPlans.empty())
6043 return {VectorizationFactor::Disabled(), nullptr};
6044 // If there is a single VPlan with a single VF, return it directly.
6045 VPlan &FirstPlan = *VPlans[0];
6046 ElementCount UserVF = Hints.getWidth();
6047 if (hasPlanWithVF(UserVF)) {
6048 if (VPlans.size() == 1) {
6049 assert(FirstPlan.getSingleVF() == UserVF &&
6050 "UserVF must match single VF");
6051 return {VectorizationFactor(FirstPlan.getSingleVF(), 0, 0), &FirstPlan};
6052 }
6054 assert(VPlans.size() == 2 && "Must have exactly 2 VPlans built");
6055 assert(VPlans[0]->getSingleVF() ==
6056 ElementCount::getFixed(EpilogueVectorizationForceVF) &&
6057 "expected first plan to be for the forced epilogue VF");
6058 assert(VPlans[1]->getSingleVF() == UserVF &&
6059 "expected second plan to be for the forced UserVF");
6060 return {VectorizationFactor(UserVF, 0, 0), VPlans[1].get()};
6061 }
6062 }
6063
6064 LLVM_DEBUG(dbgs() << "LV: Computing best VF using cost kind: "
6065 << (Config.CostKind == TTI::TCK_RecipThroughput
6066 ? "Reciprocal Throughput\n"
6067 : Config.CostKind == TTI::TCK_Latency
6068 ? "Instruction Latency\n"
6069 : Config.CostKind == TTI::TCK_CodeSize ? "Code Size\n"
6070 : Config.CostKind == TTI::TCK_SizeAndLatency
6071 ? "Code Size and Latency\n"
6072 : "Unknown\n"));
6073
6074 ElementCount ScalarVF = ElementCount::getFixed(1);
6075 assert(FirstPlan.hasVF(ScalarVF) &&
6076 "More than a single plan/VF w/o any plan having scalar VF");
6077
6078 // TODO: Compute scalar cost using VPlan-based cost model.
6079 InstructionCost ScalarCost = CM.expectedCost(ScalarVF);
6080 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ScalarCost << ".\n");
6081 VectorizationFactor ScalarFactor(ScalarVF, ScalarCost, ScalarCost);
6082 VectorizationFactor BestFactor = ScalarFactor;
6083
6084 bool ForceVectorization = Hints.getForce() == LoopVectorizeHints::FK_Enabled;
6085 if (ForceVectorization) {
6086 // Ignore scalar width, because the user explicitly wants vectorization.
6087 // Initialize cost to max so that VF = 2 is, at least, chosen during cost
6088 // evaluation.
6089 BestFactor.Cost = InstructionCost::getMax();
6090 }
6091
6092 VPlan *PlanForBestVF = &FirstPlan;
6093
6094 for (auto &P : VPlans) {
6095 ArrayRef<ElementCount> VFs(P->vectorFactors().begin(),
6096 P->vectorFactors().end());
6097
6098 SmallVector<VPRegisterUsage, 8> RUs;
6099 bool ConsiderRegPressure = any_of(VFs, [this](ElementCount VF) {
6100 return Config.shouldConsiderRegPressureForVF(VF);
6101 });
6102 if (ConsiderRegPressure)
6103 RUs = calculateRegisterUsageForPlan(*P, VFs, TTI, CM.ValuesToIgnore);
6104
6105 for (unsigned I = 0; I < VFs.size(); I++) {
6106 ElementCount VF = VFs[I];
6107 if (VF.isScalar())
6108 continue;
6109 if (!ForceVectorization && !willGenerateVectors(*P, VF, TTI)) {
6110 LLVM_DEBUG(
6111 dbgs()
6112 << "LV: Not considering vector loop of width " << VF
6113 << " because it will not generate any vector instructions.\n");
6114 continue;
6115 }
6116 if (Config.OptForSize && !ForceVectorization && hasReplicatorRegion(*P)) {
6117 LLVM_DEBUG(
6118 dbgs()
6119 << "LV: Not considering vector loop of width " << VF
6120 << " because it would cause replicated blocks to be generated,"
6121 << " which isn't allowed when optimizing for size.\n");
6122 continue;
6123 }
6124
6125 InstructionCost Cost =
6126 cost(*P, VF, ConsiderRegPressure ? &RUs[I] : nullptr);
6127 VectorizationFactor CurrentFactor(VF, Cost, ScalarCost);
6128
6129 if (isMoreProfitable(CurrentFactor, BestFactor, P->hasScalarTail())) {
6130 BestFactor = CurrentFactor;
6131 PlanForBestVF = P.get();
6132 }
6133
6134 // If profitable, add it to the ProfitableVFs list.
6135 if (isMoreProfitable(CurrentFactor, ScalarFactor, P->hasScalarTail()))
6136 ProfitableVFs.push_back(CurrentFactor);
6137 }
6138 }
6139
6140 VPlan &BestPlan = *PlanForBestVF;
6141
6142 assert((BestFactor.Width.isScalar() || BestFactor.ScalarCost > 0) &&
6143 "when vectorizing, the scalar cost must be computed.");
6144
6145 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << BestFactor.Width << ".\n");
6146 return {BestFactor, &BestPlan};
6147}
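// Illustrative worked example (editorial sketch, assumed numbers): with
// ScalarCost = 8 per iteration and a candidate plan whose VF = 4 costs 20 per
// vector iteration, the candidate covers 4 scalar iterations for 20 versus
// 4 * 8 = 32, so isMoreProfitable prefers it and BestFactor and PlanForBestVF
// are updated. The precise comparison, including scalar-tail handling, is
// implemented by isMoreProfitable.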
6148
6149DenseMap<const SCEV *, Value *> LoopVectorizationPlanner::executePlan(
6150 ElementCount BestVF, unsigned BestUF, VPlan &BestVPlan,
6151 InnerLoopVectorizer &ILV, DominatorTree *DT,
6152 EpilogueVectorizationKind EpilogueVecKind) {
6153 assert(BestVPlan.hasVF(BestVF) &&
6154 "Trying to execute plan with unsupported VF");
6155 assert(BestVPlan.hasUF(BestUF) &&
6156 "Trying to execute plan with unsupported UF");
6157 if (BestVPlan.hasEarlyExit())
6158 ++LoopsEarlyExitVectorized;
6159
6161 BestVPlan, *PSE.getSE(), CM.TTI, Config.CostKind, BestVF, BestUF,
6162 CM.ValuesToIgnore);
6163 // TODO: Move to VPlan transform stage once the transition to the VPlan-based
6164 // cost model is complete for better cost estimates.
6165 RUN_VPLAN_PASS(VPlanTransforms::unrollByUF, BestVPlan, BestUF);
6169 bool HasBranchWeights =
6170 hasBranchWeightMD(*OrigLoop->getLoopLatch()->getTerminator());
6171 if (HasBranchWeights) {
6172 std::optional<unsigned> VScale = Config.getVScaleForTuning();
6174 BestVPlan, BestVF, VScale);
6175 }
6176
6177 // Retrieve VectorPH now, while it is still easy to do so because the VPlan
6178 // retains its regions.
6178 VPBasicBlock *VectorPH = cast<VPBasicBlock>(BestVPlan.getVectorPreheader());
6179
6181 BestVF, BestUF, PSE);
6182 RUN_VPLAN_PASS(VPlanTransforms::optimizeForVFAndUF, BestVPlan, BestVF, BestUF,
6183 PSE);
6185 if (EpilogueVecKind == EpilogueVectorizationKind::None)
6186 VPlanTransforms::removeBranchOnConst(BestVPlan,
6187 /*OnlyLatches=*/false);
6188 if (BestVPlan.getEntry()->getSingleSuccessor() ==
6189 BestVPlan.getScalarPreheader()) {
6190 // TODO: The vector loop would be dead, should not even try to vectorize.
6191 ORE->emit([&]() {
6192 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationDead",
6193 OrigLoop->getStartLoc(),
6194 OrigLoop->getHeader())
6195 << "Created vector loop never executes due to insufficient trip "
6196 "count.";
6197 });
6199 }
6200
6202
6204 // Convert the exit condition to AVLNext == 0 for EVL tail folded loops.
6206 // Regions are dissolved after optimizing for VF and UF, which completely
6207 // removes unneeded loop regions first.
6208 VPlanTransforms::dissolveLoopRegions(BestVPlan);
6209 // Expand BranchOnTwoConds after dissolution, when latch has direct access to
6210 // its successors.
6212 // Convert loops with variable-length stepping after regions are dissolved.
6214 // Remove dead back-edges for single-iteration loops with BranchOnCond(true).
6215 // Only process loop latches to avoid removing edges from the middle block,
6216 // which may be needed for epilogue vectorization.
6217 VPlanTransforms::removeBranchOnConst(BestVPlan, /*OnlyLatches=*/true);
6219 std::optional<uint64_t> MaxRuntimeStep;
6220 if (auto MaxVScale = getMaxVScale(*CM.TheFunction, CM.TTI))
6221 MaxRuntimeStep = uint64_t(*MaxVScale) * BestVF.getKnownMinValue() * BestUF;
6223 BestVPlan, VectorPH, CM.foldTailByMasking(),
6224 CM.requiresScalarEpilogue(BestVF.isVector()), &BestVPlan.getVFxUF(),
6225 MaxRuntimeStep);
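// Illustrative worked example (editorial sketch, assumed numbers): for
// VF = vscale x 4 and UF = 2 on a target whose maximum vscale is 16, the
// vector loop advances by at most 16 * 4 * 2 = 128 elements per iteration;
// MaxRuntimeStep records this bound so trip-count materialization can reason
// about potential overflow of the induction update.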
6226 VPlanTransforms::materializeFactors(BestVPlan, VectorPH, BestVF);
6227 VPlanTransforms::cse(BestVPlan);
6229 VPlanTransforms::simplifyKnownEVL(BestVPlan, BestVF, PSE);
6230
6231 // 0. Generate SCEV-dependent code in the entry, including TripCount, before
6232 // making any changes to the CFG.
6233 DenseMap<const SCEV *, Value *> ExpandedSCEVs =
6234 VPlanTransforms::expandSCEVs(BestVPlan, *PSE.getSE());
6235
6236 // Perform the actual loop transformation.
6237 VPTransformState State(&TTI, BestVF, LI, DT, ILV.AC, ILV.Builder, &BestVPlan,
6238 OrigLoop->getParentLoop(),
6239 Legal->getWidestInductionType());
6240
6241#ifdef EXPENSIVE_CHECKS
6242 assert(DT->verify(DominatorTree::VerificationLevel::Fast));
6243#endif
6244
6245 // 1. Set up the skeleton for vectorization, including vector pre-header and
6246 // middle block. The vector loop is created during VPlan execution.
6247 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
6248 if (VPBasicBlock *ScalarPH = BestVPlan.getScalarPreheader())
6249 replaceVPBBWithIRVPBB(ScalarPH, State.CFG.PrevBB->getSingleSuccessor(),
6250 &BestVPlan);
6252
6253 assert(verifyVPlanIsValid(BestVPlan) && "final VPlan is invalid");
6254
6255 // After vectorization, the exit blocks of the original loop will have
6256 // additional predecessors. Invalidate SCEVs for the exit phis in case SE
6257 // looked through single-entry phis.
6258 ScalarEvolution &SE = *PSE.getSE();
6259 for (VPIRBasicBlock *Exit : BestVPlan.getExitBlocks()) {
6260 if (!Exit->hasPredecessors())
6261 continue;
6262 for (VPRecipeBase &PhiR : Exit->phis())
6263 SE.forgetLcssaPhiWithNewPredecessor(OrigLoop,
6264 &cast<VPIRPhi>(PhiR).getIRPhi());
6265 }
6266 // Forget the original loop and block dispositions.
6267 SE.forgetLoop(OrigLoop);
6268 SE.forgetBlockAndLoopDispositions();
6269
6271
6272 //===------------------------------------------------===//
6273 //
6274 // Notice: any optimization or new instruction that goes
6275 // into the code below should also be implemented in
6276 // the cost-model.
6277 //
6278 //===------------------------------------------------===//
6279
6280 // Retrieve loop information before executing the plan, which may remove the
6281 // original loop, if it becomes unreachable.
6282 MDNode *LID = OrigLoop->getLoopID();
6283 unsigned OrigLoopInvocationWeight = 0;
6284 std::optional<unsigned> OrigAverageTripCount =
6285 getLoopEstimatedTripCount(OrigLoop, &OrigLoopInvocationWeight);
6286
6287 BestVPlan.execute(&State);
6288
6289 // 2.6. Maintain Loop Hints
6290 // Keep all loop hints from the original loop on the vector loop (we'll
6291 // replace the vectorizer-specific hints below).
6292 VPBasicBlock *HeaderVPBB = vputils::getFirstLoopHeader(BestVPlan, State.VPDT);
6293 // Add metadata to disable runtime unrolling a scalar loop when there
6294 // are no runtime checks about strides and memory. A scalar loop that is
6295 // rarely used is not worth unrolling.
6296 bool DisableRuntimeUnroll = !ILV.RTChecks.hasChecks() && !BestVF.isScalar();
6298 HeaderVPBB ? LI->getLoopFor(State.CFG.VPBB2IRBB.lookup(HeaderVPBB))
6299 : nullptr,
6300 HeaderVPBB, BestVPlan,
6301 EpilogueVecKind == EpilogueVectorizationKind::Epilogue, LID,
6302 OrigAverageTripCount, OrigLoopInvocationWeight,
6303 estimateElementCount(BestVF * BestUF, Config.getVScaleForTuning()),
6304 DisableRuntimeUnroll);
6305
6306 // 3. Fix the vectorized code: take care of header phi's, live-outs,
6307 // predication, updating analyses.
6308 ILV.fixVectorizedLoop(State);
6309
6311
6312 return ExpandedSCEVs;
6313}
6314
6315//===--------------------------------------------------------------------===//
6316// EpilogueVectorizerMainLoop
6317//===--------------------------------------------------------------------===//
6318
6320 LLVM_DEBUG({
6321 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
6322 << "Main Loop VF:" << EPI.MainLoopVF
6323 << ", Main Loop UF:" << EPI.MainLoopUF
6324 << ", Epilogue Loop VF:" << EPI.EpilogueVF
6325 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
6326 });
6327}
6328
6331 dbgs() << "intermediate fn:\n"
6332 << *OrigLoop->getHeader()->getParent() << "\n";
6333 });
6334}
6335
6336//===--------------------------------------------------------------------===//
6337// EpilogueVectorizerEpilogueLoop
6338//===--------------------------------------------------------------------===//
6339
6340/// This function creates a new scalar preheader, using the previous one as
6341/// entry block to the epilogue VPlan. The minimum iteration check is being
6342/// represented in VPlan.
6344 BasicBlock *NewScalarPH = createScalarPreheader("vec.epilog.");
6345 BasicBlock *OriginalScalarPH = NewScalarPH->getSinglePredecessor();
6346 OriginalScalarPH->setName("vec.epilog.iter.check");
6347 VPIRBasicBlock *NewEntry = Plan.createVPIRBasicBlock(OriginalScalarPH);
6348 VPBasicBlock *OldEntry = Plan.getEntry();
6349 for (auto &R : make_early_inc_range(*OldEntry)) {
6350 // Skip moving VPIRInstructions (including VPIRPhis), which are unmovable
6351 // by definition.
6352 if (isa<VPIRInstruction>(&R))
6353 continue;
6354 R.moveBefore(*NewEntry, NewEntry->end());
6355 }
6356
6357 VPBlockUtils::reassociateBlocks(OldEntry, NewEntry);
6358 Plan.setEntry(NewEntry);
6359 // OldEntry is now dead and will be cleaned up when the plan gets destroyed.
6360
6361 return OriginalScalarPH;
6362}
6363
6365 LLVM_DEBUG({
6366 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
6367 << "Epilogue Loop VF:" << EPI.EpilogueVF
6368 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
6369 });
6370}
6371
6374 dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n";
6375 });
6376}
6377
6379 VFRange &Range) {
6380 assert((VPI->getOpcode() == Instruction::Load ||
6381 VPI->getOpcode() == Instruction::Store) &&
6382 "Must be called with either a load or store");
6383 Instruction *I = VPI->getUnderlyingInstr();
6384
6385 auto WillWiden = [&](ElementCount VF) -> bool {
6386 LoopVectorizationCostModel::InstWidening Decision =
6387 CM.getWideningDecision(I, VF);
6388 assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
6389 "CM decision should be taken at this point.");
6390 if (Decision == LoopVectorizationCostModel::CM_Interleave)
6391 return true;
6392 if (CM.isScalarAfterVectorization(I, VF) ||
6393 CM.isProfitableToScalarize(I, VF))
6394 return false;
6395 return Decision != LoopVectorizationCostModel::CM_Scalarize;
6396 };
6397
6398 if (!LoopVectorizationPlanner::getDecisionAndClampRange(WillWiden, Range))
6399 return nullptr;
6400
6401 // If a mask is not required, drop it - use unmasked version for safe loads.
6402 // TODO: Determine if mask is needed in VPlan.
6403 VPValue *Mask = CM.isMaskRequired(I) ? VPI->getMask() : nullptr;
6404
6405 // Determine if the pointer operand of the access is either consecutive or
6406 // reverse consecutive.
6407 LoopVectorizationCostModel::InstWidening Decision =
6408 CM.getWideningDecision(I, Range.Start);
6409 bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse;
6410 bool Consecutive =
6411 Reverse || Decision == LoopVectorizationCostModel::CM_Widen;
6412
6413 VPValue *Ptr = VPI->getOpcode() == Instruction::Load ? VPI->getOperand(0)
6414 : VPI->getOperand(1);
6415 if (Consecutive) {
6417 VPSingleDefRecipe *VectorPtr;
6418 if (Reverse) {
6419 // When folding the tail, we may compute an address that we would not
6420 // compute in the original scalar loop: drop the GEP no-wrap flags then.
6421 // Otherwise preserve existing flags without no-unsigned-wrap, as we will
6422 // emit negative indices.
6423 GEPNoWrapFlags ReverseFlags = CM.foldTailByMasking()
6424 ? GEPNoWrapFlags::none()
6425 : Flags.withoutNoUnsignedWrap();
6426 VectorPtr = new VPVectorEndPointerRecipe(
6427 Ptr, &Plan.getVF(), getLoadStoreType(I),
6428 /*Stride*/ -1, ReverseFlags, VPI->getDebugLoc());
6429 } else {
6430 VectorPtr = new VPVectorPointerRecipe(Ptr, getLoadStoreType(I), Flags,
6431 VPI->getDebugLoc());
6432 }
6433 Builder.setInsertPoint(VPI);
6434 Builder.insert(VectorPtr);
6435 Ptr = VectorPtr;
6436 }
6437
6438 if (Reverse && Mask)
6439 Mask = Builder.createNaryOp(VPInstruction::Reverse, Mask, I->getDebugLoc());
6440
6441 if (VPI->getOpcode() == Instruction::Load) {
6442 auto *Load = cast<LoadInst>(I);
6443 auto *LoadR = new VPWidenLoadRecipe(*Load, Ptr, Mask, Consecutive, *VPI,
6444 Load->getDebugLoc());
6445 if (Reverse) {
6446 Builder.insert(LoadR);
6447 return new VPInstruction(VPInstruction::Reverse, LoadR, {}, {},
6448 LoadR->getDebugLoc());
6449 }
6450 return LoadR;
6451 }
6452
6453 StoreInst *Store = cast<StoreInst>(I);
6454 VPValue *StoredVal = VPI->getOperand(0);
6455 if (Reverse)
6456 StoredVal = Builder.createNaryOp(VPInstruction::Reverse, StoredVal,
6457 Store->getDebugLoc());
6458 return new VPWidenStoreRecipe(*Store, Ptr, StoredVal, Mask, Consecutive, *VPI,
6459 Store->getDebugLoc());
6460}
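// Illustrative example (editorial sketch): for a scalar loop such as
//   for (int i = n - 1; i >= 0; --i)
//     sum += a[i];
// the load of a[i] is reverse consecutive: a VPVectorEndPointerRecipe with
// stride -1 addresses the block of lanes ending at the current index and the
// loaded vector is then reversed, whereas a forward access in an incrementing
// loop only needs a VPVectorPointerRecipe.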
6461
6463VPRecipeBuilder::tryToOptimizeInductionTruncate(VPInstruction *VPI,
6464 VFRange &Range) {
6465 auto *I = cast<TruncInst>(VPI->getUnderlyingInstr());
6466 // Optimize the special case where the source is a constant integer
6467 // induction variable. Notice that we can only optimize the 'trunc' case
6468 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
6469 // (c) other casts depend on pointer size.
6470
6471 // Determine whether \p I is a truncation based on an induction variable that
6472 // can be optimized.
6473 if (!LoopVectorizationPlanner::getDecisionAndClampRange(
6474 CM.isOptimizableIVTruncate(
6475 I),
6476 Range))
6477 return nullptr;
6478
6479 auto *WidenIV = cast<VPWidenInductionRecipe>(
6480 VPI->getOperand(0)->getDefiningRecipe());
6481 PHINode *Phi = WidenIV->getPHINode();
6482 VPIRValue *Start = WidenIV->getStartValue();
6483 const InductionDescriptor &IndDesc = WidenIV->getInductionDescriptor();
6484
6485 // Wrap flags from the original induction do not apply to the truncated type,
6486 // so do not propagate them.
6487 VPIRFlags Flags = VPIRFlags::WrapFlagsTy(false, false);
6488 VPValue *Step =
6489 vputils::getOrCreateVPValueForSCEVExpr(Plan, IndDesc.getStep());
6490 return new VPWidenIntOrFpInductionRecipe(
6491 Phi, Start, Step, &Plan.getVF(), IndDesc, I, Flags, VPI->getDebugLoc());
6492}
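// Illustrative example (editorial sketch): in a loop like
//   for (long i = 0; i != n; ++i)
//     b[i] = (int)i;
// the truncation of the 64-bit induction can itself be modelled as a 32-bit
// widened induction, instead of widening the trunc of every vector lane.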
6493
6494VPSingleDefRecipe *VPRecipeBuilder::tryToWidenCall(VPInstruction *VPI,
6495 VFRange &Range) {
6496 auto *CI = cast<CallInst>(VPI->getUnderlyingInstr());
6497 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
6498 [this, CI](ElementCount VF) {
6499 return CM.isScalarWithPredication(CI, VF);
6500 },
6501 Range);
6502
6503 if (IsPredicated)
6504 return nullptr;
6505
6508 return nullptr;
6509
6510 SmallVector<VPValue *, 4> Ops(VPI->op_begin(),
6511 VPI->op_begin() + CI->arg_size());
6512
6513 // Is it beneficial to perform intrinsic call compared to lib call?
6514 bool ShouldUseVectorIntrinsic =
6515 ID && LoopVectorizationPlanner::getDecisionAndClampRange(
6516 [&](ElementCount VF) -> bool {
6517 return CM.getCallWideningDecision(CI, VF).Kind ==
6518 LoopVectorizationCostModel::CM_IntrinsicCall;
6519 },
6520 Range);
6521 if (ShouldUseVectorIntrinsic)
6522 return new VPWidenIntrinsicRecipe(*CI, ID, Ops, CI->getType(), *VPI, *VPI,
6523 VPI->getDebugLoc());
6524
6525 Function *Variant = nullptr;
6526 std::optional<unsigned> MaskPos;
6527 // Is it better to call a vectorized version of the function than to
6528 // scalarize the call?
6529 auto ShouldUseVectorCall = LoopVectorizationPlanner::getDecisionAndClampRange(
6530 [&](ElementCount VF) -> bool {
6531 // The following case may be scalarized depending on the VF.
6532 // The flag shows whether we can use a usual Call for vectorized
6533 // version of the instruction.
6534
6535 // If we've found a variant at a previous VF, then stop looking. A
6536 // vectorized variant of a function expects input in a certain shape
6537 // -- basically the number of input registers, the number of lanes
6538 // per register, and whether there's a mask required.
6539 // We store a pointer to the variant in the VPWidenCallRecipe, so
6540 // once we have an appropriate variant it's only valid for that VF.
6541 // This will force a different vplan to be generated for each VF that
6542 // finds a valid variant.
6543 if (Variant)
6544 return false;
6545 LoopVectorizationCostModel::CallWideningDecision Decision =
6546 CM.getCallWideningDecision(CI, VF);
6547 if (Decision.Kind == LoopVectorizationCostModel::CM_VectorCall) {
6548 Variant = Decision.Variant;
6549 MaskPos = Decision.MaskPos;
6550 return true;
6551 }
6552
6553 return false;
6554 },
6555 Range);
6556 if (ShouldUseVectorCall) {
6557 if (MaskPos.has_value()) {
6558 // We have 2 cases that would require a mask:
6559 // 1) The call needs to be predicated, either due to a conditional
6560 // in the scalar loop or use of an active lane mask with
6561 // tail-folding, and we use the appropriate mask for the block.
6562 // 2) No mask is required for the call instruction, but the only
6563 // available vector variant at this VF requires a mask, so we
6564 // synthesize an all-true mask.
6565 VPValue *Mask = VPI->isMasked() ? VPI->getMask() : Plan.getTrue();
6566
6567 Ops.insert(Ops.begin() + *MaskPos, Mask);
6568 }
6569
6570 Ops.push_back(VPI->getOperand(VPI->getNumOperandsWithoutMask() - 1));
6571 return new VPWidenCallRecipe(CI, Variant, Ops, *VPI, *VPI,
6572 VPI->getDebugLoc());
6573 }
6574
6575 return nullptr;
6576}
6577
6578bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
6580 "Instruction should have been handled earlier");
6581 // Instruction should be widened, unless it is scalar after vectorization,
6582 // scalarization is profitable or it is predicated.
6583 auto WillScalarize = [this, I](ElementCount VF) -> bool {
6584 return CM.isScalarAfterVectorization(I, VF) ||
6585 CM.isProfitableToScalarize(I, VF) ||
6586 CM.isScalarWithPredication(I, VF);
6587 };
6588 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
6589 Range);
6590}
6591
6592VPWidenRecipe *VPRecipeBuilder::tryToWiden(VPInstruction *VPI) {
6593 auto *I = VPI->getUnderlyingInstr();
6594 switch (VPI->getOpcode()) {
6595 default:
6596 return nullptr;
6597 case Instruction::SDiv:
6598 case Instruction::UDiv:
6599 case Instruction::SRem:
6600 case Instruction::URem: {
6601 // If not provably safe, use a select to form a safe divisor before widening the
6602 // div/rem operation itself. Otherwise fall through to general handling below.
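// Illustrative sketch (editorial example): a predicated scalar loop body like
//   if (c[i]) r[i] = a[i] / b[i];
// is widened as
//   safe_b = select(mask, b, splat(1)); r = a / safe_b;
// so that masked-off lanes divide by 1 rather than a possibly-zero or
// otherwise trapping divisor.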
6603 if (CM.isPredicatedInst(I)) {
6604 SmallVector<VPValue *> Ops(VPI->operandsWithoutMask());
6605 VPValue *Mask = VPI->getMask();
6606 VPValue *One = Plan.getConstantInt(I->getType(), 1u);
6607 auto *SafeRHS =
6608 Builder.createSelect(Mask, Ops[1], One, VPI->getDebugLoc());
6609 Ops[1] = SafeRHS;
6610 return new VPWidenRecipe(*I, Ops, *VPI, *VPI, VPI->getDebugLoc());
6611 }
6612 [[fallthrough]];
6613 }
6614 case Instruction::Add:
6615 case Instruction::And:
6616 case Instruction::AShr:
6617 case Instruction::FAdd:
6618 case Instruction::FCmp:
6619 case Instruction::FDiv:
6620 case Instruction::FMul:
6621 case Instruction::FNeg:
6622 case Instruction::FRem:
6623 case Instruction::FSub:
6624 case Instruction::ICmp:
6625 case Instruction::LShr:
6626 case Instruction::Mul:
6627 case Instruction::Or:
6628 case Instruction::Select:
6629 case Instruction::Shl:
6630 case Instruction::Sub:
6631 case Instruction::Xor:
6632 case Instruction::Freeze:
6633 return new VPWidenRecipe(*I, VPI->operandsWithoutMask(), *VPI, *VPI,
6634 VPI->getDebugLoc());
6635 case Instruction::ExtractValue: {
6636 SmallVector<VPValue *> NewOps(VPI->operandsWithoutMask());
6637 auto *EVI = cast<ExtractValueInst>(I);
6638 assert(EVI->getNumIndices() == 1 && "Expected one extractvalue index");
6639 unsigned Idx = EVI->getIndices()[0];
6640 NewOps.push_back(Plan.getConstantInt(32, Idx));
6641 return new VPWidenRecipe(*I, NewOps, *VPI, *VPI, VPI->getDebugLoc());
6642 }
6643 };
6644}
6645
6647 if (VPI->getOpcode() != Instruction::Store)
6648 return nullptr;
6649
6650 auto HistInfo =
6651 Legal->getHistogramInfo(cast<StoreInst>(VPI->getUnderlyingInstr()));
6652 if (!HistInfo)
6653 return nullptr;
6654
6655 const HistogramInfo *HI = *HistInfo;
6656 // FIXME: Support other operations.
6657 unsigned Opcode = HI->Update->getOpcode();
6658 assert((Opcode == Instruction::Add || Opcode == Instruction::Sub) &&
6659 "Histogram update operation must be an Add or Sub");
6660
6661 SmallVector<VPValue *, 3> HGramOps;
6662 // Bucket address.
6663 HGramOps.push_back(VPI->getOperand(1));
6664 // Increment value.
6665 HGramOps.push_back(Plan.getOrAddLiveIn(HI->Update->getOperand(1)));
6666
6667 // In case of predicated execution (due to tail-folding, or conditional
6668 // execution, or both), pass the relevant mask.
6669 if (CM.isMaskRequired(HI->Store))
6670 HGramOps.push_back(VPI->getMask());
6671
6672 return new VPHistogramRecipe(Opcode, HGramOps, VPI->getDebugLoc());
6673}
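// Illustrative example (editorial sketch): the histogram pattern
//   for (unsigned i = 0; i < n; ++i)
//     counts[idx[i]] += 1;
// cannot be widened as a plain gather/add/scatter because lanes of idx may
// collide within a vector; VPHistogramRecipe models the update as a single
// collision-aware operation instead.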
6674
6676 VPInstruction *VPI, VPBuilder &FinalRedStoresBuilder) {
6677 StoreInst *SI;
6678 if ((SI = dyn_cast<StoreInst>(VPI->getUnderlyingInstr())) &&
6679 Legal->isInvariantAddressOfReduction(SI->getPointerOperand())) {
6680 // Only create recipe for the final invariant store of the reduction.
6681 if (Legal->isInvariantStoreOfReduction(SI)) {
6682 auto *Recipe = new VPReplicateRecipe(
6683 SI, VPI->operandsWithoutMask(), true /* IsUniform */,
6684 nullptr /*Mask*/, *VPI, *VPI, VPI->getDebugLoc());
6685 FinalRedStoresBuilder.insert(Recipe);
6686 }
6687 VPI->eraseFromParent();
6688 return true;
6689 }
6690
6691 return false;
6692}
6693
6694VPReplicateRecipe *VPRecipeBuilder::handleReplication(VPInstruction *VPI,
6695 VFRange &Range) {
6696 auto *I = VPI->getUnderlyingInstr();
6697 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
6698 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
6699 Range);
6700
6701 bool IsPredicated = CM.isPredicatedInst(I);
6702
6703 // Even if the instruction is not marked as uniform, there are certain
6704 // intrinsic calls that can be effectively treated as such, so we check for
6705 // them here. Conservatively, we only do this for scalable vectors, since
6706 // for fixed-width VFs we can always fall back on full scalarization.
6707 if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
6708 switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
6709 case Intrinsic::assume:
6710 case Intrinsic::lifetime_start:
6711 case Intrinsic::lifetime_end:
6712 // For scalable vectors if one of the operands is variant then we still
6713 // want to mark as uniform, which will generate one instruction for just
6714 // the first lane of the vector. We can't scalarize the call in the same
6715 // way as for fixed-width vectors because we don't know how many lanes
6716 // there are.
6717 //
6718 // The reasons for doing it this way for scalable vectors are:
6719 // 1. For the assume intrinsic generating the instruction for the first
6720 // lane is still better than not generating any at all. For
6721 // example, the input may be a splat across all lanes.
6722 // 2. For the lifetime start/end intrinsics the pointer operand only
6723 // does anything useful when the input comes from a stack object,
6724 // which suggests it should always be uniform. For non-stack objects
6725 // the effect is to poison the object, which still allows us to
6726 // remove the call.
6727 IsUniform = true;
6728 break;
6729 default:
6730 break;
6731 }
6732 }
6733 VPValue *BlockInMask = nullptr;
6734 if (!IsPredicated) {
6735 // Finalize the recipe for Instr, first if it is not predicated.
6736 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
6737 } else {
6738 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
6739 // Instructions marked for predication are replicated and a mask operand is
6740 // added initially. Masked replicate recipes will later be placed under an
6741 // if-then construct to prevent side-effects. Generate recipes to compute
6742 // the block mask for this region.
6743 BlockInMask = VPI->getMask();
6744 }
6745
6746 // Note that there is some custom logic to mark some intrinsics as uniform
6747 // manually above for scalable vectors, which this assert needs to account for
6748 // as well.
6749 assert((Range.Start.isScalar() || !IsUniform || !IsPredicated ||
6750 (Range.Start.isScalable() && isa<IntrinsicInst>(I))) &&
6751 "Should not predicate a uniform recipe");
6752 auto *Recipe =
6753 new VPReplicateRecipe(I, VPI->operandsWithoutMask(), IsUniform,
6754 BlockInMask, *VPI, *VPI, VPI->getDebugLoc());
6755 return Recipe;
6756}
6757
6758VPRecipeBase *
6759VPRecipeBuilder::tryToCreateWidenNonPhiRecipe(VPSingleDefRecipe *R,
6760 VFRange &Range) {
6761 assert(!R->isPhi() && "phis must be handled earlier");
6762 // First, check for specific widening recipes that deal with optimizing
6763 // truncates, calls and memory operations.
6764
6765 VPRecipeBase *Recipe;
6766 auto *VPI = cast<VPInstruction>(R);
6767 if (VPI->getOpcode() == Instruction::Trunc &&
6768 (Recipe = tryToOptimizeInductionTruncate(VPI, Range)))
6769 return Recipe;
6770
6771 // All widen recipes below deal only with VF > 1.
6773 [&](ElementCount VF) { return VF.isScalar(); }, Range))
6774 return nullptr;
6775
6776 if (VPI->getOpcode() == Instruction::Call)
6777 return tryToWidenCall(VPI, Range);
6778
6779 Instruction *Instr = R->getUnderlyingInstr();
6780 assert(!is_contained({Instruction::Load, Instruction::Store},
6781 VPI->getOpcode()) &&
6782 "Should have been handled prior to this!");
6783
6784 if (!shouldWiden(Instr, Range))
6785 return nullptr;
6786
6787 if (VPI->getOpcode() == Instruction::GetElementPtr)
6788 return new VPWidenGEPRecipe(cast<GetElementPtrInst>(Instr),
6789 VPI->operandsWithoutMask(), *VPI,
6790 VPI->getDebugLoc());
6791
6792 if (Instruction::isCast(VPI->getOpcode())) {
6793 auto *CI = cast<CastInst>(Instr);
6794 auto *CastR = cast<VPInstructionWithType>(VPI);
6795 return new VPWidenCastRecipe(CI->getOpcode(), VPI->getOperand(0),
6796 CastR->getResultType(), CI, *VPI, *VPI,
6797 VPI->getDebugLoc());
6798 }
6799
6800 return tryToWiden(VPI);
6801}
6802
6803// To allow RUN_VPLAN_PASS to print the VPlan after VF/UF independent
6804// optimizations.
6806
6807void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
6808 ElementCount MaxVF) {
6809 if (ElementCount::isKnownGT(MinVF, MaxVF))
6810 return;
6811
6812 assert(OrigLoop->isInnermost() && "Inner loop expected.");
6813
6814 const LoopAccessInfo *LAI = Legal->getLAI();
6815 LoopVersioning LVer(*LAI, LAI->getRuntimePointerChecking()->getChecks(),
6816 OrigLoop, LI, DT, PSE.getSE());
6817 if (!LAI->getRuntimePointerChecking()->getChecks().empty() &&
6819 // Only use noalias metadata when using memory checks guaranteeing no
6820 // overlap across all iterations.
6821 LVer.prepareNoAliasMetadata();
6822 }
6823
6824 // Create initial base VPlan0, to serve as common starting point for all
6825 // candidates built later for specific VF ranges.
6826 auto VPlan0 = VPlanTransforms::buildVPlan0(
6827 OrigLoop, *LI, Legal->getWidestInductionType(),
6828 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()), PSE, &LVer);
6829
6830 // Create recipes for header phis.
6832 *OrigLoop, Legal->getInductionVars(),
6833 Legal->getReductionVars(),
6834 Legal->getFixedOrderRecurrences(),
6835 Config.getInLoopReductions(), Hints.allowReordering()))
6836 return;
6837
6840 // If we're vectorizing a loop with an uncountable exit, make sure that the
6841 // recipes are safe to handle.
6842 // TODO: Remove this once we can properly check the VPlan itself for both
6843 // the presence of an uncountable exit and the presence of stores in
6844 // the loop inside handleEarlyExits itself.
6845 UncountableExitStyle EEStyle = UncountableExitStyle::NoUncountableExit;
6846 if (Legal->hasUncountableEarlyExit())
6847 EEStyle = Legal->hasUncountableExitWithSideEffects()
6850
6851 if (!VPlanTransforms::handleEarlyExits(*VPlan0, EEStyle,
6852 OrigLoop, PSE, *DT, Legal->getAssumptionCache()))
6853 return;
6854
6855 VPlanTransforms::addMiddleCheck(*VPlan0,
6856 CM.foldTailByMasking());
6858 if (CM.foldTailByMasking())
6861
6862 auto MaxVFTimes2 = MaxVF * 2;
6863 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFTimes2);) {
6864 VFRange SubRange = {VF, MaxVFTimes2};
6865 auto Plan = tryToBuildVPlanWithVPRecipes(
6866 std::unique_ptr<VPlan>(VPlan0->duplicate()), SubRange);
6867 VF = SubRange.End;
6868
6869 if (!Plan)
6870 continue;
6871
6872 // Now optimize the initial VPlan.
6876 Config.getMinimalBitwidths());
6878 // TODO: try to put addExplicitVectorLength close to addActiveLaneMask
6879 if (CM.foldTailWithEVL()) {
6880 VPlanTransforms::addExplicitVectorLength(*Plan,
6881 Config.getMaxSafeElements());
6883 }
6884
6885 if (auto P = VPlanTransforms::narrowInterleaveGroups(*Plan, TTI))
6886 VPlans.push_back(std::move(P));
6887
6889 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
6890 VPlans.push_back(std::move(Plan));
6891 }
6892}
6893
6895LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VPlanPtr Plan,
6896 VFRange &Range) {
6897
6898 using namespace llvm::VPlanPatternMatch;
6899 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
6900
6901 // ---------------------------------------------------------------------------
6902 // Build initial VPlan: Scan the body of the loop in a topological order to
6903 // visit each basic block after having visited its predecessor basic blocks.
6904 // ---------------------------------------------------------------------------
6905
6906 bool RequiresScalarEpilogueCheck =
6908 [this](ElementCount VF) {
6909 return !CM.requiresScalarEpilogue(VF.isVector());
6910 },
6911 Range);
6912 // Update the branch in the middle block if a scalar epilogue is required.
6913 VPBasicBlock *MiddleVPBB = Plan->getMiddleBlock();
6914 if (!RequiresScalarEpilogueCheck && MiddleVPBB->getNumSuccessors() == 2) {
6915 auto *BranchOnCond = cast<VPInstruction>(MiddleVPBB->getTerminator());
6916 assert(MiddleVPBB->getSuccessors()[1] == Plan->getScalarPreheader() &&
6917 "second successor must be scalar preheader");
6918 BranchOnCond->setOperand(0, Plan->getFalse());
6919 }
6920
6921 // Don't use getDecisionAndClampRange here, because we don't know the UF
6922 // so it is better for this function to be conservative, rather than to
6923 // split the range up into different VPlans.
6924 // TODO: Consider using getDecisionAndClampRange here to split up VPlans.
6925 bool IVUpdateMayOverflow = false;
6926 for (ElementCount VF : Range)
6927 IVUpdateMayOverflow |= !isIndvarOverflowCheckKnownFalse(&CM, VF);
6928
6929 TailFoldingStyle Style = CM.getTailFoldingStyle();
6930 // Use NUW for the induction increment if we proved that it won't overflow in
6931 // the vector loop or when not folding the tail. In the later case, we know
6932 // that the canonical induction increment will not overflow as the vector trip
6933 // count is >= increment and a multiple of the increment.
6934 VPRegionBlock *LoopRegion = Plan->getVectorLoopRegion();
6935 bool HasNUW = !IVUpdateMayOverflow || Style == TailFoldingStyle::None;
6936 if (!HasNUW) {
6937 auto *IVInc =
6938 LoopRegion->getExitingBasicBlock()->getTerminator()->getOperand(0);
6939 assert(match(IVInc,
6940 m_VPInstruction<Instruction::Add>(
6941 m_Specific(LoopRegion->getCanonicalIV()), m_VPValue())) &&
6942 "Did not find the canonical IV increment");
6943 LoopRegion->clearCanonicalIVNUW(cast<VPInstruction>(IVInc));
6944 }
6945
6946 // ---------------------------------------------------------------------------
6947 // Pre-construction: record ingredients whose recipes we'll need to further
6948 // process after constructing the initial VPlan.
6949 // ---------------------------------------------------------------------------
6950
6951 // For each interleave group which is relevant for this (possibly trimmed)
6952 // Range, add it to the set of groups to be later applied to the VPlan and add
6953 // placeholders for its members' Recipes which we'll be replacing with a
6954 // single VPInterleaveRecipe.
6955 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
6956 auto ApplyIG = [IG, this](ElementCount VF) -> bool {
6957 bool Result = (VF.isVector() && // Query is illegal for VF == 1
6958 CM.getWideningDecision(IG->getInsertPos(), VF) ==
6960 // For scalable vectors, the interleave factors must be <= 8 since we
6961 // require the (de)interleaveN intrinsics instead of shufflevectors.
6962 assert((!Result || !VF.isScalable() || IG->getFactor() <= 8) &&
6963 "Unsupported interleave factor for scalable vectors");
6964 return Result;
6965 };
6966 if (!getDecisionAndClampRange(ApplyIG, Range))
6967 continue;
6968 InterleaveGroups.insert(IG);
6969 }
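// Illustrative example (editorial sketch): in
//   for (int i = 0; i < n; ++i) {
//     re += a[2 * i];
//     im += a[2 * i + 1];
//   }
// the two strided loads form an interleave group with factor 2, which can be
// widened as one wide load followed by a de-interleave of its lanes.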
6970
6971 // ---------------------------------------------------------------------------
6972 // Construct wide recipes and apply predication for original scalar
6973 // VPInstructions in the loop.
6974 // ---------------------------------------------------------------------------
6975 VPRecipeBuilder RecipeBuilder(*Plan, TLI, Legal, CM, Builder);
6976
6977 // Scan the body of the loop in a topological order to visit each basic block
6978 // after having visited its predecessor basic blocks.
6979 VPBasicBlock *HeaderVPBB = LoopRegion->getEntryBasicBlock();
6980 ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>> RPOT(
6981 HeaderVPBB);
6982
6984 Range.Start);
6985
6986 VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, Config.CostKind, CM.PSE,
6987 OrigLoop);
6988
6990 RecipeBuilder);
6991
6993
6994 // Now process all other blocks and instructions.
6995 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) {
6996 // Convert input VPInstructions to widened recipes.
6997 for (VPRecipeBase &R : make_early_inc_range(
6998 make_range(VPBB->getFirstNonPhi(), VPBB->end()))) {
6999 // Skip recipes that do not need transforming or have already been
7000 // transformed.
7001 if (isa<VPWidenCanonicalIVRecipe, VPBlendRecipe, VPReductionRecipe,
7002 VPReplicateRecipe, VPWidenLoadRecipe, VPWidenStoreRecipe,
7003 VPVectorPointerRecipe, VPVectorEndPointerRecipe,
7004 VPHistogramRecipe>(&R))
7005 continue;
7006 auto *VPI = cast<VPInstruction>(&R);
7007 if (!VPI->getUnderlyingValue())
7008 continue;
7009
7010 // TODO: Gradually replace uses of underlying instruction by analyses on
7011 // VPlan. Migrate code relying on the underlying instruction from VPlan0
7012 // to construct recipes below to not use the underlying instruction.
7013 Instruction *Instr = VPI->getUnderlyingInstr();
7014 Builder.setInsertPoint(VPI);
7015
7016 VPRecipeBase *Recipe =
7017 RecipeBuilder.tryToCreateWidenNonPhiRecipe(VPI, Range);
7018 if (!Recipe)
7019 Recipe =
7020 RecipeBuilder.handleReplication(cast<VPInstruction>(VPI), Range);
7021
7022 if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) && isa<TruncInst>(Instr)) {
7023 // Optimized a truncate to VPWidenIntOrFpInductionRecipe. It needs to be
7024 // moved to the phi section in the header.
7025 Recipe->insertBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi());
7026 } else {
7027 Builder.insert(Recipe);
7028 }
7029 if (Recipe->getNumDefinedValues() == 1) {
7030 VPI->replaceAllUsesWith(Recipe->getVPSingleValue());
7031 } else {
7032 assert(Recipe->getNumDefinedValues() == 0 &&
7033 "Unexpected multidef recipe");
7034 }
7035 R.eraseFromParent();
7036 }
7037 }
7038
7039 assert(isa<VPRegionBlock>(LoopRegion) &&
7040 !LoopRegion->getEntryBasicBlock()->empty() &&
7041 "entry block must be set to a VPRegionBlock having a non-empty entry "
7042 "VPBasicBlock");
7043
7045 Range);
7046
7047 // ---------------------------------------------------------------------------
7048 // Transform initial VPlan: Apply previously taken decisions, in order, to
7049 // bring the VPlan to its final state.
7050 // ---------------------------------------------------------------------------
7051
7052 addReductionResultComputation(Plan, RecipeBuilder, Range.Start);
7053
7054 // Optimize FindIV reductions to use sentinel-based approach when possible.
7056 *OrigLoop);
7058 CM.foldTailByMasking());
7059
7060 // Apply mandatory transformation to handle reductions with multiple in-loop
7061 // uses if possible, bail out otherwise.
7063 OrigLoop))
7064 return nullptr;
7065 // Apply mandatory transformation to handle FP maxnum/minnum reduction with
7066 // NaNs if possible, bail out otherwise.
7068 return nullptr;
7069
7070 // Create whole-vector selects for find-last recurrences.
7072 return nullptr;
7073
7075
7076 // Create partial reduction recipes for scaled reductions and transform
7077 // recipes to abstract recipes if it is legal and beneficial and clamp the
7078 // range for better cost estimation.
7079 // TODO: Enable following transform when the EVL-version of extended-reduction
7080 // and mulacc-reduction are implemented.
7081 if (!CM.foldTailWithEVL()) {
7083 Range);
7085 Range);
7086 }
7087
7088 // Ensure scalar VF plans only contain VF=1, as required by hasScalarVFOnly.
7089 if (Range.Start.isScalar())
7090 Range.End = Range.Start * 2;
7091
7092 for (ElementCount VF : Range)
7093 Plan->addVF(VF);
7094 Plan->setName("Initial VPlan");
7095
7096 // Interleave memory: for each Interleave Group we marked earlier as relevant
7097 // for this VPlan, replace the Recipes widening its memory instructions with a
7098 // single VPInterleaveRecipe at its insertion point.
7099 VPlanTransforms::createInterleaveGroups(*Plan,
7100 InterleaveGroups, CM.isEpilogueAllowed());
7101
7102 // Replace VPValues for known constant strides.
7104 Legal->getLAI()->getSymbolicStrides());
7105
7106 auto BlockNeedsPredication = [this](BasicBlock *BB) {
7107 return Legal->blockNeedsPredication(BB);
7108 };
7109 VPlanTransforms::dropPoisonGeneratingRecipes(*Plan,
7110 BlockNeedsPredication);
7111
7112 if (useActiveLaneMask(Style)) {
7113 // TODO: Move checks to VPlanTransforms::addActiveLaneMask once
7114 // TailFoldingStyle is visible there.
7115 bool ForControlFlow = useActiveLaneMaskForControlFlow(Style);
7116 RUN_VPLAN_PASS(VPlanTransforms::addActiveLaneMask, *Plan, ForControlFlow);
7117 }
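// Illustrative example (editorial sketch, assumed numbers): when folding the
// tail by masking, a loop with 10 iterations and VF = 4 runs 3 masked vector
// iterations; the active lane mask of the last one is <1, 1, 0, 0>, so the
// two excess lanes are inactive instead of being handled by a scalar
// epilogue.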
7118
7119 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
7120 return Plan;
7121}
7122
7123VPlanPtr LoopVectorizationPlanner::tryToBuildVPlan(VFRange &Range) {
7124 // Outer loop handling: They may require CFG and instruction level
7125 // transformations before even evaluating whether vectorization is profitable.
7126 // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7127 // the vectorization pipeline.
7128 assert(!OrigLoop->isInnermost());
7129 assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7130
7131 auto Plan = VPlanTransforms::buildVPlan0(
7132 OrigLoop, *LI, Legal->getWidestInductionType(),
7133 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()), PSE);
7134
7136 *Plan, PSE, *OrigLoop, Legal->getInductionVars(),
7137 MapVector<PHINode *, RecurrenceDescriptor>(),
7138 SmallPtrSet<const PHINode *, 1>(), SmallPtrSet<PHINode *, 1>(),
7139 /*AllowReordering=*/false))
7140 return nullptr;
7141 [[maybe_unused]] bool CanHandleExits = VPlanTransforms::handleEarlyExits(
7142 *Plan, UncountableExitStyle::NoUncountableExit, OrigLoop, PSE, *DT,
7143 Legal->getAssumptionCache());
7144 assert(CanHandleExits &&
7145 "early-exits are not supported in VPlan-native path");
7146 VPlanTransforms::addMiddleCheck(*Plan, /*TailFolded*/ false);
7147
7149
7150 for (ElementCount VF : Range)
7151 Plan->addVF(VF);
7152
7154 return nullptr;
7155
7156 // Optimize induction live-out users to use precomputed end values.
7158 /*FoldTail=*/false);
7159
7160 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
7161 return Plan;
7162}
7163
7164void LoopVectorizationPlanner::addReductionResultComputation(
7165 VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder, ElementCount MinVF) {
7166 using namespace VPlanPatternMatch;
7167 VPTypeAnalysis TypeInfo(*Plan);
7168 VPRegionBlock *VectorLoopRegion = Plan->getVectorLoopRegion();
7169 VPBasicBlock *MiddleVPBB = Plan->getMiddleBlock();
7170 SmallVector<VPRecipeBase *> ToDelete;
7171 VPBasicBlock *LatchVPBB = VectorLoopRegion->getExitingBasicBlock();
7172 Builder.setInsertPoint(&*std::prev(std::prev(LatchVPBB->end())));
7173 VPBasicBlock::iterator IP = MiddleVPBB->getFirstNonPhi();
7174 for (VPRecipeBase &R :
7175 Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
7176 VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
7177 if (!PhiR)
7178 continue;
7179
7180 RecurKind RecurrenceKind = PhiR->getRecurrenceKind();
7181 const RecurrenceDescriptor &RdxDesc = Legal->getRecurrenceDescriptor(
7182 cast<PHINode>(PhiR->getUnderlyingInstr()));
7183 Type *PhiTy = TypeInfo.inferScalarType(PhiR);
7184 // If tail is folded by masking, introduce selects between the phi
7185 // and the users outside the vector region of each reduction, at the
7186 // beginning of the dedicated latch block.
7187 auto *OrigExitingVPV = PhiR->getBackedgeValue();
7188 auto *NewExitingVPV = PhiR->getBackedgeValue();
7189 if (!PhiR->isInLoop() && CM.foldTailByMasking()) {
7190 VPValue *Cond = vputils::findHeaderMask(*Plan);
7191 NewExitingVPV =
7192 Builder.createSelect(Cond, OrigExitingVPV, PhiR, {}, "", *PhiR);
7193 OrigExitingVPV->replaceUsesWithIf(NewExitingVPV, [](VPUser &U, unsigned) {
7194 return match(&U,
7195 m_VPInstruction<VPInstruction::ComputeReductionResult>());
7196 });
7197
7198 if (CM.usePredicatedReductionSelect(RecurrenceKind))
7199 PhiR->setOperand(1, NewExitingVPV);
7200 }
7201
7202 // We want code in the middle block to appear to execute on the location of
7203 // the scalar loop's latch terminator because: (a) it is all compiler
7204 // generated, (b) these instructions are always executed after evaluating
7205 // the latch conditional branch, and (c) other passes may add new
7206 // predecessors which terminate on this line. This is the easiest way to
7207 // ensure we don't accidentally cause an extra step back into the loop while
7208 // debugging.
7209 DebugLoc ExitDL = OrigLoop->getLoopLatch()->getTerminator()->getDebugLoc();
7210
7211 // TODO: At the moment ComputeReductionResult also drives creation of the
7212 // bc.merge.rdx phi nodes, hence it needs to be created unconditionally here
7213 // even for in-loop reductions, until the reduction resume value handling is
7214 // also modeled in VPlan.
7215 VPInstruction *FinalReductionResult;
7216 VPBuilder::InsertPointGuard Guard(Builder);
7217 Builder.setInsertPoint(MiddleVPBB, IP);
7218 // For AnyOf reductions, find the select among PhiR's users and convert
7219 // the reduction phi to operate on bools before creating the final
7220 // reduction result.
7221 if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RecurrenceKind)) {
7222 auto *AnyOfSelect =
7223 cast<VPSingleDefRecipe>(*find_if(PhiR->users(), [](VPUser *U) {
7224 return match(U, m_Select(m_VPValue(), m_VPValue(), m_VPValue()));
7225 }));
7226 VPValue *Start = PhiR->getStartValue();
7227 bool TrueValIsPhi = AnyOfSelect->getOperand(1) == PhiR;
7228 // NewVal is the non-phi operand of the select.
7229 VPValue *NewVal = TrueValIsPhi ? AnyOfSelect->getOperand(2)
7230 : AnyOfSelect->getOperand(1);
7231
7232 // Adjust AnyOf reductions; replace the reduction phi for the selected
7233 // value with a boolean reduction phi node to check if the condition is
7234 // true in any iteration. The final value is selected by the final
7235 // ComputeReductionResult.
7236 VPValue *Cmp = AnyOfSelect->getOperand(0);
7237 // If the compare is checking the reduction PHI node, adjust it to check
7238 // the start value.
7239 if (VPRecipeBase *CmpR = Cmp->getDefiningRecipe())
7240 CmpR->replaceUsesOfWith(PhiR, PhiR->getStartValue());
7241 Builder.setInsertPoint(AnyOfSelect);
7242
7243 // If the true value of the select is the reduction phi, the new value
7244 // is selected if the negated condition is true in any iteration.
7245 if (TrueValIsPhi)
7246 Cmp = Builder.createNot(Cmp);
7247 VPValue *Or = Builder.createOr(PhiR, Cmp);
7248 // Only replace uses inside the vector region with Or. External uses
7249 // (e.g. scalar preheader resume phis) must be replaced by the user
7250 // update loop below with FinalReductionResult.
7251 AnyOfSelect->replaceUsesWithIf(Or, [](VPUser &U, unsigned) {
7252 return cast<VPRecipeBase>(&U)->getRegion();
7253 });
7254 ToDelete.push_back(AnyOfSelect);
7255
7256 // Convert the reduction phi to operate on bools.
7257 PhiR->setOperand(0, Plan->getFalse());
7258
7259 // Update NewExitingVPV if it was pointing to the now-replaced select.
7260 if (NewExitingVPV == AnyOfSelect)
7261 NewExitingVPV = Or;
7262
7263 Builder.setInsertPoint(MiddleVPBB, IP);
7264
7265 FinalReductionResult =
7266 Builder.createAnyOfReduction(NewExitingVPV, NewVal, Start, ExitDL);
7267 } else {
7268 VPIRFlags Flags(RecurrenceKind, PhiR->isOrdered(), PhiR->isInLoop(),
7269 PhiR->getFastMathFlags());
7270 FinalReductionResult =
7271 Builder.createNaryOp(VPInstruction::ComputeReductionResult,
7272 {NewExitingVPV}, Flags, ExitDL);
7273 }
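// Illustrative example (editorial sketch): an AnyOf reduction such as
//   bool found = false;
//   for (int i = 0; i < n; ++i)
//     if (p[i] > 42) found = true;
// is rewritten above so the loop or-reduces an i1 vector; the final choice
// between the start and the new value is made once, in the middle block, by
// the AnyOf reduction result computed here.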
7274 // If the vector reduction can be performed in a smaller type, we truncate
7275 // then extend the loop exit value to enable InstCombine to evaluate the
7276 // entire expression in the smaller type.
7277 if (MinVF.isVector() && PhiTy != RdxDesc.getRecurrenceType() &&
7279 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
7281 "Unexpected truncated min-max recurrence!");
7282 Type *RdxTy = RdxDesc.getRecurrenceType();
7283 VPWidenCastRecipe *Trunc;
7284 Instruction::CastOps ExtendOpc =
7285 RdxDesc.isSigned() ? Instruction::SExt : Instruction::ZExt;
7286 VPWidenCastRecipe *Extnd;
7287 {
7288 VPBuilder::InsertPointGuard Guard(Builder);
7289 Builder.setInsertPoint(
7290 NewExitingVPV->getDefiningRecipe()->getParent(),
7291 std::next(NewExitingVPV->getDefiningRecipe()->getIterator()));
7292 Trunc =
7293 Builder.createWidenCast(Instruction::Trunc, NewExitingVPV, RdxTy);
7294 Extnd = Builder.createWidenCast(ExtendOpc, Trunc, PhiTy);
7295 }
7296 if (PhiR->getOperand(1) == NewExitingVPV)
7297 PhiR->setOperand(1, Extnd->getVPSingleValue());
7298
7299 // Update ComputeReductionResult with the truncated exiting value and
7300 // extend its result. Operand 0 provides the values to be reduced.
7301 FinalReductionResult->setOperand(0, Trunc);
7302 FinalReductionResult =
7303 Builder.createScalarCast(ExtendOpc, FinalReductionResult, PhiTy, {});
7304 }
7305
7306 // Update all users outside the vector region. Also replace redundant
7307 // extracts.
7308 for (auto *U : to_vector(OrigExitingVPV->users())) {
7309 auto *Parent = cast<VPRecipeBase>(U)->getParent();
7310 if (FinalReductionResult == U || Parent->getParent())
7311 continue;
7312 // Skip ComputeReductionResult and FindIV reductions when they are not the
7313 // final result.
7314 if (match(U, m_VPInstruction<VPInstruction::ComputeReductionResult>()) ||
7316 match(U, m_VPInstruction<Instruction::ICmp>())))
7317 continue;
7318 U->replaceUsesOfWith(OrigExitingVPV, FinalReductionResult);
7319
7320 // Look through ExtractLastPart.
7322 U = cast<VPInstruction>(U)->getSingleUser();
7323
7326 cast<VPInstruction>(U)->replaceAllUsesWith(FinalReductionResult);
7327 }
7328
7329 RecurKind RK = PhiR->getRecurrenceKind();
7334 VPBuilder PHBuilder(Plan->getVectorPreheader());
7335 VPValue *Iden = Plan->getOrAddLiveIn(
7336 getRecurrenceIdentity(RK, PhiTy, PhiR->getFastMathFlags()));
7337 auto *ScaleFactorVPV = Plan->getConstantInt(32, 1);
7338 VPValue *StartV = PHBuilder.createNaryOp(
7339 VPInstruction::ReductionStartVector,
7340 {PhiR->getStartValue(), Iden, ScaleFactorVPV}, *PhiR);
7341 PhiR->setOperand(0, StartV);
7342 }
7343 }
7344 for (VPRecipeBase *R : ToDelete)
7345 R->eraseFromParent();
7346
7348}
7349
7350void LoopVectorizationPlanner::attachRuntimeChecks(
7351 VPlan &Plan, GeneratedRTChecks &RTChecks, bool HasBranchWeights) const {
7352 const auto &[SCEVCheckCond, SCEVCheckBlock] = RTChecks.getSCEVChecks();
7353 if (SCEVCheckBlock && SCEVCheckBlock->hasNPredecessors(0)) {
7354 assert((!Config.OptForSize ||
7355 CM.Hints->getForce() == LoopVectorizeHints::FK_Enabled) &&
7356 "Cannot SCEV check stride or overflow when optimizing for size");
7358 SCEVCheckBlock, HasBranchWeights);
7359 }
7360 const auto &[MemCheckCond, MemCheckBlock] = RTChecks.getMemRuntimeChecks();
7361 if (MemCheckBlock && MemCheckBlock->hasNPredecessors(0)) {
7362 // VPlan-native path does not do any analysis for runtime checks
7363 // currently.
7364 assert((!EnableVPlanNativePath || OrigLoop->isInnermost()) &&
7365 "Runtime checks are not supported for outer loops yet");
7366
7367 if (Config.OptForSize) {
7368 assert(
7369 CM.Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
7370 "Cannot emit memory checks when optimizing for size, unless forced "
7371 "to vectorize.");
7372 ORE->emit([&]() {
7373 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
7374 OrigLoop->getStartLoc(),
7375 OrigLoop->getHeader())
7376 << "Code-size may be reduced by not forcing "
7377 "vectorization, or by source-code modifications "
7378 "eliminating the need for runtime checks "
7379 "(e.g., adding 'restrict').";
7380 });
7381 }
7383 MemCheckBlock, HasBranchWeights);
7384 }
7385}
7386
7387void LoopVectorizationPlanner::addMinimumIterationCheck(
7388 VPlan &Plan, ElementCount VF, unsigned UF,
7389 ElementCount MinProfitableTripCount) const {
7390 const uint32_t *BranchWeights =
7391 hasBranchWeightMD(*OrigLoop->getLoopLatch()->getTerminator())
7393 : nullptr;
7395 MinProfitableTripCount,
7396 CM.requiresScalarEpilogue(VF.isVector()),
7397 CM.foldTailByMasking(), OrigLoop, BranchWeights,
7398 OrigLoop->getLoopPredecessor()->getTerminator()->getDebugLoc(),
7399 PSE, /*CheckBlock=*/nullptr);
7400}
7401
7402// Determine how to lower the epilogue, which depends on 1) optimising
7403// for minimum code-size, 2) tail-folding compiler options, 3) loop
7404// hints forcing tail-folding, and 4) a TTI hook that analyses whether the loop
7405// is suitable for tail-folding.
7406static EpilogueLowering
7408 bool OptForSize, TargetTransformInfo *TTI,
7410 InterleavedAccessInfo *IAI) {
7411 // 1) OptSize takes precedence over all other options, i.e. if this is set,
7412 // don't look at hints or options, and don't request an epilogue.
7413 if (F->hasOptSize() ||
7414 (OptForSize && Hints.getForce() != LoopVectorizeHints::FK_Enabled))
7416
7417 // 2) If set, obey the directives
7418 if (TailFoldingPolicy.getNumOccurrences()) {
7419 switch (TailFoldingPolicy) {
7421 return CM_EpilogueAllowed;
7426 };
7427 }
7428
7429 // 3) If set, obey the hints
7430 switch (Hints.getPredicate()) {
7434 return CM_EpilogueAllowed;
7435 };
7436
7437 // 4) if the TTI hook indicates this is profitable, request tail-folding.
7438 TailFoldingInfo TFI(TLI, &LVL, IAI);
7439 if (TTI->preferTailFoldingOverEpilogue(&TFI))
7441
7442 return CM_EpilogueAllowed;
7443}
7444
7445// Process the loop in the VPlan-native vectorization path. This path builds
7446// VPlan upfront in the vectorization pipeline, which allows to apply
7447// VPlan-to-VPlan transformations from the very beginning without modifying the
7448// input LLVM IR.
7454 std::function<BlockFrequencyInfo &()> GetBFI, bool OptForSize,
7455 LoopVectorizeHints &Hints, LoopVectorizationRequirements &Requirements) {
7456
7458 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
7459 return false;
7460 }
7461 assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
7462 Function *F = L->getHeader()->getParent();
7463 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
7464
7465 EpilogueLowering SEL =
7466 getEpilogueLowering(F, L, Hints, OptForSize, TTI, TLI, *LVL, &IAI);
7467
7468 VFSelectionContext Config(*TTI, LVL, L, *F, PSE, DB, ORE, &Hints, OptForSize);
7469 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, AC, ORE,
7470 GetBFI, F, &Hints, IAI, Config);
7471 // Use the planner for outer loop vectorization.
7472 // TODO: CM is not used at this point inside the planner. Turn CM into an
7473 // optional argument if we don't need it in the future.
7474 LoopVectorizationPlanner LVP(L, LI, DT, TLI, *TTI, LVL, CM, Config, IAI, PSE,
7475 Hints, ORE);
7476
7477 // Get user vectorization factor.
7478 ElementCount UserVF = Hints.getWidth();
7479
7481
7482 // Plan how to best vectorize, return the best VF and its cost.
7483 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
7484
7485 // If we are stress testing VPlan builds, do not attempt to generate vector
7486 // code. Masked vector code generation support will follow soon.
7487 // Also, do not attempt to vectorize if no vector code will be produced.
7488 if (VPlanBuildStressTest || VectorizationFactor::Disabled() == VF)
7489 return false;
7490
7491 VPlan &BestPlan = LVP.getPlanFor(VF.Width);
7492
7493 {
7494 GeneratedRTChecks Checks(PSE, DT, LI, TTI, Config.CostKind);
7495 InnerLoopVectorizer LB(L, PSE, LI, DT, TTI, AC, VF.Width, /*UF=*/1, &CM,
7496 Checks, BestPlan);
7497 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \"" << F->getName()
7498 << "\"\n");
7499 LVP.addMinimumIterationCheck(BestPlan, VF.Width, /*UF=*/1,
7500 VF.MinProfitableTripCount);
7501 bool HasBranchWeights =
7502 hasBranchWeightMD(*L->getLoopLatch()->getTerminator());
7503 LVP.attachRuntimeChecks(BestPlan, Checks, HasBranchWeights);
7504
7505 reportVectorization(ORE, L, VF, 1);
7506
7507 LVP.executePlan(VF.Width, /*UF=*/1, BestPlan, LB, DT);
7508 }
7509
7510 assert(!verifyFunction(*F, &dbgs()));
7511 return true;
7512}
7513
7514// Emit a remark if there are stores to floats that required a floating point
7515// extension. If the vectorized loop was generated with floating point there
7516// will be a performance penalty from the conversion overhead and the change in
7517// the vector width.
7518static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
7519 SmallVector<Instruction *, 4> Worklist;
7520 for (BasicBlock *BB : L->getBlocks()) {
7521 for (Instruction &Inst : *BB) {
7522 if (auto *S = dyn_cast<StoreInst>(&Inst)) {
7523 if (S->getValueOperand()->getType()->isFloatTy())
7524 Worklist.push_back(S);
7525 }
7526 }
7527 }
7528
7529 // Traverse the floating point stores upwards, searching for floating point
7530 // conversions.
7531 SmallPtrSet<const Instruction *, 4> Visited;
7532 SmallPtrSet<const Instruction *, 4> EmittedRemark;
7533 while (!Worklist.empty()) {
7534 auto *I = Worklist.pop_back_val();
7535 if (!L->contains(I))
7536 continue;
7537 if (!Visited.insert(I).second)
7538 continue;
7539
7540 // Emit a remark if the floating point store required a floating
7541 // point conversion.
7542 // TODO: More work could be done to identify the root cause such as a
7543 // constant or a function return type and point the user to it.
7544 if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
7545 ORE->emit([&]() {
7546 return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
7547 I->getDebugLoc(), L->getHeader())
7548 << "floating point conversion changes vector width. "
7549 << "Mixed floating point precision requires an up/down "
7550 << "cast that will negatively impact performance.";
7551 });
7552
7553 for (Use &Op : I->operands())
7554 if (auto *OpI = dyn_cast<Instruction>(Op))
7555 Worklist.push_back(OpI);
7556 }
7557}
7558
7559/// For loops with uncountable early exits, find the cost of doing work when
7560/// exiting the loop early, such as calculating the final exit values of
7561/// variables used outside the loop.
7562/// TODO: This is currently overly pessimistic because the loop may not take
7563/// the early exit, but better to keep this conservative for now. In future,
7564/// it might be possible to relax this by using branch probabilities.
7565static InstructionCost calculateEarlyExitCost(VPCostContext &CostCtx,
7566 VPlan &Plan, ElementCount VF) {
7567 InstructionCost Cost = 0;
7568 for (auto *ExitVPBB : Plan.getExitBlocks()) {
7569 for (auto *PredVPBB : ExitVPBB->getPredecessors()) {
7570 // If the predecessor is not the middle.block, then it must be the
7571 // vector.early.exit block, which may contain work to calculate the exit
7572 // values of variables used outside the loop.
7573 if (PredVPBB != Plan.getMiddleBlock()) {
7574 LLVM_DEBUG(dbgs() << "Calculating cost of work in exit block "
7575 << PredVPBB->getName() << ":\n");
7576 Cost += PredVPBB->cost(VF, CostCtx);
7577 }
7578 }
7579 }
7580 return Cost;
7581}
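// Illustrative example (editorial sketch): in a search loop such as
//   for (int i = 0; i < n; ++i)
//     if (a[i] == key) return i;
// the vector.early.exit block must determine the first matching lane to
// produce the scalar result; the cost of that extra work is what the function
// above accumulates.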
7582
7583/// This function determines whether or not it's still profitable to vectorize
7584/// the loop given the extra work we have to do outside of the loop:
7585/// 1. Perform the runtime checks before entering the loop to ensure it's safe
7586/// to vectorize.
7587/// 2. In the case of loops with uncountable early exits, we may have to do
7588/// extra work when exiting the loop early, such as calculating the final
7589/// exit values of variables used outside the loop.
7590/// 3. The middle block.
7591static bool isOutsideLoopWorkProfitable(GeneratedRTChecks &Checks,
7592 VectorizationFactor &VF, Loop *L,
7593 PredicatedScalarEvolution &PSE,
7594 VPCostContext &CostCtx, VPlan &Plan,
7595 EpilogueLowering SEL,
7596 std::optional<unsigned> VScale) {
7597 InstructionCost RtC = Checks.getCost();
7598 if (!RtC.isValid())
7599 return false;
7600
7601 // When interleaving only, scalar and vector costs will be equal, which in
7602 // turn would lead to a divide by 0. Fall back to the hard threshold.
7603 if (VF.Width.isScalar()) {
7604 // TODO: Should we rename VectorizeMemoryCheckThreshold?
7605 if (RtC > VectorizeMemoryCheckThreshold) {
7606 LLVM_DEBUG(
7607 dbgs()
7608 << "LV: Interleaving only is not profitable due to runtime checks\n");
7609 return false;
7610 }
7611 return true;
7612 }
7613
7614 // The scalar cost should only be 0 when vectorizing with a user specified
7615 // VF/IC. In those cases, runtime checks should always be generated.
7616 uint64_t ScalarC = VF.ScalarCost.getValue();
7617 if (ScalarC == 0)
7618 return true;
7619
7620 InstructionCost TotalCost = RtC;
7621 // Add on the cost of any work required in the vector early exit block, if
7622 // one exists.
7623 TotalCost += calculateEarlyExitCost(CostCtx, Plan, VF.Width);
7624 TotalCost += Plan.getMiddleBlock()->cost(VF.Width, CostCtx);
7625
7626 // First, compute the minimum iteration count required so that the vector
7627 // loop outperforms the scalar loop.
7628 // The total cost of the scalar loop is
7629 // ScalarC * TC
7630 // where
7631 // * TC is the actual trip count of the loop.
7632 // * ScalarC is the cost of a single scalar iteration.
7633 //
7634 // The total cost of the vector loop is
7635 // TotalCost + VecC * (TC / VF) + EpiC
7636 // where
7637 // * TotalCost is the sum of the costs of
7638 // - the generated runtime checks, i.e. RtC
7639 // - performing any additional work in the vector.early.exit block for
7640 // loops with uncountable early exits.
7641 // - the middle block, if ExpectedTC <= VF.Width.
7642 // * VecC is the cost of a single vector iteration.
7643 // * TC is the actual trip count of the loop
7644 // * VF is the vectorization factor
7645 // * EpiC is the cost of the generated epilogue, including the cost
7646 // of the remaining scalar operations.
7647 //
7648 // Vectorization is profitable once the total vector cost is less than the
7649 // total scalar cost:
7650 // TotalCost + VecC * (TC / VF) + EpiC < ScalarC * TC
7651 //
7652 // Now we can compute the minimum required trip count TC as
7653 // VF * (TotalCost + EpiC) / (ScalarC * VF - VecC) < TC
7654 //
7655 // For now we assume the epilogue cost EpiC = 0 for simplicity. Note that
7656 // the computations are performed on doubles, not integers and the result
7657 // is rounded up, hence we get an upper estimate of the TC.
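// For illustration only (hypothetical numbers, not taken from any target):
// with ScalarC = 4, VF.Cost = 10, IntVF = 8 and TotalCost = 24, the
// denominator below is Div = 4 * 8 - 10 = 22 and
// MinTC1 = ceil(24 * 8 / 22) = 9, i.e. the vector loop plus its
// out-of-loop work starts to beat the scalar loop at 9 iterations.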
7658 unsigned IntVF = estimateElementCount(VF.Width, VScale);
7659 uint64_t Div = ScalarC * IntVF - VF.Cost.getValue();
7660 uint64_t MinTC1 =
7661 Div == 0 ? 0 : divideCeil(TotalCost.getValue() * IntVF, Div);
7662
7663 // Second, compute a minimum iteration count so that the cost of the
7664 // runtime checks is only a fraction of the total scalar loop cost. This
7665 // adds a loop-dependent bound on the overhead incurred if the runtime
7666 // checks fail. In case the runtime checks fail, the cost is RtC + ScalarC
7667 // * TC. To bound the runtime check to be a fraction 1/X of the scalar
7668 // cost, compute
7669 // RtC < ScalarC * TC * (1 / X) ==> RtC * X / ScalarC < TC
7670 uint64_t MinTC2 = divideCeil(RtC.getValue() * 10, ScalarC);
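// For illustration only (hypothetical numbers): with RtC = 20 and
// ScalarC = 4, MinTC2 = ceil(20 * 10 / 4) = 50, so a failing runtime
// check costs at most roughly 1/10 of the scalar loop at that trip count.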
7671
7672 // Now pick the larger minimum. If it is not a multiple of VF and an epilogue
7673 // is allowed, choose the next closest multiple of VF. This should partly
7674 // compensate for ignoring the epilogue cost.
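// For illustration only (hypothetical numbers, continuing the example
// above): max(9, 50) = 50, rounded up to the next multiple of IntVF = 8,
// gives MinTC = 56.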
7675 uint64_t MinTC = std::max(MinTC1, MinTC2);
7676 if (SEL == CM_EpilogueAllowed)
7677 MinTC = alignTo(MinTC, IntVF);
7678 VF.MinProfitableTripCount = ElementCount::getFixed(MinTC);
7679
7680 LLVM_DEBUG(
7681 dbgs() << "LV: Minimum required TC for runtime checks to be profitable:"
7682 << VF.MinProfitableTripCount << "\n");
7683
7684 // Skip vectorization if the expected trip count is less than the minimum
7685 // required trip count.
7686 if (auto ExpectedTC = getSmallBestKnownTC(PSE, L)) {
7687 if (ElementCount::isKnownLT(*ExpectedTC, VF.MinProfitableTripCount)) {
7688 LLVM_DEBUG(dbgs() << "LV: Vectorization is not beneficial: expected "
7689 "trip count < minimum profitable VF ("
7690 << *ExpectedTC << " < " << VF.MinProfitableTripCount
7691 << ")\n");
7692
7693 return false;
7694 }
7695 }
7696 return true;
7697}
7698
7699LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
7700 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
7701 !EnableLoopInterleaving),
7702 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
7703 !EnableLoopVectorization) {}
7704
7705/// Prepare \p MainPlan for vectorizing the main vector loop during epilogue
7706/// vectorization.
7707static SmallVector<VPInstruction *>
7708preparePlanForMainVectorLoop(VPlan &MainPlan, VPlan &EpiPlan) {
7709 using namespace VPlanPatternMatch;
7710 // When vectorizing the epilogue, FindFirstIV & FindLastIV reductions can
7711 // introduce multiple uses of undef/poison. If the reduction start value may
7712 // be undef or poison it needs to be frozen and the frozen start has to be
7713 // used when computing the reduction result. We also need to use the frozen
7714 // value in the resume phi generated by the main vector loop, as this is also
7715 // used to compute the reduction result after the epilogue vector loop.
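// Illustrative sketch (not generated IR verbatim): a start value %start
// that may be poison becomes "%fr = freeze i32 %start", and both the
// FindIV result computation and the main loop's resume phi use %fr.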
7716 auto AddFreezeForFindLastIVReductions = [](VPlan &Plan,
7717 bool UpdateResumePhis) {
7718 VPBuilder Builder(Plan.getEntry());
7719 for (VPRecipeBase &R : *Plan.getMiddleBlock()) {
7720 auto *VPI = dyn_cast<VPInstruction>(&R);
7721 if (!VPI)
7722 continue;
7723 VPValue *OrigStart;
7724 if (!matchFindIVResult(VPI, m_VPValue(), m_VPValue(OrigStart)))
7725 continue;
7726 if (isGuaranteedNotToBeUndefOrPoison(OrigStart->getLiveInIRValue()))
7727 continue;
7728 VPInstruction *Freeze =
7729 Builder.createNaryOp(Instruction::Freeze, {OrigStart}, {}, "fr");
7730 VPI->setOperand(2, Freeze);
7731 if (UpdateResumePhis)
7732 OrigStart->replaceUsesWithIf(Freeze, [Freeze](VPUser &U, unsigned) {
7733 return Freeze != &U && isa<VPPhi>(&U);
7734 });
7735 }
7736 };
7737 AddFreezeForFindLastIVReductions(MainPlan, true);
7738 AddFreezeForFindLastIVReductions(EpiPlan, false);
7739
7740 VPValue *VectorTC = nullptr;
7741 auto *Term =
7742 MainPlan.getVectorLoopRegion()->getExitingBasicBlock()->getTerminator();
7743 [[maybe_unused]] bool MatchedTC =
7744 match(Term, m_BranchOnCount(m_VPValue(), m_VPValue(VectorTC)));
7745 assert(MatchedTC && "must match vector trip count");
7746
7747 // If there is a suitable resume value for the canonical induction in the
7748 // scalar (which will become vector) epilogue loop, use it and move it to the
7749 // beginning of the scalar preheader. Otherwise create it below.
7750 VPBasicBlock *MainScalarPH = MainPlan.getScalarPreheader();
7751 auto ResumePhiIter =
7752 find_if(MainScalarPH->phis(), [VectorTC](VPRecipeBase &R) {
7753 return match(&R, m_VPInstruction<Instruction::PHI>(m_Specific(VectorTC),
7754 m_ZeroInt()));
7755 });
7756 VPPhi *ResumePhi = nullptr;
7757 if (ResumePhiIter == MainScalarPH->phis().end()) {
7758 Type *Ty = VPTypeAnalysis(MainPlan).inferScalarType(VectorTC);
7759 VPBuilder ScalarPHBuilder(MainScalarPH, MainScalarPH->begin());
7760 ResumePhi = ScalarPHBuilder.createScalarPhi(
7761 {VectorTC, MainPlan.getZero(Ty)}, {}, "vec.epilog.resume.val");
7762 } else {
7763 ResumePhi = cast<VPPhi>(&*ResumePhiIter);
7764 ResumePhi->setName("vec.epilog.resume.val");
7765 if (&MainScalarPH->front() != ResumePhi)
7766 ResumePhi->moveBefore(*MainScalarPH, MainScalarPH->begin());
7767 }
7768
7769 // Create a ResumeForEpilogue for the canonical IV resume as the
7770 // first non-phi, to keep it alive for the epilogue.
7771 VPBuilder ResumeBuilder(MainScalarPH);
7772 ResumeBuilder.createNaryOp(VPInstruction::ResumeForEpilogue, ResumePhi);
7773
7774 // Create ResumeForEpilogue instructions for the resume phis of the
7775 // VPIRPhis in the scalar header of the main plan and return them so they can
7776 // be used as resume values when vectorizing the epilogue.
7777 return to_vector(
7778 map_range(MainPlan.getScalarHeader()->phis(), [&](VPRecipeBase &R) {
7779 assert(isa<VPIRPhi>(R) &&
7780 "only VPIRPhis expected in the scalar header");
7781 return ResumeBuilder.createNaryOp(VPInstruction::ResumeForEpilogue,
7782 R.getOperand(0));
7783 }));
7784}
7785
7786/// Prepare \p Plan for vectorizing the epilogue loop. That is, re-use expanded
7787/// SCEVs from \p ExpandedSCEVs and set resume values for header recipes. Some
7788/// reductions require creating new instructions to compute the resume values.
7789/// They are collected in a vector and returned. They must be moved to the
7790/// preheader of the vector epilogue loop, after being created by the execution
7791/// of \p Plan.
7792static SmallVector<Instruction *> preparePlanForEpilogueVectorLoop(
7793 VPlan &Plan, Loop *L, const SCEV2ValueTy &ExpandedSCEVs,
7794 EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel &CM,
7795 VFSelectionContext &Config, ScalarEvolution &SE) {
7796 VPRegionBlock *VectorLoop = Plan.getVectorLoopRegion();
7797 VPBasicBlock *Header = VectorLoop->getEntryBasicBlock();
7798 Header->setName("vec.epilog.vector.body");
7799
7800 VPValue *IV = VectorLoop->getCanonicalIV();
7801 // When vectorizing the epilogue loop, the canonical induction needs to start
7802 // at the resume value from the main vector loop. Find the resume value
7803 // created during execution of the main VPlan. It must be the first phi in the
7804 // loop preheader. Add this resume value as an offset to the canonical IV of
7805 // the epilogue loop.
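// Illustrative sketch (hypothetical values): if the main vector loop
// finished 96 iterations, the epilogue's canonical IV still counts
// 0, 8, 16, ..., but every user sees 96 + IV, so the epilogue resumes
// at element 96.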
7806 using namespace llvm::PatternMatch;
7807 PHINode *EPResumeVal = &*L->getLoopPreheader()->phis().begin();
7808 for (Value *Inc : EPResumeVal->incoming_values()) {
7809 if (match(Inc, m_SpecificInt(0)))
7810 continue;
7811 assert(!EPI.VectorTripCount &&
7812 "Must only have a single non-zero incoming value");
7813 EPI.VectorTripCount = Inc;
7814 }
7815 // If we didn't find a non-zero vector trip count, all incoming values
7816 // must be zero, which also means the vector trip count is zero. Pick the
7817 // first zero as vector trip count.
7818 // TODO: We should not choose VF * UF so the main vector loop is known to
7819 // be dead.
7820 if (!EPI.VectorTripCount) {
7821 assert(EPResumeVal->getNumIncomingValues() > 0 &&
7822 all_of(EPResumeVal->incoming_values(), match_fn(m_SpecificInt(0))) &&
7823 "all incoming values must be 0");
7824 EPI.VectorTripCount = EPResumeVal->getOperand(0);
7825 }
7826 VPValue *VPV = Plan.getOrAddLiveIn(EPResumeVal);
7827 assert(all_of(IV->users(),
7828 [](const VPUser *U) {
7829 return isa<VPScalarIVStepsRecipe>(U) ||
7830 isa<VPDerivedIVRecipe>(U) ||
7831 cast<VPRecipeBase>(U)->isScalarCast() ||
7832 cast<VPInstruction>(U)->getOpcode() ==
7833 Instruction::Add;
7834 }) &&
7835 "the canonical IV should only be used by its increment or "
7836 "ScalarIVSteps when resetting the start value");
7837 VPBuilder Builder(Header, Header->getFirstNonPhi());
7838 VPInstruction *Add = Builder.createAdd(IV, VPV);
7839 // Replace all users of the canonical IV and its increment with the offset
7840 // version, except for the Add itself and the canonical IV increment.
7841 auto *Increment = dyn_cast<VPInstruction>(cast<VPPhi>(IV)->getOperand(1));
7842 assert(Increment && "Must have a canonical IV increment at this point");
7843 IV->replaceUsesWithIf(Add, [Add, Increment](VPUser &U, unsigned) {
7844 return &U != Add && &U != Increment;
7845 });
7846 VPInstruction *OffsetIVInc =
7847 Builder.createAdd(Increment, VPV);
7848 Increment->replaceAllUsesWith(OffsetIVInc);
7849 OffsetIVInc->setOperand(0, Increment);
7850
7851 DenseMap<Value *, Value *> ToFrozen;
7852 SmallVector<Instruction *> InstsToMove;
7853 // Ensure that the start values for all header phi recipes are updated before
7854 // vectorizing the epilogue loop.
7855 for (VPRecipeBase &R : Header->phis()) {
7856 Value *ResumeV = nullptr;
7857 // TODO: Move setting of resume values to prepareToExecute.
7858 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) {
7859 // Find the reduction result by searching users of the phi or its backedge
7860 // value.
7861 auto IsReductionResult = [](VPRecipeBase *R) {
7862 auto *VPI = dyn_cast<VPInstruction>(R);
7863 return VPI && VPI->getOpcode() == VPInstruction::ComputeReductionResult;
7864 };
7865 auto *RdxResult = cast<VPInstruction>(
7866 vputils::findRecipe(ReductionPhi->getBackedgeValue(), IsReductionResult));
7867 assert(RdxResult && "expected to find reduction result");
7868
7869 ResumeV = cast<PHINode>(ReductionPhi->getUnderlyingInstr())
7870 ->getIncomingValueForBlock(L->getLoopPreheader());
7871
7872 // Check for FindIV pattern by looking for icmp user of RdxResult.
7873 // The pattern is: select(icmp ne RdxResult, Sentinel), RdxResult, Start
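// Illustrative example (hypothetical values): with Sentinel = INT_MIN
// and Start = -1, a main-loop result equal to INT_MIN means no lane
// matched, so the select yields the start value; any other result is
// the index found by the main vector loop.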
7874 using namespace VPlanPatternMatch;
7875 VPValue *SentinelVPV = nullptr;
7876 bool IsFindIV = any_of(RdxResult->users(), [&](VPUser *U) {
7877 return match(U, VPlanPatternMatch::m_SpecificICmp(
7878 ICmpInst::ICMP_NE, m_Specific(RdxResult),
7879 m_VPValue(SentinelVPV)));
7880 });
7881
7882 RecurKind RK = ReductionPhi->getRecurrenceKind();
7883 if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RK) || IsFindIV) {
7884 auto *ResumePhi = cast<PHINode>(ResumeV);
7885 Value *StartV = ResumePhi->getIncomingValueForBlock(
7886 EPI.MainLoopIterationCountCheck);
7887 IRBuilder<> Builder(ResumePhi->getParent(),
7888 ResumePhi->getParent()->getFirstNonPHIIt());
7889
7890 if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RK)) {
7891 // VPReductionPHIRecipes for AnyOf reductions expect a boolean as
7892 // start value; compare the final value from the main vector loop
7893 // to the start value.
7894 ResumeV = Builder.CreateICmpNE(ResumeV, StartV);
7895 if (auto *I = dyn_cast<Instruction>(ResumeV))
7896 InstsToMove.push_back(I);
7897 } else {
7898 assert(SentinelVPV && "expected to find icmp using RdxResult");
7899 if (auto *FreezeI = dyn_cast<FreezeInst>(StartV))
7900 ToFrozen[FreezeI->getOperand(0)] = StartV;
7901
7902 // Adjust resume: select(icmp eq ResumeV, StartV), Sentinel, ResumeV
7903 Value *Cmp = Builder.CreateICmpEQ(ResumeV, StartV);
7904 if (auto *I = dyn_cast<Instruction>(Cmp))
7905 InstsToMove.push_back(I);
7906 ResumeV = Builder.CreateSelect(Cmp, SentinelVPV->getLiveInIRValue(),
7907 ResumeV);
7908 if (auto *I = dyn_cast<Instruction>(ResumeV))
7909 InstsToMove.push_back(I);
7910 }
7911 } else {
7912 VPValue *StartVal = Plan.getOrAddLiveIn(ResumeV);
7913 auto *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
7914 if (auto *VPI = dyn_cast<VPInstruction>(PhiR->getStartValue())) {
7915 assert(VPI->getOpcode() == VPInstruction::ReductionStartVector &&
7916 "unexpected start value");
7917 // Partial sub-reductions always start at 0 and account for the
7918 // reduction start value in a final subtraction. Update it to use the
7919 // resume value from the main vector loop.
7920 if (PhiR->getVFScaleFactor() > 1 &&
7921 PhiR->getRecurrenceKind() == RecurKind::Sub) {
7922 auto *Sub = cast<VPInstruction>(RdxResult->getSingleUser());
7923 assert(Sub->getOpcode() == Instruction::Sub && "Unexpected opcode");
7924 assert(isa<VPIRValue>(Sub->getOperand(0)) &&
7925 "Expected operand to match the original start value of the "
7926 "reduction");
7929 "Expected start value for partial sub-reduction to start at "
7930 "zero");
7931 Sub->setOperand(0, StartVal);
7932 } else
7933 VPI->setOperand(0, StartVal);
7934 continue;
7935 }
7936 }
7937 } else {
7938 // Retrieve the induction resume values for wide inductions from
7939 // their original phi nodes in the scalar loop.
7940 PHINode *IndPhi = cast<VPWidenInductionRecipe>(&R)->getPHINode();
7941 // Hook up to the PHINode generated by a ResumePhi recipe of main
7942 // loop VPlan, which feeds the scalar loop.
7943 ResumeV = IndPhi->getIncomingValueForBlock(L->getLoopPreheader());
7944 }
7945 assert(ResumeV && "Must have a resume value");
7946 VPValue *StartVal = Plan.getOrAddLiveIn(ResumeV);
7947 cast<VPHeaderPHIRecipe>(&R)->setStartValue(StartVal);
7948 }
7949
7950 // For some VPValues in the epilogue plan we must re-use the generated IR
7951 // values from the main plan. Replace them with live-in VPValues.
7952 // TODO: This is a workaround needed for epilogue vectorization and it
7953 // should be removed once induction resume value creation is done
7954 // directly in VPlan.
7955 for (auto &R : make_early_inc_range(*Plan.getEntry())) {
7956 // Re-use frozen values from the main plan for Freeze VPInstructions in the
7957 // epilogue plan. This ensures all users use the same frozen value.
7958 auto *VPI = dyn_cast<VPInstruction>(&R);
7959 if (VPI && VPI->getOpcode() == Instruction::Freeze) {
7960 VPI->replaceAllUsesWith(Plan.getOrAddLiveIn(
7961 ToFrozen.lookup(VPI->getOperand(0)->getLiveInIRValue())));
7962 continue;
7963 }
7964
7965 // Re-use the trip count and steps expanded for the main loop, as
7966 // skeleton creation needs it as a value that dominates both the scalar
7967 // and vector epilogue loops
7968 auto *ExpandR = dyn_cast<VPExpandSCEVRecipe>(&R);
7969 if (!ExpandR)
7970 continue;
7971 VPValue *ExpandedVal =
7972 Plan.getOrAddLiveIn(ExpandedSCEVs.lookup(ExpandR->getSCEV()));
7973 ExpandR->replaceAllUsesWith(ExpandedVal);
7974 if (Plan.getTripCount() == ExpandR)
7975 Plan.resetTripCount(ExpandedVal);
7976 ExpandR->eraseFromParent();
7977 }
7978
7979 auto VScale = Config.getVScaleForTuning();
7980 unsigned MainLoopStep =
7981 estimateElementCount(EPI.MainLoopVF * EPI.MainLoopUF, VScale);
7982 unsigned EpilogueLoopStep =
7983 estimateElementCount(EPI.EpilogueVF * EPI.EpilogueUF, VScale);
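// Illustrative sketch (hypothetical values): MainLoopVF = vscale x 4 with
// MainLoopUF = 2 and a tuning VScale of 2 gives MainLoopStep = 16, while
// EpilogueVF = 4 with EpilogueUF = 1 gives EpilogueLoopStep = 4.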
7986 VPlanTransforms::addMinimumVectorEpilogueIterationCheck(Plan,
7987 EPI.EpilogueVF, EPI.EpilogueUF, MainLoopStep, EpilogueLoopStep, SE);
7988
7989 return InstsToMove;
7990}
7991
7992static void
7993fixScalarResumeValuesFromBypass(BasicBlock *BypassBlock, Loop *L,
7994 VPlan &BestEpiPlan,
7995 ArrayRef<VPInstruction *> ResumeValues) {
7996 // Fix resume values from the additional bypass block.
7997 BasicBlock *PH = L->getLoopPreheader();
7998 for (auto *Pred : predecessors(PH)) {
7999 for (PHINode &Phi : PH->phis()) {
8000 if (Phi.getBasicBlockIndex(Pred) != -1)
8001 continue;
8002 Phi.addIncoming(Phi.getIncomingValueForBlock(BypassBlock), Pred);
8003 }
8004 }
8005 auto *ScalarPH = cast<VPIRBasicBlock>(BestEpiPlan.getScalarPreheader());
8006 if (ScalarPH->hasPredecessors()) {
8007 // Fix resume values for inductions and reductions from the additional
8008 // bypass block using the incoming values from the main loop's resume phis.
8009 // ResumeValues correspond 1:1 with the scalar loop header phis.
8010 for (auto [ResumeV, HeaderPhi] :
8011 zip(ResumeValues, BestEpiPlan.getScalarHeader()->phis())) {
8012 auto *HeaderPhiR = cast<VPIRPhi>(&HeaderPhi);
8013 auto *EpiResumePhi =
8014 cast<PHINode>(HeaderPhiR->getIRPhi().getIncomingValueForBlock(PH));
8015 if (EpiResumePhi->getBasicBlockIndex(BypassBlock) == -1)
8016 continue;
8017 auto *MainResumePhi = cast<PHINode>(ResumeV->getUnderlyingValue());
8018 EpiResumePhi->setIncomingValueForBlock(
8019 BypassBlock, MainResumePhi->getIncomingValueForBlock(BypassBlock));
8020 }
8021 }
8022}
8023
8024/// Connect the epilogue vector loop generated for \p EpiPlan to the main vector
8025/// loop, after both plans have executed, updating branches from the iteration
8026/// and runtime checks of the main loop, as well as updating various phis. \p
8027/// InstsToMove contains instructions that need to be moved to the preheader of
8028/// the epilogue vector loop.
8029static void connectEpilogueVectorLoop(VPlan &EpiPlan, Loop *L,
8030 EpilogueLoopVectorizationInfo &EPI,
8031 DominatorTree *DT,
8032 GeneratedRTChecks &Checks,
8033 ArrayRef<Instruction *> InstsToMove,
8034 ArrayRef<VPInstruction *> ResumeValues) {
8035 BasicBlock *VecEpilogueIterationCountCheck =
8036 cast<VPIRBasicBlock>(EpiPlan.getEntry())->getIRBasicBlock();
8037
8038 BasicBlock *VecEpiloguePreHeader =
8039 cast<CondBrInst>(VecEpilogueIterationCountCheck->getTerminator())
8040 ->getSuccessor(1);
8041 // Adjust the control flow taking the state info from the main loop
8042 // vectorization into account.
8043 assert(EPI.MainLoopIterationCountCheck &&
8044 "expected this to be saved from the previous pass.");
8045 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
8046 EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
8047 VecEpilogueIterationCountCheck, VecEpiloguePreHeader);
8048
8049 DTU.applyUpdates({{DominatorTree::Delete, EPI.MainLoopIterationCountCheck,
8050 VecEpilogueIterationCountCheck},
8051 {DominatorTree::Insert, EPI.MainLoopIterationCountCheck,
8052 VecEpiloguePreHeader}});
8053
8054 BasicBlock *ScalarPH =
8055 cast<VPIRBasicBlock>(EpiPlan.getScalarPreheader())->getIRBasicBlock();
8056 EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
8057 VecEpilogueIterationCountCheck, ScalarPH);
8058 DTU.applyUpdates(
8059 {{DominatorTree::Delete, EPI.EpilogueIterationCountCheck,
8060 VecEpilogueIterationCountCheck},
8061 {DominatorTree::Insert, EPI.EpilogueIterationCountCheck, ScalarPH}});
8062
8063 // Adjust the terminators of runtime check blocks and phis using them.
8064 BasicBlock *SCEVCheckBlock = Checks.getSCEVChecks().second;
8065 BasicBlock *MemCheckBlock = Checks.getMemRuntimeChecks().second;
8066 if (SCEVCheckBlock) {
8067 SCEVCheckBlock->getTerminator()->replaceUsesOfWith(
8068 VecEpilogueIterationCountCheck, ScalarPH);
8069 DTU.applyUpdates({{DominatorTree::Delete, SCEVCheckBlock,
8070 VecEpilogueIterationCountCheck},
8071 {DominatorTree::Insert, SCEVCheckBlock, ScalarPH}});
8072 }
8073 if (MemCheckBlock) {
8074 MemCheckBlock->getTerminator()->replaceUsesOfWith(
8075 VecEpilogueIterationCountCheck, ScalarPH);
8076 DTU.applyUpdates(
8077 {{DominatorTree::Delete, MemCheckBlock, VecEpilogueIterationCountCheck},
8078 {DominatorTree::Insert, MemCheckBlock, ScalarPH}});
8079 }
8080
8081 // The vec.epilog.iter.check block may contain Phi nodes from inductions
8082 // or reductions which merge control-flow from the latch block and the
8083 // middle block. Update the incoming values here and move the Phi into the
8084 // preheader.
8085 SmallVector<PHINode *, 4> PhisInBlock(
8086 llvm::make_pointer_range(VecEpilogueIterationCountCheck->phis()));
8087
8088 for (PHINode *Phi : PhisInBlock) {
8089 Phi->moveBefore(VecEpiloguePreHeader->getFirstNonPHIIt());
8090 Phi->replaceIncomingBlockWith(
8091 VecEpilogueIterationCountCheck->getSinglePredecessor(),
8092 VecEpilogueIterationCountCheck);
8093
8094 // If the phi doesn't have an incoming value from the
8095 // EpilogueIterationCountCheck, we are done. Otherwise remove the
8096 // incoming value and also those from other check blocks. This is needed
8097 // for reduction phis only.
8098 if (none_of(Phi->blocks(), [&](BasicBlock *IncB) {
8099 return EPI.EpilogueIterationCountCheck == IncB;
8100 }))
8101 continue;
8102 Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck);
8103 if (SCEVCheckBlock)
8104 Phi->removeIncomingValue(SCEVCheckBlock);
8105 if (MemCheckBlock)
8106 Phi->removeIncomingValue(MemCheckBlock);
8107 }
8108
8109 auto IP = VecEpiloguePreHeader->getFirstNonPHIIt();
8110 for (auto *I : InstsToMove)
8111 I->moveBefore(IP);
8112
8113 // VecEpilogueIterationCountCheck conditionally skips over the epilogue loop
8114 // after executing the main loop. We need to update the resume values of
8115 // inductions and reductions during epilogue vectorization.
8116 fixScalarResumeValuesFromBypass(VecEpilogueIterationCountCheck, L, EpiPlan,
8117 ResumeValues);
8118
8119 // Remove dead phis that were moved to the epilogue preheader but are unused
8120 // (e.g., resume phis for inductions not widened in the epilogue vector loop).
8121 for (PHINode &Phi : make_early_inc_range(VecEpiloguePreHeader->phis()))
8122 if (Phi.use_empty())
8123 Phi.eraseFromParent();
8124}
8125
8126bool LoopVectorizePass::processLoop(Loop *L) {
8127 assert((EnableVPlanNativePath || L->isInnermost()) &&
8128 "VPlan-native path is not enabled. Only process inner loops.");
8129
8130 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in '"
8131 << L->getHeader()->getParent()->getName() << "' from "
8132 << L->getLocStr() << "\n");
8133
8134 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI);
8135
8136 LLVM_DEBUG(
8137 dbgs() << "LV: Loop hints:"
8138 << " force="
8140 ? "disabled"
8142 ? "enabled"
8143 : "?"))
8144 << " width=" << Hints.getWidth()
8145 << " interleave=" << Hints.getInterleave() << "\n");
8146
8147 // Function containing loop
8148 Function *F = L->getHeader()->getParent();
8149
8150 // Looking at the diagnostic output is the only way to determine if a loop
8151 // was vectorized (other than looking at the IR or machine code), so it
8152 // is important to generate an optimization remark for each loop. Most of
8153 // these messages are generated as OptimizationRemarkAnalysis. Remarks
8154 // generated as OptimizationRemark and OptimizationRemarkMissed are
8155 // less verbose, reporting vectorized loops and unvectorized loops that may
8156 // benefit from vectorization, respectively.
8157
8158 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
8159 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
8160 return false;
8161 }
8162
8163 PredicatedScalarEvolution PSE(*SE, *L);
8164
8165 // Query this against the original loop and save it here because the profile
8166 // of the original loop header may change as the transformation happens.
8167 bool OptForSize = llvm::shouldOptimizeForSize(
8168 L->getHeader(), PSI,
8169 PSI && PSI->hasProfileSummary() ? &GetBFI() : nullptr,
8170 PGSOQueryType::IRPass);
8171
8172 // Check if it is legal to vectorize the loop.
8173 LoopVectorizationRequirements Requirements;
8174 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, F, *LAIs, LI, ORE,
8175 &Requirements, &Hints, DB, AC,
8176 /*AllowRuntimeSCEVChecks=*/!OptForSize, AA);
8177 if (!LVL.canVectorize(EnableVPlanNativePath)) {
8178 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
8179 Hints.emitRemarkWithHints();
8180 return false;
8181 }
8182
8183 if (LVL.hasUncountableEarlyExit()) {
8184 if (!EnableEarlyExitVectorization) {
8185 reportVectorizationFailure("Auto-vectorization of loops with uncountable "
8186 "early exit is not enabled",
8187 "UncountableEarlyExitLoopsDisabled", ORE, L);
8188 return false;
8189 }
8190 }
8191
8192 // Entrance to the VPlan-native vectorization path. Outer loops are processed
8193 // here. They may require CFG and instruction level transformations before
8194 // even evaluating whether vectorization is profitable. Since we cannot modify
8195 // the incoming IR, we need to build VPlan upfront in the vectorization
8196 // pipeline.
8197 if (!L->isInnermost())
8198 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
8199 ORE, GetBFI, OptForSize, Hints,
8200 Requirements);
8201
8202 assert(L->isInnermost() && "Inner loop expected.");
8203
8204 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
8205 bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
8206
8207 // If an override option has been passed in for interleaved accesses, use it.
8208 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
8209 UseInterleaved = EnableInterleavedMemAccesses;
8210
8211 // Analyze interleaved memory accesses.
8212 if (UseInterleaved)
8213 IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
8214
8215 if (LVL.hasUncountableEarlyExit()) {
8216 BasicBlock *LoopLatch = L->getLoopLatch();
8217 if (IAI.requiresScalarEpilogue() ||
8218 any_of(LVL.getCountableExitingBlocks(), not_equal_to(LoopLatch))) {
8219 reportVectorizationFailure("Auto-vectorization of early exit loops "
8220 "requiring a scalar epilogue is unsupported",
8221 "UncountableEarlyExitUnsupported", ORE, L);
8222 return false;
8223 }
8224 }
8225
8226 // Check the function attributes and profiles to find out if this function
8227 // should be optimized for size.
8228 EpilogueLowering SEL =
8229 getEpilogueLowering(F, L, Hints, OptForSize, TTI, TLI, LVL, &IAI);
8230
8231 // Check the loop for a trip count threshold: vectorize loops with a tiny trip
8232 // count by optimizing for size, to minimize overheads.
8233 auto ExpectedTC = getSmallBestKnownTC(PSE, L);
8234 if (ExpectedTC && ExpectedTC->isFixed() &&
8235 ExpectedTC->getFixedValue() < TinyTripCountVectorThreshold) {
8236 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
8237 << "This loop is worth vectorizing only if no scalar "
8238 << "iteration overheads are incurred.");
8240 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
8241 else {
8242 LLVM_DEBUG(dbgs() << "\n");
8243 // Tail-folded loops are efficient even when the loop
8244 // iteration count is low. However, setting the epilogue policy to
8245 // `CM_EpilogueNotAllowedLowTripLoop` prevents vectorizing loops
8246 // with runtime checks. It's more effective to let
8247 // `isOutsideLoopWorkProfitable` determine if vectorization is
8248 // beneficial for the loop.
8249 if (SEL != CM_EpilogueNotNeededUsePredicate)
8250 SEL = CM_EpilogueNotAllowedLowTripLoop;
8251 }
8252 }
8253
8254 // Check the function attributes to see if implicit floats or vectors are
8255 // allowed.
8256 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
8257 reportVectorizationFailure(
8258 "Can't vectorize when the NoImplicitFloat attribute is used",
8259 "loop not vectorized due to NoImplicitFloat attribute",
8260 "NoImplicitFloat", ORE, L);
8261 Hints.emitRemarkWithHints();
8262 return false;
8263 }
8264
8265 // Check if the target supports potentially unsafe FP vectorization.
8266 // FIXME: Add a check for the type of safety issue (denormal, signaling)
8267 // for the target we're vectorizing for, to make sure none of the
8268 // additional fp-math flags can help.
8269 if (Hints.isPotentiallyUnsafe() &&
8270 TTI->isFPVectorizationPotentiallyUnsafe()) {
8271 reportVectorizationFailure(
8272 "Potentially unsafe FP op prevents vectorization",
8273 "loop not vectorized due to unsafe FP support.",
8274 "UnsafeFP", ORE, L);
8275 Hints.emitRemarkWithHints();
8276 return false;
8277 }
8278
8279 bool AllowOrderedReductions;
8280 // If the flag is set, use that instead and override the TTI behaviour.
8281 if (ForceOrderedReductions.getNumOccurrences() > 0)
8282 AllowOrderedReductions = ForceOrderedReductions;
8283 else
8284 AllowOrderedReductions = TTI->enableOrderedReductions();
8285 if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
8286 ORE->emit([&]() {
8287 auto *ExactFPMathInst = Requirements.getExactFPInst();
8288 return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
8289 ExactFPMathInst->getDebugLoc(),
8290 ExactFPMathInst->getParent())
8291 << "loop not vectorized: cannot prove it is safe to reorder "
8292 "floating-point operations";
8293 });
8294 LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
8295 "reorder floating-point operations\n");
8296 Hints.emitRemarkWithHints();
8297 return false;
8298 }
8299
8300 // Use the cost model.
8301 VFSelectionContext Config(*TTI, &LVL, L, *F, PSE, DB, ORE, &Hints,
8302 OptForSize);
8303 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, AC, ORE,
8304 GetBFI, F, &Hints, IAI, Config);
8305 // Use the planner for vectorization.
8306 LoopVectorizationPlanner LVP(L, LI, DT, TLI, *TTI, &LVL, CM, Config, IAI, PSE,
8307 Hints, ORE);
8308
8309 // Get user vectorization factor and interleave count.
8310 ElementCount UserVF = Hints.getWidth();
8311 unsigned UserIC = Hints.getInterleave();
8312 if (UserIC > 1 && !LVL.isSafeForAnyVectorWidth())
8313 UserIC = 1;
8314
8315 // Plan how to best vectorize.
8316 LVP.plan(UserVF, UserIC);
8317 auto [VF, BestPlanPtr] = LVP.computeBestVF();
8318 unsigned IC = 1;
8319
8320 if (ORE->allowExtraAnalysis(LV_NAME))
8321 LVP.emitInvalidCostRemarks(ORE);
8322
8323 GeneratedRTChecks Checks(PSE, DT, LI, TTI, Config.CostKind);
8324 if (LVP.hasPlanWithVF(VF.Width)) {
8325 // Select the interleave count.
8326 IC = LVP.selectInterleaveCount(*BestPlanPtr, VF.Width, VF.Cost);
8327
8328 unsigned SelectedIC = std::max(IC, UserIC);
8329 // Optimistically generate runtime checks if they are needed. Drop them if
8330 // they turn out to not be profitable.
8331 if (VF.Width.isVector() || SelectedIC > 1) {
8332 Checks.create(L, *LVL.getLAI(), PSE.getPredicate(), VF.Width, SelectedIC,
8333 *ORE);
8334
8335 // Bail out early if either the SCEV or memory runtime checks are known to
8336 // fail. In that case, the vector loop would never execute.
8337 using namespace llvm::PatternMatch;
8338 if (Checks.getSCEVChecks().first &&
8339 match(Checks.getSCEVChecks().first, m_One()))
8340 return false;
8341 if (Checks.getMemRuntimeChecks().first &&
8342 match(Checks.getMemRuntimeChecks().first, m_One()))
8343 return false;
8344 }
8345
8346 // Check if it is profitable to vectorize with runtime checks.
8347 bool ForceVectorization =
8348 Hints.getForce() == LoopVectorizeHints::FK_Enabled;
8349 VPCostContext CostCtx(CM.TTI, *CM.TLI, *BestPlanPtr, CM, Config.CostKind,
8350 CM.PSE, L);
8351 if (!ForceVectorization &&
8352 !isOutsideLoopWorkProfitable(Checks, VF, L, PSE, CostCtx, *BestPlanPtr,
8353 SEL, Config.getVScaleForTuning())) {
8354 ORE->emit([&]() {
8355 return OptimizationRemarkAnalysisAliasing(
8356 DEBUG_TYPE, "CantReorderMemOps", L->getStartLoc(),
8357 L->getHeader())
8358 << "loop not vectorized: cannot prove it is safe to reorder "
8359 "memory operations";
8360 });
8361 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
8362 Hints.emitRemarkWithHints();
8363 return false;
8364 }
8365 }
8366
8367 // Identify the diagnostic messages that should be produced.
8368 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
8369 bool VectorizeLoop = true, InterleaveLoop = true;
8370 if (VF.Width.isScalar()) {
8371 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
8372 VecDiagMsg = {
8373 "VectorizationNotBeneficial",
8374 "the cost-model indicates that vectorization is not beneficial"};
8375 VectorizeLoop = false;
8376 }
8377
8378 if (UserIC == 1 && Hints.getInterleave() > 1) {
8379 assert(!LVL.isSafeForAnyVectorWidth() &&
8380 "UserIC should only be ignored due to unsafe dependencies");
8381 LLVM_DEBUG(dbgs() << "LV: Ignoring user-specified interleave count.\n");
8382 IntDiagMsg = {"InterleavingUnsafe",
8383 "Ignoring user-specified interleave count due to possibly "
8384 "unsafe dependencies in the loop."};
8385 InterleaveLoop = false;
8386 } else if (!LVP.hasPlanWithVF(VF.Width) && UserIC > 1) {
8387 // Tell the user interleaving was avoided up-front, despite being explicitly
8388 // requested.
8389 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
8390 "interleaving should be avoided up front\n");
8391 IntDiagMsg = {"InterleavingAvoided",
8392 "Ignoring UserIC, because interleaving was avoided up front"};
8393 InterleaveLoop = false;
8394 } else if (IC == 1 && UserIC <= 1) {
8395 // Tell the user interleaving is not beneficial.
8396 LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
8397 IntDiagMsg = {
8398 "InterleavingNotBeneficial",
8399 "the cost-model indicates that interleaving is not beneficial"};
8400 InterleaveLoop = false;
8401 if (UserIC == 1) {
8402 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
8403 IntDiagMsg.second +=
8404 " and is explicitly disabled or interleave count is set to 1";
8405 }
8406 } else if (IC > 1 && UserIC == 1) {
8407 // Tell the user interleaving is beneficial, but it is explicitly disabled.
8408 LLVM_DEBUG(dbgs() << "LV: Interleaving is beneficial but is explicitly "
8409 "disabled.\n");
8410 IntDiagMsg = {"InterleavingBeneficialButDisabled",
8411 "the cost-model indicates that interleaving is beneficial "
8412 "but is explicitly disabled or interleave count is set to 1"};
8413 InterleaveLoop = false;
8414 }
8415
8416 // If there is a histogram in the loop, do not just interleave without
8417 // vectorizing. The order of operations will be incorrect without the
8418 // histogram intrinsics, which are only used for recipes with VF > 1.
8419 if (!VectorizeLoop && InterleaveLoop && LVL.hasHistograms()) {
8420 LLVM_DEBUG(dbgs() << "LV: Not interleaving without vectorization due "
8421 << "to histogram operations.\n");
8422 IntDiagMsg = {
8423 "HistogramPreventsScalarInterleaving",
8424 "Unable to interleave without vectorization due to constraints on "
8425 "the order of histogram operations"};
8426 InterleaveLoop = false;
8427 }
8428
8429 // Override IC if user provided an interleave count.
8430 IC = UserIC > 0 ? UserIC : IC;
8431
8432 // Emit diagnostic messages, if any.
8433 const char *VAPassName = Hints.vectorizeAnalysisPassName();
8434 if (!VectorizeLoop && !InterleaveLoop) {
8435 // Do not vectorize or interleave the loop.
8436 ORE->emit([&]() {
8437 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
8438 L->getStartLoc(), L->getHeader())
8439 << VecDiagMsg.second;
8440 });
8441 ORE->emit([&]() {
8442 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
8443 L->getStartLoc(), L->getHeader())
8444 << IntDiagMsg.second;
8445 });
8446 return false;
8447 }
8448
8449 if (!VectorizeLoop && InterleaveLoop) {
8450 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
8451 ORE->emit([&]() {
8452 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
8453 L->getStartLoc(), L->getHeader())
8454 << VecDiagMsg.second;
8455 });
8456 } else if (VectorizeLoop && !InterleaveLoop) {
8457 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
8458 << ") in " << L->getLocStr() << '\n');
8459 ORE->emit([&]() {
8460 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
8461 L->getStartLoc(), L->getHeader())
8462 << IntDiagMsg.second;
8463 });
8464 } else if (VectorizeLoop && InterleaveLoop) {
8465 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
8466 << ") in " << L->getLocStr() << '\n');
8467 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
8468 }
8469
8470 // Report the vectorization decision.
8471 if (VF.Width.isScalar()) {
8472 using namespace ore;
8473 assert(IC > 1);
8474 ORE->emit([&]() {
8475 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
8476 L->getHeader())
8477 << "interleaved loop (interleaved count: "
8478 << NV("InterleaveCount", IC) << ")";
8479 });
8480 } else {
8481 // Report the vectorization decision.
8482 reportVectorization(ORE, L, VF, IC);
8483 }
8484 if (ORE->allowExtraAnalysis(LV_NAME))
8485 checkMixedPrecision(L, ORE);
8486
8487 // If we decided that it is *legal* to interleave or vectorize the loop, then
8488 // do it.
8489
8490 VPlan &BestPlan = *BestPlanPtr;
8491 // Consider vectorizing the epilogue too if it's profitable.
8492 std::unique_ptr<VPlan> EpiPlan =
8493 LVP.selectBestEpiloguePlan(BestPlan, VF.Width, IC);
8494 bool HasBranchWeights =
8495 hasBranchWeightMD(*L->getLoopLatch()->getTerminator());
8496 if (EpiPlan) {
8497 VPlan &BestEpiPlan = *EpiPlan;
8498 VPlan &BestMainPlan = BestPlan;
8499 ElementCount EpilogueVF = BestEpiPlan.getSingleVF();
8500
8501 // The first pass vectorizes the main loop and creates a scalar epilogue
8502 // to be vectorized by executing the plan (potentially with a different
8503 // factor) again shortly afterwards.
8504 BestEpiPlan.getMiddleBlock()->setName("vec.epilog.middle.block");
8505 BestEpiPlan.getVectorPreheader()->setName("vec.epilog.ph");
8506 SmallVector<VPInstruction *> ResumeValues =
8507 preparePlanForMainVectorLoop(BestMainPlan, BestEpiPlan);
8508 EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF, 1, BestEpiPlan);
8509
8510 // Add minimum iteration check for the epilogue plan, followed by runtime
8511 // checks for the main plan.
8512 LVP.addMinimumIterationCheck(BestMainPlan, EPI.EpilogueVF, EPI.EpilogueUF,
8514 LVP.attachRuntimeChecks(BestMainPlan, Checks, HasBranchWeights);
8516 EPI.MainLoopVF, EPI.MainLoopUF,
8518 HasBranchWeights ? MinItersBypassWeights : nullptr,
8519 L->getLoopPredecessor()->getTerminator()->getDebugLoc(),
8520 PSE);
8521
8522 EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TTI, AC, EPI, &CM,
8523 Checks, BestMainPlan);
8524 auto ExpandedSCEVs = LVP.executePlan(
8525 EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV, DT,
8526 /*VectorizingEpilogue=*/false);
8527 ++LoopsVectorized;
8528
8529 // Derive EPI fields from VPlan-generated IR.
8530 BasicBlock *EntryBB =
8531 cast<VPIRBasicBlock>(BestMainPlan.getEntry())->getIRBasicBlock();
8532 EntryBB->setName("iter.check");
8533 EPI.EpilogueIterationCountCheck = EntryBB;
8534 // The check chain is: Entry -> [SCEV] -> [Mem] -> MainCheck -> VecPH.
8535 // MainCheck is the non-bypass successor of the last runtime check block
8536 // (or Entry if there are no runtime checks).
8537 BasicBlock *LastCheck = EntryBB;
8538 if (BasicBlock *MemBB = Checks.getMemRuntimeChecks().second)
8539 LastCheck = MemBB;
8540 else if (BasicBlock *SCEVBB = Checks.getSCEVChecks().second)
8541 LastCheck = SCEVBB;
8542 BasicBlock *ScalarPH = L->getLoopPreheader();
8543 auto *BI = cast<CondBrInst>(LastCheck->getTerminator());
8544 EPI.MainLoopIterationCountCheck =
8545 BI->getSuccessor(BI->getSuccessor(0) == ScalarPH);
8546
8547 // Second pass vectorizes the epilogue and adjusts the control flow
8548 // edges from the first pass.
8549 EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TTI, AC, EPI, &CM,
8550 Checks, BestEpiPlan);
8551 SmallVector<Instruction *> InstsToMove = preparePlanForEpilogueVectorLoop(
8552 BestEpiPlan, L, ExpandedSCEVs, EPI, CM, Config, *PSE.getSE());
8553 LVP.attachRuntimeChecks(BestEpiPlan, Checks, HasBranchWeights);
8554 LVP.executePlan(
8555 EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV, DT,
8556 /*VectorizingEpilogue=*/true, &ExpandedSCEVs);
8557 connectEpilogueVectorLoop(BestEpiPlan, L, EPI, DT, Checks, InstsToMove,
8558 ResumeValues);
8559 ++LoopsEpilogueVectorized;
8560 } else {
8561 InnerLoopVectorizer LB(L, PSE, LI, DT, TTI, AC, VF.Width, IC, &CM, Checks,
8562 BestPlan);
8563 LVP.addMinimumIterationCheck(BestPlan, VF.Width, IC,
8564 VF.MinProfitableTripCount);
8565 LVP.attachRuntimeChecks(BestPlan, Checks, HasBranchWeights);
8566
8567 LVP.executePlan(VF.Width, IC, BestPlan, LB, DT);
8568 ++LoopsVectorized;
8569 }
8570
8571 assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
8572 "DT not preserved correctly");
8573 assert(!verifyFunction(*F, &dbgs()));
8574
8575 return true;
8576}
8577
8578LoopVectorizeResult LoopVectorizePass::runImpl(Function &F) {
8579
8580 // Don't attempt if
8581 // 1. the target claims to have no vector registers, and
8582 // 2. interleaving won't help ILP.
8583 //
8584 // The second condition is necessary because, even if the target has no
8585 // vector registers, loop vectorization may still enable scalar
8586 // interleaving.
8587 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
8588 TTI->getMaxInterleaveFactor(ElementCount::getFixed(1)) < 2)
8589 return LoopVectorizeResult(false, false);
8590
8591 bool Changed = false, CFGChanged = false;
8592
8593 // The vectorizer requires loops to be in simplified form.
8594 // Since simplification may add new inner loops, it has to run before the
8595 // legality and profitability checks. This means running the loop vectorizer
8596 // will simplify all loops, regardless of whether anything ends up being
8597 // vectorized.
8598 for (const auto &L : *LI)
8599 Changed |= CFGChanged |=
8600 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
8601
8602 // Build up a worklist of inner-loops to vectorize. This is necessary as
8603 // the act of vectorizing or partially unrolling a loop creates new loops
8604 // and can invalidate iterators across the loops.
8605 SmallVector<Loop *, 8> Worklist;
8606
8607 for (Loop *L : *LI)
8608 collectSupportedLoops(*L, LI, ORE, Worklist);
8609
8610 LoopsAnalyzed += Worklist.size();
8611
8612 // Now walk the identified inner loops.
8613 while (!Worklist.empty()) {
8614 Loop *L = Worklist.pop_back_val();
8615
8616 // For the inner loops we actually process, form LCSSA to simplify the
8617 // transform.
8618 Changed |= formLCSSARecursively(*L, *DT, LI, SE);
8619
8620 Changed |= CFGChanged |= processLoop(L);
8621
8622 if (Changed) {
8623 LAIs->clear();
8624
8625#ifndef NDEBUG
8626 if (VerifySCEV)
8627 SE->verify();
8628#endif
8629 }
8630 }
8631
8632 // Process each loop nest in the function.
8633 return LoopVectorizeResult(Changed, CFGChanged);
8634}
8635
8636PreservedAnalyses LoopVectorizePass::run(Function &F,
8637 FunctionAnalysisManager &AM) {
8638 LI = &AM.getResult<LoopAnalysis>(F);
8639 // There are no loops in the function. Return before computing other
8640 // expensive analyses.
8641 if (LI->empty())
8642 return PreservedAnalyses::all();
8643 SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
8644 TTI = &AM.getResult<TargetIRAnalysis>(F);
8645 DT = &AM.getResult<DominatorTreeAnalysis>(F);
8646 TLI = &AM.getResult<TargetLibraryAnalysis>(F);
8647 AC = &AM.getResult<AssumptionAnalysis>(F);
8648 DB = &AM.getResult<DemandedBitsAnalysis>(F);
8649 ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
8650 LAIs = &AM.getResult<LoopAccessAnalysis>(F);
8651 AA = &AM.getResult<AAManager>(F);
8652
8653 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
8654 PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
8655 GetBFI = [&AM, &F]() -> BlockFrequencyInfo & {
8656 return AM.getResult<BlockFrequencyAnalysis>(F);
8657 };
8658 LoopVectorizeResult Result = runImpl(F);
8659 if (!Result.MadeAnyChange)
8660 return PreservedAnalyses::all();
8661 PreservedAnalyses PA;
8662
8663 if (isAssignmentTrackingEnabled(*F.getParent())) {
8664 for (auto &BB : F)
8665 RemoveRedundantDbgInstrs(&BB);
8666 }
8667
8668 PA.preserve<LoopAnalysis>();
8669 PA.preserve<DominatorTreeAnalysis>();
8670 PA.preserve<ScalarEvolutionAnalysis>();
8671 PA.preserve<LoopAccessAnalysis>();
8672
8673 if (Result.MadeCFGChange) {
8674 // Making CFG changes likely means a loop got vectorized. Indicate that
8675 // extra simplification passes should be run.
8676 // TODO: MadeCFGChanges is not a perfect proxy. Extra passes should only
8677 // be run if runtime checks have been added.
8678 AM.getResult<ShouldRunExtraVectorPasses>(F);
8679 PA.preserve<ShouldRunExtraVectorPasses>();
8680 } else {
8681 PA.preserveSet<CFGAnalyses>();
8682 }
8683 return PA;
8684}
8685
8686void LoopVectorizePass::printPipeline(
8687 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
8688 static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
8689 OS, MapClassName2PassName);
8690
8691 OS << '<';
8692 OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
8693 OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
8694 OS << '>';
8695}
for(const MachineOperand &MO :llvm::drop_begin(OldMI.operands(), Desc.getNumOperands()))
static unsigned getIntrinsicID(const SDNode *N)
unsigned RegSize
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Lower Kernel Arguments
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static bool isEqual(const Function &Caller, const Function &Callee)
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This is the interface for LLVM's primary stateless and local alias analysis.
static bool IsEmptyBlock(MachineBasicBlock *MBB)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
static cl::opt< IntrinsicCostStrategy > IntrinsicCost("intrinsic-cost-strategy", cl::desc("Costing strategy for intrinsic instructions"), cl::init(IntrinsicCostStrategy::InstructionCost), cl::values(clEnumValN(IntrinsicCostStrategy::InstructionCost, "instruction-cost", "Use TargetTransformInfo::getInstructionCost"), clEnumValN(IntrinsicCostStrategy::IntrinsicCost, "intrinsic-cost", "Use TargetTransformInfo::getIntrinsicInstrCost"), clEnumValN(IntrinsicCostStrategy::TypeBasedIntrinsicCost, "type-based-intrinsic-cost", "Calculate the intrinsic cost based only on argument types")))
static InstructionCost getCost(Instruction &Inst, TTI::TargetCostKind CostKind, TargetTransformInfo &TTI)
Definition CostModel.cpp:73
This file defines DenseMapInfo traits for DenseMap.
This file defines the DenseMap class.
#define DEBUG_TYPE
This is the interface for a simple mod/ref and alias analysis over globals.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This defines the Use class.
static bool hasNoUnsignedWrap(BinaryOperator &I)
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static cl::opt< unsigned, true > VectorizationFactor("force-vector-width", cl::Hidden, cl::desc("Sets the SIMD width. Zero is autoselect."), cl::location(VectorizerParams::VectorizationFactor))
This header provides classes for managing per-loop analyses.
static const char * VerboseDebug
#define LV_NAME
This file defines the LoopVectorizationLegality class.
static cl::opt< bool > ConsiderRegPressure("vectorizer-consider-reg-pressure", cl::init(false), cl::Hidden, cl::desc("Discard VFs if their register pressure is too high."))
This file provides a LoopVectorizationPlanner class.
static void collectSupportedLoops(Loop &L, LoopInfo *LI, OptimizationRemarkEmitter *ORE, SmallVectorImpl< Loop * > &V)
static cl::opt< unsigned > EpilogueVectorizationMinVF("epilogue-vectorization-minimum-VF", cl::Hidden, cl::desc("Only loops with vectorization factor equal to or larger than " "the specified value are considered for epilogue vectorization."))
static cl::opt< unsigned > EpilogueVectorizationForceVF("epilogue-vectorization-force-VF", cl::init(1), cl::Hidden, cl::desc("When epilogue vectorization is enabled, and a value greater than " "1 is specified, forces the given VF for all applicable epilogue " "loops."))
static unsigned getMaxTCFromNonZeroRange(PredicatedScalarEvolution &PSE, Loop *L)
Get the maximum trip count for L from the SCEV unsigned range, excluding zero from the range.
static Type * maybeVectorizeType(Type *Ty, ElementCount VF)
static ElementCount getSmallConstantTripCount(ScalarEvolution *SE, const Loop *L)
A version of ScalarEvolution::getSmallConstantTripCount that returns an ElementCount to include loops...
static bool hasUnsupportedHeaderPhiRecipe(VPlan &Plan)
Returns true if the VPlan contains header phi recipes that are not currently supported for epilogue v...
static cl::opt< unsigned > VectorizeMemoryCheckThreshold("vectorize-memory-check-threshold", cl::init(128), cl::Hidden, cl::desc("The maximum allowed number of runtime memory checks"))
static void connectEpilogueVectorLoop(VPlan &EpiPlan, Loop *L, EpilogueLoopVectorizationInfo &EPI, DominatorTree *DT, GeneratedRTChecks &Checks, ArrayRef< Instruction * > InstsToMove, ArrayRef< VPInstruction * > ResumeValues)
Connect the epilogue vector loop generated for EpiPlan to the main vector loop, after both plans have...
static cl::opt< unsigned > TinyTripCountVectorThreshold("vectorizer-min-trip-count", cl::init(16), cl::Hidden, cl::desc("Loops with a constant trip count that is smaller than this " "value are vectorized only if no scalar iteration overheads " "are incurred."))
Loops with a known constant trip count below this number are vectorized only if no scalar iteration o...
static void debugVectorizationMessage(const StringRef Prefix, const StringRef DebugMsg, Instruction *I)
Write a DebugMsg about vectorization to the debug output stream.
static void legacyCSE(BasicBlock *BB)
FIXME: This legacy common-subexpression-elimination routine is scheduled for removal,...
static VPIRBasicBlock * replaceVPBBWithIRVPBB(VPBasicBlock *VPBB, BasicBlock *IRBB, VPlan *Plan=nullptr)
Replace VPBB with a VPIRBasicBlock wrapping IRBB.
static DebugLoc getDebugLocFromInstOrOperands(Instruction *I)
Look for a meaningful debug location on the instruction or its operands.
TailFoldingPolicyTy
Option tail-folding-policy indicates that an epilogue is undesired, that tail folding is preferred,...
static bool useActiveLaneMaskForControlFlow(TailFoldingStyle Style)
static cl::opt< bool > EnableEarlyExitVectorization("enable-early-exit-vectorization", cl::init(true), cl::Hidden, cl::desc("Enable vectorization of early exit loops with uncountable exits."))
static bool processLoopInVPlanNativePath(Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, std::function< BlockFrequencyInfo &()> GetBFI, bool OptForSize, LoopVectorizeHints &Hints, LoopVectorizationRequirements &Requirements)
static unsigned estimateElementCount(ElementCount VF, std::optional< unsigned > VScale)
This function attempts to return a value that represents the ElementCount at runtime.
static constexpr uint32_t MinItersBypassWeights[]
static cl::opt< unsigned > ForceTargetNumScalarRegs("force-target-num-scalar-regs", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's number of scalar registers."))
static SmallVector< VPInstruction * > preparePlanForMainVectorLoop(VPlan &MainPlan, VPlan &EpiPlan)
Prepare MainPlan for vectorizing the main vector loop during epilogue vectorization.
static cl::opt< unsigned > SmallLoopCost("small-loop-cost", cl::init(20), cl::Hidden, cl::desc("The cost of a loop that is considered 'small' by the interleaver."))
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName, StringRef RemarkName, const Loop *TheLoop, Instruction *I, DebugLoc DL={})
Create an analysis remark that explains why vectorization failed.
static cl::opt< unsigned > ForceTargetNumVectorRegs("force-target-num-vector-regs", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's number of vector registers."))
static bool isExplicitVecOuterLoop(Loop *OuterLp, OptimizationRemarkEmitter *ORE)
static cl::opt< bool > EnableIndVarRegisterHeur("enable-ind-var-reg-heur", cl::init(true), cl::Hidden, cl::desc("Count the induction variable only once when interleaving"))
static cl::opt< TailFoldingStyle > ForceTailFoldingStyle("force-tail-folding-style", cl::desc("Force the tail folding style"), cl::init(TailFoldingStyle::None), cl::values(clEnumValN(TailFoldingStyle::None, "none", "Disable tail folding"), clEnumValN(TailFoldingStyle::Data, "data", "Create lane mask for data only, using active.lane.mask intrinsic"), clEnumValN(TailFoldingStyle::DataWithoutLaneMask, "data-without-lane-mask", "Create lane mask with compare/stepvector"), clEnumValN(TailFoldingStyle::DataAndControlFlow, "data-and-control", "Create lane mask using active.lane.mask intrinsic, and use " "it for both data and control flow"), clEnumValN(TailFoldingStyle::DataWithEVL, "data-with-evl", "Use predicated EVL instructions for tail folding. If EVL " "is unsupported, fallback to data-without-lane-mask.")))
static void printOptimizedVPlan(VPlan &)
static SmallVector< Instruction * > preparePlanForEpilogueVectorLoop(VPlan &Plan, Loop *L, const SCEV2ValueTy &ExpandedSCEVs, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel &CM, VFSelectionContext &Config, ScalarEvolution &SE)
Prepare Plan for vectorizing the epilogue loop.
static cl::opt< bool > EnableEpilogueVectorization("enable-epilogue-vectorization", cl::init(true), cl::Hidden, cl::desc("Enable vectorization of epilogue loops."))
static cl::opt< bool > PreferPredicatedReductionSelect("prefer-predicated-reduction-select", cl::init(false), cl::Hidden, cl::desc("Prefer predicating a reduction operation over an after loop select."))
static std::optional< ElementCount > getSmallBestKnownTC(PredicatedScalarEvolution &PSE, Loop *L, bool CanUseConstantMax=true, bool CanExcludeZeroTrips=false)
Returns "best known" trip count, which is either a valid positive trip count or std::nullopt when an ...
static const SCEV * getAddressAccessSCEV(Value *Ptr, PredicatedScalarEvolution &PSE, const Loop *TheLoop)
Gets the address access SCEV for Ptr, if it should be used for cost modeling according to isAddressSC...
static cl::opt< bool > EnableLoadStoreRuntimeInterleave("enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, cl::desc("Enable runtime interleaving until load/store ports are saturated"))
static cl::opt< bool > VPlanBuildStressTest("vplan-build-stress-test", cl::init(false), cl::Hidden, cl::desc("Build VPlan for every supported loop nest in the function and bail " "out right after the build (stress test the VPlan H-CFG construction " "in the VPlan-native vectorization path)."))
static bool hasIrregularType(Type *Ty, const DataLayout &DL)
A helper function that returns true if the given type is irregular.
static cl::opt< bool > LoopVectorizeWithBlockFrequency("loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, cl::desc("Enable the use of the block frequency analysis to access PGO " "heuristics minimizing code growth in cold regions and being more " "aggressive in hot regions."))
static ElementCount determineVPlanVF(const TargetTransformInfo &TTI, VFSelectionContext &Config)
static bool useActiveLaneMask(TailFoldingStyle Style)
static bool hasReplicatorRegion(VPlan &Plan)
static bool isIndvarOverflowCheckKnownFalse(const LoopVectorizationCostModel *Cost, ElementCount VF, std::optional< unsigned > UF=std::nullopt)
For the given VF and UF and maximum trip count computed for the loop, return whether the induction va...
static void addFullyUnrolledInstructionsToIgnore(Loop *L, const LoopVectorizationLegality::InductionList &IL, SmallPtrSetImpl< Instruction * > &InstsToIgnore)
Knowing that loop L executes a single vector iteration, add instructions that will get simplified and...
static bool hasFindLastReductionPhi(VPlan &Plan)
Returns true if the VPlan contains a VPReductionPHIRecipe with FindLast recurrence kind.
static cl::opt< bool > EnableInterleavedMemAccesses("enable-interleaved-mem-accesses", cl::init(false), cl::Hidden, cl::desc("Enable vectorization on interleaved memory accesses in a loop"))
static cl::opt< bool > EnableMaskedInterleavedMemAccesses("enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden, cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"))
An interleave-group may need masking if it resides in a block that needs predication,...
static cl::opt< bool > ForceOrderedReductions("force-ordered-reductions", cl::init(false), cl::Hidden, cl::desc("Enable the vectorisation of loops with in-order (strict) " "FP reductions"))
static cl::opt< TailFoldingPolicyTy > TailFoldingPolicy("tail-folding-policy", cl::init(TailFoldingPolicyTy::None), cl::Hidden, cl::desc("Tail-folding preferences over creating an epilogue loop."), cl::values(clEnumValN(TailFoldingPolicyTy::None, "dont-fold-tail", "Don't tail-fold loops."), clEnumValN(TailFoldingPolicyTy::PreferFoldTail, "prefer-fold-tail", "prefer tail-folding, otherwise create an epilogue when " "appropriate."), clEnumValN(TailFoldingPolicyTy::MustFoldTail, "must-fold-tail", "always tail-fold, don't attempt vectorization if " "tail-folding fails.")))
static bool isOutsideLoopWorkProfitable(GeneratedRTChecks &Checks, VectorizationFactor &VF, Loop *L, PredicatedScalarEvolution &PSE, VPCostContext &CostCtx, VPlan &Plan, EpilogueLowering SEL, std::optional< unsigned > VScale)
This function determines whether or not it's still profitable to vectorize the loop given the extra w...
static cl::opt< cl::boolOrDefault > ForceSafeDivisor("force-widen-divrem-via-safe-divisor", cl::Hidden, cl::desc("Override cost based safe divisor widening for div/rem instructions"))
static InstructionCost calculateEarlyExitCost(VPCostContext &CostCtx, VPlan &Plan, ElementCount VF)
For loops with uncountable early exits, find the cost of doing work when exiting the loop early,...
static cl::opt< unsigned > ForceTargetMaxVectorInterleaveFactor("force-target-max-vector-interleave", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's max interleave factor for " "vectorized loops."))
static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI)
cl::opt< unsigned > NumberOfStoresToPredicate("vectorize-num-stores-pred", cl::init(1), cl::Hidden, cl::desc("Max number of stores to be predicated behind an if."))
The number of stores in a loop that are allowed to need predication.
static EpilogueLowering getEpilogueLowering(Function *F, Loop *L, LoopVectorizeHints &Hints, bool OptForSize, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, LoopVectorizationLegality &LVL, InterleavedAccessInfo *IAI)
static void fixScalarResumeValuesFromBypass(BasicBlock *BypassBlock, Loop *L, VPlan &BestEpiPlan, ArrayRef< VPInstruction * > ResumeValues)
static cl::opt< unsigned > MaxNestedScalarReductionIC("max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, cl::desc("The maximum interleave count to use when interleaving a scalar " "reduction in a nested loop."))
static cl::opt< unsigned > ForceTargetMaxScalarInterleaveFactor("force-target-max-scalar-interleave", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's max interleave factor for " "scalar loops."))
static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE)
static bool willGenerateVectors(VPlan &Plan, ElementCount VF, const TargetTransformInfo &TTI)
Check if any recipe of Plan will generate a vector value, which will be assigned a vector register.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
This file implements a map that provides insertion order iteration.
This file contains the declarations for metadata subclasses.
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
#define P(N)
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static InstructionCost getScalarizationOverhead(const TargetTransformInfo &TTI, Type *ScalarTy, VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={}, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None)
This is similar to TargetTransformInfo::getScalarizationOverhead, but if ScalarTy is a FixedVectorTyp...
This file contains some templates that are useful if you are working with the STL at all.
#define OP(OPC)
Definition Instruction.h:46
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
#define DEBUG_WITH_TYPE(TYPE,...)
DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.
Definition Debug.h:72
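These macros are the standard way a pass records counters and debug traces. A minimal sketch, assuming the counter name is hypothetical ("loop-vectorize" is the DEBUG_TYPE this file uses):
  #include "llvm/ADT/Statistic.h"
  #include "llvm/Support/Debug.h"
  using namespace llvm;

  #define DEBUG_TYPE "loop-vectorize"
  STATISTIC(TweaksApplied, "Number of hypothetical tweaks applied");

  static void noteTweak() {
    ++TweaksApplied;                               // reported under -stats
    LLVM_DEBUG(dbgs() << "LV: applied a tweak\n"); // printed under -debug
  }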
This pass exposes codegen information to IR-level passes.
LocallyHashedType DenseMapInfo< LocallyHashedType >::Empty
This file implements the TypeSwitch template, which mimics a switch() statement whose cases are type ...
This file contains the declarations of different VPlan-related auxiliary helpers.
This file provides utility VPlan to VPlan transformations.
#define RUN_VPLAN_PASS(PASS,...)
#define RUN_VPLAN_PASS_NO_VERIFY(PASS,...)
This file declares the class VPlanVerifier, which contains utility functions to check the consistency...
This file contains the declarations of the Vectorization Plan base classes:
static const char PassName[]
Value * RHS
Value * LHS
static const uint32_t IV[8]
Definition blake3_impl.h:83
A manager for alias analyses.
Class for arbitrary precision integers.
Definition APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:235
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1563
unsigned getActiveBits() const
Compute the number of active bits in the value.
Definition APInt.h:1535
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:381
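A small sketch of the APInt accessors listed above (values chosen arbitrarily):
  #include "llvm/ADT/APInt.h"
  using namespace llvm;

  void apintDemo() {
    APInt Ones = APInt::getAllOnes(8);    // 8-bit value 0xFF
    uint64_t Z = Ones.getZExtValue();     // 255
    unsigned Bits = Ones.getActiveBits(); // 8: index of highest set bit + 1
    bool IsZero = Ones.isZero();          // false
    (void)Z; (void)Bits; (void)IsZero;
  }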
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:39
size_t size() const
Get the array size.
Definition ArrayRef.h:140
A function analysis which provides an AssumptionCache.
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:530
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
LLVM_ABI const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
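A sketch of how the BasicBlock accessors above are typically combined when scanning a block (function name hypothetical):
  #include "llvm/IR/BasicBlock.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  static void scanBlock(BasicBlock &BB) {
    for (PHINode &Phi : BB.phis())              // leading PHI nodes only
      (void)Phi.getNumIncomingValues();
    if (BasicBlock *Pred = BB.getSinglePredecessor())
      (void)Pred;                               // non-null only for one edge in
    const Instruction *Term = BB.getTerminator(); // assumes a well-formed block
    (void)Term;
  }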
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
bool isNoBuiltin() const
Return true if the call should not be treated as a call to a builtin.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Value * getArgOperand(unsigned i) const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition InstrTypes.h:986
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
Conditional Branch instruction.
BasicBlock * getSuccessor(unsigned i) const
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
This class represents a range of values.
LLVM_ABI APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
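ConstantRange uses a half-open [Lower, Upper) representation, so the maximum is one less than the upper bound; a tiny sketch with arbitrary bounds:
  #include "llvm/IR/ConstantRange.h"
  using namespace llvm;

  void rangeDemo() {
    ConstantRange R(APInt(32, 10), APInt(32, 20)); // the half-open range [10, 20)
    APInt Max = R.getUnsignedMax();                // 19
    (void)Max;
  }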
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
A debug info location.
Definition DebugLoc.h:123
static DebugLoc getTemporary()
Definition DebugLoc.h:160
static DebugLoc getUnknown()
Definition DebugLoc.h:161
An analysis that produces DemandedBits for a function.
ValueT lookup(const_arg_type_t< KeyT > Val) const
Return the entry for the specified key, or a default constructed value if no such entry exists.
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:254
iterator end()
Definition DenseMap.h:81
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition DenseMap.h:169
void insert_range(Range &&R)
Inserts range of 'std::pair<KeyT, ValueT>' values into the map.
Definition DenseMap.h:292
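A minimal sketch of the DenseMap operations listed above (key/value types arbitrary):
  #include "llvm/ADT/DenseMap.h"
  using namespace llvm;

  void mapDemo() {
    DenseMap<int, int> M;
    M.try_emplace(1, 10);     // inserts only if key 1 is absent
    int V = M.lookup(2);      // 0: default-constructed when missing
    if (auto It = M.find(1); It != M.end())
      V = It->second;         // 10
    bool Has = M.contains(1); // true
    (void)V; (void)Has;
  }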
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
Analysis pass which computes a DominatorTree.
Definition Dominators.h:278
void changeImmediateDominator(DomTreeNodeBase< NodeT > *N, DomTreeNodeBase< NodeT > *NewIDom)
changeImmediateDominator - This method is used to update the dominator tree information when a node's...
void eraseNode(NodeT *BB)
eraseNode - Removes a node from the dominator tree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
constexpr bool isVector() const
One or more elements.
Definition TypeSize.h:324
static constexpr ElementCount getScalable(ScalarTy MinVal)
Definition TypeSize.h:312
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
Definition TypeSize.h:315
constexpr bool isScalar() const
Exactly one element.
Definition TypeSize.h:320
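ElementCount pairs a minimum element count with a scalable flag; a small sketch:
  #include "llvm/Support/TypeSize.h"
  using namespace llvm;

  void ecDemo() {
    ElementCount Fixed = ElementCount::getFixed(4);   // a <4 x ...> vector
    ElementCount Scal = ElementCount::getScalable(2); // a <vscale x 2 x ...> vector
    bool A = Fixed.isVector(); // true: more than one element
    bool B = Scal.isScalar();  // false
    (void)A; (void)B;
  }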
EpilogueVectorizerEpilogueLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, GeneratedRTChecks &Checks, VPlan &Plan)
BasicBlock * createVectorizedLoopSkeleton() final
Implements the interface for creating a vectorized skeleton using the epilogue loop strategy (i....
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
A specialized derived class of inner loop vectorizer that performs vectorization of main loops in the...
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
EpilogueVectorizerMainLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, GeneratedRTChecks &Check, VPlan &Plan)
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:23
Class to represent function types.
param_iterator param_begin() const
param_iterator param_end() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:211
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags none()
void applyUpdates(ArrayRef< UpdateT > Updates)
Submit updates to all available trees.
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2853
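A minimal sketch of IRBuilder use (helper name hypothetical): the builder is positioned once, then each Create* call emits an instruction at that point.
  #include "llvm/IR/IRBuilder.h"
  using namespace llvm;

  static Value *emitSum(BasicBlock *BB, Value *A, Value *B) {
    IRBuilder<> Builder(BB);               // insertion point: end of BB
    return Builder.CreateAdd(A, B, "sum"); // may constant-fold to a Constant
  }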
A struct for saving information about induction variables.
const SCEV * getStep() const
ArrayRef< Instruction * > getCastInsts() const
Returns an ArrayRef to the type cast instructions in the induction update chain, that are redundant w...
@ IK_PtrInduction
Pointer induction var. Step = C.
InnerLoopAndEpilogueVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, GeneratedRTChecks &Checks, VPlan &Plan, ElementCount VecWidth, ElementCount MinProfitableTripCount, unsigned UnrollFactor)
EpilogueLoopVectorizationInfo & EPI
Holds and updates state information required to vectorize the main loop and its epilogue in two separ...
InnerLoopVectorizer vectorizes loops which contain only one basic block to a specified vectorization ...
virtual void printDebugTracesAtStart()
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
const TargetTransformInfo * TTI
Target Transform Info.
LoopVectorizationCostModel * Cost
The profitability analysis.
friend class LoopVectorizationPlanner
InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, ElementCount VecWidth, unsigned UnrollFactor, LoopVectorizationCostModel *CM, GeneratedRTChecks &RTChecks, VPlan &Plan)
PredicatedScalarEvolution & PSE
A wrapper around ScalarEvolution used to add runtime SCEV checks.
LoopInfo * LI
Loop Info.
DominatorTree * DT
Dominator Tree.
void fixVectorizedLoop(VPTransformState &State)
Fix the vectorized code, taking care of header phi's, and more.
virtual BasicBlock * createVectorizedLoopSkeleton()
Creates a basic block for the scalar preheader.
virtual void printDebugTracesAtEnd()
AssumptionCache * AC
Assumption Cache.
IRBuilder Builder
The builder that we use.
void fixNonInductionPHIs(VPTransformState &State)
Fix the non-induction PHIs in Plan.
VPBasicBlock * VectorPHVPBB
The vector preheader block of Plan, used as target for check blocks introduced during skeleton creati...
unsigned UF
The vectorization unroll factor to use.
GeneratedRTChecks & RTChecks
Structure to hold information about generated runtime checks, responsible for cleaning the checks,...
virtual ~InnerLoopVectorizer()=default
ElementCount VF
The vectorization SIMD factor to use.
Loop * OrigLoop
The original loop.
BasicBlock * createScalarPreheader(StringRef Prefix)
Create and return a new IR basic block for the scalar preheader whose name is prefixed with Prefix.
static InstructionCost getInvalid(CostType Val=0)
static InstructionCost getMax()
CostType getValue() const
This function is intended to be used as sparingly as possible, since the class provides the full rang...
bool isCast() const
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
const char * getOpcodeName() const
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:354
LLVM_ABI APInt getMask() const
For example, this is 0xFF for an 8-bit integer, 0xFFFF for i16, etc.
Definition Type.cpp:378
The group of interleaved loads/stores sharing the same stride and close to each other.
auto members() const
Return an iterator range over the non-null members of this group, in index order.
InstTy * getInsertPos() const
uint32_t getNumMembers() const
Drive the analysis of interleaved memory accesses in the loop.
bool requiresScalarEpilogue() const
Returns true if an interleaved group that may access memory out-of-bounds requires a scalar epilogue ...
LLVM_ABI void analyzeInterleaving(bool EnableMaskedInterleavedGroup)
Analyze the interleaved accesses and collect them in interleave groups.
An instruction for reading from memory.
Type * getPointerOperandType() const
This analysis provides dependence information for the memory accesses of a loop.
const RuntimePointerChecking * getRuntimePointerChecking() const
unsigned getNumRuntimePointerChecks() const
Number of memchecks required to prove independence of otherwise may-alias pointers.
Analysis pass that exposes the LoopInfo for a function.
Definition LoopInfo.h:587
bool contains(const LoopT *L) const
Return true if the specified loop is contained within this loop.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
void getExitingBlocks(SmallVectorImpl< BlockT * > &ExitingBlocks) const
Return all blocks inside the loop that have successors outside of the loop.
BlockT * getHeader() const
iterator_range< block_iterator > blocks() const
Store the result of a depth first search within basic blocks contained by a single loop.
RPOIterator beginRPO() const
Reverse iterate over the cached postorder blocks.
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
RPOIterator endRPO() const
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
void removeBlock(BlockT *BB)
This method completely removes BB from all data structures, including all of the Loop objects it is n...
LoopVectorizationCostModel - estimates the expected speedups due to vectorization.
bool isEpilogueVectorizationProfitable(const ElementCount VF, const unsigned IC) const
Returns true if epilogue vectorization is considered profitable, and false otherwise.
bool useWideActiveLaneMask() const
Returns true if the use of wide lane masks is requested and the loop is using tail-folding with a lan...
bool isPredicatedInst(Instruction *I) const
Returns true if I is an instruction that needs to be predicated at runtime.
void collectValuesToIgnore()
Collect values we want to ignore in the cost model.
BlockFrequencyInfo * BFI
The BlockFrequencyInfo returned from GetBFI.
BlockFrequencyInfo & getBFI()
Returns the BlockFrequencyInfo for the function if cached, otherwise fetches it via GetBFI.
bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be uniform after vectorization.
bool preferTailFoldedLoop() const
Returns true if tail-folding is preferred over an epilogue.
bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF)
Returns true if an artificially high cost for emulated masked memrefs should be used.
void collectNonVectorizedAndSetWideningDecisions(ElementCount VF)
Collect values that will not be widened, including Uniforms, Scalars, and Instructions to Scalarize f...
bool isMaskRequired(Instruction *I) const
Wrapper function for LoopVectorizationLegality::isMaskRequired, that passes the Instruction I and if ...
PredicatedScalarEvolution & PSE
Predicated scalar evolution analysis.
const LoopVectorizeHints * Hints
Loop Vectorize Hint.
const TargetTransformInfo & TTI
Vector target information.
LoopVectorizationLegality * Legal
Vectorization legality.
uint64_t getPredBlockCostDivisor(TargetTransformInfo::TargetCostKind CostKind, const BasicBlock *BB)
A helper function that returns how much we should divide the cost of a predicated block by.
std::optional< InstructionCost > getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy) const
Return the cost of instructions in an inloop reduction pattern, if I is part of that pattern.
InstructionCost getInstructionCost(Instruction *I, ElementCount VF)
Returns the execution time cost of an instruction for a given vector width.
bool interleavedAccessCanBeWidened(Instruction *I, ElementCount VF) const
Returns true if I is a memory instruction in an interleaved-group of memory accesses that can be vect...
const TargetLibraryInfo * TLI
Target Library Info.
bool memoryInstructionCanBeWidened(Instruction *I, ElementCount VF)
Returns true if I is a memory instruction with consecutive memory access that can be widened.
const InterleaveGroup< Instruction > * getInterleavedAccessGroup(Instruction *Instr) const
Get the interleaved access group that Instr belongs to.
InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const
Estimate cost of an intrinsic call instruction CI if it were vectorized with factor VF.
bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be scalar after vectorization.
bool isOptimizableIVTruncate(Instruction *I, ElementCount VF)
Return True if instruction I is an optimizable truncate whose operand is an induction variable.
FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC)
Loop * TheLoop
The loop that we evaluate.
InterleavedAccessInfo & InterleaveInfo
The interleave access information contains groups of interleaved accesses with the same stride and cl...
SmallPtrSet< const Value *, 16 > ValuesToIgnore
Values to ignore in the cost model.
void setVectorizedCallDecision(ElementCount VF)
A call may be vectorized in different ways depending on whether we have vectorized variants available...
void invalidateCostModelingDecisions()
Invalidates decisions already taken by the cost model.
bool isAccessInterleaved(Instruction *Instr) const
Check if Instr belongs to any interleaved access group.
void setTailFoldingStyle(bool IsScalableVF, unsigned UserIC)
Selects and saves TailFoldingStyle.
OptimizationRemarkEmitter * ORE
Interface to emit optimization remarks.
LoopInfo * LI
Loop Info analysis.
bool requiresScalarEpilogue(bool IsVectorizing) const
Returns true if we're required to use a scalar epilogue for at least the final iteration of the origi...
SmallPtrSet< const Value *, 16 > VecValuesToIgnore
Values to ignore in the cost model when VF > 1.
bool isProfitableToScalarize(Instruction *I, ElementCount VF) const
void setWideningDecision(const InterleaveGroup< Instruction > *Grp, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for interleaving group Grp and vector ...
bool isEpilogueAllowed() const
Returns true if an epilogue is allowed (e.g., not prevented by optsize or a loop hint annotation).
CallWideningDecision getCallWideningDecision(CallInst *CI, ElementCount VF) const
bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const
bool shouldConsiderInvariant(Value *Op)
Returns true if Op should be considered invariant and if it is trivially hoistable.
bool foldTailByMasking() const
Returns true if all loop blocks should be masked to fold tail loop.
bool foldTailWithEVL() const
Returns true if VP intrinsics with explicit vector length support should be generated in the tail fol...
bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const
Returns true if the instructions in this block require predication for any reason,...
void setCallWideningDecision(CallInst *CI, ElementCount VF, InstWidening Kind, Function *Variant, Intrinsic::ID IID, std::optional< unsigned > MaskPos, InstructionCost Cost)
AssumptionCache * AC
Assumption cache.
void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for instruction I and vector width VF.
InstWidening
Decision that was taken during cost calculation for memory instruction.
bool usePredicatedReductionSelect(RecurKind RecurrenceKind) const
Returns true if the predicated reduction select should be used to set the incoming value for the redu...
LoopVectorizationCostModel(EpilogueLowering SEL, Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, LoopVectorizationLegality *Legal, const TargetTransformInfo &TTI, const TargetLibraryInfo *TLI, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, std::function< BlockFrequencyInfo &()> GetBFI, const Function *F, const LoopVectorizeHints *Hints, InterleavedAccessInfo &IAI, VFSelectionContext &Config)
std::pair< InstructionCost, InstructionCost > getDivRemSpeculationCost(Instruction *I, ElementCount VF)
Return the costs for our two available strategies for lowering a div/rem operation which requires spe...
InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF) const
Estimate cost of a call instruction CI if it were vectorized with factor VF.
bool isScalarWithPredication(Instruction *I, ElementCount VF)
Returns true if I is an instruction which requires predication and for which our chosen predication s...
bool isDivRemScalarWithPredication(InstructionCost ScalarCost, InstructionCost SafeDivisorCost) const
Given costs for both strategies, return true if the scalar predication lowering should be used for di...
std::function< BlockFrequencyInfo &()> GetBFI
A function to lazily fetch BlockFrequencyInfo.
InstructionCost expectedCost(ElementCount VF)
Returns the expected execution cost.
void setCostBasedWideningDecision(ElementCount VF)
A memory access instruction may be vectorized in more than one way.
InstWidening getWideningDecision(Instruction *I, ElementCount VF) const
Return the cost model decision for the given instruction I and vector width VF.
InstructionCost getWideningCost(Instruction *I, ElementCount VF)
Return the vectorization cost for the given instruction I and vector width VF.
TailFoldingStyle getTailFoldingStyle() const
Returns the TailFoldingStyle that is best for the current loop.
void collectInstsToScalarize(ElementCount VF)
Collects the instructions to scalarize for each predicated instruction in the loop.
LoopVectorizationLegality checks if it is legal to vectorize a loop, and to what vectorization factor...
MapVector< PHINode *, InductionDescriptor > InductionList
InductionList saves induction variables and maps them to the induction descriptor.
bool canVectorize(bool UseVPlanNativePath)
Returns true if it is legal to vectorize this loop.
bool canVectorizeFPMath(bool EnableStrictReductions)
Returns true if it is legal to vectorize the FP math operations in this loop.
const SmallVector< BasicBlock *, 4 > & getCountableExitingBlocks() const
Returns all exiting blocks with a countable exit, i.e.
bool hasUncountableEarlyExit() const
Returns true if the loop has uncountable early exits, i.e.
bool hasHistograms() const
Returns a list of all known histogram operations in the loop.
const LoopAccessInfo * getLAI() const
Planner drives the vectorization process after having passed Legality checks.
DenseMap< const SCEV *, Value * > executePlan(ElementCount VF, unsigned UF, VPlan &BestPlan, InnerLoopVectorizer &LB, DominatorTree *DT, EpilogueVectorizationKind EpilogueVecKind=EpilogueVectorizationKind::None)
Generate the IR code for the vectorized loop captured in VPlan BestPlan according to the best selecte...
EpilogueVectorizationKind
@ MainLoop
Vectorizing the main loop of epilogue vectorization.
VPlan & getPlanFor(ElementCount VF) const
Return the VPlan for VF.
Definition VPlan.cpp:1691
VectorizationFactor planInVPlanNativePath(ElementCount UserVF)
Use the VPlan-native path to plan how to best vectorize, return the best VF and its cost.
void updateLoopMetadataAndProfileInfo(Loop *VectorLoop, VPBasicBlock *HeaderVPBB, const VPlan &Plan, bool VectorizingEpilogue, MDNode *OrigLoopID, std::optional< unsigned > OrigAverageTripCount, unsigned OrigLoopInvocationWeight, unsigned EstimatedVFxUF, bool DisableRuntimeUnroll)
Update loop metadata and profile info for both the scalar remainder loop and VectorLoop,...
Definition VPlan.cpp:1742
void buildVPlans(ElementCount MinVF, ElementCount MaxVF)
Build VPlans for power-of-2 VF's between MinVF and MaxVF inclusive, according to the information gath...
Definition VPlan.cpp:1675
void attachRuntimeChecks(VPlan &Plan, GeneratedRTChecks &RTChecks, bool HasBranchWeights) const
Attach the runtime checks of RTChecks to Plan.
unsigned selectInterleaveCount(VPlan &Plan, ElementCount VF, InstructionCost LoopCost)
void emitInvalidCostRemarks(OptimizationRemarkEmitter *ORE)
Emit remarks for recipes with invalid costs in the available VPlans.
static bool getDecisionAndClampRange(const std::function< bool(ElementCount)> &Predicate, VFRange &Range)
Test a Predicate on a Range of VF's.
Definition VPlan.cpp:1656
void printPlans(raw_ostream &O)
Definition VPlan.cpp:1848
void plan(ElementCount UserVF, unsigned UserIC)
Build VPlans for the specified UserVF and UserIC if they are non-zero or all applicable candidate VFs...
std::unique_ptr< VPlan > selectBestEpiloguePlan(VPlan &MainPlan, ElementCount MainLoopVF, unsigned IC)
void addMinimumIterationCheck(VPlan &Plan, ElementCount VF, unsigned UF, ElementCount MinProfitableTripCount) const
Create a check to Plan to see if the vector loop should be executed based on its trip count.
bool hasPlanWithVF(ElementCount VF) const
Look through the existing plans and return true if we have one with vectorization factor VF.
std::pair< VectorizationFactor, VPlan * > computeBestVF()
Compute and return the most profitable vectorization factor and the corresponding best VPlan.
This holds vectorization requirements that must be verified late in the process.
Utility class for getting and setting loop vectorizer hints in the form of loop metadata.
bool allowVectorization(Function *F, Loop *L, bool VectorizeOnlyWhenForced) const
void emitRemarkWithHints() const
Dumps all the hint information.
const char * vectorizeAnalysisPassName() const
If hints are provided that force vectorization, use the AlwaysPrint pass name to force the frontend t...
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
bool hasLoopInvariantOperands(const Instruction *I) const
Return true if all the operands of the specified instruction are loop invariant.
Definition LoopInfo.cpp:73
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
Definition LoopInfo.cpp:659
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
Definition LoopInfo.cpp:67
Metadata node.
Definition Metadata.h:1080
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition MapVector.h:124
Function * getFunction(StringRef Name) const
Look up the specified function in the module symbol table.
Definition Module.cpp:235
Diagnostic information for optimization analysis remarks related to pointer aliasing.
Diagnostic information for optimization analysis remarks related to floating-point non-commutativity.
Diagnostic information for optimization analysis remarks.
The optimization diagnostic interface.
LLVM_ABI void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for missed-optimization remarks.
Diagnostic information for applied optimization remarks.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
Value * getIncomingValueForBlock(const BasicBlock *BB) const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
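A sketch of maintaining a PHI when a new predecessor edge is added (names hypothetical); every predecessor block must have a matching incoming value:
  #include "llvm/IR/Instructions.h"
  #include <cassert>
  using namespace llvm;

  static void addPhiEdge(PHINode *Phi, Value *V, BasicBlock *NewPred) {
    Phi->addIncoming(V, NewPred); // one incoming value per predecessor edge
    assert(Phi->getIncomingValueForBlock(NewPred) == V);
  }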
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
LLVM_ABI const SCEVPredicate & getPredicate() const
LLVM_ABI unsigned getSmallConstantMaxTripCount()
Returns the upper bound of the loop trip count as a normal unsigned value, or 0 if the trip count is ...
LLVM_ABI const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
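A sketch of how a transform pass typically reports preserved analyses, assuming the usual convention of preserving everything when nothing changed:
  #include "llvm/IR/Analysis.h"
  using namespace llvm;

  static PreservedAnalyses report(bool Changed) {
    if (!Changed)
      return PreservedAnalyses::all(); // nothing to invalidate
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();     // the pass did not touch the CFG
    return PA;
  }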
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
The RecurrenceDescriptor is used to identify recurrences variables in a loop.
static bool isFMulAddIntrinsic(Instruction *I)
Returns true if the instruction is a call to the llvm.fmuladd intrinsic.
FastMathFlags getFastMathFlags() const
static LLVM_ABI unsigned getOpcode(RecurKind Kind)
Returns the opcode corresponding to the RecurrenceKind.
Type * getRecurrenceType() const
Returns the type of the recurrence.
const SmallPtrSet< Instruction *, 8 > & getCastInsts() const
Returns a reference to the instructions used for type-promoting the recurrence.
static bool isFindLastRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
static bool isAnyOfRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
bool isSigned() const
Returns true if all source operands of the recurrence are SExtInsts.
RecurKind getRecurrenceKind() const
static bool isFindIVRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
static bool isMinMaxRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is any min/max kind.
std::optional< ArrayRef< PointerDiffInfo > > getDiffChecks() const
const SmallVectorImpl< RuntimePointerCheck > & getChecks() const
Returns the checks that generateChecks created.
This class uses information about analyze scalars to rewrite expressions in canonical form.
ScalarEvolution * getSE()
bool isInsertedInstruction(Instruction *I) const
Return true if the specified instruction was inserted by the code rewriter.
LLVM_ABI Value * expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc)
Generates a code sequence that evaluates this predicate.
void eraseDeadInstructions(Value *Root)
Remove inserted instructions that are dead, e.g.
virtual bool isAlwaysTrue() const =0
Returns true if the predicate is always true.
This class represents an analyzed expression in the program.
LLVM_ABI bool isZero() const
Return true if the expression is a constant zero.
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
The main scalar evolution driver.
LLVM_ABI const SCEV * getURemExpr(SCEVUse LHS, SCEVUse RHS)
Represents an unsigned remainder expression based on unsigned division.
LLVM_ABI const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...
LLVM_ABI const SCEV * getConstant(ConstantInt *V)
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
LLVM_ABI const SCEV * getTripCountFromExitCount(const SCEV *ExitCount)
A version of getTripCountFromExitCount below which always picks an evaluation type which can not resu...
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
LLVM_ABI void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may affect Scalar...
LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
LLVM_ABI const SCEV * getElementCount(Type *Ty, ElementCount EC, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
ConstantRange getUnsignedRange(const SCEV *S)
Determine the unsigned range for a particular SCEV.
LLVM_ABI void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may affect its v...
LLVM_ABI void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
const SCEV * getMinusOne(Type *Ty)
Return a SCEV for the constant -1 of a specific type.
LLVM_ABI void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be...
LLVM_ABI const SCEV * getMulExpr(SmallVectorImpl< SCEVUse > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
LLVM_ABI unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
APInt getUnsignedRangeMax(const SCEV *S)
Determine the max of the unsigned range for a particular SCEV.
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< SCEVUse > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, SCEVUse LHS, SCEVUse RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
LLVM_ABI const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
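A small sketch of the ScalarEvolution queries above (function name hypothetical); getBackedgeTakenCount returns a SCEVCouldNotCompute node when the count is unknown:
  #include "llvm/Analysis/LoopInfo.h"
  #include "llvm/Analysis/ScalarEvolution.h"
  using namespace llvm;

  static void querySE(ScalarEvolution &SE, Loop *L, Value *V) {
    const SCEV *S = SE.getSCEV(V);                 // symbolic form of V
    bool Inv = SE.isLoopInvariant(S, L);           // unchanging within L?
    const SCEV *BTC = SE.getBackedgeTakenCount(L); // may be SCEVCouldNotCompute
    (void)Inv; (void)BTC;
  }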
This class represents the LLVM 'select' instruction.
A vector that has set insertion semantics.
Definition SetVector.h:57
size_type size() const
Determine the number of elements in the SetVector.
Definition SetVector.h:103
void insert_range(Range &&R)
Definition SetVector.h:176
size_type count(const_arg_type key) const
Count the number of elements of a given key in the SetVector.
Definition SetVector.h:262
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or fewer elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:339
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
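Both small containers trade heap allocations for inline storage below a chosen size; a minimal sketch:
  #include "llvm/ADT/SmallPtrSet.h"
  #include "llvm/ADT/SmallVector.h"
  #include "llvm/IR/Value.h"
  using namespace llvm;

  static void smallDemo(Value *A, Value *B) {
    SmallVector<Value *, 4> Vec; // no heap allocation up to 4 elements
    Vec.push_back(A);
    SmallPtrSet<Value *, 8> Set; // pointer-identity set
    Set.insert(A);
    bool Has = Set.contains(B);  // false unless B == A
    (void)Has;
  }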
An instruction for storing to memory.
Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
VectorInstrContext
Represents a hint about the context in which an insert/extract is used.
@ None
The insert/extract is not used with a load/store.
@ Load
The value being inserted comes from a load (InsertElement only).
@ Store
The extracted value is stored (ExtractElement only).
static LLVM_ABI OperandValueInfo getOperandInfo(const Value *V)
Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
TargetCostKind
The kind of cost model.
@ TCK_RecipThroughput
Reciprocal throughput.
@ TCK_CodeSize
Instruction code size.
@ TCK_SizeAndLatency
The weighted sum of size and latency.
@ TCK_Latency
The latency of instruction.
@ TCC_Free
Expected to fold away in lowering.
LLVM_ABI InstructionCost getInstructionCost(const User *U, ArrayRef< const Value * > Operands, TargetCostKind CostKind) const
Estimate the cost of a given IR user when lowered.
@ SK_Splice
Concatenates elements from the first input vector with elements of the second input vector.
@ SK_Broadcast
Broadcast element 0 to all other elements.
@ SK_Reverse
Reverse the order of the vector.
CastContextHint
Represents a hint about the context in which a cast is used.
@ Reversed
The cast is used with a reversed load/store.
@ Masked
The cast is used with a masked load/store.
@ Normal
The cast is used with a normal load/store.
@ Interleave
The cast is used with an interleaved load/store.
@ GatherScatter
The cast is used with a gather/scatter.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
This class implements a switch-like dispatch statement for a value of 'T' using dyn_cast functionalit...
Definition TypeSwitch.h:89
TypeSwitch< T, ResultT > & Case(CallableT &&caseFn)
Add a case on the given type.
Definition TypeSwitch.h:98
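TypeSwitch dispatches on the dynamic type via dyn_cast; a sketch classifying instructions (the classification itself is illustrative):
  #include "llvm/ADT/StringRef.h"
  #include "llvm/ADT/TypeSwitch.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  static StringRef classify(Instruction *I) {
    return TypeSwitch<Instruction *, StringRef>(I)
        .Case<LoadInst>([](LoadInst *) { return "load"; })
        .Case<StoreInst>([](StoreInst *) { return "store"; })
        .Default([](Instruction *) { return "other"; });
  }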
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
LLVM_ABI unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:290
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:286
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:370
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:130
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:236
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:310
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:141
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
op_range operands()
Definition User.h:267
iterator_range< op_iterator > op_range
Definition User.h:256
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:25
Value * getOperand(unsigned i) const
Definition User.h:207
static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)
Retrieve all the VFInfo instances associated with the CallInst CI.
Definition VectorUtils.h:76
Holds state needed to make cost decisions before computing costs per-VF, including the maximum VFs.
std::pair< unsigned, unsigned > getSmallestAndWidestTypes() const
const TTI::TargetCostKind CostKind
The kind of cost that we are calculating.
void collectElementTypesForWidening(const SmallPtrSetImpl< const Value * > *ValuesToIgnore=nullptr)
Collect element types in the loop that need widening.
std::optional< unsigned > getVScaleForTuning() const
VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph.
Definition VPlan.h:4153
RecipeListTy::iterator iterator
Instruction iterators...
Definition VPlan.h:4180
iterator end()
Definition VPlan.h:4190
iterator begin()
Recipe iterator methods.
Definition VPlan.h:4188
iterator_range< iterator > phis()
Returns an iterator range over the PHI-like recipes in the block.
Definition VPlan.h:4241
InstructionCost cost(ElementCount VF, VPCostContext &Ctx) override
Return the cost of this VPBasicBlock.
Definition VPlan.cpp:747
iterator getFirstNonPhi()
Return the position of the first non-phi node recipe in the block.
Definition VPlan.cpp:233
const VPRecipeBase & front() const
Definition VPlan.h:4200
VPRecipeBase * getTerminator()
If the block has multiple successors, return the branch recipe terminating the block.
Definition VPlan.cpp:630
bool empty() const
Definition VPlan.h:4199
const VPBasicBlock * getExitingBasicBlock() const
Definition VPlan.cpp:203
void setName(const Twine &newName)
Definition VPlan.h:182
VPlan * getPlan()
Definition VPlan.cpp:178
const VPBasicBlock * getEntryBasicBlock() const
Definition VPlan.cpp:183
VPBlockBase * getSingleSuccessor() const
Definition VPlan.h:230
static void reassociateBlocks(VPBlockBase *Old, VPBlockBase *New)
Reassociate all the blocks connected to Old so that they now point to New.
Definition VPlanUtils.h:262
static auto blocksOnly(T &&Range)
Return an iterator range over Range which only includes BlockTy blocks.
Definition VPlanUtils.h:290
VPlan-based builder utility analogous to IRBuilder.
VPInstruction * createAdd(VPValue *LHS, VPValue *RHS, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="", VPRecipeWithIRFlags::WrapFlagsTy WrapFlags={false, false})
T * insert(T *R)
Insert R at the current insertion point. Returns R unchanged.
static VPBuilder getToInsertAfter(VPRecipeBase *R)
Create a VPBuilder to insert after R.
VPPhi * createScalarPhi(ArrayRef< VPValue * > IncomingValues, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="", const VPIRFlags &Flags={})
VPInstruction * createNaryOp(unsigned Opcode, ArrayRef< VPValue * > Operands, Instruction *Inst=nullptr, const VPIRFlags &Flags={}, const VPIRMetadata &MD={}, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="")
Create an N-ary operation with Opcode, Operands and set Inst as its underlying Instruction.
unsigned getNumDefinedValues() const
Returns the number of values defined by the VPDef.
Definition VPlanValue.h:498
VPValue * getVPSingleValue()
Returns the only VPValue defined by the VPDef.
Definition VPlanValue.h:471
A pure virtual base class for all recipes modeling header phis, including phis for first order recurr...
Definition VPlan.h:2299
virtual VPValue * getBackedgeValue()
Returns the incoming value from the loop backedge.
Definition VPlan.h:2341
VPValue * getStartValue()
Returns the start value of the phi, if one is set.
Definition VPlan.h:2330
A recipe representing a sequence of load -> update -> store as part of a histogram operation.
Definition VPlan.h:2044
A special type of VPBasicBlock that wraps an existing IR basic block.
Definition VPlan.h:4306
Class to record and manage LLVM IR flags.
Definition VPlan.h:691
LLVM_ABI_FOR_TEST FastMathFlags getFastMathFlags() const
This is a concrete Recipe that models a single VPlan-level instruction.
Definition VPlan.h:1226
unsigned getNumOperandsWithoutMask() const
Returns the number of operands, excluding the mask if the VPInstruction is masked.
Definition VPlan.h:1452
iterator_range< operand_iterator > operandsWithoutMask()
Returns an iterator range over the operands excluding the mask operand if present.
Definition VPlan.h:1472
@ ResumeForEpilogue
Explicit user for the resume phi of the canonical induction in the main VPlan, used by the epilogue v...
Definition VPlan.h:1326
@ ReductionStartVector
Start vector for reductions with 3 operands: the original start value, the identity value for the red...
Definition VPlan.h:1317
@ ComputeReductionResult
Reduce the operands to the final reduction result using the operation specified via the operation's V...
Definition VPlan.h:1269
unsigned getOpcode() const
Definition VPlan.h:1401
void setName(StringRef NewName)
Set the symbolic name for the VPInstruction.
Definition VPlan.h:1500
VPValue * getMask() const
Returns the mask for the VPInstruction.
Definition VPlan.h:1466
bool isMasked() const
Returns true if the VPInstruction has a mask operand.
Definition VPlan.h:1442
VPInterleaveRecipe is a recipe for transforming an interleave group of load or stores into one wide l...
Definition VPlan.h:2947
detail::zippy< llvm::detail::zip_first, VPUser::const_operand_range, const_incoming_blocks_range > incoming_values_and_blocks() const
Returns an iterator range over pairs of incoming values and corresponding incoming blocks.
Definition VPlan.h:1629
VPRecipeBase is a base class modeling a sequence of one or more output IR instructions.
Definition VPlan.h:405
DebugLoc getDebugLoc() const
Returns the debug location of the recipe.
Definition VPlan.h:557
void moveBefore(VPBasicBlock &BB, iplist< VPRecipeBase >::iterator I)
Unlink this recipe and insert into BB before I.
void insertBefore(VPRecipeBase *InsertPos)
Insert an unlinked recipe into a basic block immediately before the specified recipe.
iplist< VPRecipeBase >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Helper class to create VPRecipes from IR instructions.
VPRecipeBase * tryToCreateWidenNonPhiRecipe(VPSingleDefRecipe *R, VFRange &Range)
Create and return a widened recipe for a non-phi recipe R if one can be created within the given VF R...
VPHistogramRecipe * widenIfHistogram(VPInstruction *VPI)
If VPI represents a histogram operation (as determined by LoopVectorizationLegality) make that safe f...
VPRecipeBase * tryToWidenMemory(VPInstruction *VPI, VFRange &Range)
Check if the load or store instruction VPI should widened for Range.Start and potentially masked.
bool replaceWithFinalIfReductionStore(VPInstruction *VPI, VPBuilder &FinalRedStoresBuilder)
If VPI is a store of a reduction into an invariant address, delete it.
VPReplicateRecipe * handleReplication(VPInstruction *VPI, VFRange &Range)
Build a VPReplicationRecipe for VPI.
bool isOrdered() const
Returns true if the phi is part of an ordered reduction.
Definition VPlan.h:2746
unsigned getVFScaleFactor() const
Get the factor that the VF of this recipe's output should be scaled by, or 1 if it isn't scaled.
Definition VPlan.h:2725
bool isInLoop() const
Returns true if the phi is part of an in-loop reduction.
Definition VPlan.h:2749
RecurKind getRecurrenceKind() const
Returns the recurrence kind of the reduction.
Definition VPlan.h:2743
A recipe to represent inloop, ordered or partial reduction operations.
Definition VPlan.h:3040
VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks which form a Single-Entry-S...
Definition VPlan.h:4363
const VPBlockBase * getEntry() const
Definition VPlan.h:4407
void clearCanonicalIVNUW(VPInstruction *Increment)
Unsets NUW for the canonical IV increment Increment, for loop regions.
Definition VPlan.h:4491
VPRegionValue * getCanonicalIV()
Return the canonical induction variable of the region, null for replicating regions.
Definition VPlan.h:4475
VPReplicateRecipe replicates a given instruction producing multiple scalar copies of the original sca...
Definition VPlan.h:3194
VPSingleDef is a base class for recipes for modeling a sequence of one or more output IR that define ...
Definition VPlan.h:609
Instruction * getUnderlyingInstr()
Returns the underlying instruction.
Definition VPlan.h:676
An analysis for type-inference for VPValues.
Type * inferScalarType(const VPValue *V)
Infer the type of V. Returns the scalar type of V.
This class augments VPValue with operands which provide the inverse def-use edges from VPValue's user...
Definition VPlanValue.h:329
void setOperand(unsigned I, VPValue *New)
Definition VPlanValue.h:373
operand_iterator op_begin()
Definition VPlanValue.h:393
VPValue * getOperand(unsigned N) const
Definition VPlanValue.h:368
This is the base class of the VPlan Def/Use graph, used for modeling the data flow into,...
Definition VPlanValue.h:49
Value * getLiveInIRValue() const
Return the underlying IR value for a VPIRValue.
Definition VPlan.cpp:138
VPRecipeBase * getDefiningRecipe()
Returns the recipe defining this VPValue or nullptr if it is not defined by a recipe,...
Definition VPlan.cpp:128
Value * getUnderlyingValue() const
Return the underlying Value attached to this VPValue.
Definition VPlanValue.h:74
void replaceAllUsesWith(VPValue *New)
Definition VPlan.cpp:1469
void replaceUsesWithIf(VPValue *New, llvm::function_ref< bool(VPUser &U, unsigned Idx)> ShouldReplace)
Go through the uses list for this VPValue and make each use point to New if the callback ShouldReplac...
Definition VPlan.cpp:1475
user_range users()
Definition VPlanValue.h:155
A recipe to compute a pointer to the last element of each part of a widened memory access for widened...
Definition VPlan.h:2150
A recipe to compute the pointers for widened memory accesses of SourceElementTy.
Definition VPlan.h:2223
VPWidenCastRecipe is a recipe to create vector cast instructions.
Definition VPlan.h:1836
A recipe for handling GEP instructions.
Definition VPlan.h:2086
A recipe for handling phi nodes of integer and floating-point inductions, producing their vector valu...
Definition VPlan.h:2447
A recipe for widened phis.
Definition VPlan.h:2583
VPWidenRecipe is a recipe for producing a widened instruction using the opcode and operands of the re...
Definition VPlan.h:1780
VPlan models a candidate for vectorization, encoding various decisions taken to produce efficient outp...
Definition VPlan.h:4511
bool hasVF(ElementCount VF) const
Definition VPlan.h:4725
ElementCount getSingleVF() const
Returns the single VF of the plan, asserting that the plan has exactly one VF.
Definition VPlan.h:4738
VPBasicBlock * getEntry()
Definition VPlan.h:4603
VPValue * getTripCount() const
The trip count of the original loop.
Definition VPlan.h:4662
VPSymbolicValue & getVFxUF()
Returns VF * UF of the vector loop region.
Definition VPlan.h:4701
bool hasUF(unsigned UF) const
Definition VPlan.h:4750
ArrayRef< VPIRBasicBlock * > getExitBlocks() const
Return an ArrayRef containing VPIRBasicBlocks wrapping the exit blocks of the original scalar loop.
Definition VPlan.h:4652
VPIRValue * getOrAddLiveIn(Value *V)
Gets the live-in VPIRValue for V or adds a new live-in (if none exists yet) for V.
Definition VPlan.h:4775
VPIRValue * getZero(Type *Ty)
Return a VPIRValue wrapping the null value of type Ty.
Definition VPlan.h:4801
LLVM_ABI_FOR_TEST VPRegionBlock * getVectorLoopRegion()
Returns the VPRegionBlock of the vector loop.
Definition VPlan.cpp:1066
bool hasEarlyExit() const
Returns true if the VPlan is based on a loop with an early exit.
Definition VPlan.h:4898
InstructionCost cost(ElementCount VF, VPCostContext &Ctx)
Return the cost of this plan.
Definition VPlan.cpp:1048
void resetTripCount(VPValue *NewTripCount)
Resets the trip count for the VPlan.
Definition VPlan.h:4676
VPBasicBlock * getMiddleBlock()
Returns the 'middle' block of the plan, that is the block that selects whether to execute the scalar ...
Definition VPlan.h:4628
VPBasicBlock * getVectorPreheader() const
Returns the preheader of the vector loop region, if one exists, or null otherwise.
Definition VPlan.h:4608
VPSymbolicValue & getUF()
Returns the UF of the vector loop region.
Definition VPlan.h:4698
VPBasicBlock * getScalarPreheader() const
Return the VPBasicBlock for the preheader of the scalar loop.
Definition VPlan.h:4642
void execute(VPTransformState *State)
Generate the IR code for this VPlan.
Definition VPlan.cpp:918
VPIRBasicBlock * getScalarHeader() const
Return the VPIRBasicBlock wrapping the header of the scalar loop.
Definition VPlan.h:4648
VPSymbolicValue & getVF()
Returns the VF of the vector loop region.
Definition VPlan.h:4694
LLVM_ABI_FOR_TEST VPlan * duplicate()
Clone the current VPlan, update all VPValues of the new VPlan and cloned recipes to refer to the clon...
Definition VPlan.cpp:1214
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition Value.cpp:162
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:393
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:549
iterator_range< user_iterator > users()
Definition Value.h:426
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:318
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:202
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition DenseSet.h:175
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:230
constexpr bool isNonZero() const
Definition TypeSize.h:155
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:216
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr bool isFixed() const
Returns true if the quantity is not scaled by vscale.
Definition TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
constexpr bool isZero() const
Definition TypeSize.h:153
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:223
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
Definition TypeSize.h:252
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:237
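The isKnown* comparisons above only answer "known" when the relation holds for every possible vscale; a sketch with arbitrary values:
  #include "llvm/Support/TypeSize.h"
  using namespace llvm;

  void cmpDemo() {
    ElementCount F8 = ElementCount::getFixed(8);
    ElementCount S4 = ElementCount::getScalable(4); // 4 * vscale elements
    // Not known: 8 <= 4 * vscale fails for vscale == 1.
    bool LE = ElementCount::isKnownLE(F8, S4);      // false
    ElementCount S2 = S4.divideCoefficientBy(2);    // 2 * vscale elements
    (void)LE; (void)S2;
  }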
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
IteratorT end() const
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
Changed
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition CallingConv.h:76
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:190
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
match_combine_or< Ty... > m_CombineOr(const Ty &...Ps)
Combine pattern matchers matching any of Ps patterns.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
match_bind< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
auto match_fn(const Pattern &P)
A match functor that can be used as a UnaryPredicate in functional algorithms like all_of.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
auto m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
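A hedged sketch of the IR matchers above (V is a hypothetical Value *): match() tests the pattern, and the binding form of m_Value captures a sub-value on success.
  #include "llvm/IR/PatternMatch.h"
  using namespace llvm;
  using namespace llvm::PatternMatch;
  Value *X = nullptr;
  if (match(V, m_OneUse(m_Add(m_Value(X), m_One())))) {
    // V is a single-use add of some value X and the constant 1; X is now bound.
  }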
bind_cst_ty m_scev_APInt(const APInt *&C)
Match an SCEV constant and bind it to an APInt.
specificloop_ty m_SpecificLoop(const Loop *L)
cst_pred_ty< is_specific_signed_cst > m_scev_SpecificSInt(int64_t V)
Match an SCEV constant with a plain signed integer (sign-extended value will be matched)
match_bind< const SCEVMulExpr > m_scev_Mul(const SCEVMulExpr *&V)
bool match(const SCEV *S, const Pattern &P)
SCEVAffineAddRec_match< Op0_t, Op1_t, match_isa< const Loop > > m_scev_AffineAddRec(const Op0_t &Op0, const Op1_t &Op1)
SCEVBinaryExpr_match< SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagAnyWrap, true > m_scev_c_Mul(const Op0_t &Op0, const Op1_t &Op1)
int_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
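Likewise for the SCEV matchers above (S is a hypothetical const SCEV *; header assumed); per the signature of m_scev_APInt, a successful match binds the constant's value.
  #include "llvm/Analysis/ScalarEvolutionPatternMatch.h"
  using namespace llvm;
  using namespace llvm::SCEVPatternMatch;
  const APInt *C = nullptr;
  if (match(S, m_scev_APInt(C))) {
    // S is a constant SCEV; *C holds its value (e.g. for stride checks).
  }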
bool matchFindIVResult(VPInstruction *VPI, Op0_t ReducedIV, Op1_t Start)
Match FindIV result pattern: select(icmp ne ComputeReductionResult(ReducedIV), Sentinel),...
VPInstruction_match< VPInstruction::ExtractLastLane, Op0_t > m_ExtractLastLane(const Op0_t &Op0)
VPInstruction_match< VPInstruction::BranchOnCount > m_BranchOnCount()
auto m_VPValue()
Match an arbitrary VPValue and ignore it.
VPInstruction_match< VPInstruction::ExtractLastPart, Op0_t > m_ExtractLastPart(const Op0_t &Op0)
bool match(Val *V, const Pattern &P)
VPInstruction_match< VPInstruction::ExtractLane, Op0_t, Op1_t > m_ExtractLane(const Op0_t &Op0, const Op1_t &Op1)
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
Add a small namespace to avoid name clashes with the classes used in the streaming interface.
DiagnosticInfoOptimizationBase::Argument NV
NodeAddr< InstrNode * > Instr
Definition RDFGraph.h:389
friend class Instruction
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
VPValue * getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr)
Get or create a VPValue that corresponds to the expansion of Expr.
VPBasicBlock * getFirstLoopHeader(VPlan &Plan, VPDominatorTree &VPDT)
Returns the header block of the first, top-level loop, or null if none exist.
bool isAddressSCEVForCost(const SCEV *Addr, ScalarEvolution &SE, const Loop *L)
Returns true if Addr is an address SCEV that can be passed to TTI::getAddressComputationCost,...
VPInstruction * findCanonicalIVIncrement(VPlan &Plan)
Find the canonical IV increment of Plan's vector loop region.
VPRecipeBase * findRecipe(VPValue *Start, PredT Pred)
Search Start's users for a recipe satisfying Pred, looking through recipes with definitions.
Definition VPlanUtils.h:115
VPSingleDefRecipe * findHeaderMask(VPlan &Plan)
Collect the header mask with the pattern: (ICMP_ULE, WideCanonicalIV, backedge-taken-count) TODO: Int...
GEPNoWrapFlags getGEPFlagsForPtr(VPValue *Ptr)
Returns the GEP nowrap flags for Ptr, looking through pointer casts mirroring Value::stripPointerCast...
const SCEV * getSCEVExprForVPValue(const VPValue *V, PredicatedScalarEvolution &PSE, const Loop *L=nullptr)
Return the SCEV expression for V.
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE, AssumptionCache *AC, MemorySSAUpdater *MSSAU, bool PreserveLCSSA)
Simplify each loop in a loop nest recursively.
void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, const Loop *TheLoop, Instruction *I=nullptr, DebugLoc DL={})
Reports an informative message: print Msg for debugging purposes as well as an optimization remark.
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:830
constexpr auto not_equal_to(T &&Arg)
Functor variant of std::not_equal_to that can be used as a UnaryPredicate in functional algorithms li...
Definition STLExtras.h:2179
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ABI Value * addRuntimeChecks(Instruction *Loc, Loop *TheLoop, const SmallVectorImpl< RuntimePointerCheck > &PointerChecks, SCEVExpander &Expander, bool HoistRuntimeChecks=false)
Add code that checks at runtime if the accessed arrays in PointerChecks overlap.
auto cast_if_present(const Y &Val)
cast_if_present<X> - Functionally identical to cast, except that a null value is accepted.
Definition Casting.h:683
LLVM_ABI bool RemoveRedundantDbgInstrs(BasicBlock *BB)
Try to remove redundant dbg.value instructions from given basic block.
LLVM_ABI_FOR_TEST cl::opt< bool > VerifyEachVPlan
LLVM_ABI std::optional< unsigned > getLoopEstimatedTripCount(Loop *L, unsigned *EstimatedLoopInvocationWeight=nullptr)
Return either:
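A short usage sketch (L is a hypothetical Loop *); the optional is empty when no estimate can be derived from profile data.
  #include "llvm/Transforms/Utils/LoopUtils.h"
  if (std::optional<unsigned> EstTC = llvm::getLoopEstimatedTripCount(L)) {
    // *EstTC is the estimated iteration count per loop entry, from branch weights.
  }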
static void reportVectorization(OptimizationRemarkEmitter *ORE, Loop *TheLoop, VectorizationFactor VF, unsigned IC)
Report successful vectorization of the loop.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1738
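A minimal sketch of the range form (container hypothetical):
  #include "llvm/ADT/STLExtras.h"
  #include "llvm/ADT/SmallVector.h"
  #include "llvm/Support/MathExtras.h"
  llvm::SmallVector<unsigned, 4> Widths = {2, 4, 8};
  bool AllPow2 = llvm::all_of(Widths, [](unsigned W) { return llvm::isPowerOf2_32(W); });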
unsigned getLoadStoreAddressSpace(const Value *I)
A helper function that returns the address space of the pointer operand of load or store instruction.
LLVM_ABI Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)
Returns the min/max intrinsic used when expanding a min/max reduction.
LLVM_ABI Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
InstructionCost Cost
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
Value * getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF)
Return the runtime value for VF.
LLVM_ABI bool formLCSSARecursively(Loop &L, const DominatorTree &DT, const LoopInfo *LI, ScalarEvolution *SE)
Put a loop nest into LCSSA form.
Definition LCSSA.cpp:449
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2207
LLVM_ABI bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:633
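A hedged sketch of the erase-while-iterating idiom this enables (BB is a hypothetical BasicBlock &):
  #include "llvm/ADT/STLExtras.h"
  #include "llvm/Transforms/Utils/Local.h"
  for (llvm::Instruction &I : llvm::make_early_inc_range(BB))
    if (llvm::isInstructionTriviallyDead(&I))
      I.eraseFromParent(); // safe: the iterator already advanced past I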
Align getLoadStoreAlignment(const Value *I)
A helper function that returns the alignment of load or store instruction.
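A sketch tying the load/store helpers together (I is a hypothetical Value * known to be a LoadInst or StoreInst):
  #include "llvm/IR/Instructions.h"
  const llvm::Value *Ptr = llvm::getLoadStorePointerOperand(I);
  llvm::Align Alignment = llvm::getLoadStoreAlignment(I);
  unsigned AddrSpace = llvm::getLoadStoreAddressSpace(I);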
iterator_range< df_iterator< VPBlockShallowTraversalWrapper< VPBlockBase * > > > vp_depth_first_shallow(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order.
Definition VPlanCFG.h:253
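A hedged sketch (Plan is a hypothetical VPlan &): the shallow traversal visits top-level blocks without descending into region blocks; vp_depth_first_deep (below) also recurses into regions.
  #include "VPlanCFG.h"
  using namespace llvm;
  for (VPBlockBase *VPB : vp_depth_first_shallow(Plan.getEntry()))
    dbgs() << VPB->getName() << "\n"; // each top-level block, depth-first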
LLVM_ABI bool VerifySCEV
LLVM_ABI_FOR_TEST cl::opt< bool > VPlanPrintAfterAll
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
iterator_range< df_iterator< VPBlockDeepTraversalWrapper< VPBlockBase * > > > vp_depth_first_deep(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order while traversing t...
Definition VPlanCFG.h:265
SmallVector< VPRegisterUsage, 8 > calculateRegisterUsageForPlan(VPlan &Plan, ArrayRef< ElementCount > VFs, const TargetTransformInfo &TTI, const SmallPtrSetImpl< const Value * > &ValuesToIgnore)
Estimate the register usage for Plan and vectorization factors in VFs by calculating the highest numb...
auto map_range(ContainerTy &&C, FuncTy F)
Return a range that applies F to the elements of C.
Definition STLExtras.h:365
constexpr auto bind_front(FnT &&Fn, BindArgsT &&...BindArgs)
C++20 bind_front.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1745
void collectEphemeralRecipesForVPlan(VPlan &Plan, DenseSet< VPRecipeBase * > &EphRecipes)
auto reverse(ContainerTy &&C)
Definition STLExtras.h:407
bool containsIrreducibleCFG(RPOTraversalT &RPOTraversal, const LoopInfoT &LI)
Return true if the control flow in RPOTraversal is irreducible.
Definition CFG.h:154
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1635
LLVM_ABI_FOR_TEST cl::opt< bool > EnableWideActiveLaneMask
UncountableExitStyle
Different methods of handling early exits.
Definition VPlan.h:82
@ ReadOnly
No side effects to worry about, so we can process any uncountable exits in the loop and branch either...
Definition VPlan.h:87
@ MaskedHandleExitInScalarLoop
All memory operations other than the load(s) required to determine whether an uncountable exit occurr...
Definition VPlan.h:92
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1752
LLVM_ABI cl::opt< bool > EnableLoopVectorization
constexpr uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
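For example:
  #include "llvm/Support/Alignment.h"
  uint64_t Padded = llvm::alignTo(/*Size=*/10, llvm::Align(8)); // 16: next multiple of 8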
LLVM_ABI_FOR_TEST cl::list< std::string > VPlanPrintAfterPasses
LLVM_ABI bool wouldInstructionBeTriviallyDead(const Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction would have no side effects if it was not used.
Definition Local.cpp:422
SmallVector< ValueTypeFromRangeType< R >, Size > to_vector(R &&Range)
Given a range of type R, iterate the entire range and return a SmallVector with elements of the vecto...
Type * toVectorizedTy(Type *Ty, ElementCount EC)
A helper for converting to vectorized types.
LLVM_ABI void llvm_unreachable_internal(const char *msg=nullptr, const char *file=nullptr, unsigned line=0)
This function calls abort(), and prints the optional message to stderr.
T * find_singleton(R &&Range, Predicate P, bool AllowRepeats=false)
Return the single value in Range that satisfies P(<member of Range> *, AllowRepeats)->T * returning n...
Definition STLExtras.h:1836
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
std::optional< unsigned > getMaxVScale(const Function &F, const TargetTransformInfo &TTI)
cl::opt< unsigned > ForceTargetInstructionCost
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
format_object< Ts... > format(const char *Fmt, const Ts &... Vals)
These are helper functions used to produce formatted output.
Definition Format.h:129
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition MathExtras.h:394
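For example, computing how many vector iterations cover a scalar trip count (values hypothetical):
  #include "llvm/Support/MathExtras.h"
  unsigned TripCount = 17, VF = 8;
  unsigned VectorIters = llvm::divideCeil(TripCount, VF); // 3 == ceil(17 / 8)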
bool canVectorizeTy(Type *Ty)
Returns true if Ty is a valid vector element type, void, or an unpacked literal struct where all elem...
TargetTransformInfo TTI
@ CM_EpilogueNotAllowedLowTripLoop
@ CM_EpilogueNotNeededFoldTail
@ CM_EpilogueNotAllowedFoldTail
@ CM_EpilogueNotAllowedOptSize
@ CM_EpilogueAllowed
LLVM_ABI bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
RecurKind
These are the kinds of recurrences that we support.
@ Or
Bitwise or logical OR of integers.
@ FMulAdd
Sum of float products with llvm.fmuladd(a * b + sum).
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
LLVM_ABI Value * getRecurrenceIdentity(RecurKind K, Type *Tp, FastMathFlags FMF)
Given information about a recurrence kind, return the identity for the @llvm.vector....
LLVM_ABI BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="")
Split the specified block at the specified instruction.
LLVM_ABI void reportVectorizationFailure(const StringRef DebugMsg, const StringRef OREMsg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I=nullptr)
Reports a vectorization failure: print DebugMsg for debugging purposes along with the corresponding o...
DWARFExpression::Operation Op
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
ArrayRef(const T &OneElt) -> ArrayRef< T >
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1771
auto predecessors(const MachineBasicBlock *BB)
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition iterator.h:368
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1946
cl::opt< bool > EnableVPlanNativePath
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
ArrayRef< Type * > getContainedTypes(Type *const &Ty)
Returns the types contained in Ty.
LLVM_ABI Value * addDiffRuntimeChecks(Instruction *Loc, ArrayRef< PointerDiffInfo > Checks, SCEVExpander &Expander, function_ref< Value *(IRBuilderBase &, unsigned)> GetVF, unsigned IC)
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
@ None
Don't use tail folding.
@ DataWithEVL
Use predicated EVL instructions for tail-folding.
@ DataAndControlFlow
Use predicate to control both data and control flow.
@ DataWithoutLaneMask
Same as Data, but avoids using the get.active.lane.mask intrinsic to calculate the mask and instead i...
@ Data
Use predicate only to mask operations on data in the loop.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI bool hasBranchWeightMD(const Instruction &I)
Checks if an instructions has Branch Weight Metadata.
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
Definition Hashing.h:592
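A minimal sketch, combining arbitrary fields into one hash_code:
  #include "llvm/ADT/Hashing.h"
  unsigned Opcode = 13;
  bool IsScalable = true;
  llvm::hash_code H = llvm::hash_combine(Opcode, IsScalable);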
@ Increment
Incrementally increasing token ID.
Definition AllocToken.h:26
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
Definition bit.h:347
Type * toVectorTy(Type *Scalar, ElementCount EC)
A helper function for converting Scalar types to vector types.
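A hedged sketch (ScalarTy is a hypothetical scalar Type *; header assumed):
  #include "llvm/IR/VectorTypeUtils.h"
  // For VF = 4 this turns e.g. i32 into <4 x i32>.
  llvm::Type *VecTy = llvm::toVectorTy(ScalarTy, llvm::ElementCount::getFixed(4));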
std::unique_ptr< VPlan > VPlanPtr
Definition VPlan.h:77
constexpr detail::IsaCheckPredicate< Types... > IsaPred
Function object wrapper for the llvm::isa type check.
Definition Casting.h:866
LLVM_ABI_FOR_TEST bool verifyVPlanIsValid(const VPlan &Plan)
Verify invariants for general VPlans.
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Definition Hashing.h:466
LLVM_ABI_FOR_TEST cl::opt< bool > VPlanPrintVectorRegionScope
LLVM_ABI cl::opt< bool > EnableLoopInterleaving
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition Analysis.h:29
static LLVM_ABI void collectEphemeralValues(const Loop *L, AssumptionCache *AC, SmallPtrSetImpl< const Value * > &EphValues)
Collect a loop's ephemeral values (those used only by an assume or similar intrinsics in the loop).
An information struct used to provide DenseMap with the various necessary components for a given valu...
Encapsulate information regarding vectorization of a loop and its epilogue.
EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF, ElementCount EVF, unsigned EUF, VPlan &EpiloguePlan)
A class that represents two vectorization factors (initialized with 0 by default).
static FixedScalableVFPair getNone()
This holds details about a histogram operation – a load -> update -> store sequence where each lane i...
TargetLibraryInfo * TLI
LLVM_ABI LoopVectorizeResult runImpl(Function &F)
LLVM_ABI bool processLoop(Loop *L)
ProfileSummaryInfo * PSI
LoopAccessInfoManager * LAIs
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI LoopVectorizePass(LoopVectorizeOptions Opts={})
ScalarEvolution * SE
AssumptionCache * AC
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
OptimizationRemarkEmitter * ORE
std::function< BlockFrequencyInfo &()> GetBFI
TargetTransformInfo * TTI
Storage for information about made changes.
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:89
A marker analysis to determine if extra passes should be run after loop vectorization.
static LLVM_ABI AnalysisKey Key
Holds the VFShape for a specific scalar to vector function mapping.
std::optional< unsigned > getParamIndexForOptionalMask() const
Instruction Set Architecture.
Encapsulates information needed to describe a parameter.
A range of powers-of-2 vectorization factors with fixed start and adjustable end.
ElementCount End
Struct to hold various analysis needed for cost computations.
LoopVectorizationCostModel & CM
static bool isFreeScalarIntrinsic(Intrinsic::ID ID)
Returns true if ID is a pseudo intrinsic that is dropped via scalarization rather than widened.
Definition VPlan.cpp:1957
bool skipCostComputation(Instruction *UI, bool IsVector) const
Return true if the cost for UI shouldn't be computed, e.g.
InstructionCost getLegacyCost(Instruction *UI, ElementCount VF) const
Return the cost for UI with VF using the legacy cost model as fallback until computing the cost of al...
uint64_t getPredBlockCostDivisor(BasicBlock *BB) const
TargetTransformInfo::TargetCostKind CostKind
SmallPtrSet< Instruction *, 8 > SkipCostComputation
A VPValue representing a live-in from the input IR or a constant.
Definition VPlanValue.h:240
A struct that represents some properties of the register usage of a loop.
InstructionCost spillCost(const TargetTransformInfo &TTI, TargetTransformInfo::TargetCostKind CostKind, unsigned OverrideMaxNumRegs=0) const
Calculate the estimated cost of any spills due to using more registers than the number available for ...
VPTransformState holds information passed down when "executing" a VPlan, needed for generating the ou...
A recipe for widening load operations, using the address to load from and an optional mask.
Definition VPlan.h:3579
A recipe for widening store operations, using the stored value, the address to store to and an option...
Definition VPlan.h:3660
static LLVM_ABI_FOR_TEST bool tryToConvertVPInstructionsToVPRecipes(VPlan &Plan, const TargetLibraryInfo &TLI)
Replaces the VPInstructions in Plan with corresponding widen recipes.
static void makeMemOpWideningDecisions(VPlan &Plan, VFRange &Range, VPRecipeBuilder &RecipeBuilder)
Convert load/store VPInstructions in Plan into widened or replicate recipes.
static void addMinimumIterationCheck(VPlan &Plan, ElementCount VF, unsigned UF, ElementCount MinProfitableTripCount, bool RequiresScalarEpilogue, bool TailFolded, Loop *OrigLoop, const uint32_t *MinItersBypassWeights, DebugLoc DL, PredicatedScalarEvolution &PSE, VPBasicBlock *CheckBlock=nullptr)
static bool createHeaderPhiRecipes(VPlan &Plan, PredicatedScalarEvolution &PSE, Loop &OrigLoop, const MapVector< PHINode *, InductionDescriptor > &Inductions, const MapVector< PHINode *, RecurrenceDescriptor > &Reductions, const SmallPtrSetImpl< const PHINode * > &FixedOrderRecurrences, const SmallPtrSetImpl< PHINode * > &InLoopReductions, bool AllowReordering)
Replace VPPhi recipes in Plan's header with corresponding VPHeaderPHIRecipe subclasses for inductions...
static void materializeBroadcasts(VPlan &Plan)
Add explicit broadcasts for live-ins and VPValues defined in Plan's entry block if they are used as v...
static void materializePacksAndUnpacks(VPlan &Plan)
Add explicit Build[Struct]Vector recipes to Pack multiple scalar values into vectors and Unpack recip...
static void createInterleaveGroups(VPlan &Plan, const SmallPtrSetImpl< const InterleaveGroup< Instruction > * > &InterleaveGroups, const bool &EpilogueAllowed)
static LLVM_ABI_FOR_TEST std::unique_ptr< VPlan > buildVPlan0(Loop *TheLoop, LoopInfo &LI, Type *InductionTy, DebugLoc IVDL, PredicatedScalarEvolution &PSE, LoopVersioning *LVer=nullptr)
Create a base VPlan0, serving as the common starting point for all later candidates.
static bool simplifyKnownEVL(VPlan &Plan, ElementCount VF, PredicatedScalarEvolution &PSE)
Try to simplify VPInstruction::ExplicitVectorLength recipes when the AVL is known to be <= VF,...
static void removeBranchOnConst(VPlan &Plan, bool OnlyLatches=false)
Remove BranchOnCond recipes with true or false conditions together with removing dead edges to their ...
static void introduceMasksAndLinearize(VPlan &Plan)
Predicate and linearize the control-flow in the only loop region of Plan.
static void materializeFactors(VPlan &Plan, VPBasicBlock *VectorPH, ElementCount VF)
Materialize UF, VF and VFxUF to be computed explicitly using VPInstructions.
static void foldTailByMasking(VPlan &Plan)
Adapts the vector loop region for tail folding by introducing a header mask and conditionally executi...
static void materializeBackedgeTakenCount(VPlan &Plan, VPBasicBlock *VectorPH)
Materialize the backedge-taken count to be computed explicitly using VPInstructions.
static void addMinimumVectorEpilogueIterationCheck(VPlan &Plan, Value *VectorTripCount, bool RequiresScalarEpilogue, ElementCount EpilogueVF, unsigned EpilogueUF, unsigned MainLoopStep, unsigned EpilogueLoopStep, ScalarEvolution &SE)
Add a check to Plan to see if the epilogue vector loop should be executed.
static void addActiveLaneMask(VPlan &Plan, bool UseActiveLaneMaskForControlFlow)
Replace (ICMP_ULE, wide canonical IV, backedge-taken-count) checks with an (active-lane-mask recipe,...
static bool handleMultiUseReductions(VPlan &Plan, OptimizationRemarkEmitter *ORE, Loop *TheLoop)
Try to legalize reductions with multiple in-loop uses.
static void dropPoisonGeneratingRecipes(VPlan &Plan, const std::function< bool(BasicBlock *)> &BlockNeedsPredication)
Drop poison flags from recipes that may generate a poison value that is used after vectorization,...
static void replaceWideCanonicalIVWithWideIV(VPlan &Plan, ScalarEvolution &SE, const TargetTransformInfo &TTI, TargetTransformInfo::TargetCostKind CostKind, ElementCount VF, unsigned UF, const SmallPtrSetImpl< const Value * > &ValuesToIgnore)
Replace a VPWidenCanonicalIVRecipe if it is present in Plan, with a VPWidenIntOrFpInductionRecipe,...
static void convertToVariableLengthStep(VPlan &Plan)
Transform loops with variable-length stepping after region dissolution.
static void addBranchWeightToMiddleTerminator(VPlan &Plan, ElementCount VF, std::optional< unsigned > VScaleForTuning)
Add branch weight metadata, if the Plan's middle block is terminated by a BranchOnCond recipe.
static std::unique_ptr< VPlan > narrowInterleaveGroups(VPlan &Plan, const TargetTransformInfo &TTI)
Try to find a single VF among Plan's VFs for which all interleave groups (with known minimum VF eleme...
static bool handleFindLastReductions(VPlan &Plan)
Check if Plan contains any FindLast reductions.
static void createInLoopReductionRecipes(VPlan &Plan, ElementCount MinVF)
Create VPReductionRecipes for in-loop reductions.
static void unrollByUF(VPlan &Plan, unsigned UF)
Explicitly unroll Plan by UF.
static DenseMap< const SCEV *, Value * > expandSCEVs(VPlan &Plan, ScalarEvolution &SE)
Expand VPExpandSCEVRecipes in Plan's entry block.
static void convertToConcreteRecipes(VPlan &Plan)
Lower abstract recipes to concrete ones, that can be codegen'd.
static void expandBranchOnTwoConds(VPlan &Plan)
Expand BranchOnTwoConds instructions into explicit CFG with BranchOnCond instructions.
static void materializeVectorTripCount(VPlan &Plan, VPBasicBlock *VectorPHVPBB, bool TailByMasking, bool RequiresScalarEpilogue, VPValue *Step, std::optional< uint64_t > MaxRuntimeStep=std::nullopt)
Materialize vector trip count computations to a set of VPInstructions.
static void hoistPredicatedLoads(VPlan &Plan, PredicatedScalarEvolution &PSE, const Loop *L)
Hoist predicated loads from the same address to the loop entry block, if they are guaranteed to execu...
static void optimizeFindIVReductions(VPlan &Plan, PredicatedScalarEvolution &PSE, Loop &L)
Optimize FindLast reductions selecting IVs (or expressions of IVs) by converting them to FindIV reduc...
static void convertToAbstractRecipes(VPlan &Plan, VPCostContext &Ctx, VFRange &Range)
This function converts initial recipes to the abstract recipes and clamps Range based on cost model f...
static void materializeConstantVectorTripCount(VPlan &Plan, ElementCount BestVF, unsigned BestUF, PredicatedScalarEvolution &PSE)
static void makeScalarizationDecisions(VPlan &Plan, VFRange &Range)
Make VPlan-based scalarization decision prior to delegating to the ones made by the legacy CM.
static void addExplicitVectorLength(VPlan &Plan, const std::optional< unsigned > &MaxEVLSafeElements)
Add a VPCurrentIterationPHIRecipe and related recipes to Plan and replaces all uses of the canonical ...
static void adjustFirstOrderRecurrenceMiddleUsers(VPlan &Plan, VFRange &Range)
Adjust first-order recurrence users in the middle block: create penultimate element extracts for LCSS...
static void optimizeEVLMasks(VPlan &Plan)
Optimize recipes which use an EVL-based header mask to VP intrinsics, for example:
static LLVM_ABI_FOR_TEST bool handleEarlyExits(VPlan &Plan, UncountableExitStyle Style, Loop *TheLoop, PredicatedScalarEvolution &PSE, DominatorTree &DT, AssumptionCache *AC)
Update Plan to account for all early exits.
static void replaceSymbolicStrides(VPlan &Plan, PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &StridesMap)
Replace symbolic strides from StridesMap in Plan with constants when possible.
static bool handleMaxMinNumReductions(VPlan &Plan)
Check if Plan contains any FMaxNum or FMinNum reductions.
static LLVM_ABI_FOR_TEST void createLoopRegions(VPlan &Plan)
Replace loops in Plan's flat CFG with VPRegionBlocks, turning Plan's flat CFG into a hierarchical CFG...
static void removeDeadRecipes(VPlan &Plan)
Remove dead recipes from Plan.
static void attachCheckBlock(VPlan &Plan, Value *Cond, BasicBlock *CheckBlock, bool AddBranchWeights)
Wrap runtime check block CheckBlock in a VPIRBB and Cond in a VPValue and connect the block to Plan,...
static void simplifyRecipes(VPlan &Plan)
Perform instcombine-like simplifications on recipes in Plan.
static void sinkPredicatedStores(VPlan &Plan, PredicatedScalarEvolution &PSE, const Loop *L)
Sink predicated stores to the same address with complementary predicates (P and NOT P) to an uncondit...
static void replicateByVF(VPlan &Plan, ElementCount VF)
Replace replicating VPReplicateRecipe, VPScalarIVStepsRecipe and VPInstruction in Plan with VF single...
static void addIterationCountCheckBlock(VPlan &Plan, ElementCount VF, unsigned UF, bool RequiresScalarEpilogue, Loop *OrigLoop, const uint32_t *MinItersBypassWeights, DebugLoc DL, PredicatedScalarEvolution &PSE)
Add a new check block before the vector preheader to Plan to check if the main vector loop should be ...
static void clearReductionWrapFlags(VPlan &Plan)
Clear NSW/NUW flags from reduction instructions if necessary.
static void optimizeInductionLiveOutUsers(VPlan &Plan, PredicatedScalarEvolution &PSE, bool FoldTail)
If there's a single exit block, optimize its phi recipes that use exiting IV values by feeding them p...
static void createPartialReductions(VPlan &Plan, VPCostContext &CostCtx, VFRange &Range)
Detect and create partial reduction recipes for scaled reductions in Plan.
static void cse(VPlan &Plan)
Perform common-subexpression-elimination on Plan.
static LLVM_ABI_FOR_TEST void optimize(VPlan &Plan)
Apply VPlan-to-VPlan optimizations to Plan, including induction recipe optimizations,...
static void dissolveLoopRegions(VPlan &Plan)
Replace loop regions with explicit CFG.
static void truncateToMinimalBitwidths(VPlan &Plan, const MapVector< Instruction *, uint64_t > &MinBWs)
Insert truncates and extends for any truncated recipe.
static void optimizeForVFAndUF(VPlan &Plan, ElementCount BestVF, unsigned BestUF, PredicatedScalarEvolution &PSE)
Optimize Plan based on BestVF and BestUF.
static void convertEVLExitCond(VPlan &Plan)
Replaces the exit condition from (branch-on-cond eq CanonicalIVInc, VectorTripCount) to (branch-on-co...
static LLVM_ABI_FOR_TEST void addMiddleCheck(VPlan &Plan, bool TailFolded)
If a check is needed to guard executing the scalar epilogue loop, it will be added to the middle bloc...
TODO: The following VectorizationFactor was pulled out of the LoopVectorizationCostModel class.
InstructionCost Cost
Cost of the loop with that width.
ElementCount MinProfitableTripCount
The minimum trip count required to make vectorization profitable, e.g.
ElementCount Width
Vector width with best cost.
InstructionCost ScalarCost
Cost of the scalar loop.
static VectorizationFactor Disabled()
Width 1 means no vectorization, cost 0 means uncomputed cost.
static LLVM_ABI bool HoistRuntimeChecks