//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
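//
// For example (illustrative only), with a vectorization factor of 4, a loop
// such as
//   for (i = 0; i < n; i += 1) a[i] = b[i] + 1;
// is conceptually rewritten so that each wide iteration computes
// a[i..i+3] = b[i..i+3] + 1 and the index advances by 4.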
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//
55
58#include "VPRecipeBuilder.h"
59#include "VPlan.h"
60#include "VPlanAnalysis.h"
61#include "VPlanCFG.h"
62#include "VPlanHelpers.h"
63#include "VPlanPatternMatch.h"
64#include "VPlanTransforms.h"
65#include "VPlanUtils.h"
66#include "VPlanVerifier.h"
67#include "llvm/ADT/APInt.h"
68#include "llvm/ADT/ArrayRef.h"
69#include "llvm/ADT/DenseMap.h"
71#include "llvm/ADT/Hashing.h"
72#include "llvm/ADT/MapVector.h"
73#include "llvm/ADT/STLExtras.h"
76#include "llvm/ADT/Statistic.h"
77#include "llvm/ADT/StringRef.h"
78#include "llvm/ADT/Twine.h"
79#include "llvm/ADT/TypeSwitch.h"
84#include "llvm/Analysis/CFG.h"
101#include "llvm/IR/Attributes.h"
102#include "llvm/IR/BasicBlock.h"
103#include "llvm/IR/CFG.h"
104#include "llvm/IR/Constant.h"
105#include "llvm/IR/Constants.h"
106#include "llvm/IR/DataLayout.h"
107#include "llvm/IR/DebugInfo.h"
108#include "llvm/IR/DebugLoc.h"
109#include "llvm/IR/DerivedTypes.h"
111#include "llvm/IR/Dominators.h"
112#include "llvm/IR/Function.h"
113#include "llvm/IR/IRBuilder.h"
114#include "llvm/IR/InstrTypes.h"
115#include "llvm/IR/Instruction.h"
116#include "llvm/IR/Instructions.h"
118#include "llvm/IR/Intrinsics.h"
119#include "llvm/IR/MDBuilder.h"
120#include "llvm/IR/Metadata.h"
121#include "llvm/IR/Module.h"
122#include "llvm/IR/Operator.h"
123#include "llvm/IR/PatternMatch.h"
125#include "llvm/IR/Type.h"
126#include "llvm/IR/Use.h"
127#include "llvm/IR/User.h"
128#include "llvm/IR/Value.h"
129#include "llvm/IR/Verifier.h"
130#include "llvm/Support/Casting.h"
132#include "llvm/Support/Debug.h"
147#include <algorithm>
148#include <cassert>
149#include <cmath>
150#include <cstdint>
151#include <functional>
152#include <iterator>
153#include <limits>
154#include <memory>
155#include <string>
156#include <tuple>
157#include <utility>
158
using namespace llvm;
using namespace SCEVPatternMatch;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

#ifndef NDEBUG
const char VerboseDebug[] = DEBUG_TYPE "-verbose";
#endif

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
STATISTIC(LoopsEarlyExitVectorized, "Number of early exit loops vectorized");

175 "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
176 cl::desc("Enable vectorization of epilogue loops."));
177
179 "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
180 cl::desc("When epilogue vectorization is enabled, and a value greater than "
181 "1 is specified, forces the given VF for all applicable epilogue "
182 "loops."));
183
185 "epilogue-vectorization-minimum-VF", cl::Hidden,
186 cl::desc("Only loops with vectorization factor equal to or larger than "
187 "the specified value are considered for epilogue vectorization."));
188
189/// Loops with a known constant trip count below this number are vectorized only
190/// if no scalar iteration overheads are incurred.
192 "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
193 cl::desc("Loops with a constant trip count that is smaller than this "
194 "value are vectorized only if no scalar iteration overheads "
195 "are incurred."));
196
198 "vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
199 cl::desc("The maximum allowed number of runtime memory checks"));
200
/// Option -tail-folding-policy expresses a preference for tail folding over
/// creating a scalar epilogue loop: the vectorizer will try to fold the tail
/// loop (epilogue) into the vector body and predicate the instructions
/// accordingly. If tail folding fails, these values select the fallback
/// strategy:
enum class TailFoldingPolicyTy { None, PreferFoldTail, AlwaysFoldTail };

static cl::opt<TailFoldingPolicyTy> TailFoldingPolicy(
    "tail-folding-policy", cl::init(TailFoldingPolicyTy::None), cl::Hidden,
    cl::desc("Tail-folding preferences over creating an epilogue loop."),
    cl::values(
        clEnumValN(TailFoldingPolicyTy::None, "dont-fold-tail",
                   "Don't tail-fold loops."),
        clEnumValN(TailFoldingPolicyTy::PreferFoldTail, "prefer-fold-tail",
                   "prefer tail-folding, otherwise create an epilogue when "
                   "appropriate."),
        clEnumValN(TailFoldingPolicyTy::AlwaysFoldTail, "always-fold-tail",
                   "always tail-fold, don't attempt vectorization if "
                   "tail-folding fails.")));

static cl::opt<TailFoldingStyle> ForceTailFoldingStyle(
    "force-tail-folding-style", cl::desc("Force the tail folding style"),
    cl::init(TailFoldingStyle::None),
    cl::values(
        clEnumValN(TailFoldingStyle::None, "none", "Disable tail folding"),
        clEnumValN(
            TailFoldingStyle::Data, "data",
            "Create lane mask for data only, using active.lane.mask intrinsic"),
        clEnumValN(TailFoldingStyle::DataWithoutLaneMask,
                   "data-without-lane-mask",
                   "Create lane mask with compare/stepvector"),
        clEnumValN(TailFoldingStyle::DataAndControlFlow, "data-and-control-flow",
                   "Create lane mask using active.lane.mask intrinsic, and use "
                   "it for both data and control flow"),
        clEnumValN(TailFoldingStyle::DataWithEVL, "data-with-evl",
                   "Use predicated EVL instructions for tail folding. If EVL "
                   "is unsupported, fallback to data-without-lane-mask.")));

static cl::opt<bool> EnableWideActiveLaneMask(
    "enable-wide-lane-mask", cl::init(false), cl::Hidden,
    cl::desc("Enable use of wide lane masks when used for control flow in "
             "tail-folded loops"));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<bool> ForceOrderedReductions(
    "force-ordered-reductions", cl::init(false), cl::Hidden,
    cl::desc("Enable the vectorization of loops with in-order (strict) "
             "FP reductions"));

static cl::opt<bool> PreferPredicatedReductionSelect(
    "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
    cl::desc(
        "Prefer predicating a reduction operation over an after loop select."));

cl::opt<bool> llvm::EnableVPlanNativePath(
    "enable-vplan-native-path", cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

cl::opt<bool>
    llvm::VerifyEachVPlan("vplan-verify-each",
#ifdef EXPENSIVE_CHECKS
                          cl::init(true),
#else
                          cl::init(false),
#endif
                          cl::Hidden,
                          cl::desc("Verify VPlans after VPlan transforms."));

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
static cl::opt<bool> PrintVPlansAfterAll(
    "vplan-print-after-all", cl::init(false), cl::Hidden,
    cl::desc("Print VPlans after all VPlan transformations."));

static cl::opt<std::string> PrintVPlansAfter(
    "vplan-print-after", cl::Hidden,
    cl::desc("Print VPlans after specified VPlan transformations (regexp)."));

static cl::opt<bool> PrintVPlanVectorRegionScope(
    "vplan-print-vector-region-scope", cl::init(false), cl::Hidden,
    cl::desc("Limit VPlan printing to vector loop region in "
             "`-vplan-print-after*` if the plan has one."));
#endif

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

static cl::opt<cl::boolOrDefault> ForceSafeDivisor(
    "force-widen-divrem-via-safe-divisor", cl::Hidden,
    cl::desc(
        "Override cost based safe divisor widening for div/rem instructions"));

static cl::opt<bool> EnableEarlyExitVectorization(
    "enable-early-exit-vectorization", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable vectorization of early exit loops with uncountable exits."));

// Likelihood of bypassing the vectorized loop because there are zero trips left
// after the prolog. See `emitIterationCountCheck`.
static constexpr uint32_t MinItersBypassWeights[] = {1, 127};
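// (Illustrative: the weights {1, 127} encode odds of roughly 1 in 128 that
// the bypass branch is taken.)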

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type.
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  // Determine if an array of N elements of type Ty is "bitcast compatible"
  // with a <N x Ty> vector.
  // This is only true if there is no padding between the array elements.
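  // For example, on typical targets x86_fp80 has a type size of 80 bits but
  // an alloc size of 96 or 128 bits (irregular), while i32 is 32 bits for
  // both (regular).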
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A version of ScalarEvolution::getSmallConstantTripCount that returns an
/// ElementCount to include loops whose trip count is a function of vscale.
static ElementCount getSmallConstantTripCount(ScalarEvolution *SE,
                                              const Loop *L) {
  if (unsigned ExpectedTC = SE->getSmallConstantTripCount(L))
    return ElementCount::getFixed(ExpectedTC);

  const SCEV *BTC = SE->getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BTC))
    return ElementCount::getFixed(0);

  const SCEV *ExitCount = SE->getTripCountFromExitCount(BTC, BTC->getType(), L);
  if (isa<SCEVVScale>(ExitCount))
    return ElementCount::getScalable(1);

  const APInt *Scale;
  if (match(ExitCount, m_scev_Mul(m_scev_APInt(Scale), m_SCEVVScale())))
    if (cast<SCEVMulExpr>(ExitCount)->hasNoUnsignedWrap())
      if (Scale->getActiveBits() <= 32)
        return ElementCount::getScalable(Scale->getZExtValue());

  return ElementCount::getFixed(0);
}

/// Get the maximum trip count for \p L from the SCEV unsigned range, excluding
/// zero from the range. Only valid when not folding the tail, as the minimum
/// iteration count check guards against a zero trip count. Returns 0 if
/// unknown.
static unsigned getMaxTCFromNonZeroRange(PredicatedScalarEvolution &PSE,
                                         Loop *L) {
  const SCEV *BTC = PSE.getBackedgeTakenCount();
  if (isa<SCEVCouldNotCompute>(BTC))
    return 0;
  ScalarEvolution *SE = PSE.getSE();
  const SCEV *TripCount = SE->getTripCountFromExitCount(BTC, BTC->getType(), L);
  ConstantRange TCRange = SE->getUnsignedRange(TripCount);
  APInt MaxTCFromRange = TCRange.getUnsignedMax();
  if (!MaxTCFromRange.isZero() && MaxTCFromRange.getActiveBits() <= 32)
    return MaxTCFromRange.getZExtValue();
  return 0;
}
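
// (Illustrative: if SCEV bounds the trip count to the unsigned range [1, 100],
// this returns 100; an unknown count, or a maximum wider than 32 bits,
// returns 0.)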

/// Returns the "best known" trip count, which is either a valid positive trip
/// count or std::nullopt when an estimate cannot be made (including when the
/// trip count would overflow), for the specified loop \p L as defined by the
/// following procedure:
/// 1) Returns exact trip count if it is known.
/// 2) Returns expected trip count according to profile data if any.
/// 3) Returns upper bound estimate if known, and if \p CanUseConstantMax.
/// 4) Returns the maximum trip count from the SCEV range excluding zero,
///    if \p CanUseConstantMax and \p CanExcludeZeroTrips.
/// 5) Returns std::nullopt if all of the above failed.
static std::optional<ElementCount>
getSmallBestKnownTC(PredicatedScalarEvolution &PSE, Loop *L,
                    bool CanUseConstantMax = true,
                    bool CanExcludeZeroTrips = false) {
  // Check if exact trip count is known.
  if (auto ExpectedTC = getSmallConstantTripCount(PSE.getSE(), L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return ElementCount::getFixed(*EstimatedTC);

  if (!CanUseConstantMax)
    return std::nullopt;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = PSE.getSmallConstantMaxTripCount())
    return ElementCount::getFixed(ExpectedTC);

  // Get the maximum trip count from the SCEV range excluding zero. This is
  // only safe when not folding the tail, as the minimum iteration count check
  // prevents entering the vector loop with a zero trip count.
  if (CanUseConstantMax && CanExcludeZeroTrips)
    if (unsigned RefinedTC = getMaxTCFromNonZeroRange(PSE, L))
      return ElementCount::getFixed(RefinedTC);

  return std::nullopt;
}
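
// (Illustrative: for a loop whose exact trip count is unknown but whose
// profile data estimates roughly 1000 iterations, step 2 above returns
// ElementCount::getFixed(1000).)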

namespace {
// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

using SCEV2ValueTy = DenseMap<const SCEV *, Value *>;
} // namespace

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      ElementCount VecWidth, unsigned UnrollFactor,
                      LoopVectorizationCostModel *CM,
                      GeneratedRTChecks &RTChecks, VPlan &Plan)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TTI(TTI), AC(AC),
        VF(VecWidth), UF(UnrollFactor), Builder(PSE.getSE()->getContext()),
        Cost(CM), RTChecks(RTChecks), Plan(Plan),
        VectorPHVPBB(cast<VPBasicBlock>(
            Plan.getVectorLoopRegion()->getSinglePredecessor())) {}

  virtual ~InnerLoopVectorizer() = default;

  /// Creates a basic block for the scalar preheader. Both
  /// EpilogueVectorizerMainLoop and EpilogueVectorizerEpilogueLoop overwrite
  /// the method to create additional blocks and checks needed for epilogue
  /// vectorization.
  virtual BasicBlock *createScalarPreheader();

  /// Fix the vectorized code, taking care of header phi's, and more.
  void fixVectorizedLoop(VPTransformState &State);

  /// Fix the non-induction PHIs in \p Plan.
  void fixNonInductionPHIs(VPTransformState &State);

protected:
  friend class LoopVectorizationPlanner;

  /// Create and return a new IR basic block for the scalar preheader whose
  /// name is prefixed with \p Prefix.
  BasicBlock *createScalarPreheader(StringRef Prefix);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart() {}
  virtual void printDebugTracesAtEnd() {}
  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning the checks, if vectorization turns out unprofitable.
  GeneratedRTChecks &RTChecks;

  /// The VPlan executed by this vectorizer.
  VPlan &Plan;

  /// The vector preheader block of \p Plan, used as target for check blocks
  /// introduced during skeleton creation.
  VPBasicBlock *VectorPHVPBB;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;
  VPlan &EpiloguePlan;

  EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
                                ElementCount EVF, unsigned EUF,
                                VPlan &EpiloguePlan)
      : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF),
        EpiloguePlan(EpiloguePlan) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, firstly to setup the
/// skeleton and vectorize the main loop, and secondly to complete the skeleton
/// from the first step and vectorize the epilogue. This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC,
      EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM,
      GeneratedRTChecks &Checks, VPlan &Plan, ElementCount VecWidth,
      ElementCount MinProfitableTripCount, unsigned UnrollFactor)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TTI, AC, VecWidth,
                            UnrollFactor, CM, Checks, Plan),
        EPI(EPI) {}

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;

protected:
  BasicBlock *emitIterationCountCheck(BasicBlock *Bypass, bool ForEpilogue);
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                             LoopInfo *LI, DominatorTree *DT,
                             const TargetTransformInfo *TTI,
                             AssumptionCache *AC,
                             EpilogueLoopVectorizationInfo &EPI,
                             LoopVectorizationCostModel *CM,
                             GeneratedRTChecks &Checks, VPlan &Plan)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TTI, AC, EPI, CM,
                                       Checks, Plan, EPI.MainLoopVF,
                                       EPI.MainLoopVF, EPI.MainLoopUF) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e., the first pass of VPlan execution).
  BasicBlock *createScalarPreheader() override;

protected:
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

// A specialized derived class of inner loop vectorizer that performs
// vectorization of *epilogue* loops in the process of vectorizing loops and
// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                                 LoopInfo *LI, DominatorTree *DT,
                                 const TargetTransformInfo *TTI,
                                 AssumptionCache *AC,
                                 EpilogueLoopVectorizationInfo &EPI,
                                 LoopVectorizationCostModel *CM,
                                 GeneratedRTChecks &Checks, VPlan &Plan)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TTI, AC, EPI, CM,
                                       Checks, Plan, EPI.EpilogueVF,
                                       EPI.EpilogueVF, EPI.EpilogueUF) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e., the second pass of VPlan execution).
  BasicBlock *createScalarPreheader() override;

protected:
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its operands.
static DebugLoc getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return DebugLoc::getUnknown();

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I->getDebugLoc();

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst->getDebugLoc();
  }

  return I->getDebugLoc();
}

/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
/// is passed, the message relates to that particular instruction.
#ifndef NDEBUG
static void debugVectorizationMessage(const StringRef Prefix,
                                      const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: " << Prefix << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. If \p DL is passed, use it as debug location for
/// the remark. \return the remark object that can be streamed to.
static OptimizationRemarkAnalysis
createLVAnalysis(const char *PassName, StringRef RemarkName,
                 const Loop *TheLoop, Instruction *I, DebugLoc DL = {}) {
  BasicBlock *CodeRegion = I ? I->getParent() : TheLoop->getHeader();
  // If debug location is attached to the instruction, use it. Otherwise if DL
  // was not provided, use the loop's.
  if (I && I->getDebugLoc())
    DL = I->getDebugLoc();
  else if (!DL)
    DL = TheLoop->getStartLoc();

  return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
}

namespace llvm {

/// Return the runtime value for VF.
Value *getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF) {
  return B.CreateElementCount(Ty, VF);
}

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, false /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << "loop not vectorized: " << OREMsg);
}

void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
                             OptimizationRemarkEmitter *ORE,
                             const Loop *TheLoop, Instruction *I, DebugLoc DL) {
  LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
  LoopVectorizeHints Hints(TheLoop, false /* doesn't matter */, *ORE);
  ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop,
                             I, DL)
            << Msg);
}

/// Report successful vectorization of the loop. In case an outer loop is
/// vectorized, prepend "outer" to the vectorization remark.
static void reportVectorization(OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                VectorizationFactor VF, unsigned IC) {
  LLVM_DEBUG(debugVectorizationMessage(
      "Vectorizing: ", TheLoop->isInnermost() ? "innermost loop" : "outer loop",
      nullptr));
  StringRef LoopType = TheLoop->isInnermost() ? "" : "outer ";
  ORE->emit([&]() {
    return OptimizationRemark(LV_NAME, "Vectorized", TheLoop->getStartLoc(),
                              TheLoop->getHeader())
           << "vectorized " << LoopType << "loop (vectorization width: "
           << ore::NV("VectorizationFactor", VF.Width)
           << ", interleaved count: " << ore::NV("InterleaveCount", IC) << ")";
  });
}

} // end namespace llvm

namespace llvm {

// Loop vectorization cost-model hints how the epilogue/tail loop should be
// lowered.
enum EpilogueLowering {

  // The default: allowing epilogues.
  CM_EpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_EpilogueNotAllowedOptSize,

  // A special case of vectorization with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_EpilogueNotAllowedLowTripLoop,

  // Loop hint indicating an epilogue is undesired, apply tail folding.
  CM_EpilogueNotNeededFoldTail,

  // Directive indicating we must either fold the epilogue/tail or not
  // vectorize.
  CM_EpilogueNotAllowedFoldTail
};

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
  friend class LoopVectorizationPlanner;

public:
  LoopVectorizationCostModel(EpilogueLowering EL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE,
                             std::function<BlockFrequencyInfo &()> GetBFI,
                             const Function *F, const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI,
                             VFSelectionContext &Config)
      : Config(Config), EpilogueLoweringStatus(EL), TheLoop(L), PSE(PSE),
        LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), AC(AC), ORE(ORE),
        GetBFI(std::move(GetBFI)), TheFunction(F), Hints(Hints),
        InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factors (both fixed and
  /// scalable). If the factors are 0, vectorization and interleaving should be
  /// avoided up front.
  FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);

  /// Memory access instruction may be vectorized in more than one way.
  /// Form of instruction after vectorization depends on cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decisions map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(ElementCount VF);

  /// A call may be vectorized in different ways depending on whether we have
  /// vectorized variants available and whether the target supports masking.
  /// This function analyzes all calls in the function at the supplied VF,
  /// makes a decision based on the costs of available options, and stores that
  /// decision in a map for use in planning and plan execution.
  void setVectorizedCallDecision(ElementCount VF);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
    assert(VF.isVector() &&
           "Profitable to scalarize relevant only for VF > 1.");
    assert(
        TheLoop->isInnermost() &&
        "cost-model should not be used for outer loops (in VPlan-native path)");

    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.contains(I);
  }

  /// Returns true if \p I is known to be uniform after vectorization.
  bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
    assert(
        TheLoop->isInnermost() &&
        "cost-model should not be used for outer loops (in VPlan-native path)");

    // If VF is scalar, then all instructions are trivially uniform.
    if (VF.isScalar())
      return true;

    // Pseudo probes must be duplicated per vector lane so that the
    // profiled loop trip count is not undercounted.
    if (isa<PseudoProbeInst>(I))
      return false;

    auto UniformsPerVF = Uniforms.find(VF);
    assert(UniformsPerVF != Uniforms.end() &&
           "VF not yet analyzed for uniformity");
    return UniformsPerVF->second.count(I);
  }

  /// Returns true if \p I is known to be scalar after vectorization.
  bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
    assert(
        TheLoop->isInnermost() &&
        "cost-model should not be used for outer loops (in VPlan-native path)");
    if (VF.isScalar())
      return true;

    auto ScalarsPerVF = Scalars.find(VF);
    assert(ScalarsPerVF != Scalars.end() &&
           "Scalar values are not calculated for VF");
    return ScalarsPerVF->second.count(I);
  }

  /// \returns True if instruction \p I can be truncated to a smaller bitwidth
  /// for vectorization factor \p VF.
  bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
    const auto &MinBWs = Config.getMinimalBitwidths();
    // Truncs must truncate at most to their destination type.
    if (isa_and_nonnull<TruncInst>(I) && MinBWs.contains(I) &&
        I->getType()->getScalarSizeInBits() < MinBWs.lookup(I))
      return false;
    return VF.isVector() && MinBWs.contains(I) &&
           !isProfitableToScalarize(I, VF) &&
           !isScalarAfterVectorization(I, VF);
  }

  /// Decision that was taken during cost calculation for memory instruction.
  enum InstWidening {
    CM_Unknown,
    CM_Widen,         // For consecutive accesses with stride +1.
    CM_Widen_Reverse, // For consecutive accesses with stride -1.
    CM_Interleave,
    CM_GatherScatter,
    CM_Scalarize,
    CM_VectorCall,
    CM_IntrinsicCall
  };

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// instruction \p I and vector width \p VF.
  void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
                           InstructionCost Cost) {
    assert(VF.isVector() && "Expected VF >=2");
    WideningDecisions[{I, VF}] = {W, Cost};
  }

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// interleaving group \p Grp and vector width \p VF.
  void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
                           ElementCount VF, InstWidening W,
                           InstructionCost Cost) {
    assert(VF.isVector() && "Expected VF >=2");
    /// Broadcast this decision to all instructions inside the group.
    /// When interleaving, the cost will only be assigned to one instruction,
    /// the insert position. For other cases, add the appropriate fraction of
    /// the total cost to each instruction. This ensures accurate costs are
    /// used, even if the insert position instruction is not used.
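    /// (Illustrative: a four-member group with total cost 8 and
    /// W != CM_Interleave records a cost of 2 on each member, so the group's
    /// cost is preserved even if the insert-position member goes unused.)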
    InstructionCost InsertPosCost = Cost;
    InstructionCost OtherMemberCost = 0;
    if (W != CM_Interleave)
      OtherMemberCost = InsertPosCost = Cost / Grp->getNumMembers();
    for (auto *I : Grp->members()) {
      if (Grp->getInsertPos() == I)
        WideningDecisions[{I, VF}] = {W, InsertPosCost};
      else
        WideningDecisions[{I, VF}] = {W, OtherMemberCost};
    }
  }

  /// Return the cost model decision for the given instruction \p I and vector
  /// width \p VF. Return CM_Unknown if this instruction did not pass
  /// through the cost modeling.
  InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
    assert(VF.isVector() && "Expected VF to be a vector VF");
    assert(
        TheLoop->isInnermost() &&
        "cost-model should not be used for outer loops (in VPlan-native path)");

    std::pair<Instruction *, ElementCount> InstOnVF(I, VF);
    auto Itr = WideningDecisions.find(InstOnVF);
    if (Itr == WideningDecisions.end())
      return CM_Unknown;
    return Itr->second.first;
  }

  /// Return the vectorization cost for the given instruction \p I and vector
  /// width \p VF.
  InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
    assert(VF.isVector() && "Expected VF >=2");
    std::pair<Instruction *, ElementCount> InstOnVF(I, VF);
    assert(WideningDecisions.contains(InstOnVF) &&
           "The cost is not calculated");
    return WideningDecisions[InstOnVF].second;
  }

  struct CallWideningDecision {
    InstWidening Kind;
    Function *Variant;
    Intrinsic::ID IID;
    std::optional<unsigned> MaskPos;
    InstructionCost Cost;
  };

  void setCallWideningDecision(CallInst *CI, ElementCount VF, InstWidening Kind,
                               Function *Variant, Intrinsic::ID IID,
                               std::optional<unsigned> MaskPos,
                               InstructionCost Cost) {
    assert(!VF.isScalar() && "Expected vector VF");
    CallWideningDecisions[{CI, VF}] = {Kind, Variant, IID, MaskPos, Cost};
  }

  CallWideningDecision getCallWideningDecision(CallInst *CI,
                                               ElementCount VF) const {
    assert(!VF.isScalar() && "Expected vector VF");
    auto I = CallWideningDecisions.find({CI, VF});
    if (I == CallWideningDecisions.end())
      return {CM_Unknown, nullptr, Intrinsic::not_intrinsic, std::nullopt, 0};
    return I->second;
  }

  /// Return True if instruction \p I is an optimizable truncate whose operand
  /// is an induction variable. Such a truncate will be removed by adding a new
  /// induction variable with the destination type.
  bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
    // If the instruction is not a truncate, return false.
    auto *Trunc = dyn_cast<TruncInst>(I);
    if (!Trunc)
      return false;

    // Get the source and destination types of the truncate.
    Type *SrcTy = toVectorTy(Trunc->getSrcTy(), VF);
    Type *DestTy = toVectorTy(Trunc->getDestTy(), VF);

    // If the truncate is free for the given types, return false. Replacing a
    // free truncate with an induction variable would add an induction variable
    // update instruction to each iteration of the loop. We exclude from this
    // check the primary induction variable since it will need an update
    // instruction regardless.
    Value *Op = Trunc->getOperand(0);
    if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
      return false;

    // If the truncated value is not an induction variable, return false.
    return Legal->isInductionPhi(Op);
  }

  /// Collects the instructions to scalarize for each predicated instruction in
  /// the loop.
  void collectInstsToScalarize(ElementCount VF);

  /// Collect values that will not be widened, including Uniforms, Scalars, and
  /// Instructions to Scalarize for the given \p VF.
  /// The sets depend on CM decision for Load/Store instructions
  /// that may be vectorized as interleave, gather-scatter or scalarized.
  /// Also make a decision on what to do about call instructions in the loop
  /// at that VF -- scalarize, call a known vector routine, or call a
  /// vector intrinsic.
  void collectNonVectorizedAndSetWideningDecisions(ElementCount VF) {
    // Do the analysis once.
    if (VF.isScalar() || Uniforms.contains(VF))
      return;
    setCostBasedWideningDecision(VF);
    collectLoopUniforms(VF);
    setVectorizedCallDecision(VF);
    collectLoopScalars(VF);
    collectInstsToScalarize(VF);
  }

  /// Given costs for both strategies, return true if the scalar predication
  /// lowering should be used for div/rem. This incorporates an override
  /// option so it is not simply a cost comparison.
  bool isDivRemScalarWithPredication(InstructionCost ScalarCost,
                                     InstructionCost SafeDivisorCost) const {
    switch (ForceSafeDivisor) {
    case cl::BOU_UNSET:
      return ScalarCost < SafeDivisorCost;
    case cl::BOU_TRUE:
      return false;
    case cl::BOU_FALSE:
      return true;
    }
    llvm_unreachable("impossible case value");
  }

  /// Returns true if \p I is an instruction which requires predication and
  /// for which our chosen predication strategy is scalarization (i.e. we
  /// don't have an alternate strategy such as masking available).
  /// \p VF is the vectorization factor that will be used to vectorize \p I.
  bool isScalarWithPredication(Instruction *I, ElementCount VF);

  /// Wrapper function for LoopVectorizationLegality::isMaskRequired,
  /// that passes the Instruction \p I and if we fold tail.
  bool isMaskRequired(Instruction *I) const;

  /// Returns true if \p I is an instruction that needs to be predicated
  /// at runtime. The result is independent of the predication mechanism.
  /// Superset of instructions that return true for isScalarWithPredication.
  bool isPredicatedInst(Instruction *I) const;

  /// A helper function that returns how much we should divide the cost of a
  /// predicated block by. Typically this is the reciprocal of the block
  /// probability, i.e. if we return X we are assuming the predicated block will
  /// execute once for every X iterations of the loop header so the block should
  /// only contribute 1/X of its cost to the total cost calculation, but when
  /// optimizing for code size it will just be 1 as code size costs don't depend
  /// on execution probabilities.
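  /// (E.g. a divisor of 2 means the block is expected to execute once every
  /// two header iterations, so only half of its cost is counted.)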
  ///
  /// Note that if a block wasn't originally predicated but was predicated due
  /// to tail folding, the divisor will still be 1 because it will execute for
  /// every iteration of the loop header.
  inline uint64_t
  getPredBlockCostDivisor(TargetTransformInfo::TargetCostKind CostKind,
                          const BasicBlock *BB);

  /// Returns true if an artificially high cost for emulated masked memrefs
  /// should be used.
  bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);

  /// Return the costs for our two available strategies for lowering a
  /// div/rem operation which requires speculating at least one lane.
  /// First result is for scalarization (will be invalid for scalable
  /// vectors); second is for the safe-divisor strategy.
  std::pair<InstructionCost, InstructionCost>
  getDivRemSpeculationCost(Instruction *I, ElementCount VF);

  /// Returns true if \p I is a memory instruction with consecutive memory
  /// access that can be widened.
  bool memoryInstructionCanBeWidened(Instruction *I, ElementCount VF);

  /// Returns true if \p I is a memory instruction in an interleaved-group
  /// of memory accesses that can be vectorized with wide vector loads/stores
  /// and shuffles.
  bool interleavedAccessCanBeWidened(Instruction *I, ElementCount VF) const;

  /// Check if \p Instr belongs to any interleaved access group.
  bool isAccessInterleaved(Instruction *Instr) const {
    return InterleaveInfo.isInterleaved(Instr);
  }

  /// Get the interleaved access group that \p Instr belongs to.
  const InterleaveGroup<Instruction> *
  getInterleavedAccessGroup(Instruction *Instr) const {
    return InterleaveInfo.getInterleaveGroup(Instr);
  }

  /// Returns true if we're required to use a scalar epilogue for at least
  /// the final iteration of the original loop.
  bool requiresScalarEpilogue(bool IsVectorizing) const {
    if (!isEpilogueAllowed()) {
      LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue\n");
      return false;
    }
    // If we might exit from anywhere but the latch and early exit vectorization
    // is disabled, we must run the exiting iteration in scalar form.
    if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch() &&
        !(EnableEarlyExitVectorization && Legal->hasUncountableEarlyExit())) {
      LLVM_DEBUG(dbgs() << "LV: Loop requires scalar epilogue: not exiting "
                           "from latch block\n");
      return true;
    }
    if (IsVectorizing && InterleaveInfo.requiresScalarEpilogue()) {
      LLVM_DEBUG(dbgs() << "LV: Loop requires scalar epilogue: "
                           "interleaved group requires scalar epilogue\n");
      return true;
    }
    LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue\n");
    return false;
  }

  /// Returns true if an epilogue is allowed (e.g., not prevented by
  /// optsize or a loop hint annotation).
  bool isEpilogueAllowed() const {
    return EpilogueLoweringStatus == CM_EpilogueAllowed;
  }

  /// Returns true if tail-folding is preferred over an epilogue.
  bool preferTailFolding() const {
    return EpilogueLoweringStatus == CM_EpilogueNotNeededFoldTail ||
           EpilogueLoweringStatus == CM_EpilogueNotAllowedFoldTail;
  }

  /// Returns the TailFoldingStyle that is best for the current loop.
  TailFoldingStyle getTailFoldingStyle() const {
    return ChosenTailFoldingStyle;
  }

  /// Selects and saves TailFoldingStyle.
  /// \param IsScalableVF true if scalable vector factors enabled.
  /// \param UserIC User specific interleave count.
  void setTailFoldingStyle(bool IsScalableVF, unsigned UserIC) {
    assert(ChosenTailFoldingStyle == TailFoldingStyle::None &&
           "Tail folding must not be selected yet.");
    if (!Legal->canFoldTailByMasking()) {
      ChosenTailFoldingStyle = TailFoldingStyle::None;
      return;
    }

    // Default to TTI preference, but allow command line override.
    ChosenTailFoldingStyle = TTI.getPreferredTailFoldingStyle();
    if (ForceTailFoldingStyle.getNumOccurrences())
      ChosenTailFoldingStyle = ForceTailFoldingStyle.getValue();

    if (ChosenTailFoldingStyle != TailFoldingStyle::DataWithEVL)
      return;
    // Override EVL styles if needed.
    // FIXME: Investigate opportunity for fixed vector factor.
    bool EVLIsLegal = UserIC <= 1 && IsScalableVF &&
                      TTI.hasActiveVectorLength() && !EnableVPlanNativePath;
    if (EVLIsLegal)
      return;
    // If for some reason EVL mode is unsupported, fall back to an epilogue
    // if it's allowed, or to DataWithoutLaneMask otherwise.
    if (EpilogueLoweringStatus == CM_EpilogueAllowed ||
        EpilogueLoweringStatus == CM_EpilogueNotNeededFoldTail)
      ChosenTailFoldingStyle = TailFoldingStyle::None;
    else
      ChosenTailFoldingStyle = TailFoldingStyle::DataWithoutLaneMask;

    LLVM_DEBUG(
        dbgs() << "LV: Preference for VP intrinsics indicated. Will "
                  "not try to generate VP Intrinsics "
               << (UserIC > 1
                       ? "since interleave count specified is greater than 1.\n"
                       : "due to non-interleaving reasons.\n"));
  }

  /// Returns true if all loop blocks should be masked to fold the tail loop.
  bool foldTailByMasking() const {
    return getTailFoldingStyle() != TailFoldingStyle::None;
  }
1228
  /// Returns true if the use of wide lane masks is requested and the loop is
  /// using tail-folding with a lane mask for control flow.
  bool useWideActiveLaneMask() const {
    if (!EnableWideActiveLaneMask)
      return false;

    return getTailFoldingStyle() == TailFoldingStyle::DataAndControlFlow;
  }

  /// Returns true if the instructions in this block require predication
  /// for any reason, e.g. because tail folding now requires a predicate
  /// or because the block in the original loop was predicated.
  bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const {
    return foldTailByMasking() || Legal->blockNeedsPredication(BB);
  }

  /// Returns true if VP intrinsics with explicit vector length support should
  /// be generated in the tail folded loop.
  bool foldTailWithEVL() const {
    return getTailFoldingStyle() == TailFoldingStyle::DataWithEVL;
  }

  /// Returns true if the predicated reduction select should be used to set the
  /// incoming value for the reduction phi.
  bool usePredicatedReductionSelect(RecurKind RecurrenceKind) const {
    // Force the use of a predicated reduction select since the EVL of the
    // second-to-last iteration might not be VF*UF.
    if (foldTailWithEVL())
      return true;

    // Note: For FindLast recurrences we prefer a predicated select to simplify
    // matching in handleFindLastReductions(), rather than handle multiple
    // cases.
    if (RecurrenceDescriptor::isFindLastIVRecurrenceKind(RecurrenceKind))
      return true;

    return PreferPredicatedReductionSelect ||
           TTI.preferPredicatedReductionSelect();
  }

  /// Estimate cost of an intrinsic call instruction CI if it were vectorized
  /// with factor VF. Return the cost of the instruction, including
  /// scalarization overhead if it's needed.
  InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;

  /// Estimate cost of a call instruction CI if it were vectorized with factor
  /// VF. Return the cost of the instruction, including scalarization overhead
  /// if it's needed.
  InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF) const;

  /// Invalidates decisions already taken by the cost model.
  void invalidateCostModelingDecisions() {
    WideningDecisions.clear();
    CallWideningDecisions.clear();
    Uniforms.clear();
    Scalars.clear();
  }

  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the factor width.
  InstructionCost expectedCost(ElementCount VF);

  /// Returns true if epilogue vectorization is considered profitable, and
  /// false otherwise.
  /// \p VF is the vectorization factor chosen for the original loop.
  /// \p IC is an additional scaling factor (the interleave count) applied to
  /// VF before comparing to EpilogueVectorizationMinVF.
  bool isEpilogueVectorizationProfitable(const ElementCount VF,
                                         const unsigned IC) const;

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
  InstructionCost getInstructionCost(Instruction *I, ElementCount VF);

  /// Return the cost of instructions in an inloop reduction pattern, if I is
  /// part of that pattern.
  std::optional<InstructionCost> getReductionPatternCost(Instruction *I,
                                                         ElementCount VF,
                                                         Type *VectorTy) const;

  /// Returns true if \p Op should be considered invariant and if it is
  /// trivially hoistable.
  bool shouldConsiderInvariant(Value *Op);

private:
  unsigned NumPredStores = 0;

  /// VF selection state independent of cost-modeling decisions.
  VFSelectionContext &Config;

  /// Calculate vectorization cost of memory instruction \p I.
  InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);

  /// The cost computation for scalarized memory instruction.
  InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);

  /// The cost computation for interleaving group of memory instructions.
  InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);

  /// The cost computation for Gather/Scatter instruction.
  InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);

  /// The cost computation for widening instruction \p I with consecutive
  /// memory access.
  InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);

  /// The cost calculation for Load/Store instruction \p I with uniform
  /// pointer -
  /// Load: scalar load + broadcast.
  /// Store: scalar store + (loop invariant value stored? 0 : extract of last
  /// element)
  InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);

  /// Estimate the overhead of scalarizing an instruction. This is a
  /// convenience wrapper for the type-based getScalarizationOverhead API.
  InstructionCost getScalarizationOverhead(Instruction *I,
                                           ElementCount VF) const;

  /// A type representing the costs for instructions if they were to be
  /// scalarized rather than vectorized. The entries are Instruction-Cost
  /// pairs.
  using ScalarCostsTy = MapVector<Instruction *, InstructionCost>;

  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as a predicated block.
  DenseMap<ElementCount, SmallPtrSet<BasicBlock *, 4>>
      PredicatedBBsAfterVectorization;

  /// Records whether it is allowed to have the original scalar loop execute at
  /// least once. This may be needed as a fallback loop in case runtime
  /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or doesn't divide by the VF,
  /// or as a peel-loop to handle gaps in interleave-groups.
  /// Under optsize and when the trip count is very small we don't allow any
  /// iterations to execute in the scalar loop.
  EpilogueLowering EpilogueLoweringStatus = CM_EpilogueAllowed;

  /// Control finally chosen tail folding style.
  TailFoldingStyle ChosenTailFoldingStyle = TailFoldingStyle::None;

  /// A map holding scalar costs for different vectorization factors. The
  /// presence of a cost for an instruction in the mapping indicates that the
  /// instruction will be scalarized when vectorizing with the associated
  /// vectorization factor. The entries are VF-ScalarCostTy pairs.
  DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;

  /// Holds the instructions known to be uniform after vectorization.
  /// The data is collected per VF.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;

  /// Holds the instructions known to be scalar after vectorization.
  /// The data is collected per VF.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;

  /// Holds the instructions (address computations) that are forced to be
  /// scalarized.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;

  /// Returns the expected difference in cost from scalarizing the expression
  /// feeding a predicated instruction \p PredInst. The instructions to
  /// scalarize and their scalar costs are collected in \p ScalarCosts. A
  /// non-negative return value implies the expression will be scalarized.
  /// Currently, only single-use chains are considered for scalarization.
  InstructionCost computePredInstDiscount(Instruction *PredInst,
                                          ScalarCostsTy &ScalarCosts,
                                          ElementCount VF);

  /// Collect the instructions that are uniform after vectorization. An
  /// instruction is uniform if we represent it with a single scalar value in
  /// the vectorized loop corresponding to each vector iteration. Examples of
  /// uniform instructions include pointer operands of consecutive or
  /// interleaved memory accesses. Note that although uniformity implies an
  /// instruction will be scalar, the reverse is not true. In general, a
  /// scalarized instruction will be represented by VF scalar values in the
  /// vectorized loop, each corresponding to an iteration of the original
  /// scalar loop.
  void collectLoopUniforms(ElementCount VF);

  /// Collect the instructions that are scalar after vectorization. An
  /// instruction is scalar if it is known to be uniform or will be scalarized
  /// during vectorization. collectLoopScalars should only add non-uniform nodes
  /// to the list if they are used by a load/store instruction that is marked as
  /// CM_Scalarize. Non-uniform scalarized instructions will be represented by
  /// VF values in the vectorized loop, each corresponding to an iteration of
  /// the original scalar loop.
  void collectLoopScalars(ElementCount VF);

  /// Keeps cost model vectorization decision and cost for instructions.
  /// Right now it is used for memory instructions only.
  using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
                                std::pair<InstWidening, InstructionCost>>;

  DecisionList WideningDecisions;

  using CallDecisionList =
      DenseMap<std::pair<CallInst *, ElementCount>, CallWideningDecision>;

  CallDecisionList CallWideningDecisions;

  /// Returns true if \p V is expected to be vectorized and it needs to be
  /// extracted.
  bool needsExtract(Value *V, ElementCount VF) const {
    Instruction *I = dyn_cast<Instruction>(V);
    if (VF.isScalar() || !I || !TheLoop->contains(I) ||
        TheLoop->isLoopInvariant(I) ||
        getWideningDecision(I, VF) == CM_Scalarize ||
        (isa<CallInst>(I) &&
         getCallWideningDecision(cast<CallInst>(I), VF).Kind == CM_Scalarize))
      return false;

    // Assume we can vectorize V (and hence we need extraction) if the
    // scalars are not computed yet. This can happen, because it is called
    // via getScalarizationOverhead from setCostBasedWideningDecision, before
    // the scalars are collected. That should be a safe assumption in most
    // cases, because we check if the operands have vectorizable types
    // beforehand in LoopVectorizationLegality.
    return !Scalars.contains(VF) || !isScalarAfterVectorization(I, VF);
  }

  /// Returns a range containing only operands needing to be extracted.
  SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
                                                   ElementCount VF) const {
    SmallPtrSet<const Value *, 4> UniqueOperands;
    SmallVector<Value *, 4> Res;
    for (Value *Op : Ops) {
      if (isa<Constant>(Op) || !UniqueOperands.insert(Op).second ||
          !needsExtract(Op, VF))
        continue;
      Res.push_back(Op);
    }
    return Res;
  }

public:
  /// The loop that we evaluate.
  Loop *TheLoop;

  /// Predicated scalar evolution analysis.
  PredicatedScalarEvolution &PSE;

  /// Loop Info analysis.
  LoopInfo *LI;

  /// Vectorization legality.
  LoopVectorizationLegality *Legal;

  /// Vector target information.
  const TargetTransformInfo &TTI;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Assumption cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// A function to lazily fetch BlockFrequencyInfo. This avoids computing it
  /// unless necessary, e.g. when the loop isn't legal to vectorize or when
  /// there is no predication.
  std::function<BlockFrequencyInfo &()> GetBFI;
  /// The BlockFrequencyInfo returned from GetBFI.
  BlockFrequencyInfo *BFI = nullptr;
  /// Returns the BlockFrequencyInfo for the function if cached, otherwise
  /// fetches it via GetBFI. Avoids an indirect call to the std::function.
  BlockFrequencyInfo &getBFI() {
    if (!BFI)
      BFI = &GetBFI();
    return *BFI;
  }

  const Function *TheFunction;

  /// Loop Vectorize Hint.
  const LoopVectorizeHints *Hints;

  /// The interleave access information contains groups of interleaved accesses
  /// with the same stride and close to each other.
  InterleavedAccessInfo &InterleaveInfo;

  /// Values to ignore in the cost model.
  SmallPtrSet<const Value *, 16> ValuesToIgnore;

  /// Values to ignore in the cost model when VF > 1.
  SmallPtrSet<const Value *, 16> VecValuesToIgnore;
};
} // end namespace llvm

namespace {
/// Helper struct to manage generating runtime checks for vectorization.
///
/// The runtime checks are created up-front in temporary blocks to allow better
/// estimating the cost and un-linked from the existing IR. After deciding to
/// vectorize, the checks are moved back. If deciding not to vectorize, the
/// temporary blocks are completely removed.
class GeneratedRTChecks {
  /// Basic block which contains the generated SCEV checks, if any.
  BasicBlock *SCEVCheckBlock = nullptr;

  /// The value representing the result of the generated SCEV checks. If it is
  /// nullptr no SCEV checks have been generated.
  Value *SCEVCheckCond = nullptr;

  /// Basic block which contains the generated memory runtime checks, if any.
  BasicBlock *MemCheckBlock = nullptr;

  /// The value representing the result of the generated memory runtime checks.
  /// If it is nullptr no memory runtime checks have been generated.
  Value *MemRuntimeCheckCond = nullptr;

  DominatorTree *DT;
  LoopInfo *LI;
  TargetTransformInfo *TTI;

  SCEVExpander SCEVExp;
  SCEVExpander MemCheckExp;

  bool CostTooHigh = false;

  Loop *OuterLoop = nullptr;

  PredicatedScalarEvolution &PSE;

  /// The kind of cost that we are calculating.
  TargetTransformInfo::TargetCostKind CostKind;

public:
  GeneratedRTChecks(PredicatedScalarEvolution &PSE, DominatorTree *DT,
                    LoopInfo *LI, TargetTransformInfo *TTI,
                    TargetTransformInfo::TargetCostKind CostKind)
      : DT(DT), LI(LI), TTI(TTI),
        SCEVExp(*PSE.getSE(), "scev.check", /*PreserveLCSSA=*/false),
        MemCheckExp(*PSE.getSE(), "scev.check", /*PreserveLCSSA=*/false),
        PSE(PSE), CostKind(CostKind) {}

  /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
  /// accurately estimate the cost of the runtime checks. The blocks are
  /// un-linked from the IR and are added back during vector code generation. If
  /// there is no vector code generation, the check blocks are removed
  /// completely.
  void create(Loop *L, const LoopAccessInfo &LAI,
              const SCEVPredicate &UnionPred, ElementCount VF, unsigned IC,
              OptimizationRemarkEmitter &ORE) {

    // Hard cutoff to limit compile-time increase in case a very large number of
    // runtime checks needs to be generated.
    // TODO: Skip cutoff if the loop is guaranteed to execute, e.g. due to
    // profile info.
    CostTooHigh =
        LAI.getNumRuntimePointerChecks() > VectorizeMemoryCheckThreshold;
    if (CostTooHigh) {
      // Mark runtime checks as never succeeding when they exceed the threshold.
      MemRuntimeCheckCond = ConstantInt::getTrue(L->getHeader()->getContext());
      SCEVCheckCond = ConstantInt::getTrue(L->getHeader()->getContext());
      ORE.emit([&]() {
        return OptimizationRemarkAnalysisAliasing(
                   DEBUG_TYPE, "TooManyMemoryRuntimeChecks", L->getStartLoc(),
                   L->getHeader())
               << "loop not vectorized: too many memory checks needed";
      });
      LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
      return;
    }

    BasicBlock *LoopHeader = L->getHeader();
    BasicBlock *Preheader = L->getLoopPreheader();

    // Use SplitBlock to create blocks for SCEV & memory runtime checks to
    // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
    // may be used by SCEVExpander. The blocks will be un-linked from their
    // predecessors and removed from LI & DT at the end of the function.
    if (!UnionPred.isAlwaysTrue()) {
      SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
                                  nullptr, "vector.scevcheck");

      SCEVCheckCond = SCEVExp.expandCodeForPredicate(
          &UnionPred, SCEVCheckBlock->getTerminator());
      if (isa<Constant>(SCEVCheckCond)) {
        // Clean up directly after expanding the predicate to a constant, to
        // avoid further expansions re-using anything left over from SCEVExp.
        SCEVExpanderCleaner SCEVCleaner(SCEVExp);
        SCEVCleaner.cleanup();
      }
    }
1616
1617 const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
1618 if (RtPtrChecking.Need) {
1619 auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
1620 MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
1621 "vector.memcheck");
1622
1623 auto DiffChecks = RtPtrChecking.getDiffChecks();
1624 if (DiffChecks) {
1625 Value *RuntimeVF = nullptr;
1626 MemRuntimeCheckCond = addDiffRuntimeChecks(
1627 MemCheckBlock->getTerminator(), *DiffChecks, MemCheckExp,
1628 [VF, &RuntimeVF](IRBuilderBase &B, unsigned Bits) {
1629 if (!RuntimeVF)
1630 RuntimeVF = getRuntimeVF(B, B.getIntNTy(Bits), VF);
1631 return RuntimeVF;
1632 },
1633 IC);
1634 } else {
1635 MemRuntimeCheckCond = addRuntimeChecks(
1636 MemCheckBlock->getTerminator(), L, RtPtrChecking.getChecks(),
1637 MemCheckExp, VectorizerParams::HoistRuntimeChecks);
1638 }
1639 assert(MemRuntimeCheckCond &&
1640 "no RT checks generated although RtPtrChecking "
1641 "claimed checks are required");
1642 }
1643
1644 SCEVExp.eraseDeadInstructions(SCEVCheckCond);
1645
1646 if (!MemCheckBlock && !SCEVCheckBlock)
1647 return;
1648
1649 // Unhook the temporary block with the checks, update various places
1650 // accordingly.
1651 if (SCEVCheckBlock)
1652 SCEVCheckBlock->replaceAllUsesWith(Preheader);
1653 if (MemCheckBlock)
1654 MemCheckBlock->replaceAllUsesWith(Preheader);
1655
1656 if (SCEVCheckBlock) {
1657 SCEVCheckBlock->getTerminator()->moveBefore(
1658 Preheader->getTerminator()->getIterator());
1659 auto *UI = new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
1660 UI->setDebugLoc(DebugLoc::getTemporary());
1661 Preheader->getTerminator()->eraseFromParent();
1662 }
1663 if (MemCheckBlock) {
1664 MemCheckBlock->getTerminator()->moveBefore(
1665 Preheader->getTerminator()->getIterator());
1666 auto *UI = new UnreachableInst(Preheader->getContext(), MemCheckBlock);
1667 UI->setDebugLoc(DebugLoc::getTemporary());
1668 Preheader->getTerminator()->eraseFromParent();
1669 }
1670
1671 DT->changeImmediateDominator(LoopHeader, Preheader);
1672 if (MemCheckBlock) {
1673 DT->eraseNode(MemCheckBlock);
1674 LI->removeBlock(MemCheckBlock);
1675 }
1676 if (SCEVCheckBlock) {
1677 DT->eraseNode(SCEVCheckBlock);
1678 LI->removeBlock(SCEVCheckBlock);
1679 }
1680
1681 // Outer loop is used as part of the later cost calculations.
1682 OuterLoop = L->getParentLoop();
1683 }
1684
1685 InstructionCost getCost() {
1686 if (SCEVCheckBlock || MemCheckBlock)
1687 LLVM_DEBUG(dbgs() << "Calculating cost of runtime checks:\n");
1688
1689 if (CostTooHigh) {
1690 InstructionCost Cost;
1691 Cost.setInvalid();
1692 LLVM_DEBUG(dbgs() << " number of checks exceeded threshold\n");
1693 return Cost;
1694 }
1695
1696 InstructionCost RTCheckCost = 0;
1697 if (SCEVCheckBlock)
1698 for (Instruction &I : *SCEVCheckBlock) {
1699 if (SCEVCheckBlock->getTerminator() == &I)
1700 continue;
1701 InstructionCost C = TTI->getInstructionCost(&I, CostKind);
1702 LLVM_DEBUG(dbgs() << " " << C << " for " << I << "\n");
1703 RTCheckCost += C;
1704 }
1705 if (MemCheckBlock) {
1706 InstructionCost MemCheckCost = 0;
1707 for (Instruction &I : *MemCheckBlock) {
1708 if (MemCheckBlock->getTerminator() == &I)
1709 continue;
1710 InstructionCost C = TTI->getInstructionCost(&I, CostKind);
1711 LLVM_DEBUG(dbgs() << " " << C << " for " << I << "\n");
1712 MemCheckCost += C;
1713 }
1714
1715 // If the runtime memory checks are being created inside an outer loop
1716 // we should find out if these checks are outer loop invariant. If so,
1717 // the checks will likely be hoisted out and so the effective cost will
1718 // reduce according to the outer loop trip count.
1719 if (OuterLoop) {
1720 ScalarEvolution *SE = MemCheckExp.getSE();
1721 // TODO: If profitable, we could refine this further by analysing every
1722 // individual memory check, since there could be a mixture of loop
1723 // variant and invariant checks that mean the final condition is
1724 // variant.
1725 const SCEV *Cond = SE->getSCEV(MemRuntimeCheckCond);
1726 if (SE->isLoopInvariant(Cond, OuterLoop)) {
1727 // It seems reasonable to assume that we can reduce the effective
1728 // cost of the checks even when we know nothing about the trip
1729 // count. Assume that the outer loop executes at least twice.
1730 unsigned BestTripCount = 2;
1731
1732 // Get the best known TC estimate.
1733 if (auto EstimatedTC = getSmallBestKnownTC(
1734 PSE, OuterLoop, /* CanUseConstantMax = */ false))
1735 if (EstimatedTC->isFixed())
1736 BestTripCount = EstimatedTC->getFixedValue();
1737
1738 InstructionCost NewMemCheckCost = MemCheckCost / BestTripCount;
1739
1740 // Let's ensure the cost is always at least 1.
1741 NewMemCheckCost = std::max(NewMemCheckCost.getValue(),
1742 (InstructionCost::CostType)1);
1743
1744 if (BestTripCount > 1)
1746 << "We expect runtime memory checks to be hoisted "
1747 << "out of the outer loop. Cost reduced from "
1748 << MemCheckCost << " to " << NewMemCheckCost << '\n');
1749
1750 MemCheckCost = NewMemCheckCost;
1751 }
1752 }
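// Illustrative example (not from the source): if MemCheckCost is 20 and the
// checks are invariant in an outer loop with an estimated trip count of 4,
// the cost charged here becomes 20 / 4 = 5, modelling that the hoisted
// checks are amortized over the outer-loop iterations.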
1753
1754 RTCheckCost += MemCheckCost;
1755 }
1756
1757 if (SCEVCheckBlock || MemCheckBlock)
1758 LLVM_DEBUG(dbgs() << "Total cost of runtime checks: " << RTCheckCost
1759 << "\n");
1760
1761 return RTCheckCost;
1762 }
1763
1764 /// Remove the created SCEV & memory runtime check blocks & instructions, if
1765 /// unused.
1766 ~GeneratedRTChecks() {
1767 SCEVExpanderCleaner SCEVCleaner(SCEVExp);
1768 SCEVExpanderCleaner MemCheckCleaner(MemCheckExp);
1769 bool SCEVChecksUsed = !SCEVCheckBlock || !pred_empty(SCEVCheckBlock);
1770 bool MemChecksUsed = !MemCheckBlock || !pred_empty(MemCheckBlock);
1771 if (SCEVChecksUsed)
1772 SCEVCleaner.markResultUsed();
1773
1774 if (MemChecksUsed) {
1775 MemCheckCleaner.markResultUsed();
1776 } else {
1777 auto &SE = *MemCheckExp.getSE();
1778 // Memory runtime check generation creates compares that use expanded
1779 // values. Remove them before running the SCEVExpanderCleaners.
1780 for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
1781 if (MemCheckExp.isInsertedInstruction(&I))
1782 continue;
1783 SE.forgetValue(&I);
1784 I.eraseFromParent();
1785 }
1786 }
1787 MemCheckCleaner.cleanup();
1788 SCEVCleaner.cleanup();
1789
1790 if (!SCEVChecksUsed)
1791 SCEVCheckBlock->eraseFromParent();
1792 if (!MemChecksUsed)
1793 MemCheckBlock->eraseFromParent();
1794 }
1795
1796 /// Retrieves the SCEVCheckCond and SCEVCheckBlock that were generated as IR
1797 /// outside VPlan.
1798 std::pair<Value *, BasicBlock *> getSCEVChecks() const {
1799 using namespace llvm::PatternMatch;
1800 if (!SCEVCheckCond || match(SCEVCheckCond, m_ZeroInt()))
1801 return {nullptr, nullptr};
1802
1803 return {SCEVCheckCond, SCEVCheckBlock};
1804 }
1805
1806 /// Retrieves the MemCheckCond and MemCheckBlock that were generated as IR
1807 /// outside VPlan.
1808 std::pair<Value *, BasicBlock *> getMemRuntimeChecks() const {
1809 using namespace llvm::PatternMatch;
1810 if (MemRuntimeCheckCond && match(MemRuntimeCheckCond, m_ZeroInt()))
1811 return {nullptr, nullptr};
1812 return {MemRuntimeCheckCond, MemCheckBlock};
1813 }
1814
1815 /// Return true if any runtime checks have been added
1816 bool hasChecks() const {
1817 return getSCEVChecks().first || getMemRuntimeChecks().first;
1818 }
1819};
1820} // namespace
1821
1822 static bool useActiveLaneMask(TailFoldingStyle Style) {
1823 return Style == TailFoldingStyle::Data ||
1824 Style == TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck;
1825 }
1826
1830
1831// Return true if \p OuterLp is an outer loop annotated with hints for explicit
1832// vectorization. The loop needs to be annotated with #pragma omp simd
1833 // simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If the
1834// vector length information is not provided, vectorization is not considered
1835// explicit. Interleave hints are not allowed either. These limitations will be
1836// relaxed in the future.
1837 // Please note that we are currently forced to abuse the pragma 'clang
1838// vectorize' semantics. This pragma provides *auto-vectorization hints*
1839// (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
1840// provides *explicit vectorization hints* (LV can bypass legal checks and
1841// assume that vectorization is legal). However, both hints are implemented
1842// using the same metadata (llvm.loop.vectorize, processed by
1843// LoopVectorizeHints). This will be fixed in the future when the native IR
1844// representation for pragma 'omp simd' is introduced.
1845 static bool isExplicitVecOuterLoop(Loop *OuterLp,
1846 OptimizationRemarkEmitter *ORE) {
1847 assert(!OuterLp->isInnermost() && "This is not an outer loop");
1848 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
1849
1850 // Only outer loops with an explicit vectorization hint are supported.
1851 // Unannotated outer loops are ignored.
1852 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
1853 return false;
1854
1855 Function *Fn = OuterLp->getHeader()->getParent();
1856 if (!Hints.allowVectorization(Fn, OuterLp,
1857 true /*VectorizeOnlyWhenForced*/)) {
1858 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
1859 return false;
1860 }
1861
1862 if (Hints.getInterleave() > 1) {
1863 // TODO: Interleave support is future work.
1864 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
1865 "outer loops.\n");
1866 Hints.emitRemarkWithHints();
1867 return false;
1868 }
1869
1870 return true;
1871}
1872
1873 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
1874 OptimizationRemarkEmitter *ORE,
1875 SmallVectorImpl<Loop *> &V) {
1876 // Collect inner loops and outer loops without irreducible control flow. For
1877 // now, only collect outer loops that have explicit vectorization hints. If we
1878 // are stress testing the VPlan H-CFG construction, we collect the outermost
1879 // loop of every loop nest.
1880 if (L.isInnermost() || VPlanBuildStressTest ||
1881 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
1882 LoopBlocksRPO RPOT(&L);
1883 RPOT.perform(LI);
1884 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
1885 V.push_back(&L);
1886 // TODO: Collect inner loops inside marked outer loops in case
1887 // vectorization fails for the outer loop. Do not invoke
1888 // 'containsIrreducibleCFG' again for inner loops when the outer loop is
1889 // already known to be reducible. We can use an inherited attribute for
1890 // that.
1891 return;
1892 }
1893 }
1894 for (Loop *InnerL : L)
1895 collectSupportedLoops(*InnerL, LI, ORE, V);
1896}
1897
1898//===----------------------------------------------------------------------===//
1899// Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
1900// LoopVectorizationCostModel and LoopVectorizationPlanner.
1901//===----------------------------------------------------------------------===//
1902
1903/// For the given VF and UF and maximum trip count computed for the loop, return
1904/// whether the induction variable might overflow in the vectorized loop. If not,
1905/// then we know a runtime overflow check always evaluates to false and can be
1906/// removed.
1907 static bool isIndvarOverflowCheckKnownFalse(
1908 const LoopVectorizationCostModel *Cost,
1909 ElementCount VF, std::optional<unsigned> UF = std::nullopt) {
1910 // Always be conservative if we don't know the exact unroll factor.
1911 unsigned MaxUF = UF ? *UF : Cost->TTI.getMaxInterleaveFactor(VF);
1912
1913 IntegerType *IdxTy = Cost->Legal->getWidestInductionType();
1914 APInt MaxUIntTripCount = IdxTy->getMask();
1915
1916 // The runtime overflow check is known false iff the (max) trip-count
1917 // is known and (max) trip-count + (VF * UF) does not overflow in the type of
1918 // the vector loop induction variable.
1919 if (unsigned TC = Cost->PSE.getSmallConstantMaxTripCount()) {
1920 uint64_t MaxVF = VF.getKnownMinValue();
1921 if (VF.isScalable()) {
1922 std::optional<unsigned> MaxVScale =
1923 getMaxVScale(*Cost->TheFunction, Cost->TTI);
1924 if (!MaxVScale)
1925 return false;
1926 MaxVF *= *MaxVScale;
1927 }
1928
1929 return (MaxUIntTripCount - TC).ugt(MaxVF * MaxUF);
1930 }
1931
1932 return false;
1933}
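// Illustrative example (not from the source): with an i8 widest induction
// type, MaxUIntTripCount is 255. For a known max trip count of 240, VF = 4
// (fixed) and MaxUF = 2, the test is (255 - 240) ugt (4 * 2), i.e. 15 > 8,
// so the overflow check is known false and can be removed. For a max trip
// count of 250 the test is 5 > 8, and the check must be kept.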
1934
1935// Return whether we allow using masked interleave-groups (for dealing with
1936// strided loads/stores that reside in predicated blocks, or for dealing
1937// with gaps).
1938 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
1939 // If an override option has been passed in for interleaved accesses, use it.
1940 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
1941 return EnableMaskedInterleavedMemAccesses;
1942
1943 return TTI.enableMaskedInterleavedAccessVectorization();
1944}
1945
1946/// Replace \p VPBB with a VPIRBasicBlock wrapping \p IRBB. All recipes from \p
1947/// VPBB are moved to the end of the newly created VPIRBasicBlock. All
1948/// predecessors and successors of VPBB, if any, are rewired to the new
1949/// VPIRBasicBlock. If \p VPBB may be unreachable, \p Plan must be passed.
1950 static VPIRBasicBlock *replaceVPBBWithIRVPBB(VPBasicBlock *VPBB,
1951 BasicBlock *IRBB,
1952 VPlan *Plan = nullptr) {
1953 if (!Plan)
1954 Plan = VPBB->getPlan();
1955 VPIRBasicBlock *IRVPBB = Plan->createVPIRBasicBlock(IRBB);
1956 auto IP = IRVPBB->begin();
1957 for (auto &R : make_early_inc_range(VPBB->phis()))
1958 R.moveBefore(*IRVPBB, IP);
1959
1960 for (auto &R :
1961 make_early_inc_range(make_range(VPBB->getFirstNonPhi(), VPBB->end())))
1962 R.moveBefore(*IRVPBB, IRVPBB->end());
1963
1964 VPBlockUtils::reassociateBlocks(VPBB, IRVPBB);
1965 // VPBB is now dead and will be cleaned up when the plan gets destroyed.
1966 return IRVPBB;
1967}
1968
1969 BasicBlock *InnerLoopVectorizer::createScalarPreheader(StringRef Prefix) {
1970 BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
1971 assert(VectorPH && "Invalid loop structure");
1972 assert((OrigLoop->getUniqueLatchExitBlock() ||
1973 Cost->requiresScalarEpilogue(VF.isVector())) &&
1974 "loops not exiting via the latch without required epilogue?");
1975
1976 // NOTE: The Plan's scalar preheader VPBB isn't replaced with a VPIRBasicBlock
1977 // wrapping the newly created scalar preheader here at the moment, because the
1978 // Plan's scalar preheader may be unreachable at this point. Instead it is
1979 // replaced in executePlan.
1980 return SplitBlock(VectorPH, VectorPH->getTerminator(), DT, LI, nullptr,
1981 Twine(Prefix) + "scalar.ph");
1982}
1983
1984 /// Knowing that loop \p L executes a single vector iteration, add to \p
1985 /// InstsToIgnore instructions that will get simplified away and thus should
1986 /// not incur any cost.
1987 static void addFullyUnrolledInstructionsToIgnore(
1988 Loop *L, const MapVector<PHINode *, InductionDescriptor> &IL,
1989 SmallPtrSetImpl<Instruction *> &InstsToIgnore) {
1990 auto *Cmp = L->getLatchCmpInst();
1991 if (Cmp)
1992 InstsToIgnore.insert(Cmp);
1993 for (const auto &KV : IL) {
1994 // Extract the key by hand so that it can be used in the lambda below. Note
1995 // that captured structured bindings are a C++20 extension.
1996 const PHINode *IV = KV.first;
1997
1998 // Get next iteration value of the induction variable.
1999 Instruction *IVInst =
2000 cast<Instruction>(IV->getIncomingValueForBlock(L->getLoopLatch()));
2001 if (all_of(IVInst->users(),
2002 [&](const User *U) { return U == IV || U == Cmp; }))
2003 InstsToIgnore.insert(IVInst);
2004 }
2005}
2006
2007 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
2008 // Create a new IR basic block for the scalar preheader.
2009 BasicBlock *ScalarPH = createScalarPreheader("");
2010 return ScalarPH->getSinglePredecessor();
2011}
2012
2013namespace {
2014
2015struct CSEDenseMapInfo {
2016 static bool canHandle(const Instruction *I) {
2017 return isa<InsertElementInst, ExtractElementInst, ShuffleVectorInst,
2018 GetElementPtrInst>(I);
2019 }
2020
2021 static inline Instruction *getEmptyKey() {
2022 return DenseMapInfo<Instruction *>::getEmptyKey();
2023 }
2024
2025 static inline Instruction *getTombstoneKey() {
2026 return DenseMapInfo<Instruction *>::getTombstoneKey();
2027 }
2028
2029 static unsigned getHashValue(const Instruction *I) {
2030 assert(canHandle(I) && "Unknown instruction!");
2031 return hash_combine(I->getOpcode(),
2032 hash_combine_range(I->operand_values()));
2033 }
2034
2035 static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
2036 if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
2037 LHS == getTombstoneKey() || RHS == getTombstoneKey())
2038 return LHS == RHS;
2039 return LHS->isIdenticalTo(RHS);
2040 }
2041};
2042
2043} // end anonymous namespace
2044
2045/// FIXME: This legacy common-subexpression-elimination routine is scheduled for
2046/// removal, in favor of the VPlan-based one.
2047static void legacyCSE(BasicBlock *BB) {
2048 // Perform simple CSE.
2049 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
2050 for (Instruction &In : llvm::make_early_inc_range(*BB)) {
2051 if (!CSEDenseMapInfo::canHandle(&In))
2052 continue;
2053
2054 // Check if we can replace this instruction with any of the
2055 // visited instructions.
2056 if (Instruction *V = CSEMap.lookup(&In)) {
2057 In.replaceAllUsesWith(V);
2058 In.eraseFromParent();
2059 continue;
2060 }
2061
2062 CSEMap[&In] = &In;
2063 }
2064}
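// Illustrative example (not from the source): two identical
// 'getelementptr i32, ptr %p, i64 %i' instructions in the block produce the
// same hash and compare equal via isIdenticalTo, so the second one is
// replaced by the first and erased, leaving a single GEP that all users share.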
2065
2066/// This function attempts to return a value that represents the ElementCount
2067/// at runtime. For fixed-width VFs we know this precisely at compile
2068/// time, but for scalable VFs we calculate it based on an estimate of the
2069/// vscale value.
2070 static unsigned estimateElementCount(ElementCount VF,
2071 std::optional<unsigned> VScale) {
2072 unsigned EstimatedVF = VF.getKnownMinValue();
2073 if (VF.isScalable())
2074 if (VScale)
2075 EstimatedVF *= *VScale;
2076 assert(EstimatedVF >= 1 && "Estimated VF shouldn't be less than 1");
2077 return EstimatedVF;
2078}
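// Illustrative example (not from the source): for VF = <vscale x 4> and a
// vscale tuning estimate of 2, the estimated element count is 4 * 2 = 8; for
// a fixed VF = 8 the result is exactly 8, independent of VScale.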
2079
2080 InstructionCost
2081 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI,
2082 ElementCount VF) const {
2083 // We only need to calculate a cost if the VF is scalar; for actual vectors
2084 // we should already have a pre-calculated cost at each VF.
2085 if (!VF.isScalar())
2086 return getCallWideningDecision(CI, VF).Cost;
2087
2088 Type *RetTy = CI->getType();
2089 if (RecurrenceDescriptor::isFMulAddIntrinsic(CI))
2090 if (auto RedCost = getReductionPatternCost(CI, VF, RetTy))
2091 return *RedCost;
2092
2093 SmallVector<Type *, 4> Tys;
2094 for (auto &ArgOp : CI->args())
2095 Tys.push_back(ArgOp->getType());
2096
2097 InstructionCost ScalarCallCost = TTI.getCallInstrCost(
2098 CI->getCalledFunction(), RetTy, Tys, Config.CostKind);
2099
2100 // If this is an intrinsic we may have a lower cost for it.
2101 if (getVectorIntrinsicIDForCall(CI, TLI)) {
2102 InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
2103 return std::min(ScalarCallCost, IntrinsicCost);
2104 }
2105 return ScalarCallCost;
2106}
2107
2108 static Type *maybeVectorizeType(Type *Ty, ElementCount VF) {
2109 if (VF.isScalar() || !canVectorizeTy(Ty))
2110 return Ty;
2111 return toVectorizedTy(Ty, VF);
2112}
2113
2114 InstructionCost
2115 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
2116 ElementCount VF) const {
2117 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
2118 assert(ID && "Expected intrinsic call!");
2119 Type *RetTy = maybeVectorizeType(CI->getType(), VF);
2120 FastMathFlags FMF;
2121 if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
2122 FMF = FPMO->getFastMathFlags();
2123
2124 SmallVector<const Value *> Arguments(CI->args());
2125 FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
2126 SmallVector<Type *> ParamTys;
2127 std::transform(FTy->param_begin(), FTy->param_end(),
2128 std::back_inserter(ParamTys),
2129 [&](Type *Ty) { return maybeVectorizeType(Ty, VF); });
2130
2131 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
2132 dyn_cast<IntrinsicInst>(CI),
2133 InstructionCost::getInvalid(), TLI);
2134 return TTI.getIntrinsicInstrCost(CostAttrs, Config.CostKind);
2135}
2136
2137 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
2138 // Fix widened non-induction PHIs by setting up the PHI operands.
2139 fixNonInductionPHIs(State);
2140
2141 // Don't apply optimizations below when no (vector) loop remains, as they all
2142 // require one at the moment.
2143 VPBasicBlock *HeaderVPBB =
2144 vputils::getFirstLoopHeader(*State.Plan, State.VPDT);
2145 if (!HeaderVPBB)
2146 return;
2147
2148 BasicBlock *HeaderBB = State.CFG.VPBB2IRBB[HeaderVPBB];
2149
2150 // Remove redundant induction instructions.
2151 legacyCSE(HeaderBB);
2152}
2153
2154 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
2155 auto Iter = vp_depth_first_shallow(Plan.getEntry());
2156 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
2157 for (VPRecipeBase &P : VPBB->phis()) {
2158 auto *VPPhi = dyn_cast<VPWidenPHIRecipe>(&P);
2159 if (!VPPhi)
2160 continue;
2161 PHINode *NewPhi = cast<PHINode>(State.get(VPPhi));
2162 // Make sure the builder has a valid insert point.
2163 Builder.SetInsertPoint(NewPhi);
2164 for (const auto &[Inc, VPBB] : VPPhi->incoming_values_and_blocks())
2165 NewPhi->addIncoming(State.get(Inc), State.CFG.VPBB2IRBB[VPBB]);
2166 }
2167 }
2168}
2169
2170void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
2171 // We should not collect Scalars more than once per VF. Right now, this
2172 // function is called from collectUniformsAndScalars(), which already does
2173 // this check. Collecting Scalars for VF=1 does not make any sense.
2174 assert(VF.isVector() && !Scalars.contains(VF) &&
2175 "This function should not be visited twice for the same VF");
2176
2177 // This avoids any chances of creating a REPLICATE recipe during planning
2178 // since that would result in generation of scalarized code during execution,
2179 // which is not supported for scalable vectors.
2180 if (VF.isScalable()) {
2181 Scalars[VF].insert_range(Uniforms[VF]);
2182 return;
2183 }
2184
2185 SmallSetVector<Instruction *, 8> Worklist;
2186
2187 // These sets are used to seed the analysis with pointers used by memory
2188 // accesses that will remain scalar.
2189 SmallSetVector<Instruction *, 8> ScalarPtrs;
2190 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
2191 auto *Latch = TheLoop->getLoopLatch();
2192
2193 // A helper that returns true if the use of Ptr by MemAccess will be scalar.
2194 // The pointer operands of loads and stores will be scalar as long as the
2195 // memory access is not a gather or scatter operation. The value operand of a
2196 // store will remain scalar if the store is scalarized.
2197 auto IsScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
2198 InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
2199 assert(WideningDecision != CM_Unknown &&
2200 "Widening decision should be ready at this moment");
2201 if (auto *Store = dyn_cast<StoreInst>(MemAccess))
2202 if (Ptr == Store->getValueOperand())
2203 return WideningDecision == CM_Scalarize;
2204 assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
2205 "Ptr is neither a value or pointer operand");
2206 return WideningDecision != CM_GatherScatter;
2207 };
2208
2209 // A helper that returns true if the given value is a getelementptr
2210 // instruction contained in the loop that varies across loop iterations.
2211 auto IsLoopVaryingGEP = [&](Value *V) {
2212 return isa<GetElementPtrInst>(V) && !TheLoop->isLoopInvariant(V);
2213 };
2214
2215 // A helper that evaluates a memory access's use of a pointer. If the use will
2216 // be a scalar use and the pointer is only used by memory accesses, we place
2217 // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
2218 // PossibleNonScalarPtrs.
2219 auto EvaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
2220 // We only care about bitcast and getelementptr instructions contained in
2221 // the loop.
2222 if (!IsLoopVaryingGEP(Ptr))
2223 return;
2224
2225 // If the pointer has already been identified as scalar (e.g., if it was
2226 // also identified as uniform), there's nothing to do.
2227 auto *I = cast<Instruction>(Ptr);
2228 if (Worklist.count(I))
2229 return;
2230
2231 // If the use of the pointer will be a scalar use, and all users of the
2232 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
2233 // place the pointer in PossibleNonScalarPtrs.
2234 if (IsScalarUse(MemAccess, Ptr) &&
2236 ScalarPtrs.insert(I);
2237 else
2238 PossibleNonScalarPtrs.insert(I);
2239 };
2240
2241 // We seed the scalars analysis with two classes of instructions: (1)
2242 // instructions marked uniform-after-vectorization and (2) bitcast,
2243 // getelementptr and (pointer) phi instructions used by memory accesses
2244 // requiring a scalar use.
2245 //
2246 // (1) Add to the worklist all instructions that have been identified as
2247 // uniform-after-vectorization.
2248 Worklist.insert_range(Uniforms[VF]);
2249
2250 // (2) Add to the worklist all bitcast and getelementptr instructions used by
2251 // memory accesses requiring a scalar use. The pointer operands of loads and
2252 // stores will be scalar unless the operation is a gather or scatter.
2253 // The value operand of a store will remain scalar if the store is scalarized.
2254 for (auto *BB : TheLoop->blocks())
2255 for (auto &I : *BB) {
2256 if (auto *Load = dyn_cast<LoadInst>(&I)) {
2257 EvaluatePtrUse(Load, Load->getPointerOperand());
2258 } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
2259 EvaluatePtrUse(Store, Store->getPointerOperand());
2260 EvaluatePtrUse(Store, Store->getValueOperand());
2261 }
2262 }
2263 for (auto *I : ScalarPtrs)
2264 if (!PossibleNonScalarPtrs.count(I)) {
2265 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
2266 Worklist.insert(I);
2267 }
2268
2269 // Insert the forced scalars.
2270 // FIXME: Currently VPWidenPHIRecipe() often creates a dead vector
2271 // induction variable when the PHI user is scalarized.
2272 auto ForcedScalar = ForcedScalars.find(VF);
2273 if (ForcedScalar != ForcedScalars.end())
2274 for (auto *I : ForcedScalar->second) {
2275 LLVM_DEBUG(dbgs() << "LV: Found (forced) scalar instruction: " << *I << "\n");
2276 Worklist.insert(I);
2277 }
2278
2279 // Expand the worklist by looking through any bitcasts and getelementptr
2280 // instructions we've already identified as scalar. This is similar to the
2281 // expansion step in collectLoopUniforms(); however, here we're only
2282 // expanding to include additional bitcasts and getelementptr instructions.
2283 unsigned Idx = 0;
2284 while (Idx != Worklist.size()) {
2285 Instruction *Dst = Worklist[Idx++];
2286 if (!IsLoopVaryingGEP(Dst->getOperand(0)))
2287 continue;
2288 auto *Src = cast<Instruction>(Dst->getOperand(0));
2289 if (llvm::all_of(Src->users(), [&](User *U) -> bool {
2290 auto *J = cast<Instruction>(U);
2291 return !TheLoop->contains(J) || Worklist.count(J) ||
2292 ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
2293 IsScalarUse(J, Src));
2294 })) {
2295 Worklist.insert(Src);
2296 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
2297 }
2298 }
2299
2300 // An induction variable will remain scalar if all users of the induction
2301 // variable and induction variable update remain scalar.
2302 for (const auto &Induction : Legal->getInductionVars()) {
2303 auto *Ind = Induction.first;
2304 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
2305
2306 // If tail-folding is applied, the primary induction variable will be used
2307 // to feed a vector compare.
2308 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
2309 continue;
2310
2311 // Returns true if \p Indvar is a pointer induction that is used directly by
2312 // load/store instruction \p I.
2313 auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar,
2314 Instruction *I) {
2315 return Induction.second.getKind() ==
2316 InductionDescriptor::IK_PtrInduction &&
2317 (isa<LoadInst>(I) || isa<StoreInst>(I)) &&
2318 Indvar == getLoadStorePointerOperand(I) && IsScalarUse(I, Indvar);
2319 };
2320
2321 // Determine if all users of the induction variable are scalar after
2322 // vectorization.
2323 bool ScalarInd = all_of(Ind->users(), [&](User *U) -> bool {
2324 auto *I = cast<Instruction>(U);
2325 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
2326 IsDirectLoadStoreFromPtrIndvar(Ind, I);
2327 });
2328 if (!ScalarInd)
2329 continue;
2330
2331 // If the induction variable update is a fixed-order recurrence, neither the
2332 // induction variable nor its update should be marked scalar after
2333 // vectorization.
2334 auto *IndUpdatePhi = dyn_cast<PHINode>(IndUpdate);
2335 if (IndUpdatePhi && Legal->isFixedOrderRecurrence(IndUpdatePhi))
2336 continue;
2337
2338 // Determine if all users of the induction variable update instruction are
2339 // scalar after vectorization.
2340 bool ScalarIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
2341 auto *I = cast<Instruction>(U);
2342 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
2343 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
2344 });
2345 if (!ScalarIndUpdate)
2346 continue;
2347
2348 // The induction variable and its update instruction will remain scalar.
2349 Worklist.insert(Ind);
2350 Worklist.insert(IndUpdate);
2351 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
2352 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
2353 << "\n");
2354 }
2355
2356 Scalars[VF].insert_range(Worklist);
2357}
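// Illustrative example (not from the source): in a loop storing 'b[i] + 1'
// to 'a[i]' with consecutive, widened accesses, the GEPs computing the
// addresses only need their lane-0 value, so the induction variable, its
// update, and both GEPs typically end up in Scalars[VF].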
2358
2359 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I,
2360 ElementCount VF) {
2361 if (!isPredicatedInst(I))
2362 return false;
2363
2364 // Do we have a non-scalar lowering for this predicated
2365 // instruction? No - it is scalar with predication.
2366 switch(I->getOpcode()) {
2367 default:
2368 return true;
2369 case Instruction::Call:
2370 if (VF.isScalar())
2371 return true;
2372 [[fallthrough]];
2373 case Instruction::Load:
2374 case Instruction::Store: {
2375 auto *Ptr = getLoadStorePointerOperand(I);
2376 auto *Ty = getLoadStoreType(I);
2377 unsigned AS = getLoadStoreAddressSpace(I);
2378 Type *VTy = Ty;
2379 if (VF.isVector())
2380 VTy = VectorType::get(Ty, VF);
2381 const Align Alignment = getLoadStoreAlignment(I);
2382 return isa<LoadInst>(I)
2383 ? !(Config.isLegalMaskedLoad(Ty, Ptr, Alignment, AS) ||
2384 TTI.isLegalMaskedGather(VTy, Alignment))
2385 : !(Config.isLegalMaskedStore(Ty, Ptr, Alignment, AS) ||
2386 TTI.isLegalMaskedScatter(VTy, Alignment));
2387 }
2388 case Instruction::UDiv:
2389 case Instruction::SDiv:
2390 case Instruction::SRem:
2391 case Instruction::URem: {
2392 // We have the option to use the safe-divisor idiom to avoid predication.
2393 // The cost based decision here will always select safe-divisor for
2394 // scalable vectors as scalarization isn't legal.
2395 const auto [ScalarCost, SafeDivisorCost] = getDivRemSpeculationCost(I, VF);
2396 return isDivRemScalarWithPredication(ScalarCost, SafeDivisorCost);
2397 }
2398 }
2399}
2400
2401 bool LoopVectorizationCostModel::isMaskRequired(Instruction *I) const {
2402 return Legal->isMaskRequired(I, foldTailByMasking());
2403}
2404
2405 // TODO: Fold into LoopVectorizationLegality::isMaskRequired.
2406 bool LoopVectorizationCostModel::isPredicatedInst(Instruction *I) const {
2407 // TODO: We can use the loop-preheader as context point here and get
2408 // context sensitive reasoning for isSafeToSpeculativelyExecute.
2409 if (isSafeToSpeculativelyExecute(I) ||
2410 (isa<LoadInst, StoreInst, CallInst>(I) && !I->mayHaveSideEffects()) ||
2411 isa<BranchInst, SwitchInst, PHINode, AllocaInst>(I))
2412 return false;
2413
2414 // If the instruction was executed conditionally in the original scalar loop,
2415 // predication is needed with a mask whose lanes are all possibly inactive.
2416 if (Legal->blockNeedsPredication(I->getParent()))
2417 return true;
2418
2419 // If we're not folding the tail by masking, predication is unnecessary.
2420 if (!foldTailByMasking())
2421 return false;
2422
2423 // All that remain are instructions with side-effects originally executed in
2424 // the loop unconditionally, but now execute under a tail-fold mask (only)
2425 // having at least one active lane (the first). If the side-effects of the
2426 // instruction are invariant, executing it w/o (the tail-folding) mask is safe
2427 // - it will cause the same side-effects as when masked.
2428 switch(I->getOpcode()) {
2429 default:
2431 "instruction should have been considered by earlier checks");
2432 case Instruction::Call:
2433 // Side-effects of a Call are assumed to be non-invariant, needing a
2434 // (fold-tail) mask.
2436 "should have returned earlier for calls not needing a mask");
2437 return true;
2438 case Instruction::Load:
2439 // If the address is loop invariant no predication is needed.
2440 return !Legal->isInvariant(getLoadStorePointerOperand(I));
2441 case Instruction::Store: {
2442 // For stores, we must prove both speculation safety (which follows from
2443 // the same argument as loads) and that the value being stored is correct.
2444 // The easiest form of the latter is to require that all values
2445 // stored are the same.
2446 return !(Legal->isInvariant(getLoadStorePointerOperand(I)) &&
2447 TheLoop->isLoopInvariant(cast<StoreInst>(I)->getValueOperand()));
2448 }
2449 case Instruction::UDiv:
2450 case Instruction::URem:
2451 // If the divisor is loop-invariant no predication is needed.
2452 return !Legal->isInvariant(I->getOperand(1));
2453 case Instruction::SDiv:
2454 case Instruction::SRem:
2455 // Conservative for now, since masked-off lanes may be poison and could
2456 // trigger signed overflow.
2457 return true;
2458 }
2459}
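// Illustrative example (not from the source): under tail folding, a udiv by
// a loop-varying divisor is predicated because a masked-off lane could
// divide by zero, while a load from a loop-invariant address is not:
// executing it for inactive lanes produces the same side-effect-free result.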
2460
2461 unsigned LoopVectorizationCostModel::getPredBlockCostDivisor(
2462 TTI::TargetCostKind CostKind, const BasicBlock *BB) const {
2463 if (CostKind == TTI::TCK_CodeSize)
2464 return 1;
2465 // If the block wasn't originally predicated then return early to avoid
2466 // computing BlockFrequencyInfo unnecessarily.
2467 if (!Legal->blockNeedsPredication(BB))
2468 return 1;
2469
2470 uint64_t HeaderFreq =
2471 getBFI().getBlockFreq(TheLoop->getHeader()).getFrequency();
2472 uint64_t BBFreq = getBFI().getBlockFreq(BB).getFrequency();
2473 assert(HeaderFreq >= BBFreq &&
2474 "Header has smaller block freq than dominated BB?");
2475 return std::round((double)HeaderFreq / BBFreq);
2476}
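// Illustrative example (not from the source): with a header frequency of
// 1024 and a predicated block frequency of 256, the divisor is
// round(1024 / 256) = 4, i.e. the block is assumed to run on roughly one in
// four iterations, and its scalarization cost is scaled down accordingly.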
2477
2478std::pair<InstructionCost, InstructionCost>
2480 ElementCount VF) {
2481 assert(I->getOpcode() == Instruction::UDiv ||
2482 I->getOpcode() == Instruction::SDiv ||
2483 I->getOpcode() == Instruction::SRem ||
2484 I->getOpcode() == Instruction::URem);
2485 assert(!isSafeToSpeculativelyExecute(I));
2486
2487 // Scalarization isn't legal for scalable vector types
2488 InstructionCost ScalarizationCost = InstructionCost::getInvalid();
2489 if (!VF.isScalable()) {
2490 // Get the scalarization cost and scale this amount by the probability of
2491 // executing the predicated block. If the instruction is not predicated,
2492 // we fall through to the next case.
2493 ScalarizationCost = 0;
2494
2495 // These instructions have a non-void type, so account for the phi nodes
2496 // that we will create. This cost is likely to be zero. The phi node
2497 // cost, if any, should be scaled by the block probability because it
2498 // models a copy at the end of each predicated block.
2499 ScalarizationCost += VF.getFixedValue() *
2500 TTI.getCFInstrCost(Instruction::PHI, Config.CostKind);
2501
2502 // The cost of the non-predicated instruction.
2503 ScalarizationCost +=
2504 VF.getFixedValue() * TTI.getArithmeticInstrCost(
2505 I->getOpcode(), I->getType(), Config.CostKind);
2506
2507 // The cost of insertelement and extractelement instructions needed for
2508 // scalarization.
2509 ScalarizationCost += getScalarizationOverhead(I, VF);
2510
2511 // Scale the cost by the probability of executing the predicated blocks.
2512 // This assumes the predicated block for each vector lane is equally
2513 // likely.
2514 ScalarizationCost =
2515 ScalarizationCost /
2516 getPredBlockCostDivisor(Config.CostKind, I->getParent());
2517 }
2518
2519 InstructionCost SafeDivisorCost = 0;
2520 auto *VecTy = toVectorTy(I->getType(), VF);
2521 // The cost of the select guard to ensure all lanes are well defined
2522 // after we speculate above any internal control flow.
2523 SafeDivisorCost +=
2524 TTI.getCmpSelInstrCost(Instruction::Select, VecTy,
2525 toVectorTy(Type::getInt1Ty(I->getContext()), VF),
2526 CmpInst::BAD_ICMP_PREDICATE, Config.CostKind);
2527
2528 SmallVector<const Value *, 4> Operands(I->operand_values());
2529 SafeDivisorCost += TTI.getArithmeticInstrCost(
2530 I->getOpcode(), VecTy, Config.CostKind,
2531 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
2532 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
2533 Operands, I);
2534 return {ScalarizationCost, SafeDivisorCost};
2535}
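// Illustrative example (not from the source): for a predicated udiv at a
// fixed VF = 4, ScalarizationCost is 4 copies of (phi + udiv) plus
// insert/extract overhead, scaled down by the predicated-block divisor,
// while SafeDivisorCost is one vector select plus one vector udiv; the
// cheaper of the two decides between scalarizing and speculating.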
2536
2537 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
2538 Instruction *I, ElementCount VF) const {
2539 assert(isAccessInterleaved(I) && "Expecting interleaved access.");
2541 "Decision should not be set yet.");
2542 auto *Group = getInterleavedAccessGroup(I);
2543 assert(Group && "Must have a group.");
2544 unsigned InterleaveFactor = Group->getFactor();
2545
2546 // If the instruction's allocated size doesn't equal its type size, it
2547 // requires padding and will be scalarized.
2548 auto &DL = I->getDataLayout();
2549 auto *ScalarTy = getLoadStoreType(I);
2550 if (hasIrregularType(ScalarTy, DL))
2551 return false;
2552
2553 // For scalable vectors, the interleave factors must be <= 8 since we require
2554 // the (de)interleaveN intrinsics instead of shufflevectors.
2555 if (VF.isScalable() && InterleaveFactor > 8)
2556 return false;
2557
2558 // If the group involves a non-integral pointer, we may not be able to
2559 // losslessly cast all values to a common type.
2560 bool ScalarNI = DL.isNonIntegralPointerType(ScalarTy);
2561 for (Instruction *Member : Group->members()) {
2562 auto *MemberTy = getLoadStoreType(Member);
2563 bool MemberNI = DL.isNonIntegralPointerType(MemberTy);
2564 // Don't coerce non-integral pointers to integers or vice versa.
2565 if (MemberNI != ScalarNI)
2566 // TODO: Consider adding special nullptr value case here
2567 return false;
2568 if (MemberNI && ScalarNI &&
2569 ScalarTy->getPointerAddressSpace() !=
2570 MemberTy->getPointerAddressSpace())
2571 return false;
2572 }
2573
2574 // Check if masking is required.
2575 // A Group may need masking for one of two reasons: it resides in a block that
2576 // needs predication, or it was decided to use masking to deal with gaps
2577 // (either a gap at the end of a load-access that may result in a speculative
2578 // load, or any gaps in a store-access).
2579 bool PredicatedAccessRequiresMasking =
2580 Legal->blockNeedsPredication(I->getParent()) && isMaskRequired(I);
2581 bool LoadAccessWithGapsRequiresEpilogMasking =
2582 isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
2583 !isScalarEpilogueAllowed();
2584 bool StoreAccessWithGapsRequiresMasking =
2585 isa<StoreInst>(I) && !Group->isFull();
2586 if (!PredicatedAccessRequiresMasking &&
2587 !LoadAccessWithGapsRequiresEpilogMasking &&
2588 !StoreAccessWithGapsRequiresMasking)
2589 return true;
2590
2591 // If masked interleaving is required, we expect that the user/target had
2592 // enabled it, because otherwise it either wouldn't have been created or
2593 // it should have been invalidated by the CostModel.
2595 "Masked interleave-groups for predicated accesses are not enabled.");
2596
2597 if (Group->isReverse())
2598 return false;
2599
2600 // TODO: Support interleaved access that requires a gap mask for scalable VFs.
2601 bool NeedsMaskForGaps = LoadAccessWithGapsRequiresEpilogMasking ||
2602 StoreAccessWithGapsRequiresMasking;
2603 if (VF.isScalable() && NeedsMaskForGaps)
2604 return false;
2605
2606 auto *Ty = getLoadStoreType(I);
2607 const Align Alignment = getLoadStoreAlignment(I);
2608 unsigned AS = getLoadStoreAddressSpace(I);
2609 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment, AS)
2610 : TTI.isLegalMaskedStore(Ty, Alignment, AS);
2611}
2612
2613 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
2614 Instruction *I, ElementCount VF) {
2615 // Get and ensure we have a valid memory instruction.
2616 assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction");
2617
2618 auto *Ptr = getLoadStorePointerOperand(I);
2619 auto *ScalarTy = getLoadStoreType(I);
2620
2621 // First of all, the pointer must be consecutive for the access to be widened.
2622 if (!Legal->isConsecutivePtr(ScalarTy, Ptr))
2623 return false;
2624
2625 // If the instruction is a store located in a predicated block, it will be
2626 // scalarized.
2627 if (isScalarWithPredication(I, VF))
2628 return false;
2629
2630 // If the instruction's allocated size doesn't equal its type size, it
2631 // requires padding and will be scalarized.
2632 auto &DL = I->getDataLayout();
2633 if (hasIrregularType(ScalarTy, DL))
2634 return false;
2635
2636 return true;
2637}
2638
2639void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
2640 // We should not collect Uniforms more than once per VF. Right now,
2641 // this function is called from collectUniformsAndScalars(), which
2642 // already does this check. Collecting Uniforms for VF=1 does not make any
2643 // sense.
2644
2645 assert(VF.isVector() && !Uniforms.contains(VF) &&
2646 "This function should not be visited twice for the same VF");
2647
2648 // Initialize the entry for this VF so we won't analyze it again: even if
2649 // no uniform value is found, Uniforms.count(VF) will return 1.
2650 Uniforms[VF].clear();
2651
2652 // Now we know that the loop is vectorizable!
2653 // Collect instructions inside the loop that will remain uniform after
2654 // vectorization.
2655
2656 // Global values, params and instructions outside of current loop are out of
2657 // scope.
2658 auto IsOutOfScope = [&](Value *V) -> bool {
2659 Instruction *I = dyn_cast<Instruction>(V);
2660 return (!I || !TheLoop->contains(I));
2661 };
2662
2663 // Worklist containing uniform instructions demanding lane 0.
2664 SetVector<Instruction *> Worklist;
2665
2666 // Add uniform instructions demanding lane 0 to the worklist. Instructions
2667 // that require predication must not be considered uniform after
2668 // vectorization, because that would create an erroneous replicating region
2669 // where only a single instance out of VF should be formed.
2670 auto AddToWorklistIfAllowed = [&](Instruction *I) -> void {
2671 if (IsOutOfScope(I)) {
2672 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
2673 << *I << "\n");
2674 return;
2675 }
2676 if (isPredicatedInst(I)) {
2677 LLVM_DEBUG(
2678 dbgs() << "LV: Found not uniform due to requiring predication: " << *I
2679 << "\n");
2680 return;
2681 }
2682 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
2683 Worklist.insert(I);
2684 };
2685
2686 // Start with the conditional branches exiting the loop. If the branch
2687 // condition is an instruction contained in the loop that is only used by the
2688 // branch, it is uniform. Note conditions from uncountable early exits are not
2689 // uniform.
2690 SmallVector<BasicBlock *, 8> Exiting;
2691 TheLoop->getExitingBlocks(Exiting);
2692 for (BasicBlock *E : Exiting) {
2693 if (Legal->hasUncountableEarlyExit() && TheLoop->getLoopLatch() != E)
2694 continue;
2695 auto *Cmp = dyn_cast<Instruction>(E->getTerminator()->getOperand(0));
2696 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
2697 AddToWorklistIfAllowed(Cmp);
2698 }
2699
2700 auto PrevVF = VF.divideCoefficientBy(2);
2701 // Return true if all lanes perform the same memory operation, and we can
2702 // thus choose to execute only one.
2703 auto IsUniformMemOpUse = [&](Instruction *I) {
2704 // If the value was already known to not be uniform for the previous
2705 // (smaller VF), it cannot be uniform for the larger VF.
2706 if (PrevVF.isVector()) {
2707 auto Iter = Uniforms.find(PrevVF);
2708 if (Iter != Uniforms.end() && !Iter->second.contains(I))
2709 return false;
2710 }
2711 if (!Legal->isUniformMemOp(*I, VF))
2712 return false;
2713 if (isa<LoadInst>(I))
2714 // Loading the same address always produces the same result - at least
2715 // assuming aliasing and ordering which have already been checked.
2716 return true;
2717 // Storing the same value on every iteration.
2718 return TheLoop->isLoopInvariant(cast<StoreInst>(I)->getValueOperand());
2719 };
2720
2721 auto IsUniformDecision = [&](Instruction *I, ElementCount VF) {
2722 InstWidening WideningDecision = getWideningDecision(I, VF);
2723 assert(WideningDecision != CM_Unknown &&
2724 "Widening decision should be ready at this moment");
2725
2726 if (IsUniformMemOpUse(I))
2727 return true;
2728
2729 return (WideningDecision == CM_Widen ||
2730 WideningDecision == CM_Widen_Reverse ||
2731 WideningDecision == CM_Interleave);
2732 };
2733
2734 // Returns true if Ptr is the pointer operand of a memory access instruction
2735 // I, I is known to not require scalarization, and the pointer is not also
2736 // stored.
2737 auto IsVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
2738 if (isa<StoreInst>(I) && I->getOperand(0) == Ptr)
2739 return false;
2740 return getLoadStorePointerOperand(I) == Ptr &&
2741 (IsUniformDecision(I, VF) || Legal->isInvariant(Ptr));
2742 };
2743
2744 // Holds a list of values which are known to have at least one uniform use.
2745 // Note that there may be other uses which aren't uniform. A "uniform use"
2746 // here is something which only demands lane 0 of the unrolled iterations;
2747 // it does not imply that all lanes produce the same value (e.g. this is not
2748 // the usual meaning of uniform)
2749 SetVector<Value *> HasUniformUse;
2750
2751 // Scan the loop for instructions which are either a) known to have only
2752 // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
2753 for (auto *BB : TheLoop->blocks())
2754 for (auto &I : *BB) {
2755 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
2756 switch (II->getIntrinsicID()) {
2757 case Intrinsic::sideeffect:
2758 case Intrinsic::experimental_noalias_scope_decl:
2759 case Intrinsic::assume:
2760 case Intrinsic::lifetime_start:
2761 case Intrinsic::lifetime_end:
2762 if (TheLoop->hasLoopInvariantOperands(&I))
2763 AddToWorklistIfAllowed(&I);
2764 break;
2765 default:
2766 break;
2767 }
2768 }
2769
2770 if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
2771 if (IsOutOfScope(EVI->getAggregateOperand())) {
2772 AddToWorklistIfAllowed(EVI);
2773 continue;
2774 }
2775 // Only ExtractValue instructions where the aggregate value comes from a
2776 // call are allowed to be non-uniform.
2777 assert(isa<CallInst>(EVI->getAggregateOperand()) &&
2778 "Expected aggregate value to be call return value");
2779 }
2780
2781 // If there's no pointer operand, there's nothing to do.
2782 auto *Ptr = getLoadStorePointerOperand(&I);
2783 if (!Ptr)
2784 continue;
2785
2786 // If the pointer can be proven to be uniform, always add it to the
2787 // worklist.
2788 if (isa<Instruction>(Ptr) && Legal->isUniform(Ptr, VF))
2789 AddToWorklistIfAllowed(cast<Instruction>(Ptr));
2790
2791 if (IsUniformMemOpUse(&I))
2792 AddToWorklistIfAllowed(&I);
2793
2794 if (IsVectorizedMemAccessUse(&I, Ptr))
2795 HasUniformUse.insert(Ptr);
2796 }
2797
2798 // Add to the worklist any operands which have *only* uniform (e.g. lane 0
2799 // demanding) users. Since loops are assumed to be in LCSSA form, this
2800 // disallows uses outside the loop as well.
2801 for (auto *V : HasUniformUse) {
2802 if (IsOutOfScope(V))
2803 continue;
2804 auto *I = cast<Instruction>(V);
2805 bool UsersAreMemAccesses = all_of(I->users(), [&](User *U) -> bool {
2806 auto *UI = cast<Instruction>(U);
2807 return TheLoop->contains(UI) && IsVectorizedMemAccessUse(UI, V);
2808 });
2809 if (UsersAreMemAccesses)
2810 AddToWorklistIfAllowed(I);
2811 }
2812
2813 // Expand Worklist in topological order: whenever a new instruction
2814 // is added, its users should already be inside Worklist. This ensures
2815 // a uniform instruction will only be used by uniform instructions.
2816 unsigned Idx = 0;
2817 while (Idx != Worklist.size()) {
2818 Instruction *I = Worklist[Idx++];
2819
2820 for (auto *OV : I->operand_values()) {
2821 // IsOutOfScope operands cannot be uniform instructions.
2822 if (IsOutOfScope(OV))
2823 continue;
2824 // First-order recurrence phis should typically be considered
2825 // non-uniform.
2826 auto *OP = dyn_cast<PHINode>(OV);
2827 if (OP && Legal->isFixedOrderRecurrence(OP))
2828 continue;
2829 // If all the users of the operand are uniform, then add the
2830 // operand into the uniform worklist.
2831 auto *OI = cast<Instruction>(OV);
2832 if (llvm::all_of(OI->users(), [&](User *U) -> bool {
2833 auto *J = cast<Instruction>(U);
2834 return Worklist.count(J) || IsVectorizedMemAccessUse(J, OI);
2835 }))
2836 AddToWorklistIfAllowed(OI);
2837 }
2838 }
2839
2840 // For an instruction to be added into Worklist above, all its users inside
2841 // the loop should also be in Worklist. However, this condition cannot be
2842 // true for phi nodes that form a cyclic dependence. We must process phi
2843 // nodes separately. An induction variable will remain uniform if all users
2844 // of the induction variable and induction variable update remain uniform.
2845 // The code below handles both pointer and non-pointer induction variables.
2846 BasicBlock *Latch = TheLoop->getLoopLatch();
2847 for (const auto &Induction : Legal->getInductionVars()) {
2848 auto *Ind = Induction.first;
2849 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
2850
2851 // Determine if all users of the induction variable are uniform after
2852 // vectorization.
2853 bool UniformInd = all_of(Ind->users(), [&](User *U) -> bool {
2854 auto *I = cast<Instruction>(U);
2855 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
2856 IsVectorizedMemAccessUse(I, Ind);
2857 });
2858 if (!UniformInd)
2859 continue;
2860
2861 // Determine if all users of the induction variable update instruction are
2862 // uniform after vectorization.
2863 bool UniformIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
2864 auto *I = cast<Instruction>(U);
2865 return I == Ind || Worklist.count(I) ||
2866 IsVectorizedMemAccessUse(I, IndUpdate);
2867 });
2868 if (!UniformIndUpdate)
2869 continue;
2870
2871 // The induction variable and its update instruction will remain uniform.
2872 AddToWorklistIfAllowed(Ind);
2873 AddToWorklistIfAllowed(IndUpdate);
2874 }
2875
2876 Uniforms[VF].insert_range(Worklist);
2877}
2878
2879FixedScalableVFPair
2880 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
2881 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
2882 // TODO: It may still be useful to vectorize, since the checks are likely
2883 // to be dynamically uniform if the target can skip them.
2884 reportVectorizationFailure(
2885 "Not inserting runtime ptr check for divergent target",
2886 "runtime pointer checks needed. Not enabled for divergent target",
2887 "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
2888 return FixedScalableVFPair::getNone();
2889 }
2890
2891 ScalarEvolution *SE = PSE.getSE();
2893 unsigned MaxTC = PSE.getSmallConstantMaxTripCount();
2894 if (!MaxTC && EpilogueLoweringStatus == CM_EpilogueAllowed)
2896 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
2897 if (TC != ElementCount::getFixed(MaxTC))
2898 LLVM_DEBUG(dbgs() << "LV: Found maximum trip count: " << MaxTC << '\n');
2899 if (TC.isScalar()) {
2900 reportVectorizationFailure("Single iteration (non) loop",
2901 "loop trip count is one, irrelevant for vectorization",
2902 "SingleIterationLoop", ORE, TheLoop);
2903 return FixedScalableVFPair::getNone();
2904 }
2905
2906 // If BTC matches the widest induction type and is -1 then the trip count
2907 // computation will wrap to 0 and the vector trip count will be 0. Do not try
2908 // to vectorize.
2909 const SCEV *BTC = SE->getBackedgeTakenCount(TheLoop);
2910 if (!isa<SCEVCouldNotCompute>(BTC) &&
2911 BTC->getType()->getScalarSizeInBits() >=
2912 Legal->getWidestInductionType()->getScalarSizeInBits() &&
2913 SE->isKnownPredicate(CmpInst::ICMP_EQ, BTC,
2914 SE->getMinusOne(BTC->getType()))) {
2915 reportVectorizationFailure(
2916 "Trip count computation wrapped",
2917 "backedge-taken count is -1, loop trip count wrapped to 0",
2918 "TripCountWrapped", ORE, TheLoop);
2919 return FixedScalableVFPair::getNone();
2920 }
2921
2922 assert(WideningDecisions.empty() && CallWideningDecisions.empty() &&
2923 Uniforms.empty() && Scalars.empty() &&
2924 "No cost-modeling decisions should have been taken at this point");
2925
2926 switch (EpilogueLoweringStatus) {
2927 case CM_EpilogueAllowed:
2928 return Config.computeFeasibleMaxVF(MaxTC, UserVF, UserIC, false,
2929 requiresScalarEpilogue(false));
2930 case CM_EpilogueNotAllowedFoldTail:
2931 [[fallthrough]];
2932 case CM_EpilogueNotNeededFoldTail:
2933 LLVM_DEBUG(dbgs() << "LV: tail-folding hint/switch found.\n"
2934 << "LV: Not allowing epilogue, creating tail-folded "
2935 << "vector loop.\n");
2936 break;
2937 case CM_EpilogueNotAllowedLowTripLoop:
2938 // fallthrough as a special case of OptForSize
2939 case CM_EpilogueNotAllowedOptSize:
2940 if (EpilogueLoweringStatus == CM_EpilogueNotAllowedOptSize)
2941 LLVM_DEBUG(dbgs() << "LV: Not allowing epilogue due to -Os/-Oz.\n");
2942 else
2943 LLVM_DEBUG(dbgs() << "LV: Not allowing epilogue due to low trip "
2944 << "count.\n");
2945
2946 // Bail if runtime checks are required, which are not good when optimising
2947 // for size.
2948 if (Config.runtimeChecksRequired())
2949 return FixedScalableVFPair::getNone();
2950
2951 break;
2952 }
2953
2954 // Now try the tail folding
2955
2956 // Invalidate interleave groups that require an epilogue if we can't mask
2957 // the interleave-group.
2958 if (!useMaskedInterleavedAccesses(TTI)) {
2959 // Note: There is no need to invalidate any cost modeling decisions here, as
2960 // none were taken so far (see assertion above).
2961 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
2962 }
2963
2964 FixedScalableVFPair MaxFactors = Config.computeFeasibleMaxVF(
2965 MaxTC, UserVF, UserIC, true, requiresScalarEpilogue(true));
2966
2967 // Avoid tail folding if the trip count is known to be a multiple of any VF
2968 // we choose.
2969 std::optional<unsigned> MaxPowerOf2RuntimeVF =
2970 MaxFactors.FixedVF.getFixedValue();
2971 if (MaxFactors.ScalableVF) {
2972 std::optional<unsigned> MaxVScale = getMaxVScale(*TheFunction, TTI);
2973 if (MaxVScale) {
2974 MaxPowerOf2RuntimeVF = std::max<unsigned>(
2975 *MaxPowerOf2RuntimeVF,
2976 *MaxVScale * MaxFactors.ScalableVF.getKnownMinValue());
2977 } else
2978 MaxPowerOf2RuntimeVF = std::nullopt; // Stick with tail-folding for now.
2979 }
2980
2981 auto NoScalarEpilogueNeeded = [this, &UserIC](unsigned MaxVF) {
2982 // Return false if the loop is neither a single-latch-exit loop nor an
2983 // early-exit loop as tail-folding is not supported in that case.
2984 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch() &&
2985 !Legal->hasUncountableEarlyExit())
2986 return false;
2987 unsigned MaxVFtimesIC = UserIC ? MaxVF * UserIC : MaxVF;
2988 ScalarEvolution *SE = PSE.getSE();
2989 // Calling getSymbolicMaxBackedgeTakenCount enables support for loops
2990 // with uncountable exits. For countable loops, the symbolic maximum must
2991 // remain identical to the known back-edge taken count.
2992 const SCEV *BackedgeTakenCount = PSE.getSymbolicMaxBackedgeTakenCount();
2993 assert((Legal->hasUncountableEarlyExit() ||
2994 BackedgeTakenCount == PSE.getBackedgeTakenCount()) &&
2995 "Invalid loop count");
2996 const SCEV *ExitCount = SE->getAddExpr(
2997 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
2998 const SCEV *Rem = SE->getURemExpr(
2999 SE->applyLoopGuards(ExitCount, TheLoop),
3000 SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
3001 return Rem->isZero();
3002 };
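// Illustrative example (not from the source): with a backedge-taken count
// of 15 the exit count is 16; for MaxVF = 8 and UserIC = 2, MaxVFtimesIC is
// 16 and 16 urem 16 == 0, so the vector loop needs neither a scalar
// epilogue nor tail folding.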
3003
3004 if (MaxPowerOf2RuntimeVF > 0u) {
3005 assert((UserVF.isNonZero() || isPowerOf2_32(*MaxPowerOf2RuntimeVF)) &&
3006 "MaxFixedVF must be a power of 2");
3007 if (NoScalarEpilogueNeeded(*MaxPowerOf2RuntimeVF)) {
3008 // Accept MaxFixedVF if we do not have a tail.
3009 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
3010 return MaxFactors;
3011 }
3012 }
3013
3014 auto ExpectedTC = getSmallBestKnownTC(PSE, TheLoop);
3015 if (ExpectedTC && ExpectedTC->isFixed() &&
3016 ExpectedTC->getFixedValue() <=
3017 TTI.getMinTripCountTailFoldingThreshold()) {
3018 if (MaxPowerOf2RuntimeVF > 0u) {
3019 // If we have a low-trip-count, and the fixed-width VF is known to divide
3020 // the trip count but the scalable factor does not, use the fixed-width
3021 // factor in preference to allow the generation of a non-predicated loop.
3022 if (EpilogueLoweringStatus == CM_EpilogueNotAllowedLowTripLoop &&
3023 NoScalarEpilogueNeeded(MaxFactors.FixedVF.getFixedValue())) {
3024 LLVM_DEBUG(dbgs() << "LV: Picking a fixed-width so that no tail will "
3025 "remain for any chosen VF.\n");
3026 MaxFactors.ScalableVF = ElementCount::getScalable(0);
3027 return MaxFactors;
3028 }
3029 }
3030
3032 "The trip count is below the minial threshold value.",
3033 "loop trip count is too low, avoiding vectorization", "LowTripCount",
3034 ORE, TheLoop);
3035 return FixedScalableVFPair::getNone();
3036 }
3037
3038 // If we don't know the precise trip count, or if the trip count that we
3039 // found modulo the vectorization factor is not zero, try to fold the tail
3040 // by masking.
3041 // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
3042 bool ContainsScalableVF = MaxFactors.ScalableVF.isNonZero();
3043 setTailFoldingStyle(ContainsScalableVF, UserIC);
3044 if (foldTailByMasking()) {
3045 if (foldTailWithEVL()) {
3046 LLVM_DEBUG(
3047 dbgs()
3048 << "LV: tail is folded with EVL, forcing unroll factor to be 1. Will "
3049 "try to generate VP Intrinsics with scalable vector "
3050 "factors only.\n");
3051 // Tail folded loop using VP intrinsics restricts the VF to be scalable
3052 // for now.
3053 // TODO: extend it for fixed vectors, if required.
3054 assert(ContainsScalableVF && "Expected scalable vector factor.");
3055
3056 MaxFactors.FixedVF = ElementCount::getFixed(1);
3057 }
3058 return MaxFactors;
3059 }
3060
3061 // If there was a tail-folding hint/switch, but we can't fold the tail by
3062 // masking, fallback to a vectorization with an epilogue.
3063 if (EpilogueLoweringStatus == CM_EpilogueNotNeededFoldTail) {
3064 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with an "
3065 "epilogue instead.\n");
3066 EpilogueLoweringStatus = CM_EpilogueAllowed;
3067 return MaxFactors;
3068 }
3069
3070 if (EpilogueLoweringStatus == CM_EpilogueNotAllowedFoldTail) {
3071 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
3072 return FixedScalableVFPair::getNone();
3073 }
3074
3075 if (TC.isZero()) {
3077 "unable to calculate the loop count due to complex control flow",
3078 "UnknownLoopCountComplexCFG", ORE, TheLoop);
3080 }
3081
3083 "Cannot optimize for size and vectorize at the same time.",
3084 "cannot optimize for size and vectorize at the same time. "
3085 "Enable vectorization of this loop with '#pragma clang loop "
3086 "vectorize(enable)' when compiling with -Os/-Oz",
3087 "NoTailLoopWithOptForSize", ORE, TheLoop);
3088 return FixedScalableVFPair::getNone();
3089 }
3090
3091bool LoopVectorizationPlanner::isMoreProfitable(const VectorizationFactor &A,
3092 const VectorizationFactor &B,
3093 const unsigned MaxTripCount,
3094 bool HasTail,
3095 bool IsEpilogue) const {
3096 InstructionCost CostA = A.Cost;
3097 InstructionCost CostB = B.Cost;
3098
3099 // When there is a hint to always prefer scalable vectors, honour that hint.
3100 if (Hints.isScalableVectorizationAlwaysPreferred())
3101 if (A.Width.isScalable() && CostA.isValid() && !B.Width.isScalable() &&
3102 !B.Width.isScalar())
3103 return true;
3104
3105 // Improve estimate for the vector width if it is scalable.
3106 unsigned EstimatedWidthA = A.Width.getKnownMinValue();
3107 unsigned EstimatedWidthB = B.Width.getKnownMinValue();
3108 if (std::optional<unsigned> VScale = Config.getVScaleForTuning()) {
3109 if (A.Width.isScalable())
3110 EstimatedWidthA *= *VScale;
3111 if (B.Width.isScalable())
3112 EstimatedWidthB *= *VScale;
3113 }
3114
3115 // When optimizing for size choose whichever is smallest, which will be the
3116 // one with the smallest cost for the whole loop. On a tie pick the larger
3117 // vector width, on the assumption that throughput will be greater.
3118 if (Config.CostKind == TTI::TCK_CodeSize)
3119 return CostA < CostB ||
3120 (CostA == CostB && EstimatedWidthA > EstimatedWidthB);
3121
3122 // Assume vscale may be larger than 1 (or the value being tuned for),
3123 // so that scalable vectorization is slightly favorable over fixed-width
3124 // vectorization.
3125 bool PreferScalable = !TTI.preferFixedOverScalableIfEqualCost(IsEpilogue) &&
3126 A.Width.isScalable() && !B.Width.isScalable();
3127
3128 auto CmpFn = [PreferScalable](const InstructionCost &LHS,
3129 const InstructionCost &RHS) {
3130 return PreferScalable ? LHS <= RHS : LHS < RHS;
3131 };
3132
3133 // To avoid the need for FP division:
3134 // (CostA / EstimatedWidthA) < (CostB / EstimatedWidthB)
3135 // <=> (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA)
3136 bool LowerCostWithoutTC =
3137 CmpFn(CostA * EstimatedWidthB, CostB * EstimatedWidthA);
3138 if (!MaxTripCount)
3139 return LowerCostWithoutTC;
3140
3141 auto GetCostForTC = [MaxTripCount, HasTail](unsigned VF,
3142 InstructionCost VectorCost,
3143 InstructionCost ScalarCost) {
3144 // If the trip count is a known (possibly small) constant, the trip count
3145 // will be rounded up to an integer number of iterations under
3146 // FoldTailByMasking. The total cost in that case will be
3147 // VecCost*ceil(TripCount/VF). When not folding the tail, the total
3148 // cost will be VecCost*floor(TC/VF) + ScalarCost*(TC%VF). There will be
3149 // some extra overheads, but for the purpose of comparing the costs of
3150 // different VFs we can use this to compare the total loop-body cost
3151 // expected after vectorization.
3152 if (HasTail)
3153 return VectorCost * (MaxTripCount / VF) +
3154 ScalarCost * (MaxTripCount % VF);
3155 return VectorCost * divideCeil(MaxTripCount, VF);
3156 };
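// A worked example with hypothetical costs: for MaxTripCount = 10 and VF = 4
// with VectorCost = 8 and ScalarCost = 1, a loop with a scalar tail costs
// 8 * floor(10 / 4) + 1 * (10 % 4) = 18, while the tail-folded variant costs
// 8 * ceil(10 / 4) = 24.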
3157
3158 auto RTCostA = GetCostForTC(EstimatedWidthA, CostA, A.ScalarCost);
3159 auto RTCostB = GetCostForTC(EstimatedWidthB, CostB, B.ScalarCost);
3160 bool LowerCostWithTC = CmpFn(RTCostA, RTCostB);
3161 LLVM_DEBUG(if (LowerCostWithTC != LowerCostWithoutTC) {
3162 dbgs() << "LV: VF " << (LowerCostWithTC ? A.Width : B.Width)
3163 << " has lower cost than VF "
3164 << (LowerCostWithTC ? B.Width : A.Width)
3165 << " when taking the cost of the remaining scalar loop iterations "
3166 "into consideration for a maximum trip count of "
3167 << MaxTripCount << ".\n";
3168 });
3169 return LowerCostWithTC;
3170}
3171
3172bool LoopVectorizationPlanner::isMoreProfitable(const VectorizationFactor &A,
3173 const VectorizationFactor &B,
3174 bool HasTail,
3175 bool IsEpilogue) const {
3176 const unsigned MaxTripCount = PSE.getSmallConstantMaxTripCount();
3177 return LoopVectorizationPlanner::isMoreProfitable(A, B, MaxTripCount, HasTail,
3178 IsEpilogue);
3179}
3180
3181void LoopVectorizationPlanner::emitInvalidCostRemarks(
3182 OptimizationRemarkEmitter *ORE) {
3183 using RecipeVFPair = std::pair<VPRecipeBase *, ElementCount>;
3184 SmallVector<RecipeVFPair> InvalidCosts;
3185 for (const auto &Plan : VPlans) {
3186 for (ElementCount VF : Plan->vectorFactors()) {
3187 // The VPlan-based cost model is designed for computing vector cost.
3188 // Querying the VPlan-based cost model with a scalar VF would cause
3189 // errors because we expect the VF to be a vector for most of the widen
3190 // recipes.
3191 if (VF.isScalar())
3192 continue;
3193
3194 VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, Config.CostKind, CM.PSE,
3195 OrigLoop);
3196 precomputeCosts(*Plan, VF, CostCtx);
3197 auto Iter = vp_depth_first_deep(Plan->getVectorLoopRegion()->getEntry());
3198 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
3199 for (auto &R : *VPBB) {
3200 if (!R.cost(VF, CostCtx).isValid())
3201 InvalidCosts.emplace_back(&R, VF);
3202 }
3203 }
3204 }
3205 }
3206 if (InvalidCosts.empty())
3207 return;
3208
3209 // Emit a report of VFs with invalid costs in the loop.
3210
3211 // Group the remarks per recipe, keeping the recipe order from InvalidCosts.
3212 DenseMap<VPRecipeBase *, unsigned> Numbering;
3213 unsigned I = 0;
3214 for (auto &Pair : InvalidCosts)
3215 if (Numbering.try_emplace(Pair.first, I).second)
3216 ++I;
3217
3218 // Sort the list, first on recipe(number) then on VF.
3219 sort(InvalidCosts, [&Numbering](RecipeVFPair &A, RecipeVFPair &B) {
3220 unsigned NA = Numbering[A.first];
3221 unsigned NB = Numbering[B.first];
3222 if (NA != NB)
3223 return NA < NB;
3224 return ElementCount::isKnownLT(A.second, B.second);
3225 });
3226
3227 // For a list of ordered recipe-VF pairs:
3228 // [(load, VF1), (load, VF2), (store, VF1)]
3229 // group the recipes together to emit separate remarks for:
3230 // load (VF1, VF2)
3231 // store (VF1)
3232 auto Tail = ArrayRef<RecipeVFPair>(InvalidCosts);
3233 auto Subset = ArrayRef<RecipeVFPair>();
3234 do {
3235 if (Subset.empty())
3236 Subset = Tail.take_front(1);
3237
3238 VPRecipeBase *R = Subset.front().first;
3239
3240 unsigned Opcode =
3241 TypeSwitch<const VPRecipeBase *, unsigned>(R)
3242 .Case([](const VPHeaderPHIRecipe *R) { return Instruction::PHI; })
3243 .Case(
3244 [](const VPWidenStoreRecipe *R) { return Instruction::Store; })
3245 .Case([](const VPWidenLoadRecipe *R) { return Instruction::Load; })
3246 .Case<VPWidenCallRecipe, VPWidenIntrinsicRecipe>(
3247 [](const auto *R) { return Instruction::Call; })
3248 .Case<VPInstruction, VPWidenRecipe, VPReplicateRecipe,
3249 VPWidenCastRecipe>(
3250 [](const auto *R) { return R->getOpcode(); })
3251 .Case([](const VPInterleaveRecipe *R) {
3252 return R->getStoredValues().empty() ? Instruction::Load
3253 : Instruction::Store;
3254 })
3255 .Case([](const VPReductionRecipe *R) {
3256 return RecurrenceDescriptor::getOpcode(R->getRecurrenceKind());
3257 });
3258
3259 // If the next recipe is different, or if there are no other pairs,
3260 // emit a remark for the collated subset. e.g.
3261 // [(load, VF1), (load, VF2)]
3262 // to emit:
3263 // remark: invalid costs for 'load' at VF=(VF1, VF2)
3264 if (Subset == Tail || Tail[Subset.size()].first != R) {
3265 std::string OutString;
3266 raw_string_ostream OS(OutString);
3267 assert(!Subset.empty() && "Unexpected empty range");
3268 OS << "Recipe with invalid costs prevented vectorization at VF=(";
3269 for (const auto &Pair : Subset)
3270 OS << (Pair.second == Subset.front().second ? "" : ", ") << Pair.second;
3271 OS << "):";
3272 if (Opcode == Instruction::Call) {
3273 StringRef Name = "";
3274 if (auto *Int = dyn_cast<VPWidenIntrinsicRecipe>(R)) {
3275 Name = Int->getIntrinsicName();
3276 } else {
3277 auto *WidenCall = dyn_cast<VPWidenCallRecipe>(R);
3278 Function *CalledFn =
3279 WidenCall ? WidenCall->getCalledScalarFunction()
3280 : cast<Function>(R->getOperand(R->getNumOperands() - 1)
3281 ->getLiveInIRValue());
3282 Name = CalledFn->getName();
3283 }
3284 OS << " call to " << Name;
3285 } else
3286 OS << " " << Instruction::getOpcodeName(Opcode);
3287 reportVectorizationInfo(OutString, "InvalidCost", ORE, OrigLoop, nullptr,
3288 R->getDebugLoc());
3289 Tail = Tail.drop_front(Subset.size());
3290 Subset = {};
3291 } else
3292 // Grow the subset by one element
3293 Subset = Tail.take_front(Subset.size() + 1);
3294 } while (!Tail.empty());
3295}
3296
3297/// Check if any recipe of \p Plan will generate a vector value, which will be
3298/// assigned a vector register.
3299static bool willGenerateVectors(VPlan &Plan, ElementCount VF,
3300 const TargetTransformInfo &TTI) {
3301 assert(VF.isVector() && "Checking a scalar VF?");
3302 VPTypeAnalysis TypeInfo(Plan);
3303 DenseSet<VPRecipeBase *> EphemeralRecipes;
3304 collectEphemeralRecipesForVPlan(Plan, EphemeralRecipes);
3305 // Set of already visited types.
3306 DenseSet<Type *> Visited;
3307 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
3308 vp_depth_first_deep(Plan.getVectorLoopRegion()->getEntry()))) {
3309 for (VPRecipeBase &R : *VPBB) {
3310 if (EphemeralRecipes.contains(&R))
3311 continue;
3312 // Continue early if the recipe is considered to not produce a vector
3313 // result. Note that this includes VPInstruction where some opcodes may
3314 // produce a vector, to preserve existing behavior as VPInstructions model
3315 // aspects not directly mapped to existing IR instructions.
3316 switch (R.getVPRecipeID()) {
3317 case VPRecipeBase::VPDerivedIVSC:
3318 case VPRecipeBase::VPScalarIVStepsSC:
3319 case VPRecipeBase::VPReplicateSC:
3320 case VPRecipeBase::VPInstructionSC:
3321 case VPRecipeBase::VPCurrentIterationPHISC:
3322 case VPRecipeBase::VPVectorPointerSC:
3323 case VPRecipeBase::VPVectorEndPointerSC:
3324 case VPRecipeBase::VPExpandSCEVSC:
3325 case VPRecipeBase::VPPredInstPHISC:
3326 case VPRecipeBase::VPBranchOnMaskSC:
3327 continue;
3328 case VPRecipeBase::VPReductionSC:
3329 case VPRecipeBase::VPActiveLaneMaskPHISC:
3330 case VPRecipeBase::VPWidenCallSC:
3331 case VPRecipeBase::VPWidenCanonicalIVSC:
3332 case VPRecipeBase::VPWidenCastSC:
3333 case VPRecipeBase::VPWidenGEPSC:
3334 case VPRecipeBase::VPWidenIntrinsicSC:
3335 case VPRecipeBase::VPWidenSC:
3336 case VPRecipeBase::VPBlendSC:
3337 case VPRecipeBase::VPFirstOrderRecurrencePHISC:
3338 case VPRecipeBase::VPHistogramSC:
3339 case VPRecipeBase::VPWidenPHISC:
3340 case VPRecipeBase::VPWidenIntOrFpInductionSC:
3341 case VPRecipeBase::VPWidenPointerInductionSC:
3342 case VPRecipeBase::VPReductionPHISC:
3343 case VPRecipeBase::VPInterleaveEVLSC:
3344 case VPRecipeBase::VPInterleaveSC:
3345 case VPRecipeBase::VPWidenLoadEVLSC:
3346 case VPRecipeBase::VPWidenLoadSC:
3347 case VPRecipeBase::VPWidenStoreEVLSC:
3348 case VPRecipeBase::VPWidenStoreSC:
3349 break;
3350 default:
3351 llvm_unreachable("unhandled recipe");
3352 }
3353
3354 auto WillGenerateTargetVectors = [&TTI, VF](Type *VectorTy) {
3355 unsigned NumLegalParts = TTI.getNumberOfParts(VectorTy);
3356 if (!NumLegalParts)
3357 return false;
3358 if (VF.isScalable()) {
3359 // <vscale x 1 x iN> is assumed to be profitable over iN because
3360 // scalable registers are a distinct register class from scalar
3361 // ones. If we ever find a target which wants to lower scalable
3362 // vectors back to scalars, we'll need to update this code to
3363 // explicitly ask TTI about the register class uses for each part.
3364 return NumLegalParts <= VF.getKnownMinValue();
3365 }
3366 // Two or more elements that share a register are considered vectorized.
3367 return NumLegalParts < VF.getFixedValue();
3368 };
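// As an illustration, with VF = 4 and i32 elements on a target with 128-bit
// vector registers, <4 x i32> legalizes to a single part (1 < 4), so it
// counts as a genuine vector. A hypothetical <4 x i256> would split into 8
// parts (8 >= 4) and would effectively be scalarized.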
3369
3370 // If the recipe has no defs and is not a store (e.g., a branch), continue - no value to check.
3371 if (R.getNumDefinedValues() == 0 &&
3372 !isa<VPWidenStoreRecipe, VPWidenStoreEVLRecipe, VPInterleaveRecipe>(&R))
3373 continue;
3374 // For multi-def recipes (currently only interleaved loads), it suffices
3375 // to check the first def only.
3376 // For stores, check their stored value; for interleaved stores it
3377 // suffices to check the first stored value only. In all cases this is
3378 // the second operand.
3379 VPValue *ToCheck =
3380 R.getNumDefinedValues() >= 1 ? R.getVPValue(0) : R.getOperand(1);
3381 Type *ScalarTy = TypeInfo.inferScalarType(ToCheck);
3382 if (!Visited.insert({ScalarTy}).second)
3383 continue;
3384 Type *WideTy = toVectorizedTy(ScalarTy, VF);
3385 if (any_of(getContainedTypes(WideTy), WillGenerateTargetVectors))
3386 return true;
3387 }
3388 }
3389
3390 return false;
3391}
3392
3393static bool hasReplicatorRegion(VPlan &Plan) {
3394 return any_of(VPBlockUtils::blocksOnly<VPRegionBlock>(vp_depth_first_deep(
3395 Plan.getVectorLoopRegion()->getEntry())),
3396 [](auto *VPRB) { return VPRB->isReplicator(); });
3397}
3398
3399/// Returns true if the VPlan contains a VPReductionPHIRecipe with
3400/// FindLast recurrence kind.
3401static bool hasFindLastReductionPhi(VPlan &Plan) {
3402 return any_of(Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis(),
3403 [](VPRecipeBase &R) {
3404 auto *RedPhi = dyn_cast<VPReductionPHIRecipe>(&R);
3405 return RedPhi &&
3406 RecurrenceDescriptor::isFindLastRecurrenceKind(
3407 RedPhi->getRecurrenceKind());
3408 });
3409}
3410
3411/// Returns true if the VPlan contains header phi recipes that are not currently
3412/// supported for epilogue vectorization.
3413static bool hasUnsupportedHeaderPhiRecipe(VPlan &Plan) {
3414 return any_of(
3415 Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis(),
3416 [](VPRecipeBase &R) {
3417 switch (R.getVPRecipeID()) {
3418 case VPRecipeBase::VPFirstOrderRecurrencePHISC:
3419 // TODO: Add support for fixed-order recurrences.
3420 return true;
3421 case VPRecipeBase::VPWidenIntOrFpInductionSC:
3422 return !cast<VPWidenIntOrFpInductionRecipe>(&R)->getPHINode();
3423 case VPRecipeBase::VPReductionPHISC: {
3424 auto *RedPhi = cast<VPReductionPHIRecipe>(&R);
3425 // TODO: Support FMinNum/FMaxNum, FindLast reductions, and reductions
3426 // without underlying values.
3427 RecurKind Kind = RedPhi->getRecurrenceKind();
3428 if (RecurrenceDescriptor::isFPMinMaxNumRecurrenceKind(Kind) ||
3429 RecurrenceDescriptor::isFindLastRecurrenceKind(Kind) ||
3430 !RedPhi->getUnderlyingValue())
3431 return true;
3432 // TODO: Add support for FindIV reductions with sunk expressions: the
3433 // resume value from the main loop is in expression domain (e.g.,
3434 // mul(ReducedIV, 3)), but the epilogue tracks raw IV values. A sunk
3435 // expression is identified by a non-VPInstruction user of
3436 // ComputeReductionResult.
3437 if (RecurrenceDescriptor::isFindIVRecurrenceKind(Kind)) {
3438 auto *RdxResult = vputils::findComputeReductionResult(RedPhi);
3439 assert(RdxResult &&
3440 "FindIV reduction must have ComputeReductionResult");
3441 return any_of(RdxResult->users(),
3442 std::not_fn(IsaPred<VPInstruction>));
3443 }
3444 return false;
3445 }
3446 default:
3447 return false;
3448 };
3449 });
3450}
3451
3452bool LoopVectorizationPlanner::isCandidateForEpilogueVectorization(
3453 VPlan &MainPlan) const {
3454 // Bail out if the plan contains header phi recipes not yet supported
3455 // for epilogue vectorization.
3456 if (hasUnsupportedHeaderPhiRecipe(MainPlan))
3457 return false;
3458
3459 // Epilogue vectorization code has not been audited to ensure it handles
3460 // non-latch exits properly. It may be fine, but it needs to be audited
3461 // and tested.
3462 // TODO: Add support for loops with an early exit.
3463 if (OrigLoop->getExitingBlock() != OrigLoop->getLoopLatch())
3464 return false;
3465
3466 return true;
3467}
3468
3469bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
3470 const ElementCount VF, const unsigned IC) const {
3471 // FIXME: We need a much better cost-model to take different parameters such
3472 // as register pressure, code size increase and cost of extra branches into
3473 // account. For now we apply a very crude heuristic and only consider loops
3474 // with vectorization factors larger than a certain value.
3475
3476 // Allow the target to opt out.
3477 if (!TTI.preferEpilogueVectorization(VF * IC))
3478 return false;
3479
3480 unsigned MinVFThreshold = EpilogueVectorizationMinVF.getNumOccurrences() > 0
3481 ? EpilogueVectorizationMinVF
3482 : TTI.getEpilogueVectorizationMinVF();
3483 return estimateElementCount(VF * IC, Config.getVScaleForTuning()) >=
3484 MinVFThreshold;
3485}
3486
3488 VPlan &MainPlan, ElementCount MainLoopVF, unsigned IC) {
3489 if (!EnableEpilogueVectorization) {
3490 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n");
3491 return nullptr;
3492 }
3493
3494 if (!CM.isEpilogueAllowed()) {
3495 LLVM_DEBUG(dbgs() << "LEV: Unable to vectorize epilogue because no "
3496 "epilogue is allowed.\n");
3497 return nullptr;
3498 }
3499
3500 // Not really a cost consideration, but check for unsupported cases here to
3501 // simplify the logic.
3502 if (!isCandidateForEpilogueVectorization(MainPlan)) {
3503 LLVM_DEBUG(dbgs() << "LEV: Unable to vectorize epilogue because the loop "
3504 "is not a supported candidate.\n");
3505 return nullptr;
3506 }
3507
3508 if (EpilogueVectorizationForceVF.getNumOccurrences()) {
3509 if (EpilogueVectorizationForceVF <=
3510 IC * estimateElementCount(MainLoopVF, Config.getVScaleForTuning())) {
3511 // Note that the main loop leaves IC * MainLoopVF iterations iff a scalar
3512 // epilogue is required, but then the epilogue loop also requires a scalar
3513 // epilogue.
3514 LLVM_DEBUG(dbgs() << "LEV: Forced epilogue VF results in dead epilogue "
3515 "vector loop, skipping vectorizing epilogue.\n");
3516 return nullptr;
3517 }
3518
3519 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n");
3520 ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF);
3521 if (hasPlanWithVF(ForcedEC)) {
3522 std::unique_ptr<VPlan> Clone(getPlanFor(ForcedEC).duplicate());
3523 Clone->setVF(ForcedEC);
3524 return Clone;
3525 }
3526
3527 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization forced factor is not "
3528 "viable.\n");
3529 return nullptr;
3530 }
3531
3532 if (OrigLoop->getHeader()->getParent()->hasOptSize()) {
3533 LLVM_DEBUG(
3534 dbgs() << "LEV: Epilogue vectorization skipped due to opt for size.\n");
3535 return nullptr;
3536 }
3537
3538 if (!CM.isEpilogueVectorizationProfitable(MainLoopVF, IC)) {
3539 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
3540 "this loop\n");
3541 return nullptr;
3542 }
3543
3544 // Check if a plan's vector loop processes fewer iterations than VF (e.g. when
3545 // interleave groups have been narrowed by narrowInterleaveGroups) and return
3546 // the adjusted, effective VF.
3547 using namespace VPlanPatternMatch;
3548 auto GetEffectiveVF = [](VPlan &Plan, ElementCount VF) -> ElementCount {
3549 auto *Exiting = Plan.getVectorLoopRegion()->getExitingBasicBlock();
3550 if (match(&Exiting->back(),
3551 m_BranchOnCount(m_Add(m_CanonicalIV(), m_Specific(&Plan.getUF())),
3552 m_VPValue())))
3553 return ElementCount::get(1, VF.isScalable());
3554 return VF;
3555 };
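// For example, if narrowInterleaveGroups rewrote a plan so that the
// canonical IV advances by UF rather than VF * UF per iteration, the vector
// loop effectively consumes a single element per lane each iteration, and
// the effective VF reported here is 1 (scalable if the original VF was).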
3556
3557 // Check if the main loop processes fewer than MainLoopVF elements per
3558 // iteration (e.g. due to narrowing interleave groups). Adjust MainLoopVF
3559 // as needed.
3560 MainLoopVF = GetEffectiveVF(MainPlan, MainLoopVF);
3561
3562 // If MainLoopVF = vscale x 2, and vscale is expected to be 4, then we know
3563 // the main loop handles 8 lanes per iteration. We could still benefit from
3564 // vectorizing the epilogue loop with VF=4.
3565 ElementCount EstimatedRuntimeVF = ElementCount::getFixed(
3566 estimateElementCount(MainLoopVF, Config.getVScaleForTuning()));
3567
3568 Type *TCType = Legal->getWidestInductionType();
3569 const SCEV *RemainingIterations = nullptr;
3570 unsigned MaxTripCount = 0;
3571 const SCEV *TC = vputils::getSCEVExprForVPValue(MainPlan.getTripCount(), PSE);
3572 assert(!isa<SCEVCouldNotCompute>(TC) && "Trip count SCEV must be computable");
3573 const SCEV *KnownMinTC;
3574 bool ScalableTC = match(TC, m_scev_c_Mul(m_SCEV(KnownMinTC), m_SCEVVScale()));
3575 bool ScalableRemIter = false;
3576 ScalarEvolution &SE = *PSE.getSE();
3577 // Use versions of TC and VF in which both are either scalable or fixed.
3578 if (ScalableTC == MainLoopVF.isScalable()) {
3579 ScalableRemIter = ScalableTC;
3580 RemainingIterations =
3581 SE.getURemExpr(TC, SE.getElementCount(TCType, MainLoopVF * IC));
3582 } else if (ScalableTC) {
3583 const SCEV *EstimatedTC = SE.getMulExpr(
3584 KnownMinTC,
3585 SE.getConstant(TCType, Config.getVScaleForTuning().value_or(1)));
3586 RemainingIterations = SE.getURemExpr(
3587 EstimatedTC, SE.getElementCount(TCType, MainLoopVF * IC));
3588 } else
3589 RemainingIterations =
3590 SE.getURemExpr(TC, SE.getElementCount(TCType, EstimatedRuntimeVF * IC));
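// Illustrative numbers: for TC = 1000 with a fixed MainLoopVF = 8 and
// IC = 2, RemainingIterations = 1000 % (8 * 2) = 8, so only epilogue VFs of
// at most 8 can be useful; anything wider would leave the epilogue vector
// loop dead.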
3591
3592 // No iterations left to process in the epilogue.
3593 if (RemainingIterations->isZero())
3594 return nullptr;
3595
3596 if (MainLoopVF.isFixed()) {
3597 MaxTripCount = MainLoopVF.getFixedValue() * IC - 1;
3598 if (SE.isKnownPredicate(CmpInst::ICMP_ULT, RemainingIterations,
3599 SE.getConstant(TCType, MaxTripCount))) {
3600 MaxTripCount = SE.getUnsignedRangeMax(RemainingIterations).getZExtValue();
3601 }
3602 LLVM_DEBUG(dbgs() << "LEV: Maximum Trip Count for Epilogue: "
3603 << MaxTripCount << "\n");
3604 }
3605
3606 auto SkipVF = [&](const SCEV *VF, const SCEV *RemIter) -> bool {
3607 return SE.isKnownPredicate(CmpInst::ICMP_UGT, VF, RemIter);
3608 };
3609 VectorizationFactor Result = VectorizationFactor::Disabled();
3610 VPlan *BestPlan = nullptr;
3611 for (auto &NextVF : ProfitableVFs) {
3612 // Skip candidate VFs without a corresponding VPlan.
3613 if (!hasPlanWithVF(NextVF.Width))
3614 continue;
3615
3616 VPlan &CurrentPlan = getPlanFor(NextVF.Width);
3617 ElementCount EffectiveVF = GetEffectiveVF(CurrentPlan, NextVF.Width);
3618 // Skip candidate VFs with widths >= the (estimated) runtime VF (scalable
3619 // vectors) or > the VF of the main loop (fixed vectors).
3620 if ((!EffectiveVF.isScalable() && MainLoopVF.isScalable() &&
3621 ElementCount::isKnownGE(EffectiveVF, EstimatedRuntimeVF)) ||
3622 (EffectiveVF.isScalable() &&
3623 ElementCount::isKnownGE(EffectiveVF, MainLoopVF)) ||
3624 (!EffectiveVF.isScalable() && !MainLoopVF.isScalable() &&
3625 ElementCount::isKnownGT(EffectiveVF, MainLoopVF)))
3626 continue;
3627
3628 // If EffectiveVF is greater than the number of remaining iterations, the
3629 // epilogue loop would be dead. Skip such factors. If the epilogue plan
3630 // also has narrowed interleave groups, use the effective VF since
3631 // the epilogue step will be reduced to its IC.
3632 // TODO: We should also consider comparing against a scalable
3633 // RemainingIterations once SCEV is able to evaluate non-canonical
3634 // vscale-based expressions.
3635 if (!ScalableRemIter) {
3636 // Handle the case where EffectiveVF and RemainingIterations are in
3637 // different numerical spaces.
3638 if (EffectiveVF.isScalable())
3639 EffectiveVF = ElementCount::getFixed(
3640 estimateElementCount(EffectiveVF, Config.getVScaleForTuning()));
3641 if (SkipVF(SE.getElementCount(TCType, EffectiveVF), RemainingIterations))
3642 continue;
3643 }
3644
3645 if (Result.Width.isScalar() ||
3646 isMoreProfitable(NextVF, Result, MaxTripCount, !CM.foldTailByMasking(),
3647 /*IsEpilogue*/ true)) {
3648 Result = NextVF;
3649 BestPlan = &CurrentPlan;
3650 }
3651 }
3652
3653 if (!BestPlan)
3654 return nullptr;
3655
3656 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
3657 << Result.Width << "\n");
3658 std::unique_ptr<VPlan> Clone(BestPlan->duplicate());
3659 Clone->setVF(Result.Width);
3660 return Clone;
3661}
3662
3663 unsigned
3664LoopVectorizationPlanner::selectInterleaveCount(VPlan &Plan, ElementCount VF,
3665 InstructionCost LoopCost) {
3666 // -- The interleave heuristics --
3667 // We interleave the loop in order to expose ILP and reduce the loop overhead.
3668 // There are many micro-architectural considerations that we can't predict
3669 // at this level. For example, frontend pressure (on decode or fetch) due to
3670 // code size, or the number and capabilities of the execution ports.
3671 //
3672 // We use the following heuristics to select the interleave count:
3673 // 1. If the code has reductions, then we interleave to break the cross
3674 // iteration dependency.
3675 // 2. If the loop is really small, then we interleave to reduce the loop
3676 // overhead.
3677 // 3. We don't interleave if we think that we will spill registers to memory
3678 // due to the increased register pressure.
3679
3680 // Only interleave tail-folded loops if wide lane masks are requested, as the
3681 // overhead of multiple instructions to calculate the predicate is likely
3682 // not beneficial. If an epilogue is not allowed for any other reason,
3683 // do not interleave.
3684 if (!CM.isEpilogueAllowed() &&
3685 !(CM.preferTailFoldedLoop() && CM.useWideActiveLaneMask()))
3686 return 1;
3687
3688 // Interleaving is not supported when the step is variable-length.
3689 if (CM.foldTailWithEVL()) {
3690 LLVM_DEBUG(dbgs() << "LV: Loop requires variable-length step. "
3691 "Unroll factor forced to be 1.\n");
3692 return 1;
3693 }
3694
3695 // The maximum safe dependence distance also limits the interleave count.
3696 if (!Legal->isSafeForAnyVectorWidth())
3697 return 1;
3698
3699 // We don't attempt to perform interleaving for loops with uncountable early
3700 // exits because the VPInstruction::AnyOf code cannot currently handle
3701 // multiple parts.
3702 if (Plan.hasEarlyExit())
3703 return 1;
3704
3705 const bool HasReductions =
3706 any_of(Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis(),
3707 IsaPred<VPReductionPHIRecipe>);
3708
3709 // FIXME: implement interleaving for FindLast transform correctly.
3710 if (hasFindLastReductionPhi(Plan))
3711 return 1;
3712
3713 VPRegisterUsage R =
3714 calculateRegisterUsageForPlan(Plan, {VF}, TTI, CM.ValuesToIgnore)[0];
3715
3716 // If we did not calculate the cost for VF (because the user selected the VF)
3717 // then we calculate the cost of VF here.
3718 if (LoopCost == 0) {
3719 if (VF.isScalar())
3720 LoopCost = CM.expectedCost(VF);
3721 else
3722 LoopCost = cost(Plan, VF, &R);
3723 assert(LoopCost.isValid() && "Expected to have chosen a VF with valid cost");
3724
3725 // Loop body is free and there is no need for interleaving.
3726 if (LoopCost == 0)
3727 return 1;
3728 }
3729
3730 // We divide by these values below, so clamp each to at least one; assume
3731 // that every instruction uses at least one register.
3732 for (auto &Pair : R.MaxLocalUsers) {
3733 Pair.second = std::max(Pair.second, 1U);
3734 }
3735
3736 // We calculate the interleave count using the following formula.
3737 // Subtract the number of loop invariants from the number of available
3738 // registers. These registers are used by all of the interleaved instances.
3739 // Next, divide the remaining registers by the number of registers that is
3740 // required by the loop, in order to estimate how many parallel instances
3741 // fit without causing spills. All of this is rounded down if necessary to be
3742 // a power of two. We want power of two interleave count to simplify any
3743 // addressing operations or alignment considerations.
3744 // We also want power of two interleave counts to ensure that the induction
3745 // variable of the vector loop wraps to zero, when tail is folded by masking;
3746 // this currently happens when OptForSize, in which case IC is set to 1 above.
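// Illustrative numbers: with 32 registers in a class, 2 of them tied up by
// loop invariants and at most 6 registers live per interleaved instance,
// the computation below yields bit_floor((32 - 2) / 6) = bit_floor(5) = 4.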
3747 unsigned IC = UINT_MAX;
3748
3749 for (const auto &Pair : R.MaxLocalUsers) {
3750 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(Pair.first);
3751 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
3752 << " registers of "
3753 << TTI.getRegisterClassName(Pair.first)
3754 << " register class\n");
3755 if (VF.isScalar()) {
3756 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
3757 TargetNumRegisters = ForceTargetNumScalarRegs;
3758 } else {
3759 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
3760 TargetNumRegisters = ForceTargetNumVectorRegs;
3761 }
3762 unsigned MaxLocalUsers = Pair.second;
3763 unsigned LoopInvariantRegs = 0;
3764 if (R.LoopInvariantRegs.contains(Pair.first))
3765 LoopInvariantRegs = R.LoopInvariantRegs[Pair.first];
3766
3767 unsigned TmpIC = llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs) /
3768 MaxLocalUsers);
3769 // Don't count the induction variable as interleaved.
3770 if (EnableIndVarRegisterHeur) {
3771 TmpIC = llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs - 1) /
3772 std::max(1U, (MaxLocalUsers - 1)));
3773 }
3774
3775 IC = std::min(IC, TmpIC);
3776 }
3777
3778 // Clamp the interleave ranges to reasonable counts.
3779 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
3780 LLVM_DEBUG(dbgs() << "LV: MaxInterleaveFactor for the target is "
3781 << MaxInterleaveCount << "\n");
3782
3783 // Check if the user has overridden the max.
3784 if (VF.isScalar()) {
3785 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
3786 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
3787 } else {
3788 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
3789 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
3790 }
3791
3792 // Try to get the exact trip count or, failing that, an estimate based on
3793 // profiling data or ConstantMax from PSE.
3794 auto BestKnownTC =
3795 getSmallBestKnownTC(PSE, OrigLoop,
3796 /*CanUseConstantMax=*/true,
3797 /*CanExcludeZeroTrips=*/CM.isEpilogueAllowed());
3798
3799 // For fixed length VFs treat a scalable trip count as unknown.
3800 if (BestKnownTC && (BestKnownTC->isFixed() || VF.isScalable())) {
3801 // Re-evaluate trip counts and VFs to be in the same numerical space.
3802 unsigned AvailableTC =
3803 estimateElementCount(*BestKnownTC, Config.getVScaleForTuning());
3804 unsigned EstimatedVF =
3805 estimateElementCount(VF, Config.getVScaleForTuning());
3806
3807 // At least one iteration must be scalar when this constraint holds. So the
3808 // maximum available iterations for interleaving is one less.
3809 if (CM.requiresScalarEpilogue(VF.isVector()))
3810 --AvailableTC;
3811
3812 unsigned InterleaveCountLB = bit_floor(std::max(
3813 1u, std::min(AvailableTC / (EstimatedVF * 2), MaxInterleaveCount)));
3814
3815 if (getSmallConstantTripCount(PSE.getSE(), OrigLoop).isNonZero()) {
3816 // If the best known trip count is exact, we select between two
3817 // prospective ICs, where
3818 //
3819 // 1) the aggressive IC is capped by the trip count divided by VF
3820 // 2) the conservative IC is capped by the trip count divided by (VF * 2)
3821 //
3822 // The final IC is selected in a way that the epilogue loop trip count is
3823 // minimized while maximizing the IC itself, so that we either run the
3824 // vector loop at least once if it generates a small epilogue loop, or
3825 // else we run the vector loop at least twice.
3826
3827 unsigned InterleaveCountUB = bit_floor(std::max(
3828 1u, std::min(AvailableTC / EstimatedVF, MaxInterleaveCount)));
3829 MaxInterleaveCount = InterleaveCountLB;
3830
3831 if (InterleaveCountUB != InterleaveCountLB) {
3832 unsigned TailTripCountUB =
3833 (AvailableTC % (EstimatedVF * InterleaveCountUB));
3834 unsigned TailTripCountLB =
3835 (AvailableTC % (EstimatedVF * InterleaveCountLB));
3836 // If both produce same scalar tail, maximize the IC to do the same work
3837 // in fewer vector loop iterations
3838 if (TailTripCountUB == TailTripCountLB)
3839 MaxInterleaveCount = InterleaveCountUB;
3840 }
3841 } else {
3842 // If trip count is an estimated compile time constant, limit the
3843 // IC to be capped by the trip count divided by VF * 2, such that the
3844 // vector loop runs at least twice to make interleaving seem profitable
3845 // when there is an epilogue loop present. Since exact Trip count is not
3846 // known we choose to be conservative in our IC estimate.
3847 MaxInterleaveCount = InterleaveCountLB;
3848 }
3849 }
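// Illustrative numbers: for AvailableTC = 48, EstimatedVF = 8 and a target
// maximum of 8, InterleaveCountUB = bit_floor(48 / 8) = 4 and
// InterleaveCountLB = bit_floor(48 / 16) = 2. The scalar tails differ
// (48 % 32 = 16 vs. 48 % 16 = 0), so the conservative LB of 2 is kept;
// with AvailableTC = 64 both tails are 0 and the UB of 8 would be chosen.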
3850
3851 assert(MaxInterleaveCount > 0 &&
3852 "Maximum interleave count must be greater than 0");
3853
3854 // Clamp the calculated IC to be between 1 and the max interleave count
3855 // that the target and trip count allow.
3856 if (IC > MaxInterleaveCount)
3857 IC = MaxInterleaveCount;
3858 else
3859 // Make sure IC is greater than 0.
3860 IC = std::max(1u, IC);
3861
3862 assert(IC > 0 && "Interleave count must be greater than 0.");
3863
3864 // Interleave if we vectorized this loop and there is a reduction that could
3865 // benefit from interleaving.
3866 if (VF.isVector() && HasReductions) {
3867 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
3868 return IC;
3869 }
3870
3871 // For any scalar loop that either requires runtime checks or tail-folding we
3872 // are better off leaving this to the unroller. Note that if we've already
3873 // vectorized the loop we will have done the runtime check and so interleaving
3874 // won't require further checks.
3875 bool ScalarInterleavingRequiresPredication =
3876 (VF.isScalar() && any_of(OrigLoop->blocks(), [this](BasicBlock *BB) {
3877 return Legal->blockNeedsPredication(BB);
3878 }));
3879 bool ScalarInterleavingRequiresRuntimePointerCheck =
3880 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
3881
3882 // We want to interleave small loops in order to reduce the loop overhead and
3883 // potentially expose ILP opportunities.
3884 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
3885 << "LV: IC is " << IC << '\n'
3886 << "LV: VF is " << VF << '\n');
3887 const bool AggressivelyInterleave =
3888 TTI.enableAggressiveInterleaving(HasReductions);
3889 if (!ScalarInterleavingRequiresRuntimePointerCheck &&
3890 !ScalarInterleavingRequiresPredication && LoopCost < SmallLoopCost) {
3891 // We assume that the cost overhead is 1 and we use the cost model
3892 // to estimate the cost of the loop and interleave until the cost of the
3893 // loop overhead is about 5% of the cost of the loop.
3894 unsigned SmallIC = std::min(IC, (unsigned)llvm::bit_floor<uint64_t>(
3895 SmallLoopCost / LoopCost.getValue()));
3896
3897 // Interleave until store/load ports (estimated by max interleave count) are
3898 // saturated.
3899 unsigned NumStores = 0;
3900 unsigned NumLoads = 0;
3901 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
3902 vp_depth_first_deep(Plan.getVectorLoopRegion()->getEntry()))) {
3903 for (VPRecipeBase &R : *VPBB) {
3904 if (isa<VPWidenLoadRecipe, VPWidenLoadEVLRecipe>(&R)) {
3905 NumLoads++;
3906 continue;
3907 }
3908 if (isa<VPWidenStoreRecipe, VPWidenStoreEVLRecipe>(&R)) {
3909 NumStores++;
3910 continue;
3911 }
3912
3913 if (auto *InterleaveR = dyn_cast<VPInterleaveRecipe>(&R)) {
3914 if (unsigned StoreOps = InterleaveR->getNumStoreOperands())
3915 NumStores += StoreOps;
3916 else
3917 NumLoads += InterleaveR->getNumDefinedValues();
3918 continue;
3919 }
3920 if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R)) {
3921 NumLoads += isa<LoadInst>(RepR->getUnderlyingInstr());
3922 NumStores += isa<StoreInst>(RepR->getUnderlyingInstr());
3923 continue;
3924 }
3925 if (isa<VPHistogramRecipe>(&R)) {
3926 NumLoads++;
3927 NumStores++;
3928 continue;
3929 }
3930 }
3931 }
3932 unsigned StoresIC = IC / (NumStores ? NumStores : 1);
3933 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
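// Illustrative numbers: with IC = 8, two stores and one load in the loop
// body, StoresIC = 8 / 2 = 4 and LoadsIC = 8 / 1 = 8, so the store ports
// are expected to saturate first.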
3934
3935 // There is little point in interleaving for reductions containing selects
3936 // and compares when VF=1 since it may just create more overhead than it's
3937 // worth for loops with small trip counts. This is because we still have to
3938 // do the final reduction after the loop.
3939 bool HasSelectCmpReductions =
3940 HasReductions &&
3941 any_of(Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis(),
3942 [](VPRecipeBase &R) {
3943 auto *RedR = dyn_cast<VPReductionPHIRecipe>(&R);
3944 return RedR && (RecurrenceDescriptor::isAnyOfRecurrenceKind(
3945 RedR->getRecurrenceKind()) ||
3946 RecurrenceDescriptor::isFindIVRecurrenceKind(
3947 RedR->getRecurrenceKind()));
3948 });
3949 if (HasSelectCmpReductions) {
3950 LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");
3951 return 1;
3952 }
3953
3954 // If we have a scalar reduction (vector reductions are already dealt with
3955 // by this point), we can increase the critical path length if the loop
3956 // we're interleaving is inside another loop. For tree-wise reductions
3957 // set the limit to 2, and for ordered reductions it's best to disable
3958 // interleaving entirely.
3959 if (HasReductions && OrigLoop->getLoopDepth() > 1) {
3960 bool HasOrderedReductions =
3961 any_of(Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis(),
3962 [](VPRecipeBase &R) {
3963 auto *RedR = dyn_cast<VPReductionPHIRecipe>(&R);
3964
3965 return RedR && RedR->isOrdered();
3966 });
3967 if (HasOrderedReductions) {
3968 LLVM_DEBUG(
3969 dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
3970 return 1;
3971 }
3972
3973 unsigned F = MaxNestedScalarReductionIC;
3974 SmallIC = std::min(SmallIC, F);
3975 StoresIC = std::min(StoresIC, F);
3976 LoadsIC = std::min(LoadsIC, F);
3977 }
3978
3979 if (EnableLoadStoreRuntimeInterleave &&
3980 std::max(StoresIC, LoadsIC) > SmallIC) {
3981 LLVM_DEBUG(
3982 dbgs() << "LV: Interleaving to saturate store or load ports.\n");
3983 return std::max(StoresIC, LoadsIC);
3984 }
3985
3986 // If there are scalar reductions and TTI has enabled aggressive
3987 // interleaving for reductions, we will interleave to expose ILP.
3988 if (VF.isScalar() && AggressivelyInterleave) {
3989 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
3990 // Interleave no less than SmallIC but not as aggressive as the normal IC
3991 // to satisfy the rare situation when resources are too limited.
3992 return std::max(IC / 2, SmallIC);
3993 }
3994
3995 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
3996 return SmallIC;
3997 }
3998
3999 // Interleave if this is a large loop (small loops are already dealt with by
4000 // this point) that could benefit from interleaving.
4001 if (AggressivelyInterleave) {
4002 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
4003 return IC;
4004 }
4005
4006 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
4007 return 1;
4008}
4009
4010bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I,
4011 ElementCount VF) {
4012 // TODO: Cost model for emulated masked load/store is completely
4013 // broken. This hack guides the cost model to use an artificially
4014 // high enough value to practically disable vectorization with such
4015 // operations, except where previously deployed legality hack allowed
4016 // using very low cost values. This is to avoid regressions coming simply
4017 // from moving "masked load/store" check from legality to cost model.
4018 // Masked Load/Gather emulation was previously never allowed.
4019 // Limited number of Masked Store/Scatter emulation was allowed.
4020 assert(isPredicatedInst(I) &&
4021 "Expecting a scalar emulated instruction");
4022 return isa<LoadInst>(I) ||
4023 (isa<StoreInst>(I) &&
4024 NumPredStores > NumberOfStoresToPredicate);
4025}
4026
4027void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
4028 assert(VF.isVector() && "Expected VF >= 2");
4029
4030 // If we've already collected the instructions to scalarize or the predicated
4031 // BBs after vectorization, there's nothing to do. Collection may already have
4032 // occurred if we have a user-selected VF and are now computing the expected
4033 // cost for interleaving.
4034 if (InstsToScalarize.contains(VF) ||
4035 PredicatedBBsAfterVectorization.contains(VF))
4036 return;
4037
4038 // Initialize a mapping for VF in InstsToScalarize. If we find that it's
4039 // not profitable to scalarize any instructions, the presence of VF in the
4040 // map will indicate that we've analyzed it already.
4041 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
4042
4043 // Find all the instructions that are scalar with predication in the loop and
4044 // determine if it would be better to not if-convert the blocks they are in.
4045 // If so, we also record the instructions to scalarize.
4046 for (BasicBlock *BB : TheLoop->blocks()) {
4047 if (!blockNeedsPredicationForAnyReason(BB))
4048 continue;
4049 for (Instruction &I : *BB)
4050 if (isScalarWithPredication(&I, VF)) {
4051 ScalarCostsTy ScalarCosts;
4052 // Do not apply discount logic for:
4053 // 1. Scalars after vectorization, as there will only be a single copy
4054 // of the instruction.
4055 // 2. Scalable VF, as that would lead to invalid scalarization costs.
4056 // 3. Emulated masked memrefs, if a hacked cost is needed.
4057 if (!isScalarAfterVectorization(&I, VF) && !VF.isScalable() &&
4058 !useEmulatedMaskMemRefHack(&I, VF) &&
4059 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) {
4060 for (const auto &[I, IC] : ScalarCosts)
4061 ScalarCostsVF.insert({I, IC});
4062 // Check if we decided to scalarize a call. If so, update the widening
4063 // decision of the call to CM_Scalarize with the computed scalar cost.
4064 for (const auto &[I, Cost] : ScalarCosts) {
4065 auto *CI = dyn_cast<CallInst>(I);
4066 if (!CI || !CallWideningDecisions.contains({CI, VF}))
4067 continue;
4068 CallWideningDecisions[{CI, VF}].Kind = CM_Scalarize;
4069 CallWideningDecisions[{CI, VF}].Cost = Cost;
4070 }
4071 }
4072 // Remember that BB will remain after vectorization.
4073 PredicatedBBsAfterVectorization[VF].insert(BB);
4074 for (auto *Pred : predecessors(BB)) {
4075 if (Pred->getSingleSuccessor() == BB)
4076 PredicatedBBsAfterVectorization[VF].insert(Pred);
4077 }
4078 }
4079 }
4080}
4081
4082InstructionCost LoopVectorizationCostModel::computePredInstDiscount(
4083 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
4084 assert(!isUniformAfterVectorization(PredInst, VF) &&
4085 "Instruction marked uniform-after-vectorization will be predicated");
4086
4087 // Initialize the discount to zero, meaning that the scalar version and the
4088 // vector version cost the same.
4089 InstructionCost Discount = 0;
4090
4091 // Holds instructions to analyze. The instructions we visit are mapped in
4092 // ScalarCosts. Those instructions are the ones that would be scalarized if
4093 // we find that the scalar version costs less.
4094 SmallVector<Instruction *, 8> Worklist;
4095
4096 // Returns true if the given instruction can be scalarized.
4097 auto CanBeScalarized = [&](Instruction *I) -> bool {
4098 // We only attempt to scalarize instructions forming a single-use chain
4099 // from the original predicated block that would otherwise be vectorized.
4100 // Although not strictly necessary, we give up on instructions we know will
4101 // already be scalar to avoid traversing chains that are unlikely to be
4102 // beneficial.
4103 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
4104 isScalarAfterVectorization(I, VF))
4105 return false;
4106
4107 // If the instruction is scalar with predication, it will be analyzed
4108 // separately. We ignore it within the context of PredInst.
4109 if (isScalarWithPredication(I, VF))
4110 return false;
4111
4112 // If any of the instruction's operands are uniform after vectorization,
4113 // the instruction cannot be scalarized. This prevents, for example, a
4114 // masked load from being scalarized.
4115 //
4116 // We assume we will only emit a value for lane zero of an instruction
4117 // marked uniform after vectorization, rather than VF identical values.
4118 // Thus, if we scalarize an instruction that uses a uniform, we would
4119 // create uses of values corresponding to the lanes we aren't emitting code
4120 // for. This behavior can be changed by allowing getScalarValue to clone
4121 // the lane zero values for uniforms rather than asserting.
4122 for (Use &U : I->operands())
4123 if (auto *J = dyn_cast<Instruction>(U.get()))
4124 if (isUniformAfterVectorization(J, VF))
4125 return false;
4126
4127 // Otherwise, we can scalarize the instruction.
4128 return true;
4129 };
4130
4131 // Compute the expected cost discount from scalarizing the entire expression
4132 // feeding the predicated instruction. We currently only consider expressions
4133 // that are single-use instruction chains.
4134 Worklist.push_back(PredInst);
4135 while (!Worklist.empty()) {
4136 Instruction *I = Worklist.pop_back_val();
4137
4138 // If we've already analyzed the instruction, there's nothing to do.
4139 if (ScalarCosts.contains(I))
4140 continue;
4141
4142 // Cannot scalarize fixed-order recurrence phis at the moment.
4143 if (isa<PHINode>(I) && Legal->isFixedOrderRecurrence(cast<PHINode>(I)))
4144 continue;
4145
4146 // Compute the cost of the vector instruction. Note that this cost already
4147 // includes the scalarization overhead of the predicated instruction.
4148 InstructionCost VectorCost = getInstructionCost(I, VF);
4149
4150 // Compute the cost of the scalarized instruction. This cost is the cost of
4151 // the instruction as if it wasn't if-converted and instead remained in the
4152 // predicated block. We will scale this cost by block probability after
4153 // computing the scalarization overhead.
4154 InstructionCost ScalarCost =
4155 VF.getFixedValue() * getInstructionCost(I, ElementCount::getFixed(1));
4156
4157 // Compute the scalarization overhead of needed insertelement instructions
4158 // and phi nodes.
4159 if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) {
4160 Type *WideTy = toVectorizedTy(I->getType(), VF);
4161 for (Type *VectorTy : getContainedTypes(WideTy)) {
4162 ScalarCost += TTI.getScalarizationOverhead(
4163 cast<VectorType>(VectorTy), APInt::getAllOnes(VF.getFixedValue()),
4164 /*Insert=*/true,
4165 /*Extract=*/false, Config.CostKind);
4166 }
4167 ScalarCost += VF.getFixedValue() *
4168 TTI.getCFInstrCost(Instruction::PHI, Config.CostKind);
4169 }
4170
4171 // Compute the scalarization overhead of needed extractelement
4172 // instructions. For each of the instruction's operands, if the operand can
4173 // be scalarized, add it to the worklist; otherwise, account for the
4174 // overhead.
4175 for (Use &U : I->operands())
4176 if (auto *J = dyn_cast<Instruction>(U.get())) {
4177 assert(canVectorizeTy(J->getType()) &&
4178 "Instruction has non-scalar type");
4179 if (CanBeScalarized(J))
4180 Worklist.push_back(J);
4181 else if (needsExtract(J, VF)) {
4182 Type *WideTy = toVectorizedTy(J->getType(), VF);
4183 for (Type *VectorTy : getContainedTypes(WideTy)) {
4184 ScalarCost += TTI.getScalarizationOverhead(
4185 cast<VectorType>(VectorTy),
4186 APInt::getAllOnes(VF.getFixedValue()), /*Insert*/ false,
4187 /*Extract*/ true, Config.CostKind);
4188 }
4189 }
4190 }
4191
4192 // Scale the total scalar cost by block probability.
4193 ScalarCost /= getPredBlockCostDivisor(Config.CostKind, I->getParent());
4194
4195 // Compute the discount. A non-negative discount means the vector version
4196 // of the instruction costs more, and scalarizing would be beneficial.
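// Illustrative costs: if VectorCost = 10 and the probability-scaled
// ScalarCost = 6, the discount grows by 4, tipping the balance towards
// scalarizing the chain.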
4197 Discount += VectorCost - ScalarCost;
4198 ScalarCosts[I] = ScalarCost;
4199 }
4200
4201 return Discount;
4202}
4203
4204InstructionCost LoopVectorizationCostModel::expectedCost(ElementCount VF) {
4205 InstructionCost Cost;
4206 assert(VF.isScalar() && "must only be called for scalar VFs");
4207
4208 // For each block.
4209 for (BasicBlock *BB : TheLoop->blocks()) {
4210 InstructionCost BlockCost;
4211
4212 // For each instruction in the old loop.
4213 for (Instruction &I : *BB) {
4214 // Skip ignored values.
4215 if (ValuesToIgnore.count(&I) ||
4216 (VF.isVector() && VecValuesToIgnore.count(&I)))
4217 continue;
4218
4219 InstructionCost C = getInstructionCost(&I, VF);
4220
4221 // Check if we should override the cost.
4222 if (C.isValid() && ForceTargetInstructionCost.getNumOccurrences() > 0)
4223 C = InstructionCost(ForceTargetInstructionCost);
4224
4225 BlockCost += C;
4226 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C << " for VF "
4227 << VF << " For instruction: " << I << '\n');
4228 }
4229
4230 // In the scalar loop, we may not always execute the predicated block, if it
4231 // is an if-else block. Thus, scale the block's cost by the probability of
4232 // executing it. getPredBlockCostDivisor will return 1 for blocks that are
4233 // only predicated by the header mask when folding the tail.
4234 Cost += BlockCost / getPredBlockCostDivisor(Config.CostKind, BB);
4235 }
4236
4237 return Cost;
4238}
4239
4240/// Gets the address access SCEV for Ptr, if it should be used for cost modeling
4241/// according to isAddressSCEVForCost.
4242///
4243/// This SCEV can be sent to the Target in order to estimate the address
4244/// calculation cost.
4245static const SCEV *getAddressAccessSCEV(
4246 Value *Ptr,
4247 PredicatedScalarEvolution &PSE,
4248 const Loop *TheLoop) {
4249 const SCEV *Addr = PSE.getSCEV(Ptr);
4250 return vputils::isAddressSCEVForCost(Addr, *PSE.getSE(), TheLoop) ? Addr
4251 : nullptr;
4252}
4253
4254InstructionCost
4255LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
4256 ElementCount VF) {
4257 assert(VF.isVector() &&
4258 "Scalarization cost of instruction implies vectorization.");
4259 if (VF.isScalable())
4260 return InstructionCost::getInvalid();
4261
4262 Type *ValTy = getLoadStoreType(I);
4263 auto *SE = PSE.getSE();
4264
4265 unsigned AS = getLoadStoreAddressSpace(I);
4266 Value *Ptr = getLoadStorePointerOperand(I);
4267 Type *PtrTy = toVectorTy(Ptr->getType(), VF);
4268 // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
4269 // that it is being called from this specific place.
4270
4271 // Figure out whether the access is strided and get the stride value
4272 // if it's known at compile time.
4273 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, PSE, TheLoop);
4274
4275 // Get the cost of the scalar memory instruction and address computation.
4276 InstructionCost Cost =
4277 VF.getFixedValue() *
4278 TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV, Config.CostKind);
4279
4280 // Don't pass *I here, since it is scalar but will actually be part of a
4281 // vectorized loop where the user of it is a vectorized instruction.
4282 const Align Alignment = getLoadStoreAlignment(I);
4283 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
4284 Cost += VF.getFixedValue() *
4285 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
4286 AS, Config.CostKind, OpInfo);
4287
4288 // Get the overhead of the extractelement and insertelement instructions
4289 // we might create due to scalarization.
4290 Cost += getScalarizationOverhead(I, VF);
4291
4292 // If we have a predicated load/store, it will need extra i1 extracts and
4293 // conditional branches, but may not be executed for each vector lane. Scale
4294 // the cost by the probability of executing the predicated block.
4295 if (isPredicatedInst(I)) {
4296 Cost /= getPredBlockCostDivisor(Config.CostKind, I->getParent());
4297
4298 // Add the cost of an i1 extract and a branch
4299 auto *VecI1Ty =
4300 VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
4301 Cost += TTI.getScalarizationOverhead(
4302 VecI1Ty, APInt::getAllOnes(VF.getFixedValue()),
4303 /*Insert=*/false, /*Extract=*/true, Config.CostKind);
4304 Cost += TTI.getCFInstrCost(Instruction::CondBr, Config.CostKind);
4305
4306 if (useEmulatedMaskMemRefHack(I, VF))
4307 // Artificially setting to a high enough value to practically disable
4308 // vectorization with such operations.
4309 Cost = 3000000;
4310 }
4311
4312 return Cost;
4313}
4314
4315InstructionCost
4316LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
4317 ElementCount VF) {
4318 Type *ValTy = getLoadStoreType(I);
4319 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
4320 Value *Ptr = getLoadStorePointerOperand(I);
4321 unsigned AS = getLoadStoreAddressSpace(I);
4322 int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
4323
4324 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
4325 "Stride should be 1 or -1 for consecutive memory access");
4326 const Align Alignment = getLoadStoreAlignment(I);
4327 InstructionCost Cost = 0;
4328 if (isMaskRequired(I)) {
4329 unsigned IID = I->getOpcode() == Instruction::Load
4330 ? Intrinsic::masked_load
4331 : Intrinsic::masked_store;
4332 Cost += TTI.getMemIntrinsicInstrCost(
4333 MemIntrinsicCostAttributes(IID, VectorTy, Alignment, AS),
4334 Config.CostKind);
4335 } else {
4336 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
4337 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
4338 Config.CostKind, OpInfo, I);
4339 }
4340
4341 bool Reverse = ConsecutiveStride < 0;
4342 if (Reverse)
4343 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy,
4344 VectorTy, {}, Config.CostKind, 0);
4345 return Cost;
4346}
4347
4348InstructionCost
4349LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
4350 ElementCount VF) {
4351 assert(Legal->isUniformMemOp(*I, VF));
4352
4353 Type *ValTy = getLoadStoreType(I);
4354 Type *PtrTy = getLoadStorePointerOperand(I)->getType();
4355 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
4356 const Align Alignment = getLoadStoreAlignment(I);
4357 unsigned AS = getLoadStoreAddressSpace(I);
4358 if (isa<LoadInst>(I)) {
4359 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr,
4360 Config.CostKind) +
4361 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
4362 Config.CostKind) +
4363 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy,
4364 VectorTy, {}, Config.CostKind);
4365 }
4366 StoreInst *SI = cast<StoreInst>(I);
4367
4368 bool IsLoopInvariantStoreValue = Legal->isInvariant(SI->getValueOperand());
4369 // TODO: We have existing tests that request the cost of extracting element
4370 // VF.getKnownMinValue() - 1 from a scalable vector. This does not represent
4371 // the actual generated code, which involves extracting the last element of
4372 // a scalable vector where the lane to extract is unknown at compile time.
4373 InstructionCost Cost =
4374 TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, Config.CostKind) +
4375 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
4376 Config.CostKind);
4377 if (!IsLoopInvariantStoreValue)
4378 Cost += TTI.getIndexedVectorInstrCostFromEnd(Instruction::ExtractElement,
4379 VectorTy, Config.CostKind, 0);
4380 return Cost;
4381}
4382
4383InstructionCost
4384LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
4385 ElementCount VF) {
4386 Type *ValTy = getLoadStoreType(I);
4387 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
4388 const Align Alignment = getLoadStoreAlignment(I);
4389 Value *Ptr = getLoadStorePointerOperand(I);
4390 Type *PtrTy = Ptr->getType();
4391
4392 if (!Legal->isUniform(Ptr, VF))
4393 PtrTy = toVectorTy(PtrTy, VF);
4394
4395 unsigned IID = I->getOpcode() == Instruction::Load
4396 ? Intrinsic::masked_gather
4397 : Intrinsic::masked_scatter;
4398 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr,
4399 Config.CostKind) +
4400 TTI.getMemIntrinsicInstrCost(
4401 MemIntrinsicCostAttributes(IID, VectorTy, Ptr, isMaskRequired(I),
4402 Alignment, I),
4403 Config.CostKind);
4404}
4405
4406InstructionCost
4407LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
4408 ElementCount VF) {
4409 const auto *Group = getInterleavedAccessGroup(I);
4410 assert(Group && "Fail to get an interleaved access group.");
4411
4412 Instruction *InsertPos = Group->getInsertPos();
4413 Type *ValTy = getLoadStoreType(InsertPos);
4414 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
4415 unsigned AS = getLoadStoreAddressSpace(InsertPos);
4416
4417 unsigned InterleaveFactor = Group->getFactor();
4418 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
4419
4420 // Holds the indices of existing members in the interleaved group.
4421 SmallVector<unsigned, 4> Indices;
4422 for (unsigned IF = 0; IF < InterleaveFactor; IF++)
4423 if (Group->getMember(IF))
4424 Indices.push_back(IF);
4425
4426 // Calculate the cost of the whole interleaved group.
4427 bool UseMaskForGaps =
4428 (Group->requiresScalarEpilogue() && !isEpilogueAllowed()) ||
4429 (isa<StoreInst>(I) && !Group->isFull());
4430 InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
4431 InsertPos->getOpcode(), WideVecTy, Group->getFactor(), Indices,
4432 Group->getAlign(), AS, Config.CostKind, isMaskRequired(I),
4433 UseMaskForGaps);
4434
4435 if (Group->isReverse()) {
4436 // TODO: Add support for reversed masked interleaved access.
4437 assert(!isMaskRequired(I) &&
4438 "Reverse masked interleaved access not supported.");
4439 Cost += Group->getNumMembers() *
4440 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy,
4441 VectorTy, {}, Config.CostKind, 0);
4442 }
4443 return Cost;
4444}
4445
4446std::optional<InstructionCost>
4447LoopVectorizationCostModel::getReductionPatternCost(Instruction *I,
4448 ElementCount VF,
4449 Type *Ty) const {
4450 using namespace llvm::PatternMatch;
4451 // Early exit for no inloop reductions
4452 if (Config.getInLoopReductions().empty() || VF.isScalar() ||
4453 !isa<VectorType>(Ty))
4454 return std::nullopt;
4455 auto *VectorTy = cast<VectorType>(Ty);
4456
4457 // We are looking for one of the following patterns, finding the minimal acceptable cost for it:
4458 // reduce(mul(ext(A), ext(B))) or
4459 // reduce(mul(A, B)) or
4460 // reduce(ext(A)) or
4461 // reduce(A).
4462 // The basic idea is that we walk down the tree to do that, finding the root
4463 // reduction instruction in InLoopReductionImmediateChains. From there we find
4464 // the pattern of mul/ext and test the cost of the entire pattern vs the cost
4465 // of the components. If the reduction cost is lower, then we return it for
4466 // the reduction instruction and 0 for the other instructions in the pattern.
4467 // If it is not, we return an invalid cost specifying that the original cost
4468 // method should be used.
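// As an illustration, a loop computing sum += (int)(short)a[i] * (int)(short)b[i]
// forms reduce.add(mul(sext(A), sext(B))); targets with a fused
// multiply-accumulate reduction can often cover the whole pattern for less
// than the summed costs of the extends, the multiply and the base reduction.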
4469 Instruction *RetI = I;
4470 if (match(RetI, m_ZExtOrSExt(m_Value()))) {
4471 if (!RetI->hasOneUser())
4472 return std::nullopt;
4473 RetI = RetI->user_back();
4474 }
4475
4476 if (match(RetI, m_OneUse(m_Mul(m_Value(), m_Value()))) &&
4477 RetI->user_back()->getOpcode() == Instruction::Add) {
4478 RetI = RetI->user_back();
4479 }
4480
4481 // Test if the found instruction is a reduction, and if not return an invalid
4482 // cost, signalling the caller to use the original cost modelling.
4483 Instruction *LastChain = Config.getInLoopReductionImmediateChain(RetI);
4484 if (!LastChain)
4485 return std::nullopt;
4486
4487 // Find the reduction this chain is a part of and calculate the basic cost of
4488 // the reduction on its own.
4489 Instruction *ReductionPhi = LastChain;
4490 while (!isa<PHINode>(ReductionPhi))
4491 ReductionPhi = Config.getInLoopReductionImmediateChain(ReductionPhi);
4492
4493 const RecurrenceDescriptor &RdxDesc =
4494 Legal->getRecurrenceDescriptor(cast<PHINode>(ReductionPhi));
4495
4496 InstructionCost BaseCost;
4497 RecurKind RK = RdxDesc.getRecurrenceKind();
4498 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK)) {
4499 Intrinsic::ID MinMaxID = getMinMaxReductionIntrinsicOp(RK);
4500 BaseCost = TTI.getMinMaxReductionCost(
4501 MinMaxID, VectorTy, RdxDesc.getFastMathFlags(), Config.CostKind);
4502 } else {
4503 BaseCost = TTI.getArithmeticReductionCost(RdxDesc.getOpcode(), VectorTy,
4504 RdxDesc.getFastMathFlags(),
4505 Config.CostKind);
4506 }
4507
4508 // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
4509 // normal fmul instruction to the cost of the fadd reduction.
4510 if (RK == RecurKind::FMulAdd)
4511 BaseCost += TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy,
4512 Config.CostKind);
4513
4514 // If we're using ordered reductions then we can just return the base cost
4515 // here, since getArithmeticReductionCost calculates the full ordered
4516 // reduction cost when FP reassociation is not allowed.
4517 if (Config.useOrderedReductions(RdxDesc))
4518 return BaseCost;
4519
4520 // Get the operand that was not the reduction chain and match it to one of the
4521 // patterns, returning the better cost if it is found.
4522 Instruction *RedOp = RetI->getOperand(1) == LastChain
4523 ? dyn_cast<Instruction>(RetI->getOperand(0))
4524 : dyn_cast<Instruction>(RetI->getOperand(1));
4525
4526 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
4527
4528 Instruction *Op0, *Op1;
4529 if (RedOp && RdxDesc.getOpcode() == Instruction::Add &&
4530 match(RedOp,
4531 m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) &&
4532 match(Op0, m_ZExtOrSExt(m_Value())) &&
4533 Op0->getOpcode() == Op1->getOpcode() &&
4534 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
4535 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
4536 (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {
4537
4538 // Matched reduce.add(ext(mul(ext(A), ext(B)))
4539 // Note that the extend opcodes need to all match, or if A==B they will have
4540 // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
4541 // which is equally fine.
4542 bool IsUnsigned = isa<ZExtInst>(Op0);
4543 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
4544 auto *MulType = VectorType::get(Op0->getType(), VectorTy);
4545
4546 InstructionCost ExtCost =
4547 TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
4548 TTI::CastContextHint::None, Config.CostKind, Op0);
4549 InstructionCost MulCost =
4550 TTI.getArithmeticInstrCost(Instruction::Mul, MulType, Config.CostKind);
4551 InstructionCost Ext2Cost = TTI.getCastInstrCost(
4552 RedOp->getOpcode(), VectorTy, MulType, TTI::CastContextHint::None,
4553 Config.CostKind, RedOp);
4554
4555 InstructionCost RedCost = TTI.getMulAccReductionCost(
4556 IsUnsigned, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), ExtType,
4557 Config.CostKind);
4558
4559 if (RedCost.isValid() &&
4560 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
4561 return I == RetI ? RedCost : 0;
4562 } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) &&
4563 !TheLoop->isLoopInvariant(RedOp)) {
4564 // Matched reduce(ext(A))
4565 bool IsUnsigned = isa<ZExtInst>(RedOp);
4566 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
4567 InstructionCost RedCost = TTI.getExtendedReductionCost(
4568 RdxDesc.getOpcode(), IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
4569 RdxDesc.getFastMathFlags(), Config.CostKind);
4570
4571 InstructionCost ExtCost = TTI.getCastInstrCost(
4572 RedOp->getOpcode(), VectorTy, ExtType, TTI::CastContextHint::None,
4573 Config.CostKind, RedOp);
4574 if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
4575 return I == RetI ? RedCost : 0;
4576 } else if (RedOp && RdxDesc.getOpcode() == Instruction::Add &&
4577 match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) {
4578 if (match(Op0, m_ZExtOrSExt(m_Value())) &&
4579 Op0->getOpcode() == Op1->getOpcode() &&
4580 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
4581 bool IsUnsigned = isa<ZExtInst>(Op0);
4582 Type *Op0Ty = Op0->getOperand(0)->getType();
4583 Type *Op1Ty = Op1->getOperand(0)->getType();
4584 Type *LargestOpTy =
4585 Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty
4586 : Op0Ty;
4587 auto *ExtType = VectorType::get(LargestOpTy, VectorTy);
4588
4589         // Matched reduce.add(mul(ext(A), ext(B))), where the two exts may have
4590         // different source sizes. We take the largest type as the ext used by
4591         // the reduction and add the extra extend cost, e.g. reduce(mul(ext(ext(A)), ext(B))).
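        // Illustrative example (editorial): with A as i8 and B as i16 feeding
        // an i32 reduce.add, LargestOpTy is i16, so the reduction is costed
        // over i16 inputs and the i8 operand pays one extra ext (ExtraExtCost)
        // to reach i16 first.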
4592 InstructionCost ExtCost0 = TTI.getCastInstrCost(
4593 Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy),
4594 TTI::CastContextHint::None, Config.CostKind, Op0);
4595 InstructionCost ExtCost1 = TTI.getCastInstrCost(
4596 Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy),
4597 TTI::CastContextHint::None, Config.CostKind, Op1);
4598 InstructionCost MulCost = TTI.getArithmeticInstrCost(
4599 Instruction::Mul, VectorTy, Config.CostKind);
4600
4601 InstructionCost RedCost = TTI.getMulAccReductionCost(
4602 IsUnsigned, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), ExtType,
4603 Config.CostKind);
4604 InstructionCost ExtraExtCost = 0;
4605 if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
4606 Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;
4607 ExtraExtCost = TTI.getCastInstrCost(
4608 ExtraExtOp->getOpcode(), ExtType,
4609 VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy),
4610 TTI::CastContextHint::None, Config.CostKind, ExtraExtOp);
4611 }
4612
4613 if (RedCost.isValid() &&
4614 (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost))
4615 return I == RetI ? RedCost : 0;
4616 } else if (!match(I, m_ZExtOrSExt(m_Value()))) {
4617 // Matched reduce.add(mul())
4618 InstructionCost MulCost = TTI.getArithmeticInstrCost(
4619 Instruction::Mul, VectorTy, Config.CostKind);
4620
4621 InstructionCost RedCost = TTI.getMulAccReductionCost(
4622 true, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), VectorTy,
4623 Config.CostKind);
4624
4625 if (RedCost.isValid() && RedCost < MulCost + BaseCost)
4626 return I == RetI ? RedCost : 0;
4627 }
4628 }
4629
4630 return I == RetI ? std::optional<InstructionCost>(BaseCost) : std::nullopt;
4631}
4632
4633 InstructionCost
4634LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
4635 ElementCount VF) {
4636 // Calculate scalar cost only. Vectorization cost should be ready at this
4637 // moment.
4638 if (VF.isScalar()) {
4639 Type *ValTy = getLoadStoreType(I);
4640     Type *PtrTy = getLoadStorePointerOperand(I)->getType();
4641 const Align Alignment = getLoadStoreAlignment(I);
4642 unsigned AS = getLoadStoreAddressSpace(I);
4643
4644 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
4645 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr,
4646 Config.CostKind) +
4647 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
4648 Config.CostKind, OpInfo, I);
4649 }
4650 return getWideningCost(I, VF);
4651}
4652
4653 InstructionCost
4654LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
4655 ElementCount VF) const {
4656
4657 // There is no mechanism yet to create a scalable scalarization loop,
4658 // so this is currently Invalid.
4659 if (VF.isScalable())
4660     return InstructionCost::getInvalid();
4661
4662 if (VF.isScalar())
4663 return 0;
4664
4665   InstructionCost Cost = 0;
4666 Type *RetTy = toVectorizedTy(I->getType(), VF);
4667 if (!RetTy->isVoidTy() &&
4668 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) {
4669
4671 if (isa<LoadInst>(I))
4673 else if (isa<StoreInst>(I))
4675
4676 for (Type *VectorTy : getContainedTypes(RetTy)) {
4677 Cost += TTI.getScalarizationOverhead(
4678           cast<VectorType>(VectorTy), APInt::getAllOnes(VF.getKnownMinValue()),
4679 /*Insert=*/true, /*Extract=*/false, Config.CostKind,
4680 /*ForPoisonSrc=*/true, {}, VIC);
4681 }
4682 }
4683
4684 // Some targets keep addresses scalar.
4685 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
4686 return Cost;
4687
4688 // Some targets support efficient element stores.
4689 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
4690 return Cost;
4691
4692 // Collect operands to consider.
4693 CallInst *CI = dyn_cast<CallInst>(I);
4694 Instruction::op_range Ops = CI ? CI->args() : I->operands();
4695
4696 // Skip operands that do not require extraction/scalarization and do not incur
4697 // any overhead.
4698   SmallVector<Type *> Tys;
4699 for (auto *V : filterExtractingOperands(Ops, VF))
4700 Tys.push_back(maybeVectorizeType(V->getType(), VF));
4701
4705 return Cost +
4706 TTI.getOperandsScalarizationOverhead(Tys, Config.CostKind, OperandVIC);
4707}
4708
4709 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
4710 if (VF.isScalar())
4711 return;
4712 NumPredStores = 0;
4713 for (BasicBlock *BB : TheLoop->blocks()) {
4714 // For each instruction in the old loop.
4715 for (Instruction &I : *BB) {
4716       Value *Ptr = getLoadStorePointerOperand(&I);
4717 if (!Ptr)
4718 continue;
4719
4720 // TODO: We should generate better code and update the cost model for
4721 // predicated uniform stores. Today they are treated as any other
4722 // predicated store (see added test cases in
4723 // invariant-store-vectorization.ll).
4724       if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF))
4725 NumPredStores++;
4726
4727 if (Legal->isUniformMemOp(I, VF)) {
4728 auto IsLegalToScalarize = [&]() {
4729 if (!VF.isScalable())
4730 // Scalarization of fixed length vectors "just works".
4731 return true;
4732
4733 // We have dedicated lowering for unpredicated uniform loads and
4734 // stores. Note that even with tail folding we know that at least
4735 // one lane is active (i.e. generalized predication is not possible
4736 // here), and the logic below depends on this fact.
4737 if (!foldTailByMasking())
4738 return true;
4739
4740 // For scalable vectors, a uniform memop load is always
4741 // uniform-by-parts and we know how to scalarize that.
4742 if (isa<LoadInst>(I))
4743 return true;
4744
4745           // A uniform store isn't necessarily uniform-by-parts
4746           // and we can't assume scalarization.
4747 auto &SI = cast<StoreInst>(I);
4748 return TheLoop->isLoopInvariant(SI.getValueOperand());
4749 };
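        // Illustrative example (editorial): under tail folding with a scalable
        // VF, a store like `*p = inv` of a loop-invariant value can be
        // scalarized to a single guarded scalar store, whereas `*p = i` would
        // need the value of the last active lane, which we cannot cheaply
        // extract, so we refuse to scalarize it.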
4750
4751 const InstructionCost GatherScatterCost =
4752 Config.isLegalGatherOrScatter(&I, VF)
4753 ? getGatherScatterCost(&I, VF)
4754                 : InstructionCost::getInvalid();
4755
4756 // Load: Scalar load + broadcast
4757 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
4758 // FIXME: This cost is a significant under-estimate for tail folded
4759 // memory ops.
4760 const InstructionCost ScalarizationCost =
4761 IsLegalToScalarize() ? getUniformMemOpCost(&I, VF)
4762                                  : InstructionCost::getInvalid();
4763
4764         // Choose the better solution for the current VF. Note that Invalid
4765         // costs compare as maximally large. If both are invalid, we get a
4766         // scalable invalid cost, which signals a failure and a vectorization abort.
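        // Illustrative note (editorial): InstructionCost orders Invalid above
        // every valid cost, e.g. InstructionCost::getInvalid() < InstructionCost(100)
        // is false, so when gather/scatter is illegal the comparison below
        // always falls through to the scalarization cost.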
4767 if (GatherScatterCost < ScalarizationCost)
4768 setWideningDecision(&I, VF, CM_GatherScatter, GatherScatterCost);
4769 else
4770 setWideningDecision(&I, VF, CM_Scalarize, ScalarizationCost);
4771 continue;
4772 }
4773
4774 // We assume that widening is the best solution when possible.
4775 if (memoryInstructionCanBeWidened(&I, VF)) {
4776 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
4777 int ConsecutiveStride = Legal->isConsecutivePtr(
4778             getLoadStoreType(&I), Ptr);
4779 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
4780 "Expected consecutive stride.");
4781 InstWidening Decision =
4782 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
4783 setWideningDecision(&I, VF, Decision, Cost);
4784 continue;
4785 }
4786
4787 // Choose between Interleaving, Gather/Scatter or Scalarization.
4788       InstructionCost InterleaveCost = InstructionCost::getInvalid();
4789 unsigned NumAccesses = 1;
4790 if (isAccessInterleaved(&I)) {
4791 const auto *Group = getInterleavedAccessGroup(&I);
4792 assert(Group && "Fail to get an interleaved access group.");
4793
4794 // Make one decision for the whole group.
4795 if (getWideningDecision(&I, VF) != CM_Unknown)
4796 continue;
4797
4798 NumAccesses = Group->getNumMembers();
4799         if (interleavedAccessCanBeWidened(&I, VF))
4800 InterleaveCost = getInterleaveGroupCost(&I, VF);
4801 }
4802
4803 InstructionCost GatherScatterCost =
4804 Config.isLegalGatherOrScatter(&I, VF)
4805 ? getGatherScatterCost(&I, VF) * NumAccesses
4806               : InstructionCost::getInvalid();
4807
4808 InstructionCost ScalarizationCost =
4809 getMemInstScalarizationCost(&I, VF) * NumAccesses;
4810
4811 // Choose better solution for the current VF,
4812 // write down this decision and use it during vectorization.
4813       InstructionCost Cost;
4814 InstWidening Decision;
4815 if (InterleaveCost <= GatherScatterCost &&
4816 InterleaveCost < ScalarizationCost) {
4817 Decision = CM_Interleave;
4818 Cost = InterleaveCost;
4819 } else if (GatherScatterCost < ScalarizationCost) {
4820 Decision = CM_GatherScatter;
4821 Cost = GatherScatterCost;
4822 } else {
4823 Decision = CM_Scalarize;
4824 Cost = ScalarizationCost;
4825 }
4826       // If the instruction belongs to an interleave group, the whole group
4827       // receives the same decision. The whole group receives the cost, but
4828       // the cost will actually be assigned to one instruction.
4829 if (const auto *Group = getInterleavedAccessGroup(&I)) {
4830 if (Decision == CM_Scalarize) {
4831 for (Instruction *I : Group->members())
4832 setWideningDecision(I, VF, Decision,
4833 getMemInstScalarizationCost(I, VF));
4834 } else {
4835 setWideningDecision(Group, VF, Decision, Cost);
4836 }
4837 } else
4838 setWideningDecision(&I, VF, Decision, Cost);
4839 }
4840 }
4841
4842 // Make sure that any load of address and any other address computation
4843 // remains scalar unless there is gather/scatter support. This avoids
4844 // inevitable extracts into address registers, and also has the benefit of
4845 // activating LSR more, since that pass can't optimize vectorized
4846 // addresses.
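  // Illustrative example (editorial): in a pointer-chasing loop such as
  //   for (i) sum += *ptrs[i];
  // the load of ptrs[i] produces the addresses for the next load. Widening it
  // would force an extract per lane to feed scalar address registers, so on
  // targets without vectorized addressing we force such loads (and the
  // instructions computing them) to stay scalar below.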
4847 if (TTI.prefersVectorizedAddressing())
4848 return;
4849
4850 // Start with all scalar pointer uses.
4851   SmallPtrSet<Instruction *, 8> AddrDefs;
4852 for (BasicBlock *BB : TheLoop->blocks())
4853 for (Instruction &I : *BB) {
4854 Instruction *PtrDef =
4855           dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
4856 if (PtrDef && TheLoop->contains(PtrDef) &&
4857           getWideningDecision(&I, VF) != CM_GatherScatter)
4858 AddrDefs.insert(PtrDef);
4859 }
4860
4861 // Add all instructions used to generate the addresses.
4862   SmallVector<Instruction *, 4> Worklist;
4863 append_range(Worklist, AddrDefs);
4864 while (!Worklist.empty()) {
4865 Instruction *I = Worklist.pop_back_val();
4866 for (auto &Op : I->operands())
4867 if (auto *InstOp = dyn_cast<Instruction>(Op))
4868 if (TheLoop->contains(InstOp) && !isa<PHINode>(InstOp) &&
4869 AddrDefs.insert(InstOp).second)
4870 Worklist.push_back(InstOp);
4871 }
4872
4873 auto UpdateMemOpUserCost = [this, VF](LoadInst *LI) {
4874 // If there are direct memory op users of the newly scalarized load,
4875 // their cost may have changed because there's no scalarization
4876 // overhead for the operand. Update it.
4877 for (User *U : LI->users()) {
4878       if (!isa<LoadInst, StoreInst>(U))
4879 continue;
4880       if (getWideningDecision(cast<Instruction>(U), VF) != CM_Scalarize)
4881 continue;
4882       setWideningDecision(
4883           cast<Instruction>(U), VF, CM_Scalarize,
4884 getMemInstScalarizationCost(cast<Instruction>(U), VF));
4885 }
4886 };
4887 for (auto *I : AddrDefs) {
4888 if (isa<LoadInst>(I)) {
4889       // Setting the desired widening decision should ideally be handled by
4890       // the cost functions, but since this involves finding out whether the
4891       // loaded register is involved in an address computation, it is instead
4892       // changed here when we know this is the case.
4893 InstWidening Decision = getWideningDecision(I, VF);
4894 if (!isPredicatedInst(I) &&
4895 (Decision == CM_Widen || Decision == CM_Widen_Reverse ||
4896 (!Legal->isUniformMemOp(*I, VF) && Decision == CM_Scalarize))) {
4897 // Scalarize a widened load of address or update the cost of a scalar
4898 // load of an address.
4899         setWideningDecision(
4900 I, VF, CM_Scalarize,
4901 (VF.getKnownMinValue() *
4902 getMemoryInstructionCost(I, ElementCount::getFixed(1))));
4903 UpdateMemOpUserCost(cast<LoadInst>(I));
4904 } else if (const auto *Group = getInterleavedAccessGroup(I)) {
4905         // Scalarize all members of this interleaved group when any member
4906         // is used as an address. The load used as an address skips the
4907         // scalarization overhead; the other members include it.
4908 for (Instruction *Member : Group->members()) {
4909 InstructionCost Cost = AddrDefs.contains(Member)
4910 ? (VF.getKnownMinValue() *
4911 getMemoryInstructionCost(
4912 Member, ElementCount::getFixed(1)))
4913 : getMemInstScalarizationCost(Member, VF);
4914             setWideningDecision(Member, VF, CM_Scalarize, Cost);
4915 UpdateMemOpUserCost(cast<LoadInst>(Member));
4916 }
4917 }
4918 } else {
4919 // Cannot scalarize fixed-order recurrence phis at the moment.
4920 if (isa<PHINode>(I) && Legal->isFixedOrderRecurrence(cast<PHINode>(I)))
4921 continue;
4922
4923       // Make sure I gets scalarized and receives a cost estimate without
4924       // scalarization overhead.
4925 ForcedScalars[VF].insert(I);
4926 }
4927 }
4928}
4929
4930 void LoopVectorizationCostModel::setVectorizedCallDecision(ElementCount VF) {
4931 assert(!VF.isScalar() &&
4932 "Trying to set a vectorization decision for a scalar VF");
4933
4934 auto ForcedScalar = ForcedScalars.find(VF);
4935 for (BasicBlock *BB : TheLoop->blocks()) {
4936 // For each instruction in the old loop.
4937 for (Instruction &I : *BB) {
4938       CallInst *CI = dyn_cast<CallInst>(&I);
4939
4940 if (!CI)
4941 continue;
4942
4943       InstructionCost ScalarCost = InstructionCost::getInvalid();
4944       InstructionCost VectorCost = InstructionCost::getInvalid();
4945       InstructionCost IntrinsicCost = InstructionCost::getInvalid();
4946 Function *ScalarFunc = CI->getCalledFunction();
4947 Type *ScalarRetTy = CI->getType();
4948 SmallVector<Type *, 4> Tys, ScalarTys;
4949 for (auto &ArgOp : CI->args())
4950 ScalarTys.push_back(ArgOp->getType());
4951
4952 // Estimate cost of scalarized vector call. The source operands are
4953 // assumed to be vectors, so we need to extract individual elements from
4954 // there, execute VF scalar calls, and then gather the result into the
4955 // vector return value.
4956 if (VF.isFixed()) {
4957 InstructionCost ScalarCallCost = TTI.getCallInstrCost(
4958 ScalarFunc, ScalarRetTy, ScalarTys, Config.CostKind);
4959
4960 // Compute costs of unpacking argument values for the scalar calls and
4961 // packing the return values to a vector.
4962 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
4963 ScalarCost = ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
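        // Illustrative arithmetic (editorial): with VF = 4, a scalar call
        // costing 10 and a scalarization overhead of 6 (argument extracts plus
        // result inserts), ScalarCost = 10 * 4 + 6 = 46; a vector variant or
        // intrinsic is chosen below only if it costs less than that.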
4964 } else {
4965 // There is no point attempting to calculate the scalar cost for a
4966 // scalable VF as we know it will be Invalid.
4967 assert(!getScalarizationOverhead(CI, VF).isValid() &&
4968 "Unexpected valid cost for scalarizing scalable vectors");
4969 ScalarCost = InstructionCost::getInvalid();
4970 }
4971
4972 // Honor ForcedScalars and UniformAfterVectorization decisions.
4973 // TODO: For calls, it might still be more profitable to widen. Use
4974 // VPlan-based cost model to compare different options.
4975 if (VF.isVector() && ((ForcedScalar != ForcedScalars.end() &&
4976 ForcedScalar->second.contains(CI)) ||
4977 isUniformAfterVectorization(CI, VF))) {
4978 setCallWideningDecision(CI, VF, CM_Scalarize, nullptr,
4979 Intrinsic::not_intrinsic, std::nullopt,
4980 ScalarCost);
4981 continue;
4982 }
4983
4984 bool MaskRequired = isMaskRequired(CI);
4985 // Compute corresponding vector type for return value and arguments.
4986 Type *RetTy = toVectorizedTy(ScalarRetTy, VF);
4987 for (Type *ScalarTy : ScalarTys)
4988 Tys.push_back(toVectorizedTy(ScalarTy, VF));
4989
4990 // An in-loop reduction using an fmuladd intrinsic is a special case;
4991 // we don't want the normal cost for that intrinsic.
4992       if (RecurrenceDescriptor::isFMulAddIntrinsic(CI))
4993 if (auto RedCost = getReductionPatternCost(CI, VF, RetTy)) {
4994           setCallWideningDecision(CI, VF, CM_IntrinsicCall, nullptr,
4995                                   getVectorIntrinsicIDForCall(CI, TLI),
4996 std::nullopt, *RedCost);
4997 continue;
4998 }
4999
5000 // Find the cost of vectorizing the call, if we can find a suitable
5001 // vector variant of the function.
5002 VFInfo FuncInfo;
5003 Function *VecFunc = nullptr;
5004 // Search through any available variants for one we can use at this VF.
5005 for (VFInfo &Info : VFDatabase::getMappings(*CI)) {
5006 // Must match requested VF.
5007 if (Info.Shape.VF != VF)
5008 continue;
5009
5010 // Must take a mask argument if one is required
5011 if (MaskRequired && !Info.isMasked())
5012 continue;
5013
5014 // Check that all parameter kinds are supported
5015 bool ParamsOk = true;
5016 for (VFParameter Param : Info.Shape.Parameters) {
5017 switch (Param.ParamKind) {
5018           case VFParamKind::Vector:
5019 break;
5020           case VFParamKind::OMP_Uniform: {
5021 Value *ScalarParam = CI->getArgOperand(Param.ParamPos);
5022 // Make sure the scalar parameter in the loop is invariant.
5023 if (!PSE.getSE()->isLoopInvariant(PSE.getSCEV(ScalarParam),
5024 TheLoop))
5025 ParamsOk = false;
5026 break;
5027 }
5028           case VFParamKind::OMP_Linear: {
5029 Value *ScalarParam = CI->getArgOperand(Param.ParamPos);
5030 // Find the stride for the scalar parameter in this loop and see if
5031 // it matches the stride for the variant.
5032 // TODO: do we need to figure out the cost of an extract to get the
5033 // first lane? Or do we hope that it will be folded away?
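            // Illustrative example (editorial): a variant declared with
            //   #pragma omp declare simd linear(p:4)
            // only matches if the argument's SCEV is an affine AddRec over
            // this loop whose constant step equals the declared linear step (4).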
5034 ScalarEvolution *SE = PSE.getSE();
5035 if (!match(SE->getSCEV(ScalarParam),
5036                      m_scev_AffineAddRec(
5037 m_SCEV(), m_scev_SpecificSInt(Param.LinearStepOrPos),
5038                          m_SpecificLoop(TheLoop))))
5039 ParamsOk = false;
5040 break;
5041 }
5042           case VFParamKind::GlobalPredicate:
5043 break;
5044 default:
5045 ParamsOk = false;
5046 break;
5047 }
5048 }
5049
5050 if (!ParamsOk)
5051 continue;
5052
5053 // Found a suitable candidate, stop here.
5054 VecFunc = CI->getModule()->getFunction(Info.VectorName);
5055 FuncInfo = Info;
5056 break;
5057 }
5058
5059 if (TLI && VecFunc && !CI->isNoBuiltin())
5060 VectorCost = TTI.getCallInstrCost(nullptr, RetTy, Tys, Config.CostKind);
5061
5062 // Find the cost of an intrinsic; some targets may have instructions that
5063 // perform the operation without needing an actual call.
5064       Intrinsic::ID IID = getVectorIntrinsicIDForCall(CI, TLI);
5065 if (IID != Intrinsic::not_intrinsic)
5066         IntrinsicCost = getVectorIntrinsicCost(CI, VF);
5067
5068 InstructionCost Cost = ScalarCost;
5069 InstWidening Decision = CM_Scalarize;
5070
5071 if (VectorCost.isValid() && VectorCost <= Cost) {
5072 Cost = VectorCost;
5073 Decision = CM_VectorCall;
5074 }
5075
5076 if (IntrinsicCost.isValid() && IntrinsicCost <= Cost) {
5077         Cost = IntrinsicCost;
5078 Decision = CM_IntrinsicCall;
5079 }
5080
5081 setCallWideningDecision(CI, VF, Decision, VecFunc, IID,
5082                               FuncInfo.getParamIndexForOptionalMask(), Cost);
5083 }
5084 }
5085}
5086
5087 bool LoopVectorizationCostModel::shouldConsiderInvariant(Value *Op) {
5088 if (!Legal->isInvariant(Op))
5089 return false;
5090   // Consider Op invariant only if neither it nor its operands are predicated
5091   // instructions in the loop; a predicated instruction is not trivially hoistable.
5092 auto *OpI = dyn_cast<Instruction>(Op);
5093 return !OpI || !TheLoop->contains(OpI) ||
5094 (!isPredicatedInst(OpI) &&
5095 (!isa<PHINode>(OpI) || OpI->getParent() != TheLoop->getHeader()) &&
5096 all_of(OpI->operands(),
5097 [this](Value *Op) { return shouldConsiderInvariant(Op); }));
5098}
5099
5100 InstructionCost
5101 LoopVectorizationCostModel::getInstructionCost(Instruction *I,
5102 ElementCount VF) {
5103 // If we know that this instruction will remain uniform, check the cost of
5104 // the scalar version.
5105   if (isUniformAfterVectorization(I, VF))
5106 VF = ElementCount::getFixed(1);
5107
5108 if (VF.isVector() && isProfitableToScalarize(I, VF))
5109 return InstsToScalarize[VF][I];
5110
5111 // Forced scalars do not have any scalarization overhead.
5112 auto ForcedScalar = ForcedScalars.find(VF);
5113 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
5114 auto InstSet = ForcedScalar->second;
5115 if (InstSet.count(I))
5116       return getInstructionCost(I, ElementCount::getFixed(1)) *
5117 VF.getKnownMinValue();
5118 }
5119
5120 const auto &MinBWs = Config.getMinimalBitwidths();
5121 uint64_t InstrMinBWs = MinBWs.lookup(I);
5122 Type *RetTy = I->getType();
5123   if (canTruncateToMinimalBitwidth(I, VF))
5124 RetTy = IntegerType::get(RetTy->getContext(), InstrMinBWs);
5125 auto *SE = PSE.getSE();
5126
5127 Type *VectorTy;
5128 if (isScalarAfterVectorization(I, VF)) {
5129 [[maybe_unused]] auto HasSingleCopyAfterVectorization =
5130 [this](Instruction *I, ElementCount VF) -> bool {
5131 if (VF.isScalar())
5132 return true;
5133
5134 auto Scalarized = InstsToScalarize.find(VF);
5135 assert(Scalarized != InstsToScalarize.end() &&
5136 "VF not yet analyzed for scalarization profitability");
5137 return !Scalarized->second.count(I) &&
5138 llvm::all_of(I->users(), [&](User *U) {
5139 auto *UI = cast<Instruction>(U);
5140 return !Scalarized->second.count(UI);
5141 });
5142 };
5143
5144 // With the exception of GEPs and PHIs, after scalarization there should
5145 // only be one copy of the instruction generated in the loop. This is
5146 // because the VF is either 1, or any instructions that need scalarizing
5147 // have already been dealt with by the time we get here. As a result,
5148 // it means we don't have to multiply the instruction cost by VF.
5149 assert(I->getOpcode() == Instruction::GetElementPtr ||
5150 I->getOpcode() == Instruction::PHI ||
5151 (I->getOpcode() == Instruction::BitCast &&
5152 I->getType()->isPointerTy()) ||
5153 HasSingleCopyAfterVectorization(I, VF));
5154 VectorTy = RetTy;
5155 } else
5156 VectorTy = toVectorizedTy(RetTy, VF);
5157
5158 if (VF.isVector() && VectorTy->isVectorTy() &&
5159 !TTI.getNumberOfParts(VectorTy))
5160     return InstructionCost::getInvalid();
5161
5162 // TODO: We need to estimate the cost of intrinsic calls.
5163 switch (I->getOpcode()) {
5164 case Instruction::GetElementPtr:
5165 // We mark this instruction as zero-cost because the cost of GEPs in
5166 // vectorized code depends on whether the corresponding memory instruction
5167 // is scalarized or not. Therefore, we handle GEPs with the memory
5168 // instruction cost.
5169 return 0;
5170 case Instruction::UncondBr:
5171 case Instruction::CondBr: {
5172 // In cases of scalarized and predicated instructions, there will be VF
5173 // predicated blocks in the vectorized loop. Each branch around these
5174     // blocks also requires an extract of its vector compare i1 element.
5175 // Note that the conditional branch from the loop latch will be replaced by
5176 // a single branch controlling the loop, so there is no extra overhead from
5177 // scalarization.
5178 bool ScalarPredicatedBB = false;
5179     auto *BI = dyn_cast<CondBrInst>(I);
5180 if (VF.isVector() && BI &&
5181 (PredicatedBBsAfterVectorization[VF].count(BI->getSuccessor(0)) ||
5182 PredicatedBBsAfterVectorization[VF].count(BI->getSuccessor(1))) &&
5183 BI->getParent() != TheLoop->getLoopLatch())
5184 ScalarPredicatedBB = true;
5185
5186 if (ScalarPredicatedBB) {
5187       // Not possible to scalarize a scalable vector with predicated instructions.
5188 if (VF.isScalable())
5189         return InstructionCost::getInvalid();
5190 // Return cost for branches around scalarized and predicated blocks.
5191 auto *VecI1Ty =
5192           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
5193 return (TTI.getScalarizationOverhead(
5194 VecI1Ty, APInt::getAllOnes(VF.getFixedValue()),
5195 /*Insert*/ false, /*Extract*/ true, Config.CostKind) +
5196 (TTI.getCFInstrCost(Instruction::CondBr, Config.CostKind) *
5197 VF.getFixedValue()));
5198 }
5199
5200 if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
5201 // The back-edge branch will remain, as will all scalar branches.
5202 return TTI.getCFInstrCost(Instruction::UncondBr, Config.CostKind);
5203
5204 // This branch will be eliminated by if-conversion.
5205 return 0;
5206 // Note: We currently assume zero cost for an unconditional branch inside
5207 // a predicated block since it will become a fall-through, although we
5208 // may decide in the future to call TTI for all branches.
5209 }
5210 case Instruction::Switch: {
5211 if (VF.isScalar())
5212 return TTI.getCFInstrCost(Instruction::Switch, Config.CostKind);
5213 auto *Switch = cast<SwitchInst>(I);
5214 return Switch->getNumCases() *
5215 TTI.getCmpSelInstrCost(
5216 Instruction::ICmp,
5217 toVectorTy(Switch->getCondition()->getType(), VF),
5218 toVectorTy(Type::getInt1Ty(I->getContext()), VF),
5219 CmpInst::ICMP_EQ, Config.CostKind);
5220 }
5221 case Instruction::PHI: {
5222 auto *Phi = cast<PHINode>(I);
5223
5224 // First-order recurrences are replaced by vector shuffles inside the loop.
5225 if (VF.isVector() && Legal->isFixedOrderRecurrence(Phi)) {
5226 return TTI.getShuffleCost(
5227           TargetTransformInfo::SK_Splice, cast<VectorType>(VectorTy),
5228 cast<VectorType>(VectorTy), {}, Config.CostKind, -1);
5229 }
5230
5231 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
5232 // converted into select instructions. We require N - 1 selects per phi
5233 // node, where N is the number of incoming values.
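    // Illustrative example (editorial): phi [a, BB1], [b, BB2], [c, BB3] is
    // flattened to select(m2, c, select(m1, b, a)) during if-conversion, i.e.
    // N - 1 = 2 vector selects, which is what is costed below.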
5234 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) {
5235 Type *ResultTy = Phi->getType();
5236
5237 // All instructions in an Any-of reduction chain are narrowed to bool.
5238 // Check if that is the case for this phi node.
5239 auto *HeaderUser = cast_if_present<PHINode>(
5240 find_singleton<User>(Phi->users(), [this](User *U, bool) -> User * {
5241 auto *Phi = dyn_cast<PHINode>(U);
5242 if (Phi && Phi->getParent() == TheLoop->getHeader())
5243 return Phi;
5244 return nullptr;
5245 }));
5246 if (HeaderUser) {
5247 auto &ReductionVars = Legal->getReductionVars();
5248 auto Iter = ReductionVars.find(HeaderUser);
5249 if (Iter != ReductionVars.end() &&
5250             RecurrenceDescriptor::isAnyOfRecurrenceKind(
5251 Iter->second.getRecurrenceKind()))
5252 ResultTy = Type::getInt1Ty(Phi->getContext());
5253 }
5254 return (Phi->getNumIncomingValues() - 1) *
5255 TTI.getCmpSelInstrCost(
5256 Instruction::Select, toVectorTy(ResultTy, VF),
5257 toVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
5258 CmpInst::BAD_ICMP_PREDICATE, Config.CostKind);
5259 }
5260
5261 // When tail folding with EVL, if the phi is part of an out of loop
5262 // reduction then it will be transformed into a wide vp_merge.
5263 if (VF.isVector() && foldTailWithEVL() &&
5264 Legal->getReductionVars().contains(Phi) &&
5265 !Config.isInLoopReduction(Phi)) {
5266       IntrinsicCostAttributes ICA(
5267 Intrinsic::vp_merge, toVectorTy(Phi->getType(), VF),
5268 {toVectorTy(Type::getInt1Ty(Phi->getContext()), VF)});
5269 return TTI.getIntrinsicInstrCost(ICA, Config.CostKind);
5270 }
5271
5272 return TTI.getCFInstrCost(Instruction::PHI, Config.CostKind);
5273 }
5274 case Instruction::UDiv:
5275 case Instruction::SDiv:
5276 case Instruction::URem:
5277 case Instruction::SRem:
5278 if (VF.isVector() && isPredicatedInst(I)) {
5279 const auto [ScalarCost, SafeDivisorCost] = getDivRemSpeculationCost(I, VF);
5280 return isDivRemScalarWithPredication(ScalarCost, SafeDivisorCost) ?
5281 ScalarCost : SafeDivisorCost;
5282 }
5283 // We've proven all lanes safe to speculate, fall through.
5284 [[fallthrough]];
5285 case Instruction::Add:
5286 case Instruction::Sub: {
5287 auto Info = Legal->getHistogramInfo(I);
5288 if (Info && VF.isVector()) {
5289 const HistogramInfo *HGram = Info.value();
5290 // Assume that a non-constant update value (or a constant != 1) requires
5291 // a multiply, and add that into the cost.
5292       InstructionCost MulCost = TTI::TCC_Free;
5293 ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1));
5294 if (!RHS || RHS->getZExtValue() != 1)
5295 MulCost = TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy,
5296 Config.CostKind);
5297
5298 // Find the cost of the histogram operation itself.
5299 Type *PtrTy = VectorType::get(HGram->Load->getPointerOperandType(), VF);
5300 Type *ScalarTy = I->getType();
5301 Type *MaskTy = VectorType::get(Type::getInt1Ty(I->getContext()), VF);
5302 IntrinsicCostAttributes ICA(Intrinsic::experimental_vector_histogram_add,
5303 Type::getVoidTy(I->getContext()),
5304 {PtrTy, ScalarTy, MaskTy});
5305
5306 // Add the costs together with the add/sub operation.
5307 return TTI.getIntrinsicInstrCost(ICA, Config.CostKind) + MulCost +
5308 TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy,
5309 Config.CostKind);
5310 }
5311 [[fallthrough]];
5312 }
5313 case Instruction::FAdd:
5314 case Instruction::FSub:
5315 case Instruction::Mul:
5316 case Instruction::FMul:
5317 case Instruction::FDiv:
5318 case Instruction::FRem:
5319 case Instruction::Shl:
5320 case Instruction::LShr:
5321 case Instruction::AShr:
5322 case Instruction::And:
5323 case Instruction::Or:
5324 case Instruction::Xor: {
5325 // If we're speculating on the stride being 1, the multiplication may
5326 // fold away. We can generalize this for all operations using the notion
5327 // of neutral elements. (TODO)
5328 if (I->getOpcode() == Instruction::Mul &&
5329 ((TheLoop->isLoopInvariant(I->getOperand(0)) &&
5330 PSE.getSCEV(I->getOperand(0))->isOne()) ||
5331 (TheLoop->isLoopInvariant(I->getOperand(1)) &&
5332 PSE.getSCEV(I->getOperand(1))->isOne())))
5333 return 0;
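    // Illustrative example (editorial): for an access like p[i * Stride]
    // vectorized under the runtime assumption Stride == 1, PSE folds the
    // stride operand's SCEV to 1, so the multiply above costs nothing in the
    // vector loop.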
5334
5335 // Detect reduction patterns
5336 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy))
5337 return *RedCost;
5338
5339 // Certain instructions can be cheaper to vectorize if they have a constant
5340 // second vector operand. One example of this are shifts on x86.
5341 Value *Op2 = I->getOperand(1);
5342 if (!isa<Constant>(Op2) && TheLoop->isLoopInvariant(Op2) &&
5343 PSE.getSE()->isSCEVable(Op2->getType()) &&
5344 isa<SCEVConstant>(PSE.getSCEV(Op2))) {
5345 Op2 = cast<SCEVConstant>(PSE.getSCEV(Op2))->getValue();
5346 }
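    // Illustrative example (editorial): for `x << amt` where amt is
    // loop-invariant and SCEV proves it constant (say 3), substituting the
    // ConstantInt lets targets such as x86 cost an immediate shift instead of
    // a variable vector shift.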
5347 auto Op2Info = TTI.getOperandInfo(Op2);
5348 if (Op2Info.Kind == TargetTransformInfo::OK_AnyValue &&
5349         shouldConsiderInvariant(Op2))
5350       Op2Info.Kind = TargetTransformInfo::OK_UniformValue;
5351
5352 SmallVector<const Value *, 4> Operands(I->operand_values());
5353 return TTI.getArithmeticInstrCost(
5354 I->getOpcode(), VectorTy, Config.CostKind,
5355 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
5356 Op2Info, Operands, I, TLI);
5357 }
5358 case Instruction::FNeg: {
5359 return TTI.getArithmeticInstrCost(
5360 I->getOpcode(), VectorTy, Config.CostKind,
5361 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
5362 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
5363 I->getOperand(0), I);
5364 }
5365 case Instruction::Select: {
5366     SelectInst *SI = cast<SelectInst>(I);
5367 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
5368 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
5369
5370 const Value *Op0, *Op1;
5371 using namespace llvm::PatternMatch;
5372 if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
5373 match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
5374 // select x, y, false --> x & y
5375 // select x, true, y --> x | y
5376 const auto [Op1VK, Op1VP] = TTI::getOperandInfo(Op0);
5377 const auto [Op2VK, Op2VP] = TTI::getOperandInfo(Op1);
5378 assert(Op0->getType()->getScalarSizeInBits() == 1 &&
5379 Op1->getType()->getScalarSizeInBits() == 1);
5380
5381 return TTI.getArithmeticInstrCost(
5382 match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And,
5383 VectorTy, Config.CostKind, {Op1VK, Op1VP}, {Op2VK, Op2VP}, {Op0, Op1},
5384 I);
5385 }
5386
5387 Type *CondTy = SI->getCondition()->getType();
5388 if (!ScalarCond)
5389 CondTy = VectorType::get(CondTy, VF);
5390
5391     CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
5392 if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
5393 Pred = Cmp->getPredicate();
5394 return TTI.getCmpSelInstrCost(
5395 I->getOpcode(), VectorTy, CondTy, Pred, Config.CostKind,
5396 {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None}, I);
5397 }
5398 case Instruction::ICmp:
5399 case Instruction::FCmp: {
5400 Type *ValTy = I->getOperand(0)->getType();
5401
5402     if (canTruncateToMinimalBitwidth(I, VF)) {
5403 [[maybe_unused]] Instruction *Op0AsInstruction =
5404 dyn_cast<Instruction>(I->getOperand(0));
5405 assert((!canTruncateToMinimalBitwidth(Op0AsInstruction, VF) ||
5406 InstrMinBWs == MinBWs.lookup(Op0AsInstruction)) &&
5407 "if both the operand and the compare are marked for "
5408 "truncation, they must have the same bitwidth");
5409 ValTy = IntegerType::get(ValTy->getContext(), InstrMinBWs);
5410 }
5411
5412 VectorTy = toVectorTy(ValTy, VF);
5413 return TTI.getCmpSelInstrCost(
5414 I->getOpcode(), VectorTy, CmpInst::makeCmpResultType(VectorTy),
5415 cast<CmpInst>(I)->getPredicate(), Config.CostKind,
5416 {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None}, I);
5417 }
5418 case Instruction::Store:
5419 case Instruction::Load: {
5420 ElementCount Width = VF;
5421 if (Width.isVector()) {
5422 InstWidening Decision = getWideningDecision(I, Width);
5423 assert(Decision != CM_Unknown &&
5424 "CM decision should be taken at this point");
5425       if (getWideningCost(I, VF) == InstructionCost::getInvalid())
5426         return InstructionCost::getInvalid();
5427 if (Decision == CM_Scalarize)
5428 Width = ElementCount::getFixed(1);
5429 }
5430 VectorTy = toVectorTy(getLoadStoreType(I), Width);
5431 return getMemoryInstructionCost(I, VF);
5432 }
5433 case Instruction::BitCast:
5434 if (I->getType()->isPointerTy())
5435 return 0;
5436 [[fallthrough]];
5437 case Instruction::ZExt:
5438 case Instruction::SExt:
5439 case Instruction::FPToUI:
5440 case Instruction::FPToSI:
5441 case Instruction::FPExt:
5442 case Instruction::PtrToInt:
5443 case Instruction::IntToPtr:
5444 case Instruction::SIToFP:
5445 case Instruction::UIToFP:
5446 case Instruction::Trunc:
5447 case Instruction::FPTrunc: {
5448 // Computes the CastContextHint from a Load/Store instruction.
5449 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
5450       assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5451 "Expected a load or a store!");
5452
5453 if (VF.isScalar() || !TheLoop->contains(I))
5454         return TTI::CastContextHint::Normal;
5455
5456 switch (getWideningDecision(I, VF)) {
5457       case LoopVectorizationCostModel::CM_GatherScatter:
5458         return TTI::CastContextHint::GatherScatter;
5459       case LoopVectorizationCostModel::CM_Interleave:
5460         return TTI::CastContextHint::Interleave;
5461       case LoopVectorizationCostModel::CM_Scalarize:
5462       case LoopVectorizationCostModel::CM_Widen:
5463         return isPredicatedInst(I) ? TTI::CastContextHint::Masked
5464                                    : TTI::CastContextHint::Normal;
5465       case LoopVectorizationCostModel::CM_Widen_Reverse:
5466         return TTI::CastContextHint::Reversed;
5467       case LoopVectorizationCostModel::CM_Unknown:
5468 llvm_unreachable("Instr did not go through cost modelling?");
5469       case LoopVectorizationCostModel::CM_VectorCall:
5470       case LoopVectorizationCostModel::CM_IntrinsicCall:
5471 llvm_unreachable_internal("Instr has invalid widening decision");
5472 }
5473
5474 llvm_unreachable("Unhandled case!");
5475 };
5476
5477 unsigned Opcode = I->getOpcode();
5478     TTI::CastContextHint CCH = TTI::CastContextHint::None;
5479 // For Trunc, the context is the only user, which must be a StoreInst.
5480 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
5481 if (I->hasOneUse())
5482 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
5483 CCH = ComputeCCH(Store);
5484 }
5485 // For Z/Sext, the context is the operand, which must be a LoadInst.
5486 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
5487 Opcode == Instruction::FPExt) {
5488 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
5489 CCH = ComputeCCH(Load);
5490 }
5491
5492 // We optimize the truncation of induction variables having constant
5493 // integer steps. The cost of these truncations is the same as the scalar
5494 // operation.
5495 if (isOptimizableIVTruncate(I, VF)) {
5496 auto *Trunc = cast<TruncInst>(I);
5497 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
5498 Trunc->getSrcTy(), CCH, Config.CostKind,
5499 Trunc);
5500 }
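    // Illustrative example (editorial): for an i64 IV with constant step used
    // as `trunc i64 %iv to i32`, the vectorizer can instead generate an i32
    // induction with a truncated step, so the trunc is costed like the scalar
    // operation rather than a vector truncate of the wide IV.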
5501
5502 // Detect reduction patterns
5503 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy))
5504 return *RedCost;
5505
5506 Type *SrcScalarTy = I->getOperand(0)->getType();
5507 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
5508 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
5509 SrcScalarTy = IntegerType::get(SrcScalarTy->getContext(),
5510 MinBWs.lookup(Op0AsInstruction));
5511 Type *SrcVecTy =
5512 VectorTy->isVectorTy() ? toVectorTy(SrcScalarTy, VF) : SrcScalarTy;
5513
5515 // If the result type is <= the source type, there will be no extend
5516 // after truncating the users to the minimal required bitwidth.
5517 if (VectorTy->getScalarSizeInBits() <= SrcVecTy->getScalarSizeInBits() &&
5518 (I->getOpcode() == Instruction::ZExt ||
5519 I->getOpcode() == Instruction::SExt))
5520 return 0;
5521 }
5522
5523 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH,
5524 Config.CostKind, I);
5525 }
5526 case Instruction::Call:
5527 return getVectorCallCost(cast<CallInst>(I), VF);
5528 case Instruction::ExtractValue:
5529 return TTI.getInstructionCost(I, Config.CostKind);
5530 case Instruction::Alloca:
5531 // We cannot easily widen alloca to a scalable alloca, as
5532 // the result would need to be a vector of pointers.
5533 if (VF.isScalable())
5534       return InstructionCost::getInvalid();
5535 return TTI.getArithmeticInstrCost(Instruction::Mul, RetTy, Config.CostKind);
5536 default:
5537 // This opcode is unknown. Assume that it is the same as 'mul'.
5538 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy,
5539 Config.CostKind);
5540 } // end of switch.
5541}
5542
5543 void LoopVectorizationCostModel::collectValuesToIgnore() {
5544 // Ignore ephemeral values.
5545   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
5546
5547 SmallVector<Value *, 4> DeadInterleavePointerOps;
5548   SmallVector<Value *, 4> DeadOps;
5549
5550 // If a scalar epilogue is required, users outside the loop won't use
5551 // live-outs from the vector loop but from the scalar epilogue. Ignore them if
5552 // that is the case.
5553 bool RequiresScalarEpilogue = requiresScalarEpilogue(true);
5554 auto IsLiveOutDead = [this, RequiresScalarEpilogue](User *U) {
5555 return RequiresScalarEpilogue &&
5556 !TheLoop->contains(cast<Instruction>(U)->getParent());
5557 };
5558
5559   LoopBlocksDFS DFS(TheLoop);
5560 DFS.perform(LI);
5561 for (BasicBlock *BB : reverse(make_range(DFS.beginRPO(), DFS.endRPO())))
5562 for (Instruction &I : reverse(*BB)) {
5563 if (VecValuesToIgnore.contains(&I) || ValuesToIgnore.contains(&I))
5564 continue;
5565
5566 // Add instructions that would be trivially dead and are only used by
5567 // values already ignored to DeadOps to seed worklist.
5568       if (wouldInstructionBeTriviallyDead(&I, TLI) &&
5569 all_of(I.users(), [this, IsLiveOutDead](User *U) {
5570 return VecValuesToIgnore.contains(U) ||
5571 ValuesToIgnore.contains(U) || IsLiveOutDead(U);
5572 }))
5573 DeadOps.push_back(&I);
5574
5575 // For interleave groups, we only create a pointer for the start of the
5576 // interleave group. Queue up addresses of group members except the insert
5577 // position for further processing.
5578 if (isAccessInterleaved(&I)) {
5579 auto *Group = getInterleavedAccessGroup(&I);
5580 if (Group->getInsertPos() == &I)
5581 continue;
5582 Value *PointerOp = getLoadStorePointerOperand(&I);
5583 DeadInterleavePointerOps.push_back(PointerOp);
5584 }
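      // Illustrative example (editorial): for a group loading A[2*i] and
      // A[2*i+1], only the insert position's address is emitted (one wide
      // load serves both members), so the GEP feeding the other member's
      // address may become dead and is queued here to be marked free.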
5585
5586     // Queue branches for analysis. They are dead if their successors only
5587     // contain dead instructions.
5588 if (isa<CondBrInst>(&I))
5589 DeadOps.push_back(&I);
5590 }
5591
5592 // Mark ops feeding interleave group members as free, if they are only used
5593 // by other dead computations.
5594 for (unsigned I = 0; I != DeadInterleavePointerOps.size(); ++I) {
5595 auto *Op = dyn_cast<Instruction>(DeadInterleavePointerOps[I]);
5596 if (!Op || !TheLoop->contains(Op) || any_of(Op->users(), [this](User *U) {
5597 Instruction *UI = cast<Instruction>(U);
5598 return !VecValuesToIgnore.contains(U) &&
5599 (!isAccessInterleaved(UI) ||
5600 getInterleavedAccessGroup(UI)->getInsertPos() == UI);
5601 }))
5602 continue;
5603 VecValuesToIgnore.insert(Op);
5604 append_range(DeadInterleavePointerOps, Op->operands());
5605 }
5606
5607 // Mark ops that would be trivially dead and are only used by ignored
5608 // instructions as free.
5609 BasicBlock *Header = TheLoop->getHeader();
5610
5611 // Returns true if the block contains only dead instructions. Such blocks will
5612 // be removed by VPlan-to-VPlan transforms and won't be considered by the
5613 // VPlan-based cost model, so skip them in the legacy cost-model as well.
5614 auto IsEmptyBlock = [this](BasicBlock *BB) {
5615 return all_of(*BB, [this](Instruction &I) {
5616 return ValuesToIgnore.contains(&I) || VecValuesToIgnore.contains(&I) ||
5617              isa<UncondBrInst>(&I);
5618 });
5619 };
5620 for (unsigned I = 0; I != DeadOps.size(); ++I) {
5621 auto *Op = dyn_cast<Instruction>(DeadOps[I]);
5622
5623 // Check if the branch should be considered dead.
5624 if (auto *Br = dyn_cast_or_null<CondBrInst>(Op)) {
5625 BasicBlock *ThenBB = Br->getSuccessor(0);
5626 BasicBlock *ElseBB = Br->getSuccessor(1);
5627       // Don't consider branches leaving the loop for simplification.
5628 if (!TheLoop->contains(ThenBB) || !TheLoop->contains(ElseBB))
5629 continue;
5630 bool ThenEmpty = IsEmptyBlock(ThenBB);
5631 bool ElseEmpty = IsEmptyBlock(ElseBB);
5632 if ((ThenEmpty && ElseEmpty) ||
5633 (ThenEmpty && ThenBB->getSingleSuccessor() == ElseBB &&
5634 ElseBB->phis().empty()) ||
5635 (ElseEmpty && ElseBB->getSingleSuccessor() == ThenBB &&
5636 ThenBB->phis().empty())) {
5637 VecValuesToIgnore.insert(Br);
5638 DeadOps.push_back(Br->getCondition());
5639 }
5640 continue;
5641 }
5642
5643 // Skip any op that shouldn't be considered dead.
5644 if (!Op || !TheLoop->contains(Op) ||
5645 (isa<PHINode>(Op) && Op->getParent() == Header) ||
5646         !wouldInstructionBeTriviallyDead(Op, TLI) ||
5647 any_of(Op->users(), [this, IsLiveOutDead](User *U) {
5648 return !VecValuesToIgnore.contains(U) &&
5649 !ValuesToIgnore.contains(U) && !IsLiveOutDead(U);
5650 }))
5651 continue;
5652
5653 // If all of Op's users are in ValuesToIgnore, add it to ValuesToIgnore
5654 // which applies for both scalar and vector versions. Otherwise it is only
5655 // dead in vector versions, so only add it to VecValuesToIgnore.
5656 if (all_of(Op->users(),
5657 [this](User *U) { return ValuesToIgnore.contains(U); }))
5658 ValuesToIgnore.insert(Op);
5659
5660 VecValuesToIgnore.insert(Op);
5661 append_range(DeadOps, Op->operands());
5662 }
5663
5664 // Ignore type-promoting instructions we identified during reduction
5665 // detection.
5666 for (const auto &Reduction : Legal->getReductionVars()) {
5667 const RecurrenceDescriptor &RedDes = Reduction.second;
5668 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
5669 VecValuesToIgnore.insert_range(Casts);
5670 }
5671 // Ignore type-casting instructions we identified during induction
5672 // detection.
5673 for (const auto &Induction : Legal->getInductionVars()) {
5674 const InductionDescriptor &IndDes = Induction.second;
5675 VecValuesToIgnore.insert_range(IndDes.getCastInsts());
5676 }
5677}
5678
5679// This function will select a scalable VF if the target supports scalable
5680// vectors and a fixed one otherwise.
5681// TODO: we could return a pair of values that specify the max VF and
5682// min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
5683// `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment
5684// doesn't have a cost model that can choose which plan to execute if
5685// more than one is generated.
5686 static ElementCount determineVPlanVF(const TargetTransformInfo &TTI,
5687 VFSelectionContext &Config) {
5688 unsigned WidestType = Config.getSmallestAndWidestTypes().second;
5689
5691 TTI.enableScalableVectorization()
5692           ? TargetTransformInfo::RGK_ScalableVector
5693           : TargetTransformInfo::RGK_FixedWidthVector;
5694
5695 TypeSize RegSize = TTI.getRegisterBitWidth(RegKind);
5696 unsigned N = RegSize.getKnownMinValue() / WidestType;
5697 return ElementCount::get(N, RegSize.isScalable());
5698}
5699
5702 ElementCount VF = UserVF;
5703 // Outer loop handling: They may require CFG and instruction level
5704 // transformations before even evaluating whether vectorization is profitable.
5705 // Since we cannot modify the incoming IR, we need to build VPlan upfront in
5706 // the vectorization pipeline.
5707 if (!OrigLoop->isInnermost()) {
5708 // If the user doesn't provide a vectorization factor, determine a
5709 // reasonable one.
5710 if (UserVF.isZero()) {
5711 VF = determineVPlanVF(TTI, Config);
5712 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
5713
5714 // Make sure we have a VF > 1 for stress testing.
5715 if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
5716 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
5717 << "overriding computed VF.\n");
5718 VF = ElementCount::getFixed(4);
5719 }
5720 } else if (UserVF.isScalable() && !Config.supportsScalableVectors()) {
5721 LLVM_DEBUG(dbgs() << "LV: Not vectorizing. Scalable VF requested, but "
5722 << "not supported by the target.\n");
5723       reportVectorizationFailure(
5724 "Scalable vectorization requested but not supported by the target",
5725 "the scalable user-specified vectorization width for outer-loop "
5726 "vectorization cannot be used because the target does not support "
5727 "scalable vectors.",
5728 "ScalableVFUnfeasible", ORE, OrigLoop);
5729       return VectorizationFactor::Disabled();
5730 }
5731 assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
5732   assert(isPowerOf2_32(VF.getKnownMinValue()) &&
5733 "VF needs to be a power of two");
5734 LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
5735 << "VF " << VF << " to build VPlans.\n");
5736 buildVPlans(VF, VF);
5737
5738 if (VPlans.empty())
5739     return VectorizationFactor::Disabled();
5740
5741 // For VPlan build stress testing, we bail out after VPlan construction.
5742   if (VPlanBuildStressTest)
5743     return VectorizationFactor::Disabled();
5744
5745 return {VF, 0 /*Cost*/, 0 /* ScalarCost */};
5746 }
5747
5748 LLVM_DEBUG(
5749 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
5750 "VPlan-native path.\n");
5751   return VectorizationFactor::Disabled();
5752}
5753
5754void LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
5755 assert(OrigLoop->isInnermost() && "Inner loop expected.");
5756 CM.collectValuesToIgnore();
5757 Config.collectElementTypesForWidening(&CM.ValuesToIgnore);
5758
5759 FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
5760   if (!MaxFactors) // Cases that should not be vectorized or interleaved.
5761 return;
5762
5763 // Compute the minimal bitwidths required for integer operations in the loop
5764 // for later use by the cost model.
5765 Config.computeMinimalBitwidths();
5766
5767 // Invalidate interleave groups if all blocks of loop will be predicated.
5768 if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) &&
5769       !useMaskedInterleavedAccesses(TTI)) {
5770 LLVM_DEBUG(
5771 dbgs()
5772 << "LV: Invalidate all interleaved groups due to fold-tail by masking "
5773 "which requires masked-interleaved support.\n");
5774 if (CM.InterleaveInfo.invalidateGroups())
5775 // Invalidating interleave groups also requires invalidating all decisions
5776 // based on them, which includes widening decisions and uniform and scalar
5777 // values.
5778 CM.invalidateCostModelingDecisions();
5779 }
5780
5781 if (CM.foldTailByMasking())
5782 Legal->prepareToFoldTailByMasking();
5783
5784 ElementCount MaxUserVF =
5785 UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
5786 if (UserVF) {
5787 if (!ElementCount::isKnownLE(UserVF, MaxUserVF)) {
5788       reportVectorizationInfo(
5789 "UserVF ignored because it may be larger than the maximal safe VF",
5790 "InvalidUserVF", ORE, OrigLoop);
5791 } else {
5792       assert(isPowerOf2_32(UserVF.getKnownMinValue()) &&
5793 "VF needs to be a power of two");
5794 // Collect the instructions (and their associated costs) that will be more
5795 // profitable to scalarize.
5796 Config.collectInLoopReductions();
5797 CM.collectNonVectorizedAndSetWideningDecisions(UserVF);
5798 ElementCount EpilogueUserVF =
5799           ElementCount::getFixed(EpilogueVectorizationForceVF);
5800 if (EpilogueUserVF.isVector() &&
5801 ElementCount::isKnownLT(EpilogueUserVF, UserVF)) {
5802 CM.collectNonVectorizedAndSetWideningDecisions(EpilogueUserVF);
5803 buildVPlansWithVPRecipes(EpilogueUserVF, EpilogueUserVF);
5804 }
5805 buildVPlansWithVPRecipes(UserVF, UserVF);
5806 if (!VPlans.empty() && VPlans.back()->getSingleVF() == UserVF) {
5807 // For scalar VF, skip VPlan cost check as VPlan cost is designed for
5808 // vector VFs only.
5809 if (UserVF.isScalar() ||
5810 cost(*VPlans.back(), UserVF, /*RU=*/nullptr).isValid()) {
5811 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
5813 return;
5814 }
5815 }
5816 VPlans.clear();
5817 reportVectorizationInfo("UserVF ignored because of invalid costs.",
5818 "InvalidCost", ORE, OrigLoop);
5819 }
5820 }
5821
5822 // Collect the Vectorization Factor Candidates.
5823 SmallVector<ElementCount> VFCandidates;
5824 for (auto VF = ElementCount::getFixed(1);
5825 ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
5826 VFCandidates.push_back(VF);
5827 for (auto VF = ElementCount::getScalable(1);
5828 ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
5829 VFCandidates.push_back(VF);
5830
5831 Config.collectInLoopReductions();
5832 for (const auto &VF : VFCandidates) {
5833 // Collect Uniform and Scalar instructions after vectorization with VF.
5834 CM.collectNonVectorizedAndSetWideningDecisions(VF);
5835 }
5836
5837 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
5838 buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);
5839
5840   LLVM_DEBUG(printPlans(dbgs()));
5841}
5842
5843 InstructionCost VPCostContext::getLegacyCost(Instruction *UI,
5844 ElementCount VF) const {
5845 InstructionCost Cost = CM.getInstructionCost(UI, VF);
5846 if (Cost.isValid() && ForceTargetInstructionCost.getNumOccurrences())
5847     return InstructionCost(ForceTargetInstructionCost.getValue());
5848 return Cost;
5849}
5850
5851bool VPCostContext::skipCostComputation(Instruction *UI, bool IsVector) const {
5852 return CM.ValuesToIgnore.contains(UI) ||
5853 (IsVector && CM.VecValuesToIgnore.contains(UI)) ||
5854 SkipCostComputation.contains(UI);
5855}
5856
5857 unsigned VPCostContext::getPredBlockCostDivisor(BasicBlock *BB) const {
5858 return CM.getPredBlockCostDivisor(CostKind, BB);
5859}
5860
5861 InstructionCost
5862LoopVectorizationPlanner::precomputeCosts(VPlan &Plan, ElementCount VF,
5863 VPCostContext &CostCtx) const {
5864   InstructionCost Cost;
5865 // Cost modeling for inductions is inaccurate in the legacy cost model
5866   // compared to the recipes that are generated. To match here initially during
5867   // VPlan cost model bring-up, directly use the induction costs from the legacy
5868 // cost model. Note that we do this as pre-processing; the VPlan may not have
5869 // any recipes associated with the original induction increment instruction
5870 // and may replace truncates with VPWidenIntOrFpInductionRecipe. We precompute
5871 // the cost of induction phis and increments (both that are represented by
5872 // recipes and those that are not), to avoid distinguishing between them here,
5873 // and skip all recipes that represent induction phis and increments (the
5874 // former case) later on, if they exist, to avoid counting them twice.
5875 // Similarly we pre-compute the cost of any optimized truncates.
5876 // TODO: Switch to more accurate costing based on VPlan.
5877 for (const auto &[IV, IndDesc] : Legal->getInductionVars()) {
5878     Instruction *IVInc = cast<Instruction>(
5879 IV->getIncomingValueForBlock(OrigLoop->getLoopLatch()));
5880 SmallVector<Instruction *> IVInsts = {IVInc};
5881 for (unsigned I = 0; I != IVInsts.size(); I++) {
5882 for (Value *Op : IVInsts[I]->operands()) {
5883 auto *OpI = dyn_cast<Instruction>(Op);
5884 if (Op == IV || !OpI || !OrigLoop->contains(OpI) || !Op->hasOneUse())
5885 continue;
5886 IVInsts.push_back(OpI);
5887 }
5888 }
5889 IVInsts.push_back(IV);
5890 for (User *U : IV->users()) {
5891 auto *CI = cast<Instruction>(U);
5892 if (!CostCtx.CM.isOptimizableIVTruncate(CI, VF))
5893 continue;
5894 IVInsts.push_back(CI);
5895 }
5896
5897 // If the vector loop gets executed exactly once with the given VF, ignore
5898 // the costs of comparison and induction instructions, as they'll get
5899 // simplified away.
5900 // TODO: Remove this code after stepping away from the legacy cost model and
5901 // adding code to simplify VPlans before calculating their costs.
5902 auto TC = getSmallConstantTripCount(PSE.getSE(), OrigLoop);
5903 if (TC == VF && !CM.foldTailByMasking())
5904 addFullyUnrolledInstructionsToIgnore(OrigLoop, Legal->getInductionVars(),
5905 CostCtx.SkipCostComputation);
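    // Illustrative example (editorial): with a constant trip count of 8 and
    // VF = 8, the vector body executes exactly once, so the IV increment and
    // latch compare are simplified away; skipping their legacy costs here
    // keeps the estimate in sync with the generated code.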
5906
5907 for (Instruction *IVInst : IVInsts) {
5908 if (CostCtx.skipCostComputation(IVInst, VF.isVector()))
5909 continue;
5910 InstructionCost InductionCost = CostCtx.getLegacyCost(IVInst, VF);
5911 LLVM_DEBUG({
5912 dbgs() << "Cost of " << InductionCost << " for VF " << VF
5913 << ": induction instruction " << *IVInst << "\n";
5914 });
5915 Cost += InductionCost;
5916 CostCtx.SkipCostComputation.insert(IVInst);
5917 }
5918 }
5919
5920   // Compute the cost of all exiting conditions of the loop using the legacy
5921   // cost model. This is to match the legacy behavior, which adds the cost of
5922   // all exit conditions. Note that this over-estimates the cost, as there will
5923   // be a single condition to control the vector loop.
5924   SmallVector<BasicBlock *> Exiting;
5925 CM.TheLoop->getExitingBlocks(Exiting);
5926 SetVector<Instruction *> ExitInstrs;
5927 // Collect all exit conditions.
5928 for (BasicBlock *EB : Exiting) {
5929 auto *Term = dyn_cast<CondBrInst>(EB->getTerminator());
5930 if (!Term || CostCtx.skipCostComputation(Term, VF.isVector()))
5931 continue;
5932 if (auto *CondI = dyn_cast<Instruction>(Term->getOperand(0))) {
5933 ExitInstrs.insert(CondI);
5934 }
5935 }
5936 // Compute the cost of all instructions only feeding the exit conditions.
5937 for (unsigned I = 0; I != ExitInstrs.size(); ++I) {
5938 Instruction *CondI = ExitInstrs[I];
5939 if (!OrigLoop->contains(CondI) ||
5940 !CostCtx.SkipCostComputation.insert(CondI).second)
5941 continue;
5942 InstructionCost CondICost = CostCtx.getLegacyCost(CondI, VF);
5943 LLVM_DEBUG({
5944 dbgs() << "Cost of " << CondICost << " for VF " << VF
5945 << ": exit condition instruction " << *CondI << "\n";
5946 });
5947 Cost += CondICost;
5948 for (Value *Op : CondI->operands()) {
5949 auto *OpI = dyn_cast<Instruction>(Op);
5950 if (!OpI || CostCtx.skipCostComputation(OpI, VF.isVector()) ||
5951 any_of(OpI->users(), [&ExitInstrs](User *U) {
5952 return !ExitInstrs.contains(cast<Instruction>(U));
5953 }))
5954 continue;
5955 ExitInstrs.insert(OpI);
5956 }
5957 }
5958
5959 // Pre-compute the costs for branches except for the backedge, as the number
5960 // of replicate regions in a VPlan may not directly match the number of
5961 // branches, which would lead to different decisions.
5962 // TODO: Compute cost of branches for each replicate region in the VPlan,
5963 // which is more accurate than the legacy cost model.
5964 for (BasicBlock *BB : OrigLoop->blocks()) {
5965 if (CostCtx.skipCostComputation(BB->getTerminator(), VF.isVector()))
5966 continue;
5967 CostCtx.SkipCostComputation.insert(BB->getTerminator());
5968 if (BB == OrigLoop->getLoopLatch())
5969 continue;
5970 auto BranchCost = CostCtx.getLegacyCost(BB->getTerminator(), VF);
5971 Cost += BranchCost;
5972 }
5973
5974   // When the instruction cost is forced, don't apply the special costs here,
5975   // to make sure the forced cost is used for each recipe.
5976 if (ForceTargetInstructionCost.getNumOccurrences())
5977 return Cost;
5978
5979 // Pre-compute costs for instructions that are forced-scalar or profitable to
5980 // scalarize. For most such instructions, their scalarization costs are
5981 // accounted for here using the legacy cost model. However, some opcodes
5982 // are excluded from these precomputed scalarization costs and are instead
5983 // modeled later by the VPlan cost model (see UseVPlanCostModel below).
5984 for (Instruction *ForcedScalar : CM.ForcedScalars[VF]) {
5985 if (CostCtx.skipCostComputation(ForcedScalar, VF.isVector()))
5986 continue;
5987 CostCtx.SkipCostComputation.insert(ForcedScalar);
5988 InstructionCost ForcedCost = CostCtx.getLegacyCost(ForcedScalar, VF);
5989 LLVM_DEBUG({
5990 dbgs() << "Cost of " << ForcedCost << " for VF " << VF
5991 << ": forced scalar " << *ForcedScalar << "\n";
5992 });
5993 Cost += ForcedCost;
5994 }
5995
5996 auto UseVPlanCostModel = [](Instruction *I) -> bool {
5997 switch (I->getOpcode()) {
5998 case Instruction::SDiv:
5999 case Instruction::UDiv:
6000 case Instruction::SRem:
6001 case Instruction::URem:
6002 return true;
6003 default:
6004 return false;
6005 }
6006 };
6007 for (const auto &[Scalarized, ScalarCost] : CM.InstsToScalarize[VF]) {
6008 if (UseVPlanCostModel(Scalarized) ||
6009 CostCtx.skipCostComputation(Scalarized, VF.isVector()))
6010 continue;
6011 CostCtx.SkipCostComputation.insert(Scalarized);
6012 LLVM_DEBUG({
6013 dbgs() << "Cost of " << ScalarCost << " for VF " << VF
6014 << ": profitable to scalarize " << *Scalarized << "\n";
6015 });
6016 Cost += ScalarCost;
6017 }
6018
6019 return Cost;
6020}
6021
6022InstructionCost LoopVectorizationPlanner::cost(VPlan &Plan, ElementCount VF,
6023 VPRegisterUsage *RU) const {
6024 VPCostContext CostCtx(CM.TTI, *CM.TLI, Plan, CM, Config.CostKind, PSE,
6025 OrigLoop);
6026 InstructionCost Cost = precomputeCosts(Plan, VF, CostCtx);
6027
6028 // Now compute and add the VPlan-based cost.
6029 Cost += Plan.cost(VF, CostCtx);
6030
6031 // Add the cost of spills due to excess register usage
6032 if (RU && Config.shouldConsiderRegPressureForVF(VF))
6033 Cost += RU->spillCost(CM.TTI, Config.CostKind, ForceTargetNumVectorRegs);
6034
6035#ifndef NDEBUG
6036 unsigned EstimatedWidth =
6037 estimateElementCount(VF, Config.getVScaleForTuning());
6038 LLVM_DEBUG(dbgs() << "Cost for VF " << VF << ": " << Cost
6039 << " (Estimated cost per lane: ");
6040 if (Cost.isValid()) {
6041 double CostPerLane = double(Cost.getValue()) / EstimatedWidth;
6042 LLVM_DEBUG(dbgs() << format("%.1f", CostPerLane));
6043 } else /* No point dividing an invalid cost - it will still be invalid */
6044 LLVM_DEBUG(dbgs() << "Invalid");
6045 LLVM_DEBUG(dbgs() << ")\n");
6046#endif
6047 return Cost;
6048}
6049
6050std::pair<VectorizationFactor, VPlan *>
6051 LoopVectorizationPlanner::computeBestVF() {
6052 if (VPlans.empty())
6053 return {VectorizationFactor::Disabled(), nullptr};
6054 // If there is a single VPlan with a single VF, return it directly.
6055 VPlan &FirstPlan = *VPlans[0];
6056 ElementCount UserVF = Hints.getWidth();
6057 if (hasPlanWithVF(UserVF)) {
6058 if (VPlans.size() == 1) {
6059 assert(FirstPlan.getSingleVF() == UserVF &&
6060 "UserVF must match single VF");
6061 return {VectorizationFactor(FirstPlan.getSingleVF(), 0, 0), &FirstPlan};
6062 }
6064 assert(VPlans.size() == 2 && "Must have exactly 2 VPlans built");
6065 assert(VPlans[0]->getSingleVF() ==
6066                ElementCount::getFixed(EpilogueVectorizationForceVF) &&
6067 "expected first plan to be for the forced epilogue VF");
6068 assert(VPlans[1]->getSingleVF() == UserVF &&
6069 "expected second plan to be for the forced UserVF");
6070 return {VectorizationFactor(UserVF, 0, 0), VPlans[1].get()};
6071 }
6072 }
6073
6074 LLVM_DEBUG(dbgs() << "LV: Computing best VF using cost kind: "
6075 << (Config.CostKind == TTI::TCK_RecipThroughput
6076 ? "Reciprocal Throughput\n"
6077 : Config.CostKind == TTI::TCK_Latency
6078 ? "Instruction Latency\n"
6079 : Config.CostKind == TTI::TCK_CodeSize ? "Code Size\n"
6080 : Config.CostKind == TTI::TCK_SizeAndLatency
6081 ? "Code Size and Latency\n"
6082 : "Unknown\n"));
6083
6084   ElementCount ScalarVF = ElementCount::getFixed(1);
6085 assert(FirstPlan.hasVF(ScalarVF) &&
6086 "More than a single plan/VF w/o any plan having scalar VF");
6087
6088 // TODO: Compute scalar cost using VPlan-based cost model.
6089 InstructionCost ScalarCost = CM.expectedCost(ScalarVF);
6090 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ScalarCost << ".\n");
6091 VectorizationFactor ScalarFactor(ScalarVF, ScalarCost, ScalarCost);
6092 VectorizationFactor BestFactor = ScalarFactor;
6093
6094 bool ForceVectorization = Hints.getForce() == LoopVectorizeHints::FK_Enabled;
6095 if (ForceVectorization) {
6096 // Ignore scalar width, because the user explicitly wants vectorization.
6097 // Initialize cost to max so that VF = 2 is, at least, chosen during cost
6098 // evaluation.
6099 BestFactor.Cost = InstructionCost::getMax();
6100 }
6101
6102 VPlan *PlanForBestVF = &FirstPlan;
6103
6104 for (auto &P : VPlans) {
6105 ArrayRef<ElementCount> VFs(P->vectorFactors().begin(),
6106 P->vectorFactors().end());
6107
6108 SmallVector<VPRegisterUsage, 8> RUs;
6109 bool ConsiderRegPressure = any_of(VFs, [this](ElementCount VF) {
6110 return Config.shouldConsiderRegPressureForVF(VF);
6111 });
6112 if (ConsiderRegPressure)
6113 RUs = calculateRegisterUsageForPlan(*P, VFs, TTI, CM.ValuesToIgnore);
6114
6115 for (unsigned I = 0; I < VFs.size(); I++) {
6116 ElementCount VF = VFs[I];
6117 if (VF.isScalar())
6118 continue;
6119 if (!ForceVectorization && !willGenerateVectors(*P, VF, TTI)) {
6120 LLVM_DEBUG(
6121 dbgs()
6122 << "LV: Not considering vector loop of width " << VF
6123 << " because it will not generate any vector instructions.\n");
6124 continue;
6125 }
6126 if (Config.OptForSize && !ForceVectorization && hasReplicatorRegion(*P)) {
6127 LLVM_DEBUG(
6128 dbgs()
6129 << "LV: Not considering vector loop of width " << VF
6130 << " because it would cause replicated blocks to be generated,"
6131 << " which isn't allowed when optimizing for size.\n");
6132 continue;
6133 }
6134
6135 InstructionCost Cost =
6136 cost(*P, VF, ConsiderRegPressure ? &RUs[I] : nullptr);
6137 VectorizationFactor CurrentFactor(VF, Cost, ScalarCost);
6138
6139 if (isMoreProfitable(CurrentFactor, BestFactor, P->hasScalarTail())) {
6140 BestFactor = CurrentFactor;
6141 PlanForBestVF = P.get();
6142 }
6143
6144 // If profitable, add it to the ProfitableVFs list.
6145 if (isMoreProfitable(CurrentFactor, ScalarFactor, P->hasScalarTail()))
6146 ProfitableVFs.push_back(CurrentFactor);
6147 }
6148 }
6149
6150 VPlan &BestPlan = *PlanForBestVF;
6151
6152 assert((BestFactor.Width.isScalar() || BestFactor.ScalarCost > 0) &&
6153 "when vectorizing, the scalar cost must be computed.");
6154
6155 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << BestFactor.Width << ".\n");
6156 return {BestFactor, &BestPlan};
6157}
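// As an illustration of the selection above (hypothetical numbers): if VF=4
// has cost 20 and VF=8 has cost 32, their per-lane costs are 5.0 and 4.0
// respectively, so VF=8 wins even though its absolute per-iteration cost is
// higher. With a forced-vectorization hint, the scalar factor starts at the
// maximum cost so that any vector VF can beat it.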
6158
6159DenseMap<const SCEV *, Value *> LoopVectorizationPlanner::executePlan(
6160 ElementCount BestVF, unsigned BestUF, VPlan &BestVPlan,
6161 InnerLoopVectorizer &ILV, DominatorTree *DT,
6162 EpilogueVectorizationKind EpilogueVecKind) {
6163 assert(BestVPlan.hasVF(BestVF) &&
6164 "Trying to execute plan with unsupported VF");
6165 assert(BestVPlan.hasUF(BestUF) &&
6166 "Trying to execute plan with unsupported UF");
6167 if (BestVPlan.hasEarlyExit())
6168 ++LoopsEarlyExitVectorized;
6169
6171 BestVPlan, *PSE.getSE(), CM.TTI, Config.CostKind, BestVF, BestUF,
6172 CM.ValuesToIgnore);
6173 // TODO: Move to VPlan transform stage once the transition to the VPlan-based
6174 // cost model is complete for better cost estimates.
6175 RUN_VPLAN_PASS(VPlanTransforms::unrollByUF, BestVPlan, BestUF);
6179 bool HasBranchWeights =
6180 hasBranchWeightMD(*OrigLoop->getLoopLatch()->getTerminator());
6181 if (HasBranchWeights) {
6182 std::optional<unsigned> VScale = Config.getVScaleForTuning();
6183 RUN_VPLAN_PASS(VPlanTransforms::addBranchWeightToMiddleTerminator,
6184 BestVPlan, BestVF, VScale);
6185 }
6186
6187 // Retrieve VectorPH now, while it's easier, i.e. while the VPlan still has regions.
6188 VPBasicBlock *VectorPH = cast<VPBasicBlock>(BestVPlan.getVectorPreheader());
6189
6191 BestVF, BestUF, PSE);
6192 RUN_VPLAN_PASS(VPlanTransforms::optimizeForVFAndUF, BestVPlan, BestVF, BestUF,
6193 PSE);
6195 if (EpilogueVecKind == EpilogueVectorizationKind::None)
6196 VPlanTransforms::removeBranchOnConst(BestVPlan,
6197 /*OnlyLatches=*/false);
6198 if (BestVPlan.getEntry()->getSingleSuccessor() ==
6199 BestVPlan.getScalarPreheader()) {
6200 // TODO: The vector loop would be dead, should not even try to vectorize.
6201 ORE->emit([&]() {
6202 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationDead",
6203 OrigLoop->getStartLoc(),
6204 OrigLoop->getHeader())
6205 << "Created vector loop never executes due to insufficient trip "
6206 "count.";
6207 });
6209 }
6210
6212
6214 // Convert the exit condition to AVLNext == 0 for EVL tail folded loops.
6216 // Regions are dissolved after optimizing for VF and UF, which completely
6217 // removes unneeded loop regions first.
6219 // Expand BranchOnTwoConds after dissolution, when latch has direct access to
6220 // its successors.
6222 // Convert loops with variable-length stepping after regions are dissolved.
6224 // Remove dead back-edges for single-iteration loops with BranchOnCond(true).
6225 // Only process loop latches to avoid removing edges from the middle block,
6226 // which may be needed for epilogue vectorization.
6227 VPlanTransforms::removeBranchOnConst(BestVPlan, /*OnlyLatches=*/true);
6229 std::optional<uint64_t> MaxRuntimeStep;
6230 if (auto MaxVScale = getMaxVScale(*CM.TheFunction, CM.TTI))
6231 MaxRuntimeStep = uint64_t(*MaxVScale) * BestVF.getKnownMinValue() * BestUF;
6232 RUN_VPLAN_PASS(VPlanTransforms::materializeVectorTripCount,
6233 BestVPlan, VectorPH, CM.foldTailByMasking(),
6234 CM.requiresScalarEpilogue(BestVF.isVector()), &BestVPlan.getVFxUF(),
6235 MaxRuntimeStep);
6236 VPlanTransforms::materializeFactors(BestVPlan, VectorPH, BestVF);
6237 VPlanTransforms::cse(BestVPlan);
6239 VPlanTransforms::simplifyKnownEVL(BestVPlan, BestVF, PSE);
6240
6241 // 0. Generate SCEV-dependent code in the entry, including TripCount, before
6242 // making any changes to the CFG.
6243 DenseMap<const SCEV *, Value *> ExpandedSCEVs =
6244 VPlanTransforms::expandSCEVs(BestVPlan, *PSE.getSE());
6245
6246 // Perform the actual loop transformation.
6247 VPTransformState State(&TTI, BestVF, LI, DT, ILV.AC, ILV.Builder, &BestVPlan,
6248 OrigLoop->getParentLoop(),
6249 Legal->getWidestInductionType());
6250
6251#ifdef EXPENSIVE_CHECKS
6252 assert(DT->verify(DominatorTree::VerificationLevel::Fast));
6253#endif
6254
6255 // 1. Set up the skeleton for vectorization, including vector pre-header and
6256 // middle block. The vector loop is created during VPlan execution.
6257 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
6258 if (VPBasicBlock *ScalarPH = BestVPlan.getScalarPreheader())
6259 replaceVPBBWithIRVPBB(ScalarPH, State.CFG.PrevBB->getSingleSuccessor(),
6260 &BestVPlan);
6262
6263 assert(verifyVPlanIsValid(BestVPlan) && "final VPlan is invalid");
6264
6265 // After vectorization, the exit blocks of the original loop will have
6266 // additional predecessors. Invalidate SCEVs for the exit phis in case SE
6267 // looked through single-entry phis.
6268 ScalarEvolution &SE = *PSE.getSE();
6269 for (VPIRBasicBlock *Exit : BestVPlan.getExitBlocks()) {
6270 if (!Exit->hasPredecessors())
6271 continue;
6272 for (VPRecipeBase &PhiR : Exit->phis())
6273 SE.forgetLcssaPhiWithNewPredecessor(OrigLoop,
6274 &cast<VPIRPhi>(PhiR).getIRPhi());
6275 }
6276 // Forget the original loop and block dispositions.
6277 SE.forgetLoop(OrigLoop);
6278 SE.forgetBlockAndLoopDispositions();
6279
6281
6282 //===------------------------------------------------===//
6283 //
6284 // Notice: any optimization or new instruction that go
6285 // into the code below should also be implemented in
6286 // the cost-model.
6287 //
6288 //===------------------------------------------------===//
6289
6290 // Retrieve loop information before executing the plan, which may remove the
6291 // original loop, if it becomes unreachable.
6292 MDNode *LID = OrigLoop->getLoopID();
6293 unsigned OrigLoopInvocationWeight = 0;
6294 std::optional<unsigned> OrigAverageTripCount =
6295 getLoopEstimatedTripCount(OrigLoop, &OrigLoopInvocationWeight);
6296
6297 BestVPlan.execute(&State);
6298
6299 // 2.6. Maintain Loop Hints
6300 // Keep all loop hints from the original loop on the vector loop (we'll
6301 // replace the vectorizer-specific hints below).
6302 VPBasicBlock *HeaderVPBB = vputils::getFirstLoopHeader(BestVPlan, State.VPDT);
6303 // Add metadata to disable runtime unrolling of the scalar loop when there
6304 // are no runtime checks about strides and memory. A scalar loop that is
6305 // rarely used is not worth unrolling.
6306 bool DisableRuntimeUnroll = !ILV.RTChecks.hasChecks() && !BestVF.isScalar();
6308 HeaderVPBB ? LI->getLoopFor(State.CFG.VPBB2IRBB.lookup(HeaderVPBB))
6309 : nullptr,
6310 HeaderVPBB, BestVPlan,
6311 EpilogueVecKind == EpilogueVectorizationKind::Epilogue, LID,
6312 OrigAverageTripCount, OrigLoopInvocationWeight,
6313 estimateElementCount(BestVF * BestUF, Config.getVScaleForTuning()),
6314 DisableRuntimeUnroll);
6315
6316 // 3. Fix the vectorized code: take care of header phi's, live-outs,
6317 // predication, updating analyses.
6318 ILV.fixVectorizedLoop(State);
6319
6321
6322 return ExpandedSCEVs;
6323}
6324
6325//===--------------------------------------------------------------------===//
6326// EpilogueVectorizerMainLoop
6327//===--------------------------------------------------------------------===//
6328
6329void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
6330 LLVM_DEBUG({
6331 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
6332 << "Main Loop VF:" << EPI.MainLoopVF
6333 << ", Main Loop UF:" << EPI.MainLoopUF
6334 << ", Epilogue Loop VF:" << EPI.EpilogueVF
6335 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
6336 });
6337}
6338
6339void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
6340 LLVM_DEBUG({
6341 dbgs() << "intermediate fn:\n"
6342 << *OrigLoop->getHeader()->getParent() << "\n";
6343 });
6344}
6345
6346//===--------------------------------------------------------------------===//
6347// EpilogueVectorizerEpilogueLoop
6348//===--------------------------------------------------------------------===//
6349
6350/// This function creates a new scalar preheader, using the previous one as
6351/// entry block to the epilogue VPlan. The minimum iteration check is
6352/// represented in VPlan.
6353BasicBlock *EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
6354 BasicBlock *NewScalarPH = createScalarPreheader("vec.epilog.");
6355 BasicBlock *OriginalScalarPH = NewScalarPH->getSinglePredecessor();
6356 OriginalScalarPH->setName("vec.epilog.iter.check");
6357 VPIRBasicBlock *NewEntry = Plan.createVPIRBasicBlock(OriginalScalarPH);
6358 VPBasicBlock *OldEntry = Plan.getEntry();
6359 for (auto &R : make_early_inc_range(*OldEntry)) {
6360 // Skip moving VPIRInstructions (including VPIRPhis), which are unmovable
6361 // by definition.
6362 if (isa<VPIRInstruction>(&R))
6363 continue;
6364 R.moveBefore(*NewEntry, NewEntry->end());
6365 }
6366
6367 VPBlockUtils::reassociateBlocks(OldEntry, NewEntry);
6368 Plan.setEntry(NewEntry);
6369 // OldEntry is now dead and will be cleaned up when the plan gets destroyed.
6370
6371 return OriginalScalarPH;
6372}
6373
6374void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
6375 LLVM_DEBUG({
6376 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
6377 << "Epilogue Loop VF:" << EPI.EpilogueVF
6378 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
6379 });
6380}
6381
6382void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
6383 LLVM_DEBUG({
6384 dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n";
6385 });
6386}
6387
6388VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(VPInstruction *VPI,
6389 VFRange &Range) {
6390 assert((VPI->getOpcode() == Instruction::Load ||
6391 VPI->getOpcode() == Instruction::Store) &&
6392 "Must be called with either a load or store");
6393 auto *I = VPI->getUnderlyingInstr();
6394
6395 auto WillWiden = [&](ElementCount VF) -> bool {
6396 LoopVectorizationCostModel::InstWidening Decision =
6397 CM.getWideningDecision(I, VF);
6398 assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
6399 "CM decision should be taken at this point.");
6400 if (Decision == LoopVectorizationCostModel::CM_Interleave)
6401 return true;
6402 if (CM.isScalarAfterVectorization(I, VF) ||
6403 CM.isProfitableToScalarize(I, VF))
6404 return false;
6405 return Decision != LoopVectorizationCostModel::CM_Scalarize;
6406 };
6407
6408 if (!LoopVectorizationPlanner::getDecisionAndClampRange(WillWiden, Range))
6409 return nullptr;
6410
6411 // If a mask is not required, drop it; use the unmasked version for safe loads.
6412 // TODO: Determine if mask is needed in VPlan.
6413 VPValue *Mask = CM.isMaskRequired(I) ? VPI->getMask() : nullptr;
6414
6415 // Determine if the pointer operand of the access is either consecutive or
6416 // reverse consecutive.
6417 LoopVectorizationCostModel::InstWidening Decision =
6418 CM.getWideningDecision(I, Range.Start);
6419 bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse;
6420 bool Consecutive =
6421 Reverse || Decision == LoopVectorizationCostModel::CM_Widen;
6422
6423 VPValue *Ptr = VPI->getOpcode() == Instruction::Load ? VPI->getOperand(0)
6424 : VPI->getOperand(1);
6425 if (Consecutive) {
6426 auto *GEP = dyn_cast<GetElementPtrInst>(
6427 Ptr->getUnderlyingValue()->stripPointerCasts());
6428 VPSingleDefRecipe *VectorPtr;
6429 if (Reverse) {
6430 // When folding the tail, we may compute an address that we don't in the
6431 // original scalar loop: drop the GEP no-wrap flags in this case.
6432 // Otherwise preserve existing flags without no-unsigned-wrap, as we will
6433 // emit negative indices.
6434 GEPNoWrapFlags Flags =
6435 CM.foldTailByMasking() || !GEP
6436 ? GEPNoWrapFlags::none()
6437 : GEP->getNoWrapFlags().withoutNoUnsignedWrap();
6438 VectorPtr = new VPVectorEndPointerRecipe(
6439 Ptr, &Plan.getVF(), getLoadStoreType(I),
6440 /*Stride*/ -1, Flags, VPI->getDebugLoc());
6441 } else {
6442 VectorPtr = new VPVectorPointerRecipe(Ptr, getLoadStoreType(I),
6443 GEP ? GEP->getNoWrapFlags()
6444 : GEPNoWrapFlags::none(),
6445 VPI->getDebugLoc());
6446 }
6447 Builder.setInsertPoint(VPI);
6448 Builder.insert(VectorPtr);
6449 Ptr = VectorPtr;
6450 }
6451
6452 if (Reverse && Mask)
6453 Mask = Builder.createNaryOp(VPInstruction::Reverse, Mask, I->getDebugLoc());
6454
6455 if (VPI->getOpcode() == Instruction::Load) {
6456 auto *Load = cast<LoadInst>(I);
6457 auto *LoadR = new VPWidenLoadRecipe(*Load, Ptr, Mask, Consecutive, *VPI,
6458 Load->getDebugLoc());
6459 if (Reverse) {
6460 Builder.insert(LoadR);
6461 return new VPInstruction(VPInstruction::Reverse, LoadR, {}, {},
6462 LoadR->getDebugLoc());
6463 }
6464 return LoadR;
6465 }
6466
6467 StoreInst *Store = cast<StoreInst>(I);
6468 VPValue *StoredVal = VPI->getOperand(0);
6469 if (Reverse)
6470 StoredVal = Builder.createNaryOp(VPInstruction::Reverse, StoredVal,
6471 Store->getDebugLoc());
6472 return new VPWidenStoreRecipe(*Store, Ptr, StoredVal, Mask, Consecutive, *VPI,
6473 Store->getDebugLoc());
6474}
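// A sketch of the reverse-consecutive case above, assuming a scalar loop that
// runs downwards:
//   for (int i = n - 1; i >= 0; --i)
//     dst[i] = src[i];
// Each access is consecutive with stride -1. The VPVectorEndPointerRecipe
// computes the address of the lowest-addressed element covered by the wide
// access, the wide load then reads lanes in ascending memory order, and an
// explicit Reverse restores iteration order (likewise for the stored value
// and, if present, the mask).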
6475
6477VPRecipeBuilder::tryToOptimizeInductionTruncate(VPInstruction *VPI,
6478 VFRange &Range) {
6479 auto *I = cast<TruncInst>(VPI->getUnderlyingInstr());
6480 // Optimize the special case where the source is a constant integer
6481 // induction variable. Notice that we can only optimize the 'trunc' case
6482 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
6483 // (c) other casts depend on pointer size.
6484
6485 // Determine whether \p I is a truncation based on an induction variable that
6486 // can be optimized.
6489 I),
6490 Range))
6491 return nullptr;
6492
6494 VPI->getOperand(0)->getDefiningRecipe());
6495 PHINode *Phi = WidenIV->getPHINode();
6496 VPIRValue *Start = WidenIV->getStartValue();
6497 const InductionDescriptor &IndDesc = WidenIV->getInductionDescriptor();
6498
6499 // Wrap flags from the original induction do not apply to the truncated type,
6500 // so do not propagate them.
6501 VPIRFlags Flags = VPIRFlags::WrapFlagsTy(false, false);
6502 VPValue *Step =
6505 Phi, Start, Step, &Plan.getVF(), IndDesc, I, Flags, VPI->getDebugLoc());
6506}
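// A sketch of the pattern this optimizes, assuming a 64-bit primary IV:
//   for (int64_t i = 0; i < n; ++i)
//     a[i] = (int32_t)i;
// Instead of widening the i64 induction and truncating each vector step, a
// single widened induction of the truncated i32 type is created; the wrap
// flags of the original induction are dropped because they need not hold in
// the narrower type.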
6507
6508VPSingleDefRecipe *VPRecipeBuilder::tryToWidenCall(VPInstruction *VPI,
6509 VFRange &Range) {
6510 auto *CI = cast<CallInst>(VPI->getUnderlyingInstr());
6511 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
6512 [this, CI](ElementCount VF) {
6513 return CM.isScalarWithPredication(CI, VF);
6514 },
6515 Range);
6516
6517 if (IsPredicated)
6518 return nullptr;
6519
6520 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
6521 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
6522 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
6523 ID == Intrinsic::pseudoprobe ||
6524 ID == Intrinsic::experimental_noalias_scope_decl))
6525 return nullptr;
6526
6527 SmallVector<VPValue *, 4> Ops(VPI->op_begin(),
6528 VPI->op_begin() + CI->arg_size());
6529
6530 // Is it beneficial to perform an intrinsic call compared to a lib call?
6531 bool ShouldUseVectorIntrinsic =
6532 ID && LoopVectorizationPlanner::getDecisionAndClampRange(
6533 [&](ElementCount VF) -> bool {
6534 return CM.getCallWideningDecision(CI, VF).Kind ==
6535 LoopVectorizationCostModel::CM_IntrinsicCall;
6536 },
6537 Range);
6538 if (ShouldUseVectorIntrinsic)
6539 return new VPWidenIntrinsicRecipe(*CI, ID, Ops, CI->getType(), *VPI, *VPI,
6540 VPI->getDebugLoc());
6541
6542 Function *Variant = nullptr;
6543 std::optional<unsigned> MaskPos;
6544 // Is it better to call a vectorized version of the function than to
6545 // scalarize the call?
6546 auto ShouldUseVectorCall = LoopVectorizationPlanner::getDecisionAndClampRange(
6547 [&](ElementCount VF) -> bool {
6548 // The following case may be scalarized depending on the VF.
6549 // The flag shows whether we can use a usual Call for the vectorized
6550 // version of the instruction.
6551
6552 // If we've found a variant at a previous VF, then stop looking. A
6553 // vectorized variant of a function expects input in a certain shape
6554 // -- basically the number of input registers, the number of lanes
6555 // per register, and whether there's a mask required.
6556 // We store a pointer to the variant in the VPWidenCallRecipe, so
6557 // once we have an appropriate variant it's only valid for that VF.
6558 // This will force a different vplan to be generated for each VF that
6559 // finds a valid variant.
6560 if (Variant)
6561 return false;
6562 LoopVectorizationCostModel::CallWideningDecision Decision =
6563 CM.getCallWideningDecision(CI, VF);
6564 if (Decision.Kind == LoopVectorizationCostModel::CM_VectorCall) {
6565 Variant = Decision.Variant;
6566 MaskPos = Decision.MaskPos;
6567 return true;
6568 }
6569
6570 return false;
6571 },
6572 Range);
6573 if (ShouldUseVectorCall) {
6574 if (MaskPos.has_value()) {
6575 // We have 2 cases that would require a mask:
6576 // 1) The call needs to be predicated, either due to a conditional
6577 // in the scalar loop or use of an active lane mask with
6578 // tail-folding, and we use the appropriate mask for the block.
6579 // 2) No mask is required for the call instruction, but the only
6580 // available vector variant at this VF requires a mask, so we
6581 // synthesize an all-true mask.
6582 VPValue *Mask = VPI->isMasked() ? VPI->getMask() : Plan.getTrue();
6583
6584 Ops.insert(Ops.begin() + *MaskPos, Mask);
6585 }
6586
6587 Ops.push_back(VPI->getOperand(VPI->getNumOperandsWithoutMask() - 1));
6588 return new VPWidenCallRecipe(CI, Variant, Ops, *VPI, *VPI,
6589 VPI->getDebugLoc());
6590 }
6591
6592 return nullptr;
6593}
6594
6595bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
6597 "Instruction should have been handled earlier");
6598 // Instruction should be widened, unless it is scalar after vectorization,
6599 // scalarization is profitable or it is predicated.
6600 auto WillScalarize = [this, I](ElementCount VF) -> bool {
6601 return CM.isScalarAfterVectorization(I, VF) ||
6602 CM.isProfitableToScalarize(I, VF) ||
6603 CM.isScalarWithPredication(I, VF);
6604 };
6605 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
6606 Range);
6607}
6608
6609VPWidenRecipe *VPRecipeBuilder::tryToWiden(VPInstruction *VPI) {
6610 auto *I = VPI->getUnderlyingInstr();
6611 switch (VPI->getOpcode()) {
6612 default:
6613 return nullptr;
6614 case Instruction::SDiv:
6615 case Instruction::UDiv:
6616 case Instruction::SRem:
6617 case Instruction::URem: {
6618 // If not provably safe, use a select to form a safe divisor before widening the
6619 // div/rem operation itself. Otherwise fall through to general handling below.
6620 if (CM.isPredicatedInst(I)) {
6621 SmallVector<VPValue *> Ops(VPI->operandsWithoutMask());
6622 VPValue *Mask = VPI->getMask();
6623 VPValue *One = Plan.getConstantInt(I->getType(), 1u);
6624 auto *SafeRHS =
6625 Builder.createSelect(Mask, Ops[1], One, VPI->getDebugLoc());
6626 Ops[1] = SafeRHS;
6627 return new VPWidenRecipe(*I, Ops, *VPI, *VPI, VPI->getDebugLoc());
6628 }
6629 [[fallthrough]];
6630 }
6631 case Instruction::Add:
6632 case Instruction::And:
6633 case Instruction::AShr:
6634 case Instruction::FAdd:
6635 case Instruction::FCmp:
6636 case Instruction::FDiv:
6637 case Instruction::FMul:
6638 case Instruction::FNeg:
6639 case Instruction::FRem:
6640 case Instruction::FSub:
6641 case Instruction::ICmp:
6642 case Instruction::LShr:
6643 case Instruction::Mul:
6644 case Instruction::Or:
6645 case Instruction::Select:
6646 case Instruction::Shl:
6647 case Instruction::Sub:
6648 case Instruction::Xor:
6649 case Instruction::Freeze:
6650 return new VPWidenRecipe(*I, VPI->operandsWithoutMask(), *VPI, *VPI,
6651 VPI->getDebugLoc());
6652 case Instruction::ExtractValue: {
6653 SmallVector<VPValue *> NewOps(VPI->operandsWithoutMask());
6654 auto *EVI = cast<ExtractValueInst>(I);
6655 assert(EVI->getNumIndices() == 1 && "Expected one extractvalue index");
6656 unsigned Idx = EVI->getIndices()[0];
6657 NewOps.push_back(Plan.getConstantInt(32, Idx));
6658 return new VPWidenRecipe(*I, NewOps, *VPI, *VPI, VPI->getDebugLoc());
6659 }
6660 };
6661}
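// A sketch of the predicated-divisor case above, assuming a conditional
// division in the scalar loop:
//   for (int i = 0; i < n; ++i)
//     if (c[i])
//       q[i] = x[i] / d[i];
// Masked-off lanes may carry d[i] == 0 (or poison), so the widened operation
// divides by select(mask, d, 1) instead: lanes with a false mask divide by 1,
// which is always safe, and their results are discarded anyway.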
6662
6664 if (VPI->getOpcode() != Instruction::Store)
6665 return nullptr;
6666
6667 auto HistInfo =
6668 Legal->getHistogramInfo(cast<StoreInst>(VPI->getUnderlyingInstr()));
6669 if (!HistInfo)
6670 return nullptr;
6671
6672 const HistogramInfo *HI = *HistInfo;
6673 // FIXME: Support other operations.
6674 unsigned Opcode = HI->Update->getOpcode();
6675 assert((Opcode == Instruction::Add || Opcode == Instruction::Sub) &&
6676 "Histogram update operation must be an Add or Sub");
6677
6679 // Bucket address.
6680 HGramOps.push_back(VPI->getOperand(1));
6681 // Increment value.
6682 HGramOps.push_back(Plan.getOrAddLiveIn(HI->Update->getOperand(1)));
6683
6684 // In case of predicated execution (due to tail-folding, or conditional
6685 // execution, or both), pass the relevant mask.
6686 if (CM.isMaskRequired(HI->Store))
6687 HGramOps.push_back(VPI->getMask());
6688
6689 return new VPHistogramRecipe(Opcode, HGramOps, VPI->getDebugLoc());
6690}
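// A sketch of the kind of loop recognized here (illustrative only):
//   for (int i = 0; i < n; ++i)
//     buckets[idx[i]]++;
// A plain gather/add/scatter would lose updates whenever two lanes of idx[i]
// collide on the same bucket, so the update is modeled by a dedicated
// histogram recipe that handles intra-vector collisions.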
6691
6693 VPInstruction *VPI, VPBuilder &FinalRedStoresBuilder) {
6694 StoreInst *SI;
6695 if ((SI = dyn_cast<StoreInst>(VPI->getUnderlyingInstr())) &&
6696 Legal->isInvariantAddressOfReduction(SI->getPointerOperand())) {
6697 // Only create recipe for the final invariant store of the reduction.
6698 if (Legal->isInvariantStoreOfReduction(SI)) {
6699 auto *Recipe = new VPReplicateRecipe(
6700 SI, VPI->operandsWithoutMask(), true /* IsUniform */,
6701 nullptr /*Mask*/, *VPI, *VPI, VPI->getDebugLoc());
6702 FinalRedStoresBuilder.insert(Recipe);
6703 }
6704 VPI->eraseFromParent();
6705 return true;
6706 }
6707
6708 return false;
6709}
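// A sketch of the invariant-store pattern handled above (illustrative):
//   for (int i = 0; i < n; ++i) {
//     sum += a[i];
//     *out = sum;   // out is loop-invariant
//   }
// Only the value stored on the final iteration is observable, so the
// intermediate stores are dropped and a single uniform store of the final
// reduction result is emitted after the loop.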
6710
6711VPReplicateRecipe *VPRecipeBuilder::handleReplication(VPInstruction *VPI,
6712 VFRange &Range) {
6713 auto *I = VPI->getUnderlyingInstr();
6714 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
6715 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
6716 Range);
6717
6718 bool IsPredicated = CM.isPredicatedInst(I);
6719
6720 // Even if the instruction is not marked as uniform, there are certain
6721 // intrinsic calls that can be effectively treated as such, so we check for
6722 // them here. Conservatively, we only do this for scalable vectors, since
6723 // for fixed-width VFs we can always fall back on full scalarization.
6724 if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
6725 switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
6726 case Intrinsic::assume:
6727 case Intrinsic::lifetime_start:
6728 case Intrinsic::lifetime_end:
6729 // For scalable vectors, if one of the operands is variant then we still
6730 // want to mark the call as uniform, which will generate one instruction for just
6731 // the first lane of the vector. We can't scalarize the call in the same
6732 // way as for fixed-width vectors because we don't know how many lanes
6733 // there are.
6734 //
6735 // The reasons for doing it this way for scalable vectors are:
6736 // 1. For the assume intrinsic, generating the instruction for the first
6737 // lane is still better than not generating any at all. For
6738 // example, the input may be a splat across all lanes.
6739 // 2. For the lifetime start/end intrinsics the pointer operand only
6740 // does anything useful when the input comes from a stack object,
6741 // which suggests it should always be uniform. For non-stack objects
6742 // the effect is to poison the object, which still allows us to
6743 // remove the call.
6744 IsUniform = true;
6745 break;
6746 default:
6747 break;
6748 }
6749 }
6750 VPValue *BlockInMask = nullptr;
6751 if (!IsPredicated) {
6752 // Finalize the recipe for Instr, first if it is not predicated.
6753 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
6754 } else {
6755 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
6756 // Instructions marked for predication are replicated and a mask operand is
6757 // added initially. Masked replicate recipes will later be placed under an
6758 // if-then construct to prevent side-effects. Generate recipes to compute
6759 // the block mask for this region.
6760 BlockInMask = VPI->getMask();
6761 }
6762
6763 // Note that there is some custom logic to mark some intrinsics as uniform
6764 // manually above for scalable vectors, which this assert needs to account for
6765 // as well.
6766 assert((Range.Start.isScalar() || !IsUniform || !IsPredicated ||
6767 (Range.Start.isScalable() && isa<IntrinsicInst>(I))) &&
6768 "Should not predicate a uniform recipe");
6769 auto *Recipe =
6770 new VPReplicateRecipe(I, VPI->operandsWithoutMask(), IsUniform,
6771 BlockInMask, *VPI, *VPI, VPI->getDebugLoc());
6772 return Recipe;
6773}
6774
6775VPRecipeBase *
6776VPRecipeBuilder::tryToCreateWidenNonPhiRecipe(VPSingleDefRecipe *R,
6777 VFRange &Range) {
6778 assert(!R->isPhi() && "phis must be handled earlier");
6779 // First, check for specific widening recipes that deal with optimizing
6780 // truncates, calls and memory operations.
6781
6782 VPRecipeBase *Recipe;
6783 auto *VPI = cast<VPInstruction>(R);
6784 if (VPI->getOpcode() == Instruction::Trunc &&
6785 (Recipe = tryToOptimizeInductionTruncate(VPI, Range)))
6786 return Recipe;
6787
6788 // All widen recipes below deal only with VF > 1.
6789 if (LoopVectorizationPlanner::getDecisionAndClampRange(
6790 [&](ElementCount VF) { return VF.isScalar(); }, Range))
6791 return nullptr;
6792
6793 if (VPI->getOpcode() == Instruction::Call)
6794 return tryToWidenCall(VPI, Range);
6795
6796 Instruction *Instr = R->getUnderlyingInstr();
6797 assert(!is_contained({Instruction::Load, Instruction::Store},
6798 VPI->getOpcode()) &&
6799 "Should have been handled prior to this!");
6800
6801 if (!shouldWiden(Instr, Range))
6802 return nullptr;
6803
6804 if (VPI->getOpcode() == Instruction::GetElementPtr)
6805 return new VPWidenGEPRecipe(cast<GetElementPtrInst>(Instr),
6806 VPI->operandsWithoutMask(), *VPI,
6807 VPI->getDebugLoc());
6808
6809 if (Instruction::isCast(VPI->getOpcode())) {
6810 auto *CI = cast<CastInst>(Instr);
6811 auto *CastR = cast<VPInstructionWithType>(VPI);
6812 return new VPWidenCastRecipe(CI->getOpcode(), VPI->getOperand(0),
6813 CastR->getResultType(), CI, *VPI, *VPI,
6814 VPI->getDebugLoc());
6815 }
6816
6817 return tryToWiden(VPI);
6818}
6819
6820// To allow RUN_VPLAN_PASS to print the VPlan after VF/UF independent
6821// optimizations.
6823
6824void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
6825 ElementCount MaxVF) {
6826 if (ElementCount::isKnownGT(MinVF, MaxVF))
6827 return;
6828
6829 assert(OrigLoop->isInnermost() && "Inner loop expected.");
6830
6831 const LoopAccessInfo *LAI = Legal->getLAI();
6832 LoopVersioning LVer(*LAI, LAI->getRuntimePointerChecking()->getChecks(),
6833 OrigLoop, LI, DT, PSE.getSE());
6834 if (!LAI->getRuntimePointerChecking()->getChecks().empty() &&
6836 // Only use noalias metadata when using memory checks guaranteeing no
6837 // overlap across all iterations.
6838 LVer.prepareNoAliasMetadata();
6839 }
6840
6841 // Create initial base VPlan0, to serve as common starting point for all
6842 // candidates built later for specific VF ranges.
6843 auto VPlan0 = VPlanTransforms::buildVPlan0(
6844 OrigLoop, *LI, Legal->getWidestInductionType(),
6845 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()), PSE, &LVer);
6846
6847 // Create recipes for header phis.
6849 *OrigLoop, Legal->getInductionVars(),
6850 Legal->getReductionVars(),
6851 Legal->getFixedOrderRecurrences(),
6852 Config.getInLoopReductions(), Hints.allowReordering()))
6853 return;
6854
6857 // If we're vectorizing a loop with an uncountable exit, make sure that the
6858 // recipes are safe to handle.
6859 // TODO: Remove this once we can properly check the VPlan itself for both
6860 // the presence of an uncountable exit and the presence of stores in
6861 // the loop inside handleEarlyExits itself.
6863 if (Legal->hasUncountableEarlyExit())
6864 EEStyle = Legal->hasUncountableExitWithSideEffects()
6867
6869 OrigLoop, PSE, *DT, Legal->getAssumptionCache()))
6870 return;
6871
6873 CM.foldTailByMasking());
6875 if (CM.foldTailByMasking())
6878
6879 auto MaxVFTimes2 = MaxVF * 2;
6880 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFTimes2);) {
6881 VFRange SubRange = {VF, MaxVFTimes2};
6882 auto Plan = tryToBuildVPlanWithVPRecipes(
6883 std::unique_ptr<VPlan>(VPlan0->duplicate()), SubRange);
6884 VF = SubRange.End;
6885
6886 if (!Plan)
6887 continue;
6888
6889 // Now optimize the initial VPlan.
6893 Config.getMinimalBitwidths());
6895 // TODO: try to put addExplicitVectorLength close to addActiveLaneMask
6896 if (CM.foldTailWithEVL()) {
6898 Config.getMaxSafeElements());
6900 }
6901
6902 if (auto P = VPlanTransforms::narrowInterleaveGroups(*Plan, TTI))
6903 VPlans.push_back(std::move(P));
6904
6906 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
6907 VPlans.push_back(std::move(Plan));
6908 }
6909}
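// As an illustration of the VF ranges built above (hypothetical numbers):
// with MinVF = 2 and MaxVF = 8, the loop iterates over [2, 16), and each call
// to tryToBuildVPlanWithVPRecipes clamps SubRange.End at the first VF where
// some widening decision changes, e.g. yielding one plan for [2, 4) and one
// for [4, 16). A single VPlan can thus serve several VFs as long as all
// recipe construction decisions agree across the range.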
6910
6912LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VPlanPtr Plan,
6913 VFRange &Range) {
6914
6915 using namespace llvm::VPlanPatternMatch;
6916 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
6917
6918 // ---------------------------------------------------------------------------
6919 // Build initial VPlan: Scan the body of the loop in a topological order to
6920 // visit each basic block after having visited its predecessor basic blocks.
6921 // ---------------------------------------------------------------------------
6922
6923 bool RequiresScalarEpilogueCheck =
6924 LoopVectorizationPlanner::getDecisionAndClampRange(
6925 [this](ElementCount VF) {
6926 return !CM.requiresScalarEpilogue(VF.isVector());
6927 },
6928 Range);
6929 // Update the branch in the middle block if a scalar epilogue is required.
6930 VPBasicBlock *MiddleVPBB = Plan->getMiddleBlock();
6931 if (!RequiresScalarEpilogueCheck && MiddleVPBB->getNumSuccessors() == 2) {
6932 auto *BranchOnCond = cast<VPInstruction>(MiddleVPBB->getTerminator());
6933 assert(MiddleVPBB->getSuccessors()[1] == Plan->getScalarPreheader() &&
6934 "second successor must be scalar preheader");
6935 BranchOnCond->setOperand(0, Plan->getFalse());
6936 }
6937
6938 // Don't use getDecisionAndClampRange here, because we don't know the UF,
6939 // so it is better to be conservative here, rather than to split
6940 // the decision up into different VPlans.
6941 // TODO: Consider using getDecisionAndClampRange here to split up VPlans.
6942 bool IVUpdateMayOverflow = false;
6943 for (ElementCount VF : Range)
6944 IVUpdateMayOverflow |= !isIndvarOverflowCheckKnownFalse(&CM, VF);
6945
6946 TailFoldingStyle Style = CM.getTailFoldingStyle();
6947 // Use NUW for the induction increment if we proved that it won't overflow in
6948 // the vector loop or when not folding the tail. In the later case, we know
6949 // that the canonical induction increment will not overflow as the vector trip
6950 // count is >= increment and a multiple of the increment.
6951 VPRegionBlock *LoopRegion = Plan->getVectorLoopRegion();
6952 bool HasNUW = !IVUpdateMayOverflow || Style == TailFoldingStyle::None;
6953 if (!HasNUW) {
6954 auto *IVInc =
6955 LoopRegion->getExitingBasicBlock()->getTerminator()->getOperand(0);
6956 assert(match(IVInc,
6957 m_VPInstruction<Instruction::Add>(
6958 m_Specific(LoopRegion->getCanonicalIV()), m_VPValue())) &&
6959 "Did not find the canonical IV increment");
6960 LoopRegion->clearCanonicalIVNUW(cast<VPInstruction>(IVInc));
6961 }
6962
6963 // ---------------------------------------------------------------------------
6964 // Pre-construction: record ingredients whose recipes we'll need to further
6965 // process after constructing the initial VPlan.
6966 // ---------------------------------------------------------------------------
6967
6968 // For each interleave group which is relevant for this (possibly trimmed)
6969 // Range, add it to the set of groups to be later applied to the VPlan and add
6970 // placeholders for its members' Recipes which we'll be replacing with a
6971 // single VPInterleaveRecipe.
6972 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
6973 auto ApplyIG = [IG, this](ElementCount VF) -> bool {
6974 bool Result = (VF.isVector() && // Query is illegal for VF == 1
6975 CM.getWideningDecision(IG->getInsertPos(), VF) ==
6977 // For scalable vectors, the interleave factors must be <= 8 since we
6978 // require the (de)interleaveN intrinsics instead of shufflevectors.
6979 assert((!Result || !VF.isScalable() || IG->getFactor() <= 8) &&
6980 "Unsupported interleave factor for scalable vectors");
6981 return Result;
6982 };
6983 if (!getDecisionAndClampRange(ApplyIG, Range))
6984 continue;
6985 InterleaveGroups.insert(IG);
6986 }
6987
6988 // ---------------------------------------------------------------------------
6989 // Construct wide recipes and apply predication for original scalar
6990 // VPInstructions in the loop.
6991 // ---------------------------------------------------------------------------
6992 VPRecipeBuilder RecipeBuilder(*Plan, TLI, Legal, CM, Builder);
6993
6994 // Scan the body of the loop in a topological order to visit each basic block
6995 // after having visited its predecessor basic blocks.
6996 VPBasicBlock *HeaderVPBB = LoopRegion->getEntryBasicBlock();
6997 ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>> RPOT(
6998 HeaderVPBB);
6999
7001 Range.Start);
7002
7003 VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, Config.CostKind, CM.PSE,
7004 OrigLoop);
7005
7007 Range, RecipeBuilder);
7008
7009 // Now process all other blocks and instructions.
7010 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) {
7011 // Convert input VPInstructions to widened recipes.
7012 for (VPRecipeBase &R : make_early_inc_range(
7013 make_range(VPBB->getFirstNonPhi(), VPBB->end()))) {
7014 // Skip recipes that do not need transforming or have already been
7015 // transformed.
7016 if (isa<VPWidenCanonicalIVRecipe, VPBlendRecipe, VPReductionRecipe,
7017 VPReplicateRecipe, VPWidenLoadRecipe, VPWidenStoreRecipe,
7018 VPVectorPointerRecipe, VPVectorEndPointerRecipe,
7019 VPHistogramRecipe>(&R))
7020 continue;
7021 auto *VPI = cast<VPInstruction>(&R);
7022 if (!VPI->getUnderlyingValue())
7023 continue;
7024
7025 // TODO: Gradually replace uses of underlying instruction by analyses on
7026 // VPlan. Migrate code relying on the underlying instruction from VPlan0
7027 // to construct recipes below to not use the underlying instruction.
7028 Instruction *Instr = VPI->getUnderlyingInstr();
7029 Builder.setInsertPoint(VPI);
7030
7031 VPRecipeBase *Recipe =
7032 RecipeBuilder.tryToCreateWidenNonPhiRecipe(VPI, Range);
7033 if (!Recipe)
7034 Recipe =
7035 RecipeBuilder.handleReplication(cast<VPInstruction>(VPI), Range);
7036
7037 if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) && isa<TruncInst>(Instr)) {
7038 // Optimized a truncate to VPWidenIntOrFpInductionRecipe. It needs to be
7039 // moved to the phi section in the header.
7040 Recipe->insertBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi());
7041 } else {
7042 Builder.insert(Recipe);
7043 }
7044 if (Recipe->getNumDefinedValues() == 1) {
7045 VPI->replaceAllUsesWith(Recipe->getVPSingleValue());
7046 } else {
7047 assert(Recipe->getNumDefinedValues() == 0 &&
7048 "Unexpected multidef recipe");
7049 }
7050 R.eraseFromParent();
7051 }
7052 }
7053
7054 assert(isa<VPRegionBlock>(LoopRegion) &&
7055 !LoopRegion->getEntryBasicBlock()->empty() &&
7056 "entry block must be set to a VPRegionBlock having a non-empty entry "
7057 "VPBasicBlock");
7058
7059 // TODO: We can't call runPass on these transforms yet, due to verifier
7060 // failures.
7062 Range);
7063
7064 // ---------------------------------------------------------------------------
7065 // Transform initial VPlan: Apply previously taken decisions, in order, to
7066 // bring the VPlan to its final state.
7067 // ---------------------------------------------------------------------------
7068
7069 addReductionResultComputation(Plan, RecipeBuilder, Range.Start);
7070
7071 // Optimize FindIV reductions to use sentinel-based approach when possible.
7073 *OrigLoop);
7075 CM.foldTailByMasking());
7076
7077 // Apply mandatory transformation to handle reductions with multiple in-loop
7078 // uses if possible, bail out otherwise.
7080 OrigLoop))
7081 return nullptr;
7082 // Apply mandatory transformation to handle FP maxnum/minnum reduction with
7083 // NaNs if possible, bail out otherwise.
7085 return nullptr;
7086
7087 // Create whole-vector selects for find-last recurrences.
7089 return nullptr;
7090
7092
7093 // Create partial reduction recipes for scaled reductions and transform
7094 // recipes to abstract recipes if it is legal and beneficial and clamp the
7095 // range for better cost estimation.
7096 // TODO: Enable following transform when the EVL-version of extended-reduction
7097 // and mulacc-reduction are implemented.
7098 if (!CM.foldTailWithEVL()) {
7100 Range);
7102 Range);
7103 }
7104
7105 // Ensure scalar VF plans only contain VF=1, as required by hasScalarVFOnly.
7106 if (Range.Start.isScalar())
7107 Range.End = Range.Start * 2;
7108
7109 for (ElementCount VF : Range)
7110 Plan->addVF(VF);
7111 Plan->setName("Initial VPlan");
7112
7113 // Interleave memory: for each Interleave Group we marked earlier as relevant
7114 // for this VPlan, replace the Recipes widening its memory instructions with a
7115 // single VPInterleaveRecipe at its insertion point.
7117 InterleaveGroups, CM.isEpilogueAllowed());
7118
7119 // Replace VPValues for known constant strides.
7121 Legal->getLAI()->getSymbolicStrides());
7122
7123 auto BlockNeedsPredication = [this](BasicBlock *BB) {
7124 return Legal->blockNeedsPredication(BB);
7125 };
7126 VPlanTransforms::dropPoisonGeneratingRecipes(*Plan,
7127 BlockNeedsPredication);
7128
7129 if (useActiveLaneMask(Style)) {
7130 // TODO: Move checks to VPlanTransforms::addActiveLaneMask once
7131 // TailFoldingStyle is visible there.
7132 bool ForControlFlow = useActiveLaneMaskForControlFlow(Style);
7133 RUN_VPLAN_PASS(VPlanTransforms::addActiveLaneMask, *Plan, ForControlFlow);
7134 }
7135
7136 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
7137 return Plan;
7138}
7139
7140VPlanPtr LoopVectorizationPlanner::tryToBuildVPlan(VFRange &Range) {
7141 // Outer loop handling: They may require CFG and instruction level
7142 // transformations before even evaluating whether vectorization is profitable.
7143 // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7144 // the vectorization pipeline.
7145 assert(!OrigLoop->isInnermost());
7146 assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7147
7148 auto Plan = VPlanTransforms::buildVPlan0(
7149 OrigLoop, *LI, Legal->getWidestInductionType(),
7150 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()), PSE);
7151
7153 *Plan, PSE, *OrigLoop, Legal->getInductionVars(),
7154 MapVector<PHINode *, RecurrenceDescriptor>(),
7155 SmallPtrSet<const PHINode *, 1>(), SmallPtrSet<PHINode *, 1>(),
7156 /*AllowReordering=*/false))
7157 return nullptr;
7158 [[maybe_unused]] bool CanHandleExits = VPlanTransforms::handleEarlyExits(
7159 *Plan, UncountableExitStyle::NoUncountableExit, OrigLoop, PSE, *DT,
7160 Legal->getAssumptionCache());
7161 assert(CanHandleExits &&
7162 "early-exits are not supported in VPlan-native path");
7163 VPlanTransforms::addMiddleCheck(*Plan, /*TailFolded*/ false);
7164
7166
7167 for (ElementCount VF : Range)
7168 Plan->addVF(VF);
7169
7171 return nullptr;
7172
7173 // Optimize induction live-out users to use precomputed end values.
7175 /*FoldTail=*/false);
7176
7177 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
7178 return Plan;
7179}
7180
7181void LoopVectorizationPlanner::addReductionResultComputation(
7182 VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder, ElementCount MinVF) {
7183 using namespace VPlanPatternMatch;
7184 VPTypeAnalysis TypeInfo(*Plan);
7185 VPRegionBlock *VectorLoopRegion = Plan->getVectorLoopRegion();
7186 VPBasicBlock *MiddleVPBB = Plan->getMiddleBlock();
7187 SmallVector<VPRecipeBase *> ToDelete;
7188 VPBasicBlock *LatchVPBB = VectorLoopRegion->getExitingBasicBlock();
7189 Builder.setInsertPoint(&*std::prev(std::prev(LatchVPBB->end())));
7190 VPBasicBlock::iterator IP = MiddleVPBB->getFirstNonPhi();
7191 for (VPRecipeBase &R :
7192 Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
7193 VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
7194 if (!PhiR)
7195 continue;
7196
7197 RecurKind RecurrenceKind = PhiR->getRecurrenceKind();
7198 const RecurrenceDescriptor &RdxDesc = Legal->getRecurrenceDescriptor(
7200 Type *PhiTy = TypeInfo.inferScalarType(PhiR);
7201 // If tail is folded by masking, introduce selects between the phi
7202 // and the users outside the vector region of each reduction, at the
7203 // beginning of the dedicated latch block.
7204 auto *OrigExitingVPV = PhiR->getBackedgeValue();
7205 auto *NewExitingVPV = PhiR->getBackedgeValue();
7206 if (!PhiR->isInLoop() && CM.foldTailByMasking()) {
7207 VPValue *Cond = vputils::findHeaderMask(*Plan);
7208 NewExitingVPV =
7209 Builder.createSelect(Cond, OrigExitingVPV, PhiR, {}, "", *PhiR);
7210 OrigExitingVPV->replaceUsesWithIf(NewExitingVPV, [](VPUser &U, unsigned) {
7211 return match(&U,
7212 m_VPInstruction<VPInstruction::ComputeReductionResult>());
7213 });
7214
7215 if (CM.usePredicatedReductionSelect(RecurrenceKind))
7216 PhiR->setOperand(1, NewExitingVPV);
7217 }
7218
7219 // We want code in the middle block to appear to execute on the location of
7220 // the scalar loop's latch terminator because: (a) it is all compiler
7221 // generated, (b) these instructions are always executed after evaluating
7222 // the latch conditional branch, and (c) other passes may add new
7223 // predecessors which terminate on this line. This is the easiest way to
7224 // ensure we don't accidentally cause an extra step back into the loop while
7225 // debugging.
7226 DebugLoc ExitDL = OrigLoop->getLoopLatch()->getTerminator()->getDebugLoc();
7227
7228 // TODO: At the moment ComputeReductionResult also drives creation of the
7229 // bc.merge.rdx phi nodes, hence it needs to be created unconditionally here
7230 // even for in-loop reductions, until the reduction resume value handling is
7231 // also modeled in VPlan.
7232 VPInstruction *FinalReductionResult;
7233 VPBuilder::InsertPointGuard Guard(Builder);
7234 Builder.setInsertPoint(MiddleVPBB, IP);
7235 // For AnyOf reductions, find the select among PhiR's users and convert
7236 // the reduction phi to operate on bools before creating the final
7237 // reduction result.
7238 if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RecurrenceKind)) {
7239 auto *AnyOfSelect =
7240 cast<VPSingleDefRecipe>(*find_if(PhiR->users(), [](VPUser *U) {
7241 return match(U, m_Select(m_VPValue(), m_VPValue(), m_VPValue()));
7242 }));
7243 VPValue *Start = PhiR->getStartValue();
7244 bool TrueValIsPhi = AnyOfSelect->getOperand(1) == PhiR;
7245 // NewVal is the non-phi operand of the select.
7246 VPValue *NewVal = TrueValIsPhi ? AnyOfSelect->getOperand(2)
7247 : AnyOfSelect->getOperand(1);
7248
7249 // Adjust AnyOf reductions; replace the reduction phi for the selected
7250 // value with a boolean reduction phi node to check if the condition is
7251 // true in any iteration. The final value is selected by the final
7252 // ComputeReductionResult.
7253 VPValue *Cmp = AnyOfSelect->getOperand(0);
7254 // If the compare is checking the reduction PHI node, adjust it to check
7255 // the start value.
7256 if (VPRecipeBase *CmpR = Cmp->getDefiningRecipe())
7257 CmpR->replaceUsesOfWith(PhiR, PhiR->getStartValue());
7258 Builder.setInsertPoint(AnyOfSelect);
7259
7260 // If the true value of the select is the reduction phi, the new value
7261 // is selected if the negated condition is true in any iteration.
7262 if (TrueValIsPhi)
7263 Cmp = Builder.createNot(Cmp);
7264 VPValue *Or = Builder.createOr(PhiR, Cmp);
7265 // Only replace uses inside the vector region with Or. External uses
7266 // (e.g. scalar preheader resume phis) must be replaced by the user
7267 // update loop below with FinalReductionResult.
7268 AnyOfSelect->replaceUsesWithIf(Or, [](VPUser &U, unsigned) {
7269 return cast<VPRecipeBase>(&U)->getRegion();
7270 });
7271 ToDelete.push_back(AnyOfSelect);
7272
7273 // Convert the reduction phi to operate on bools.
7274 PhiR->setOperand(0, Plan->getFalse());
7275
7276 // Update NewExitingVPV if it was pointing to the now-replaced select.
7277 if (NewExitingVPV == AnyOfSelect)
7278 NewExitingVPV = Or;
7279
7280 Builder.setInsertPoint(MiddleVPBB, IP);
7281
7282 FinalReductionResult =
7283 Builder.createAnyOfReduction(NewExitingVPV, NewVal, Start, ExitDL);
7284 } else {
7285 VPIRFlags Flags(RecurrenceKind, PhiR->isOrdered(), PhiR->isInLoop(),
7286 PhiR->getFastMathFlags());
7287 FinalReductionResult =
7288 Builder.createNaryOp(VPInstruction::ComputeReductionResult,
7289 {NewExitingVPV}, Flags, ExitDL);
7290 }
7291 // If the vector reduction can be performed in a smaller type, we truncate
7292 // then extend the loop exit value to enable InstCombine to evaluate the
7293 // entire expression in the smaller type.
7294 if (MinVF.isVector() && PhiTy != RdxDesc.getRecurrenceType() &&
7295 !RecurrenceDescriptor::isAnyOfRecurrenceKind(RecurrenceKind)) {
7296 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
7297 assert(!RecurrenceDescriptor::isMinMaxRecurrenceKind(RecurrenceKind) &&
7298 "Unexpected truncated min-max recurrence!");
7299 Type *RdxTy = RdxDesc.getRecurrenceType();
7300 VPWidenCastRecipe *Trunc;
7301 Instruction::CastOps ExtendOpc =
7302 RdxDesc.isSigned() ? Instruction::SExt : Instruction::ZExt;
7303 VPWidenCastRecipe *Extnd;
7304 {
7305 VPBuilder::InsertPointGuard Guard(Builder);
7306 Builder.setInsertPoint(
7307 NewExitingVPV->getDefiningRecipe()->getParent(),
7308 std::next(NewExitingVPV->getDefiningRecipe()->getIterator()));
7309 Trunc =
7310 Builder.createWidenCast(Instruction::Trunc, NewExitingVPV, RdxTy);
7311 Extnd = Builder.createWidenCast(ExtendOpc, Trunc, PhiTy);
7312 }
7313 if (PhiR->getOperand(1) == NewExitingVPV)
7314 PhiR->setOperand(1, Extnd->getVPSingleValue());
7315
7316 // Update ComputeReductionResult with the truncated exiting value and
7317 // extend its result. Operand 0 provides the values to be reduced.
7318 FinalReductionResult->setOperand(0, Trunc);
7319 FinalReductionResult =
7320 Builder.createScalarCast(ExtendOpc, FinalReductionResult, PhiTy, {});
7321 }
7322
7323 // Update all users outside the vector region. Also replace redundant
7324 // extracts.
7325 for (auto *U : to_vector(OrigExitingVPV->users())) {
7326 auto *Parent = cast<VPRecipeBase>(U)->getParent();
7327 if (FinalReductionResult == U || Parent->getParent())
7328 continue;
7329 // Skip ComputeReductionResult and FindIV reductions when they are not the
7330 // final result.
7331 if (match(U, m_VPInstruction<VPInstruction::ComputeReductionResult>()) ||
7333 match(U, m_VPInstruction<Instruction::ICmp>())))
7334 continue;
7335 U->replaceUsesOfWith(OrigExitingVPV, FinalReductionResult);
7336
7337 // Look through ExtractLastPart.
7339 U = cast<VPInstruction>(U)->getSingleUser();
7340
7343 cast<VPInstruction>(U)->replaceAllUsesWith(FinalReductionResult);
7344 }
7345
7346 RecurKind RK = PhiR->getRecurrenceKind();
7351 VPBuilder PHBuilder(Plan->getVectorPreheader());
7352 VPValue *Iden = Plan->getOrAddLiveIn(
7353 getRecurrenceIdentity(RK, PhiTy, PhiR->getFastMathFlags()));
7354 auto *ScaleFactorVPV = Plan->getConstantInt(32, 1);
7355 VPValue *StartV = PHBuilder.createNaryOp(
7357 {PhiR->getStartValue(), Iden, ScaleFactorVPV}, *PhiR);
7358 PhiR->setOperand(0, StartV);
7359 }
7360 }
7361 for (VPRecipeBase *R : ToDelete)
7362 R->eraseFromParent();
7363
7365}
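// A sketch of the AnyOf rewrite above (illustrative):
//   bool found = init;
//   for (int i = 0; i < n; ++i)
//     if (a[i] > 42)
//       found = true;
// The select feeding the reduction is replaced by an i1 phi or-ed with the
// per-lane compare (negated when the true operand of the select was the phi);
// the final AnyOfReduction then picks between the new value and the start
// value depending on whether any lane was ever true.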
7366
7367void LoopVectorizationPlanner::attachRuntimeChecks(
7368 VPlan &Plan, GeneratedRTChecks &RTChecks, bool HasBranchWeights) const {
7369 const auto &[SCEVCheckCond, SCEVCheckBlock] = RTChecks.getSCEVChecks();
7370 if (SCEVCheckBlock && SCEVCheckBlock->hasNPredecessors(0)) {
7371 assert((!Config.OptForSize ||
7372 CM.Hints->getForce() == LoopVectorizeHints::FK_Enabled) &&
7373 "Cannot SCEV check stride or overflow when optimizing for size");
7374 VPlanTransforms::attachCheckBlock(Plan, SCEVCheckCond,
7375 SCEVCheckBlock, HasBranchWeights);
7376 }
7377 const auto &[MemCheckCond, MemCheckBlock] = RTChecks.getMemRuntimeChecks();
7378 if (MemCheckBlock && MemCheckBlock->hasNPredecessors(0)) {
7379 // VPlan-native path does not do any analysis for runtime checks
7380 // currently.
7381 assert((!EnableVPlanNativePath || OrigLoop->isInnermost()) &&
7382 "Runtime checks are not supported for outer loops yet");
7383
7384 if (Config.OptForSize) {
7385 assert(
7386 CM.Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
7387 "Cannot emit memory checks when optimizing for size, unless forced "
7388 "to vectorize.");
7389 ORE->emit([&]() {
7390 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
7391 OrigLoop->getStartLoc(),
7392 OrigLoop->getHeader())
7393 << "Code-size may be reduced by not forcing "
7394 "vectorization, or by source-code modifications "
7395 "eliminating the need for runtime checks "
7396 "(e.g., adding 'restrict').";
7397 });
7398 }
7399 VPlanTransforms::attachCheckBlock(Plan, MemCheckCond,
7400 MemCheckBlock, HasBranchWeights);
7401 }
7402}
7403
7404void LoopVectorizationPlanner::addMinimumIterationCheck(
7405 VPlan &Plan, ElementCount VF, unsigned UF,
7406 ElementCount MinProfitableTripCount) const {
7407 const uint32_t *BranchWeights =
7408 hasBranchWeightMD(*OrigLoop->getLoopLatch()->getTerminator())
7409 ? MinItersBypassWeights
7410 : nullptr;
7411 VPlanTransforms::addMinimumIterationCheck(Plan, VF, UF,
7412 MinProfitableTripCount,
7413 CM.requiresScalarEpilogue(VF.isVector()),
7414 CM.foldTailByMasking(), OrigLoop, BranchWeights,
7415 OrigLoop->getLoopPredecessor()->getTerminator()->getDebugLoc(),
7416 PSE, /*CheckBlock=*/nullptr);
7417}
7418
7419// Determine how to lower the epilogue, which depends on 1) optimizing
7420// for minimum code-size, 2) tail-folding compiler options, 3) loop
7421// hints forcing tail-folding, and 4) a TTI hook that analyzes whether the loop
7422// is suitable for tail-folding.
7423static EpilogueLowering
7425 bool OptForSize, TargetTransformInfo *TTI,
7427 InterleavedAccessInfo *IAI) {
7428 // 1) OptSize takes precedence over all other options, i.e. if this is set,
7429 // don't look at hints or options, and don't request an epilogue.
7430 if (F->hasOptSize() ||
7431 (OptForSize && Hints.getForce() != LoopVectorizeHints::FK_Enabled))
7433
7434 // 2) If set, obey the directives
7435 if (TailFoldingPolicy.getNumOccurrences()) {
7436 switch (TailFoldingPolicy) {
7438 return CM_EpilogueAllowed;
7443 };
7444 }
7445
7446 // 3) If set, obey the hints
7447 switch (Hints.getPredicate()) {
7451 return CM_EpilogueAllowed;
7452 };
7453
7454 // 4) if the TTI hook indicates this is profitable, request tail-folding.
7455 TailFoldingInfo TFI(TLI, &LVL, IAI);
7456 if (TTI->preferTailFoldingOverEpilogue(&TFI))
7458
7459 return CM_EpilogueAllowed;
7460}
7461
7462// Process the loop in the VPlan-native vectorization path. This path builds
7463// VPlan upfront in the vectorization pipeline, which allows to apply
7464// VPlan-to-VPlan transformations from the very beginning without modifying the
7465// input LLVM IR.
7471 std::function<BlockFrequencyInfo &()> GetBFI, bool OptForSize,
7472 LoopVectorizeHints &Hints, LoopVectorizationRequirements &Requirements) {
7473
7474 if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
7475 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
7476 return false;
7477 }
7478 assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
7479 Function *F = L->getHeader()->getParent();
7480 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
7481
7482 EpilogueLowering SEL =
7483 getEpilogueLowering(F, L, Hints, OptForSize, TTI, TLI, *LVL, &IAI);
7484
7485 VFSelectionContext Config(*TTI, LVL, L, *F, PSE, DB, ORE, &Hints, OptForSize);
7486 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, AC, ORE,
7487 GetBFI, F, &Hints, IAI, Config);
7488 // Use the planner for outer loop vectorization.
7489 // TODO: CM is not used at this point inside the planner. Turn CM into an
7490 // optional argument if we don't need it in the future.
7491 LoopVectorizationPlanner LVP(L, LI, DT, TLI, *TTI, LVL, CM, Config, IAI, PSE,
7492 Hints, ORE);
7493
7494 // Get user vectorization factor.
7495 ElementCount UserVF = Hints.getWidth();
7496
7497 CM.collectElementTypesForWidening();
7498
7499 // Plan how to best vectorize, return the best VF and its cost.
7500 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
7501
7502 // If we are stress testing VPlan builds, do not attempt to generate vector
7503 // code. Masked vector code generation support will follow soon.
7504 // Also, do not attempt to vectorize if no vector code will be produced.
7505 if (VPlanBuildStressTest || VectorizationFactor::Disabled() == VF)
7506 return false;
7507
7508 VPlan &BestPlan = LVP.getPlanFor(VF.Width);
7509
7510 {
7511 GeneratedRTChecks Checks(PSE, DT, LI, TTI, Config.CostKind);
7512 InnerLoopVectorizer LB(L, PSE, LI, DT, TTI, AC, VF.Width, /*UF=*/1, &CM,
7513 Checks, BestPlan);
7514 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \"" << F->getName()
7515 << "\"\n");
7516 LVP.addMinimumIterationCheck(BestPlan, VF.Width, /*UF=*/1,
7517 VF.MinProfitableTripCount);
7518 bool HasBranchWeights =
7519 hasBranchWeightMD(*L->getLoopLatch()->getTerminator());
7520 LVP.attachRuntimeChecks(BestPlan, Checks, HasBranchWeights);
7521
7522 reportVectorization(ORE, L, VF, 1);
7523
7524 LVP.executePlan(VF.Width, /*UF=*/1, BestPlan, LB, DT);
7525 }
7526
7527 assert(!verifyFunction(*F, &dbgs()));
7528 return true;
7529}
7530
7531// Emit a remark if there are stores to floats that required a floating point
7532// extension. If the vectorized loop was generated with floating point, there
7533// will be a performance penalty from the conversion overhead and the change in
7534// the vector width.
7535static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
7536 SmallVector<Instruction *, 4> Worklist;
7537 for (BasicBlock *BB : L->getBlocks()) {
7538 for (Instruction &Inst : *BB) {
7539 if (auto *S = dyn_cast<StoreInst>(&Inst)) {
7540 if (S->getValueOperand()->getType()->isFloatTy())
7541 Worklist.push_back(S);
7542 }
7543 }
7544 }
7545
7546 // Traverse the floating point stores upwards, searching for floating point
7547 // conversions.
7548 SmallPtrSet<const Instruction *, 4> Visited;
7549 SmallPtrSet<const Instruction *, 4> EmittedRemark;
7550 while (!Worklist.empty()) {
7551 auto *I = Worklist.pop_back_val();
7552 if (!L->contains(I))
7553 continue;
7554 if (!Visited.insert(I).second)
7555 continue;
7556
7557 // Emit a remark if the floating point store required a floating
7558 // point conversion.
7559 // TODO: More work could be done to identify the root cause such as a
7560 // constant or a function return type and point the user to it.
7561 if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
7562 ORE->emit([&]() {
7563 return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
7564 I->getDebugLoc(), L->getHeader())
7565 << "floating point conversion changes vector width. "
7566 << "Mixed floating point precision requires an up/down "
7567 << "cast that will negatively impact performance.";
7568 });
7569
7570 for (Use &Op : I->operands())
7571 if (auto *OpI = dyn_cast<Instruction>(Op))
7572 Worklist.push_back(OpI);
7573 }
7574}
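// A sketch of what triggers the remark above (illustrative):
//   float a[N], b[N];
//   for (int i = 0; i < N; ++i)
//     a[i] = b[i] + 1.0;   // 1.0 is double: fpext, double add, fptrunc
// Walking up from the float store reaches the fpext of b[i]; the widened loop
// must then operate on double vectors, halving the number of lanes per
// operation compared to pure float arithmetic.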
7575
7576/// For loops with uncountable early exits, find the cost of doing work when
7577/// exiting the loop early, such as calculating the final exit values of
7578/// variables used outside the loop.
7579/// TODO: This is currently overly pessimistic because the loop may not take
7580/// the early exit, but better to keep this conservative for now. In future,
7581/// it might be possible to relax this by using branch probabilities.
7583 VPlan &Plan, ElementCount VF) {
7584 InstructionCost Cost = 0;
7585 for (auto *ExitVPBB : Plan.getExitBlocks()) {
7586 for (auto *PredVPBB : ExitVPBB->getPredecessors()) {
7587 // If the predecessor is not the middle.block, then it must be the
7588 // vector.early.exit block, which may contain work to calculate the exit
7589 // values of variables used outside the loop.
7590 if (PredVPBB != Plan.getMiddleBlock()) {
7591 LLVM_DEBUG(dbgs() << "Calculating cost of work in exit block "
7592 << PredVPBB->getName() << ":\n");
7593 Cost += PredVPBB->cost(VF, CostCtx);
7594 }
7595 }
7596 }
7597 return Cost;
7598}
7599
7600/// This function determines whether or not it's still profitable to vectorize
7601/// the loop given the extra work we have to do outside of the loop:
7602/// 1. Perform the runtime checks before entering the loop to ensure it's safe
7603/// to vectorize.
7604/// 2. In the case of loops with uncountable early exits, we may have to do
7605/// extra work when exiting the loop early, such as calculating the final
7606/// exit values of variables used outside the loop.
7607/// 3. The middle block.
7608static bool isOutsideLoopWorkProfitable(GeneratedRTChecks &Checks,
7609 VectorizationFactor &VF, Loop *L,
7611 VPCostContext &CostCtx, VPlan &Plan,
7612 EpilogueLowering SEL,
7613 std::optional<unsigned> VScale) {
7614 InstructionCost RtC = Checks.getCost();
7615 if (!RtC.isValid())
7616 return false;
7617
7618 // When interleaving only, scalar and vector cost will be equal, which in turn
7619 // would lead to a divide by 0. Fall back to a hard threshold.
7620 if (VF.Width.isScalar()) {
7621 // TODO: Should we rename VectorizeMemoryCheckThreshold?
7623 LLVM_DEBUG(
7624 dbgs()
7625 << "LV: Interleaving only is not profitable due to runtime checks\n");
7626 return false;
7627 }
7628 return true;
7629 }
7630
7631 // The scalar cost should only be 0 when vectorizing with a user specified
7632 // VF/IC. In those cases, runtime checks should always be generated.
7633 uint64_t ScalarC = VF.ScalarCost.getValue();
7634 if (ScalarC == 0)
7635 return true;
7636
7637 InstructionCost TotalCost = RtC;
7638 // Add on the cost of any work required in the vector early exit block, if
7639 // one exists.
7640 TotalCost += calculateEarlyExitCost(CostCtx, Plan, VF.Width);
7641 TotalCost += Plan.getMiddleBlock()->cost(VF.Width, CostCtx);
7642
7643 // First, compute the minimum iteration count required so that the vector
7644 // loop outperforms the scalar loop.
7645 // The total cost of the scalar loop is
7646 // ScalarC * TC
7647 // where
7648 // * TC is the actual trip count of the loop.
7649 // * ScalarC is the cost of a single scalar iteration.
7650 //
7651 // The total cost of the vector loop is
7652 // TotalCost + VecC * (TC / VF) + EpiC
7653 // where
7654 // * TotalCost is the sum of the costs of
7655 // - the generated runtime checks, i.e. RtC
7656 // - performing any additional work in the vector.early.exit block for
7657 // loops with uncountable early exits.
7658 // - the middle block, if ExpectedTC <= VF.Width.
7659 // * VecC is the cost of a single vector iteration.
7660 // * TC is the actual trip count of the loop
7661 // * VF is the vectorization factor
7662 // * EpiC is the cost of the generated epilogue, including the cost
7663 // of the remaining scalar operations.
7664 //
7665 // Vectorization is profitable once the total vector cost is less than the
7666 // total scalar cost:
7667 // TotalCost + VecC * (TC / VF) + EpiC < ScalarC * TC
7668 //
7669 // Now we can compute the minimum required trip count TC as
7670 // VF * (TotalCost + EpiC) / (ScalarC * VF - VecC) < TC
7671 //
7672 // For now we assume the epilogue cost EpiC = 0 for simplicity. Note that
7673 // the computations below are performed with integer arithmetic and the
7674 // result is rounded up, hence we get an upper estimate of the TC.
7675 unsigned IntVF = estimateElementCount(VF.Width, VScale);
7676 uint64_t Div = ScalarC * IntVF - VF.Cost.getValue();
7677 uint64_t MinTC1 =
7678 Div == 0 ? 0 : divideCeil(TotalCost.getValue() * IntVF, Div);
7679
7680 // Second, compute a minimum iteration count so that the cost of the
7681 // runtime checks is only a fraction of the total scalar loop cost. This
7682 // adds a loop-dependent bound on the overhead incurred if the runtime
7683 // checks fail. In case the runtime checks fail, the cost is RtC + ScalarC
7684 // * TC. To bound the runtime check to be a fraction 1/X of the scalar
7685 // cost, compute
7686 // RtC < ScalarC * TC * (1 / X) ==> RtC * X / ScalarC < TC
7687 uint64_t MinTC2 = divideCeil(RtC.getValue() * 10, ScalarC); // X = 10.
7688
7689 // Now pick the larger minimum. If it is not a multiple of VF and an epilogue
7690 // is allowed, choose the next closest multiple of VF. This should partly
7691 // compensate for ignoring the epilogue cost.
7692 uint64_t MinTC = std::max(MinTC1, MinTC2);
7693 if (SEL == CM_EpilogueAllowed)
7694 MinTC = alignTo(MinTC, IntVF);
7695 VF.MinProfitableTripCount = ElementCount::getFixed(MinTC);
7696
7697 LLVM_DEBUG(
7698 dbgs() << "LV: Minimum required TC for runtime checks to be profitable:"
7699 << VF.MinProfitableTripCount << "\n");
7700
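// Illustrative arithmetic (hypothetical costs, not taken from any target):
// with ScalarC = 4, VF.Cost = 10, IntVF = 4 and TotalCost = 30,
//   Div    = 4 * 4 - 10       = 6
//   MinTC1 = ceil(30 * 4 / 6) = 20,
// and with RtC = 30 the 1/X bound (X = 10) gives
//   MinTC2 = ceil(30 * 10 / 4) = 75,
// so MinTC = max(20, 75) = 75, aligned up to 76 when an epilogue is allowed.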
7701 // Skip vectorization if the expected trip count is less than the minimum
7702 // required trip count.
7703 if (auto ExpectedTC = getSmallBestKnownTC(PSE, L)) {
7704 if (ElementCount::isKnownLT(*ExpectedTC, VF.MinProfitableTripCount)) {
7705 LLVM_DEBUG(dbgs() << "LV: Vectorization is not beneficial: expected "
7706 "trip count < minimum profitable VF ("
7707 << *ExpectedTC << " < " << VF.MinProfitableTripCount
7708 << ")\n");
7709
7710 return false;
7711 }
7712 }
7713 return true;
7714}
7715
7716 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
7717 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
7718 !EnableLoopInterleaving),
7719 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
7720 !EnableLoopVectorization) {}
7721
7722/// Prepare \p MainPlan for vectorizing the main vector loop during epilogue
7723/// vectorization.
7724 static SmallVector<VPInstruction *>
7725 preparePlanForMainVectorLoop(VPlan &MainPlan, VPlan &EpiPlan) {
7726 using namespace VPlanPatternMatch;
7727 // When vectorizing the epilogue, FindFirstIV & FindLastIV reductions can
7728 // introduce multiple uses of undef/poison. If the reduction start value may
7729 // be undef or poison it needs to be frozen and the frozen start has to be
7730 // used when computing the reduction result. We also need to use the frozen
7731 // value in the resume phi generated by the main vector loop, as this is also
7732 // used to compute the reduction result after the epilogue vector loop.
7733 auto AddFreezeForFindLastIVReductions = [](VPlan &Plan,
7734 bool UpdateResumePhis) {
7735 VPBuilder Builder(Plan.getEntry());
7736 for (VPRecipeBase &R : *Plan.getMiddleBlock()) {
7737 auto *VPI = dyn_cast<VPInstruction>(&R);
7738 if (!VPI)
7739 continue;
7740 VPValue *OrigStart;
7741 if (!matchFindIVResult(VPI, m_VPValue(), m_VPValue(OrigStart)))
7742 continue;
7743 if (isGuaranteedNotToBeUndefOrPoison(OrigStart->getLiveInIRValue()))
7744 continue;
7745 VPInstruction *Freeze =
7746 Builder.createNaryOp(Instruction::Freeze, {OrigStart}, {}, "fr");
7747 VPI->setOperand(2, Freeze);
7748 if (UpdateResumePhis)
7749 OrigStart->replaceUsesWithIf(Freeze, [Freeze](VPUser &U, unsigned) {
7750 return Freeze != &U && isa<VPPhi>(&U);
7751 });
7752 }
7753 };
7754 AddFreezeForFindLastIVReductions(MainPlan, true);
7755 AddFreezeForFindLastIVReductions(EpiPlan, false);
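// A minimal sketch of the effect (illustrative IR names): if the FindLastIV
// start value %start may be undef or poison, each of its uses could observe
// a different value. Freezing pins a single concrete value,
//   %fr = freeze i32 %start
// which is then used both when computing the reduction result and, for the
// main plan, in the resume phi feeding the epilogue vector loop.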
7756
7757 VPValue *VectorTC = nullptr;
7758 auto *Term =
7759 MainPlan.getVectorLoopRegion()->getExitingBasicBlock()->getTerminator();
7760 [[maybe_unused]] bool MatchedTC =
7761 match(Term, m_BranchOnCount(m_VPValue(), m_VPValue(VectorTC)));
7762 assert(MatchedTC && "must match vector trip count");
7763
7764 // If there is a suitable resume value for the canonical induction in the
7765 // scalar (which will become vector) epilogue loop, use it and move it to the
7766 // beginning of the scalar preheader. Otherwise create it below.
7767 VPBasicBlock *MainScalarPH = MainPlan.getScalarPreheader();
7768 auto ResumePhiIter =
7769 find_if(MainScalarPH->phis(), [VectorTC](VPRecipeBase &R) {
7770 return match(&R, m_VPInstruction<Instruction::PHI>(m_Specific(VectorTC),
7771 m_ZeroInt()));
7772 });
7773 VPPhi *ResumePhi = nullptr;
7774 if (ResumePhiIter == MainScalarPH->phis().end()) {
7775 Type *Ty = VPTypeAnalysis(MainPlan).inferScalarType(VectorTC);
7776 VPBuilder ScalarPHBuilder(MainScalarPH, MainScalarPH->begin());
7777 ResumePhi = ScalarPHBuilder.createScalarPhi(
7778 {VectorTC, MainPlan.getZero(Ty)}, {}, "vec.epilog.resume.val");
7779 } else {
7780 ResumePhi = cast<VPPhi>(&*ResumePhiIter);
7781 ResumePhi->setName("vec.epilog.resume.val");
7782 if (&MainScalarPH->front() != ResumePhi)
7783 ResumePhi->moveBefore(*MainScalarPH, MainScalarPH->begin());
7784 }
7785
7786 // Create a ResumeForEpilogue for the canonical IV resume as the
7787 // first non-phi, to keep it alive for the epilogue.
7788 VPBuilder ResumeBuilder(MainScalarPH);
7789 ResumeBuilder.createNaryOp(VPInstruction::ResumeForEpilogue, ResumePhi);
7790
7791 // Create ResumeForEpilogue instructions for the resume phis of the
7792 // VPIRPhis in the scalar header of the main plan and return them so they can
7793 // be used as resume values when vectorizing the epilogue.
7794 return to_vector(
7795 map_range(MainPlan.getScalarHeader()->phis(), [&](VPRecipeBase &R) {
7796 assert(isa<VPIRPhi>(R) &&
7797 "only VPIRPhis expected in the scalar header");
7798 return ResumeBuilder.createNaryOp(VPInstruction::ResumeForEpilogue,
7799 R.getOperand(0));
7800 }));
7801}
7802
7803/// Prepare \p Plan for vectorizing the epilogue loop. That is, re-use expanded
7804/// SCEVs from \p ExpandedSCEVs and set resume values for header recipes. Some
7805/// reductions require creating new instructions to compute the resume values.
7806/// They are collected in a vector and returned. They must be moved to the
7807 /// preheader of the vector epilogue loop, after being created by the
7808 /// execution of \p Plan.
7809 static SmallVector<Instruction *> preparePlanForEpilogueVectorLoop(
7810 VPlan &Plan, Loop *L, const SCEV2ValueTy &ExpandedSCEVs,
7811 EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel &CM,
7812 VFSelectionContext &Config, ScalarEvolution &SE) {
7813 VPRegionBlock *VectorLoop = Plan.getVectorLoopRegion();
7814 VPBasicBlock *Header = VectorLoop->getEntryBasicBlock();
7815 Header->setName("vec.epilog.vector.body");
7816
7817 VPValue *IV = VectorLoop->getCanonicalIV();
7818 // When vectorizing the epilogue loop, the canonical induction needs to start
7819 // at the resume value from the main vector loop. Find the resume value
7820 // created during execution of the main VPlan. It must be the first phi in the
7821 // loop preheader. Add this resume value as an offset to the canonical IV of
7822 // the epilogue loop.
7823 using namespace llvm::PatternMatch;
7824 PHINode *EPResumeVal = &*L->getLoopPreheader()->phis().begin();
7825 for (Value *Inc : EPResumeVal->incoming_values()) {
7826 if (match(Inc, m_SpecificInt(0)))
7827 continue;
7828 assert(!EPI.VectorTripCount &&
7829 "Must only have a single non-zero incoming value");
7830 EPI.VectorTripCount = Inc;
7831 }
7832 // If we didn't find a non-zero vector trip count, all incoming values
7833 // must be zero, which also means the vector trip count is zero. Pick the
7834 // first zero as vector trip count.
7835 // TODO: We should avoid choosing a VF * UF for which the main vector loop
7836 // is known to be dead.
7837 if (!EPI.VectorTripCount) {
7838 assert(EPResumeVal->getNumIncomingValues() > 0 &&
7839 all_of(EPResumeVal->incoming_values(), match_fn(m_SpecificInt(0))) &&
7840 "all incoming values must be 0");
7841 EPI.VectorTripCount = EPResumeVal->getOperand(0);
7842 }
7843 VPValue *VPV = Plan.getOrAddLiveIn(EPResumeVal);
7844 assert(all_of(IV->users(),
7845 [](const VPUser *U) {
7846 return isa<VPScalarIVStepsRecipe>(U) ||
7847 isa<VPDerivedIVRecipe>(U) ||
7848 cast<VPRecipeBase>(U)->isScalarCast() ||
7849 cast<VPInstruction>(U)->getOpcode() ==
7850 Instruction::Add;
7851 }) &&
7852 "the canonical IV should only be used by its increment or "
7853 "ScalarIVSteps when resetting the start value");
7854 VPBuilder Builder(Header, Header->getFirstNonPhi());
7855 VPInstruction *Add = Builder.createAdd(IV, VPV);
7856 // Replace all users of the canonical IV and its increment with the offset
7857 // version, except for the Add itself and the canonical IV increment.
7859 assert(Increment && "Must have a canonical IV increment at this point");
7860 IV->replaceUsesWithIf(Add, [Add, Increment](VPUser &U, unsigned) {
7861 return &U != Add && &U != Increment;
7862 });
7863 VPInstruction *OffsetIVInc =
7865 Increment->replaceAllUsesWith(OffsetIVInc);
7866 OffsetIVInc->setOperand(0, Increment);
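// In effect (schematic, illustrative names): the epilogue's canonical IV
// still counts 0, VFxUF, 2*VFxUF, ..., but all of its other users now see
//   offset.iv = canonical.iv + resume-value-from-main-loop
// so scalar IV steps and derived IVs resume where the main vector loop left
// off.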
7867
7868 DenseMap<Value *, Value *> ToFrozen;
7869 SmallVector<Instruction *> InstsToMove;
7870 // Ensure that the start values for all header phi recipes are updated before
7871 // vectorizing the epilogue loop.
7872 for (VPRecipeBase &R : Header->phis()) {
7873 Value *ResumeV = nullptr;
7874 // TODO: Move setting of resume values to prepareToExecute.
7875 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) {
7876 // Find the reduction result by searching users of the phi or its backedge
7877 // value.
7878 auto IsReductionResult = [](VPRecipeBase *R) {
7879 auto *VPI = dyn_cast<VPInstruction>(R);
7880 return VPI && VPI->getOpcode() == VPInstruction::ComputeReductionResult;
7881 };
7882 auto *RdxResult = cast<VPInstruction>(
7883 vputils::findRecipe(ReductionPhi->getBackedgeValue(), IsReductionResult));
7884 assert(RdxResult && "expected to find reduction result");
7885
7886 ResumeV = cast<PHINode>(ReductionPhi->getUnderlyingInstr())
7887 ->getIncomingValueForBlock(L->getLoopPreheader());
7888
7889 // Check for FindIV pattern by looking for icmp user of RdxResult.
7890 // The pattern is: select(icmp ne RdxResult, Sentinel), RdxResult, Start
7891 using namespace VPlanPatternMatch;
7892 VPValue *SentinelVPV = nullptr;
7893 bool IsFindIV = any_of(RdxResult->users(), [&](VPUser *U) {
7894 return match(U, VPlanPatternMatch::m_SpecificICmp(
7895 ICmpInst::ICMP_NE, m_Specific(RdxResult),
7896 m_VPValue(SentinelVPV)));
7897 });
7898
7899 RecurKind RK = ReductionPhi->getRecurrenceKind();
7900 if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RK) || IsFindIV) {
7901 auto *ResumePhi = cast<PHINode>(ResumeV);
7902 Value *StartV = ResumePhi->getIncomingValueForBlock(
7903 EPI.MainLoopIterationCountCheck);
7904 IRBuilder<> Builder(ResumePhi->getParent(),
7905 ResumePhi->getParent()->getFirstNonPHIIt());
7906
7907 if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RK)) {
7908 // VPReductionPHIRecipes for AnyOf reductions expect a boolean as
7909 // start value; compare the final value from the main vector loop
7910 // to the start value.
7911 ResumeV = Builder.CreateICmpNE(ResumeV, StartV);
7912 if (auto *I = dyn_cast<Instruction>(ResumeV))
7913 InstsToMove.push_back(I);
7914 } else {
7915 assert(SentinelVPV && "expected to find icmp using RdxResult");
7916 if (auto *FreezeI = dyn_cast<FreezeInst>(StartV))
7917 ToFrozen[FreezeI->getOperand(0)] = StartV;
7918
7919 // Adjust resume: select(icmp eq ResumeV, StartV), Sentinel, ResumeV
7920 Value *Cmp = Builder.CreateICmpEQ(ResumeV, StartV);
7921 if (auto *I = dyn_cast<Instruction>(Cmp))
7922 InstsToMove.push_back(I);
7923 ResumeV = Builder.CreateSelect(Cmp, SentinelVPV->getLiveInIRValue(),
7924 ResumeV);
7925 if (auto *I = dyn_cast<Instruction>(ResumeV))
7926 InstsToMove.push_back(I);
7927 }
7928 } else {
7929 VPValue *StartVal = Plan.getOrAddLiveIn(ResumeV);
7930 auto *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
7931 if (auto *VPI = dyn_cast<VPInstruction>(PhiR->getStartValue())) {
7933 "unexpected start value");
7934 // Partial sub-reductions always start at 0 and account for the
7935 // reduction start value in a final subtraction. Update it to use the
7936 // resume value from the main vector loop.
7937 if (PhiR->getVFScaleFactor() > 1 &&
7938 PhiR->getRecurrenceKind() == RecurKind::Sub) {
7939 auto *Sub = cast<VPInstruction>(RdxResult->getSingleUser());
7940 assert(Sub->getOpcode() == Instruction::Sub && "Unexpected opcode");
7941 assert(isa<VPIRValue>(Sub->getOperand(0)) &&
7942 "Expected operand to match the original start value of the "
7943 "reduction");
7946 "Expected start value for partial sub-reduction to start at "
7947 "zero");
7948 Sub->setOperand(0, StartVal);
7949 } else
7950 VPI->setOperand(0, StartVal);
7951 continue;
7952 }
7953 }
7954 } else {
7955 // Retrieve the induction resume values for wide inductions from
7956 // their original phi nodes in the scalar loop.
7957 PHINode *IndPhi = cast<VPWidenInductionRecipe>(&R)->getPHINode();
7958 // Hook up to the PHINode generated by a ResumePhi recipe of main
7959 // loop VPlan, which feeds the scalar loop.
7960 ResumeV = IndPhi->getIncomingValueForBlock(L->getLoopPreheader());
7961 }
7962 assert(ResumeV && "Must have a resume value");
7963 VPValue *StartVal = Plan.getOrAddLiveIn(ResumeV);
7964 cast<VPHeaderPHIRecipe>(&R)->setStartValue(StartVal);
7965 }
7966
7967 // For some VPValues in the epilogue plan we must re-use the generated IR
7968 // values from the main plan. Replace them with live-in VPValues.
7969 // TODO: This is a workaround needed for epilogue vectorization and it
7970 // should be removed once induction resume value creation is done
7971 // directly in VPlan.
7972 for (auto &R : make_early_inc_range(*Plan.getEntry())) {
7973 // Re-use frozen values from the main plan for Freeze VPInstructions in the
7974 // epilogue plan. This ensures all users use the same frozen value.
7975 auto *VPI = dyn_cast<VPInstruction>(&R);
7976 if (VPI && VPI->getOpcode() == Instruction::Freeze) {
7977 VPI->replaceAllUsesWith(Plan.getOrAddLiveIn(
7978 ToFrozen.lookup(VPI->getOperand(0)->getLiveInIRValue())));
7979 continue;
7980 }
7981
7982 // Re-use the trip count and steps expanded for the main loop, as
7983 // skeleton creation needs it as a value that dominates both the scalar
7984 // and vector epilogue loops.
7985 auto *ExpandR = dyn_cast<VPExpandSCEVRecipe>(&R);
7986 if (!ExpandR)
7987 continue;
7988 VPValue *ExpandedVal =
7989 Plan.getOrAddLiveIn(ExpandedSCEVs.lookup(ExpandR->getSCEV()));
7990 ExpandR->replaceAllUsesWith(ExpandedVal);
7991 if (Plan.getTripCount() == ExpandR)
7992 Plan.resetTripCount(ExpandedVal);
7993 ExpandR->eraseFromParent();
7994 }
7995
7996 auto VScale = Config.getVScaleForTuning();
7997 unsigned MainLoopStep =
7998 estimateElementCount(EPI.MainLoopVF * EPI.MainLoopUF, VScale);
7999 unsigned EpilogueLoopStep =
8000 estimateElementCount(EPI.EpilogueVF * EPI.EpilogueUF, VScale);
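// For instance (assumed tuning values, purely illustrative): MainLoopVF =
// vscale x 4 with MainLoopUF = 2 and a vscale-for-tuning of 2 yields
// MainLoopStep = 16, while EpilogueVF = 2 with EpilogueUF = 1 yields
// EpilogueLoopStep = 2.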
8004 EPI.EpilogueVF, EPI.EpilogueUF, MainLoopStep, EpilogueLoopStep, SE);
8005
8006 return InstsToMove;
8007}
8008
8009static void
8010 fixScalarResumeValuesFromBypass(BasicBlock *BypassBlock, Loop *L,
8011 VPlan &BestEpiPlan,
8012 ArrayRef<VPInstruction *> ResumeValues) {
8013 // Fix resume values from the additional bypass block.
8014 BasicBlock *PH = L->getLoopPreheader();
8015 for (auto *Pred : predecessors(PH)) {
8016 for (PHINode &Phi : PH->phis()) {
8017 if (Phi.getBasicBlockIndex(Pred) != -1)
8018 continue;
8019 Phi.addIncoming(Phi.getIncomingValueForBlock(BypassBlock), Pred);
8020 }
8021 }
8022 auto *ScalarPH = cast<VPIRBasicBlock>(BestEpiPlan.getScalarPreheader());
8023 if (ScalarPH->hasPredecessors()) {
8024 // Fix resume values for inductions and reductions from the additional
8025 // bypass block using the incoming values from the main loop's resume phis.
8026 // ResumeValues correspond 1:1 with the scalar loop header phis.
8027 for (auto [ResumeV, HeaderPhi] :
8028 zip(ResumeValues, BestEpiPlan.getScalarHeader()->phis())) {
8029 auto *HeaderPhiR = cast<VPIRPhi>(&HeaderPhi);
8030 auto *EpiResumePhi =
8031 cast<PHINode>(HeaderPhiR->getIRPhi().getIncomingValueForBlock(PH));
8032 if (EpiResumePhi->getBasicBlockIndex(BypassBlock) == -1)
8033 continue;
8034 auto *MainResumePhi = cast<PHINode>(ResumeV->getUnderlyingValue());
8035 EpiResumePhi->setIncomingValueForBlock(
8036 BypassBlock, MainResumePhi->getIncomingValueForBlock(BypassBlock));
8037 }
8038 }
8039}
8040
8041/// Connect the epilogue vector loop generated for \p EpiPlan to the main vector
8042/// loop, after both plans have executed, updating branches from the iteration
8043/// and runtime checks of the main loop, as well as updating various phis. \p
8044/// InstsToMove contains instructions that need to be moved to the preheader of
8045/// the epilogue vector loop.
8046static void connectEpilogueVectorLoop(VPlan &EpiPlan, Loop *L,
8047 EpilogueLoopVectorizationInfo &EPI,
8048 DominatorTree *DT,
8049 GeneratedRTChecks &Checks,
8050 ArrayRef<Instruction *> InstsToMove,
8051 ArrayRef<VPInstruction *> ResumeValues) {
8052 BasicBlock *VecEpilogueIterationCountCheck =
8053 cast<VPIRBasicBlock>(EpiPlan.getEntry())->getIRBasicBlock();
8054
8055 BasicBlock *VecEpiloguePreHeader =
8056 cast<CondBrInst>(VecEpilogueIterationCountCheck->getTerminator())
8057 ->getSuccessor(1);
8058 // Adjust the control flow taking the state info from the main loop
8059 // vectorization into account.
8061 "expected this to be saved from the previous pass.");
8062 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
8063 EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
8064 VecEpilogueIterationCountCheck, VecEpiloguePreHeader);
8065
8066 DTU.applyUpdates({{DominatorTree::Delete, EPI.MainLoopIterationCountCheck,
8067 VecEpilogueIterationCountCheck},
8068 {DominatorTree::Insert, EPI.MainLoopIterationCountCheck,
8069 VecEpiloguePreHeader}});
8070
8071 BasicBlock *ScalarPH =
8072 cast<VPIRBasicBlock>(EpiPlan.getScalarPreheader())->getIRBasicBlock();
8073 EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
8074 VecEpilogueIterationCountCheck, ScalarPH);
8075 DTU.applyUpdates(
8076 {{DominatorTree::Delete, EPI.EpilogueIterationCountCheck,
8077 VecEpilogueIterationCountCheck},
8078 {DominatorTree::Insert, EPI.EpilogueIterationCountCheck, ScalarPH}});
8079
8080 // Adjust the terminators of runtime check blocks and phis using them.
8081 BasicBlock *SCEVCheckBlock = Checks.getSCEVChecks().second;
8082 BasicBlock *MemCheckBlock = Checks.getMemRuntimeChecks().second;
8083 if (SCEVCheckBlock) {
8084 SCEVCheckBlock->getTerminator()->replaceUsesOfWith(
8085 VecEpilogueIterationCountCheck, ScalarPH);
8086 DTU.applyUpdates({{DominatorTree::Delete, SCEVCheckBlock,
8087 VecEpilogueIterationCountCheck},
8088 {DominatorTree::Insert, SCEVCheckBlock, ScalarPH}});
8089 }
8090 if (MemCheckBlock) {
8091 MemCheckBlock->getTerminator()->replaceUsesOfWith(
8092 VecEpilogueIterationCountCheck, ScalarPH);
8093 DTU.applyUpdates(
8094 {{DominatorTree::Delete, MemCheckBlock, VecEpilogueIterationCountCheck},
8095 {DominatorTree::Insert, MemCheckBlock, ScalarPH}});
8096 }
8097
8098 // The vec.epilog.iter.check block may contain Phi nodes from inductions
8099 // or reductions which merge control-flow from the latch block and the
8100 // middle block. Update the incoming values here and move the Phi into the
8101 // preheader.
8102 SmallVector<PHINode *, 4> PhisInBlock(
8103 llvm::make_pointer_range(VecEpilogueIterationCountCheck->phis()));
8104
8105 for (PHINode *Phi : PhisInBlock) {
8106 Phi->moveBefore(VecEpiloguePreHeader->getFirstNonPHIIt());
8107 Phi->replaceIncomingBlockWith(
8108 VecEpilogueIterationCountCheck->getSinglePredecessor(),
8109 VecEpilogueIterationCountCheck);
8110
8111 // If the phi doesn't have an incoming value from the
8112 // EpilogueIterationCountCheck, we are done. Otherwise remove the
8113 // incoming value and also those from other check blocks. This is needed
8114 // for reduction phis only.
8115 if (none_of(Phi->blocks(), [&](BasicBlock *IncB) {
8116 return EPI.EpilogueIterationCountCheck == IncB;
8117 }))
8118 continue;
8119 Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck);
8120 if (SCEVCheckBlock)
8121 Phi->removeIncomingValue(SCEVCheckBlock);
8122 if (MemCheckBlock)
8123 Phi->removeIncomingValue(MemCheckBlock);
8124 }
8125
8126 auto IP = VecEpiloguePreHeader->getFirstNonPHIIt();
8127 for (auto *I : InstsToMove)
8128 I->moveBefore(IP);
8129
8130 // VecEpilogueIterationCountCheck conditionally skips over the epilogue loop
8131 // after executing the main loop. We need to update the resume values of
8132 // inductions and reductions during epilogue vectorization.
8133 fixScalarResumeValuesFromBypass(VecEpilogueIterationCountCheck, L, EpiPlan,
8134 ResumeValues);
8135
8136 // Remove dead phis that were moved to the epilogue preheader but are unused
8137 // (e.g., resume phis for inductions not widened in the epilogue vector loop).
8138 for (PHINode &Phi : make_early_inc_range(VecEpiloguePreHeader->phis()))
8139 if (Phi.use_empty())
8140 Phi.eraseFromParent();
8141}
8142
8143 bool LoopVectorizePass::processLoop(Loop *L) {
8144 assert((EnableVPlanNativePath || L->isInnermost()) &&
8145 "VPlan-native path is not enabled. Only process inner loops.");
8146
8147 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in '"
8148 << L->getHeader()->getParent()->getName() << "' from "
8149 << L->getLocStr() << "\n");
8150
8151 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI);
8152
8153 LLVM_DEBUG(
8154 dbgs() << "LV: Loop hints:"
8155 << " force="
8157 ? "disabled"
8159 ? "enabled"
8160 : "?"))
8161 << " width=" << Hints.getWidth()
8162 << " interleave=" << Hints.getInterleave() << "\n");
8163
8164 // Function containing loop
8165 Function *F = L->getHeader()->getParent();
8166
8167 // Looking at the diagnostic output is the only way to determine if a loop
8168 // was vectorized (other than looking at the IR or machine code), so it
8169 // is important to generate an optimization remark for each loop. Most of
8170 // these messages are generated as OptimizationRemarkAnalysis. Remarks
8171 // generated as OptimizationRemark and OptimizationRemarkMissed are
8172 // less verbose, reporting vectorized loops and unvectorized loops that may
8173 // benefit from vectorization, respectively.
8174
8175 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
8176 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
8177 return false;
8178 }
8179
8180 PredicatedScalarEvolution PSE(*SE, *L);
8181
8182 // Query this against the original loop and save it here because the profile
8183 // of the original loop header may change as the transformation happens.
8184 bool OptForSize = llvm::shouldOptimizeForSize(
8185 L->getHeader(), PSI,
8186 PSI && PSI->hasProfileSummary() ? &GetBFI() : nullptr,
8187 PGSOQueryType::IRPass);
8188
8189 // Check if it is legal to vectorize the loop.
8190 LoopVectorizationRequirements Requirements;
8191 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, F, *LAIs, LI, ORE,
8192 &Requirements, &Hints, DB, AC,
8193 /*AllowRuntimeSCEVChecks=*/!OptForSize, AA);
8194 if (!LVL.canVectorize(EnableVPlanNativePath)) {
8195 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
8196 Hints.emitRemarkWithHints();
8197 return false;
8198 }
8199
8200 if (LVL.hasUncountableEarlyExit()) {
8201 if (!EnableEarlyExitVectorization) {
8202 reportVectorizationFailure("Auto-vectorization of loops with uncountable "
8203 "early exit is not enabled",
8204 "UncountableEarlyExitLoopsDisabled", ORE, L);
8205 return false;
8206 }
8207 }
8208
8209 // Entrance to the VPlan-native vectorization path. Outer loops are processed
8210 // here. They may require CFG and instruction level transformations before
8211 // even evaluating whether vectorization is profitable. Since we cannot modify
8212 // the incoming IR, we need to build VPlan upfront in the vectorization
8213 // pipeline.
8214 if (!L->isInnermost())
8215 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
8216 ORE, GetBFI, OptForSize, Hints,
8217 Requirements);
8218
8219 assert(L->isInnermost() && "Inner loop expected.");
8220
8221 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
8222 bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
8223
8224 // If an override option has been passed in for interleaved accesses, use it.
8225 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
8226 UseInterleaved = EnableInterleavedMemAccesses;
8227
8228 // Analyze interleaved memory accesses.
8229 if (UseInterleaved)
8230 IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
8231
8232 if (LVL.hasUncountableEarlyExit()) {
8233 BasicBlock *LoopLatch = L->getLoopLatch();
8234 if (IAI.requiresScalarEpilogue() ||
8235 any_of(LVL.getCountableExitingBlocks(), not_equal_to(LoopLatch))) {
8236 reportVectorizationFailure("Auto-vectorization of early exit loops "
8237 "requiring a scalar epilogue is unsupported",
8238 "UncountableEarlyExitUnsupported", ORE, L);
8239 return false;
8240 }
8241 }
8242
8243 // Check the function attributes and profiles to find out if this function
8244 // should be optimized for size.
8245 EpilogueLowering SEL =
8246 getEpilogueLowering(F, L, Hints, OptForSize, TTI, TLI, LVL, &IAI);
8247
8248 // Check the loop for a trip count threshold: vectorize loops with a tiny trip
8249 // count by optimizing for size, to minimize overheads.
8250 auto ExpectedTC = getSmallBestKnownTC(PSE, L);
8251 if (ExpectedTC && ExpectedTC->isFixed() &&
8252 ExpectedTC->getFixedValue() < TinyTripCountVectorThreshold) {
8253 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
8254 << "This loop is worth vectorizing only if no scalar "
8255 << "iteration overheads are incurred.");
8256 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
8257 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
8258 else {
8259 LLVM_DEBUG(dbgs() << "\n");
8260 // Tail-folded loops are efficient even when the loop
8261 // iteration count is low. However, setting the epilogue policy to
8262 // `CM_EpilogueNotAllowedLowTripLoop` prevents vectorizing loops
8263 // with runtime checks. It's more effective to let
8264 // `isOutsideLoopWorkProfitable` determine if vectorization is
8265 // beneficial for the loop.
8266 if (SEL != CM_EpilogueNotNeededUsePredicate)
8267 SEL = CM_EpilogueNotAllowedLowTripLoop;
8268 }
8269 }
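// For example, with the default vectorizer-min-trip-count of 16, a loop with
// a known trip count of 12 takes this path and is vectorized only if no
// scalar-iteration overheads are incurred.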
8270
8271 // Check the function attributes to see if implicit floats or vectors are
8272 // allowed.
8273 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
8275 "Can't vectorize when the NoImplicitFloat attribute is used",
8276 "loop not vectorized due to NoImplicitFloat attribute",
8277 "NoImplicitFloat", ORE, L);
8278 Hints.emitRemarkWithHints();
8279 return false;
8280 }
8281
8282 // Check if the target supports potentially unsafe FP vectorization.
8283 // FIXME: Add a check for the type of safety issue (denormal, signaling)
8284 // for the target we're vectorizing for, to make sure none of the
8285 // additional fp-math flags can help.
8286 if (Hints.isPotentiallyUnsafe() &&
8287 TTI->isFPVectorizationPotentiallyUnsafe()) {
8289 "Potentially unsafe FP op prevents vectorization",
8290 "loop not vectorized due to unsafe FP support.",
8291 "UnsafeFP", ORE, L);
8292 Hints.emitRemarkWithHints();
8293 return false;
8294 }
8295
8296 bool AllowOrderedReductions;
8297 // If the flag is set, use that instead and override the TTI behaviour.
8298 if (ForceOrderedReductions.getNumOccurrences() > 0)
8299 AllowOrderedReductions = ForceOrderedReductions;
8300 else
8301 AllowOrderedReductions = TTI->enableOrderedReductions();
8302 if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
8303 ORE->emit([&]() {
8304 auto *ExactFPMathInst = Requirements.getExactFPInst();
8305 return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
8306 ExactFPMathInst->getDebugLoc(),
8307 ExactFPMathInst->getParent())
8308 << "loop not vectorized: cannot prove it is safe to reorder "
8309 "floating-point operations";
8310 });
8311 LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
8312 "reorder floating-point operations\n");
8313 Hints.emitRemarkWithHints();
8314 return false;
8315 }
8316
8317 // Use the cost model.
8318 VFSelectionContext Config(*TTI, &LVL, L, *F, PSE, DB, ORE, &Hints,
8319 OptForSize);
8320 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, AC, ORE,
8321 GetBFI, F, &Hints, IAI, Config);
8322 // Use the planner for vectorization.
8323 LoopVectorizationPlanner LVP(L, LI, DT, TLI, *TTI, &LVL, CM, Config, IAI, PSE,
8324 Hints, ORE);
8325
8326 // Get user vectorization factor and interleave count.
8327 ElementCount UserVF = Hints.getWidth();
8328 unsigned UserIC = Hints.getInterleave();
8329 if (UserIC > 1 && !LVL.isSafeForAnyVectorWidth())
8330 UserIC = 1;
8331
8332 // Plan how to best vectorize.
8333 LVP.plan(UserVF, UserIC);
8334 auto [VF, BestPlanPtr] = LVP.computeBestVF();
8335 unsigned IC = 1;
8336
8337 if (ORE->allowExtraAnalysis(LV_NAME))
8338 LVP.emitInvalidCostRemarks(ORE);
8339
8340 GeneratedRTChecks Checks(PSE, DT, LI, TTI, Config.CostKind);
8341 if (LVP.hasPlanWithVF(VF.Width)) {
8342 // Select the interleave count.
8343 IC = LVP.selectInterleaveCount(*BestPlanPtr, VF.Width, VF.Cost);
8344
8345 unsigned SelectedIC = std::max(IC, UserIC);
8346 // Optimistically generate runtime checks if they are needed. Drop them if
8347 // they turn out to not be profitable.
8348 if (VF.Width.isVector() || SelectedIC > 1) {
8349 Checks.create(L, *LVL.getLAI(), PSE.getPredicate(), VF.Width, SelectedIC,
8350 *ORE);
8351
8352 // Bail out early if either the SCEV or memory runtime checks are known to
8353 // fail. In that case, the vector loop would never execute.
8354 using namespace llvm::PatternMatch;
8355 if (Checks.getSCEVChecks().first &&
8356 match(Checks.getSCEVChecks().first, m_One()))
8357 return false;
8358 if (Checks.getMemRuntimeChecks().first &&
8359 match(Checks.getMemRuntimeChecks().first, m_One()))
8360 return false;
8361 }
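// Note: a check condition that constant-folds to i1 true (matched by
// m_One()) means the bypass is always taken, i.e. the vector loop would be
// dead, so vectorization is abandoned early here.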
8362
8363 // Check if it is profitable to vectorize with runtime checks.
8364 bool ForceVectorization =
8365 Hints.getForce() == LoopVectorizeHints::FK_Enabled;
8366 VPCostContext CostCtx(CM.TTI, *CM.TLI, *BestPlanPtr, CM, Config.CostKind,
8367 CM.PSE, L);
8368 if (!ForceVectorization &&
8369 !isOutsideLoopWorkProfitable(Checks, VF, L, PSE, CostCtx, *BestPlanPtr,
8370 SEL, Config.getVScaleForTuning())) {
8371 ORE->emit([&]() {
8373 DEBUG_TYPE, "CantReorderMemOps", L->getStartLoc(),
8374 L->getHeader())
8375 << "loop not vectorized: cannot prove it is safe to reorder "
8376 "memory operations";
8377 });
8378 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
8379 Hints.emitRemarkWithHints();
8380 return false;
8381 }
8382 }
8383
8384 // Identify the diagnostic messages that should be produced.
8385 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
8386 bool VectorizeLoop = true, InterleaveLoop = true;
8387 if (VF.Width.isScalar()) {
8388 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
8389 VecDiagMsg = {
8390 "VectorizationNotBeneficial",
8391 "the cost-model indicates that vectorization is not beneficial"};
8392 VectorizeLoop = false;
8393 }
8394
8395 if (UserIC == 1 && Hints.getInterleave() > 1) {
8397 "UserIC should only be ignored due to unsafe dependencies");
8398 LLVM_DEBUG(dbgs() << "LV: Ignoring user-specified interleave count.\n");
8399 IntDiagMsg = {"InterleavingUnsafe",
8400 "Ignoring user-specified interleave count due to possibly "
8401 "unsafe dependencies in the loop."};
8402 InterleaveLoop = false;
8403 } else if (!LVP.hasPlanWithVF(VF.Width) && UserIC > 1) {
8404 // Tell the user interleaving was avoided up-front, despite being explicitly
8405 // requested.
8406 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
8407 "interleaving should be avoided up front\n");
8408 IntDiagMsg = {"InterleavingAvoided",
8409 "Ignoring UserIC, because interleaving was avoided up front"};
8410 InterleaveLoop = false;
8411 } else if (IC == 1 && UserIC <= 1) {
8412 // Tell the user interleaving is not beneficial.
8413 LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
8414 IntDiagMsg = {
8415 "InterleavingNotBeneficial",
8416 "the cost-model indicates that interleaving is not beneficial"};
8417 InterleaveLoop = false;
8418 if (UserIC == 1) {
8419 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
8420 IntDiagMsg.second +=
8421 " and is explicitly disabled or interleave count is set to 1";
8422 }
8423 } else if (IC > 1 && UserIC == 1) {
8424 // Tell the user interleaving is beneficial, but it is explicitly disabled.
8425 LLVM_DEBUG(dbgs() << "LV: Interleaving is beneficial but is explicitly "
8426 "disabled.\n");
8427 IntDiagMsg = {"InterleavingBeneficialButDisabled",
8428 "the cost-model indicates that interleaving is beneficial "
8429 "but is explicitly disabled or interleave count is set to 1"};
8430 InterleaveLoop = false;
8431 }
8432
8433 // If there is a histogram in the loop, do not just interleave without
8434 // vectorizing. The order of operations will be incorrect without the
8435 // histogram intrinsics, which are only used for recipes with VF > 1.
8436 if (!VectorizeLoop && InterleaveLoop && LVL.hasHistograms()) {
8437 LLVM_DEBUG(dbgs() << "LV: Not interleaving without vectorization due "
8438 << "to histogram operations.\n");
8439 IntDiagMsg = {
8440 "HistogramPreventsScalarInterleaving",
8441 "Unable to interleave without vectorization due to constraints on "
8442 "the order of histogram operations"};
8443 InterleaveLoop = false;
8444 }
8445
8446 // Override IC if user provided an interleave count.
8447 IC = UserIC > 0 ? UserIC : IC;
8448
8449 // Emit diagnostic messages, if any.
8450 const char *VAPassName = Hints.vectorizeAnalysisPassName();
8451 if (!VectorizeLoop && !InterleaveLoop) {
8452 // Do not vectorize or interleave the loop.
8453 ORE->emit([&]() {
8454 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
8455 L->getStartLoc(), L->getHeader())
8456 << VecDiagMsg.second;
8457 });
8458 ORE->emit([&]() {
8459 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
8460 L->getStartLoc(), L->getHeader())
8461 << IntDiagMsg.second;
8462 });
8463 return false;
8464 }
8465
8466 if (!VectorizeLoop && InterleaveLoop) {
8467 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
8468 ORE->emit([&]() {
8469 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
8470 L->getStartLoc(), L->getHeader())
8471 << VecDiagMsg.second;
8472 });
8473 } else if (VectorizeLoop && !InterleaveLoop) {
8474 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
8475 << ") in " << L->getLocStr() << '\n');
8476 ORE->emit([&]() {
8477 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
8478 L->getStartLoc(), L->getHeader())
8479 << IntDiagMsg.second;
8480 });
8481 } else if (VectorizeLoop && InterleaveLoop) {
8482 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
8483 << ") in " << L->getLocStr() << '\n');
8484 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
8485 }
8486
8487 // Report the vectorization decision.
8488 if (VF.Width.isScalar()) {
8489 using namespace ore;
8490 assert(IC > 1);
8491 ORE->emit([&]() {
8492 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
8493 L->getHeader())
8494 << "interleaved loop (interleaved count: "
8495 << NV("InterleaveCount", IC) << ")";
8496 });
8497 } else {
8498 // Report the vectorization decision.
8499 reportVectorization(ORE, L, VF, IC);
8500 }
8501 if (ORE->allowExtraAnalysis(LV_NAME))
8502 checkMixedPrecision(L, ORE);
8503
8504 // If we decided that it is *legal* to interleave or vectorize the loop, then
8505 // do it.
8506
8507 VPlan &BestPlan = *BestPlanPtr;
8508 // Consider vectorizing the epilogue too if it's profitable.
8509 std::unique_ptr<VPlan> EpiPlan =
8510 LVP.selectBestEpiloguePlan(BestPlan, VF.Width, IC);
8511 bool HasBranchWeights =
8512 hasBranchWeightMD(*L->getLoopLatch()->getTerminator());
8513 if (EpiPlan) {
8514 VPlan &BestEpiPlan = *EpiPlan;
8515 VPlan &BestMainPlan = BestPlan;
8516 ElementCount EpilogueVF = BestEpiPlan.getSingleVF();
8517
8518 // The first pass vectorizes the main loop and creates a scalar epilogue
8519 // to be vectorized by executing the plan (potentially with a different
8520 // factor) again shortly afterwards.
8521 BestEpiPlan.getMiddleBlock()->setName("vec.epilog.middle.block");
8522 BestEpiPlan.getVectorPreheader()->setName("vec.epilog.ph");
8523 SmallVector<VPInstruction *> ResumeValues =
8524 preparePlanForMainVectorLoop(BestMainPlan, BestEpiPlan);
8525 EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF, 1, BestEpiPlan);
8526
8527 // Add minimum iteration check for the epilogue plan, followed by runtime
8528 // checks for the main plan.
8529 LVP.addMinimumIterationCheck(BestMainPlan, EPI.EpilogueVF, EPI.EpilogueUF,
8531 LVP.attachRuntimeChecks(BestMainPlan, Checks, HasBranchWeights);
8533 EPI.MainLoopVF, EPI.MainLoopUF,
8535 HasBranchWeights ? MinItersBypassWeights : nullptr,
8536 L->getLoopPredecessor()->getTerminator()->getDebugLoc(),
8537 PSE);
8538
8539 EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TTI, AC, EPI, &CM,
8540 Checks, BestMainPlan);
8541 auto ExpandedSCEVs = LVP.executePlan(
8542 EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV, DT,
8543 /*VectorizingEpilogue=*/false);
8544 ++LoopsVectorized;
8545
8546 // Derive EPI fields from VPlan-generated IR.
8547 BasicBlock *EntryBB =
8548 cast<VPIRBasicBlock>(BestMainPlan.getEntry())->getIRBasicBlock();
8549 EntryBB->setName("iter.check");
8550 EPI.EpilogueIterationCountCheck = EntryBB;
8551 // The check chain is: Entry -> [SCEV] -> [Mem] -> MainCheck -> VecPH.
8552 // MainCheck is the non-bypass successor of the last runtime check block
8553 // (or Entry if there are no runtime checks).
8554 BasicBlock *LastCheck = EntryBB;
8555 if (BasicBlock *MemBB = Checks.getMemRuntimeChecks().second)
8556 LastCheck = MemBB;
8557 else if (BasicBlock *SCEVBB = Checks.getSCEVChecks().second)
8558 LastCheck = SCEVBB;
8559 BasicBlock *ScalarPH = L->getLoopPreheader();
8560 auto *BI = cast<CondBrInst>(LastCheck->getTerminator());
8561 EPI.MainLoopIterationCountCheck =
8562 BI->getSuccessor(BI->getSuccessor(0) == ScalarPH);
8563
8564 // Second pass vectorizes the epilogue and adjusts the control flow
8565 // edges from the first pass.
8566 EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TTI, AC, EPI, &CM,
8567 Checks, BestEpiPlan);
8568 SmallVector<Instruction *> InstsToMove = preparePlanForEpilogueVectorLoop(
8569 BestEpiPlan, L, ExpandedSCEVs, EPI, CM, Config, *PSE.getSE());
8570 LVP.attachRuntimeChecks(BestEpiPlan, Checks, HasBranchWeights);
8571 LVP.executePlan(
8572 EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV, DT,
8573 /*VectorizingEpilogue=*/true);
8574 connectEpilogueVectorLoop(BestEpiPlan, L, EPI, DT, Checks, InstsToMove,
8575 ResumeValues);
8576 ++LoopsEpilogueVectorized;
8577 } else {
8578 InnerLoopVectorizer LB(L, PSE, LI, DT, TTI, AC, VF.Width, IC, &CM, Checks,
8579 BestPlan);
8580 LVP.addMinimumIterationCheck(BestPlan, VF.Width, IC,
8581 VF.MinProfitableTripCount);
8582 LVP.attachRuntimeChecks(BestPlan, Checks, HasBranchWeights);
8583
8584 LVP.executePlan(VF.Width, IC, BestPlan, LB, DT);
8585 ++LoopsVectorized;
8586 }
8587
8588 assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
8589 "DT not preserved correctly");
8590 assert(!verifyFunction(*F, &dbgs()));
8591
8592 return true;
8593}
8594
8595 LoopVectorizeResult LoopVectorizePass::runImpl(Function &F) {
8596
8597 // Don't attempt if
8598 // 1. the target claims to have no vector registers, and
8599 // 2. interleaving won't help ILP.
8600 //
8601 // The second condition is necessary because, even if the target has no
8602 // vector registers, loop vectorization may still enable scalar
8603 // interleaving.
8604 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
8605 TTI->getMaxInterleaveFactor(ElementCount::getFixed(1)) < 2)
8606 return LoopVectorizeResult(false, false);
8607
8608 bool Changed = false, CFGChanged = false;
8609
8610 // The vectorizer requires loops to be in simplified form.
8611 // Since simplification may add new inner loops, it has to run before the
8612 // legality and profitability checks. This means running the loop vectorizer
8613 // will simplify all loops, regardless of whether anything ends up being
8614 // vectorized.
8615 for (const auto &L : *LI)
8616 Changed |= CFGChanged |=
8617 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
8618
8619 // Build up a worklist of inner-loops to vectorize. This is necessary as
8620 // the act of vectorizing or partially unrolling a loop creates new loops
8621 // and can invalidate iterators across the loops.
8622 SmallVector<Loop *, 8> Worklist;
8623
8624 for (Loop *L : *LI)
8625 collectSupportedLoops(*L, LI, ORE, Worklist);
8626
8627 LoopsAnalyzed += Worklist.size();
8628
8629 // Now walk the identified inner loops.
8630 while (!Worklist.empty()) {
8631 Loop *L = Worklist.pop_back_val();
8632
8633 // For the inner loops we actually process, form LCSSA to simplify the
8634 // transform.
8635 Changed |= formLCSSARecursively(*L, *DT, LI, SE);
8636
8637 Changed |= CFGChanged |= processLoop(L);
8638
8639 if (Changed) {
8640 LAIs->clear();
8641
8642#ifndef NDEBUG
8643 if (VerifySCEV)
8644 SE->verify();
8645#endif
8646 }
8647 }
8648
8649 // Process each loop nest in the function.
8650 return LoopVectorizeResult(Changed, CFGChanged);
8651}
8652
8653 PreservedAnalyses LoopVectorizePass::run(Function &F,
8654 FunctionAnalysisManager &AM) {
8655 LI = &AM.getResult<LoopAnalysis>(F);
8656 // There are no loops in the function. Return before computing other
8657 // expensive analyses.
8658 if (LI->empty())
8659 return PreservedAnalyses::all();
8660 SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
8661 TTI = &AM.getResult<TargetIRAnalysis>(F);
8662 DT = &AM.getResult<DominatorTreeAnalysis>(F);
8663 TLI = &AM.getResult<TargetLibraryAnalysis>(F);
8664 AC = &AM.getResult<AssumptionAnalysis>(F);
8665 DB = &AM.getResult<DemandedBitsAnalysis>(F);
8666 ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
8667 LAIs = &AM.getResult<LoopAccessAnalysis>(F);
8668 AA = &AM.getResult<AAManager>(F);
8669
8670 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
8671 PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
8672 GetBFI = [&AM, &F]() -> BlockFrequencyInfo & {
8673 return AM.getResult<BlockFrequencyAnalysis>(F);
8674 };
8675 LoopVectorizeResult Result = runImpl(F);
8676 if (!Result.MadeAnyChange)
8677 return PreservedAnalyses::all();
8678 PreservedAnalyses PA;
8679
8680 if (isAssignmentTrackingEnabled(*F.getParent())) {
8681 for (auto &BB : F)
8682 RemoveRedundantDbgInstrs(&BB);
8683 }
8684
8685 PA.preserve<LoopAnalysis>();
8686 PA.preserve<DominatorTreeAnalysis>();
8687 PA.preserve<ScalarEvolutionAnalysis>();
8688 PA.preserve<LoopAccessAnalysis>();
8689
8690 if (Result.MadeCFGChange) {
8691 // Making CFG changes likely means a loop got vectorized. Indicate that
8692 // extra simplification passes should be run.
8693 // TODO: MadeCFGChange is not a perfect proxy. Extra passes should only
8694 // be run if runtime checks have been added.
8695 AM.getResult<ShouldRunExtraVectorPasses>(F);
8696 PA.preserve<ShouldRunExtraVectorPasses>();
8697 } else {
8698 PA.preserveSet<CFGAnalyses>();
8699 }
8700 return PA;
8701}
8702
8703 void LoopVectorizePass::printPipeline(
8704 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
8705 static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
8706 OS, MapClassName2PassName);
8707
8708 OS << '<';
8709 OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
8710 OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
8711 OS << '>';
8712}
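// For example, with both options left unset and assuming the standard
// class-name-to-pass-name mapping, this prints the pass as:
//   loop-vectorize<no-interleave-forced-only;no-vectorize-forced-only>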
for(const MachineOperand &MO :llvm::drop_begin(OldMI.operands(), Desc.getNumOperands()))
static unsigned getIntrinsicID(const SDNode *N)
unsigned RegSize
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Lower Kernel Arguments
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static bool isEqual(const Function &Caller, const Function &Callee)
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This is the interface for LLVM's primary stateless and local alias analysis.
static bool IsEmptyBlock(MachineBasicBlock *MBB)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
static cl::opt< IntrinsicCostStrategy > IntrinsicCost("intrinsic-cost-strategy", cl::desc("Costing strategy for intrinsic instructions"), cl::init(IntrinsicCostStrategy::InstructionCost), cl::values(clEnumValN(IntrinsicCostStrategy::InstructionCost, "instruction-cost", "Use TargetTransformInfo::getInstructionCost"), clEnumValN(IntrinsicCostStrategy::IntrinsicCost, "intrinsic-cost", "Use TargetTransformInfo::getIntrinsicInstrCost"), clEnumValN(IntrinsicCostStrategy::TypeBasedIntrinsicCost, "type-based-intrinsic-cost", "Calculate the intrinsic cost based only on argument types")))
static InstructionCost getCost(Instruction &Inst, TTI::TargetCostKind CostKind, TargetTransformInfo &TTI)
Definition CostModel.cpp:73
This file defines DenseMapInfo traits for DenseMap.
This file defines the DenseMap class.
#define DEBUG_TYPE
This is the interface for a simple mod/ref and alias analysis over globals.
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This defines the Use class.
static bool hasNoUnsignedWrap(BinaryOperator &I)
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static cl::opt< unsigned, true > VectorizationFactor("force-vector-width", cl::Hidden, cl::desc("Sets the SIMD width. Zero is autoselect."), cl::location(VectorizerParams::VectorizationFactor))
This header provides classes for managing per-loop analyses.
static const char * VerboseDebug
#define LV_NAME
This file defines the LoopVectorizationLegality class.
static cl::opt< bool > ConsiderRegPressure("vectorizer-consider-reg-pressure", cl::init(false), cl::Hidden, cl::desc("Discard VFs if their register pressure is too high."))
This file provides a LoopVectorizationPlanner class.
static void collectSupportedLoops(Loop &L, LoopInfo *LI, OptimizationRemarkEmitter *ORE, SmallVectorImpl< Loop * > &V)
static cl::opt< unsigned > EpilogueVectorizationMinVF("epilogue-vectorization-minimum-VF", cl::Hidden, cl::desc("Only loops with vectorization factor equal to or larger than " "the specified value are considered for epilogue vectorization."))
static cl::opt< unsigned > EpilogueVectorizationForceVF("epilogue-vectorization-force-VF", cl::init(1), cl::Hidden, cl::desc("When epilogue vectorization is enabled, and a value greater than " "1 is specified, forces the given VF for all applicable epilogue " "loops."))
static unsigned getMaxTCFromNonZeroRange(PredicatedScalarEvolution &PSE, Loop *L)
Get the maximum trip count for L from the SCEV unsigned range, excluding zero from the range.
static Type * maybeVectorizeType(Type *Ty, ElementCount VF)
static ElementCount getSmallConstantTripCount(ScalarEvolution *SE, const Loop *L)
A version of ScalarEvolution::getSmallConstantTripCount that returns an ElementCount to include loops...
static bool hasUnsupportedHeaderPhiRecipe(VPlan &Plan)
Returns true if the VPlan contains header phi recipes that are not currently supported for epilogue v...
static cl::opt< unsigned > VectorizeMemoryCheckThreshold("vectorize-memory-check-threshold", cl::init(128), cl::Hidden, cl::desc("The maximum allowed number of runtime memory checks"))
static void connectEpilogueVectorLoop(VPlan &EpiPlan, Loop *L, EpilogueLoopVectorizationInfo &EPI, DominatorTree *DT, GeneratedRTChecks &Checks, ArrayRef< Instruction * > InstsToMove, ArrayRef< VPInstruction * > ResumeValues)
Connect the epilogue vector loop generated for EpiPlan to the main vector loop, after both plans have...
static cl::opt< unsigned > TinyTripCountVectorThreshold("vectorizer-min-trip-count", cl::init(16), cl::Hidden, cl::desc("Loops with a constant trip count that is smaller than this " "value are vectorized only if no scalar iteration overheads " "are incurred."))
Loops with a known constant trip count below this number are vectorized only if no scalar iteration o...
static void debugVectorizationMessage(const StringRef Prefix, const StringRef DebugMsg, Instruction *I)
Write a DebugMsg about vectorization to the debug output stream.
static void legacyCSE(BasicBlock *BB)
FIXME: This legacy common-subexpression-elimination routine is scheduled for removal,...
static VPIRBasicBlock * replaceVPBBWithIRVPBB(VPBasicBlock *VPBB, BasicBlock *IRBB, VPlan *Plan=nullptr)
Replace VPBB with a VPIRBasicBlock wrapping IRBB.
static DebugLoc getDebugLocFromInstOrOperands(Instruction *I)
Look for a meaningful debug location on the instruction or its operands.
TailFoldingPolicyTy
Option tail-folding-policy indicates that an epilogue is undesired, that tail folding is preferred,...
static bool useActiveLaneMaskForControlFlow(TailFoldingStyle Style)
static cl::opt< bool > EnableEarlyExitVectorization("enable-early-exit-vectorization", cl::init(true), cl::Hidden, cl::desc("Enable vectorization of early exit loops with uncountable exits."))
static bool processLoopInVPlanNativePath(Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, std::function< BlockFrequencyInfo &()> GetBFI, bool OptForSize, LoopVectorizeHints &Hints, LoopVectorizationRequirements &Requirements)
static unsigned estimateElementCount(ElementCount VF, std::optional< unsigned > VScale)
This function attempts to return a value that represents the ElementCount at runtime.
static constexpr uint32_t MinItersBypassWeights[]
static cl::opt< unsigned > ForceTargetNumScalarRegs("force-target-num-scalar-regs", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's number of scalar registers."))
static SmallVector< VPInstruction * > preparePlanForMainVectorLoop(VPlan &MainPlan, VPlan &EpiPlan)
Prepare MainPlan for vectorizing the main vector loop during epilogue vectorization.
static cl::opt< unsigned > SmallLoopCost("small-loop-cost", cl::init(20), cl::Hidden, cl::desc("The cost of a loop that is considered 'small' by the interleaver."))
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName, StringRef RemarkName, const Loop *TheLoop, Instruction *I, DebugLoc DL={})
Create an analysis remark that explains why vectorization failed.
static cl::opt< unsigned > ForceTargetNumVectorRegs("force-target-num-vector-regs", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's number of vector registers."))
static bool isExplicitVecOuterLoop(Loop *OuterLp, OptimizationRemarkEmitter *ORE)
static cl::opt< bool > EnableIndVarRegisterHeur("enable-ind-var-reg-heur", cl::init(true), cl::Hidden, cl::desc("Count the induction variable only once when interleaving"))
static cl::opt< TailFoldingStyle > ForceTailFoldingStyle("force-tail-folding-style", cl::desc("Force the tail folding style"), cl::init(TailFoldingStyle::None), cl::values(clEnumValN(TailFoldingStyle::None, "none", "Disable tail folding"), clEnumValN(TailFoldingStyle::Data, "data", "Create lane mask for data only, using active.lane.mask intrinsic"), clEnumValN(TailFoldingStyle::DataWithoutLaneMask, "data-without-lane-mask", "Create lane mask with compare/stepvector"), clEnumValN(TailFoldingStyle::DataAndControlFlow, "data-and-control", "Create lane mask using active.lane.mask intrinsic, and use " "it for both data and control flow"), clEnumValN(TailFoldingStyle::DataWithEVL, "data-with-evl", "Use predicated EVL instructions for tail folding. If EVL " "is unsupported, fallback to data-without-lane-mask.")))
static void printOptimizedVPlan(VPlan &)
static SmallVector< Instruction * > preparePlanForEpilogueVectorLoop(VPlan &Plan, Loop *L, const SCEV2ValueTy &ExpandedSCEVs, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel &CM, VFSelectionContext &Config, ScalarEvolution &SE)
Prepare Plan for vectorizing the epilogue loop.
static cl::opt< bool > EnableEpilogueVectorization("enable-epilogue-vectorization", cl::init(true), cl::Hidden, cl::desc("Enable vectorization of epilogue loops."))
static cl::opt< bool > PreferPredicatedReductionSelect("prefer-predicated-reduction-select", cl::init(false), cl::Hidden, cl::desc("Prefer predicating a reduction operation over an after loop select."))
static std::optional< ElementCount > getSmallBestKnownTC(PredicatedScalarEvolution &PSE, Loop *L, bool CanUseConstantMax=true, bool CanExcludeZeroTrips=false)
Returns "best known" trip count, which is either a valid positive trip count or std::nullopt when an ...
static const SCEV * getAddressAccessSCEV(Value *Ptr, PredicatedScalarEvolution &PSE, const Loop *TheLoop)
Gets the address access SCEV for Ptr, if it should be used for cost modeling according to isAddressSC...
static cl::opt< bool > EnableLoadStoreRuntimeInterleave("enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, cl::desc("Enable runtime interleaving until load/store ports are saturated"))
static cl::opt< bool > VPlanBuildStressTest("vplan-build-stress-test", cl::init(false), cl::Hidden, cl::desc("Build VPlan for every supported loop nest in the function and bail " "out right after the build (stress test the VPlan H-CFG construction " "in the VPlan-native vectorization path)."))
static bool hasIrregularType(Type *Ty, const DataLayout &DL)
A helper function that returns true if the given type is irregular.
static cl::opt< bool > LoopVectorizeWithBlockFrequency("loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, cl::desc("Enable the use of the block frequency analysis to access PGO " "heuristics minimizing code growth in cold regions and being more " "aggressive in hot regions."))
static ElementCount determineVPlanVF(const TargetTransformInfo &TTI, VFSelectionContext &Config)
static bool useActiveLaneMask(TailFoldingStyle Style)
static bool hasReplicatorRegion(VPlan &Plan)
static bool isIndvarOverflowCheckKnownFalse(const LoopVectorizationCostModel *Cost, ElementCount VF, std::optional< unsigned > UF=std::nullopt)
For the given VF and UF and maximum trip count computed for the loop, return whether the induction va...
static void addFullyUnrolledInstructionsToIgnore(Loop *L, const LoopVectorizationLegality::InductionList &IL, SmallPtrSetImpl< Instruction * > &InstsToIgnore)
Knowing that loop L executes a single vector iteration, add instructions that will get simplified and...
static bool hasFindLastReductionPhi(VPlan &Plan)
Returns true if the VPlan contains a VPReductionPHIRecipe with FindLast recurrence kind.
static cl::opt< bool > EnableInterleavedMemAccesses("enable-interleaved-mem-accesses", cl::init(false), cl::Hidden, cl::desc("Enable vectorization on interleaved memory accesses in a loop"))
static cl::opt< bool > EnableMaskedInterleavedMemAccesses("enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden, cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"))
An interleave-group may need masking if it resides in a block that needs predication,...
static cl::opt< bool > ForceOrderedReductions("force-ordered-reductions", cl::init(false), cl::Hidden, cl::desc("Enable the vectorisation of loops with in-order (strict) " "FP reductions"))
static cl::opt< TailFoldingPolicyTy > TailFoldingPolicy("tail-folding-policy", cl::init(TailFoldingPolicyTy::None), cl::Hidden, cl::desc("Tail-folding preferences over creating an epilogue loop."), cl::values(clEnumValN(TailFoldingPolicyTy::None, "dont-fold-tail", "Don't tail-fold loops."), clEnumValN(TailFoldingPolicyTy::PreferFoldTail, "prefer-fold-tail", "prefer tail-folding, otherwise create an epilogue when " "appropriate."), clEnumValN(TailFoldingPolicyTy::MustFoldTail, "must-fold-tail", "always tail-fold, don't attempt vectorization if " "tail-folding fails.")))
static bool isOutsideLoopWorkProfitable(GeneratedRTChecks &Checks, VectorizationFactor &VF, Loop *L, PredicatedScalarEvolution &PSE, VPCostContext &CostCtx, VPlan &Plan, EpilogueLowering SEL, std::optional< unsigned > VScale)
This function determines whether or not it's still profitable to vectorize the loop given the extra w...
static cl::opt< cl::boolOrDefault > ForceSafeDivisor("force-widen-divrem-via-safe-divisor", cl::Hidden, cl::desc("Override cost based safe divisor widening for div/rem instructions"))
static InstructionCost calculateEarlyExitCost(VPCostContext &CostCtx, VPlan &Plan, ElementCount VF)
For loops with uncountable early exits, find the cost of doing work when exiting the loop early,...
static cl::opt< unsigned > ForceTargetMaxVectorInterleaveFactor("force-target-max-vector-interleave", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's max interleave factor for " "vectorized loops."))
static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI)
cl::opt< unsigned > NumberOfStoresToPredicate("vectorize-num-stores-pred", cl::init(1), cl::Hidden, cl::desc("Max number of stores to be predicated behind an if."))
The number of stores in a loop that are allowed to need predication.
static EpilogueLowering getEpilogueLowering(Function *F, Loop *L, LoopVectorizeHints &Hints, bool OptForSize, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, LoopVectorizationLegality &LVL, InterleavedAccessInfo *IAI)
static void fixScalarResumeValuesFromBypass(BasicBlock *BypassBlock, Loop *L, VPlan &BestEpiPlan, ArrayRef< VPInstruction * > ResumeValues)
static cl::opt< unsigned > MaxNestedScalarReductionIC("max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, cl::desc("The maximum interleave count to use when interleaving a scalar " "reduction in a nested loop."))
static cl::opt< unsigned > ForceTargetMaxScalarInterleaveFactor("force-target-max-scalar-interleave", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's max interleave factor for " "scalar loops."))
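The cl::opt tuning flags listed above all follow the standard llvm::cl pattern: a static cl::opt with a default via cl::init, hidden from -help by cl::Hidden, and documented via cl::desc. A minimal sketch with a hypothetical flag (example-interleave-cap is not a real option):

#include "llvm/Support/CommandLine.h"
#include <algorithm>
using namespace llvm;

// Hypothetical flag, for illustration only; reads like the options above.
static cl::opt<unsigned> ExampleInterleaveCap(
    "example-interleave-cap", cl::init(4), cl::Hidden,
    cl::desc("Illustrative cap on the interleave count."));

unsigned capInterleaveCount(unsigned IC) {
  // A cl::opt converts implicitly to its underlying value type.
  return std::min(IC, ExampleInterleaveCap.getValue());
}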
static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE)
static bool willGenerateVectors(VPlan &Plan, ElementCount VF, const TargetTransformInfo &TTI)
Check if any recipe of Plan will generate a vector value, which will be assigned a vector register.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
This file implements a map that provides insertion order iteration.
This file contains the declarations for metadata subclasses.
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
#define P(N)
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static InstructionCost getScalarizationOverhead(const TargetTransformInfo &TTI, Type *ScalarTy, VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={}, TTI::VectorInstrContext VIC=TTI::VectorInstrContext::None)
This is similar to TargetTransformInfo::getScalarizationOverhead, but if ScalarTy is a FixedVectorTyp...
This file contains some templates that are useful if you are working with the STL at all.
#define OP(OPC)
Definition Instruction.h:46
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
#define DEBUG_WITH_TYPE(TYPE,...)
DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.
Definition Debug.h:72
This pass exposes codegen information to IR-level passes.
LocallyHashedType DenseMapInfo< LocallyHashedType >::Empty
This file implements the TypeSwitch template, which mimics a switch() statement whose cases are type ...
This file contains the declarations of different VPlan-related auxiliary helpers.
This file provides utility VPlan to VPlan transformations.
#define RUN_VPLAN_PASS(PASS,...)
#define RUN_VPLAN_PASS_NO_VERIFY(PASS,...)
This file declares the class VPlanVerifier, which contains utility functions to check the consistency...
This file contains the declarations of the Vectorization Plan base classes:
static const char PassName[]
Value * RHS
Value * LHS
static const uint32_t IV[8]
Definition blake3_impl.h:83
A manager for alias analyses.
Class for arbitrary precision integers.
Definition APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:235
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1563
unsigned getActiveBits() const
Compute the number of active bits in the value.
Definition APInt.h:1535
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:381
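A small sketch of the APInt accessors listed above; the widths and values are arbitrary:

#include "llvm/ADT/APInt.h"
using namespace llvm;

void apintDemo() {
  APInt Mask = APInt::getAllOnes(8);      // 8-bit value 0xFF
  unsigned Active = Mask.getActiveBits(); // 8
  uint64_t Raw = Mask.getZExtValue();     // 255
  bool Zero = APInt(8, 0).isZero();       // true
  (void)Active; (void)Raw; (void)Zero;
}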
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
A function analysis which provides an AssumptionCache.
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:530
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
LLVM_ABI const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
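The BasicBlock queries above compose naturally; a minimal sketch (helper names are made up) that counts phis and detects a straight-line block:

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

unsigned countPhis(const BasicBlock &BB) {
  unsigned NumPhis = 0;
  for ([[maybe_unused]] const PHINode &PN : BB.phis())
    ++NumPhis;
  return NumPhis;
}

// True if BB sits on a straight-line edge: one predecessor, one successor.
bool isPassThrough(const BasicBlock &BB) {
  return BB.getSinglePredecessor() && BB.getSingleSuccessor();
}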
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
bool isNoBuiltin() const
Return true if the call should not be treated as a call to a builtin.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Value * getArgOperand(unsigned i) const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition InstrTypes.h:986
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
Conditional Branch instruction.
BasicBlock * getSuccessor(unsigned i) const
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
This class represents a range of values.
LLVM_ABI APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
A parsed version of the target data layout string, and methods for querying it.
Definition DataLayout.h:64
A debug info location.
Definition DebugLoc.h:123
static DebugLoc getTemporary()
Definition DebugLoc.h:160
static DebugLoc getUnknown()
Definition DebugLoc.h:161
An analysis that produces DemandedBits for a function.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:256
iterator end()
Definition DenseMap.h:81
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition DenseMap.h:169
void insert_range(Range &&R)
Inserts range of 'std::pair<KeyT, ValueT>' values into the map.
Definition DenseMap.h:294
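The DenseMap operations listed above in one minimal, self-contained sketch:

#include "llvm/ADT/DenseMap.h"
using namespace llvm;

void denseMapDemo() {
  DenseMap<int, int> M;
  M.try_emplace(1, 10);              // inserts only if key 1 is absent
  if (auto It = M.find(1); It != M.end())
    It->second += 1;                 // found: now 11
  int Missing = M.lookup(2);         // absent key: default-constructed 0
  bool Has = M.contains(1);          // true
  (void)Missing; (void)Has;
}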
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
Analysis pass which computes a DominatorTree.
Definition Dominators.h:278
void changeImmediateDominator(DomTreeNodeBase< NodeT > *N, DomTreeNodeBase< NodeT > *NewIDom)
changeImmediateDominator - This method is used to update the dominator tree information when a node's...
void eraseNode(NodeT *BB)
eraseNode - Removes a node from the dominator tree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
constexpr bool isVector() const
One or more elements.
Definition TypeSize.h:324
static constexpr ElementCount getScalable(ScalarTy MinVal)
Definition TypeSize.h:312
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
Definition TypeSize.h:315
constexpr bool isScalar() const
Exactly one element.
Definition TypeSize.h:320
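ElementCount is how the vectorizer describes a VF: either a fixed lane count or a multiple of the runtime vscale. A minimal sketch of the constructors and predicates above:

#include "llvm/Support/TypeSize.h"
using namespace llvm;

void elementCountDemo() {
  ElementCount Fixed = ElementCount::getFixed(4);       // exactly 4 lanes
  ElementCount Scalable = ElementCount::getScalable(2); // 2 * vscale lanes
  bool IsVec = Fixed.isVector();     // true: more than one element
  bool IsScal = Scalable.isScalable();
  (void)IsVec; (void)IsScal;
}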
EpilogueVectorizerEpilogueLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, GeneratedRTChecks &Checks, VPlan &Plan)
BasicBlock * createVectorizedLoopSkeleton() final
Implements the interface for creating a vectorized skeleton using the epilogue loop strategy (i....
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
A specialized derived class of inner loop vectorizer that performs vectorization of main loops in the...
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
EpilogueVectorizerMainLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, GeneratedRTChecks &Check, VPlan &Plan)
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:23
Class to represent function types.
param_iterator param_begin() const
param_iterator param_end() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:211
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags none()
void applyUpdates(ArrayRef< UpdateT > Updates)
Submit updates to all available trees.
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2847
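A minimal IRBuilder sketch: position the builder before an existing instruction and create a named add (the helper name is hypothetical):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

Value *emitAdd(Instruction *InsertPt, Value *L, Value *R) {
  IRBuilder<> Builder(InsertPt); // new instructions go before InsertPt
  return Builder.CreateAdd(L, R, "sum");
}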
A struct for saving information about induction variables.
const SCEV * getStep() const
ArrayRef< Instruction * > getCastInsts() const
Returns an ArrayRef to the type cast instructions in the induction update chain, that are redundant w...
@ IK_PtrInduction
Pointer induction var. Step = C.
InnerLoopAndEpilogueVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, GeneratedRTChecks &Checks, VPlan &Plan, ElementCount VecWidth, ElementCount MinProfitableTripCount, unsigned UnrollFactor)
EpilogueLoopVectorizationInfo & EPI
Holds and updates state information required to vectorize the main loop and its epilogue in two separ...
InnerLoopVectorizer vectorizes loops which contain only one basic block to a specified vectorization ...
virtual void printDebugTracesAtStart()
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
const TargetTransformInfo * TTI
Target Transform Info.
LoopVectorizationCostModel * Cost
The profitability analysis.
friend class LoopVectorizationPlanner
InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, ElementCount VecWidth, unsigned UnrollFactor, LoopVectorizationCostModel *CM, GeneratedRTChecks &RTChecks, VPlan &Plan)
PredicatedScalarEvolution & PSE
A wrapper around ScalarEvolution used to add runtime SCEV checks.
LoopInfo * LI
Loop Info.
DominatorTree * DT
Dominator Tree.
void fixVectorizedLoop(VPTransformState &State)
Fix the vectorized code, taking care of header phi's, and more.
virtual BasicBlock * createVectorizedLoopSkeleton()
Creates a basic block for the scalar preheader.
virtual void printDebugTracesAtEnd()
AssumptionCache * AC
Assumption Cache.
IRBuilder Builder
The builder that we use.
void fixNonInductionPHIs(VPTransformState &State)
Fix the non-induction PHIs in Plan.
VPBasicBlock * VectorPHVPBB
The vector preheader block of Plan, used as target for check blocks introduced during skeleton creati...
unsigned UF
The vectorization unroll factor to use.
GeneratedRTChecks & RTChecks
Structure to hold information about generated runtime checks, responsible for cleaning the checks,...
virtual ~InnerLoopVectorizer()=default
ElementCount VF
The vectorization SIMD factor to use.
Loop * OrigLoop
The original loop.
BasicBlock * createScalarPreheader(StringRef Prefix)
Create and return a new IR basic block for the scalar preheader whose name is prefixed with Prefix.
static InstructionCost getInvalid(CostType Val=0)
static InstructionCost getMax()
CostType getValue() const
This function is intended to be used as sparingly as possible, since the class provides the full rang...
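InstructionCost is a cost type with an explicit invalid state; arithmetic involving an invalid cost stays invalid, so validity should be checked before calling getValue. A minimal sketch:

#include "llvm/Support/InstructionCost.h"
using namespace llvm;

InstructionCost sumCosts(InstructionCost A, InstructionCost B) {
  InstructionCost Sum = A + B; // invalid if either operand is invalid
  if (!Sum.isValid())
    return InstructionCost::getInvalid();
  (void)Sum.getValue();        // safe: only valid costs reach here
  return Sum;
}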
bool isCast() const
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
const char * getOpcodeName() const
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:354
LLVM_ABI APInt getMask() const
For example, this is 0xFF for an 8 bit integer, 0xFFFF for i16, etc.
Definition Type.cpp:378
The group of interleaved loads/stores sharing the same stride and close to each other.
auto members() const
Return an iterator range over the non-null members of this group, in index order.
InstTy * getInsertPos() const
uint32_t getNumMembers() const
Drive the analysis of interleaved memory accesses in the loop.
bool requiresScalarEpilogue() const
Returns true if an interleaved group that may access memory out-of-bounds requires a scalar epilogue ...
LLVM_ABI void analyzeInterleaving(bool EnableMaskedInterleavedGroup)
Analyze the interleaved accesses and collect them in interleave groups.
An instruction for reading from memory.
Type * getPointerOperandType() const
This analysis provides dependence information for the memory accesses of a loop.
const RuntimePointerChecking * getRuntimePointerChecking() const
unsigned getNumRuntimePointerChecks() const
Number of memchecks required to prove independence of otherwise may-alias pointers.
Analysis pass that exposes the LoopInfo for a function.
Definition LoopInfo.h:589
bool contains(const LoopT *L) const
Return true if the specified loop is contained within this loop.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
void getExitingBlocks(SmallVectorImpl< BlockT * > &ExitingBlocks) const
Return all blocks inside the loop that have successors outside of the loop.
BlockT * getHeader() const
iterator_range< block_iterator > blocks() const
Store the result of a depth first search within basic blocks contained by a single loop.
RPOIterator beginRPO() const
Reverse iterate over the cached postorder blocks.
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
RPOIterator endRPO() const
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
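LoopBlocksRPO yields the loop body in reverse post-order, the traversal a vectorizer-style pass wants when each block's in-loop predecessors must be visited first. A minimal sketch (the helper name is made up):

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
using namespace llvm;

void visitLoopInRPO(Loop *L, const LoopInfo *LI) {
  LoopBlocksRPO RPOT(L);
  RPOT.perform(LI); // run the DFS and cache the post-order
  for (BasicBlock *BB : RPOT) {
    (void)BB; // process BB; in-loop predecessors come first
  }
}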
void removeBlock(BlockT *BB)
This method completely removes BB from all data structures, including all of the Loop objects it is n...
LoopVectorizationCostModel - estimates the expected speedups due to vectorization.
bool isEpilogueVectorizationProfitable(const ElementCount VF, const unsigned IC) const
Returns true if epilogue vectorization is considered profitable, and false otherwise.
bool useWideActiveLaneMask() const
Returns true if the use of wide lane masks is requested and the loop is using tail-folding with a lan...
bool isPredicatedInst(Instruction *I) const
Returns true if I is an instruction that needs to be predicated at runtime.
void collectValuesToIgnore()
Collect values we want to ignore in the cost model.
BlockFrequencyInfo * BFI
The BlockFrequencyInfo returned from GetBFI.
BlockFrequencyInfo & getBFI()
Returns the BlockFrequencyInfo for the function if cached, otherwise fetches it via GetBFI.
bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be uniform after vectorization.
bool preferTailFoldedLoop() const
Returns true if tail-folding is preferred over an epilogue.
bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF)
Returns true if an artificially high cost for emulated masked memrefs should be used.
void collectNonVectorizedAndSetWideningDecisions(ElementCount VF)
Collect values that will not be widened, including Uniforms, Scalars, and Instructions to Scalarize f...
bool isMaskRequired(Instruction *I) const
Wrapper function for LoopVectorizationLegality::isMaskRequired, that passes the Instruction I and if ...
PredicatedScalarEvolution & PSE
Predicated scalar evolution analysis.
const LoopVectorizeHints * Hints
Loop Vectorize Hint.
const TargetTransformInfo & TTI
Vector target information.
LoopVectorizationLegality * Legal
Vectorization legality.
uint64_t getPredBlockCostDivisor(TargetTransformInfo::TargetCostKind CostKind, const BasicBlock *BB)
A helper function that returns the divisor to apply to the cost of a predicated block.
std::optional< InstructionCost > getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy) const
Return the cost of instructions in an inloop reduction pattern, if I is part of that pattern.
InstructionCost getInstructionCost(Instruction *I, ElementCount VF)
Returns the execution time cost of an instruction for a given vector width.
bool interleavedAccessCanBeWidened(Instruction *I, ElementCount VF) const
Returns true if I is a memory instruction in an interleaved-group of memory accesses that can be vect...
const TargetLibraryInfo * TLI
Target Library Info.
bool memoryInstructionCanBeWidened(Instruction *I, ElementCount VF)
Returns true if I is a memory instruction with consecutive memory access that can be widened.
const InterleaveGroup< Instruction > * getInterleavedAccessGroup(Instruction *Instr) const
Get the interleaved access group that Instr belongs to.
InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const
Estimate cost of an intrinsic call instruction CI if it were vectorized with factor VF.
bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be scalar after vectorization.
bool isOptimizableIVTruncate(Instruction *I, ElementCount VF)
Return True if instruction I is an optimizable truncate whose operand is an induction variable.
FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC)
Loop * TheLoop
The loop that we evaluate.
InterleavedAccessInfo & InterleaveInfo
The interleave access information contains groups of interleaved accesses with the same stride and cl...
SmallPtrSet< const Value *, 16 > ValuesToIgnore
Values to ignore in the cost model.
void setVectorizedCallDecision(ElementCount VF)
A call may be vectorized in different ways depending on whether we have vectorized variants available...
void invalidateCostModelingDecisions()
Invalidates decisions already taken by the cost model.
bool isAccessInterleaved(Instruction *Instr) const
Check if Instr belongs to any interleaved access group.
void setTailFoldingStyle(bool IsScalableVF, unsigned UserIC)
Selects and saves TailFoldingStyle.
OptimizationRemarkEmitter * ORE
Interface to emit optimization remarks.
LoopInfo * LI
Loop Info analysis.
bool requiresScalarEpilogue(bool IsVectorizing) const
Returns true if we're required to use a scalar epilogue for at least the final iteration of the origi...
SmallPtrSet< const Value *, 16 > VecValuesToIgnore
Values to ignore in the cost model when VF > 1.
bool isProfitableToScalarize(Instruction *I, ElementCount VF) const
void setWideningDecision(const InterleaveGroup< Instruction > *Grp, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for interleaving group Grp and vector ...
bool isEpilogueAllowed() const
Returns true if an epilogue is allowed (e.g., not prevented by optsize or a loop hint annotation).
CallWideningDecision getCallWideningDecision(CallInst *CI, ElementCount VF) const
bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const
bool shouldConsiderInvariant(Value *Op)
Returns true if Op should be considered invariant and if it is trivially hoistable.
bool foldTailByMasking() const
Returns true if all loop blocks should be masked to fold tail loop.
bool foldTailWithEVL() const
Returns true if VP intrinsics with explicit vector length support should be generated in the tail fol...
bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const
Returns true if the instructions in this block requires predication for any reason,...
void setCallWideningDecision(CallInst *CI, ElementCount VF, InstWidening Kind, Function *Variant, Intrinsic::ID IID, std::optional< unsigned > MaskPos, InstructionCost Cost)
AssumptionCache * AC
Assumption cache.
void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for instruction I and vector width VF.
InstWidening
Decision that was taken during cost calculation for memory instruction.
bool usePredicatedReductionSelect(RecurKind RecurrenceKind) const
Returns true if the predicated reduction select should be used to set the incoming value for the redu...
LoopVectorizationCostModel(EpilogueLowering SEL, Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, LoopVectorizationLegality *Legal, const TargetTransformInfo &TTI, const TargetLibraryInfo *TLI, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, std::function< BlockFrequencyInfo &()> GetBFI, const Function *F, const LoopVectorizeHints *Hints, InterleavedAccessInfo &IAI, VFSelectionContext &Config)
std::pair< InstructionCost, InstructionCost > getDivRemSpeculationCost(Instruction *I, ElementCount VF)
Return the costs for our two available strategies for lowering a div/rem operation which requires spe...
InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF) const
Estimate cost of a call instruction CI if it were vectorized with factor VF.
bool isScalarWithPredication(Instruction *I, ElementCount VF)
Returns true if I is an instruction which requires predication and for which our chosen predication s...
bool isDivRemScalarWithPredication(InstructionCost ScalarCost, InstructionCost SafeDivisorCost) const
Given costs for both strategies, return true if the scalar predication lowering should be used for di...
std::function< BlockFrequencyInfo &()> GetBFI
A function to lazily fetch BlockFrequencyInfo.
InstructionCost expectedCost(ElementCount VF)
Returns the expected execution cost.
void setCostBasedWideningDecision(ElementCount VF)
A memory access instruction may be vectorized in more than one way.
InstWidening getWideningDecision(Instruction *I, ElementCount VF) const
Return the cost model decision for the given instruction I and vector width VF.
InstructionCost getWideningCost(Instruction *I, ElementCount VF)
Return the vectorization cost for the given instruction I and vector width VF.
TailFoldingStyle getTailFoldingStyle() const
Returns the TailFoldingStyle that is best for the current loop.
void collectInstsToScalarize(ElementCount VF)
Collects the instructions to scalarize for each predicated instruction in the loop.
LoopVectorizationLegality checks if it is legal to vectorize a loop, and to what vectorization factor...
MapVector< PHINode *, InductionDescriptor > InductionList
InductionList saves induction variables and maps them to the induction descriptor.
bool canVectorize(bool UseVPlanNativePath)
Returns true if it is legal to vectorize this loop.
bool canVectorizeFPMath(bool EnableStrictReductions)
Returns true if it is legal to vectorize the FP math operations in this loop.
const SmallVector< BasicBlock *, 4 > & getCountableExitingBlocks() const
Returns all exiting blocks with a countable exit, i.e.
bool hasUncountableEarlyExit() const
Returns true if the loop has uncountable early exits, i.e.
bool hasHistograms() const
Returns true if the loop contains any known histogram operations.
const LoopAccessInfo * getLAI() const
Planner drives the vectorization process after having passed Legality checks.
DenseMap< const SCEV *, Value * > executePlan(ElementCount VF, unsigned UF, VPlan &BestPlan, InnerLoopVectorizer &LB, DominatorTree *DT, EpilogueVectorizationKind EpilogueVecKind=EpilogueVectorizationKind::None)
Generate the IR code for the vectorized loop captured in VPlan BestPlan according to the best selecte...
EpilogueVectorizationKind
@ MainLoop
Vectorizing the main loop of epilogue vectorization.
VPlan & getPlanFor(ElementCount VF) const
Return the VPlan for VF.
Definition VPlan.cpp:1721
VectorizationFactor planInVPlanNativePath(ElementCount UserVF)
Use the VPlan-native path to plan how to best vectorize, return the best VF and its cost.
void updateLoopMetadataAndProfileInfo(Loop *VectorLoop, VPBasicBlock *HeaderVPBB, const VPlan &Plan, bool VectorizingEpilogue, MDNode *OrigLoopID, std::optional< unsigned > OrigAverageTripCount, unsigned OrigLoopInvocationWeight, unsigned EstimatedVFxUF, bool DisableRuntimeUnroll)
Update loop metadata and profile info for both the scalar remainder loop and VectorLoop,...
Definition VPlan.cpp:1772
void buildVPlans(ElementCount MinVF, ElementCount MaxVF)
Build VPlans for power-of-2 VF's between MinVF and MaxVF inclusive, according to the information gath...
Definition VPlan.cpp:1705
void attachRuntimeChecks(VPlan &Plan, GeneratedRTChecks &RTChecks, bool HasBranchWeights) const
Attach the runtime checks of RTChecks to Plan.
unsigned selectInterleaveCount(VPlan &Plan, ElementCount VF, InstructionCost LoopCost)
void emitInvalidCostRemarks(OptimizationRemarkEmitter *ORE)
Emit remarks for recipes with invalid costs in the available VPlans.
static bool getDecisionAndClampRange(const std::function< bool(ElementCount)> &Predicate, VFRange &Range)
Test a Predicate on a Range of VF's.
Definition VPlan.cpp:1686
void printPlans(raw_ostream &O)
Definition VPlan.cpp:1878
void plan(ElementCount UserVF, unsigned UserIC)
Build VPlans for the specified UserVF and UserIC if they are non-zero or all applicable candidate VFs...
std::unique_ptr< VPlan > selectBestEpiloguePlan(VPlan &MainPlan, ElementCount MainLoopVF, unsigned IC)
void addMinimumIterationCheck(VPlan &Plan, ElementCount VF, unsigned UF, ElementCount MinProfitableTripCount) const
Create a check in Plan to see if the vector loop should be executed based on its trip count.
bool hasPlanWithVF(ElementCount VF) const
Look through the existing plans and return true if we have one with vectorization factor VF.
std::pair< VectorizationFactor, VPlan * > computeBestVF()
Compute and return the most profitable vectorization factor and the corresponding best VPlan.
This holds vectorization requirements that must be verified late in the process.
Utility class for getting and setting loop vectorizer hints in the form of loop metadata.
bool allowVectorization(Function *F, Loop *L, bool VectorizeOnlyWhenForced) const
void emitRemarkWithHints() const
Dumps all the hint information.
const char * vectorizeAnalysisPassName() const
If hints are provided that force vectorization, use the AlwaysPrint pass name to force the frontend t...
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
bool hasLoopInvariantOperands(const Instruction *I) const
Return true if all the operands of the specified instruction are loop invariant.
Definition LoopInfo.cpp:73
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
Definition LoopInfo.cpp:659
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
Definition LoopInfo.cpp:67
Metadata node.
Definition Metadata.h:1080
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition MapVector.h:124
Function * getFunction(StringRef Name) const
Look up the specified function in the module symbol table.
Definition Module.cpp:235
Diagnostic information for optimization analysis remarks related to pointer aliasing.
Diagnostic information for optimization analysis remarks related to floating-point non-commutativity.
Diagnostic information for optimization analysis remarks.
The optimization diagnostic interface.
LLVM_ABI void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for missed-optimization remarks.
Diagnostic information for applied optimization remarks.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
Value * getIncomingValueForBlock(const BasicBlock *BB) const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
LLVM_ABI const SCEVPredicate & getPredicate() const
LLVM_ABI unsigned getSmallConstantMaxTripCount()
Returns the upper bound of the loop trip count as a normal unsigned value, or 0 if the trip count is ...
LLVM_ABI const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
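The PreservedAnalyses idiom a transform pass uses on return, sketched for a hypothetical function pass; the particular set preserved here is illustrative, not what LoopVectorize itself reports:

#include "llvm/IR/PassManager.h"
#include "llvm/IR/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
using namespace llvm;

PreservedAnalyses reportPreserved(bool Changed) {
  if (!Changed)
    return PreservedAnalyses::all(); // untouched IR preserves everything
  PreservedAnalyses PA;
  PA.preserve<LoopAnalysis>();
  PA.preserve<DominatorTreeAnalysis>();
  return PA;
}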
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
The RecurrenceDescriptor is used to identify recurrences variables in a loop.
static bool isFMulAddIntrinsic(Instruction *I)
Returns true if the instruction is a call to the llvm.fmuladd intrinsic.
FastMathFlags getFastMathFlags() const
static LLVM_ABI unsigned getOpcode(RecurKind Kind)
Returns the opcode corresponding to the RecurrenceKind.
Type * getRecurrenceType() const
Returns the type of the recurrence.
const SmallPtrSet< Instruction *, 8 > & getCastInsts() const
Returns a reference to the instructions used for type-promoting the recurrence.
static bool isFindLastRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
static bool isAnyOfRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
bool isSigned() const
Returns true if all source operands of the recurrence are SExtInsts.
RecurKind getRecurrenceKind() const
static bool isFindIVRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
static bool isMinMaxRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is any min/max kind.
std::optional< ArrayRef< PointerDiffInfo > > getDiffChecks() const
const SmallVectorImpl< RuntimePointerCheck > & getChecks() const
Returns the checks that generateChecks created.
This class uses information about analyzed scalars to rewrite expressions in canonical form.
ScalarEvolution * getSE()
bool isInsertedInstruction(Instruction *I) const
Return true if the specified instruction was inserted by the code rewriter.
LLVM_ABI Value * expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc)
Generates a code sequence that evaluates this predicate.
void eraseDeadInstructions(Value *Root)
Remove inserted instructions that are dead, e.g.
virtual bool isAlwaysTrue() const =0
Returns true if the predicate is always true.
This class represents an analyzed expression in the program.
LLVM_ABI bool isZero() const
Return true if the expression is a constant zero.
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
The main scalar evolution driver.
LLVM_ABI const SCEV * getURemExpr(SCEVUse LHS, SCEVUse RHS)
Represents an unsigned remainder expression based on unsigned division.
LLVM_ABI const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...
LLVM_ABI const SCEV * getConstant(ConstantInt *V)
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
LLVM_ABI const SCEV * getTripCountFromExitCount(const SCEV *ExitCount)
A version of getTripCountFromExitCount below which always picks an evaluation type which cannot resu...
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
LLVM_ABI void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may affect Scalar...
LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
LLVM_ABI const SCEV * getElementCount(Type *Ty, ElementCount EC, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
ConstantRange getUnsignedRange(const SCEV *S)
Determine the unsigned range for a particular SCEV.
LLVM_ABI void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may affect its v...
LLVM_ABI void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
const SCEV * getMinusOne(Type *Ty)
Return a SCEV for the constant -1 of a specific type.
LLVM_ABI void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be...
LLVM_ABI const SCEV * getMulExpr(SmallVectorImpl< SCEVUse > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
LLVM_ABI unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
APInt getUnsignedRangeMax(const SCEV *S)
Determine the max of the unsigned range for a particular SCEV.
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< SCEVUse > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, SCEVUse LHS, SCEVUse RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
LLVM_ABI const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
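A sketch of how trip-count information is typically queried through ScalarEvolution, preferring an exact small constant and falling back to the symbolic backedge-taken count (the helper name is made up):

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
using namespace llvm;

void tripCountDemo(ScalarEvolution &SE, Loop *L) {
  if (unsigned TC = SE.getSmallConstantTripCount(L)) {
    (void)TC; // exact constant trip count known
    return;
  }
  const SCEV *BTC = SE.getBackedgeTakenCount(L);
  if (!isa<SCEVCouldNotCompute>(BTC))
    (void)BTC; // symbolic backedge-taken count; trip count is BTC + 1
}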
This class represents the LLVM 'select' instruction.
A vector that has set insertion semantics.
Definition SetVector.h:57
size_type size() const
Determine the number of elements in the SetVector.
Definition SetVector.h:103
void insert_range(Range &&R)
Definition SetVector.h:176
size_type count(const_arg_type key) const
Count the number of elements of a given key in the SetVector.
Definition SetVector.h:262
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
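SetVector combines set membership with deterministic insertion order, which matters for reproducible codegen; a minimal sketch:

#include "llvm/ADT/SetVector.h"
using namespace llvm;

void setVectorDemo() {
  SmallSetVector<int, 4> SV; // no heap allocation below 4 elements
  SV.insert(3);              // returns true: newly inserted
  SV.insert(3);              // returns false: already present
  bool Present = SV.count(3) == 1;
  (void)Present; (void)SV.size();
}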
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:339
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
VectorInstrContext
Represents a hint about the context in which an insert/extract is used.
@ None
The insert/extract is not used with a load/store.
@ Load
The value being inserted comes from a load (InsertElement only).
@ Store
The extracted value is stored (ExtractElement only).
static LLVM_ABI OperandValueInfo getOperandInfo(const Value *V)
Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
TargetCostKind
The kind of cost model.
@ TCK_RecipThroughput
Reciprocal throughput.
@ TCK_CodeSize
Instruction code size.
@ TCK_SizeAndLatency
The weighted sum of size and latency.
@ TCK_Latency
The latency of instruction.
@ TCC_Free
Expected to fold away in lowering.
LLVM_ABI InstructionCost getInstructionCost(const User *U, ArrayRef< const Value * > Operands, TargetCostKind CostKind) const
Estimate the cost of a given IR user when lowered.
@ SK_Splice
Concatenates elements from the first input vector with elements of the second input vector.
@ SK_Broadcast
Broadcast element 0 to all other elements.
@ SK_Reverse
Reverse the order of the vector.
CastContextHint
Represents a hint about the context in which a cast is used.
@ Reversed
The cast is used with a reversed load/store.
@ Masked
The cast is used with a masked load/store.
@ Normal
The cast is used with a normal load/store.
@ Interleave
The cast is used with an interleaved load/store.
@ GatherScatter
The cast is used with a gather/scatter.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
This class implements a switch-like dispatch statement for a value of 'T' using dyn_cast functionalit...
Definition TypeSwitch.h:89
TypeSwitch< T, ResultT > & Case(CallableT &&caseFn)
Add a case on the given type.
Definition TypeSwitch.h:98
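TypeSwitch replaces a chain of dyn_cast checks with declarative cases; a minimal sketch over IR instructions:

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

StringRef classify(Instruction *I) {
  return TypeSwitch<Instruction *, StringRef>(I)
      .Case<LoadInst>([](LoadInst *) -> StringRef { return "load"; })
      .Case<StoreInst>([](StoreInst *) -> StringRef { return "store"; })
      .Default([](Instruction *) -> StringRef { return "other"; });
}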
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
LLVM_ABI unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:290
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:286
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:370
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:130
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:236
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:310
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:141
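The Type queries above, combined the way a cost model might when sizing elements (the helper name is made up):

#include "llvm/IR/Type.h"
using namespace llvm;

unsigned elementBits(Type *Ty) {
  Type *Scalar = Ty->getScalarType(); // element type if Ty is a vector
  if (Scalar->isVoidTy())
    return 0;
  return Scalar->getScalarSizeInBits(); // 0 for non-primitive types
}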
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
op_range operands()
Definition User.h:267
iterator_range< op_iterator > op_range
Definition User.h:256
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:25
Value * getOperand(unsigned i) const
Definition User.h:207
static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)
Retrieve all the VFInfo instances associated to the CallInst CI.
Definition VectorUtils.h:76
Holds state needed to make cost decisions before computing costs per-VF, including the maximum VFs.
std::pair< unsigned, unsigned > getSmallestAndWidestTypes() const
const TTI::TargetCostKind CostKind
The kind of cost that we are calculating.
void collectElementTypesForWidening(const SmallPtrSetImpl< const Value * > *ValuesToIgnore=nullptr)
Collect element types in the loop that need widening.
std::optional< unsigned > getVScaleForTuning() const
VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph.
Definition VPlan.h:4154
RecipeListTy::iterator iterator
Instruction iterators...
Definition VPlan.h:4181
iterator end()
Definition VPlan.h:4191
iterator begin()
Recipe iterator methods.
Definition VPlan.h:4189
iterator_range< iterator > phis()
Returns an iterator range over the PHI-like recipes in the block.
Definition VPlan.h:4242
InstructionCost cost(ElementCount VF, VPCostContext &Ctx) override
Return the cost of this VPBasicBlock.
Definition VPlan.cpp:780
iterator getFirstNonPhi()
Return the position of the first non-phi node recipe in the block.
Definition VPlan.cpp:233
const VPRecipeBase & front() const
Definition VPlan.h:4201
VPRecipeBase * getTerminator()
If the block has multiple successors, return the branch recipe terminating the block.
Definition VPlan.cpp:645
bool empty() const
Definition VPlan.h:4200
const VPBasicBlock * getExitingBasicBlock() const
Definition VPlan.cpp:203
void setName(const Twine &newName)
Definition VPlan.h:182
VPlan * getPlan()
Definition VPlan.cpp:178
const VPBasicBlock * getEntryBasicBlock() const
Definition VPlan.cpp:183
VPBlockBase * getSingleSuccessor() const
Definition VPlan.h:230
static void reassociateBlocks(VPBlockBase *Old, VPBlockBase *New)
Reassociate all the blocks connected to Old so that they now point to New.
Definition VPlanUtils.h:258
static auto blocksOnly(T &&Range)
Return an iterator range over Range which only includes BlockTy blocks.
Definition VPlanUtils.h:286
VPlan-based builder utility analogous to IRBuilder.
VPInstruction * createAdd(VPValue *LHS, VPValue *RHS, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="", VPRecipeWithIRFlags::WrapFlagsTy WrapFlags={false, false})
T * insert(T *R)
Insert R at the current insertion point. Returns R unchanged.
static VPBuilder getToInsertAfter(VPRecipeBase *R)
Create a VPBuilder to insert after R.
VPPhi * createScalarPhi(ArrayRef< VPValue * > IncomingValues, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="", const VPIRFlags &Flags={})
VPInstruction * createNaryOp(unsigned Opcode, ArrayRef< VPValue * > Operands, Instruction *Inst=nullptr, const VPIRFlags &Flags={}, const VPIRMetadata &MD={}, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="")
Create an N-ary operation with Opcode, Operands and set Inst as its underlying Instruction.
unsigned getNumDefinedValues() const
Returns the number of values defined by the VPDef.
Definition VPlanValue.h:498
VPValue * getVPSingleValue()
Returns the only VPValue defined by the VPDef.
Definition VPlanValue.h:471
A pure virtual base class for all recipes modeling header phis, including phis for first order recurr...
Definition VPlan.h:2295
virtual VPValue * getBackedgeValue()
Returns the incoming value from the loop backedge.
Definition VPlan.h:2337
VPValue * getStartValue()
Returns the start value of the phi, if one is set.
Definition VPlan.h:2326
A recipe representing a sequence of load -> update -> store as part of a histogram operation.
Definition VPlan.h:2040
A special type of VPBasicBlock that wraps an existing IR basic block.
Definition VPlan.h:4307
Class to record and manage LLVM IR flags.
Definition VPlan.h:687
LLVM_ABI_FOR_TEST FastMathFlags getFastMathFlags() const
This is a concrete Recipe that models a single VPlan-level instruction.
Definition VPlan.h:1222
unsigned getNumOperandsWithoutMask() const
Returns the number of operands, excluding the mask if the VPInstruction is masked.
Definition VPlan.h:1448
iterator_range< operand_iterator > operandsWithoutMask()
Returns an iterator range over the operands excluding the mask operand if present.
Definition VPlan.h:1468
@ ResumeForEpilogue
Explicit user for the resume phi of the canonical induction in the main VPlan, used by the epilogue v...
Definition VPlan.h:1322
@ ReductionStartVector
Start vector for reductions with 3 operands: the original start value, the identity value for the red...
Definition VPlan.h:1313
@ ComputeReductionResult
Reduce the operands to the final reduction result using the operation specified via the operation's V...
Definition VPlan.h:1265
unsigned getOpcode() const
Definition VPlan.h:1397
void setName(StringRef NewName)
Set the symbolic name for the VPInstruction.
Definition VPlan.h:1496
VPValue * getMask() const
Returns the mask for the VPInstruction.
Definition VPlan.h:1462
bool isMasked() const
Returns true if the VPInstruction has a mask operand.
Definition VPlan.h:1438
VPInterleaveRecipe is a recipe for transforming an interleave group of load or stores into one wide l...
Definition VPlan.h:2943
detail::zippy< llvm::detail::zip_first, VPUser::const_operand_range, const_incoming_blocks_range > incoming_values_and_blocks() const
Returns an iterator range over pairs of incoming values and corresponding incoming blocks.
Definition VPlan.h:1625
VPRecipeBase is a base class modeling a sequence of one or more output IR instructions.
Definition VPlan.h:405
DebugLoc getDebugLoc() const
Returns the debug location of the recipe.
Definition VPlan.h:553
void moveBefore(VPBasicBlock &BB, iplist< VPRecipeBase >::iterator I)
Unlink this recipe and insert into BB before I.
void insertBefore(VPRecipeBase *InsertPos)
Insert an unlinked recipe into a basic block immediately before the specified recipe.
iplist< VPRecipeBase >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Helper class to create VPRecipies from IR instructions.
VPRecipeBase * tryToCreateWidenNonPhiRecipe(VPSingleDefRecipe *R, VFRange &Range)
Create and return a widened recipe for a non-phi recipe R if one can be created within the given VF R...
VPHistogramRecipe * widenIfHistogram(VPInstruction *VPI)
If VPI represents a histogram operation (as determined by LoopVectorizationLegality) make that safe f...
VPRecipeBase * tryToWidenMemory(VPInstruction *VPI, VFRange &Range)
Check if the load or store instruction VPI should be widened for Range.Start and potentially masked.
bool replaceWithFinalIfReductionStore(VPInstruction *VPI, VPBuilder &FinalRedStoresBuilder)
If VPI is a store of a reduction into an invariant address, delete it.
VPReplicateRecipe * handleReplication(VPInstruction *VPI, VFRange &Range)
Build a VPReplicateRecipe for VPI.
bool isOrdered() const
Returns true, if the phi is part of an ordered reduction.
Definition VPlan.h:2742
unsigned getVFScaleFactor() const
Get the factor that the VF of this recipe's output should be scaled by, or 1 if it isn't scaled.
Definition VPlan.h:2721
bool isInLoop() const
Returns true if the phi is part of an in-loop reduction.
Definition VPlan.h:2745
RecurKind getRecurrenceKind() const
Returns the recurrence kind of the reduction.
Definition VPlan.h:2739
A recipe to represent inloop, ordered or partial reduction operations.
Definition VPlan.h:3036
VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks which form a Single-Entry-S...
Definition VPlan.h:4364
const VPBlockBase * getEntry() const
Definition VPlan.h:4408
void clearCanonicalIVNUW(VPInstruction *Increment)
Unsets NUW for the canonical IV increment Increment, for loop regions.
Definition VPlan.h:4492
VPRegionValue * getCanonicalIV()
Return the canonical induction variable of the region, null for replicating regions.
Definition VPlan.h:4476
VPReplicateRecipe replicates a given instruction producing multiple scalar copies of the original sca...
Definition VPlan.h:3190
VPSingleDef is a base class for recipes for modeling a sequence of one or more output IR that define ...
Definition VPlan.h:605
Instruction * getUnderlyingInstr()
Returns the underlying instruction.
Definition VPlan.h:672
An analysis for type-inference for VPValues.
Type * inferScalarType(const VPValue *V)
Infer the type of V. Returns the scalar type of V.
This class augments VPValue with operands which provide the inverse def-use edges from VPValue's user...
Definition VPlanValue.h:329
void setOperand(unsigned I, VPValue *New)
Definition VPlanValue.h:373
operand_iterator op_begin()
Definition VPlanValue.h:393
VPValue * getOperand(unsigned N) const
Definition VPlanValue.h:368
This is the base class of the VPlan Def/Use graph, used for modeling the data flow into,...
Definition VPlanValue.h:49
Value * getLiveInIRValue() const
Return the underlying IR value for a VPIRValue.
Definition VPlan.cpp:138
VPRecipeBase * getDefiningRecipe()
Returns the recipe defining this VPValue or nullptr if it is not defined by a recipe,...
Definition VPlan.cpp:128
Value * getUnderlyingValue() const
Return the underlying Value attached to this VPValue.
Definition VPlanValue.h:74
void replaceAllUsesWith(VPValue *New)
Definition VPlan.cpp:1499
void replaceUsesWithIf(VPValue *New, llvm::function_ref< bool(VPUser &U, unsigned Idx)> ShouldReplace)
Go through the uses list for this VPValue and make each use point to New if the callback ShouldReplac...
Definition VPlan.cpp:1505
user_range users()
Definition VPlanValue.h:155
A recipe to compute a pointer to the last element of each part of a widened memory access for widened...
Definition VPlan.h:2146
A recipe to compute the pointers for widened memory accesses of SourceElementTy.
Definition VPlan.h:2219
VPWidenCastRecipe is a recipe to create vector cast instructions.
Definition VPlan.h:1832
A recipe for handling GEP instructions.
Definition VPlan.h:2082
A recipe for handling phi nodes of integer and floating-point inductions, producing their vector valu...
Definition VPlan.h:2443
A recipe for widened phis.
Definition VPlan.h:2579
VPWidenRecipe is a recipe for producing a widened instruction using the opcode and operands of the re...
Definition VPlan.h:1776
VPlan models a candidate for vectorization, encoding various decisions taken to produce efficient outp...
Definition VPlan.h:4512
bool hasVF(ElementCount VF) const
Definition VPlan.h:4726
ElementCount getSingleVF() const
Returns the single VF of the plan, asserting that the plan has exactly one VF.
Definition VPlan.h:4739
VPBasicBlock * getEntry()
Definition VPlan.h:4604
VPValue * getTripCount() const
The trip count of the original loop.
Definition VPlan.h:4663
VPSymbolicValue & getVFxUF()
Returns VF * UF of the vector loop region.
Definition VPlan.h:4702
bool hasUF(unsigned UF) const
Definition VPlan.h:4751
ArrayRef< VPIRBasicBlock * > getExitBlocks() const
Return an ArrayRef containing VPIRBasicBlocks wrapping the exit blocks of the original scalar loop.
Definition VPlan.h:4653
VPIRValue * getOrAddLiveIn(Value *V)
Gets the live-in VPIRValue for V or adds a new live-in (if none exists yet) for V.
Definition VPlan.h:4776
VPIRValue * getZero(Type *Ty)
Return a VPIRValue wrapping the null value of type Ty.
Definition VPlan.h:4802
LLVM_ABI_FOR_TEST VPRegionBlock * getVectorLoopRegion()
Returns the VPRegionBlock of the vector loop.
Definition VPlan.cpp:1096
bool hasEarlyExit() const
Returns true if the VPlan is based on a loop with an early exit.
Definition VPlan.h:4899
InstructionCost cost(ElementCount VF, VPCostContext &Ctx)
Return the cost of this plan.
Definition VPlan.cpp:1078
void resetTripCount(VPValue *NewTripCount)
Resets the trip count for the VPlan.
Definition VPlan.h:4677
VPBasicBlock * getMiddleBlock()
Returns the 'middle' block of the plan, that is the block that selects whether to execute the scalar ...
Definition VPlan.h:4629
VPBasicBlock * getVectorPreheader() const
Returns the preheader of the vector loop region, if one exists, or null otherwise.
Definition VPlan.h:4609
VPSymbolicValue & getUF()
Returns the UF of the vector loop region.
Definition VPlan.h:4699
VPBasicBlock * getScalarPreheader() const
Return the VPBasicBlock for the preheader of the scalar loop.
Definition VPlan.h:4643
void execute(VPTransformState *State)
Generate the IR code for this VPlan.
Definition VPlan.cpp:951
VPIRBasicBlock * getScalarHeader() const
Return the VPIRBasicBlock wrapping the header of the scalar loop.
Definition VPlan.h:4649
VPSymbolicValue & getVF()
Returns the VF of the vector loop region.
Definition VPlan.h:4695
LLVM_ABI_FOR_TEST VPlan * duplicate()
Clone the current VPlan, update all VPValues of the new VPlan and cloned recipes to refer to the clon...
Definition VPlan.cpp:1244
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition Value.cpp:162
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:393
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:549
iterator_range< user_iterator > users()
Definition Value.h:426
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:709
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:318
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:202
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition DenseSet.h:175
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:230
constexpr bool isNonZero() const
Definition TypeSize.h:155
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:216
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr bool isFixed() const
Returns true if the quantity is not scaled by vscale.
Definition TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
constexpr bool isZero() const
Definition TypeSize.h:153
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:223
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
Definition TypeSize.h:252
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:237
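Because a fixed and a scalable quantity may be incomparable at compile time, ElementCount offers the isKnown* predicates instead of ordinary relational operators; a minimal sketch:

#include "llvm/Support/TypeSize.h"
using namespace llvm;

bool knownNoWiderThan(ElementCount A, ElementCount B) {
  // True only when A <= B holds for every possible vscale.
  return ElementCount::isKnownLE(A, B);
}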
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
IteratorT end() const
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
Changed
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
Definition CallingConv.h:24
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition CallingConv.h:76
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:190
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
match_combine_or< Ty... > m_CombineOr(const Ty &...Ps)
Combine pattern matchers matching any of Ps patterns.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
match_bind< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
auto match_fn(const Pattern &P)
A match functor that can be used as a UnaryPredicate in functional algorithms like all_of.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
auto m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
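These matchers compose. A minimal sketch of binding an operand while matching an add-by-one; the isIncrementByOne helper is hypothetical:
  #include "llvm/IR/PatternMatch.h"
  using namespace llvm;
  using namespace llvm::PatternMatch;
  // Match "add X, 1" and capture X; wrapping the pattern in m_OneUse would
  // additionally require the matched value to have a single use.
  static bool isIncrementByOne(Value *V, Value *&X) {
    return match(V, m_Add(m_Value(X), m_One()));
  }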
bind_cst_ty m_scev_APInt(const APInt *&C)
Match an SCEV constant and bind it to an APInt.
specificloop_ty m_SpecificLoop(const Loop *L)
cst_pred_ty< is_specific_signed_cst > m_scev_SpecificSInt(int64_t V)
Match an SCEV constant with a plain signed integer (sign-extended value will be matched)
match_bind< const SCEVMulExpr > m_scev_Mul(const SCEVMulExpr *&V)
bool match(const SCEV *S, const Pattern &P)
SCEVAffineAddRec_match< Op0_t, Op1_t, match_isa< const Loop > > m_scev_AffineAddRec(const Op0_t &Op0, const Op1_t &Op1)
SCEVBinaryExpr_match< SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagAnyWrap, true > m_scev_c_Mul(const Op0_t &Op0, const Op1_t &Op1)
int_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
bool matchFindIVResult(VPInstruction *VPI, Op0_t ReducedIV, Op1_t Start)
Match FindIV result pattern: select(icmp ne ComputeReductionResult(ReducedIV), Sentinel),...
VPInstruction_match< VPInstruction::ExtractLastLane, Op0_t > m_ExtractLastLane(const Op0_t &Op0)
VPInstruction_match< VPInstruction::BranchOnCount > m_BranchOnCount()
auto m_VPValue()
Match an arbitrary VPValue and ignore it.
VPInstruction_match< VPInstruction::ExtractLastPart, Op0_t > m_ExtractLastPart(const Op0_t &Op0)
bool match(Val *V, const Pattern &P)
VPInstruction_match< VPInstruction::ExtractLane, Op0_t, Op1_t > m_ExtractLane(const Op0_t &Op0, const Op1_t &Op1)
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to the ValuesClass constructor.
initializer< Ty > init(const Ty &Val)
Add a small namespace to avoid name clashes with the classes used in the streaming interface.
DiagnosticInfoOptimizationBase::Argument NV
NodeAddr< InstrNode * > Instr
Definition RDFGraph.h:389
friend class Instruction
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
VPValue * getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr)
Get or create a VPValue that corresponds to the expansion of Expr.
VPBasicBlock * getFirstLoopHeader(VPlan &Plan, VPDominatorTree &VPDT)
Returns the header block of the first, top-level loop, or null if none exist.
bool isAddressSCEVForCost(const SCEV *Addr, ScalarEvolution &SE, const Loop *L)
Returns true if Addr is an address SCEV that can be passed to TTI::getAddressComputationCost,...
VPInstruction * findCanonicalIVIncrement(VPlan &Plan)
Find the canonical IV increment of Plan's vector loop region.
VPRecipeBase * findRecipe(VPValue *Start, PredT Pred)
Search Start's users for a recipe satisfying Pred, looking through recipes with definitions.
Definition VPlanUtils.h:115
VPSingleDefRecipe * findHeaderMask(VPlan &Plan)
Collect the header mask with the pattern: (ICMP_ULE, WideCanonicalIV, backedge-taken-count) TODO: Int...
const SCEV * getSCEVExprForVPValue(const VPValue *V, PredicatedScalarEvolution &PSE, const Loop *L=nullptr)
Return the SCEV expression for V.
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE, AssumptionCache *AC, MemorySSAUpdater *MSSAU, bool PreserveLCSSA)
Simplify each loop in a loop nest recursively.
void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, const Loop *TheLoop, Instruction *I=nullptr, DebugLoc DL={})
Reports an informative message: print Msg for debugging purposes as well as an optimization remark.
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:830
constexpr auto not_equal_to(T &&Arg)
Functor variant of std::not_equal_to that can be used as a UnaryPredicate in functional algorithms li...
Definition STLExtras.h:2179
LLVM_ABI Value * addRuntimeChecks(Instruction *Loc, Loop *TheLoop, const SmallVectorImpl< RuntimePointerCheck > &PointerChecks, SCEVExpander &Expander, bool HoistRuntimeChecks=false)
Add code that checks at runtime if the accessed arrays in PointerChecks overlap.
auto cast_if_present(const Y &Val)
cast_if_present<X> - Functionally identical to cast, except that a null value is accepted.
Definition Casting.h:683
LLVM_ABI bool RemoveRedundantDbgInstrs(BasicBlock *BB)
Try to remove redundant dbg.value instructions from given basic block.
LLVM_ABI_FOR_TEST cl::opt< bool > VerifyEachVPlan
LLVM_ABI std::optional< unsigned > getLoopEstimatedTripCount(Loop *L, unsigned *EstimatedLoopInvocationWeight=nullptr)
Return either:
static void reportVectorization(OptimizationRemarkEmitter *ORE, Loop *TheLoop, VectorizationFactor VF, unsigned IC)
Report successful vectorization of the loop.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1738
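A short sketch of the range form, which avoids spelling out begin()/end(); allUsersAreLoads is a hypothetical helper:
  #include "llvm/ADT/STLExtras.h"
  #include "llvm/IR/Instructions.h"
  // Returns true when every user of V is a load instruction.
  static bool allUsersAreLoads(const llvm::Value *V) {
    return llvm::all_of(V->users(), [](const llvm::User *U) {
      return llvm::isa<llvm::LoadInst>(U);
    });
  }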
unsigned getLoadStoreAddressSpace(const Value *I)
A helper function that returns the address space of the pointer operand of load or store instruction.
LLVM_ABI Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)
Returns the min/max intrinsic used when expanding a min/max reduction.
LLVM_ABI Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
InstructionCost Cost
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
Value * getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF)
Return the runtime value for VF.
LLVM_ABI bool formLCSSARecursively(Loop &L, const DominatorTree &DT, const LoopInfo *LI, ScalarEvolution *SE)
Put a loop nest into LCSSA form.
Definition LCSSA.cpp:449
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2207
LLVM_ABI bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
Definition STLExtras.h:633
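A hedged sketch of the classic use case, deleting instructions while walking a block; removeTriviallyDead is a hypothetical name, and isInstructionTriviallyDead comes from llvm/Transforms/Utils/Local.h:
  #include "llvm/ADT/STLExtras.h"
  #include "llvm/IR/BasicBlock.h"
  #include "llvm/Transforms/Utils/Local.h"
  // The adaptor increments before yielding, so erasing the current
  // instruction does not invalidate the loop.
  static void removeTriviallyDead(llvm::BasicBlock &BB) {
    for (llvm::Instruction &I : llvm::make_early_inc_range(BB))
      if (llvm::isInstructionTriviallyDead(&I))
        I.eraseFromParent();
  }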
Align getLoadStoreAlignment(const Value *I)
A helper function that returns the alignment of load or store instruction.
iterator_range< df_iterator< VPBlockShallowTraversalWrapper< VPBlockBase * > > > vp_depth_first_shallow(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order.
Definition VPlanCFG.h:253
LLVM_ABI bool VerifySCEV
LLVM_ABI_FOR_TEST cl::opt< bool > VPlanPrintAfterAll
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not have undefined behavior.
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
iterator_range< df_iterator< VPBlockDeepTraversalWrapper< VPBlockBase * > > > vp_depth_first_deep(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order while traversing through region blocks.
Definition VPlanCFG.h:265
SmallVector< VPRegisterUsage, 8 > calculateRegisterUsageForPlan(VPlan &Plan, ArrayRef< ElementCount > VFs, const TargetTransformInfo &TTI, const SmallPtrSetImpl< const Value * > &ValuesToIgnore)
Estimate the register usage for Plan and vectorization factors in VFs by calculating the highest number of values that are live at a single location as a rough estimate.
auto map_range(ContainerTy &&C, FuncTy F)
Return a range that applies F to the elements of C.
Definition STLExtras.h:365
constexpr auto bind_front(FnT &&Fn, BindArgsT &&...BindArgs)
C++20 bind_front.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1745
void collectEphemeralRecipesForVPlan(VPlan &Plan, DenseSet< VPRecipeBase * > &EphRecipes)
auto reverse(ContainerTy &&C)
Definition STLExtras.h:407
bool containsIrreducibleCFG(RPOTraversalT &RPOTraversal, const LoopInfoT &LI)
Return true if the control flow in RPOTraversal is irreducible.
Definition CFG.h:154
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1635
LLVM_ABI_FOR_TEST cl::opt< bool > EnableWideActiveLaneMask
UncountableExitStyle
Different methods of handling early exits.
Definition VPlan.h:82
@ ReadOnly
No side effects to worry about, so we can process any uncountable exits in the loop and branch either...
Definition VPlan.h:87
@ MaskedHandleExitInScalarLoop
All memory operations other than the load(s) required to determine whether an uncountable exit occurr...
Definition VPlan.h:92
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1752
LLVM_ABI cl::opt< bool > EnableLoopVectorization
constexpr uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
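A worked example of the rounding behavior; the paddedSize helper and the Align(8) choice are arbitrary illustrations:
  #include <cstdint>
  #include "llvm/Support/Alignment.h"
  // alignTo rounds up to the next multiple of the alignment:
  //   alignTo(10, Align(8)) == 16 and alignTo(16, Align(8)) == 16.
  static uint64_t paddedSize(uint64_t Bytes) {
    return llvm::alignTo(Bytes, llvm::Align(8));
  }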
LLVM_ABI_FOR_TEST cl::list< std::string > VPlanPrintAfterPasses
LLVM_ABI bool wouldInstructionBeTriviallyDead(const Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction would have no side effects if it was not used.
Definition Local.cpp:422
SmallVector< ValueTypeFromRangeType< R >, Size > to_vector(R &&Range)
Given a range of type R, iterate the entire range and return a SmallVector with elements of the vector.
Type * toVectorizedTy(Type *Ty, ElementCount EC)
A helper for converting to vectorized types.
LLVM_ABI void llvm_unreachable_internal(const char *msg=nullptr, const char *file=nullptr, unsigned line=0)
This function calls abort(), and prints the optional message to stderr.
T * find_singleton(R &&Range, Predicate P, bool AllowRepeats=false)
Return the single value in Range that satisfies P(<member of Range> *, AllowRepeats)->T * returning n...
Definition STLExtras.h:1836
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
std::optional< unsigned > getMaxVScale(const Function &F, const TargetTransformInfo &TTI)
cl::opt< unsigned > ForceTargetInstructionCost
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
format_object< Ts... > format(const char *Fmt, const Ts &... Vals)
These are helper functions used to produce formatted output.
Definition Format.h:129
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition MathExtras.h:394
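Worked example: divideCeil(10, 4) is 3, since ceil(10/4) = 3, whereas plain integer division would give 2. The numChunks helper below is hypothetical:
  #include "llvm/Support/MathExtras.h"
  // Number of fixed-size chunks needed to cover Bytes, rounding up.
  static unsigned numChunks(unsigned Bytes, unsigned ChunkSize) {
    return llvm::divideCeil(Bytes, ChunkSize);
  }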
bool canVectorizeTy(Type *Ty)
Returns true if Ty is a valid vector element type, void, or an unpacked literal struct where all elements are vectorizable.
TargetTransformInfo TTI
@ CM_EpilogueNotAllowedLowTripLoop
@ CM_EpilogueNotNeededFoldTail
@ CM_EpilogueNotAllowedFoldTail
@ CM_EpilogueNotAllowedOptSize
@ CM_EpilogueAllowed
LLVM_ABI bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
RecurKind
These are the kinds of recurrences that we support.
@ Or
Bitwise or logical OR of integers.
@ FMulAdd
Sum of float products with llvm.fmuladd(a * b + sum).
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
LLVM_ABI Value * getRecurrenceIdentity(RecurKind K, Type *Tp, FastMathFlags FMF)
Given information about a recurrence kind, return the identity for the @llvm.vector.reduce.* intrinsic used to generate it.
LLVM_ABI BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="")
Split the specified block at the specified instruction.
LLVM_ABI void reportVectorizationFailure(const StringRef DebugMsg, const StringRef OREMsg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I=nullptr)
Reports a vectorization failure: print DebugMsg for debugging purposes along with the corresponding optimization remark.
DWARFExpression::Operation Op
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
ArrayRef(const T &OneElt) -> ArrayRef< T >
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
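A sketch of the idiomatic division of labor among isa<>, cast<>, and dyn_cast<>; the loadedPointer helper is hypothetical:
  #include "llvm/IR/Instructions.h"
  #include "llvm/Support/Casting.h"
  // dyn_cast<> combines the isa<> test and the cast<>: it returns null when
  // I is not a LoadInst, so no separate isa<> check is needed. Use cast<>
  // only when the dynamic type is already known.
  static llvm::Value *loadedPointer(llvm::Instruction *I) {
    if (auto *LI = llvm::dyn_cast<llvm::LoadInst>(I))
      return LI->getPointerOperand();
    return nullptr;
  }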
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1771
auto predecessors(const MachineBasicBlock *BB)
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition iterator.h:368
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1946
cl::opt< bool > EnableVPlanNativePath
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
ArrayRef< Type * > getContainedTypes(Type *const &Ty)
Returns the types contained in Ty.
LLVM_ABI Value * addDiffRuntimeChecks(Instruction *Loc, ArrayRef< PointerDiffInfo > Checks, SCEVExpander &Expander, function_ref< Value *(IRBuilderBase &, unsigned)> GetVF, unsigned IC)
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
@ None
Don't use tail folding.
@ DataWithEVL
Use predicated EVL instructions for tail-folding.
@ DataAndControlFlow
Use predicate to control both data and control flow.
@ DataWithoutLaneMask
Same as Data, but avoids using the get.active.lane.mask intrinsic to calculate the mask and instead implements it with a simple icmp.
@ Data
Use predicate only to mask operations on data in the loop.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI bool hasBranchWeightMD(const Instruction &I)
Checks if an instructions has Branch Weight Metadata.
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
Definition Hashing.h:592
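A brief sketch of combining heterogeneous fields into one hash_code, e.g. when writing a DenseMapInfo specialization; hashKey and its parameters are hypothetical:
  #include "llvm/ADT/Hashing.h"
  #include "llvm/ADT/StringRef.h"
  // hash_combine hashes each argument and mixes the results into one code.
  static llvm::hash_code hashKey(llvm::StringRef Name, unsigned Kind) {
    return llvm::hash_combine(Name, Kind);
  }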
@ Increment
Incrementally increasing token ID.
Definition AllocToken.h:26
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
Definition bit.h:347
Type * toVectorTy(Type *Scalar, ElementCount EC)
A helper function for converting Scalar types to vector types.
std::unique_ptr< VPlan > VPlanPtr
Definition VPlan.h:77
constexpr detail::IsaCheckPredicate< Types... > IsaPred
Function object wrapper for the llvm::isa type check.
Definition Casting.h:866
LLVM_ABI_FOR_TEST bool verifyVPlanIsValid(const VPlan &Plan)
Verify invariants for general VPlans.
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Definition Hashing.h:466
LLVM_ABI_FOR_TEST cl::opt< bool > VPlanPrintVectorRegionScope
LLVM_ABI cl::opt< bool > EnableLoopInterleaving
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
A special type used by analysis passes to provide an address that identifies that particular analysis pass type.
Definition Analysis.h:29
static LLVM_ABI void collectEphemeralValues(const Loop *L, AssumptionCache *AC, SmallPtrSetImpl< const Value * > &EphValues)
Collect a loop's ephemeral values (those used only by an assume or similar intrinsics in the loop).
An information struct used to provide DenseMap with the various necessary components for a given value type.
Encapsulate information regarding vectorization of a loop and its epilogue.
EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF, ElementCount EVF, unsigned EUF, VPlan &EpiloguePlan)
A class that represents two vectorization factors (initialized with 0 by default).
static FixedScalableVFPair getNone()
This holds details about a histogram operation – a load -> update -> store sequence where each lane i...
TargetLibraryInfo * TLI
LLVM_ABI LoopVectorizeResult runImpl(Function &F)
LLVM_ABI bool processLoop(Loop *L)
ProfileSummaryInfo * PSI
LoopAccessInfoManager * LAIs
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI LoopVectorizePass(LoopVectorizeOptions Opts={})
ScalarEvolution * SE
AssumptionCache * AC
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
OptimizationRemarkEmitter * ORE
std::function< BlockFrequencyInfo &()> GetBFI
TargetTransformInfo * TTI
Storage for information about made changes.
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70
A marker analysis to determine if extra passes should be run after loop vectorization.
static LLVM_ABI AnalysisKey Key
Holds the VFShape for a specific scalar to vector function mapping.
std::optional< unsigned > getParamIndexForOptionalMask() const
Instruction Set Architecture.
Encapsulates information needed to describe a parameter.
A range of powers-of-2 vectorization factors with fixed start and adjustable end.
ElementCount End
Struct to hold various analysis needed for cost computations.
LoopVectorizationCostModel & CM
bool skipCostComputation(Instruction *UI, bool IsVector) const
Return true if the cost for UI shouldn't be computed, e.g. because it has already been pre-computed.
InstructionCost getLegacyCost(Instruction *UI, ElementCount VF) const
Return the cost for UI with VF using the legacy cost model as fallback until computing the cost of all recipes migrates to VPlan.
uint64_t getPredBlockCostDivisor(BasicBlock *BB) const
TargetTransformInfo::TargetCostKind CostKind
SmallPtrSet< Instruction *, 8 > SkipCostComputation
A VPValue representing a live-in from the input IR or a constant.
Definition VPlanValue.h:240
A struct that represents some properties of the register usage of a loop.
InstructionCost spillCost(const TargetTransformInfo &TTI, TargetTransformInfo::TargetCostKind CostKind, unsigned OverrideMaxNumRegs=0) const
Calculate the estimated cost of any spills due to using more registers than the number available for ...
VPTransformState holds information passed down when "executing" a VPlan, needed for generating the output IR.
A recipe for widening load operations, using the address to load from and an optional mask.
Definition VPlan.h:3580
A recipe for widening store operations, using the stored value, the address to store to and an optional mask.
Definition VPlan.h:3661
static LLVM_ABI_FOR_TEST bool tryToConvertVPInstructionsToVPRecipes(VPlan &Plan, const TargetLibraryInfo &TLI)
Replaces the VPInstructions in Plan with corresponding widen recipes.
static void makeMemOpWideningDecisions(VPlan &Plan, VFRange &Range, VPRecipeBuilder &RecipeBuilder)
Convert load/store VPInstructions in Plan into widened or replicate recipes.
static void addMinimumIterationCheck(VPlan &Plan, ElementCount VF, unsigned UF, ElementCount MinProfitableTripCount, bool RequiresScalarEpilogue, bool TailFolded, Loop *OrigLoop, const uint32_t *MinItersBypassWeights, DebugLoc DL, PredicatedScalarEvolution &PSE, VPBasicBlock *CheckBlock=nullptr)
static bool createHeaderPhiRecipes(VPlan &Plan, PredicatedScalarEvolution &PSE, Loop &OrigLoop, const MapVector< PHINode *, InductionDescriptor > &Inductions, const MapVector< PHINode *, RecurrenceDescriptor > &Reductions, const SmallPtrSetImpl< const PHINode * > &FixedOrderRecurrences, const SmallPtrSetImpl< PHINode * > &InLoopReductions, bool AllowReordering)
Replace VPPhi recipes in Plan's header with corresponding VPHeaderPHIRecipe subclasses for inductions...
static void materializeBroadcasts(VPlan &Plan)
Add explicit broadcasts for live-ins and VPValues defined in Plan's entry block if they are used as vectors.
static void materializePacksAndUnpacks(VPlan &Plan)
Add explicit Build[Struct]Vector recipes to Pack multiple scalar values into vectors and Unpack recipes to extract scalar values from vectors.
static void createInterleaveGroups(VPlan &Plan, const SmallPtrSetImpl< const InterleaveGroup< Instruction > * > &InterleaveGroups, const bool &EpilogueAllowed)
static LLVM_ABI_FOR_TEST std::unique_ptr< VPlan > buildVPlan0(Loop *TheLoop, LoopInfo &LI, Type *InductionTy, DebugLoc IVDL, PredicatedScalarEvolution &PSE, LoopVersioning *LVer=nullptr)
Create a base VPlan0, serving as the common starting point for all later candidates.
static bool simplifyKnownEVL(VPlan &Plan, ElementCount VF, PredicatedScalarEvolution &PSE)
Try to simplify VPInstruction::ExplicitVectorLength recipes when the AVL is known to be <= VF,...
static void removeBranchOnConst(VPlan &Plan, bool OnlyLatches=false)
Remove BranchOnCond recipes with true or false conditions together with removing dead edges to their successors.
static void introduceMasksAndLinearize(VPlan &Plan)
Predicate and linearize the control-flow in the only loop region of Plan.
static void materializeFactors(VPlan &Plan, VPBasicBlock *VectorPH, ElementCount VF)
Materialize UF, VF and VFxUF to be computed explicitly using VPInstructions.
static void foldTailByMasking(VPlan &Plan)
Adapts the vector loop region for tail folding by introducing a header mask and conditionally executi...
static void materializeBackedgeTakenCount(VPlan &Plan, VPBasicBlock *VectorPH)
Materialize the backedge-taken count to be computed explicitly using VPInstructions.
static void addMinimumVectorEpilogueIterationCheck(VPlan &Plan, Value *VectorTripCount, bool RequiresScalarEpilogue, ElementCount EpilogueVF, unsigned EpilogueUF, unsigned MainLoopStep, unsigned EpilogueLoopStep, ScalarEvolution &SE)
Add a check to Plan to see if the epilogue vector loop should be executed.
static void addActiveLaneMask(VPlan &Plan, bool UseActiveLaneMaskForControlFlow)
Replace (ICMP_ULE, wide canonical IV, backedge-taken-count) checks with an (active-lane-mask recipe,...
static bool handleMultiUseReductions(VPlan &Plan, OptimizationRemarkEmitter *ORE, Loop *TheLoop)
Try to legalize reductions with multiple in-loop uses.
static void dropPoisonGeneratingRecipes(VPlan &Plan, const std::function< bool(BasicBlock *)> &BlockNeedsPredication)
Drop poison flags from recipes that may generate a poison value that is used after vectorization,...
static void replaceWideCanonicalIVWithWideIV(VPlan &Plan, ScalarEvolution &SE, const TargetTransformInfo &TTI, TargetTransformInfo::TargetCostKind CostKind, ElementCount VF, unsigned UF, const SmallPtrSetImpl< const Value * > &ValuesToIgnore)
Replace a VPWidenCanonicalIVRecipe if it is present in Plan, with a VPWidenIntOrFpInductionRecipe,...
static void convertToVariableLengthStep(VPlan &Plan)
Transform loops with variable-length stepping after region dissolution.
static void addBranchWeightToMiddleTerminator(VPlan &Plan, ElementCount VF, std::optional< unsigned > VScaleForTuning)
Add branch weight metadata, if the Plan's middle block is terminated by a BranchOnCond recipe.
static std::unique_ptr< VPlan > narrowInterleaveGroups(VPlan &Plan, const TargetTransformInfo &TTI)
Try to find a single VF among Plan's VFs for which all interleave groups (with known minimum VF eleme...
static bool handleFindLastReductions(VPlan &Plan)
Check if Plan contains any FindLast reductions.
static void createInLoopReductionRecipes(VPlan &Plan, ElementCount MinVF)
Create VPReductionRecipes for in-loop reductions.
static void unrollByUF(VPlan &Plan, unsigned UF)
Explicitly unroll Plan by UF.
static DenseMap< const SCEV *, Value * > expandSCEVs(VPlan &Plan, ScalarEvolution &SE)
Expand VPExpandSCEVRecipes in Plan's entry block.
static void convertToConcreteRecipes(VPlan &Plan)
Lower abstract recipes to concrete ones, that can be codegen'd.
static void expandBranchOnTwoConds(VPlan &Plan)
Expand BranchOnTwoConds instructions into explicit CFG with BranchOnCond instructions.
static void materializeVectorTripCount(VPlan &Plan, VPBasicBlock *VectorPHVPBB, bool TailByMasking, bool RequiresScalarEpilogue, VPValue *Step, std::optional< uint64_t > MaxRuntimeStep=std::nullopt)
Materialize vector trip count computations to a set of VPInstructions.
static void hoistPredicatedLoads(VPlan &Plan, PredicatedScalarEvolution &PSE, const Loop *L)
Hoist predicated loads from the same address to the loop entry block, if they are guaranteed to execu...
static void optimizeFindIVReductions(VPlan &Plan, PredicatedScalarEvolution &PSE, Loop &L)
Optimize FindLast reductions selecting IVs (or expressions of IVs) by converting them to FindIV reduc...
static void convertToAbstractRecipes(VPlan &Plan, VPCostContext &Ctx, VFRange &Range)
This function converts initial recipes to the abstract recipes and clamps Range based on cost model f...
static void materializeConstantVectorTripCount(VPlan &Plan, ElementCount BestVF, unsigned BestUF, PredicatedScalarEvolution &PSE)
static void addExplicitVectorLength(VPlan &Plan, const std::optional< unsigned > &MaxEVLSafeElements)
Add a VPCurrentIterationPHIRecipe and related recipes to Plan and replaces all uses of the canonical ...
static void adjustFirstOrderRecurrenceMiddleUsers(VPlan &Plan, VFRange &Range)
Adjust first-order recurrence users in the middle block: create penultimate element extracts for LCSS...
static void optimizeEVLMasks(VPlan &Plan)
Optimize recipes which use an EVL-based header mask to VP intrinsics, for example:
static LLVM_ABI_FOR_TEST bool handleEarlyExits(VPlan &Plan, UncountableExitStyle Style, Loop *TheLoop, PredicatedScalarEvolution &PSE, DominatorTree &DT, AssumptionCache *AC)
Update Plan to account for all early exits.
static void replaceSymbolicStrides(VPlan &Plan, PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &StridesMap)
Replace symbolic strides from StridesMap in Plan with constants when possible.
static bool handleMaxMinNumReductions(VPlan &Plan)
Check if Plan contains any FMaxNum or FMinNum reductions.
static LLVM_ABI_FOR_TEST void createLoopRegions(VPlan &Plan)
Replace loops in Plan's flat CFG with VPRegionBlocks, turning Plan's flat CFG into a hierarchical CFG.
static void removeDeadRecipes(VPlan &Plan)
Remove dead recipes from Plan.
static void attachCheckBlock(VPlan &Plan, Value *Cond, BasicBlock *CheckBlock, bool AddBranchWeights)
Wrap runtime check block CheckBlock in a VPIRBB and Cond in a VPValue and connect the block to Plan,...
static void simplifyRecipes(VPlan &Plan)
Perform instcombine-like simplifications on recipes in Plan.
static void sinkPredicatedStores(VPlan &Plan, PredicatedScalarEvolution &PSE, const Loop *L)
Sink predicated stores to the same address with complementary predicates (P and NOT P) to an unconditional store.
static void replicateByVF(VPlan &Plan, ElementCount VF)
Replace replicating VPReplicateRecipe, VPScalarIVStepsRecipe and VPInstruction in Plan with VF single...
static void addIterationCountCheckBlock(VPlan &Plan, ElementCount VF, unsigned UF, bool RequiresScalarEpilogue, Loop *OrigLoop, const uint32_t *MinItersBypassWeights, DebugLoc DL, PredicatedScalarEvolution &PSE)
Add a new check block before the vector preheader to Plan to check if the main vector loop should be executed.
static void clearReductionWrapFlags(VPlan &Plan)
Clear NSW/NUW flags from reduction instructions if necessary.
static void optimizeInductionLiveOutUsers(VPlan &Plan, PredicatedScalarEvolution &PSE, bool FoldTail)
If there's a single exit block, optimize its phi recipes that use exiting IV values by feeding them p...
static void createPartialReductions(VPlan &Plan, VPCostContext &CostCtx, VFRange &Range)
Detect and create partial reduction recipes for scaled reductions in Plan.
static void cse(VPlan &Plan)
Perform common-subexpression-elimination on Plan.
static LLVM_ABI_FOR_TEST void optimize(VPlan &Plan)
Apply VPlan-to-VPlan optimizations to Plan, including induction recipe optimizations,...
static void dissolveLoopRegions(VPlan &Plan)
Replace loop regions with explicit CFG.
static void truncateToMinimalBitwidths(VPlan &Plan, const MapVector< Instruction *, uint64_t > &MinBWs)
Insert truncates and extends for any truncated recipe.
static void optimizeForVFAndUF(VPlan &Plan, ElementCount BestVF, unsigned BestUF, PredicatedScalarEvolution &PSE)
Optimize Plan based on BestVF and BestUF.
static void convertEVLExitCond(VPlan &Plan)
Replaces the exit condition from (branch-on-cond eq CanonicalIVInc, VectorTripCount) to (branch-on-co...
static LLVM_ABI_FOR_TEST void addMiddleCheck(VPlan &Plan, bool TailFolded)
If a check is needed to guard executing the scalar epilogue loop, it will be added to the middle block.
TODO: The following VectorizationFactor was pulled out of the LoopVectorizationCostModel class.
InstructionCost Cost
Cost of the loop with that width.
ElementCount MinProfitableTripCount
The minimum trip count required to make vectorization profitable, e.g. due to runtime checks.
ElementCount Width
Vector width with best cost.
InstructionCost ScalarCost
Cost of the scalar loop.
static VectorizationFactor Disabled()
Width 1 means no vectorization, cost 0 means uncomputed cost.
static LLVM_ABI bool HoistRuntimeChecks