LoopVectorize.cpp
1//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10// and generates target-independent LLVM-IR.
11// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12// of instructions in order to estimate the profitability of vectorization.
13//
14// The loop vectorizer combines consecutive loop iterations into a single
15// 'wide' iteration. After this transformation the index is incremented
16// by the SIMD vector width, and not by one.
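// For example (a conceptual sketch, not the exact IR produced), with VF = 4
// the scalar loop
//   for (i = 0; i < n; i++)   A[i] = B[i] + C[i];
// is turned into a loop that processes four elements per iteration:
//   for (i = 0; i < n; i += 4)   A[i..i+3] = B[i..i+3] + C[i..i+3];
// with a scalar remainder (or predicated tail) handling any leftover
// iterations.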
17//
18// This pass has four parts:
19// 1. The main loop pass that drives the different parts.
20// 2. LoopVectorizationLegality - A unit that checks for the legality
21// of the vectorization.
22// 3. InnerLoopVectorizer - A unit that performs the actual
23// widening of instructions.
24// 4. LoopVectorizationCostModel - A unit that checks for the profitability
25// of vectorization. It decides on the optimal vector width, which
26// can be one, if vectorization is not profitable.
27//
28// There is a development effort going on to migrate the loop vectorizer to the
29// VPlan infrastructure and to introduce outer loop vectorization support (see
30// docs/VectorizationPlan.rst and
31// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32// purpose, we temporarily introduced the VPlan-native vectorization path: an
33// alternative vectorization path that is natively implemented on top of the
34// VPlan infrastructure. See EnableVPlanNativePath for enabling.
35//
36//===----------------------------------------------------------------------===//
37//
38// The reduction-variable vectorization is based on the paper:
39// D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40//
41// Variable uniformity checks are inspired by:
42// Karrenberg, R. and Hack, S. Whole Function Vectorization.
43//
44// The interleaved access vectorization is based on the paper:
45// Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
46// Data for SIMD
47//
48// Other ideas/concepts are from:
49// A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50//
51// S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
52// Vectorizing Compilers.
53//
54//===----------------------------------------------------------------------===//
55
58#include "VPRecipeBuilder.h"
59#include "VPlan.h"
60#include "VPlanAnalysis.h"
61#include "VPlanCFG.h"
62#include "VPlanHelpers.h"
63#include "VPlanPatternMatch.h"
64#include "VPlanTransforms.h"
65#include "VPlanUtils.h"
66#include "VPlanVerifier.h"
67#include "llvm/ADT/APInt.h"
68#include "llvm/ADT/ArrayRef.h"
69#include "llvm/ADT/DenseMap.h"
71#include "llvm/ADT/Hashing.h"
72#include "llvm/ADT/MapVector.h"
73#include "llvm/ADT/STLExtras.h"
76#include "llvm/ADT/Statistic.h"
77#include "llvm/ADT/StringRef.h"
78#include "llvm/ADT/Twine.h"
79#include "llvm/ADT/TypeSwitch.h"
84#include "llvm/Analysis/CFG.h"
101#include "llvm/IR/Attributes.h"
102#include "llvm/IR/BasicBlock.h"
103#include "llvm/IR/CFG.h"
104#include "llvm/IR/Constant.h"
105#include "llvm/IR/Constants.h"
106#include "llvm/IR/DataLayout.h"
107#include "llvm/IR/DebugInfo.h"
108#include "llvm/IR/DebugLoc.h"
109#include "llvm/IR/DerivedTypes.h"
111#include "llvm/IR/Dominators.h"
112#include "llvm/IR/Function.h"
113#include "llvm/IR/IRBuilder.h"
114#include "llvm/IR/InstrTypes.h"
115#include "llvm/IR/Instruction.h"
116#include "llvm/IR/Instructions.h"
118#include "llvm/IR/Intrinsics.h"
119#include "llvm/IR/MDBuilder.h"
120#include "llvm/IR/Metadata.h"
121#include "llvm/IR/Module.h"
122#include "llvm/IR/Operator.h"
123#include "llvm/IR/PatternMatch.h"
125#include "llvm/IR/Type.h"
126#include "llvm/IR/Use.h"
127#include "llvm/IR/User.h"
128#include "llvm/IR/Value.h"
129#include "llvm/IR/Verifier.h"
130#include "llvm/Support/Casting.h"
132#include "llvm/Support/Debug.h"
147#include <algorithm>
148#include <cassert>
149#include <cmath>
150#include <cstdint>
151#include <functional>
152#include <iterator>
153#include <limits>
154#include <memory>
155#include <string>
156#include <tuple>
157#include <utility>
158
159using namespace llvm;
160using namespace SCEVPatternMatch;
161
162#define LV_NAME "loop-vectorize"
163#define DEBUG_TYPE LV_NAME
164
165#ifndef NDEBUG
166const char VerboseDebug[] = DEBUG_TYPE "-verbose";
167#endif
168
169STATISTIC(LoopsVectorized, "Number of loops vectorized");
170STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
171STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
172STATISTIC(LoopsEarlyExitVectorized, "Number of early exit loops vectorized");
173
175 "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
176 cl::desc("Enable vectorization of epilogue loops."));
177
179 "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
180 cl::desc("When epilogue vectorization is enabled, and a value greater than "
181 "1 is specified, forces the given VF for all applicable epilogue "
182 "loops."));
183
185 "epilogue-vectorization-minimum-VF", cl::Hidden,
186 cl::desc("Only loops with vectorization factor equal to or larger than "
187 "the specified value are considered for epilogue vectorization."));
188
189/// Loops with a known constant trip count below this number are vectorized only
190/// if no scalar iteration overheads are incurred.
192 "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
193 cl::desc("Loops with a constant trip count that is smaller than this "
194 "value are vectorized only if no scalar iteration overheads "
195 "are incurred."));
196
198 "vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
199 cl::desc("The maximum allowed number of runtime memory checks"));
200
201/// Note: This currently only applies to `llvm.masked.load` and
202/// `llvm.masked.store`. TODO: Extend this to cover other operations as needed.
204 "force-target-supports-masked-memory-ops", cl::init(false), cl::Hidden,
205 cl::desc("Assume the target supports masked memory operations (used for "
206 "testing)."));
207
208// Option prefer-predicate-over-epilogue indicates that an epilogue is undesired,
209// that predication is preferred, and this lists all options. I.e., the
210// vectorizer will try to fold the tail-loop (epilogue) into the vector body
211// and predicate the instructions accordingly. If tail-folding fails, there are
212// different fallback strategies depending on these values:
219} // namespace PreferPredicateTy
220
222 "prefer-predicate-over-epilogue",
225 cl::desc("Tail-folding and predication preferences over creating a scalar "
226 "epilogue loop."),
228 "scalar-epilogue",
229 "Don't tail-predicate loops, create scalar epilogue"),
231 "predicate-else-scalar-epilogue",
232 "prefer tail-folding, create scalar epilogue if tail "
233 "folding fails."),
235 "predicate-dont-vectorize",
236 "prefers tail-folding, don't attempt vectorization if "
237 "tail-folding fails.")));
238
240 "force-tail-folding-style", cl::desc("Force the tail folding style"),
243 clEnumValN(TailFoldingStyle::None, "none", "Disable tail folding"),
246 "Create lane mask for data only, using active.lane.mask intrinsic"),
248 "data-without-lane-mask",
249 "Create lane mask with compare/stepvector"),
251 "Create lane mask using active.lane.mask intrinsic, and use "
252 "it for both data and control flow"),
254 "Use predicated EVL instructions for tail folding. If EVL "
255 "is unsupported, fallback to data-without-lane-mask.")));
256
258 "enable-wide-lane-mask", cl::init(false), cl::Hidden,
259 cl::desc("Enable use of wide lane masks when used for control flow in "
260 "tail-folded loops"));
261
263 "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
264 cl::desc("Maximize bandwidth when selecting vectorization factor which "
265 "will be determined by the smallest type in loop."));
266
268 "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
269 cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
270
271/// An interleave-group may need masking if it resides in a block that needs
272/// predication, or in order to mask away gaps.
274 "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
275 cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
276
278 "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
279 cl::desc("A flag that overrides the target's number of scalar registers."));
280
282 "force-target-num-vector-regs", cl::init(0), cl::Hidden,
283 cl::desc("A flag that overrides the target's number of vector registers."));
284
286 "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
287 cl::desc("A flag that overrides the target's max interleave factor for "
288 "scalar loops."));
289
291 "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
292 cl::desc("A flag that overrides the target's max interleave factor for "
293 "vectorized loops."));
294
296 "force-target-instruction-cost", cl::init(0), cl::Hidden,
297 cl::desc("A flag that overrides the target's expected cost for "
298 "an instruction to a single constant value. Mostly "
299 "useful for getting consistent testing."));
300
302 "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
303 cl::desc(
304 "Pretend that scalable vectors are supported, even if the target does "
305 "not support them. This flag should only be used for testing."));
306
308 "small-loop-cost", cl::init(20), cl::Hidden,
309 cl::desc(
310 "The cost of a loop that is considered 'small' by the interleaver."));
311
313 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
314 cl::desc("Enable the use of the block frequency analysis to access PGO "
315 "heuristics minimizing code growth in cold regions and being more "
316 "aggressive in hot regions."));
317
318// Runtime interleave loops for load/store throughput.
320 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
321 cl::desc(
322 "Enable runtime interleaving until load/store ports are saturated"));
323
324/// The number of stores in a loop that are allowed to need predication.
326 "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
327 cl::desc("Max number of stores to be predicated behind an if."));
328
330 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
331 cl::desc("Count the induction variable only once when interleaving"));
332
334 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
335 cl::desc("The maximum interleave count to use when interleaving a scalar "
336 "reduction in a nested loop."));
337
338static cl::opt<bool>
339 PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
341 cl::desc("Prefer in-loop vector reductions, "
342 "overriding the targets preference."));
343
345 "force-ordered-reductions", cl::init(false), cl::Hidden,
346 cl::desc("Enable the vectorisation of loops with in-order (strict) "
347 "FP reductions"));
348
350 "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
351 cl::desc(
352 "Prefer predicating a reduction operation over an after loop select."));
353
355 "enable-vplan-native-path", cl::Hidden,
356 cl::desc("Enable VPlan-native vectorization path with "
357 "support for outer loop vectorization."));
358
360 llvm::VerifyEachVPlan("vplan-verify-each",
361#ifdef EXPENSIVE_CHECKS
362 cl::init(true),
363#else
364 cl::init(false),
365#endif
367 cl::desc("Verify VPlans after VPlan transforms."));
368
369#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
371 "vplan-print-after-all", cl::init(false), cl::Hidden,
372 cl::desc("Print VPlans after all VPlan transformations."));
373
375 "vplan-print-after", cl::Hidden,
376 cl::desc("Print VPlans after specified VPlan transformations (regexp)."));
377
379 "vplan-print-vector-region-scope", cl::init(false), cl::Hidden,
380 cl::desc("Limit VPlan printing to vector loop region in "
381 "`-vplan-print-after*` if the plan has one."));
382#endif
383
384// This flag enables the stress testing of the VPlan H-CFG construction in the
385// VPlan-native vectorization path. It must be used in conjunction with
386// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
387// verification of the H-CFGs built.
389 "vplan-build-stress-test", cl::init(false), cl::Hidden,
390 cl::desc(
391 "Build VPlan for every supported loop nest in the function and bail "
392 "out right after the build (stress test the VPlan H-CFG construction "
393 "in the VPlan-native vectorization path)."));
394
396 "interleave-loops", cl::init(true), cl::Hidden,
397 cl::desc("Enable loop interleaving in Loop vectorization passes"));
399 "vectorize-loops", cl::init(true), cl::Hidden,
400 cl::desc("Run the Loop vectorization passes"));
401
403 "force-widen-divrem-via-safe-divisor", cl::Hidden,
404 cl::desc(
405 "Override cost based safe divisor widening for div/rem instructions"));
406
408 "vectorizer-maximize-bandwidth-for-vector-calls", cl::init(true),
410 cl::desc("Try wider VFs if they enable the use of vector variants"));
411
413 "enable-early-exit-vectorization", cl::init(true), cl::Hidden,
414 cl::desc(
415 "Enable vectorization of early exit loops with uncountable exits."));
416
418 "vectorizer-consider-reg-pressure", cl::init(false), cl::Hidden,
419 cl::desc("Discard VFs if their register pressure is too high."));
420
421// Likelihood of bypassing the vectorized loop because there are zero trips left
422// after prolog. See `emitIterationCountCheck`.
423static constexpr uint32_t MinItersBypassWeights[] = {1, 127};
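// I.e. branch weights of {1, 127}: the bypass path is assumed to be taken
// roughly 1 time in 128 (illustrative reading of the weights above).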
424
425/// A helper function that returns true if the given type is irregular. The
426/// type is irregular if its allocated size doesn't equal the store size of an
427/// element of the corresponding vector type.
428static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
429 // Determine if an array of N elements of type Ty is "bitcast compatible"
430 // with a <N x Ty> vector.
431 // This is only true if there is no padding between the array elements.
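// For example, on a typical x86-64 DataLayout x86_fp80 has an 80-bit store
// size but a 128-bit alloc size, so it is irregular, while i32 (32-bit store
// and alloc size) is regular. (Illustrative; actual sizes come from DL.)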
432 return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
433}
434
435/// A version of ScalarEvolution::getSmallConstantTripCount that returns an
436/// ElementCount to include loops whose trip count is a function of vscale.
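/// For example, a constant trip count of 8 yields ElementCount::getFixed(8),
/// while a trip count whose SCEV is (8 * vscale)<nuw> yields a scalable count
/// (e.g. ElementCount::getScalable(8)).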
438 const Loop *L) {
439 if (unsigned ExpectedTC = SE->getSmallConstantTripCount(L))
440 return ElementCount::getFixed(ExpectedTC);
441
442 const SCEV *BTC = SE->getBackedgeTakenCount(L);
444 return ElementCount::getFixed(0);
445
446 const SCEV *ExitCount = SE->getTripCountFromExitCount(BTC, BTC->getType(), L);
447 if (isa<SCEVVScale>(ExitCount))
449
450 const APInt *Scale;
451 if (match(ExitCount, m_scev_Mul(m_scev_APInt(Scale), m_SCEVVScale())))
452 if (cast<SCEVMulExpr>(ExitCount)->hasNoUnsignedWrap())
453 if (Scale->getActiveBits() <= 32)
455
456 return ElementCount::getFixed(0);
457}
458
459/// Get the maximum trip count for \p L from the SCEV unsigned range, excluding
460/// zero from the range. Only valid when not folding the tail, as the minimum
461/// iteration count check guards against a zero trip count. Returns 0 if
462/// unknown.
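/// For example, if the unsigned range of the trip count is known to be
/// [1, 33), this returns 32.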
464 Loop *L) {
465 const SCEV *BTC = PSE.getBackedgeTakenCount();
467 return 0;
468 ScalarEvolution *SE = PSE.getSE();
469 const SCEV *TripCount = SE->getTripCountFromExitCount(BTC, BTC->getType(), L);
470 ConstantRange TCRange = SE->getUnsignedRange(TripCount);
471 APInt MaxTCFromRange = TCRange.getUnsignedMax();
472 if (!MaxTCFromRange.isZero() && MaxTCFromRange.getActiveBits() <= 32)
473 return MaxTCFromRange.getZExtValue();
474 return 0;
475}
476
477/// Returns "best known" trip count, which is either a valid positive trip count
478/// or std::nullopt when an estimate cannot be made (including when the trip
479/// count would overflow), for the specified loop \p L as defined by the
480/// following procedure:
481/// 1) Returns exact trip count if it is known.
482/// 2) Returns expected trip count according to profile data if any.
483/// 3) Returns upper bound estimate if known, and if \p CanUseConstantMax.
484/// 4) Returns the maximum trip count from the SCEV range excluding zero,
485/// if \p CanUseConstantMax and \p CanExcludeZeroTrips.
486/// 5) Returns std::nullopt if all of the above failed.
487static std::optional<ElementCount>
489 bool CanUseConstantMax = true,
490 bool CanExcludeZeroTrips = false) {
491 // Check if exact trip count is known.
492 if (auto ExpectedTC = getSmallConstantTripCount(PSE.getSE(), L))
493 return ExpectedTC;
494
495 // Check if there is an expected trip count available from profile data.
497 if (auto EstimatedTC = getLoopEstimatedTripCount(L))
498 return ElementCount::getFixed(*EstimatedTC);
499
500 if (!CanUseConstantMax)
501 return std::nullopt;
502
503 // Check if upper bound estimate is known.
504 if (unsigned ExpectedTC = PSE.getSmallConstantMaxTripCount())
505 return ElementCount::getFixed(ExpectedTC);
506
507 // Get the maximum trip count from the SCEV range excluding zero. This is
508 // only safe when not folding the tail, as the minimum iteration count check
509 // prevents entering the vector loop with a zero trip count.
510 if (CanUseConstantMax && CanExcludeZeroTrips)
511 if (unsigned RefinedTC = getMaxTCFromNonZeroRange(PSE, L))
512 return ElementCount::getFixed(RefinedTC);
513
514 return std::nullopt;
515}
516
517namespace {
518// Forward declare GeneratedRTChecks.
519class GeneratedRTChecks;
520
521using SCEV2ValueTy = DenseMap<const SCEV *, Value *>;
522} // namespace
523
524namespace llvm {
525
527
528/// InnerLoopVectorizer vectorizes loops which contain only one basic
529/// block to a specified vectorization factor (VF).
530/// This class performs the widening of scalars into vectors, or multiple
531/// scalars. This class also implements the following features:
532/// * It inserts an epilogue loop for handling loops that don't have iteration
533/// counts that are known to be a multiple of the vectorization factor.
534/// * It handles the code generation for reduction variables.
535/// * Scalarization (implementation using scalars) of un-vectorizable
536/// instructions.
537/// InnerLoopVectorizer does not perform any vectorization-legality
538/// checks, and relies on the caller to check for the different legality
539/// aspects. The InnerLoopVectorizer relies on the
540/// LoopVectorizationLegality class to provide information about the induction
541/// and reduction variables that were found for a given vectorization factor.
543public:
547 ElementCount VecWidth, unsigned UnrollFactor,
549 GeneratedRTChecks &RTChecks, VPlan &Plan)
550 : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TTI(TTI), AC(AC),
551 VF(VecWidth), UF(UnrollFactor), Builder(PSE.getSE()->getContext()),
554 Plan.getVectorLoopRegion()->getSinglePredecessor())) {}
555
556 virtual ~InnerLoopVectorizer() = default;
557
558 /// Creates a basic block for the scalar preheader. Both
559 /// EpilogueVectorizerMainLoop and EpilogueVectorizerEpilogueLoop override
560 /// the method to create additional blocks and checks needed for epilogue
561 /// vectorization.
563
564 /// Fix the vectorized code, taking care of header phi's, and more.
566
567 /// Fix the non-induction PHIs in \p Plan.
569
570protected:
572
573 /// Create and return a new IR basic block for the scalar preheader whose name
574 /// is prefixed with \p Prefix.
576
577 /// Allow subclasses to override and print debug traces before/after vplan
578 /// execution, when trace information is requested.
579 virtual void printDebugTracesAtStart() {}
580 virtual void printDebugTracesAtEnd() {}
581
582 /// The original loop.
584
585 /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
586 /// dynamic knowledge to simplify SCEV expressions and converts them to a
587 /// more usable form.
589
590 /// Loop Info.
592
593 /// Dominator Tree.
595
596 /// Target Transform Info.
598
599 /// Assumption Cache.
601
602 /// The vectorization SIMD factor to use. Each vector will have this many
603 /// vector elements.
605
606 /// The vectorization unroll factor to use. Each scalar is vectorized to this
607 /// many different vector instructions.
608 unsigned UF;
609
610 /// The builder that we use.
612
613 // --- Vectorization state ---
614
615 /// The profitablity analysis.
617
618 /// Structure to hold information about generated runtime checks, responsible
619 /// for cleaning the checks, if vectorization turns out unprofitable.
620 GeneratedRTChecks &RTChecks;
621
623
624 /// The vector preheader block of \p Plan, used as target for check blocks
625 /// introduced during skeleton creation.
627};
628
629/// Encapsulate information regarding vectorization of a loop and its epilogue.
630/// This information is meant to be updated and used across two stages of
631/// epilogue vectorization.
634 unsigned MainLoopUF = 0;
636 unsigned EpilogueUF = 0;
641
643 ElementCount EVF, unsigned EUF,
645 : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF),
647 assert(EUF == 1 &&
648 "A high UF for the epilogue loop is likely not beneficial.");
649 }
650};
651
652/// An extension of the inner loop vectorizer that creates a skeleton for a
653/// vectorized loop that has its epilogue (residual) also vectorized.
654/// The idea is to run the VPlan on a given loop twice: first to set up the
655/// skeleton and vectorize the main loop, and second to complete the skeleton
656/// from the first step and vectorize the epilogue. This is achieved by
657/// deriving two concrete strategy classes from this base class and invoking
658/// them in succession from the loop vectorizer planner.
660public:
670
671 /// Holds and updates state information required to vectorize the main loop
672 /// and its epilogue in two separate passes. This setup helps us avoid
673 /// regenerating and recomputing runtime safety checks. It also helps us to
674 /// shorten the iteration-count-check path length for the cases where the
675 /// iteration count of the loop is so small that the main vector loop is
676 /// completely skipped.
678
679protected:
681};
682
683/// A specialized derived class of inner loop vectorizer that performs
684/// vectorization of *main* loops in the process of vectorizing loops and their
685/// epilogues.
687public:
698
699protected:
700 void printDebugTracesAtStart() override;
701 void printDebugTracesAtEnd() override;
702};
703
704// A specialized derived class of inner loop vectorizer that performs
705// vectorization of *epilogue* loops in the process of vectorizing loops and
706// their epilogues.
708public:
715 GeneratedRTChecks &Checks, VPlan &Plan)
717 Checks, Plan, EPI.EpilogueVF,
718 EPI.EpilogueVF, EPI.EpilogueUF) {}
719 /// Implements the interface for creating a vectorized skeleton using the
720 /// *epilogue loop* strategy (i.e., the second pass of VPlan execution).
722
723protected:
724 void printDebugTracesAtStart() override;
725 void printDebugTracesAtEnd() override;
726};
727} // end namespace llvm
728
729/// Look for a meaningful debug location on the instruction or its operands.
731 if (!I)
732 return DebugLoc::getUnknown();
733
735 if (I->getDebugLoc() != Empty)
736 return I->getDebugLoc();
737
738 for (Use &Op : I->operands()) {
739 if (Instruction *OpInst = dyn_cast<Instruction>(Op))
740 if (OpInst->getDebugLoc() != Empty)
741 return OpInst->getDebugLoc();
742 }
743
744 return I->getDebugLoc();
745}
746
747/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
748/// is passed, the message relates to that particular instruction.
749#ifndef NDEBUG
750static void debugVectorizationMessage(const StringRef Prefix,
751 const StringRef DebugMsg,
752 Instruction *I) {
753 dbgs() << "LV: " << Prefix << DebugMsg;
754 if (I != nullptr)
755 dbgs() << " " << *I;
756 else
757 dbgs() << '.';
758 dbgs() << '\n';
759}
760#endif
761
762/// Create an analysis remark that explains why vectorization failed
763///
764/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
765/// RemarkName is the identifier for the remark. If \p I is passed it is an
766/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
767/// the location of the remark. If \p DL is passed, use it as debug location for
768/// the remark. \return the remark object that can be streamed to.
769static OptimizationRemarkAnalysis
770createLVAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop,
771 Instruction *I, DebugLoc DL = {}) {
772 BasicBlock *CodeRegion = I ? I->getParent() : TheLoop->getHeader();
773 // If debug location is attached to the instruction, use it. Otherwise if DL
774 // was not provided, use the loop's.
775 if (I && I->getDebugLoc())
776 DL = I->getDebugLoc();
777 else if (!DL)
778 DL = TheLoop->getStartLoc();
779
780 return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
781}
782
783namespace llvm {
784
785/// Return the runtime value for VF.
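/// For a fixed VF this is simply a constant; for a scalable VF it expands to
/// a runtime computation such as `vscale * 4` (illustrative).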
787 return B.CreateElementCount(Ty, VF);
788}
789
791 const StringRef OREMsg, const StringRef ORETag,
792 OptimizationRemarkEmitter *ORE, Loop *TheLoop,
793 Instruction *I) {
794 LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
795 LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
796 ORE->emit(
797 createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
798 << "loop not vectorized: " << OREMsg);
799}
800
801/// Reports an informative message: print \p Msg for debugging purposes as well
802/// as an optimization remark. Uses either \p I as location of the remark, or
803/// otherwise \p TheLoop. If \p DL is passed, use it as debug location for the
804/// remark.
805static void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
807 Loop *TheLoop, Instruction *I = nullptr,
808 DebugLoc DL = {}) {
810 LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
811 ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop,
812 I, DL)
813 << Msg);
814}
815
816/// Report successful vectorization of the loop. In case an outer loop is
817/// vectorized, prepend "outer" to the vectorization remark.
819 VectorizationFactor VF, unsigned IC) {
821 "Vectorizing: ", TheLoop->isInnermost() ? "innermost loop" : "outer loop",
822 nullptr));
823 StringRef LoopType = TheLoop->isInnermost() ? "" : "outer ";
824 ORE->emit([&]() {
825 return OptimizationRemark(LV_NAME, "Vectorized", TheLoop->getStartLoc(),
826 TheLoop->getHeader())
827 << "vectorized " << LoopType << "loop (vectorization width: "
828 << ore::NV("VectorizationFactor", VF.Width)
829 << ", interleaved count: " << ore::NV("InterleaveCount", IC) << ")";
830 });
831}
832
833} // end namespace llvm
834
835namespace llvm {
836
837// Loop vectorization cost-model hints how the scalar epilogue loop should be
838// lowered.
840
841 // The default: allowing scalar epilogues.
843
844 // Vectorization with OptForSize: don't allow epilogues.
846
847 // A special case of vectorisation with OptForSize: loops with a very small
848 // trip count are considered for vectorization under OptForSize, thereby
849 // making sure the cost of their loop body is dominant, free of runtime
850 // guards and scalar iteration overheads.
852
853 // Loop hint predicate indicating an epilogue is undesired.
855
856 // Directive indicating we must either tail fold or not vectorize
858};
859
860/// LoopVectorizationCostModel - estimates the expected speedups due to
861/// vectorization.
862/// In many cases vectorization is not profitable. This can happen for a
863/// number of reasons. In this class we mainly attempt to predict the
864/// expected speedup/slowdowns due to the supported instruction set. We use the
865/// TargetTransformInfo to query the different backends for the cost of
866/// different operations.
869
870public:
878 std::function<BlockFrequencyInfo &()> GetBFI,
879 const Function *F, const LoopVectorizeHints *Hints,
881 : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
882 TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), GetBFI(GetBFI),
885 if (TTI.supportsScalableVectors() || ForceTargetSupportsScalableVectors)
886 initializeVScaleForTuning();
888 }
889
890 /// \return An upper bound for the vectorization factors (both fixed and
891 /// scalable). If the factors are 0, vectorization and interleaving should be
892 /// avoided up front.
893 FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);
894
895 /// \return True if runtime checks are required for vectorization, and false
896 /// otherwise.
897 bool runtimeChecksRequired();
898
899 /// Setup cost-based decisions for user vectorization factor.
900 /// \return true if the UserVF is a feasible VF to be chosen.
903 return expectedCost(UserVF).isValid();
904 }
905
906 /// \return True if maximizing vector bandwidth is enabled by the target or
907 /// user options, for the given register kind.
908 bool useMaxBandwidth(TargetTransformInfo::RegisterKind RegKind);
909
910 /// \return True if register pressure should be considered for the given VF.
911 bool shouldConsiderRegPressureForVF(ElementCount VF);
912
913 /// \return The size (in bits) of the smallest and widest types in the code
914 /// that needs to be vectorized. We ignore values that remain scalar such as
915 /// 64 bit loop indices.
916 std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
917
918 /// Memory access instruction may be vectorized in more than one way.
919 /// Form of instruction after vectorization depends on cost.
920 /// This function takes cost-based decisions for Load/Store instructions
921 /// and collects them in a map. This decisions map is used for building
922 /// the lists of loop-uniform and loop-scalar instructions.
923 /// The calculated cost is saved with widening decision in order to
924 /// avoid redundant calculations.
925 void setCostBasedWideningDecision(ElementCount VF);
926
927 /// A call may be vectorized in different ways depending on whether we have
928 /// vectorized variants available and whether the target supports masking.
929 /// This function analyzes all calls in the function at the supplied VF,
930 /// makes a decision based on the costs of available options, and stores that
931 /// decision in a map for use in planning and plan execution.
932 void setVectorizedCallDecision(ElementCount VF);
933
934 /// Collect values we want to ignore in the cost model.
935 void collectValuesToIgnore();
936
937 /// Collect all element types in the loop for which widening is needed.
938 void collectElementTypesForWidening();
939
940 /// Split reductions into those that happen in the loop, and those that happen
941 /// outside. In-loop reductions are collected into InLoopReductions.
942 void collectInLoopReductions();
943
944 /// Returns true if we should use strict in-order reductions for the given
945 /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
946 /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
947 /// of FP operations.
948 bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) const {
949 return !Hints->allowReordering() && RdxDesc.isOrdered();
950 }
951
952 /// \returns The smallest bitwidth each instruction can be represented with.
953 /// The vector equivalents of these instructions should be truncated to this
954 /// type.
956 return MinBWs;
957 }
958
959 /// \returns True if it is more profitable to scalarize instruction \p I for
960 /// vectorization factor \p VF.
962 assert(VF.isVector() &&
963 "Profitable to scalarize relevant only for VF > 1.");
964 assert(
965 TheLoop->isInnermost() &&
966 "cost-model should not be used for outer loops (in VPlan-native path)");
967
968 auto Scalars = InstsToScalarize.find(VF);
969 assert(Scalars != InstsToScalarize.end() &&
970 "VF not yet analyzed for scalarization profitability");
971 return Scalars->second.contains(I);
972 }
973
974 /// Returns true if \p I is known to be uniform after vectorization.
976 assert(
977 TheLoop->isInnermost() &&
978 "cost-model should not be used for outer loops (in VPlan-native path)");
979
980 // If VF is scalar, then all instructions are trivially uniform.
981 if (VF.isScalar())
982 return true;
983
984 // Pseudo probes must be duplicated per vector lane so that the
985 // profiled loop trip count is not undercounted.
987 return false;
988
989 auto UniformsPerVF = Uniforms.find(VF);
990 assert(UniformsPerVF != Uniforms.end() &&
991 "VF not yet analyzed for uniformity");
992 return UniformsPerVF->second.count(I);
993 }
994
995 /// Returns true if \p I is known to be scalar after vectorization.
997 assert(
998 TheLoop->isInnermost() &&
999 "cost-model should not be used for outer loops (in VPlan-native path)");
1000 if (VF.isScalar())
1001 return true;
1002
1003 auto ScalarsPerVF = Scalars.find(VF);
1004 assert(ScalarsPerVF != Scalars.end() &&
1005 "Scalar values are not calculated for VF");
1006 return ScalarsPerVF->second.count(I);
1007 }
1008
1009 /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1010 /// for vectorization factor \p VF.
1012 // Truncs must truncate at most to their destination type.
1013 if (isa_and_nonnull<TruncInst>(I) && MinBWs.contains(I) &&
1014 I->getType()->getScalarSizeInBits() < MinBWs.lookup(I))
1015 return false;
1016 return VF.isVector() && MinBWs.contains(I) &&
1017 !isProfitableToScalarize(I, VF) &&
1019 }
1020
1021 /// Decision that was taken during cost calculation for memory instruction.
1024 CM_Widen, // For consecutive accesses with stride +1.
1025 CM_Widen_Reverse, // For consecutive accesses with stride -1.
1031 };
1032
1033 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1034 /// instruction \p I and vector width \p VF.
1037 assert(VF.isVector() && "Expected VF >=2");
1038 WideningDecisions[{I, VF}] = {W, Cost};
1039 }
1040
1041 /// Save vectorization decision \p W and \p Cost taken by the cost model for
1042 /// interleaving group \p Grp and vector width \p VF.
1046 assert(VF.isVector() && "Expected VF >=2");
1047 /// Broadcast this decision to all instructions inside the group.
1048 /// When interleaving, the cost will only be assigned to one instruction, the
1049 /// insert position. For other cases, add the appropriate fraction of the
1050 /// total cost to each instruction. This ensures accurate costs are used,
1051 /// even if the insert position instruction is not used.
1052 InstructionCost InsertPosCost = Cost;
1053 InstructionCost OtherMemberCost = 0;
1054 if (W != CM_Interleave)
1055 OtherMemberCost = InsertPosCost = Cost / Grp->getNumMembers();
1057 for (unsigned Idx = 0; Idx < Grp->getFactor(); ++Idx) {
1058 if (auto *I = Grp->getMember(Idx)) {
1059 if (Grp->getInsertPos() == I)
1060 WideningDecisions[{I, VF}] = {W, InsertPosCost};
1061 else
1062 WideningDecisions[{I, VF}] = {W, OtherMemberCost};
1063 }
1064 }
1065 }
1066
1067 /// Return the cost model decision for the given instruction \p I and vector
1068 /// width \p VF. Return CM_Unknown if this instruction did not pass
1069 /// through the cost modeling.
1071 assert(VF.isVector() && "Expected VF to be a vector VF");
1072 assert(
1073 TheLoop->isInnermost() &&
1074 "cost-model should not be used for outer loops (in VPlan-native path)");
1075
1076 std::pair<Instruction *, ElementCount> InstOnVF(I, VF);
1077 auto Itr = WideningDecisions.find(InstOnVF);
1078 if (Itr == WideningDecisions.end())
1079 return CM_Unknown;
1080 return Itr->second.first;
1081 }
1082
1083 /// Return the vectorization cost for the given instruction \p I and vector
1084 /// width \p VF.
1086 assert(VF.isVector() && "Expected VF >=2");
1087 std::pair<Instruction *, ElementCount> InstOnVF(I, VF);
1088 assert(WideningDecisions.contains(InstOnVF) &&
1089 "The cost is not calculated");
1090 return WideningDecisions[InstOnVF].second;
1091 }
1092
1100
1102 Function *Variant, Intrinsic::ID IID,
1103 std::optional<unsigned> MaskPos,
1105 assert(!VF.isScalar() && "Expected vector VF");
1106 CallWideningDecisions[{CI, VF}] = {Kind, Variant, IID, MaskPos, Cost};
1107 }
1108
1110 ElementCount VF) const {
1111 assert(!VF.isScalar() && "Expected vector VF");
1112 auto I = CallWideningDecisions.find({CI, VF});
1113 if (I == CallWideningDecisions.end())
1114 return {CM_Unknown, nullptr, Intrinsic::not_intrinsic, std::nullopt, 0};
1115 return I->second;
1116 }
1117
1118 /// Return True if instruction \p I is an optimizable truncate whose operand
1119 /// is an induction variable. Such a truncate will be removed by adding a new
1120 /// induction variable with the destination type.
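/// For example, a `trunc i64 %iv to i32` of an i64 induction variable can be
/// replaced by introducing a new i32 induction variable (illustrative IR).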
1122 // If the instruction is not a truncate, return false.
1123 auto *Trunc = dyn_cast<TruncInst>(I);
1124 if (!Trunc)
1125 return false;
1126
1127 // Get the source and destination types of the truncate.
1128 Type *SrcTy = toVectorTy(Trunc->getSrcTy(), VF);
1129 Type *DestTy = toVectorTy(Trunc->getDestTy(), VF);
1130
1131 // If the truncate is free for the given types, return false. Replacing a
1132 // free truncate with an induction variable would add an induction variable
1133 // update instruction to each iteration of the loop. We exclude from this
1134 // check the primary induction variable since it will need an update
1135 // instruction regardless.
1136 Value *Op = Trunc->getOperand(0);
1137 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1138 return false;
1139
1140 // If the truncated value is not an induction variable, return false.
1141 return Legal->isInductionPhi(Op);
1142 }
1143
1144 /// Collects the instructions to scalarize for each predicated instruction in
1145 /// the loop.
1146 void collectInstsToScalarize(ElementCount VF);
1147
1148 /// Collect values that will not be widened, including Uniforms, Scalars, and
1149 /// Instructions to Scalarize for the given \p VF.
1150 /// The sets depend on CM decision for Load/Store instructions
1151 /// that may be vectorized as interleave, gather-scatter or scalarized.
1152 /// Also make a decision on what to do about call instructions in the loop
1153 /// at that VF -- scalarize, call a known vector routine, or call a
1154 /// vector intrinsic.
1156 // Do the analysis once.
1157 if (VF.isScalar() || Uniforms.contains(VF))
1158 return;
1160 collectLoopUniforms(VF);
1162 collectLoopScalars(VF);
1164 }
1165
1166 /// Returns true if the target machine supports masked store operation
1167 /// for the given \p DataType and kind of access to \p Ptr.
1168 bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment,
1169 unsigned AddressSpace) const {
1170 return Legal->isConsecutivePtr(DataType, Ptr) &&
1172 TTI.isLegalMaskedStore(DataType, Alignment, AddressSpace));
1173 }
1174
1175 /// Returns true if the target machine supports masked load operation
1176 /// for the given \p DataType and kind of access to \p Ptr.
1177 bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment,
1178 unsigned AddressSpace) const {
1179 return Legal->isConsecutivePtr(DataType, Ptr) &&
1181 TTI.isLegalMaskedLoad(DataType, Alignment, AddressSpace));
1182 }
1183
1184 /// Returns true if the target machine can represent \p V as a masked gather
1185 /// or scatter operation.
1187 bool LI = isa<LoadInst>(V);
1188 bool SI = isa<StoreInst>(V);
1189 if (!LI && !SI)
1190 return false;
1191 auto *Ty = getLoadStoreType(V);
1193 if (VF.isVector())
1194 Ty = VectorType::get(Ty, VF);
1195 return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
1196 (SI && TTI.isLegalMaskedScatter(Ty, Align));
1197 }
1198
1199 /// Returns true if the target machine supports all of the reduction
1200 /// variables found for the given VF.
1202 return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1203 const RecurrenceDescriptor &RdxDesc = Reduction.second;
1204 return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1205 }));
1206 }
1207
1208 /// Given costs for both strategies, return true if the scalar predication
1209 /// lowering should be used for div/rem. This incorporates an override
1210 /// option so it is not simply a cost comparison.
1212 InstructionCost SafeDivisorCost) const {
1213 switch (ForceSafeDivisor) {
1214 case cl::BOU_UNSET:
1215 return ScalarCost < SafeDivisorCost;
1216 case cl::BOU_TRUE:
1217 return false;
1218 case cl::BOU_FALSE:
1219 return true;
1220 }
1221 llvm_unreachable("impossible case value");
1222 }
1223
1224 /// Returns true if \p I is an instruction which requires predication and
1225 /// for which our chosen predication strategy is scalarization (i.e. we
1226 /// don't have an alternate strategy such as masking available).
1227 /// \p VF is the vectorization factor that will be used to vectorize \p I.
1228 bool isScalarWithPredication(Instruction *I, ElementCount VF);
1229
1230 /// Wrapper function for LoopVectorizationLegality::isMaskRequired,
1231 /// that passes the Instruction \p I and if we fold tail.
1232 bool isMaskRequired(Instruction *I) const;
1233
1234 /// Returns true if \p I is an instruction that needs to be predicated
1235 /// at runtime. The result is independent of the predication mechanism.
1236 /// Superset of instructions that return true for isScalarWithPredication.
1237 bool isPredicatedInst(Instruction *I) const;
1238
1239 /// A helper function that returns how much we should divide the cost of a
1240 /// predicated block by. Typically this is the reciprocal of the block
1241 /// probability, i.e. if we return X we are assuming the predicated block will
1242 /// execute once for every X iterations of the loop header, so the block should
1243 /// only contribute 1/X of its cost to the total cost calculation. When
1244 /// optimizing for code size the divisor is just 1, as code-size costs don't
1245 /// depend on execution probabilities.
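/// For example, a divisor of 4 means the block is expected to execute on one
/// in four header iterations, so only a quarter of its cost is accounted for
/// when optimizing for speed.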
1246 ///
1247 /// Note that if a block wasn't originally predicated but was predicated due
1248 /// to tail folding, the divisor will still be 1 because it will execute for
1249 /// every iteration of the loop header.
1250 inline uint64_t
1251 getPredBlockCostDivisor(TargetTransformInfo::TargetCostKind CostKind,
1252 const BasicBlock *BB);
1253
1254 /// Returns true if an artificially high cost for emulated masked memrefs
1255 /// should be used.
1256 bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);
1257
1258 /// Return the costs for our two available strategies for lowering a
1259 /// div/rem operation which requires speculating at least one lane.
1260 /// First result is for scalarization (will be invalid for scalable
1261 /// vectors); second is for the safe-divisor strategy.
1262 std::pair<InstructionCost, InstructionCost>
1263 getDivRemSpeculationCost(Instruction *I, ElementCount VF);
1264
1265 /// Returns true if \p I is a memory instruction with consecutive memory
1266 /// access that can be widened.
1267 bool memoryInstructionCanBeWidened(Instruction *I, ElementCount VF);
1268
1269 /// Returns true if \p I is a memory instruction in an interleaved-group
1270 /// of memory accesses that can be vectorized with wide vector loads/stores
1271 /// and shuffles.
1272 bool interleavedAccessCanBeWidened(Instruction *I, ElementCount VF) const;
1273
1274 /// Check if \p Instr belongs to any interleaved access group.
1276 return InterleaveInfo.isInterleaved(Instr);
1277 }
1278
1279 /// Get the interleaved access group that \p Instr belongs to.
1282 return InterleaveInfo.getInterleaveGroup(Instr);
1283 }
1284
1285 /// Returns true if we're required to use a scalar epilogue for at least
1286 /// the final iteration of the original loop.
1287 bool requiresScalarEpilogue(bool IsVectorizing) const {
1288 if (!isScalarEpilogueAllowed()) {
1289 LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue\n");
1290 return false;
1291 }
1292 // If we might exit from anywhere but the latch and early exit vectorization
1293 // is disabled, we must run the exiting iteration in scalar form.
1294 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch() &&
1295 !(EnableEarlyExitVectorization && Legal->hasUncountableEarlyExit())) {
1296 LLVM_DEBUG(dbgs() << "LV: Loop requires scalar epilogue: not exiting "
1297 "from latch block\n");
1298 return true;
1299 }
1300 if (IsVectorizing && InterleaveInfo.requiresScalarEpilogue()) {
1301 LLVM_DEBUG(dbgs() << "LV: Loop requires scalar epilogue: "
1302 "interleaved group requires scalar epilogue\n");
1303 return true;
1304 }
1305 LLVM_DEBUG(dbgs() << "LV: Loop does not require scalar epilogue\n");
1306 return false;
1307 }
1308
1309 /// Returns true if a scalar epilogue is allowed (e.g., not prevented by
1310 /// optsize or a loop hint annotation).
1312 return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1313 }
1314
1315 /// Returns true if tail-folding is preferred over a scalar epilogue.
1317 return ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate ||
1318 ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate;
1319 }
1320
1321 /// Returns the TailFoldingStyle that is best for the current loop.
1323 return ChosenTailFoldingStyle;
1324 }
1325
1326 /// Selects and saves TailFoldingStyle.
1327 /// \param IsScalableVF true if scalable vector factors enabled.
1328 /// \param UserIC User specific interleave count.
1329 void setTailFoldingStyle(bool IsScalableVF, unsigned UserIC) {
1330 assert(ChosenTailFoldingStyle == TailFoldingStyle::None &&
1331 "Tail folding must not be selected yet.");
1332 if (!Legal->canFoldTailByMasking()) {
1333 ChosenTailFoldingStyle = TailFoldingStyle::None;
1334 return;
1335 }
1336
1337 // Default to TTI preference, but allow command line override.
1338 ChosenTailFoldingStyle = TTI.getPreferredTailFoldingStyle();
1339 if (ForceTailFoldingStyle.getNumOccurrences())
1340 ChosenTailFoldingStyle = ForceTailFoldingStyle.getValue();
1341
1342 if (ChosenTailFoldingStyle != TailFoldingStyle::DataWithEVL)
1343 return;
1344 // Override EVL styles if needed.
1345 // FIXME: Investigate opportunity for fixed vector factor.
1346 bool EVLIsLegal = UserIC <= 1 && IsScalableVF &&
1347 TTI.hasActiveVectorLength() && !EnableVPlanNativePath;
1348 if (EVLIsLegal)
1349 return;
1350 // If for some reason EVL mode is unsupported, fall back to a scalar epilogue
1351 // if it's allowed, or DataWithoutLaneMask otherwise.
1352 if (ScalarEpilogueStatus == CM_ScalarEpilogueAllowed ||
1353 ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate)
1354 ChosenTailFoldingStyle = TailFoldingStyle::None;
1355 else
1356 ChosenTailFoldingStyle = TailFoldingStyle::DataWithoutLaneMask;
1357
1358 LLVM_DEBUG(
1359 dbgs() << "LV: Preference for VP intrinsics indicated. Will "
1360 "not try to generate VP Intrinsics "
1361 << (UserIC > 1
1362 ? "since interleave count specified is greater than 1.\n"
1363 : "due to non-interleaving reasons.\n"));
1364 }
1365
1366 /// Returns true if all loop blocks should be masked to fold tail loop.
1367 bool foldTailByMasking() const {
1369 }
1370
1371 /// Returns true if the use of wide lane masks is requested and the loop is
1372 /// using tail-folding with a lane mask for control flow.
1375 return false;
1376
1378 }
1379
1380 /// Return maximum safe number of elements to be processed per vector
1381 /// iteration, which do not prevent store-load forwarding and are safe with
1382 /// regard to the memory dependencies. Required for EVL-based VPlans to
1383 /// correctly calculate AVL (application vector length) as min(remaining AVL,
1384 /// MaxSafeElements).
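/// For example, with MaxSafeElements = 8 and 100 elements left to process,
/// the next AVL would be min(100, 8) = 8 (illustrative).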
1385 /// TODO: need to consider adjusting cost model to use this value as a
1386 /// vectorization factor for EVL-based vectorization.
1387 std::optional<unsigned> getMaxSafeElements() const { return MaxSafeElements; }
1388
1389 /// Returns true if the instructions in this block require predication
1390 /// for any reason, e.g. because tail folding now requires a predicate
1391 /// or because the block in the original loop was predicated.
1393 return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1394 }
1395
1396 /// Returns true if VP intrinsics with explicit vector length support should
1397 /// be generated in the tail folded loop.
1401
1402 /// Returns true if the Phi is part of an inloop reduction.
1403 bool isInLoopReduction(PHINode *Phi) const {
1404 return InLoopReductions.contains(Phi);
1405 }
1406
1407 /// Returns the set of in-loop reduction PHIs.
1409 return InLoopReductions;
1410 }
1411
1412 /// Returns true if the predicated reduction select should be used to set the
1413 /// incoming value for the reduction phi.
1414 bool usePredicatedReductionSelect(RecurKind RecurrenceKind) const {
1415 // Force to use predicated reduction select since the EVL of the
1416 // second-to-last iteration might not be VF*UF.
1417 if (foldTailWithEVL())
1418 return true;
1419
1420 // Note: For FindLast recurrences we prefer a predicated select to simplify
1421 // matching in handleFindLastReductions(), rather than handle multiple
1422 // cases.
1424 return true;
1425
1427 TTI.preferPredicatedReductionSelect();
1428 }
1429
1430 /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1431 /// with factor VF. Return the cost of the instruction, including
1432 /// scalarization overhead if it's needed.
1433 InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1434
1435 /// Estimate cost of a call instruction CI if it were vectorized with factor
1436 /// VF. Return the cost of the instruction, including scalarization overhead
1437 /// if it's needed.
1438 InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF) const;
1439
1440 /// Invalidates decisions already taken by the cost model.
1442 WideningDecisions.clear();
1443 CallWideningDecisions.clear();
1444 Uniforms.clear();
1445 Scalars.clear();
1446 }
1447
1448 /// Returns the expected execution cost. The unit of the cost does
1449 /// not matter because we use the 'cost' units to compare different
1450 /// vector widths. The cost that is returned is *not* normalized by
1451 /// the factor width.
1452 InstructionCost expectedCost(ElementCount VF);
1453
1454 /// Returns true if epilogue vectorization is considered profitable, and
1455 /// false otherwise.
1456 /// \p VF is the vectorization factor chosen for the original loop.
1457 /// \p IC is an additional scaling factor applied to VF before
1458 /// comparing to EpilogueVectorizationMinVF.
1459 bool isEpilogueVectorizationProfitable(const ElementCount VF,
1460 const unsigned IC) const;
1461
1462 /// Returns the execution time cost of an instruction for a given vector
1463 /// width. Vector width of one means scalar.
1464 InstructionCost getInstructionCost(Instruction *I, ElementCount VF);
1465
1466 /// Return the cost of instructions in an inloop reduction pattern, if I is
1467 /// part of that pattern.
1468 std::optional<InstructionCost> getReductionPatternCost(Instruction *I,
1469 ElementCount VF,
1470 Type *VectorTy) const;
1471
1472 /// Returns true if \p Op should be considered invariant and if it is
1473 /// trivially hoistable.
1474 bool shouldConsiderInvariant(Value *Op);
1475
1476 /// Return the value of vscale used for tuning the cost model.
1477 std::optional<unsigned> getVScaleForTuning() const { return VScaleForTuning; }
1478
1479private:
1480 unsigned NumPredStores = 0;
1481
1482 /// Used to store the value of vscale used for tuning the cost model. It is
1483 /// initialized during object construction.
1484 std::optional<unsigned> VScaleForTuning;
1485
1486 /// Initializes the value of vscale used for tuning the cost model. If
1487 /// vscale_range.min == vscale_range.max then return vscale_range.max, else
1488 /// return the value returned by the corresponding TTI method.
1489 void initializeVScaleForTuning() {
1490 const Function *Fn = TheLoop->getHeader()->getParent();
1491 if (Fn->hasFnAttribute(Attribute::VScaleRange)) {
1492 auto Attr = Fn->getFnAttribute(Attribute::VScaleRange);
1493 auto Min = Attr.getVScaleRangeMin();
1494 auto Max = Attr.getVScaleRangeMax();
1495 if (Max && Min == Max) {
1496 VScaleForTuning = Max;
1497 return;
1498 }
1499 }
1500
1501 VScaleForTuning = TTI.getVScaleForTuning();
1502 }
1503
1504 /// \return An upper bound for the vectorization factors for both
1505 /// fixed and scalable vectorization, where the minimum-known number of
1506 /// elements is a power-of-2 larger than zero. If scalable vectorization is
1507 /// disabled or unsupported, then the scalable part will be equal to
1508 /// ElementCount::getScalable(0).
1509 FixedScalableVFPair computeFeasibleMaxVF(unsigned MaxTripCount,
1510 ElementCount UserVF, unsigned UserIC,
1511 bool FoldTailByMasking);
1512
1513 /// If \p VF * \p UserIC > MaxTripCount, clamps VF to the next lower VF that
1514 /// results in VF * UserIC <= MaxTripCount.
1515 ElementCount clampVFByMaxTripCount(ElementCount VF, unsigned MaxTripCount,
1516 unsigned UserIC,
1517 bool FoldTailByMasking) const;
1518
1519 /// \return the maximized element count based on the target's vector
1520 /// registers and the loop trip-count, but limited to a maximum safe VF.
1521 /// This is a helper function of computeFeasibleMaxVF.
1522 ElementCount getMaximizedVFForTarget(unsigned MaxTripCount,
1523 unsigned SmallestType,
1524 unsigned WidestType,
1525 ElementCount MaxSafeVF, unsigned UserIC,
1526 bool FoldTailByMasking);
1527
1528 /// Checks if scalable vectorization is supported and enabled. Caches the
1529 /// result to avoid repeated debug dumps for repeated queries.
1530 bool isScalableVectorizationAllowed();
1531
1532 /// \return the maximum legal scalable VF, based on the safe max number
1533 /// of elements.
1534 ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1535
1536 /// Calculate vectorization cost of memory instruction \p I.
1537 InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1538
1539 /// The cost computation for scalarized memory instruction.
1540 InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1541
1542 /// The cost computation for interleaving group of memory instructions.
1543 InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1544
1545 /// The cost computation for Gather/Scatter instruction.
1546 InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1547
1548 /// The cost computation for widening instruction \p I with consecutive
1549 /// memory access.
1550 InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1551
1552 /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1553 /// Load: scalar load + broadcast.
1554 /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1555 /// element)
1556 InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1557
1558 /// Estimate the overhead of scalarizing an instruction. This is a
1559 /// convenience wrapper for the type-based getScalarizationOverhead API.
1561 ElementCount VF) const;
1562
1563 /// Map of scalar integer values to the smallest bitwidth they can be legally
1564 /// represented as. The vector equivalents of these values should be truncated
1565 /// to this type.
1566 MapVector<Instruction *, uint64_t> MinBWs;
1567
1568 /// A type representing the costs for instructions if they were to be
1569 /// scalarized rather than vectorized. The entries are Instruction-Cost
1570 /// pairs.
1571 using ScalarCostsTy = MapVector<Instruction *, InstructionCost>;
1572
1573 /// A set containing all BasicBlocks that are known to be present after
1574 /// vectorization as a predicated block.
1575 DenseMap<ElementCount, SmallPtrSet<BasicBlock *, 4>>
1576 PredicatedBBsAfterVectorization;
1577
1578 /// Records whether it is allowed to have the original scalar loop execute at
1579 /// least once. This may be needed as a fallback loop in case runtime
1580 /// aliasing/dependence checks fail, or to handle the tail/remainder
1581 /// iterations when the trip count is unknown or doesn't divide by the VF,
1582 /// or as a peel-loop to handle gaps in interleave-groups.
1583 /// Under optsize and when the trip count is very small we don't allow any
1584 /// iterations to execute in the scalar loop.
1585 ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1586
1587 /// The finally chosen tail folding style.
1588 TailFoldingStyle ChosenTailFoldingStyle = TailFoldingStyle::None;
1589
1590 /// true if scalable vectorization is supported and enabled.
1591 std::optional<bool> IsScalableVectorizationAllowed;
1592
1593 /// Maximum safe number of elements to be processed per vector iteration,
1594 /// which do not prevent store-load forwarding and are safe with regard to the
1595 /// memory dependencies. Required for EVL-based vectorization, where this
1596 /// value is used as the upper bound of the safe AVL.
1597 std::optional<unsigned> MaxSafeElements;
1598
1599 /// A map holding scalar costs for different vectorization factors. The
1600 /// presence of a cost for an instruction in the mapping indicates that the
1601 /// instruction will be scalarized when vectorizing with the associated
1602 /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1603 MapVector<ElementCount, ScalarCostsTy> InstsToScalarize;
1604
1605 /// Holds the instructions known to be uniform after vectorization.
1606 /// The data is collected per VF.
1607 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1608
1609 /// Holds the instructions known to be scalar after vectorization.
1610 /// The data is collected per VF.
1611 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1612
1613 /// Holds the instructions (address computations) that are forced to be
1614 /// scalarized.
1615 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1616
1617 /// PHINodes of the reductions that should be expanded in-loop.
1618 SmallPtrSet<PHINode *, 4> InLoopReductions;
1619
1620 /// A map of in-loop reduction operations and their immediate chain operand.
1621 /// FIXME: This can be removed once reductions can be costed correctly in
1622 /// VPlan. This was added to allow quick lookup of the inloop operations.
1623 DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1624
1625 /// Returns the expected difference in cost from scalarizing the expression
1626 /// feeding a predicated instruction \p PredInst. The instructions to
1627 /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1628 /// non-negative return value implies the expression will be scalarized.
1629 /// Currently, only single-use chains are considered for scalarization.
1630 InstructionCost computePredInstDiscount(Instruction *PredInst,
1631 ScalarCostsTy &ScalarCosts,
1632 ElementCount VF);
1633
1634 /// Collect the instructions that are uniform after vectorization. An
1635 /// instruction is uniform if we represent it with a single scalar value in
1636 /// the vectorized loop corresponding to each vector iteration. Examples of
1637 /// uniform instructions include pointer operands of consecutive or
1638 /// interleaved memory accesses. Note that although uniformity implies an
1639 /// instruction will be scalar, the reverse is not true. In general, a
1640 /// scalarized instruction will be represented by VF scalar values in the
1641 /// vectorized loop, each corresponding to an iteration of the original
1642 /// scalar loop.
1643 void collectLoopUniforms(ElementCount VF);
1644
1645 /// Collect the instructions that are scalar after vectorization. An
1646 /// instruction is scalar if it is known to be uniform or will be scalarized
1647 /// during vectorization. collectLoopScalars should only add non-uniform nodes
1648 /// to the list if they are used by a load/store instruction that is marked as
1649 /// CM_Scalarize. Non-uniform scalarized instructions will be represented by
1650 /// VF values in the vectorized loop, each corresponding to an iteration of
1651 /// the original scalar loop.
1652 void collectLoopScalars(ElementCount VF);
1653
1654 /// Keeps cost model vectorization decision and cost for instructions.
1655 /// Right now it is used for memory instructions only.
1656 using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1657 std::pair<InstWidening, InstructionCost>>;
1658
1659 DecisionList WideningDecisions;
1660
1661 using CallDecisionList =
1662 DenseMap<std::pair<CallInst *, ElementCount>, CallWideningDecision>;
1663
1664 CallDecisionList CallWideningDecisions;
1665
1666 /// Returns true if \p V is expected to be vectorized and it needs to be
1667 /// extracted.
1668 bool needsExtract(Value *V, ElementCount VF) const {
1669 auto *I = dyn_cast<Instruction>(V);
1670 if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1671 TheLoop->isLoopInvariant(I) ||
1672 getWideningDecision(I, VF) == CM_Scalarize ||
1673 (isa<CallInst>(I) &&
1674 getCallWideningDecision(cast<CallInst>(I), VF).Kind == CM_Scalarize))
1675 return false;
1676
1677 // Assume we can vectorize V (and hence we need extraction) if the
1678 // scalars are not computed yet. This can happen, because it is called
1679 // via getScalarizationOverhead from setCostBasedWideningDecision, before
1680 // the scalars are collected. That should be a safe assumption in most
1681 // cases, because we check if the operands have vectorizable types
1682 // beforehand in LoopVectorizationLegality.
1683 return !Scalars.contains(VF) || !isScalarAfterVectorization(I, VF);
1684 };
1685
1686 /// Returns a range containing only operands needing to be extracted.
1687 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1688 ElementCount VF) const {
1689
1690 SmallPtrSet<const Value *, 4> UniqueOperands;
1691 SmallVector<Value *, 4> Res;
1692 for (Value *Op : Ops) {
1693 if (isa<Constant>(Op) || !UniqueOperands.insert(Op).second ||
1694 !needsExtract(Op, VF))
1695 continue;
1696 Res.push_back(Op);
1697 }
1698 return Res;
1699 }
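  // Illustrative example (hypothetical values, not part of the original
  // source): for Ops = {%a, 7, %a, %inv}, where %a is an in-loop instruction
  // that will be vectorized, 7 is a constant and %inv is defined outside the
  // loop, the returned vector contains a single %a: constants, duplicates and
  // operands for which needsExtract() returns false are filtered out.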
1700
1701public:
1702 /// The loop that we evaluate.
1704
1705 /// Predicated scalar evolution analysis.
1707
1708 /// Loop Info analysis.
1710
1711 /// Vectorization legality.
1713
1714 /// Vector target information.
1716
1717 /// Target Library Info.
1719
1720 /// Demanded bits analysis.
1722
1723 /// Assumption cache.
1725
1726 /// Interface to emit optimization remarks.
1728
1729 /// A function to lazily fetch BlockFrequencyInfo. This avoids computing it
1730 /// unless necessary, e.g. when the loop isn't legal to vectorize or when
1731 /// there is no predication.
1732 std::function<BlockFrequencyInfo &()> GetBFI;
1733 /// The BlockFrequencyInfo returned from GetBFI.
1735 /// Returns the BlockFrequencyInfo for the function if cached, otherwise
1736 /// fetches it via GetBFI. Avoids an indirect call to the std::function.
1738 if (!BFI)
1739 BFI = &GetBFI();
1740 return *BFI;
1741 }
1742
1744
1745 /// Loop Vectorize Hint.
1747
1748 /// The interleave access information contains groups of interleaved accesses
1749 /// with the same stride and close to each other.
1751
1752 /// Values to ignore in the cost model.
1754
1755 /// Values to ignore in the cost model when VF > 1.
1757
1758 /// All element types found in the loop.
1760
1761 /// The kind of cost that we are calculating
1763
1764 /// Whether this loop should be optimized for size based on function attribute
1765 /// or profile information.
1767
1768 /// The highest VF possible for this loop, without using MaxBandwidth.
1770};
1771} // end namespace llvm
1772
1773namespace {
1774/// Helper struct to manage generating runtime checks for vectorization.
1775///
1776/// The runtime checks are created up-front in temporary blocks to allow better
1777/// estimating the cost and un-linked from the existing IR. After deciding to
1778/// vectorize, the checks are moved back. If deciding not to vectorize, the
1779/// temporary blocks are completely removed.
1780class GeneratedRTChecks {
1781 /// Basic block which contains the generated SCEV checks, if any.
1782 BasicBlock *SCEVCheckBlock = nullptr;
1783
1784 /// The value representing the result of the generated SCEV checks. If it is
1785 /// nullptr no SCEV checks have been generated.
1786 Value *SCEVCheckCond = nullptr;
1787
1788 /// Basic block which contains the generated memory runtime checks, if any.
1789 BasicBlock *MemCheckBlock = nullptr;
1790
1791 /// The value representing the result of the generated memory runtime checks.
1792 /// If it is nullptr no memory runtime checks have been generated.
1793 Value *MemRuntimeCheckCond = nullptr;
1794
1795 DominatorTree *DT;
1796 LoopInfo *LI;
1798
1799 SCEVExpander SCEVExp;
1800 SCEVExpander MemCheckExp;
1801
1802 bool CostTooHigh = false;
1803
1804 Loop *OuterLoop = nullptr;
1805
1807
1808 /// The kind of cost that we are calculating
1810
1811public:
1812 GeneratedRTChecks(PredicatedScalarEvolution &PSE, DominatorTree *DT,
1815 : DT(DT), LI(LI), TTI(TTI),
1816 SCEVExp(*PSE.getSE(), "scev.check", /*PreserveLCSSA=*/false),
1817 MemCheckExp(*PSE.getSE(), "scev.check", /*PreserveLCSSA=*/false),
1818 PSE(PSE), CostKind(CostKind) {}
1819
1820 /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
1821 /// accurately estimate the cost of the runtime checks. The blocks are
1822 /// un-linked from the IR and are added back during vector code generation. If
1823 /// there is no vector code generation, the check blocks are removed
1824 /// completely.
1825 void create(Loop *L, const LoopAccessInfo &LAI,
1826 const SCEVPredicate &UnionPred, ElementCount VF, unsigned IC,
1827 OptimizationRemarkEmitter &ORE) {
1828
1829 // Hard cutoff to limit compile-time increase in case a very large number of
1830 // runtime checks need to be generated.
1831 // TODO: Skip cutoff if the loop is guaranteed to execute, e.g. due to
1832 // profile info.
1833 CostTooHigh =
1835 if (CostTooHigh) {
1836 // Mark runtime checks as never succeeding when they exceed the threshold.
1837 MemRuntimeCheckCond = ConstantInt::getTrue(L->getHeader()->getContext());
1838 SCEVCheckCond = ConstantInt::getTrue(L->getHeader()->getContext());
1839 ORE.emit([&]() {
1840 return OptimizationRemarkAnalysisAliasing(
1841 DEBUG_TYPE, "TooManyMemoryRuntimeChecks", L->getStartLoc(),
1842 L->getHeader())
1843 << "loop not vectorized: too many memory checks needed";
1844 });
1845 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
1846 return;
1847 }
1848
1849 BasicBlock *LoopHeader = L->getHeader();
1850 BasicBlock *Preheader = L->getLoopPreheader();
1851
1852 // Use SplitBlock to create blocks for SCEV & memory runtime checks to
1853 // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
1854 // may be used by SCEVExpander. The blocks will be un-linked from their
1855 // predecessors and removed from LI & DT at the end of the function.
1856 if (!UnionPred.isAlwaysTrue()) {
1857 SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
1858 nullptr, "vector.scevcheck");
1859
1860 SCEVCheckCond = SCEVExp.expandCodeForPredicate(
1861 &UnionPred, SCEVCheckBlock->getTerminator());
1862 if (isa<Constant>(SCEVCheckCond)) {
1863 // Clean up directly after expanding the predicate to a constant, to
1864 // avoid further expansions re-using anything left over from SCEVExp.
1865 SCEVExpanderCleaner SCEVCleaner(SCEVExp);
1866 SCEVCleaner.cleanup();
1867 }
1868 }
1869
1870 const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
1871 if (RtPtrChecking.Need) {
1872 auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
1873 MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
1874 "vector.memcheck");
1875
1876 auto DiffChecks = RtPtrChecking.getDiffChecks();
1877 if (DiffChecks) {
1878 Value *RuntimeVF = nullptr;
1879 MemRuntimeCheckCond = addDiffRuntimeChecks(
1880 MemCheckBlock->getTerminator(), *DiffChecks, MemCheckExp,
1881 [VF, &RuntimeVF](IRBuilderBase &B, unsigned Bits) {
1882 if (!RuntimeVF)
1883 RuntimeVF = getRuntimeVF(B, B.getIntNTy(Bits), VF);
1884 return RuntimeVF;
1885 },
1886 IC);
1887 } else {
1888 MemRuntimeCheckCond = addRuntimeChecks(
1889 MemCheckBlock->getTerminator(), L, RtPtrChecking.getChecks(),
1891 }
1892 assert(MemRuntimeCheckCond &&
1893 "no RT checks generated although RtPtrChecking "
1894 "claimed checks are required");
1895 }
1896
1897 SCEVExp.eraseDeadInstructions(SCEVCheckCond);
1898
1899 if (!MemCheckBlock && !SCEVCheckBlock)
1900 return;
1901
1902 // Unhook the temporary block with the checks, update various places
1903 // accordingly.
1904 if (SCEVCheckBlock)
1905 SCEVCheckBlock->replaceAllUsesWith(Preheader);
1906 if (MemCheckBlock)
1907 MemCheckBlock->replaceAllUsesWith(Preheader);
1908
1909 if (SCEVCheckBlock) {
1910 SCEVCheckBlock->getTerminator()->moveBefore(
1911 Preheader->getTerminator()->getIterator());
1912 auto *UI = new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
1913 UI->setDebugLoc(DebugLoc::getTemporary());
1914 Preheader->getTerminator()->eraseFromParent();
1915 }
1916 if (MemCheckBlock) {
1917 MemCheckBlock->getTerminator()->moveBefore(
1918 Preheader->getTerminator()->getIterator());
1919 auto *UI = new UnreachableInst(Preheader->getContext(), MemCheckBlock);
1920 UI->setDebugLoc(DebugLoc::getTemporary());
1921 Preheader->getTerminator()->eraseFromParent();
1922 }
1923
1924 DT->changeImmediateDominator(LoopHeader, Preheader);
1925 if (MemCheckBlock) {
1926 DT->eraseNode(MemCheckBlock);
1927 LI->removeBlock(MemCheckBlock);
1928 }
1929 if (SCEVCheckBlock) {
1930 DT->eraseNode(SCEVCheckBlock);
1931 LI->removeBlock(SCEVCheckBlock);
1932 }
1933
1934 // Outer loop is used as part of the later cost calculations.
1935 OuterLoop = L->getParentLoop();
1936 }
1937
1939 if (SCEVCheckBlock || MemCheckBlock)
1940 LLVM_DEBUG(dbgs() << "Calculating cost of runtime checks:\n");
1941
1942 if (CostTooHigh) {
1944 Cost.setInvalid();
1945 LLVM_DEBUG(dbgs() << " number of checks exceeded threshold\n");
1946 return Cost;
1947 }
1948
1949 InstructionCost RTCheckCost = 0;
1950 if (SCEVCheckBlock)
1951 for (Instruction &I : *SCEVCheckBlock) {
1952 if (SCEVCheckBlock->getTerminator() == &I)
1953 continue;
1955 LLVM_DEBUG(dbgs() << " " << C << " for " << I << "\n");
1956 RTCheckCost += C;
1957 }
1958 if (MemCheckBlock) {
1959 InstructionCost MemCheckCost = 0;
1960 for (Instruction &I : *MemCheckBlock) {
1961 if (MemCheckBlock->getTerminator() == &I)
1962 continue;
1964 LLVM_DEBUG(dbgs() << " " << C << " for " << I << "\n");
1965 MemCheckCost += C;
1966 }
1967
1968 // If the runtime memory checks are being created inside an outer loop
1969 // we should find out if these checks are outer loop invariant. If so,
1970 // the checks will likely be hoisted out and so the effective cost will
1971 // be reduced according to the outer loop trip count.
1972 if (OuterLoop) {
1973 ScalarEvolution *SE = MemCheckExp.getSE();
1974 // TODO: If profitable, we could refine this further by analysing every
1975 // individual memory check, since there could be a mixture of loop
1976 // variant and invariant checks that mean the final condition is
1977 // variant.
1978 const SCEV *Cond = SE->getSCEV(MemRuntimeCheckCond);
1979 if (SE->isLoopInvariant(Cond, OuterLoop)) {
1980 // It seems reasonable to assume that we can reduce the effective
1981 // cost of the checks even when we know nothing about the trip
1982 // count. Assume that the outer loop executes at least twice.
1983 unsigned BestTripCount = 2;
1984
1985 // Get the best known TC estimate.
1986 if (auto EstimatedTC = getSmallBestKnownTC(
1987 PSE, OuterLoop, /* CanUseConstantMax = */ false))
1988 if (EstimatedTC->isFixed())
1989 BestTripCount = EstimatedTC->getFixedValue();
1990
1991 InstructionCost NewMemCheckCost = MemCheckCost / BestTripCount;
1992
1993 // Let's ensure the cost is always at least 1.
1994 NewMemCheckCost = std::max(NewMemCheckCost.getValue(),
1995 (InstructionCost::CostType)1);
1996
1997 if (BestTripCount > 1)
1999 << "We expect runtime memory checks to be hoisted "
2000 << "out of the outer loop. Cost reduced from "
2001 << MemCheckCost << " to " << NewMemCheckCost << '\n');
2002
2003 MemCheckCost = NewMemCheckCost;
2004 }
2005 }
2006
2007 RTCheckCost += MemCheckCost;
2008 }
2009
2010 if (SCEVCheckBlock || MemCheckBlock)
2011 LLVM_DEBUG(dbgs() << "Total cost of runtime checks: " << RTCheckCost
2012 << "\n");
2013
2014 return RTCheckCost;
2015 }
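  // Illustrative example (hypothetical costs, not part of the original
  // source): if the memory checks cost 8 and they are invariant in an outer
  // loop whose trip count is estimated as 4, the code above accounts for them
  // being hoisted and charges 8 / 4 = 2 (never less than 1) towards the total
  // runtime-check cost.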
2016
2017 /// Remove the created SCEV & memory runtime check blocks & instructions, if
2018 /// unused.
2019 ~GeneratedRTChecks() {
2020 SCEVExpanderCleaner SCEVCleaner(SCEVExp);
2021 SCEVExpanderCleaner MemCheckCleaner(MemCheckExp);
2022 bool SCEVChecksUsed = !SCEVCheckBlock || !pred_empty(SCEVCheckBlock);
2023 bool MemChecksUsed = !MemCheckBlock || !pred_empty(MemCheckBlock);
2024 if (SCEVChecksUsed)
2025 SCEVCleaner.markResultUsed();
2026
2027 if (MemChecksUsed) {
2028 MemCheckCleaner.markResultUsed();
2029 } else {
2030 auto &SE = *MemCheckExp.getSE();
2031 // Memory runtime check generation creates compares that use expanded
2032 // values. Remove them before running the SCEVExpanderCleaners.
2033 for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
2034 if (MemCheckExp.isInsertedInstruction(&I))
2035 continue;
2036 SE.forgetValue(&I);
2037 I.eraseFromParent();
2038 }
2039 }
2040 MemCheckCleaner.cleanup();
2041 SCEVCleaner.cleanup();
2042
2043 if (!SCEVChecksUsed)
2044 SCEVCheckBlock->eraseFromParent();
2045 if (!MemChecksUsed)
2046 MemCheckBlock->eraseFromParent();
2047 }
2048
2049 /// Retrieves the SCEVCheckCond and SCEVCheckBlock that were generated as IR
2050 /// outside VPlan.
2051 std::pair<Value *, BasicBlock *> getSCEVChecks() const {
2052 using namespace llvm::PatternMatch;
2053 if (!SCEVCheckCond || match(SCEVCheckCond, m_ZeroInt()))
2054 return {nullptr, nullptr};
2055
2056 return {SCEVCheckCond, SCEVCheckBlock};
2057 }
2058
2059 /// Retrieves the MemCheckCond and MemCheckBlock that were generated as IR
2060 /// outside VPlan.
2061 std::pair<Value *, BasicBlock *> getMemRuntimeChecks() const {
2062 using namespace llvm::PatternMatch;
2063 if (MemRuntimeCheckCond && match(MemRuntimeCheckCond, m_ZeroInt()))
2064 return {nullptr, nullptr};
2065 return {MemRuntimeCheckCond, MemCheckBlock};
2066 }
2067
2068 /// Return true if any runtime checks have been added
2069 bool hasChecks() const {
2070 return getSCEVChecks().first || getMemRuntimeChecks().first;
2071 }
2072};
2073} // namespace
2074
2076 return Style == TailFoldingStyle::Data ||
2078}
2079
2083
2084// Return true if \p OuterLp is an outer loop annotated with hints for explicit
2085// vectorization. The loop needs to be annotated with #pragma omp simd
2086// simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
2087// vector length information is not provided, vectorization is not considered
2088// explicit. Interleave hints are not allowed either. These limitations will be
2089// relaxed in the future.
2090// Please, note that we are currently forced to abuse the pragma 'clang
2091// vectorize' semantics. This pragma provides *auto-vectorization hints*
2092// (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2093// provides *explicit vectorization hints* (LV can bypass legal checks and
2094// assume that vectorization is legal). However, both hints are implemented
2095// using the same metadata (llvm.loop.vectorize, processed by
2096// LoopVectorizeHints). This will be fixed in the future when the native IR
2097// representation for pragma 'omp simd' is introduced.
2098static bool isExplicitVecOuterLoop(Loop *OuterLp,
2100 assert(!OuterLp->isInnermost() && "This is not an outer loop");
2101 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2102
2103 // Only outer loops with an explicit vectorization hint are supported.
2104 // Unannotated outer loops are ignored.
2106 return false;
2107
2108 Function *Fn = OuterLp->getHeader()->getParent();
2109 if (!Hints.allowVectorization(Fn, OuterLp,
2110 true /*VectorizeOnlyWhenForced*/)) {
2111 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
2112 return false;
2113 }
2114
2115 if (Hints.getInterleave() > 1) {
2116 // TODO: Interleave support is future work.
2117 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
2118 "outer loops.\n");
2119 Hints.emitRemarkWithHints();
2120 return false;
2121 }
2122
2123 return true;
2124}
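// Illustrative example of an outer loop accepted by the function above
// (hypothetical user code, not part of this file):
//   #pragma clang loop vectorize(enable) vectorize_width(4)
//   for (int i = 0; i < n; ++i)      // annotated outer loop, explicit VF
//     for (int j = 0; j < m; ++j)
//       a[i][j] += b[i][j];
// An equivalent '#pragma omp simd simdlen(4)' annotation is handled the same
// way, since both hints are lowered to llvm.loop.vectorize metadata.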
2125
2129 // Collect inner loops and outer loops without irreducible control flow. For
2130 // now, only collect outer loops that have explicit vectorization hints. If we
2131 // are stress testing the VPlan H-CFG construction, we collect the outermost
2132 // loop of every loop nest.
2133 if (L.isInnermost() || VPlanBuildStressTest ||
2135 LoopBlocksRPO RPOT(&L);
2136 RPOT.perform(LI);
2138 V.push_back(&L);
2139 // TODO: Collect inner loops inside marked outer loops in case
2140 // vectorization fails for the outer loop. Do not invoke
2141 // 'containsIrreducibleCFG' again for inner loops when the outer loop is
2142 // already known to be reducible. We can use an inherited attribute for
2143 // that.
2144 return;
2145 }
2146 }
2147 for (Loop *InnerL : L)
2148 collectSupportedLoops(*InnerL, LI, ORE, V);
2149}
2150
2151//===----------------------------------------------------------------------===//
2152// Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2153// LoopVectorizationCostModel and LoopVectorizationPlanner.
2154//===----------------------------------------------------------------------===//
2155
2156/// FIXME: The newly created binary instructions should contain nsw/nuw
2157/// flags, which can be found from the original scalar operations.
2158Value *
2160 Value *Step,
2162 const BinaryOperator *InductionBinOp) {
2163 using namespace llvm::PatternMatch;
2164 Type *StepTy = Step->getType();
2165 Value *CastedIndex = StepTy->isIntegerTy()
2166 ? B.CreateSExtOrTrunc(Index, StepTy)
2167 : B.CreateCast(Instruction::SIToFP, Index, StepTy);
2168 if (CastedIndex != Index) {
2169 CastedIndex->setName(CastedIndex->getName() + ".cast");
2170 Index = CastedIndex;
2171 }
2172
2173 // Note: the IR at this point is broken. We cannot use SE to create any new
2174 // SCEV and then expand it, hoping that SCEV's simplification will give us
2175 // more optimal code. Unfortunately, attempting to do so on invalid IR may
2176 // lead to various SCEV crashes. So all we can do is use the builder and rely
2177 // on InstCombine for future simplifications. Here we handle some trivial
2178 // cases only.
2179 auto CreateAdd = [&B](Value *X, Value *Y) {
2180 assert(X->getType() == Y->getType() && "Types don't match!");
2181 if (match(X, m_ZeroInt()))
2182 return Y;
2183 if (match(Y, m_ZeroInt()))
2184 return X;
2185 return B.CreateAdd(X, Y);
2186 };
2187
2188 // We allow X to be a vector type, in which case Y will potentially be
2189 // splatted into a vector with the same element count.
2190 auto CreateMul = [&B](Value *X, Value *Y) {
2191 assert(X->getType()->getScalarType() == Y->getType() &&
2192 "Types don't match!");
2193 if (match(X, m_One()))
2194 return Y;
2195 if (match(Y, m_One()))
2196 return X;
2197 VectorType *XVTy = dyn_cast<VectorType>(X->getType());
2198 if (XVTy && !isa<VectorType>(Y->getType()))
2199 Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
2200 return B.CreateMul(X, Y);
2201 };
2202
2203 switch (InductionKind) {
2205 assert(!isa<VectorType>(Index->getType()) &&
2206 "Vector indices not supported for integer inductions yet");
2207 assert(Index->getType() == StartValue->getType() &&
2208 "Index type does not match StartValue type");
2209 if (isa<ConstantInt>(Step) && cast<ConstantInt>(Step)->isMinusOne())
2210 return B.CreateSub(StartValue, Index);
2211 auto *Offset = CreateMul(Index, Step);
2212 return CreateAdd(StartValue, Offset);
2213 }
2215 return B.CreatePtrAdd(StartValue, CreateMul(Index, Step));
2217 assert(!isa<VectorType>(Index->getType()) &&
2218 "Vector indices not supported for FP inductions yet");
2219 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
2220 assert(InductionBinOp &&
2221 (InductionBinOp->getOpcode() == Instruction::FAdd ||
2222 InductionBinOp->getOpcode() == Instruction::FSub) &&
2223 "Original bin op should be defined for FP induction");
2224
2225 Value *MulExp = B.CreateFMul(Step, Index);
2226 return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
2227 "induction");
2228 }
2230 return nullptr;
2231 }
2232 llvm_unreachable("invalid enum");
2233}
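// Illustrative example (hypothetical values, not part of the original
// source): for an integer induction with start value %start and a constant
// step of 4, transforming index %idx emits roughly
//   %offset = mul i64 %idx, 4
//   %result = add i64 %start, %offset
// with the trivial mul-by-1/add-of-0 cases folded away by the helpers above.
// A pointer induction instead emits a ptradd of the start value and the
// scaled index, and an FP induction emits an fmul followed by the original
// fadd/fsub.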
2234
2235static std::optional<unsigned> getMaxVScale(const Function &F,
2236 const TargetTransformInfo &TTI) {
2237 if (std::optional<unsigned> MaxVScale = TTI.getMaxVScale())
2238 return MaxVScale;
2239
2240 if (F.hasFnAttribute(Attribute::VScaleRange))
2241 return F.getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();
2242
2243 return std::nullopt;
2244}
2245
2246/// For the given VF and UF and maximum trip count computed for the loop, return
2247/// whether the induction variable might overflow in the vectorized loop. If not,
2248/// then we know a runtime overflow check always evaluates to false and can be
2249/// removed.
2251 const LoopVectorizationCostModel *Cost,
2252 ElementCount VF, std::optional<unsigned> UF = std::nullopt) {
2253 // Always be conservative if we don't know the exact unroll factor.
2254 unsigned MaxUF = UF ? *UF : Cost->TTI.getMaxInterleaveFactor(VF);
2255
2256 IntegerType *IdxTy = Cost->Legal->getWidestInductionType();
2257 APInt MaxUIntTripCount = IdxTy->getMask();
2258
2259 // We know the runtime overflow check is known false iff the (max) trip-count
2260 // is known and (max) trip-count + (VF * UF) does not overflow in the type of
2261 // the vector loop induction variable.
2262 if (unsigned TC = Cost->PSE.getSmallConstantMaxTripCount()) {
2263 uint64_t MaxVF = VF.getKnownMinValue();
2264 if (VF.isScalable()) {
2265 std::optional<unsigned> MaxVScale =
2266 getMaxVScale(*Cost->TheFunction, Cost->TTI);
2267 if (!MaxVScale)
2268 return false;
2269 MaxVF *= *MaxVScale;
2270 }
2271
2272 return (MaxUIntTripCount - TC).ugt(MaxVF * MaxUF);
2273 }
2274
2275 return false;
2276}
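// Illustrative example (hypothetical values, not part of the original
// source): with an i32 widest induction type, a known maximum trip count of
// 1000, a fixed VF of 4 and UF = 2, the check above compares
// (2^32 - 1) - 1000 against 4 * 2 = 8; the difference is larger, so the
// runtime overflow check is known to be false and can be removed.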
2277
2278// Return whether we allow using masked interleave-groups (for dealing with
2279// strided loads/stores that reside in predicated blocks, or for dealing
2280// with gaps).
2282 // If an override option has been passed in for interleaved accesses, use it.
2283 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2285
2286 return TTI.enableMaskedInterleavedAccessVectorization();
2287}
2288
2289/// Replace \p VPBB with a VPIRBasicBlock wrapping \p IRBB. All recipes from \p
2290/// VPBB are moved to the end of the newly created VPIRBasicBlock. All
2291/// predecessors and successors of VPBB, if any, are rewired to the new
2292/// VPIRBasicBlock. If \p VPBB may be unreachable, \p Plan must be passed.
2294 BasicBlock *IRBB,
2295 VPlan *Plan = nullptr) {
2296 if (!Plan)
2297 Plan = VPBB->getPlan();
2298 VPIRBasicBlock *IRVPBB = Plan->createVPIRBasicBlock(IRBB);
2299 auto IP = IRVPBB->begin();
2300 for (auto &R : make_early_inc_range(VPBB->phis()))
2301 R.moveBefore(*IRVPBB, IP);
2302
2303 for (auto &R :
2305 R.moveBefore(*IRVPBB, IRVPBB->end());
2306
2307 VPBlockUtils::reassociateBlocks(VPBB, IRVPBB);
2308 // VPBB is now dead and will be cleaned up when the plan gets destroyed.
2309 return IRVPBB;
2310}
2311
2313 BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
2314 assert(VectorPH && "Invalid loop structure");
2315 assert((OrigLoop->getUniqueLatchExitBlock() ||
2316 Cost->requiresScalarEpilogue(VF.isVector())) &&
2317 "loops not exiting via the latch without required epilogue?");
2318
2319 // NOTE: The Plan's scalar preheader VPBB isn't replaced with a VPIRBasicBlock
2320 // wrapping the newly created scalar preheader here at the moment, because the
2321 // Plan's scalar preheader may be unreachable at this point. Instead it is
2322 // replaced in executePlan.
2323 return SplitBlock(VectorPH, VectorPH->getTerminator(), DT, LI, nullptr,
2324 Twine(Prefix) + "scalar.ph");
2325}
2326
2327/// Knowing that loop \p L executes a single vector iteration, add instructions
2328/// that will get simplified and thus should not have any cost to \p
2329/// InstsToIgnore.
2332 SmallPtrSetImpl<Instruction *> &InstsToIgnore) {
2333 auto *Cmp = L->getLatchCmpInst();
2334 if (Cmp)
2335 InstsToIgnore.insert(Cmp);
2336 for (const auto &KV : IL) {
2337 // Extract the key by hand so that it can be used in the lambda below. Note
2338 // that captured structured bindings are a C++20 extension.
2339 const PHINode *IV = KV.first;
2340
2341 // Get next iteration value of the induction variable.
2342 Instruction *IVInst =
2343 cast<Instruction>(IV->getIncomingValueForBlock(L->getLoopLatch()));
2344 if (all_of(IVInst->users(),
2345 [&](const User *U) { return U == IV || U == Cmp; }))
2346 InstsToIgnore.insert(IVInst);
2347 }
2348}
2349
2351 // Create a new IR basic block for the scalar preheader.
2352 BasicBlock *ScalarPH = createScalarPreheader("");
2353 return ScalarPH->getSinglePredecessor();
2354}
2355
2356namespace {
2357
2358struct CSEDenseMapInfo {
2359 static bool canHandle(const Instruction *I) {
2362 }
2363
2364 static inline Instruction *getEmptyKey() {
2365 return DenseMapInfo<Instruction *>::getEmptyKey();
2366 }
2367
2368 static inline Instruction *getTombstoneKey() {
2369 return DenseMapInfo<Instruction *>::getTombstoneKey();
2370 }
2371
2372 static unsigned getHashValue(const Instruction *I) {
2373 assert(canHandle(I) && "Unknown instruction!");
2374 return hash_combine(I->getOpcode(),
2375 hash_combine_range(I->operand_values()));
2376 }
2377
2378 static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
2379 if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
2380 LHS == getTombstoneKey() || RHS == getTombstoneKey())
2381 return LHS == RHS;
2382 return LHS->isIdenticalTo(RHS);
2383 }
2384};
2385
2386} // end anonymous namespace
2387
2388/// FIXME: This legacy common-subexpression-elimination routine is scheduled for
2389/// removal, in favor of the VPlan-based one.
2390static void legacyCSE(BasicBlock *BB) {
2391 // Perform simple cse.
2393 for (Instruction &In : llvm::make_early_inc_range(*BB)) {
2394 if (!CSEDenseMapInfo::canHandle(&In))
2395 continue;
2396
2397 // Check if we can replace this instruction with any of the
2398 // visited instructions.
2399 if (Instruction *V = CSEMap.lookup(&In)) {
2400 In.replaceAllUsesWith(V);
2401 In.eraseFromParent();
2402 continue;
2403 }
2404
2405 CSEMap[&In] = &In;
2406 }
2407}
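// Illustrative example (hypothetical IR, not part of the original source):
// assuming CSEDenseMapInfo::canHandle accepts extractelement, given
//   %e1 = extractelement <4 x i32> %v, i32 0
//   %e2 = extractelement <4 x i32> %v, i32 0
// in the same block, the second instruction hashes to the same bucket, is
// found identical via isIdenticalTo, and is replaced by %e1 and erased.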
2408
2409/// This function attempts to return a value that represents the ElementCount
2410/// at runtime. For fixed-width VFs we know this precisely at compile
2411/// time, but for scalable VFs we calculate it based on an estimate of the
2412/// vscale value.
2414 std::optional<unsigned> VScale) {
2415 unsigned EstimatedVF = VF.getKnownMinValue();
2416 if (VF.isScalable())
2417 if (VScale)
2418 EstimatedVF *= *VScale;
2419 assert(EstimatedVF >= 1 && "Estimated VF shouldn't be less than 1");
2420 return EstimatedVF;
2421}
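// Illustrative example (hypothetical values, not part of the original
// source): for VF = <vscale x 4 x i32> with an estimated vscale of 2, the
// helper above returns 4 * 2 = 8 lanes; for a fixed VF = <8 x i32> it simply
// returns the known minimum value 8.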
2422
2425 ElementCount VF) const {
2426 // We only need to calculate a cost if the VF is scalar; for actual vectors
2427 // we should already have a pre-calculated cost at each VF.
2428 if (!VF.isScalar())
2429 return getCallWideningDecision(CI, VF).Cost;
2430
2431 Type *RetTy = CI->getType();
2433 if (auto RedCost = getReductionPatternCost(CI, VF, RetTy))
2434 return *RedCost;
2435
2437 for (auto &ArgOp : CI->args())
2438 Tys.push_back(ArgOp->getType());
2439
2440 InstructionCost ScalarCallCost =
2441 TTI.getCallInstrCost(CI->getCalledFunction(), RetTy, Tys, CostKind);
2442
2443 // If this is an intrinsic we may have a lower cost for it.
2446 return std::min(ScalarCallCost, IntrinsicCost);
2447 }
2448 return ScalarCallCost;
2449}
2450
2452 if (VF.isScalar() || !canVectorizeTy(Ty))
2453 return Ty;
2454 return toVectorizedTy(Ty, VF);
2455}
2456
2459 ElementCount VF) const {
2461 assert(ID && "Expected intrinsic call!");
2462 Type *RetTy = maybeVectorizeType(CI->getType(), VF);
2463 FastMathFlags FMF;
2464 if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
2465 FMF = FPMO->getFastMathFlags();
2466
2469 SmallVector<Type *> ParamTys;
2470 std::transform(FTy->param_begin(), FTy->param_end(),
2471 std::back_inserter(ParamTys),
2472 [&](Type *Ty) { return maybeVectorizeType(Ty, VF); });
2473
2474 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
2477 return TTI.getIntrinsicInstrCost(CostAttrs, CostKind);
2478}
2479
2481 // Fix widened non-induction PHIs by setting up the PHI operands.
2482 fixNonInductionPHIs(State);
2483
2484 // Don't apply optimizations below when no (vector) loop remains, as they all
2485 // require one at the moment.
2486 VPBasicBlock *HeaderVPBB =
2487 vputils::getFirstLoopHeader(*State.Plan, State.VPDT);
2488 if (!HeaderVPBB)
2489 return;
2490
2491 BasicBlock *HeaderBB = State.CFG.VPBB2IRBB[HeaderVPBB];
2492
2493 // Remove redundant induction instructions.
2494 legacyCSE(HeaderBB);
2495}
2496
2498 auto Iter = vp_depth_first_shallow(Plan.getEntry());
2500 for (VPRecipeBase &P : VPBB->phis()) {
2502 if (!VPPhi)
2503 continue;
2504 PHINode *NewPhi = cast<PHINode>(State.get(VPPhi));
2505 // Make sure the builder has a valid insert point.
2506 Builder.SetInsertPoint(NewPhi);
2507 for (const auto &[Inc, VPBB] : VPPhi->incoming_values_and_blocks())
2508 NewPhi->addIncoming(State.get(Inc), State.CFG.VPBB2IRBB[VPBB]);
2509 }
2510 }
2511}
2512
2513void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
2514 // We should not collect Scalars more than once per VF. Right now, this
2515 // function is called from collectUniformsAndScalars(), which already does
2516 // this check. Collecting Scalars for VF=1 does not make any sense.
2517 assert(VF.isVector() && !Scalars.contains(VF) &&
2518 "This function should not be visited twice for the same VF");
2519
2520 // This avoids any chances of creating a REPLICATE recipe during planning
2521 // since that would result in generation of scalarized code during execution,
2522 // which is not supported for scalable vectors.
2523 if (VF.isScalable()) {
2524 Scalars[VF].insert_range(Uniforms[VF]);
2525 return;
2526 }
2527
2529
2530 // These sets are used to seed the analysis with pointers used by memory
2531 // accesses that will remain scalar.
2533 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
2534 auto *Latch = TheLoop->getLoopLatch();
2535
2536 // A helper that returns true if the use of Ptr by MemAccess will be scalar.
2537 // The pointer operands of loads and stores will be scalar as long as the
2538 // memory access is not a gather or scatter operation. The value operand of a
2539 // store will remain scalar if the store is scalarized.
2540 auto IsScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
2541 InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
2542 assert(WideningDecision != CM_Unknown &&
2543 "Widening decision should be ready at this moment");
2544 if (auto *Store = dyn_cast<StoreInst>(MemAccess))
2545 if (Ptr == Store->getValueOperand())
2546 return WideningDecision == CM_Scalarize;
2547 assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
2548 "Ptr is neither a value or pointer operand");
2549 return WideningDecision != CM_GatherScatter;
2550 };
2551
2552 // A helper that returns true if the given value is a getelementptr
2553 // instruction contained in the loop.
2554 auto IsLoopVaryingGEP = [&](Value *V) {
2555 return isa<GetElementPtrInst>(V) && !TheLoop->isLoopInvariant(V);
2556 };
2557
2558 // A helper that evaluates a memory access's use of a pointer. If the use will
2559 // be a scalar use and the pointer is only used by memory accesses, we place
2560 // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
2561 // PossibleNonScalarPtrs.
2562 auto EvaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
2563 // We only care about bitcast and getelementptr instructions contained in
2564 // the loop.
2565 if (!IsLoopVaryingGEP(Ptr))
2566 return;
2567
2568 // If the pointer has already been identified as scalar (e.g., if it was
2569 // also identified as uniform), there's nothing to do.
2570 auto *I = cast<Instruction>(Ptr);
2571 if (Worklist.count(I))
2572 return;
2573
2574 // If the use of the pointer will be a scalar use, and all users of the
2575 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
2576 // place the pointer in PossibleNonScalarPtrs.
2577 if (IsScalarUse(MemAccess, Ptr) &&
2579 ScalarPtrs.insert(I);
2580 else
2581 PossibleNonScalarPtrs.insert(I);
2582 };
2583
2584 // We seed the scalars analysis with two classes of instructions: (1)
2585 // instructions marked uniform-after-vectorization and (2) bitcast,
2586 // getelementptr and (pointer) phi instructions used by memory accesses
2587 // requiring a scalar use.
2588 //
2589 // (1) Add to the worklist all instructions that have been identified as
2590 // uniform-after-vectorization.
2591 Worklist.insert_range(Uniforms[VF]);
2592
2593 // (2) Add to the worklist all bitcast and getelementptr instructions used by
2594 // memory accesses requiring a scalar use. The pointer operands of loads and
2595 // stores will be scalar unless the operation is a gather or scatter.
2596 // The value operand of a store will remain scalar if the store is scalarized.
2597 for (auto *BB : TheLoop->blocks())
2598 for (auto &I : *BB) {
2599 if (auto *Load = dyn_cast<LoadInst>(&I)) {
2600 EvaluatePtrUse(Load, Load->getPointerOperand());
2601 } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
2602 EvaluatePtrUse(Store, Store->getPointerOperand());
2603 EvaluatePtrUse(Store, Store->getValueOperand());
2604 }
2605 }
2606 for (auto *I : ScalarPtrs)
2607 if (!PossibleNonScalarPtrs.count(I)) {
2608 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
2609 Worklist.insert(I);
2610 }
2611
2612 // Insert the forced scalars.
2613 // FIXME: Currently VPWidenPHIRecipe() often creates a dead vector
2614 // induction variable when the PHI user is scalarized.
2615 auto ForcedScalar = ForcedScalars.find(VF);
2616 if (ForcedScalar != ForcedScalars.end())
2617 for (auto *I : ForcedScalar->second) {
2618 LLVM_DEBUG(dbgs() << "LV: Found (forced) scalar instruction: " << *I << "\n");
2619 Worklist.insert(I);
2620 }
2621
2622 // Expand the worklist by looking through any bitcasts and getelementptr
2623 // instructions we've already identified as scalar. This is similar to the
2624 // expansion step in collectLoopUniforms(); however, here we're only
2625 // expanding to include additional bitcasts and getelementptr instructions.
2626 unsigned Idx = 0;
2627 while (Idx != Worklist.size()) {
2628 Instruction *Dst = Worklist[Idx++];
2629 if (!IsLoopVaryingGEP(Dst->getOperand(0)))
2630 continue;
2631 auto *Src = cast<Instruction>(Dst->getOperand(0));
2632 if (llvm::all_of(Src->users(), [&](User *U) -> bool {
2633 auto *J = cast<Instruction>(U);
2634 return !TheLoop->contains(J) || Worklist.count(J) ||
2635 ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
2636 IsScalarUse(J, Src));
2637 })) {
2638 Worklist.insert(Src);
2639 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
2640 }
2641 }
2642
2643 // An induction variable will remain scalar if all users of the induction
2644 // variable and induction variable update remain scalar.
2645 for (const auto &Induction : Legal->getInductionVars()) {
2646 auto *Ind = Induction.first;
2647 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
2648
2649 // If tail-folding is applied, the primary induction variable will be used
2650 // to feed a vector compare.
2651 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
2652 continue;
2653
2654 // Returns true if \p Indvar is a pointer induction that is used directly by
2655 // load/store instruction \p I.
2656 auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar,
2657 Instruction *I) {
2658 return Induction.second.getKind() ==
2661 Indvar == getLoadStorePointerOperand(I) && IsScalarUse(I, Indvar);
2662 };
2663
2664 // Determine if all users of the induction variable are scalar after
2665 // vectorization.
2666 bool ScalarInd = all_of(Ind->users(), [&](User *U) -> bool {
2667 auto *I = cast<Instruction>(U);
2668 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
2669 IsDirectLoadStoreFromPtrIndvar(Ind, I);
2670 });
2671 if (!ScalarInd)
2672 continue;
2673
2674 // If the induction variable update is a fixed-order recurrence, neither the
2675 // induction variable nor its update should be marked scalar after
2676 // vectorization.
2677 auto *IndUpdatePhi = dyn_cast<PHINode>(IndUpdate);
2678 if (IndUpdatePhi && Legal->isFixedOrderRecurrence(IndUpdatePhi))
2679 continue;
2680
2681 // Determine if all users of the induction variable update instruction are
2682 // scalar after vectorization.
2683 bool ScalarIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
2684 auto *I = cast<Instruction>(U);
2685 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
2686 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
2687 });
2688 if (!ScalarIndUpdate)
2689 continue;
2690
2691 // The induction variable and its update instruction will remain scalar.
2692 Worklist.insert(Ind);
2693 Worklist.insert(IndUpdate);
2694 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
2695 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
2696 << "\n");
2697 }
2698
2699 Scalars[VF].insert_range(Worklist);
2700}
2701
2703 ElementCount VF) {
2704 if (!isPredicatedInst(I))
2705 return false;
2706
2707 // Do we have a non-scalar lowering for this predicated
2708 // instruction? No - it is scalar with predication.
2709 switch(I->getOpcode()) {
2710 default:
2711 return true;
2712 case Instruction::Call:
2713 if (VF.isScalar())
2714 return true;
2716 case Instruction::Load:
2717 case Instruction::Store: {
2718 auto *Ptr = getLoadStorePointerOperand(I);
2719 auto *Ty = getLoadStoreType(I);
2720 unsigned AS = getLoadStoreAddressSpace(I);
2721 Type *VTy = Ty;
2722 if (VF.isVector())
2723 VTy = VectorType::get(Ty, VF);
2724 const Align Alignment = getLoadStoreAlignment(I);
2725 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment, AS) ||
2726 TTI.isLegalMaskedGather(VTy, Alignment))
2727 : !(isLegalMaskedStore(Ty, Ptr, Alignment, AS) ||
2728 TTI.isLegalMaskedScatter(VTy, Alignment));
2729 }
2730 case Instruction::UDiv:
2731 case Instruction::SDiv:
2732 case Instruction::SRem:
2733 case Instruction::URem: {
2734 // We have the option to use the safe-divisor idiom to avoid predication.
2735 // The cost based decision here will always select safe-divisor for
2736 // scalable vectors as scalarization isn't legal.
2737 const auto [ScalarCost, SafeDivisorCost] = getDivRemSpeculationCost(I, VF);
2738 return isDivRemScalarWithPredication(ScalarCost, SafeDivisorCost);
2739 }
2740 }
2741}
2742
2744 return Legal->isMaskRequired(I, foldTailByMasking());
2745}
2746
2747// TODO: Fold into LoopVectorizationLegality::isMaskRequired.
2749 // TODO: We can use the loop-preheader as context point here and get
2750 // context sensitive reasoning for isSafeToSpeculativelyExecute.
2754 return false;
2755
2756 // If the instruction was executed conditionally in the original scalar loop,
2757 // predication is needed with a mask whose lanes are all possibly inactive.
2758 if (Legal->blockNeedsPredication(I->getParent()))
2759 return true;
2760
2761 // If we're not folding the tail by masking, predication is unnecessary.
2762 if (!foldTailByMasking())
2763 return false;
2764
2765 // All that remain are instructions with side-effects originally executed in
2766 // the loop unconditionally, but now execute under a tail-fold mask (only)
2767 // having at least one active lane (the first). If the side-effects of the
2768 // instruction are invariant, executing it w/o (the tail-folding) mask is safe
2769 // - it will cause the same side-effects as when masked.
2770 switch(I->getOpcode()) {
2771 default:
2773 "instruction should have been considered by earlier checks");
2774 case Instruction::Call:
2775 // Side-effects of a Call are assumed to be non-invariant, needing a
2776 // (fold-tail) mask.
2778 "should have returned earlier for calls not needing a mask");
2779 return true;
2780 case Instruction::Load:
2781 // If the address is loop invariant no predication is needed.
2782 return !Legal->isInvariant(getLoadStorePointerOperand(I));
2783 case Instruction::Store: {
2784 // For stores, we need to prove both speculation safety (which follows from
2785 // the same argument as loads) and that the value being stored
2786 // is correct. The easiest form of the latter is to require that all values
2787 // stored are the same.
2788 return !(Legal->isInvariant(getLoadStorePointerOperand(I)) &&
2789 TheLoop->isLoopInvariant(cast<StoreInst>(I)->getValueOperand()));
2790 }
2791 case Instruction::UDiv:
2792 case Instruction::URem:
2793 // If the divisor is loop-invariant no predication is needed.
2794 return !Legal->isInvariant(I->getOperand(1));
2795 case Instruction::SDiv:
2796 case Instruction::SRem:
2797 // Conservative for now, since masked-off lanes may be poison and could
2798 // trigger signed overflow.
2799 return true;
2800 }
2801}
2802
2806 return 1;
2807 // If the block wasn't originally predicated then return early to avoid
2808 // computing BlockFrequencyInfo unnecessarily.
2809 if (!Legal->blockNeedsPredication(BB))
2810 return 1;
2811
2812 uint64_t HeaderFreq =
2813 getBFI().getBlockFreq(TheLoop->getHeader()).getFrequency();
2814 uint64_t BBFreq = getBFI().getBlockFreq(BB).getFrequency();
2815 assert(HeaderFreq >= BBFreq &&
2816 "Header has smaller block freq than dominated BB?");
2817 return std::round((double)HeaderFreq / BBFreq);
2818}
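// Illustrative example (hypothetical frequencies, not part of the original
// source): if the loop header has a block frequency of 1024 and a predicated
// block in the loop has a frequency of 256, the divisor computed above is
// round(1024 / 256) = 4, i.e. the block is expected to execute on roughly one
// in four iterations and its cost is scaled down accordingly.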
2819
2820std::pair<InstructionCost, InstructionCost>
2822 ElementCount VF) {
2823 assert(I->getOpcode() == Instruction::UDiv ||
2824 I->getOpcode() == Instruction::SDiv ||
2825 I->getOpcode() == Instruction::SRem ||
2826 I->getOpcode() == Instruction::URem);
2828
2829 // Scalarization isn't legal for scalable vector types
2830 InstructionCost ScalarizationCost = InstructionCost::getInvalid();
2831 if (!VF.isScalable()) {
2832 // Get the scalarization cost and scale this amount by the probability of
2833 // executing the predicated block. If the instruction is not predicated,
2834 // we fall through to the next case.
2835 ScalarizationCost = 0;
2836
2837 // These instructions have a non-void type, so account for the phi nodes
2838 // that we will create. This cost is likely to be zero. The phi node
2839 // cost, if any, should be scaled by the block probability because it
2840 // models a copy at the end of each predicated block.
2841 ScalarizationCost +=
2842 VF.getFixedValue() * TTI.getCFInstrCost(Instruction::PHI, CostKind);
2843
2844 // The cost of the non-predicated instruction.
2845 ScalarizationCost +=
2846 VF.getFixedValue() *
2847 TTI.getArithmeticInstrCost(I->getOpcode(), I->getType(), CostKind);
2848
2849 // The cost of insertelement and extractelement instructions needed for
2850 // scalarization.
2851 ScalarizationCost += getScalarizationOverhead(I, VF);
2852
2853 // Scale the cost by the probability of executing the predicated blocks.
2854 // This assumes the predicated block for each vector lane is equally
2855 // likely.
2856 ScalarizationCost =
2857 ScalarizationCost / getPredBlockCostDivisor(CostKind, I->getParent());
2858 }
2859
2860 InstructionCost SafeDivisorCost = 0;
2861 auto *VecTy = toVectorTy(I->getType(), VF);
2862 // The cost of the select guard to ensure all lanes are well defined
2863 // after we speculate above any internal control flow.
2864 SafeDivisorCost +=
2865 TTI.getCmpSelInstrCost(Instruction::Select, VecTy,
2866 toVectorTy(Type::getInt1Ty(I->getContext()), VF),
2868
2869 SmallVector<const Value *, 4> Operands(I->operand_values());
2870 SafeDivisorCost += TTI.getArithmeticInstrCost(
2871 I->getOpcode(), VecTy, CostKind,
2872 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
2873 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
2874 Operands, I);
2875 return {ScalarizationCost, SafeDivisorCost};
2876}
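// Illustrative example (hypothetical costs, not part of the original source):
// for a predicated udiv at a fixed VF of 4 with a per-lane divide cost of 5,
// a PHI cost of 1, a scalarization overhead of 8 and a predicated-block cost
// divisor of 2, the scalarization estimate above is (4*1 + 4*5 + 8) / 2 = 16;
// it is then compared against the safe-divisor estimate (vector select plus
// vector udiv) by the caller to pick the cheaper lowering.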
2877
2879 Instruction *I, ElementCount VF) const {
2880 assert(isAccessInterleaved(I) && "Expecting interleaved access.");
2882 "Decision should not be set yet.");
2883 auto *Group = getInterleavedAccessGroup(I);
2884 assert(Group && "Must have a group.");
2885 unsigned InterleaveFactor = Group->getFactor();
2886
2887 // If the instruction's allocated size doesn't equal its type size, it
2888 // requires padding and will be scalarized.
2889 auto &DL = I->getDataLayout();
2890 auto *ScalarTy = getLoadStoreType(I);
2891 if (hasIrregularType(ScalarTy, DL))
2892 return false;
2893
2894 // For scalable vectors, the interleave factors must be <= 8 since we require
2895 // the (de)interleaveN intrinsics instead of shufflevectors.
2896 if (VF.isScalable() && InterleaveFactor > 8)
2897 return false;
2898
2899 // If the group involves a non-integral pointer, we may not be able to
2900 // losslessly cast all values to a common type.
2901 bool ScalarNI = DL.isNonIntegralPointerType(ScalarTy);
2902 for (unsigned Idx = 0; Idx < InterleaveFactor; Idx++) {
2903 Instruction *Member = Group->getMember(Idx);
2904 if (!Member)
2905 continue;
2906 auto *MemberTy = getLoadStoreType(Member);
2907 bool MemberNI = DL.isNonIntegralPointerType(MemberTy);
2908 // Don't coerce non-integral pointers to integers or vice versa.
2909 if (MemberNI != ScalarNI)
2910 // TODO: Consider adding special nullptr value case here
2911 return false;
2912 if (MemberNI && ScalarNI &&
2913 ScalarTy->getPointerAddressSpace() !=
2914 MemberTy->getPointerAddressSpace())
2915 return false;
2916 }
2917
2918 // Check if masking is required.
2919 // A Group may need masking for one of two reasons: it resides in a block that
2920 // needs predication, or it was decided to use masking to deal with gaps
2921 // (either a gap at the end of a load-access that may result in a speculative
2922 // load, or any gaps in a store-access).
2923 bool PredicatedAccessRequiresMasking =
2925 bool LoadAccessWithGapsRequiresEpilogMasking =
2926 isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
2928 bool StoreAccessWithGapsRequiresMasking =
2929 isa<StoreInst>(I) && !Group->isFull();
2930 if (!PredicatedAccessRequiresMasking &&
2931 !LoadAccessWithGapsRequiresEpilogMasking &&
2932 !StoreAccessWithGapsRequiresMasking)
2933 return true;
2934
2935 // If masked interleaving is required, we expect that the user/target had
2936 // enabled it, because otherwise it either wouldn't have been created or
2937 // it should have been invalidated by the CostModel.
2939 "Masked interleave-groups for predicated accesses are not enabled.");
2940
2941 if (Group->isReverse())
2942 return false;
2943
2944 // TODO: Support interleaved access that requires a gap mask for scalable VFs.
2945 bool NeedsMaskForGaps = LoadAccessWithGapsRequiresEpilogMasking ||
2946 StoreAccessWithGapsRequiresMasking;
2947 if (VF.isScalable() && NeedsMaskForGaps)
2948 return false;
2949
2950 auto *Ty = getLoadStoreType(I);
2951 const Align Alignment = getLoadStoreAlignment(I);
2952 unsigned AS = getLoadStoreAddressSpace(I);
2953 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment, AS)
2954 : TTI.isLegalMaskedStore(Ty, Alignment, AS);
2955}
2956
2958 Instruction *I, ElementCount VF) {
2959 // Get and ensure we have a valid memory instruction.
2960 assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction");
2961
2962 auto *Ptr = getLoadStorePointerOperand(I);
2963 auto *ScalarTy = getLoadStoreType(I);
2964
2965 // In order to be widened, the pointer should be consecutive, first of all.
2966 if (!Legal->isConsecutivePtr(ScalarTy, Ptr))
2967 return false;
2968
2969 // If the instruction is a store located in a predicated block, it will be
2970 // scalarized.
2971 if (isScalarWithPredication(I, VF))
2972 return false;
2973
2974 // If the instruction's allocated size doesn't equal its type size, it
2975 // requires padding and will be scalarized.
2976 auto &DL = I->getDataLayout();
2977 if (hasIrregularType(ScalarTy, DL))
2978 return false;
2979
2980 return true;
2981}
2982
2983void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
2984 // We should not collect Uniforms more than once per VF. Right now,
2985 // this function is called from collectUniformsAndScalars(), which
2986 // already does this check. Collecting Uniforms for VF=1 does not make any
2987 // sense.
2988
2989 assert(VF.isVector() && !Uniforms.contains(VF) &&
2990 "This function should not be visited twice for the same VF");
2991
2992 // Create the entry for this VF up front so we won't analyze it again;
2993 // even if no uniform value is found, Uniforms.count(VF) will return 1.
2994 Uniforms[VF].clear();
2995
2996 // Now we know that the loop is vectorizable!
2997 // Collect instructions inside the loop that will remain uniform after
2998 // vectorization.
2999
3000 // Global values, params and instructions outside of current loop are out of
3001 // scope.
3002 auto IsOutOfScope = [&](Value *V) -> bool {
3003 Instruction *I = dyn_cast<Instruction>(V);
3004 return (!I || !TheLoop->contains(I));
3005 };
3006
3007 // Worklist containing uniform instructions demanding lane 0.
3008 SetVector<Instruction *> Worklist;
3009
3010 // Add uniform instructions demanding lane 0 to the worklist. Instructions
3011 // that require predication must not be considered uniform after
3012 // vectorization, because that would create an erroneous replicating region
3013 // where only a single instance out of VF should be formed.
3014 auto AddToWorklistIfAllowed = [&](Instruction *I) -> void {
3015 if (IsOutOfScope(I)) {
3016 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
3017 << *I << "\n");
3018 return;
3019 }
3020 if (isPredicatedInst(I)) {
3021 LLVM_DEBUG(
3022 dbgs() << "LV: Found not uniform due to requiring predication: " << *I
3023 << "\n");
3024 return;
3025 }
3026 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
3027 Worklist.insert(I);
3028 };
3029
3030 // Start with the conditional branches exiting the loop. If the branch
3031 // condition is an instruction contained in the loop that is only used by the
3032 // branch, it is uniform. Note conditions from uncountable early exits are not
3033 // uniform.
3035 TheLoop->getExitingBlocks(Exiting);
3036 for (BasicBlock *E : Exiting) {
3037 if (Legal->hasUncountableEarlyExit() && TheLoop->getLoopLatch() != E)
3038 continue;
3039 auto *Cmp = dyn_cast<Instruction>(E->getTerminator()->getOperand(0));
3040 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
3041 AddToWorklistIfAllowed(Cmp);
3042 }
3043
3044 auto PrevVF = VF.divideCoefficientBy(2);
3045 // Return true if all lanes perform the same memory operation, and we can
3046 // thus choose to execute only one.
3047 auto IsUniformMemOpUse = [&](Instruction *I) {
3048 // If the value was already known to not be uniform for the previous
3049 // (smaller VF), it cannot be uniform for the larger VF.
3050 if (PrevVF.isVector()) {
3051 auto Iter = Uniforms.find(PrevVF);
3052 if (Iter != Uniforms.end() && !Iter->second.contains(I))
3053 return false;
3054 }
3055 if (!Legal->isUniformMemOp(*I, VF))
3056 return false;
3057 if (isa<LoadInst>(I))
3058 // Loading the same address always produces the same result - at least
3059 // assuming aliasing and ordering which have already been checked.
3060 return true;
3061 // Storing the same value on every iteration.
3062 return TheLoop->isLoopInvariant(cast<StoreInst>(I)->getValueOperand());
3063 };
3064
3065 auto IsUniformDecision = [&](Instruction *I, ElementCount VF) {
3066 InstWidening WideningDecision = getWideningDecision(I, VF);
3067 assert(WideningDecision != CM_Unknown &&
3068 "Widening decision should be ready at this moment");
3069
3070 if (IsUniformMemOpUse(I))
3071 return true;
3072
3073 return (WideningDecision == CM_Widen ||
3074 WideningDecision == CM_Widen_Reverse ||
3075 WideningDecision == CM_Interleave);
3076 };
3077
3078 // Returns true if Ptr is the pointer operand of a memory access instruction
3079 // I, I is known to not require scalarization, and the pointer is not also
3080 // stored.
3081 auto IsVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
3082 if (isa<StoreInst>(I) && I->getOperand(0) == Ptr)
3083 return false;
3084 return getLoadStorePointerOperand(I) == Ptr &&
3085 (IsUniformDecision(I, VF) || Legal->isInvariant(Ptr));
3086 };
3087
3088 // Holds a list of values which are known to have at least one uniform use.
3089 // Note that there may be other uses which aren't uniform. A "uniform use"
3090 // here is something which only demands lane 0 of the unrolled iterations;
3091 // it does not imply that all lanes produce the same value (e.g. this is not
3092 // the usual meaning of uniform)
3093 SetVector<Value *> HasUniformUse;
3094
3095 // Scan the loop for instructions which are either a) known to have only
3096 // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
3097 for (auto *BB : TheLoop->blocks())
3098 for (auto &I : *BB) {
3099 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
3100 switch (II->getIntrinsicID()) {
3101 case Intrinsic::sideeffect:
3102 case Intrinsic::experimental_noalias_scope_decl:
3103 case Intrinsic::assume:
3104 case Intrinsic::lifetime_start:
3105 case Intrinsic::lifetime_end:
3106 if (TheLoop->hasLoopInvariantOperands(&I))
3107 AddToWorklistIfAllowed(&I);
3108 break;
3109 default:
3110 break;
3111 }
3112 }
3113
3114 if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
3115 if (IsOutOfScope(EVI->getAggregateOperand())) {
3116 AddToWorklistIfAllowed(EVI);
3117 continue;
3118 }
3119 // Only ExtractValue instructions where the aggregate value comes from a
3120 // call are allowed to be non-uniform.
3121 assert(isa<CallInst>(EVI->getAggregateOperand()) &&
3122 "Expected aggregate value to be call return value");
3123 }
3124
3125 // If there's no pointer operand, there's nothing to do.
3126 auto *Ptr = getLoadStorePointerOperand(&I);
3127 if (!Ptr)
3128 continue;
3129
3130 // If the pointer can be proven to be uniform, always add it to the
3131 // worklist.
3132 if (isa<Instruction>(Ptr) && Legal->isUniform(Ptr, VF))
3133 AddToWorklistIfAllowed(cast<Instruction>(Ptr));
3134
3135 if (IsUniformMemOpUse(&I))
3136 AddToWorklistIfAllowed(&I);
3137
3138 if (IsVectorizedMemAccessUse(&I, Ptr))
3139 HasUniformUse.insert(Ptr);
3140 }
3141
3142 // Add to the worklist any operands which have *only* uniform (e.g. lane 0
3143 // demanding) users. Since loops are assumed to be in LCSSA form, this
3144 // disallows uses outside the loop as well.
3145 for (auto *V : HasUniformUse) {
3146 if (IsOutOfScope(V))
3147 continue;
3148 auto *I = cast<Instruction>(V);
3149 bool UsersAreMemAccesses = all_of(I->users(), [&](User *U) -> bool {
3150 auto *UI = cast<Instruction>(U);
3151 return TheLoop->contains(UI) && IsVectorizedMemAccessUse(UI, V);
3152 });
3153 if (UsersAreMemAccesses)
3154 AddToWorklistIfAllowed(I);
3155 }
3156
3157 // Expand Worklist in topological order: whenever a new instruction
3158 // is added, its users should already be inside Worklist. This ensures
3159 // that a uniform instruction will only be used by uniform instructions.
3160 unsigned Idx = 0;
3161 while (Idx != Worklist.size()) {
3162 Instruction *I = Worklist[Idx++];
3163
3164 for (auto *OV : I->operand_values()) {
3165 // isOutOfScope operands cannot be uniform instructions.
3166 if (IsOutOfScope(OV))
3167 continue;
3168 // First-order recurrence phis should typically be considered
3169 // non-uniform.
3170 auto *OP = dyn_cast<PHINode>(OV);
3171 if (OP && Legal->isFixedOrderRecurrence(OP))
3172 continue;
3173 // If all the users of the operand are uniform, then add the
3174 // operand into the uniform worklist.
3175 auto *OI = cast<Instruction>(OV);
3176 if (llvm::all_of(OI->users(), [&](User *U) -> bool {
3177 auto *J = cast<Instruction>(U);
3178 return Worklist.count(J) || IsVectorizedMemAccessUse(J, OI);
3179 }))
3180 AddToWorklistIfAllowed(OI);
3181 }
3182 }
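// For example, if an address computation feeds only a load that was already
// proven uniform above, the address instruction is picked up here as well,
// because all of its users are in the worklist by the time it is visited.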
3183
3184 // For an instruction to be added into Worklist above, all its users inside
3185 // the loop should also be in Worklist. However, this condition cannot be
3186 // true for phi nodes that form a cyclic dependence. We must process phi
3187 // nodes separately. An induction variable will remain uniform if all users
3188 // of the induction variable and induction variable update remain uniform.
3189 // The code below handles both pointer and non-pointer induction variables.
3190 BasicBlock *Latch = TheLoop->getLoopLatch();
3191 for (const auto &Induction : Legal->getInductionVars()) {
3192 auto *Ind = Induction.first;
3193 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
3194
3195 // Determine if all users of the induction variable are uniform after
3196 // vectorization.
3197 bool UniformInd = all_of(Ind->users(), [&](User *U) -> bool {
3198 auto *I = cast<Instruction>(U);
3199 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
3200 IsVectorizedMemAccessUse(I, Ind);
3201 });
3202 if (!UniformInd)
3203 continue;
3204
3205 // Determine if all users of the induction variable update instruction are
3206 // uniform after vectorization.
3207 bool UniformIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
3208 auto *I = cast<Instruction>(U);
3209 return I == Ind || Worklist.count(I) ||
3210 IsVectorizedMemAccessUse(I, IndUpdate);
3211 });
3212 if (!UniformIndUpdate)
3213 continue;
3214
3215 // The induction variable and its update instruction will remain uniform.
3216 AddToWorklistIfAllowed(Ind);
3217 AddToWorklistIfAllowed(IndUpdate);
3218 }
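// For example, an induction variable that is only used to form the address
// of a widened consecutive access and by its own update stays uniform: just
// lane 0 of it is ever demanded. If it also fed a stored value, that user
// would be non-uniform and the induction would be left out of the worklist.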
3219
3220 Uniforms[VF].insert_range(Worklist);
3221}
3222
3223bool LoopVectorizationCostModel::runtimeChecksRequired() {
3224 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
3225
3226 if (Legal->getRuntimePointerChecking()->Need) {
3227 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
3228 "runtime pointer checks needed. Enable vectorization of this "
3229 "loop with '#pragma clang loop vectorize(enable)' when "
3230 "compiling with -Os/-Oz",
3231 "CantVersionLoopWithOptForSize", ORE, TheLoop);
3232 return true;
3233 }
3234
3235 if (!PSE.getPredicate().isAlwaysTrue()) {
3236 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
3237 "runtime SCEV checks needed. Enable vectorization of this "
3238 "loop with '#pragma clang loop vectorize(enable)' when "
3239 "compiling with -Os/-Oz",
3240 "CantVersionLoopWithOptForSize", ORE, TheLoop);
3241 return true;
3242 }
3243
3244 // FIXME: Avoid specializing for stride==1 instead of bailing out.
3245 if (!Legal->getLAI()->getSymbolicStrides().empty()) {
3246 reportVectorizationFailure("Runtime stride check for small trip count",
3247 "runtime stride == 1 checks needed. Enable vectorization of "
3248 "this loop without such check by compiling with -Os/-Oz",
3249 "CantVersionLoopWithOptForSize", ORE, TheLoop);
3250 return true;
3251 }
3252
3253 return false;
3254}
3255
3256bool LoopVectorizationCostModel::isScalableVectorizationAllowed() {
3257 if (IsScalableVectorizationAllowed)
3258 return *IsScalableVectorizationAllowed;
3259
3260 IsScalableVectorizationAllowed = false;
3261 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors)
3262 return false;
3263
3264 if (Hints->isScalableVectorizationDisabled()) {
3265 reportVectorizationInfo("Scalable vectorization is explicitly disabled",
3266 "ScalableVectorizationDisabled", ORE, TheLoop);
3267 return false;
3268 }
3269
3270 LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n");
3271
3272 auto MaxScalableVF = ElementCount::getScalable(
3273 std::numeric_limits<ElementCount::ScalarTy>::max());
3274
3275 // Test that the loop-vectorizer can legalize all operations for this MaxVF.
3276 // FIXME: While for scalable vectors this is currently sufficient, this should
3277 // be replaced by a more detailed mechanism that filters out specific VFs,
3278 // instead of invalidating vectorization for a whole set of VFs based on the
3279 // MaxVF.
3280
3281 // Disable scalable vectorization if the loop contains unsupported reductions.
3282 if (!canVectorizeReductions(MaxScalableVF)) {
3283 reportVectorizationInfo(
3284 "Scalable vectorization not supported for the reduction "
3285 "operations found in this loop.",
3286 "ScalableVFUnfeasible", ORE, TheLoop);
3287 return false;
3288 }
3289
3290 // Disable scalable vectorization if the loop contains any instructions
3291 // with element types not supported for scalable vectors.
3292 if (any_of(ElementTypesInLoop, [&](Type *Ty) {
3293 return !Ty->isVoidTy() &&
3294 !TTI.isElementTypeLegalForScalableVector(Ty);
3295 })) {
3296 reportVectorizationInfo("Scalable vectorization is not supported "
3297 "for all element types found in this loop.",
3298 "ScalableVFUnfeasible", ORE, TheLoop);
3299 return false;
3300 }
3301
3302 if (!Legal->isSafeForAnyVectorWidth() && !getMaxVScale(*TheFunction, TTI)) {
3303 reportVectorizationInfo("The target does not provide maximum vscale value "
3304 "for safe distance analysis.",
3305 "ScalableVFUnfeasible", ORE, TheLoop);
3306 return false;
3307 }
3308
3309 IsScalableVectorizationAllowed = true;
3310 return true;
3311}
3312
3313ElementCount
3314LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
3315 if (!isScalableVectorizationAllowed())
3316 return ElementCount::getScalable(0);
3317
3318 auto MaxScalableVF = ElementCount::getScalable(
3319 std::numeric_limits<ElementCount::ScalarTy>::max());
3320 if (Legal->isSafeForAnyVectorWidth())
3321 return MaxScalableVF;
3322
3323 std::optional<unsigned> MaxVScale = getMaxVScale(*TheFunction, TTI);
3324 // Limit MaxScalableVF by the maximum safe dependence distance.
3325 MaxScalableVF = ElementCount::getScalable(MaxSafeElements / *MaxVScale);
3326
3327 if (!MaxScalableVF)
3328 reportVectorizationInfo(
3329 "Max legal vector width too small, scalable vectorization "
3330 "unfeasible.",
3331 "ScalableVFUnfeasible", ORE, TheLoop);
3332
3333 return MaxScalableVF;
3334}
3335
3336FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF(
3337 unsigned MaxTripCount, ElementCount UserVF, unsigned UserIC,
3338 bool FoldTailByMasking) {
3339 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
3340 unsigned SmallestType, WidestType;
3341 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
3342
3343 // Get the maximum safe dependence distance in bits computed by LAA.
3344 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
3345 // the memory access that is most restrictive (involved in the smallest
3346 // dependence distance).
3347 unsigned MaxSafeElementsPowerOf2 =
3348 bit_floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);
3349 if (!Legal->isSafeForAnyStoreLoadForwardDistances()) {
3350 unsigned SLDist = Legal->getMaxStoreLoadForwardSafeDistanceInBits();
3351 MaxSafeElementsPowerOf2 =
3352 std::min(MaxSafeElementsPowerOf2, SLDist / WidestType);
3353 }
3354 auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElementsPowerOf2);
3355 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElementsPowerOf2);
3356
3357 if (!Legal->isSafeForAnyVectorWidth())
3358 this->MaxSafeElements = MaxSafeElementsPowerOf2;
3359
3360 LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
3361 << ".\n");
3362 LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
3363 << ".\n");
3364
3365 // First analyze the UserVF, fall back if the UserVF should be ignored.
3366 if (UserVF) {
3367 auto MaxSafeUserVF =
3368 UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
3369
3370 if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) {
3371 // If `VF=vscale x N` is safe, then so is `VF=N`
3372 if (UserVF.isScalable())
3373 return FixedScalableVFPair(
3374 ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF);
3375
3376 return UserVF;
3377 }
3378
3379 assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
3380
3381 // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
3382 // is better to ignore the hint and let the compiler choose a suitable VF.
3383 if (!UserVF.isScalable()) {
3384 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
3385 << " is unsafe, clamping to max safe VF="
3386 << MaxSafeFixedVF << ".\n");
3387 ORE->emit([&]() {
3388 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
3389 TheLoop->getStartLoc(),
3390 TheLoop->getHeader())
3391 << "User-specified vectorization factor "
3392 << ore::NV("UserVectorizationFactor", UserVF)
3393 << " is unsafe, clamping to maximum safe vectorization factor "
3394 << ore::NV("VectorizationFactor", MaxSafeFixedVF);
3395 });
3396 return MaxSafeFixedVF;
3397 }
3398
3400 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
3401 << " is ignored because scalable vectors are not "
3402 "available.\n");
3403 ORE->emit([&]() {
3404 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
3405 TheLoop->getStartLoc(),
3406 TheLoop->getHeader())
3407 << "User-specified vectorization factor "
3408 << ore::NV("UserVectorizationFactor", UserVF)
3409 << " is ignored because the target does not support scalable "
3410 "vectors. The compiler will pick a more suitable value.";
3411 });
3412 } else {
3413 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
3414 << " is unsafe. Ignoring scalable UserVF.\n");
3415 ORE->emit([&]() {
3416 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
3417 TheLoop->getStartLoc(),
3418 TheLoop->getHeader())
3419 << "User-specified vectorization factor "
3420 << ore::NV("UserVectorizationFactor", UserVF)
3421 << " is unsafe. Ignoring the hint to let the compiler pick a "
3422 "more suitable value.";
3423 });
3424 }
3425 }
3426
3427 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
3428 << " / " << WidestType << " bits.\n");
3429
3430 FixedScalableVFPair Result(ElementCount::getFixed(1),
3431 ElementCount::getScalable(1));
3432 if (auto MaxVF =
3433 getMaximizedVFForTarget(MaxTripCount, SmallestType, WidestType,
3434 MaxSafeFixedVF, UserIC, FoldTailByMasking))
3435 Result.FixedVF = MaxVF;
3436
3437 if (auto MaxVF =
3438 getMaximizedVFForTarget(MaxTripCount, SmallestType, WidestType,
3439 MaxSafeScalableVF, UserIC, FoldTailByMasking))
3440 if (MaxVF.isScalable()) {
3441 Result.ScalableVF = MaxVF;
3442 LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
3443 << "\n");
3444 }
3445
3446 return Result;
3447}
3448
3449FixedScalableVFPair
3450LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
3451 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
3452 // TODO: It may be useful to do this since the check is still likely to be
3453 // dynamically uniform if the target can skip it.
3454 reportVectorizationFailure(
3455 "Not inserting runtime ptr check for divergent target",
3456 "runtime pointer checks needed. Not enabled for divergent target",
3457 "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
3458 return FixedScalableVFPair::getNone();
3459 }
3460
3461 ScalarEvolution *SE = PSE.getSE();
3462 ElementCount TC = getSmallConstantTripCount(SE, TheLoop);
3463 unsigned MaxTC = PSE.getSmallConstantMaxTripCount();
3464 if (!MaxTC && ScalarEpilogueStatus == CM_ScalarEpilogueAllowed)
3466 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
3467 if (TC != ElementCount::getFixed(MaxTC))
3468 LLVM_DEBUG(dbgs() << "LV: Found maximum trip count: " << MaxTC << '\n');
3469 if (TC.isScalar()) {
3470 reportVectorizationFailure("Single iteration (non) loop",
3471 "loop trip count is one, irrelevant for vectorization",
3472 "SingleIterationLoop", ORE, TheLoop);
3473 return FixedScalableVFPair::getNone();
3474 }
3475
3476 // If BTC matches the widest induction type and is -1 then the trip count
3477 // computation will wrap to 0 and the vector trip count will be 0. Do not try
3478 // to vectorize.
3479 const SCEV *BTC = SE->getBackedgeTakenCount(TheLoop);
3480 if (!isa<SCEVCouldNotCompute>(BTC) &&
3481 BTC->getType()->getScalarSizeInBits() >=
3482 Legal->getWidestInductionType()->getScalarSizeInBits() &&
3483 SE->isKnownPredicate(CmpInst::ICMP_EQ, BTC,
3484 SE->getMinusOne(BTC->getType()))) {
3485 reportVectorizationFailure(
3486 "Trip count computation wrapped",
3487 "backedge-taken count is -1, loop trip count wrapped to 0",
3488 "TripCountWrapped", ORE, TheLoop);
3489 return FixedScalableVFPair::getNone();
3490 }
3491
3492 switch (ScalarEpilogueStatus) {
3493 case CM_ScalarEpilogueAllowed:
3494 return computeFeasibleMaxVF(MaxTC, UserVF, UserIC, false);
3495 case CM_ScalarEpilogueNotAllowedUsePredicate:
3496 [[fallthrough]];
3497 case CM_ScalarEpilogueNotNeededUsePredicate:
3498 LLVM_DEBUG(
3499 dbgs() << "LV: vector predicate hint/switch found.\n"
3500 << "LV: Not allowing scalar epilogue, creating predicated "
3501 << "vector loop.\n");
3502 break;
3503 case CM_ScalarEpilogueNotAllowedLowTripLoop:
3504 // fallthrough as a special case of OptForSize
3505 case CM_ScalarEpilogueNotAllowedOptSize:
3506 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
3507 LLVM_DEBUG(
3508 dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
3509 else
3510 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
3511 << "count.\n");
3512
3513 // Bail if runtime checks are required, which are not good when optimising
3514 // for size.
3515 if (runtimeChecksRequired())
3516 return FixedScalableVFPair::getNone();
3517
3518 break;
3519 }
3520
3521 // Now try the tail folding
3522
3523 // Invalidate interleave groups that require an epilogue if we can't mask
3524 // the interleave-group.
3525 if (!useMaskedInterleavedAccesses(TTI)) {
3526 assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
3527 "No decisions should have been taken at this point");
3528 // Note: There is no need to invalidate any cost modeling decisions here, as
3529 // none were taken so far.
3530 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
3531 }
3532
3533 FixedScalableVFPair MaxFactors =
3534 computeFeasibleMaxVF(MaxTC, UserVF, UserIC, true);
3535
3536 // Avoid tail folding if the trip count is known to be a multiple of any VF
3537 // we choose.
3538 std::optional<unsigned> MaxPowerOf2RuntimeVF =
3539 MaxFactors.FixedVF.getFixedValue();
3540 if (MaxFactors.ScalableVF) {
3541 std::optional<unsigned> MaxVScale = getMaxVScale(*TheFunction, TTI);
3542 if (MaxVScale) {
3543 MaxPowerOf2RuntimeVF = std::max<unsigned>(
3544 *MaxPowerOf2RuntimeVF,
3545 *MaxVScale * MaxFactors.ScalableVF.getKnownMinValue());
3546 } else
3547 MaxPowerOf2RuntimeVF = std::nullopt; // Stick with tail-folding for now.
3548 }
3549
3550 auto NoScalarEpilogueNeeded = [this, &UserIC](unsigned MaxVF) {
3551 // Return false if the loop is neither a single-latch-exit loop nor an
3552 // early-exit loop as tail-folding is not supported in that case.
3553 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch() &&
3554 !Legal->hasUncountableEarlyExit())
3555 return false;
3556 unsigned MaxVFtimesIC = UserIC ? MaxVF * UserIC : MaxVF;
3557 ScalarEvolution *SE = PSE.getSE();
3558 // Calling getSymbolicMaxBackedgeTakenCount enables support for loops
3559 // with uncountable exits. For countable loops, the symbolic maximum must
3560 // remain identical to the known back-edge taken count.
3561 const SCEV *BackedgeTakenCount = PSE.getSymbolicMaxBackedgeTakenCount();
3562 assert((Legal->hasUncountableEarlyExit() ||
3563 BackedgeTakenCount == PSE.getBackedgeTakenCount()) &&
3564 "Invalid loop count");
3565 const SCEV *ExitCount = SE->getAddExpr(
3566 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
3567 const SCEV *Rem = SE->getURemExpr(
3568 SE->applyLoopGuards(ExitCount, TheLoop),
3569 SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
3570 return Rem->isZero();
3571 };
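// For example, with a known trip count of 64 and MaxVF * UserIC == 8 the
// remainder 64 % 8 == 0, so no tail iterations remain and neither a scalar
// epilogue nor tail folding is needed for that VF.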
3572
3573 if (MaxPowerOf2RuntimeVF > 0u) {
3574 assert((UserVF.isNonZero() || isPowerOf2_32(*MaxPowerOf2RuntimeVF)) &&
3575 "MaxFixedVF must be a power of 2");
3576 if (NoScalarEpilogueNeeded(*MaxPowerOf2RuntimeVF)) {
3577 // Accept MaxFixedVF if we do not have a tail.
3578 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
3579 return MaxFactors;
3580 }
3581 }
3582
3583 auto ExpectedTC = getSmallBestKnownTC(PSE, TheLoop);
3584 if (ExpectedTC && ExpectedTC->isFixed() &&
3585 ExpectedTC->getFixedValue() <=
3586 TTI.getMinTripCountTailFoldingThreshold()) {
3587 if (MaxPowerOf2RuntimeVF > 0u) {
3588 // If we have a low-trip-count, and the fixed-width VF is known to divide
3589 // the trip count but the scalable factor does not, use the fixed-width
3590 // factor in preference to allow the generation of a non-predicated loop.
3591 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedLowTripLoop &&
3592 NoScalarEpilogueNeeded(MaxFactors.FixedVF.getFixedValue())) {
3593 LLVM_DEBUG(dbgs() << "LV: Picking a fixed-width so that no tail will "
3594 "remain for any chosen VF.\n");
3595 MaxFactors.ScalableVF = ElementCount::getScalable(0);
3596 return MaxFactors;
3597 }
3598 }
3599
3600 reportVectorizationFailure(
3601 "The trip count is below the minimal threshold value.",
3602 "loop trip count is too low, avoiding vectorization", "LowTripCount",
3603 ORE, TheLoop);
3604 return FixedScalableVFPair::getNone();
3605 }
3606
3607 // If we don't know the precise trip count, or if the trip count that we
3608 // found modulo the vectorization factor is not zero, try to fold the tail
3609 // by masking.
3610 // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
3611 bool ContainsScalableVF = MaxFactors.ScalableVF.isNonZero();
3612 setTailFoldingStyle(ContainsScalableVF, UserIC);
3613 if (foldTailByMasking()) {
3614 if (foldTailWithEVL()) {
3615 LLVM_DEBUG(
3616 dbgs()
3617 << "LV: tail is folded with EVL, forcing unroll factor to be 1. Will "
3618 "try to generate VP Intrinsics with scalable vector "
3619 "factors only.\n");
3620 // Tail folded loop using VP intrinsics restricts the VF to be scalable
3621 // for now.
3622 // TODO: extend it for fixed vectors, if required.
3623 assert(ContainsScalableVF && "Expected scalable vector factor.");
3624
3625 MaxFactors.FixedVF = ElementCount::getFixed(1);
3626 }
3627 return MaxFactors;
3628 }
3629
3630 // If there was a tail-folding hint/switch, but we can't fold the tail by
3631 // masking, fallback to a vectorization with a scalar epilogue.
3632 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
3633 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
3634 "scalar epilogue instead.\n");
3635 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
3636 return MaxFactors;
3637 }
3638
3639 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
3640 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
3641 return FixedScalableVFPair::getNone();
3642 }
3643
3644 if (TC.isZero()) {
3645 reportVectorizationFailure(
3646 "unable to calculate the loop count due to complex control flow",
3647 "UnknownLoopCountComplexCFG", ORE, TheLoop);
3648 return FixedScalableVFPair::getNone();
3649 }
3650
3651 reportVectorizationFailure(
3652 "Cannot optimize for size and vectorize at the same time.",
3653 "cannot optimize for size and vectorize at the same time. "
3654 "Enable vectorization of this loop with '#pragma clang loop "
3655 "vectorize(enable)' when compiling with -Os/-Oz",
3656 "NoTailLoopWithOptForSize", ORE, TheLoop);
3657 return FixedScalableVFPair::getNone();
3658}
3659
3661 ElementCount VF) {
3662 if (ConsiderRegPressure.getNumOccurrences())
3663 return ConsiderRegPressure;
3664
3665 // TODO: We should eventually consider register pressure for all targets. The
3666 // TTI hook is temporary whilst target-specific issues are being fixed.
3667 if (TTI.shouldConsiderVectorizationRegPressure())
3668 return true;
3669
3670 if (!useMaxBandwidth(VF.isScalable()
3673 return false;
3674 // Only calculate register pressure for VFs enabled by MaxBandwidth.
3676 VF, VF.isScalable() ? MaxPermissibleVFWithoutMaxBW.ScalableVF
3678}
3679
3680bool LoopVectorizationCostModel::useMaxBandwidth(
3681 TargetTransformInfo::RegisterKind RegKind) {
3682 return MaximizeBandwidth || (MaximizeBandwidth.getNumOccurrences() == 0 &&
3683 (TTI.shouldMaximizeVectorBandwidth(RegKind) ||
3685 Legal->hasVectorCallVariants())));
3686}
3687
3688ElementCount LoopVectorizationCostModel::clampVFByMaxTripCount(
3689 ElementCount VF, unsigned MaxTripCount, unsigned UserIC,
3690 bool FoldTailByMasking) const {
3691 unsigned EstimatedVF = VF.getKnownMinValue();
3692 if (VF.isScalable() && TheFunction->hasFnAttribute(Attribute::VScaleRange)) {
3693 auto Attr = TheFunction->getFnAttribute(Attribute::VScaleRange);
3694 auto Min = Attr.getVScaleRangeMin();
3695 EstimatedVF *= Min;
3696 }
3697
3698 // When a scalar epilogue is required, at least one iteration of the scalar
3699 // loop has to execute. Adjust MaxTripCount accordingly to avoid picking a
3700 // max VF that results in a dead vector loop.
3701 if (MaxTripCount > 0 && requiresScalarEpilogue(true))
3702 MaxTripCount -= 1;
3703
3704 // When the user specifies an interleave count, we need to ensure that
3705 // VF * UserIC <= MaxTripCount to avoid a dead vector loop.
3706 unsigned IC = UserIC > 0 ? UserIC : 1;
3707 unsigned EstimatedVFTimesIC = EstimatedVF * IC;
3708
3709 if (MaxTripCount && MaxTripCount <= EstimatedVFTimesIC &&
3710 (!FoldTailByMasking || isPowerOf2_32(MaxTripCount))) {
3711 // If upper bound loop trip count (TC) is known at compile time there is no
3712 // point in choosing VF greater than TC / IC (as done in the loop below).
3713 // Select maximum power of two which doesn't exceed TC / IC. If VF is
3714 // scalable, we only fall back on a fixed VF when the TC is less than or
3715 // equal to the known number of lanes.
3716 auto ClampedUpperTripCount = llvm::bit_floor(MaxTripCount / IC);
3717 if (ClampedUpperTripCount == 0)
3718 ClampedUpperTripCount = 1;
3719 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not "
3720 "exceeding the constant trip count"
3721 << (UserIC > 0 ? " divided by UserIC" : "") << ": "
3722 << ClampedUpperTripCount << "\n");
3723 return ElementCount::get(ClampedUpperTripCount,
3724 FoldTailByMasking ? VF.isScalable() : false);
3725 }
3726 return VF;
3727}
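// For example, with MaxTripCount == 17, UserIC == 2, no tail folding and an
// estimated VF of 16 lanes: 17 <= 16 * 2, so the VF is clamped to
// bit_floor(17 / 2) == 8, keeping the vector loop from being dead.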
3728
3729ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
3730 unsigned MaxTripCount, unsigned SmallestType, unsigned WidestType,
3731 ElementCount MaxSafeVF, unsigned UserIC, bool FoldTailByMasking) {
3732 bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
3733 const TypeSize WidestRegister = TTI.getRegisterBitWidth(
3734 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
3735 : TargetTransformInfo::RGK_FixedWidthVector);
3736
3737 // Convenience function to return the minimum of two ElementCounts.
3738 auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
3739 assert((LHS.isScalable() == RHS.isScalable()) &&
3740 "Scalable flags must match");
3741 return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
3742 };
3743
3744 // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
3745 // Note that both WidestRegister and WidestType may not be powers of 2.
3746 auto MaxVectorElementCount = ElementCount::get(
3747 llvm::bit_floor(WidestRegister.getKnownMinValue() / WidestType),
3748 ComputeScalableMaxVF);
3749 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
3750 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
3751 << (MaxVectorElementCount * WidestType) << " bits.\n");
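// For example, a 256-bit widest register with a widest element type of 32
// bits yields bit_floor(256 / 32) == 8 elements, which is then further
// limited by the dependence-distance bound in MaxSafeVF above.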
3752
3753 if (!MaxVectorElementCount) {
3754 LLVM_DEBUG(dbgs() << "LV: The target has no "
3755 << (ComputeScalableMaxVF ? "scalable" : "fixed")
3756 << " vector registers.\n");
3757 return ElementCount::getFixed(1);
3758 }
3759
3760 ElementCount MaxVF = clampVFByMaxTripCount(
3761 MaxVectorElementCount, MaxTripCount, UserIC, FoldTailByMasking);
3762 // If the MaxVF was already clamped, there's no point in trying to pick a
3763 // larger one.
3764 if (MaxVF != MaxVectorElementCount)
3765 return MaxVF;
3766
3767 TargetTransformInfo::RegisterKind RegKind =
3768 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
3769 : TargetTransformInfo::RGK_FixedWidthVector;
3770
3771 if (MaxVF.isScalable())
3772 MaxPermissibleVFWithoutMaxBW.ScalableVF = MaxVF;
3773 else
3774 MaxPermissibleVFWithoutMaxBW.FixedVF = MaxVF;
3775
3776 if (useMaxBandwidth(RegKind)) {
3777 auto MaxVectorElementCountMaxBW = ElementCount::get(
3778 llvm::bit_floor(WidestRegister.getKnownMinValue() / SmallestType),
3779 ComputeScalableMaxVF);
3780 MaxVF = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
3781
3782 if (ElementCount MinVF =
3783 TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
3784 if (ElementCount::isKnownLT(MaxVF, MinVF)) {
3785 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
3786 << ") with target's minimum: " << MinVF << '\n');
3787 MaxVF = MinVF;
3788 }
3789 }
3790
3791 MaxVF =
3792 clampVFByMaxTripCount(MaxVF, MaxTripCount, UserIC, FoldTailByMasking);
3793
3794 assert((MaxVectorElementCount == MaxVF ||
3795 (WideningDecisions.empty() && CallWideningDecisions.empty() &&
3796 Uniforms.empty() && Scalars.empty())) &&
3797 "No decisions should have been taken at this point");
3798 }
3799 return MaxVF;
3800}
3801
3802bool LoopVectorizationPlanner::isMoreProfitable(const VectorizationFactor &A,
3803 const VectorizationFactor &B,
3804 const unsigned MaxTripCount,
3805 bool HasTail,
3806 bool IsEpilogue) const {
3807 InstructionCost CostA = A.Cost;
3808 InstructionCost CostB = B.Cost;
3809
3810 // Improve estimate for the vector width if it is scalable.
3811 unsigned EstimatedWidthA = A.Width.getKnownMinValue();
3812 unsigned EstimatedWidthB = B.Width.getKnownMinValue();
3813 if (std::optional<unsigned> VScale = CM.getVScaleForTuning()) {
3814 if (A.Width.isScalable())
3815 EstimatedWidthA *= *VScale;
3816 if (B.Width.isScalable())
3817 EstimatedWidthB *= *VScale;
3818 }
3819
3820 // When optimizing for size choose whichever is smallest, which will be the
3821 // one with the smallest cost for the whole loop. On a tie pick the larger
3822 // vector width, on the assumption that throughput will be greater.
3823 if (CM.CostKind == TTI::TCK_CodeSize)
3824 return CostA < CostB ||
3825 (CostA == CostB && EstimatedWidthA > EstimatedWidthB);
3826
3827 // Assume vscale may be larger than 1 (or the value being tuned for),
3828 // so that scalable vectorization is slightly favorable over fixed-width
3829 // vectorization.
3830 bool PreferScalable = !TTI.preferFixedOverScalableIfEqualCost(IsEpilogue) &&
3831 A.Width.isScalable() && !B.Width.isScalable();
3832
3833 auto CmpFn = [PreferScalable](const InstructionCost &LHS,
3834 const InstructionCost &RHS) {
3835 return PreferScalable ? LHS <= RHS : LHS < RHS;
3836 };
3837
3838 // To avoid the need for FP division:
3839 // (CostA / EstimatedWidthA) < (CostB / EstimatedWidthB)
3840 // <=> (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA)
3841 bool LowerCostWithoutTC =
3842 CmpFn(CostA * EstimatedWidthB, CostB * EstimatedWidthA);
3843 if (!MaxTripCount)
3844 return LowerCostWithoutTC;
3845
3846 auto GetCostForTC = [MaxTripCount, HasTail](unsigned VF,
3847 InstructionCost VectorCost,
3848 InstructionCost ScalarCost) {
3849 // If the trip count is a known (possibly small) constant, the trip count
3850 // will be rounded up to an integer number of iterations under
3851 // FoldTailByMasking. The total cost in that case will be
3852 // VecCost*ceil(TripCount/VF). When not folding the tail, the total
3853 // cost will be VecCost*floor(TC/VF) + ScalarCost*(TC%VF). There will be
3854 // some extra overheads, but for the purpose of comparing the costs of
3855 // different VFs we can use this to compare the total loop-body cost
3856 // expected after vectorization.
3857 if (HasTail)
3858 return VectorCost * (MaxTripCount / VF) +
3859 ScalarCost * (MaxTripCount % VF);
3860 return VectorCost * divideCeil(MaxTripCount, VF);
3861 };
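// For instance, with MaxTripCount == 15, a scalar iteration cost of 2 and a
// tail: VF 8 at vector cost 8 gives 8 * (15 / 8) + 2 * (15 % 8) == 22, while
// VF 4 at vector cost 5 gives 5 * (15 / 4) + 2 * (15 % 4) == 21, so the
// narrower VF wins once the remaining scalar iterations are accounted for,
// even though its per-lane cost (5/4) is higher than 8/8.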
3862
3863 auto RTCostA = GetCostForTC(EstimatedWidthA, CostA, A.ScalarCost);
3864 auto RTCostB = GetCostForTC(EstimatedWidthB, CostB, B.ScalarCost);
3865 bool LowerCostWithTC = CmpFn(RTCostA, RTCostB);
3866 LLVM_DEBUG(if (LowerCostWithTC != LowerCostWithoutTC) {
3867 dbgs() << "LV: VF " << (LowerCostWithTC ? A.Width : B.Width)
3868 << " has lower cost than VF "
3869 << (LowerCostWithTC ? B.Width : A.Width)
3870 << " when taking the cost of the remaining scalar loop iterations "
3871 "into consideration for a maximum trip count of "
3872 << MaxTripCount << ".\n";
3873 });
3874 return LowerCostWithTC;
3875}
3876
3877bool LoopVectorizationPlanner::isMoreProfitable(const VectorizationFactor &A,
3878 const VectorizationFactor &B,
3879 bool HasTail,
3880 bool IsEpilogue) const {
3881 const unsigned MaxTripCount = PSE.getSmallConstantMaxTripCount();
3882 return LoopVectorizationPlanner::isMoreProfitable(A, B, MaxTripCount, HasTail,
3883 IsEpilogue);
3884}
3885
3888 using RecipeVFPair = std::pair<VPRecipeBase *, ElementCount>;
3889 SmallVector<RecipeVFPair> InvalidCosts;
3890 for (const auto &Plan : VPlans) {
3891 for (ElementCount VF : Plan->vectorFactors()) {
3892 // The VPlan-based cost model is designed for computing vector cost.
3893 // Querying the VPlan-based cost model with a scalar VF will cause some
3894 // errors because we expect the VF to be a vector VF for most of the widen
3895 // recipes.
3896 if (VF.isScalar())
3897 continue;
3898
3899 VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind, CM.PSE,
3900 OrigLoop);
3901 precomputeCosts(*Plan, VF, CostCtx);
3902 auto Iter = vp_depth_first_deep(Plan->getVectorLoopRegion()->getEntry());
3903 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
3904 for (auto &R : *VPBB) {
3905 if (!R.cost(VF, CostCtx).isValid())
3906 InvalidCosts.emplace_back(&R, VF);
3907 }
3908 }
3909 }
3910 }
3911 if (InvalidCosts.empty())
3912 return;
3913
3914 // Emit a report of VFs with invalid costs in the loop.
3915
3916 // Group the remarks per recipe, keeping the recipe order from InvalidCosts.
3917 DenseMap<VPRecipeBase *, unsigned> Numbering;
3918 unsigned I = 0;
3919 for (auto &Pair : InvalidCosts)
3920 if (Numbering.try_emplace(Pair.first, I).second)
3921 ++I;
3922
3923 // Sort the list, first on recipe(number) then on VF.
3924 sort(InvalidCosts, [&Numbering](RecipeVFPair &A, RecipeVFPair &B) {
3925 unsigned NA = Numbering[A.first];
3926 unsigned NB = Numbering[B.first];
3927 if (NA != NB)
3928 return NA < NB;
3929 return ElementCount::isKnownLT(A.second, B.second);
3930 });
3931
3932 // For a list of ordered recipe-VF pairs:
3933 // [(load, VF1), (load, VF2), (store, VF1)]
3934 // group the recipes together to emit separate remarks for:
3935 // load (VF1, VF2)
3936 // store (VF1)
3937 auto Tail = ArrayRef<RecipeVFPair>(InvalidCosts);
3938 auto Subset = ArrayRef<RecipeVFPair>();
3939 do {
3940 if (Subset.empty())
3941 Subset = Tail.take_front(1);
3942
3943 VPRecipeBase *R = Subset.front().first;
3944
3945 unsigned Opcode =
3946 TypeSwitch<const VPRecipeBase *, unsigned>(R)
3947 .Case([](const VPHeaderPHIRecipe *R) { return Instruction::PHI; })
3948 .Case(
3949 [](const VPWidenStoreRecipe *R) { return Instruction::Store; })
3950 .Case([](const VPWidenLoadRecipe *R) { return Instruction::Load; })
3951 .Case<VPWidenCallRecipe, VPWidenIntrinsicRecipe>(
3952 [](const auto *R) { return Instruction::Call; })
3955 [](const auto *R) { return R->getOpcode(); })
3956 .Case([](const VPInterleaveRecipe *R) {
3957 return R->getStoredValues().empty() ? Instruction::Load
3958 : Instruction::Store;
3959 })
3960 .Case([](const VPReductionRecipe *R) {
3961 return RecurrenceDescriptor::getOpcode(R->getRecurrenceKind());
3962 });
3963
3964 // If the next recipe is different, or if there are no other pairs,
3965 // emit a remark for the collated subset. e.g.
3966 // [(load, VF1), (load, VF2))]
3967 // to emit:
3968 // remark: invalid costs for 'load' at VF=(VF1, VF2)
3969 if (Subset == Tail || Tail[Subset.size()].first != R) {
3970 std::string OutString;
3971 raw_string_ostream OS(OutString);
3972 assert(!Subset.empty() && "Unexpected empty range");
3973 OS << "Recipe with invalid costs prevented vectorization at VF=(";
3974 for (const auto &Pair : Subset)
3975 OS << (Pair.second == Subset.front().second ? "" : ", ") << Pair.second;
3976 OS << "):";
3977 if (Opcode == Instruction::Call) {
3978 StringRef Name = "";
3979 if (auto *Int = dyn_cast<VPWidenIntrinsicRecipe>(R)) {
3980 Name = Int->getIntrinsicName();
3981 } else {
3982 auto *WidenCall = dyn_cast<VPWidenCallRecipe>(R);
3983 Function *CalledFn =
3984 WidenCall ? WidenCall->getCalledScalarFunction()
3985 : cast<Function>(R->getOperand(R->getNumOperands() - 1)
3986 ->getLiveInIRValue());
3987 Name = CalledFn->getName();
3988 }
3989 OS << " call to " << Name;
3990 } else
3991 OS << " " << Instruction::getOpcodeName(Opcode);
3992 reportVectorizationInfo(OutString, "InvalidCost", ORE, OrigLoop, nullptr,
3993 R->getDebugLoc());
3994 Tail = Tail.drop_front(Subset.size());
3995 Subset = {};
3996 } else
3997 // Grow the subset by one element
3998 Subset = Tail.take_front(Subset.size() + 1);
3999 } while (!Tail.empty());
4000}
4001
4002/// Check if any recipe of \p Plan will generate a vector value, which will be
4003/// assigned a vector register.
4004static bool willGenerateVectors(VPlan &Plan, ElementCount VF,
4005 const TargetTransformInfo &TTI) {
4006 assert(VF.isVector() && "Checking a scalar VF?");
4007 VPTypeAnalysis TypeInfo(Plan);
4008 DenseSet<VPRecipeBase *> EphemeralRecipes;
4009 collectEphemeralRecipesForVPlan(Plan, EphemeralRecipes);
4010 // Set of already visited types.
4011 DenseSet<Type *> Visited;
4014 for (VPRecipeBase &R : *VPBB) {
4015 if (EphemeralRecipes.contains(&R))
4016 continue;
4017 // Continue early if the recipe is considered to not produce a vector
4018 // result. Note that this includes VPInstruction where some opcodes may
4019 // produce a vector, to preserve existing behavior as VPInstructions model
4020 // aspects not directly mapped to existing IR instructions.
4021 switch (R.getVPRecipeID()) {
4022 case VPRecipeBase::VPDerivedIVSC:
4023 case VPRecipeBase::VPScalarIVStepsSC:
4024 case VPRecipeBase::VPReplicateSC:
4025 case VPRecipeBase::VPInstructionSC:
4026 case VPRecipeBase::VPCanonicalIVPHISC:
4027 case VPRecipeBase::VPCurrentIterationPHISC:
4028 case VPRecipeBase::VPVectorPointerSC:
4029 case VPRecipeBase::VPVectorEndPointerSC:
4030 case VPRecipeBase::VPExpandSCEVSC:
4031 case VPRecipeBase::VPPredInstPHISC:
4032 case VPRecipeBase::VPBranchOnMaskSC:
4033 continue;
4034 case VPRecipeBase::VPReductionSC:
4035 case VPRecipeBase::VPActiveLaneMaskPHISC:
4036 case VPRecipeBase::VPWidenCallSC:
4037 case VPRecipeBase::VPWidenCanonicalIVSC:
4038 case VPRecipeBase::VPWidenCastSC:
4039 case VPRecipeBase::VPWidenGEPSC:
4040 case VPRecipeBase::VPWidenIntrinsicSC:
4041 case VPRecipeBase::VPWidenSC:
4042 case VPRecipeBase::VPBlendSC:
4043 case VPRecipeBase::VPFirstOrderRecurrencePHISC:
4044 case VPRecipeBase::VPHistogramSC:
4045 case VPRecipeBase::VPWidenPHISC:
4046 case VPRecipeBase::VPWidenIntOrFpInductionSC:
4047 case VPRecipeBase::VPWidenPointerInductionSC:
4048 case VPRecipeBase::VPReductionPHISC:
4049 case VPRecipeBase::VPInterleaveEVLSC:
4050 case VPRecipeBase::VPInterleaveSC:
4051 case VPRecipeBase::VPWidenLoadEVLSC:
4052 case VPRecipeBase::VPWidenLoadSC:
4053 case VPRecipeBase::VPWidenStoreEVLSC:
4054 case VPRecipeBase::VPWidenStoreSC:
4055 break;
4056 default:
4057 llvm_unreachable("unhandled recipe");
4058 }
4059
4060 auto WillGenerateTargetVectors = [&TTI, VF](Type *VectorTy) {
4061 unsigned NumLegalParts = TTI.getNumberOfParts(VectorTy);
4062 if (!NumLegalParts)
4063 return false;
4064 if (VF.isScalable()) {
4065 // <vscale x 1 x iN> is assumed to be profitable over iN because
4066 // scalable registers are a distinct register class from scalar
4067 // ones. If we ever find a target which wants to lower scalable
4068 // vectors back to scalars, we'll need to update this code to
4069 // explicitly ask TTI about the register class uses for each part.
4070 return NumLegalParts <= VF.getKnownMinValue();
4071 }
4072 // Two or more elements sharing a register means they are vectorized.
4073 return NumLegalParts < VF.getFixedValue();
4074 };
4075
4076 // If there is no def and it is not a store (e.g., a branch), continue - no value to check.
4077 if (R.getNumDefinedValues() == 0 &&
4079 continue;
4080 // For multi-def recipes (currently only interleaved loads), it suffices to
4081 // check the first def only.
4082 // For stores, check their stored value; for interleaved stores it suffices
4083 // to check the first stored value only. In all cases this is the second
4084 // operand.
4085 VPValue *ToCheck =
4086 R.getNumDefinedValues() >= 1 ? R.getVPValue(0) : R.getOperand(1);
4087 Type *ScalarTy = TypeInfo.inferScalarType(ToCheck);
4088 if (!Visited.insert({ScalarTy}).second)
4089 continue;
4090 Type *WideTy = toVectorizedTy(ScalarTy, VF);
4091 if (any_of(getContainedTypes(WideTy), WillGenerateTargetVectors))
4092 return true;
4093 }
4094 }
4095
4096 return false;
4097}
4098
4099static bool hasReplicatorRegion(VPlan &Plan) {
4101 Plan.getVectorLoopRegion()->getEntry())),
4102 [](auto *VPRB) { return VPRB->isReplicator(); });
4103}
4104
4105/// Returns true if the VPlan contains a VPReductionPHIRecipe with
4106/// FindLast recurrence kind.
4107static bool hasFindLastReductionPhi(VPlan &Plan) {
4109 [](VPRecipeBase &R) {
4110 auto *RedPhi = dyn_cast<VPReductionPHIRecipe>(&R);
4111 return RedPhi &&
4112 RecurrenceDescriptor::isFindLastRecurrenceKind(
4113 RedPhi->getRecurrenceKind());
4114 });
4115}
4116
4117/// Returns true if the VPlan contains header phi recipes that are not currently
4118/// supported for epilogue vectorization.
4120 return any_of(
4122 [](VPRecipeBase &R) {
4123 if (auto *WidenInd = dyn_cast<VPWidenIntOrFpInductionRecipe>(&R))
4124 return !WidenInd->getPHINode();
4125 auto *RedPhi = dyn_cast<VPReductionPHIRecipe>(&R);
4126 if (!RedPhi)
4127 return false;
4128 if (RecurrenceDescriptor::isFindLastRecurrenceKind(
4129 RedPhi->getRecurrenceKind()) ||
4130 !RedPhi->getUnderlyingValue())
4131 return true;
4132 // FindIV reductions with sunk expressions are not yet supported for
4133 // epilogue vectorization: the resume value from the main loop is in
4134 // expression domain (e.g., mul(ReducedIV, 3)), but the epilogue tracks
4135 // raw IV values. A sunk expression is identified by a non-VPInstruction
4136 // user of ComputeReductionResult.
4137 if (RecurrenceDescriptor::isFindIVRecurrenceKind(
4138 RedPhi->getRecurrenceKind())) {
4139 auto *RdxResult = vputils::findComputeReductionResult(RedPhi);
4140 assert(RdxResult &&
4141 "FindIV reduction must have ComputeReductionResult");
4142 return any_of(RdxResult->users(),
4143 [](VPUser *U) { return !isa<VPInstruction>(U); });
4144 }
4145 return false;
4146 });
4147}
4148
4149bool LoopVectorizationPlanner::isCandidateForEpilogueVectorization(
4150 VPlan &MainPlan) const {
4151 // Cross iteration phis such as fixed-order recurrences and FMaxNum/FMinNum
4152 // reductions need special handling and are currently unsupported.
4153 if (any_of(OrigLoop->getHeader()->phis(), [&](PHINode &Phi) {
4154 if (!Legal->isReductionVariable(&Phi))
4155 return Legal->isFixedOrderRecurrence(&Phi);
4156 RecurKind Kind =
4157 Legal->getRecurrenceDescriptor(&Phi).getRecurrenceKind();
4158 return RecurrenceDescriptor::isFPMinMaxNumRecurrenceKind(Kind);
4159 }))
4160 return false;
4161
4162 // FindLast reductions and inductions without underlying PHI require special
4163 // handling and are currently not supported for epilogue vectorization.
4164 if (hasUnsupportedHeaderPhiRecipe(MainPlan))
4165 return false;
4166
4167 // Phis with uses outside of the loop require special handling and are
4168 // currently unsupported.
4169 for (const auto &Entry : Legal->getInductionVars()) {
4170 // Look for uses of the value of the induction at the last iteration.
4171 Value *PostInc =
4172 Entry.first->getIncomingValueForBlock(OrigLoop->getLoopLatch());
4173 for (User *U : PostInc->users())
4174 if (!OrigLoop->contains(cast<Instruction>(U)))
4175 return false;
4176 // Look for uses of penultimate value of the induction.
4177 for (User *U : Entry.first->users())
4178 if (!OrigLoop->contains(cast<Instruction>(U)))
4179 return false;
4180 }
4181
4182 // Epilogue vectorization code has not been audited to ensure it handles
4183 // non-latch exits properly. It may be fine, but it needs to be audited and
4184 // tested.
4185 // TODO: Add support for loops with an early exit.
4186 if (OrigLoop->getExitingBlock() != OrigLoop->getLoopLatch())
4187 return false;
4188
4189 return true;
4190}
4191
4192bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
4193 const ElementCount VF, const unsigned IC) const {
4194 // FIXME: We need a much better cost-model to take different parameters such
4195 // as register pressure, code size increase and cost of extra branches into
4196 // account. For now we apply a very crude heuristic and only consider loops
4197 // with vectorization factors larger than a certain value.
4198
4199 // Allow the target to opt out.
4200 if (!TTI.preferEpilogueVectorization(VF * IC))
4201 return false;
4202
4203 unsigned MinVFThreshold = EpilogueVectorizationMinVF.getNumOccurrences() > 0
4204 ? EpilogueVectorizationMinVF
4205 : TTI.getEpilogueVectorizationMinVF();
4206 return estimateElementCount(VF * IC, VScaleForTuning) >= MinVFThreshold;
4207}
4208
4210 VPlan &MainPlan, ElementCount MainLoopVF, unsigned IC) {
4211 if (!EnableEpilogueVectorization) {
4212 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n");
4213 return nullptr;
4214 }
4215
4216 if (!CM.isScalarEpilogueAllowed()) {
4217 LLVM_DEBUG(dbgs() << "LEV: Unable to vectorize epilogue because no "
4218 "epilogue is allowed.\n");
4219 return nullptr;
4220 }
4221
4222 // Not really a cost consideration, but check for unsupported cases here to
4223 // simplify the logic.
4224 if (!isCandidateForEpilogueVectorization(MainPlan)) {
4225 LLVM_DEBUG(dbgs() << "LEV: Unable to vectorize epilogue because the loop "
4226 "is not a supported candidate.\n");
4227 return nullptr;
4228 }
4229
4232 IC * estimateElementCount(MainLoopVF, CM.getVScaleForTuning())) {
4233 // Note that the main loop leaves IC * MainLoopVF iterations iff a scalar
4234 // epilogue is required, but then the epilogue loop also requires a scalar
4235 // epilogue.
4236 LLVM_DEBUG(dbgs() << "LEV: Forced epilogue VF results in dead epilogue "
4237 "vector loop, skipping vectorizing epilogue.\n");
4238 return nullptr;
4239 }
4240
4241 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n");
4242 ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF);
4243 if (hasPlanWithVF(ForcedEC)) {
4244 std::unique_ptr<VPlan> Clone(getPlanFor(ForcedEC).duplicate());
4245 Clone->setVF(ForcedEC);
4246 return Clone;
4247 }
4248
4249 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization forced factor is not "
4250 "viable.\n");
4251 return nullptr;
4252 }
4253
4254 if (OrigLoop->getHeader()->getParent()->hasOptSize()) {
4255 LLVM_DEBUG(
4256 dbgs() << "LEV: Epilogue vectorization skipped due to opt for size.\n");
4257 return nullptr;
4258 }
4259
4260 if (!CM.isEpilogueVectorizationProfitable(MainLoopVF, IC)) {
4261 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
4262 "this loop\n");
4263 return nullptr;
4264 }
4265
4266 // Check if a plan's vector loop processes fewer iterations than VF (e.g.
4267 // when interleave groups have been narrowed by narrowInterleaveGroups) and
4268 // return the adjusted, effective VF.
4269 using namespace VPlanPatternMatch;
4270 auto GetEffectiveVF = [](VPlan &Plan, ElementCount VF) -> ElementCount {
4271 auto *Exiting = Plan.getVectorLoopRegion()->getExitingBasicBlock();
4272 if (match(&Exiting->back(),
4273 m_BranchOnCount(m_Add(m_CanonicalIV(), m_Specific(&Plan.getUF())),
4274 m_VPValue())))
4275 return ElementCount::get(1, VF.isScalable());
4276 return VF;
4277 };
4278
4279 // Check if the main loop processes fewer than MainLoopVF elements per
4280 // iteration (e.g. due to narrowing interleave groups). Adjust MainLoopVF
4281 // as needed.
4282 MainLoopVF = GetEffectiveVF(MainPlan, MainLoopVF);
4283
4284 // If MainLoopVF = vscale x 2, and vscale is expected to be 4, then we know
4285 // the main loop handles 8 lanes per iteration. We could still benefit from
4286 // vectorizing the epilogue loop with VF=4.
4287 ElementCount EstimatedRuntimeVF = ElementCount::getFixed(
4288 estimateElementCount(MainLoopVF, CM.getVScaleForTuning()));
4289
4290 Type *TCType = Legal->getWidestInductionType();
4291 const SCEV *RemainingIterations = nullptr;
4292 unsigned MaxTripCount = 0;
4293 const SCEV *TC = vputils::getSCEVExprForVPValue(MainPlan.getTripCount(), PSE);
4294 assert(!isa<SCEVCouldNotCompute>(TC) && "Trip count SCEV must be computable");
4295 const SCEV *KnownMinTC;
4296 bool ScalableTC = match(TC, m_scev_c_Mul(m_SCEV(KnownMinTC), m_SCEVVScale()));
4297 bool ScalableRemIter = false;
4298 ScalarEvolution &SE = *PSE.getSE();
4299 // Use versions of TC and VF in which both are either scalable or fixed.
4300 if (ScalableTC == MainLoopVF.isScalable()) {
4301 ScalableRemIter = ScalableTC;
4302 RemainingIterations =
4303 SE.getURemExpr(TC, SE.getElementCount(TCType, MainLoopVF * IC));
4304 } else if (ScalableTC) {
4305 const SCEV *EstimatedTC = SE.getMulExpr(
4306 KnownMinTC,
4307 SE.getConstant(TCType, CM.getVScaleForTuning().value_or(1)));
4308 RemainingIterations = SE.getURemExpr(
4309 EstimatedTC, SE.getElementCount(TCType, MainLoopVF * IC));
4310 } else
4311 RemainingIterations =
4312 SE.getURemExpr(TC, SE.getElementCount(TCType, EstimatedRuntimeVF * IC));
4313
4314 // No iterations left to process in the epilogue.
4315 if (RemainingIterations->isZero())
4316 return nullptr;
4317
4318 if (MainLoopVF.isFixed()) {
4319 MaxTripCount = MainLoopVF.getFixedValue() * IC - 1;
4320 if (SE.isKnownPredicate(CmpInst::ICMP_ULT, RemainingIterations,
4321 SE.getConstant(TCType, MaxTripCount))) {
4322 MaxTripCount = SE.getUnsignedRangeMax(RemainingIterations).getZExtValue();
4323 }
4324 LLVM_DEBUG(dbgs() << "LEV: Maximum Trip Count for Epilogue: "
4325 << MaxTripCount << "\n");
4326 }
4327
4328 auto SkipVF = [&](const SCEV *VF, const SCEV *RemIter) -> bool {
4329 return SE.isKnownPredicate(CmpInst::ICMP_UGT, VF, RemIter);
4330 };
4332 VPlan *BestPlan = nullptr;
4333 for (auto &NextVF : ProfitableVFs) {
4334 // Skip candidate VFs without a corresponding VPlan.
4335 if (!hasPlanWithVF(NextVF.Width))
4336 continue;
4337
4338 VPlan &CurrentPlan = getPlanFor(NextVF.Width);
4339 ElementCount EffectiveVF = GetEffectiveVF(CurrentPlan, NextVF.Width);
4340 // Skip candidate VFs with widths >= the (estimated) runtime VF (scalable
4341 // vectors) or > the VF of the main loop (fixed vectors).
4342 if ((!EffectiveVF.isScalable() && MainLoopVF.isScalable() &&
4343 ElementCount::isKnownGE(EffectiveVF, EstimatedRuntimeVF)) ||
4344 (EffectiveVF.isScalable() &&
4345 ElementCount::isKnownGE(EffectiveVF, MainLoopVF)) ||
4346 (!EffectiveVF.isScalable() && !MainLoopVF.isScalable() &&
4347 ElementCount::isKnownGT(EffectiveVF, MainLoopVF)))
4348 continue;
4349
4350 // If EffectiveVF is greater than the number of remaining iterations, the
4351 // epilogue loop would be dead. Skip such factors. If the epilogue plan
4352 // also has narrowed interleave groups, use the effective VF since
4353 // the epilogue step will be reduced to its IC.
4354 // TODO: We should also consider comparing against a scalable
4355 // RemainingIterations when SCEV is able to evaluate non-canonical
4356 // vscale-based expressions.
4357 if (!ScalableRemIter) {
4358 // Handle the case where EffectiveVF and RemainingIterations are in
4359 // different numerical spaces.
4360 if (EffectiveVF.isScalable())
4361 EffectiveVF = ElementCount::getFixed(
4362 estimateElementCount(EffectiveVF, CM.getVScaleForTuning()));
4363 if (SkipVF(SE.getElementCount(TCType, EffectiveVF), RemainingIterations))
4364 continue;
4365 }
4366
4367 if (Result.Width.isScalar() ||
4368 isMoreProfitable(NextVF, Result, MaxTripCount, !CM.foldTailByMasking(),
4369 /*IsEpilogue*/ true)) {
4370 Result = NextVF;
4371 BestPlan = &CurrentPlan;
4372 }
4373 }
4374
4375 if (!BestPlan)
4376 return nullptr;
4377
4378 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
4379 << Result.Width << "\n");
4380 std::unique_ptr<VPlan> Clone(BestPlan->duplicate());
4381 Clone->setVF(Result.Width);
4382 return Clone;
4383}
4384
4385std::pair<unsigned, unsigned>
4387 unsigned MinWidth = -1U;
4388 unsigned MaxWidth = 8;
4389 const DataLayout &DL = TheFunction->getDataLayout();
4390 // For in-loop reductions, no element types are added to ElementTypesInLoop
4391 // if there are no loads/stores in the loop. In this case, check through the
4392 // reduction variables to determine the maximum width.
4393 if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) {
4394 for (const auto &PhiDescriptorPair : Legal->getReductionVars()) {
4395 const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second;
4396 // When finding the min width used by the recurrence we need to account
4397 // for casts on the input operands of the recurrence.
4398 MinWidth = std::min(
4399 MinWidth,
4400 std::min(RdxDesc.getMinWidthCastToRecurrenceTypeInBits(),
4401 RdxDesc.getRecurrenceType()->getScalarSizeInBits()));
4402 MaxWidth = std::max(MaxWidth,
4403 RdxDesc.getRecurrenceType()->getScalarSizeInBits());
4404 }
4405 } else {
4406 for (Type *T : ElementTypesInLoop) {
4407 MinWidth = std::min<unsigned>(
4408 MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedValue());
4409 MaxWidth = std::max<unsigned>(
4410 MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedValue());
4411 }
4412 }
4413 return {MinWidth, MaxWidth};
4414}
4415
4417 ElementTypesInLoop.clear();
4418 // For each block.
4419 for (BasicBlock *BB : TheLoop->blocks()) {
4420 // For each instruction in the loop.
4421 for (Instruction &I : *BB) {
4422 Type *T = I.getType();
4423
4424 // Skip ignored values.
4425 if (ValuesToIgnore.count(&I))
4426 continue;
4427
4428 // Only examine Loads, Stores and PHINodes.
4429 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
4430 continue;
4431
4432 // Examine PHI nodes that are reduction variables. Update the type to
4433 // account for the recurrence type.
4434 if (auto *PN = dyn_cast<PHINode>(&I)) {
4435 if (!Legal->isReductionVariable(PN))
4436 continue;
4437 const RecurrenceDescriptor &RdxDesc =
4438 Legal->getRecurrenceDescriptor(PN);
4439 if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
4440 TTI.preferInLoopReduction(RdxDesc.getRecurrenceKind(),
4441 RdxDesc.getRecurrenceType()))
4442 continue;
4443 T = RdxDesc.getRecurrenceType();
4444 }
4445
4446 // Examine the stored values.
4447 if (auto *ST = dyn_cast<StoreInst>(&I))
4448 T = ST->getValueOperand()->getType();
4449
4450 assert(T->isSized() &&
4451 "Expected the load/store/recurrence type to be sized");
4452
4453 ElementTypesInLoop.insert(T);
4454 }
4455 }
4456}
4457
4458unsigned
4459LoopVectorizationPlanner::selectInterleaveCount(VPlan &Plan, ElementCount VF,
4460 InstructionCost LoopCost) {
4461 // -- The interleave heuristics --
4462 // We interleave the loop in order to expose ILP and reduce the loop overhead.
4463 // There are many micro-architectural considerations that we can't predict
4464 // at this level. For example, frontend pressure (on decode or fetch) due to
4465 // code size, or the number and capabilities of the execution ports.
4466 //
4467 // We use the following heuristics to select the interleave count:
4468 // 1. If the code has reductions, then we interleave to break the cross
4469 // iteration dependency.
4470 // 2. If the loop is really small, then we interleave to reduce the loop
4471 // overhead.
4472 // 3. We don't interleave if we think that we will spill registers to memory
4473 // due to the increased register pressure.
4474
4475 // Only interleave tail-folded loops if wide lane masks are requested, as the
4476 // overhead of multiple instructions to calculate the predicate is likely
4477 // not beneficial. If a scalar epilogue is not allowed for any other reason,
4478 // do not interleave.
4479 if (!CM.isScalarEpilogueAllowed() &&
4480 !(CM.preferPredicatedLoop() && CM.useWideActiveLaneMask()))
4481 return 1;
4482
4485 LLVM_DEBUG(dbgs() << "LV: Loop requires variable-length step. "
4486 "Unroll factor forced to be 1.\n");
4487 return 1;
4488 }
4489
4490 // The maximum safe dependence distance already limits the VF; do not interleave in that case.
4491 if (!Legal->isSafeForAnyVectorWidth())
4492 return 1;
4493
4494 // We don't attempt to perform interleaving for loops with uncountable early
4495 // exits because the VPInstruction::AnyOf code cannot currently handle
4496 // multiple parts.
4497 if (Plan.hasEarlyExit())
4498 return 1;
4499
4500 const bool HasReductions =
4503
4504 // FIXME: implement interleaving for FindLast transform correctly.
4505 if (hasFindLastReductionPhi(Plan))
4506 return 1;
4507
4508 VPRegisterUsage R =
4509 calculateRegisterUsageForPlan(Plan, {VF}, TTI, CM.ValuesToIgnore)[0];
4510
4511 // If we did not calculate the cost for VF (because the user selected the VF)
4512 // then we calculate the cost of VF here.
4513 if (LoopCost == 0) {
4514 if (VF.isScalar())
4515 LoopCost = CM.expectedCost(VF);
4516 else
4517 LoopCost = cost(Plan, VF, &R);
4518 assert(LoopCost.isValid() && "Expected to have chosen a VF with valid cost");
4519
4520 // Loop body is free and there is no need for interleaving.
4521 if (LoopCost == 0)
4522 return 1;
4523 }
4524
4525 // We divide by these constants so assume that we have at least one
4526 // instruction that uses at least one register.
4527 for (auto &Pair : R.MaxLocalUsers) {
4528 Pair.second = std::max(Pair.second, 1U);
4529 }
4530
4531 // We calculate the interleave count using the following formula.
4532 // Subtract the number of loop invariants from the number of available
4533 // registers. These registers are used by all of the interleaved instances.
4534 // Next, divide the remaining registers by the number of registers that is
4535 // required by the loop, in order to estimate how many parallel instances
4536 // fit without causing spills. All of this is rounded down if necessary to be
4537 // a power of two. We want power of two interleave count to simplify any
4538 // addressing operations or alignment considerations.
4539 // We also want power of two interleave counts to ensure that the induction
4540 // variable of the vector loop wraps to zero, when tail is folded by masking;
4541 // this currently happens when OptForSize, in which case IC is set to 1 above.
4542 unsigned IC = UINT_MAX;
4543
4544 for (const auto &Pair : R.MaxLocalUsers) {
4545 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(Pair.first);
4546 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
4547 << " registers of "
4548 << TTI.getRegisterClassName(Pair.first)
4549 << " register class\n");
4550 if (VF.isScalar()) {
4551 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
4552 TargetNumRegisters = ForceTargetNumScalarRegs;
4553 } else {
4554 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
4555 TargetNumRegisters = ForceTargetNumVectorRegs;
4556 }
4557 unsigned MaxLocalUsers = Pair.second;
4558 unsigned LoopInvariantRegs = 0;
4559 if (R.LoopInvariantRegs.contains(Pair.first))
4560 LoopInvariantRegs = R.LoopInvariantRegs[Pair.first];
4561
4562 unsigned TmpIC = llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs) /
4563 MaxLocalUsers);
4564 // Don't count the induction variable as interleaved.
4565 if (EnableIndVarRegisterHeur) {
4566 TmpIC = llvm::bit_floor((TargetNumRegisters - LoopInvariantRegs - 1) /
4567 std::max(1U, (MaxLocalUsers - 1)));
4568 }
4569
4570 IC = std::min(IC, TmpIC);
4571 }
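// For instance, with 32 registers in a class, 2 loop-invariant values and a
// peak of 10 registers used per iteration, bit_floor((32 - 2) / 10) == 2,
// so at most two interleaved copies are expected to fit without spilling.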
4572
4573 // Clamp the interleave ranges to reasonable counts.
4574 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
4575 LLVM_DEBUG(dbgs() << "LV: MaxInterleaveFactor for the target is "
4576 << MaxInterleaveCount << "\n");
4577
4578 // Check if the user has overridden the max.
4579 if (VF.isScalar()) {
4580 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
4581 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
4582 } else {
4583 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
4584 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
4585 }
4586
4587 // Try to get the exact trip count, or, failing that, an estimate based on
4588 // profiling data or the ConstantMax from PSE.
4589 auto BestKnownTC =
4590 getSmallBestKnownTC(PSE, OrigLoop,
4591 /*CanUseConstantMax=*/true,
4592 /*CanExcludeZeroTrips=*/CM.isScalarEpilogueAllowed());
4593
4594 // For fixed-length VFs, treat a scalable trip count as unknown.
4595 if (BestKnownTC && (BestKnownTC->isFixed() || VF.isScalable())) {
4596 // Re-evaluate trip counts and VFs to be in the same numerical space.
4597 unsigned AvailableTC =
4598 estimateElementCount(*BestKnownTC, CM.getVScaleForTuning());
4599 unsigned EstimatedVF = estimateElementCount(VF, CM.getVScaleForTuning());
4600
4601 // When this constraint holds, at least one iteration must remain scalar, so
4602 // the maximum number of iterations available for interleaving is one less.
4603 if (CM.requiresScalarEpilogue(VF.isVector()))
4604 --AvailableTC;
4605
4606 unsigned InterleaveCountLB = bit_floor(std::max(
4607 1u, std::min(AvailableTC / (EstimatedVF * 2), MaxInterleaveCount)));
4608
4609 if (getSmallConstantTripCount(PSE.getSE(), OrigLoop).isNonZero()) {
4610 // If the best known trip count is exact, we select between two
4611 // prospective ICs, where
4612 //
4613 // 1) the aggressive IC is capped by the trip count divided by VF
4614 // 2) the conservative IC is capped by the trip count divided by (VF * 2)
4615 //
4616 // The final IC is selected in a way that the epilogue loop trip count is
4617 // minimized while maximizing the IC itself, so that we either run the
4618 // vector loop at least once if it generates a small epilogue loop, or
4619 // else we run the vector loop at least twice.
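 // Illustrative example (values assumed): with AvailableTC = 24, an estimated
 // VF of 4 and a target max interleave factor of at least 8, the bounds are
 // InterleaveCountUB = bit_floor(24 / 4) = 4 and
 // InterleaveCountLB = bit_floor(24 / 8) = 2. The scalar tails are
 // 24 % (4 * 4) = 8 and 24 % (4 * 2) = 0, so they differ and the conservative
 // LB of 2 is kept; with AvailableTC = 32 both tails would be 0 and the UB
 // would be chosen instead.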
4620
4621 unsigned InterleaveCountUB = bit_floor(std::max(
4622 1u, std::min(AvailableTC / EstimatedVF, MaxInterleaveCount)));
4623 MaxInterleaveCount = InterleaveCountLB;
4624
4625 if (InterleaveCountUB != InterleaveCountLB) {
4626 unsigned TailTripCountUB =
4627 (AvailableTC % (EstimatedVF * InterleaveCountUB));
4628 unsigned TailTripCountLB =
4629 (AvailableTC % (EstimatedVF * InterleaveCountLB));
4630 // If both produce the same scalar tail, maximize the IC to do the same work
4631 // in fewer vector loop iterations.
4632 if (TailTripCountUB == TailTripCountLB)
4633 MaxInterleaveCount = InterleaveCountUB;
4634 }
4635 } else {
4636 // If the trip count is only an estimated compile-time constant, limit the
4637 // IC to be capped by the trip count divided by (VF * 2), such that the
4638 // vector loop runs at least twice to make interleaving seem profitable
4639 // when there is an epilogue loop present. Since the exact trip count is not
4640 // known, we choose to be conservative in our IC estimate.
4641 MaxInterleaveCount = InterleaveCountLB;
4642 }
4643 }
4644
4645 assert(MaxInterleaveCount > 0 &&
4646 "Maximum interleave count must be greater than 0");
4647
4648 // Clamp the calculated IC to be between 1 and the maximum interleave count
4649 // that the target and the trip count allow.
4650 if (IC > MaxInterleaveCount)
4651 IC = MaxInterleaveCount;
4652 else
4653 // Make sure IC is greater than 0.
4654 IC = std::max(1u, IC);
4655
4656 assert(IC > 0 && "Interleave count must be greater than 0.");
4657
4658 // Interleave if we vectorized this loop and there is a reduction that could
4659 // benefit from interleaving.
4660 if (VF.isVector() && HasReductions) {
4661 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
4662 return IC;
4663 }
4664
4665 // For any scalar loop that either requires runtime checks or predication, we
4666 // are better off leaving this to the unroller. Note that if we've already
4667 // vectorized the loop we will have done the runtime check and so interleaving
4668 // won't require further checks.
4669 bool ScalarInterleavingRequiresPredication =
4670 (VF.isScalar() && any_of(OrigLoop->blocks(), [this](BasicBlock *BB) {
4671 return Legal->blockNeedsPredication(BB);
4672 }));
4673 bool ScalarInterleavingRequiresRuntimePointerCheck =
4674 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
4675
4676 // We want to interleave small loops in order to reduce the loop overhead and
4677 // potentially expose ILP opportunities.
4678 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
4679 << "LV: IC is " << IC << '\n'
4680 << "LV: VF is " << VF << '\n');
4681 const bool AggressivelyInterleave =
4682 TTI.enableAggressiveInterleaving(HasReductions);
4683 if (!ScalarInterleavingRequiresRuntimePointerCheck &&
4684 !ScalarInterleavingRequiresPredication && LoopCost < SmallLoopCost) {
4685 // We assume that the loop overhead cost is 1. We use the cost model to
4686 // estimate the cost of the loop body and interleave until the cost of the
4687 // loop overhead is about 5% of the cost of the loop.
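 // Illustrative example (values assumed, with a small-loop-cost threshold of
 // 20): for a LoopCost of 6, SmallIC = min(IC, bit_floor(20 / 6)) = min(IC, 2),
 // so such a loop is interleaved at most twice by this rule.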
4688 unsigned SmallIC = std::min(IC, (unsigned)llvm::bit_floor<uint64_t>(
4689 SmallLoopCost / LoopCost.getValue()));
4690
4691 // Interleave until store/load ports (estimated by max interleave count) are
4692 // saturated.
4693 unsigned NumStores = 0;
4694 unsigned NumLoads = 0;
4697 for (VPRecipeBase &R : *VPBB) {
4699 NumLoads++;
4700 continue;
4701 }
4703 NumStores++;
4704 continue;
4705 }
4706
4707 if (auto *InterleaveR = dyn_cast<VPInterleaveRecipe>(&R)) {
4708 if (unsigned StoreOps = InterleaveR->getNumStoreOperands())
4709 NumStores += StoreOps;
4710 else
4711 NumLoads += InterleaveR->getNumDefinedValues();
4712 continue;
4713 }
4714 if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R)) {
4715 NumLoads += isa<LoadInst>(RepR->getUnderlyingInstr());
4716 NumStores += isa<StoreInst>(RepR->getUnderlyingInstr());
4717 continue;
4718 }
4719 if (isa<VPHistogramRecipe>(&R)) {
4720 NumLoads++;
4721 NumStores++;
4722 continue;
4723 }
4724 }
4725 }
4726 unsigned StoresIC = IC / (NumStores ? NumStores : 1);
4727 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
4728
4729 // There is little point in interleaving for reductions containing selects
4730 // and compares when VF=1 since it may just create more overhead than it's
4731 // worth for loops with small trip counts. This is because we still have to
4732 // do the final reduction after the loop.
4733 bool HasSelectCmpReductions =
4734 HasReductions &&
4736 [](VPRecipeBase &R) {
4737 auto *RedR = dyn_cast<VPReductionPHIRecipe>(&R);
4738 return RedR && (RecurrenceDescriptor::isAnyOfRecurrenceKind(
4739 RedR->getRecurrenceKind()) ||
4740 RecurrenceDescriptor::isFindIVRecurrenceKind(
4741 RedR->getRecurrenceKind()));
4742 });
4743 if (HasSelectCmpReductions) {
4744 LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");
4745 return 1;
4746 }
4747
4748 // If we have a scalar reduction (vector reductions are already dealt with
4749 // by this point), interleaving can increase the critical path length if the loop
4750 // we're interleaving is inside another loop. For tree-wise reductions
4751 // set the limit to 2, and for ordered reductions it's best to disable
4752 // interleaving entirely.
4753 if (HasReductions && OrigLoop->getLoopDepth() > 1) {
4754 bool HasOrderedReductions =
4756 [](VPRecipeBase &R) {
4757 auto *RedR = dyn_cast<VPReductionPHIRecipe>(&R);
4758
4759 return RedR && RedR->isOrdered();
4760 });
4761 if (HasOrderedReductions) {
4762 LLVM_DEBUG(
4763 dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
4764 return 1;
4765 }
4766
4767 unsigned F = MaxNestedScalarReductionIC;
4768 SmallIC = std::min(SmallIC, F);
4769 StoresIC = std::min(StoresIC, F);
4770 LoadsIC = std::min(LoadsIC, F);
4771 }
4772
4773 if (EnableLoadStoreRuntimeInterleave &&
4774 std::max(StoresIC, LoadsIC) > SmallIC) {
4775 LLVM_DEBUG(
4776 dbgs() << "LV: Interleaving to saturate store or load ports.\n");
4777 return std::max(StoresIC, LoadsIC);
4778 }
4779
4780 // If there are scalar reductions and TTI has enabled aggressive
4781 // interleaving for reductions, we will interleave to expose ILP.
4782 if (VF.isScalar() && AggressivelyInterleave) {
4783 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
4784 // Interleave no less than SmallIC but not as aggressive as the normal IC
4785 // to satisfy the rare situation when resources are too limited.
4786 return std::max(IC / 2, SmallIC);
4787 }
4788
4789 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
4790 return SmallIC;
4791 }
4792
4793 // Interleave if this is a large loop (small loops are already dealt with by
4794 // this point) that could benefit from interleaving.
4795 if (AggressivelyInterleave) {
4796 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
4797 return IC;
4798 }
4799
4800 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
4801 return 1;
4802}
4803
4804bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I,
4805 ElementCount VF) {
4806 // TODO: Cost model for emulated masked load/store is completely
4807 // broken. This hack guides the cost model to use an artificially
4808 // high enough value to practically disable vectorization with such
4809 // operations, except where the previously deployed legality hack allowed
4810 // using very low cost values. This is to avoid regressions coming simply
4811 // from moving the "masked load/store" check from legality to the cost model.
4812 // Masked load/gather emulation was previously never allowed.
4813 // Only a limited amount of masked store/scatter emulation was allowed.
4815 "Expecting a scalar emulated instruction");
4816 return isa<LoadInst>(I) ||
4817 (isa<StoreInst>(I) &&
4818 NumPredStores > NumberOfStoresToPredicate);
4819}
4820
4822 assert(VF.isVector() && "Expected VF >= 2");
4823
4824 // If we've already collected the instructions to scalarize or the predicated
4825 // BBs after vectorization, there's nothing to do. Collection may already have
4826 // occurred if we have a user-selected VF and are now computing the expected
4827 // cost for interleaving.
4828 if (InstsToScalarize.contains(VF) ||
4829 PredicatedBBsAfterVectorization.contains(VF))
4830 return;
4831
4832 // Initialize a mapping for VF in InstsToScalarize. If we find that it's
4833 // not profitable to scalarize any instructions, the presence of VF in the
4834 // map will indicate that we've analyzed it already.
4835 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
4836
4837 // Find all the instructions that are scalar with predication in the loop and
4838 // determine whether it would be better not to if-convert the blocks they are in.
4839 // If so, we also record the instructions to scalarize.
4840 for (BasicBlock *BB : TheLoop->blocks()) {
4842 continue;
4843 for (Instruction &I : *BB)
4844 if (isScalarWithPredication(&I, VF)) {
4845 ScalarCostsTy ScalarCosts;
4846 // Do not apply discount logic for:
4847 // 1. Scalars after vectorization, as there will only be a single copy
4848 // of the instruction.
4849 // 2. Scalable VF, as that would lead to invalid scalarization costs.
4850 // 3. Emulated masked memrefs, if a hacked cost is needed.
4851 if (!isScalarAfterVectorization(&I, VF) && !VF.isScalable() &&
4853 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) {
4854 for (const auto &[I, IC] : ScalarCosts)
4855 ScalarCostsVF.insert({I, IC});
4856 // Check if we decided to scalarize a call. If so, update the widening
4857 // decision of the call to CM_Scalarize with the computed scalar cost.
4858 for (const auto &[I, Cost] : ScalarCosts) {
4859 auto *CI = dyn_cast<CallInst>(I);
4860 if (!CI || !CallWideningDecisions.contains({CI, VF}))
4861 continue;
4862 CallWideningDecisions[{CI, VF}].Kind = CM_Scalarize;
4863 CallWideningDecisions[{CI, VF}].Cost = Cost;
4864 }
4865 }
4866 // Remember that BB will remain after vectorization.
4867 PredicatedBBsAfterVectorization[VF].insert(BB);
4868 for (auto *Pred : predecessors(BB)) {
4869 if (Pred->getSingleSuccessor() == BB)
4870 PredicatedBBsAfterVectorization[VF].insert(Pred);
4871 }
4872 }
4873 }
4874}
4875
4876InstructionCost LoopVectorizationCostModel::computePredInstDiscount(
4877 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
4878 assert(!isUniformAfterVectorization(PredInst, VF) &&
4879 "Instruction marked uniform-after-vectorization will be predicated");
4880
4881 // Initialize the discount to zero, meaning that the scalar version and the
4882 // vector version cost the same.
4883 InstructionCost Discount = 0;
4884
4885 // Holds instructions to analyze. The instructions we visit are mapped in
4886 // ScalarCosts. Those instructions are the ones that would be scalarized if
4887 // we find that the scalar version costs less.
4889
4890 // Returns true if the given instruction can be scalarized.
4891 auto CanBeScalarized = [&](Instruction *I) -> bool {
4892 // We only attempt to scalarize instructions forming a single-use chain
4893 // from the original predicated block that would otherwise be vectorized.
4894 // Although not strictly necessary, we give up on instructions we know will
4895 // already be scalar to avoid traversing chains that are unlikely to be
4896 // beneficial.
4897 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
4899 return false;
4900
4901 // If the instruction is scalar with predication, it will be analyzed
4902 // separately. We ignore it within the context of PredInst.
4903 if (isScalarWithPredication(I, VF))
4904 return false;
4905
4906 // If any of the instruction's operands are uniform after vectorization,
4907 // the instruction cannot be scalarized. This prevents, for example, a
4908 // masked load from being scalarized.
4909 //
4910 // We assume we will only emit a value for lane zero of an instruction
4911 // marked uniform after vectorization, rather than VF identical values.
4912 // Thus, if we scalarize an instruction that uses a uniform, we would
4913 // create uses of values corresponding to the lanes we aren't emitting code
4914 // for. This behavior can be changed by allowing getScalarValue to clone
4915 // the lane zero values for uniforms rather than asserting.
4916 for (Use &U : I->operands())
4917 if (auto *J = dyn_cast<Instruction>(U.get()))
4918 if (isUniformAfterVectorization(J, VF))
4919 return false;
4920
4921 // Otherwise, we can scalarize the instruction.
4922 return true;
4923 };
4924
4925 // Compute the expected cost discount from scalarizing the entire expression
4926 // feeding the predicated instruction. We currently only consider expressions
4927 // that are single-use instruction chains.
4928 Worklist.push_back(PredInst);
4929 while (!Worklist.empty()) {
4930 Instruction *I = Worklist.pop_back_val();
4931
4932 // If we've already analyzed the instruction, there's nothing to do.
4933 if (ScalarCosts.contains(I))
4934 continue;
4935
4936 // Cannot scalarize fixed-order recurrence phis at the moment.
4937 if (isa<PHINode>(I) && Legal->isFixedOrderRecurrence(cast<PHINode>(I)))
4938 continue;
4939
4940 // Compute the cost of the vector instruction. Note that this cost already
4941 // includes the scalarization overhead of the predicated instruction.
4942 InstructionCost VectorCost = getInstructionCost(I, VF);
4943
4944 // Compute the cost of the scalarized instruction. This cost is the cost of
4945 // the instruction as if it wasn't if-converted and instead remained in the
4946 // predicated block. We will scale this cost by block probability after
4947 // computing the scalarization overhead.
4948 InstructionCost ScalarCost =
4950
4951 // Compute the scalarization overhead of needed insertelement instructions
4952 // and phi nodes.
4953 if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) {
4954 Type *WideTy = toVectorizedTy(I->getType(), VF);
4955 for (Type *VectorTy : getContainedTypes(WideTy)) {
4956 ScalarCost += TTI.getScalarizationOverhead(
4958 /*Insert=*/true,
4959 /*Extract=*/false, CostKind);
4960 }
4961 ScalarCost +=
4962 VF.getFixedValue() * TTI.getCFInstrCost(Instruction::PHI, CostKind);
4963 }
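 // Illustrative example (values assumed): for VF = 4 and a predicated
 // instruction producing an i32, this adds the overhead of inserting four
 // scalar results into a <4 x i32> vector plus four times the cost of the phi
 // that merges each conditionally produced lane.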
4964
4965 // Compute the scalarization overhead of needed extractelement
4966 // instructions. For each of the instruction's operands, if the operand can
4967 // be scalarized, add it to the worklist; otherwise, account for the
4968 // overhead.
4969 for (Use &U : I->operands())
4970 if (auto *J = dyn_cast<Instruction>(U.get())) {
4971 assert(canVectorizeTy(J->getType()) &&
4972 "Instruction has non-scalar type");
4973 if (CanBeScalarized(J))
4974 Worklist.push_back(J);
4975 else if (needsExtract(J, VF)) {
4976 Type *WideTy = toVectorizedTy(J->getType(), VF);
4977 for (Type *VectorTy : getContainedTypes(WideTy)) {
4978 ScalarCost += TTI.getScalarizationOverhead(
4979 cast<VectorType>(VectorTy),
4980 APInt::getAllOnes(VF.getFixedValue()), /*Insert*/ false,
4981 /*Extract*/ true, CostKind);
4982 }
4983 }
4984 }
4985
4986 // Scale the total scalar cost by block probability.
4987 ScalarCost /= getPredBlockCostDivisor(CostKind, I->getParent());
4988
4989 // Compute the discount. A non-negative discount means the vector version
4990 // of the instruction costs more, and scalarizing would be beneficial.
4991 Discount += VectorCost - ScalarCost;
4992 ScalarCosts[I] = ScalarCost;
4993 }
4994
4995 return Discount;
4996}
4997
5000
5001 // If the vector loop gets executed exactly once with the given VF, ignore the
5002 // costs of comparison and induction instructions, as they'll get simplified
5003 // away.
5004 SmallPtrSet<Instruction *, 2> ValuesToIgnoreForVF;
5005 auto TC = getSmallConstantTripCount(PSE.getSE(), TheLoop);
5006 if (TC == VF && !foldTailByMasking())
5008 ValuesToIgnoreForVF);
5009
5010 // For each block.
5011 for (BasicBlock *BB : TheLoop->blocks()) {
5012 InstructionCost BlockCost;
5013
5014 // For each instruction in the old loop.
5015 for (Instruction &I : *BB) {
5016 // Skip ignored values.
5017 if (ValuesToIgnore.count(&I) || ValuesToIgnoreForVF.count(&I) ||
5018 (VF.isVector() && VecValuesToIgnore.count(&I)))
5019 continue;
5020
5022
5023 // Check if we should override the cost.
5024 if (C.isValid() && ForceTargetInstructionCost.getNumOccurrences() > 0) {
5025 // For interleave groups, use ForceTargetInstructionCost once for the
5026 // whole group.
5027 if (VF.isVector() && getWideningDecision(&I, VF) == CM_Interleave) {
5028 if (getInterleavedAccessGroup(&I)->getInsertPos() == &I)
5030 else
5031 C = InstructionCost(0);
5032 } else {
5034 }
5035 }
5036
5037 BlockCost += C;
5038 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C << " for VF "
5039 << VF << " For instruction: " << I << '\n');
5040 }
5041
5042 // If we are vectorizing a predicated block, it will have been
5043 // if-converted. This means that the block's instructions (aside from
5044 // stores and instructions that may divide by zero) will now be
5045 // unconditionally executed. For the scalar case, we may not always execute
5046 // the predicated block, if it is an if-else block. Thus, scale the block's
5047 // cost by the probability of executing it.
5048 // getPredBlockCostDivisor will return 1 for blocks that are only predicated
5049 // by the header mask when folding the tail.
5050 if (VF.isScalar())
5051 BlockCost /= getPredBlockCostDivisor(CostKind, BB);
5052
5053 Cost += BlockCost;
5054 }
5055
5056 return Cost;
5057}
5058
5059/// Gets the address access SCEV for Ptr, if it should be used for cost modeling
5060/// according to isAddressSCEVForCost.
5061///
5062/// This SCEV can be sent to the Target in order to estimate the address
5063/// calculation cost.
5065 Value *Ptr,
5067 const Loop *TheLoop) {
5068 const SCEV *Addr = PSE.getSCEV(Ptr);
5069 return vputils::isAddressSCEVForCost(Addr, *PSE.getSE(), TheLoop) ? Addr
5070 : nullptr;
5071}
5072
5074LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
5075 ElementCount VF) {
5076 assert(VF.isVector() &&
5077 "Scalarization cost of instruction implies vectorization.");
5078 if (VF.isScalable())
5080
5081 Type *ValTy = getLoadStoreType(I);
5082 auto *SE = PSE.getSE();
5083
5084 unsigned AS = getLoadStoreAddressSpace(I);
5086 Type *PtrTy = toVectorTy(Ptr->getType(), VF);
5087 // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
5088 // that it is being called from this specific place.
5089
5090 // Figure out whether the access is strided and get the stride value
5091 // if it's known at compile time.
5092 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, PSE, TheLoop);
5093
5094 // Get the cost of the scalar memory instruction and address computation.
5095 InstructionCost Cost = VF.getFixedValue() * TTI.getAddressComputationCost(
5096 PtrTy, SE, PtrSCEV, CostKind);
5097
5098 // Don't pass *I here, since it is scalar but will actually be part of a
5099 // vectorized loop where the user of it is a vectorized instruction.
5100 const Align Alignment = getLoadStoreAlignment(I);
5101 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
5102 Cost += VF.getFixedValue() *
5103 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
5104 AS, CostKind, OpInfo);
5105
5106 // Get the overhead of the extractelement and insertelement instructions
5107 // we might create due to scalarization.
5108 Cost += getScalarizationOverhead(I, VF);
5109
5110 // If we have a predicated load/store, it will need extra i1 extracts and
5111 // conditional branches, but may not be executed for each vector lane. Scale
5112 // the cost by the probability of executing the predicated block.
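 // Illustrative example (divisor assumed to be 2): if the predicated block is
 // only reached on roughly half of the iterations, the scalarized memory cost
 // computed above is halved here, before the per-lane i1 extract and branch
 // costs are added back in below.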
5113 if (isPredicatedInst(I)) {
5114 Cost /= getPredBlockCostDivisor(CostKind, I->getParent());
5115
5116 // Add the cost of an i1 extract and a branch
5117 auto *VecI1Ty =
5119 Cost += TTI.getScalarizationOverhead(
5120 VecI1Ty, APInt::getAllOnes(VF.getFixedValue()),
5121 /*Insert=*/false, /*Extract=*/true, CostKind);
5122 Cost += TTI.getCFInstrCost(Instruction::CondBr, CostKind);
5123
5124 if (useEmulatedMaskMemRefHack(I, VF))
5125 // Artificially setting to a high enough value to practically disable
5126 // vectorization with such operations.
5127 Cost = 3000000;
5128 }
5129
5130 return Cost;
5131}
5132
5134LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
5135 ElementCount VF) {
5136 Type *ValTy = getLoadStoreType(I);
5137 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5139 unsigned AS = getLoadStoreAddressSpace(I);
5140 int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
5141
5142 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5143 "Stride should be 1 or -1 for consecutive memory access");
5144 const Align Alignment = getLoadStoreAlignment(I);
5146 if (isMaskRequired(I)) {
5147 unsigned IID = I->getOpcode() == Instruction::Load
5148 ? Intrinsic::masked_load
5149 : Intrinsic::masked_store;
5150 Cost += TTI.getMemIntrinsicInstrCost(
5151 MemIntrinsicCostAttributes(IID, VectorTy, Alignment, AS), CostKind);
5152 } else {
5153 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
5154 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
5155 CostKind, OpInfo, I);
5156 }
5157
5158 bool Reverse = ConsecutiveStride < 0;
5159 if (Reverse)
5160 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy,
5161 VectorTy, {}, CostKind, 0);
5162 return Cost;
5163}
5164
5166LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
5167 ElementCount VF) {
5168 assert(Legal->isUniformMemOp(*I, VF));
5169
5170 Type *ValTy = getLoadStoreType(I);
5172 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5173 const Align Alignment = getLoadStoreAlignment(I);
5174 unsigned AS = getLoadStoreAddressSpace(I);
5175 if (isa<LoadInst>(I)) {
5176 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5177 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
5178 CostKind) +
5179 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy,
5180 VectorTy, {}, CostKind);
5181 }
5182 StoreInst *SI = cast<StoreInst>(I);
5183
5184 bool IsLoopInvariantStoreValue = Legal->isInvariant(SI->getValueOperand());
5185 // TODO: We have existing tests that request the cost of extracting element
5186 // VF.getKnownMinValue() - 1 from a scalable vector. This does not represent
5187 // the actual generated code, which involves extracting the last element of
5188 // a scalable vector where the lane to extract is unknown at compile time.
5190 TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5191 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, CostKind);
5192 if (!IsLoopInvariantStoreValue)
5193 Cost += TTI.getIndexedVectorInstrCostFromEnd(Instruction::ExtractElement,
5194 VectorTy, CostKind, 0);
5195 return Cost;
5196}
5197
5199LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
5200 ElementCount VF) {
5201 Type *ValTy = getLoadStoreType(I);
5202 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5203 const Align Alignment = getLoadStoreAlignment(I);
5205 Type *PtrTy = Ptr->getType();
5206
5207 if (!Legal->isUniform(Ptr, VF))
5208 PtrTy = toVectorTy(PtrTy, VF);
5209
5210 unsigned IID = I->getOpcode() == Instruction::Load
5211 ? Intrinsic::masked_gather
5212 : Intrinsic::masked_scatter;
5213 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5214 TTI.getMemIntrinsicInstrCost(
5215 MemIntrinsicCostAttributes(IID, VectorTy, Ptr, isMaskRequired(I),
5216 Alignment, I),
5217 CostKind);
5218}
5219
5221LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
5222 ElementCount VF) {
5223 const auto *Group = getInterleavedAccessGroup(I);
5224 assert(Group && "Fail to get an interleaved access group.");
5225
5226 Instruction *InsertPos = Group->getInsertPos();
5227 Type *ValTy = getLoadStoreType(InsertPos);
5228 auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
5229 unsigned AS = getLoadStoreAddressSpace(InsertPos);
5230
5231 unsigned InterleaveFactor = Group->getFactor();
5232 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
5233
5234 // Holds the indices of existing members in the interleaved group.
5235 SmallVector<unsigned, 4> Indices;
5236 for (unsigned IF = 0; IF < InterleaveFactor; IF++)
5237 if (Group->getMember(IF))
5238 Indices.push_back(IF);
5239
5240 // Calculate the cost of the whole interleaved group.
5241 bool UseMaskForGaps =
5242 (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
5243 (isa<StoreInst>(I) && !Group->isFull());
5244 InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
5245 InsertPos->getOpcode(), WideVecTy, Group->getFactor(), Indices,
5246 Group->getAlign(), AS, CostKind, isMaskRequired(I), UseMaskForGaps);
5247
5248 if (Group->isReverse()) {
5249 // TODO: Add support for reversed masked interleaved access.
5251 "Reverse masked interleaved access not supported.");
5252 Cost += Group->getNumMembers() *
5253 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy,
5254 VectorTy, {}, CostKind, 0);
5255 }
5256 return Cost;
5257}
5258
5259std::optional<InstructionCost>
5261 ElementCount VF,
5262 Type *Ty) const {
5263 using namespace llvm::PatternMatch;
5264 // Early exit if there are no in-loop reductions.
5265 if (InLoopReductions.empty() || VF.isScalar() || !isa<VectorType>(Ty))
5266 return std::nullopt;
5267 auto *VectorTy = cast<VectorType>(Ty);
5268
5269 // We are looking for one of the following patterns, and for the minimal acceptable cost:
5270 // reduce(mul(ext(A), ext(B))) or
5271 // reduce(mul(A, B)) or
5272 // reduce(ext(A)) or
5273 // reduce(A).
5274 // The basic idea is that we walk down the tree to do that, finding the root
5275 // reduction instruction in InLoopReductionImmediateChains. From there we find
5276 // the pattern of mul/ext and test the cost of the entire pattern vs the cost
5277 // of the components. If the reduction cost is lower, then we return it for the
5278 // reduction instruction and 0 for the other instructions in the pattern. If
5279 // it is not, we return an invalid cost specifying that the original cost method
5280 // should be used.
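 // Illustrative walk-through (assumed IR, not from the source):
 //   %ae = sext i8 %a to i32
 //   %be = sext i8 %b to i32
 //   %m  = mul i32 %ae, %be
 //   %r  = add i32 %sum, %m
 // Starting from %ae, RetI advances through the single-use sext and mul to the
 // add %r, which is then looked up in InLoopReductionImmediateChains.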
5281 Instruction *RetI = I;
5282 if (match(RetI, m_ZExtOrSExt(m_Value()))) {
5283 if (!RetI->hasOneUser())
5284 return std::nullopt;
5285 RetI = RetI->user_back();
5286 }
5287
5288 if (match(RetI, m_OneUse(m_Mul(m_Value(), m_Value()))) &&
5289 RetI->user_back()->getOpcode() == Instruction::Add) {
5290 RetI = RetI->user_back();
5291 }
5292
5293 // Test if the found instruction is a reduction, and if not return an invalid
5294 // cost, telling the caller to use the original cost modelling.
5295 Instruction *LastChain = InLoopReductionImmediateChains.lookup(RetI);
5296 if (!LastChain)
5297 return std::nullopt;
5298
5299 // Find the reduction this chain is a part of and calculate the basic cost of
5300 // the reduction on its own.
5301 Instruction *ReductionPhi = LastChain;
5302 while (!isa<PHINode>(ReductionPhi))
5303 ReductionPhi = InLoopReductionImmediateChains.at(ReductionPhi);
5304
5305 const RecurrenceDescriptor &RdxDesc =
5306 Legal->getRecurrenceDescriptor(cast<PHINode>(ReductionPhi));
5307
5308 InstructionCost BaseCost;
5309 RecurKind RK = RdxDesc.getRecurrenceKind();
5312 BaseCost = TTI.getMinMaxReductionCost(MinMaxID, VectorTy,
5313 RdxDesc.getFastMathFlags(), CostKind);
5314 } else {
5315 BaseCost = TTI.getArithmeticReductionCost(
5316 RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);
5317 }
5318
5319 // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
5320 // normal fmul instruction to the cost of the fadd reduction.
5321 if (RK == RecurKind::FMulAdd)
5322 BaseCost +=
5323 TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind);
5324
5325 // If we're using ordered reductions then we can just return the base cost
5326 // here, since getArithmeticReductionCost calculates the full ordered
5327 // reduction cost when FP reassociation is not allowed.
5328 if (useOrderedReductions(RdxDesc))
5329 return BaseCost;
5330
5331 // Get the operand that was not the reduction chain and match it to one of the
5332 // patterns, returning the better cost if it is found.
5333 Instruction *RedOp = RetI->getOperand(1) == LastChain
5336
5337 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
5338
5339 Instruction *Op0, *Op1;
5340 if (RedOp && RdxDesc.getOpcode() == Instruction::Add &&
5341 match(RedOp,
5343 match(Op0, m_ZExtOrSExt(m_Value())) &&
5344 Op0->getOpcode() == Op1->getOpcode() &&
5345 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
5346 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
5347 (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {
5348
5349 // Matched reduce.add(ext(mul(ext(A), ext(B))))
5350 // Note that the extend opcodes need to all match, or if A==B they will have
5351 // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
5352 // which is equally fine.
5353 bool IsUnsigned = isa<ZExtInst>(Op0);
5354 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
5355 auto *MulType = VectorType::get(Op0->getType(), VectorTy);
5356
5357 InstructionCost ExtCost =
5358 TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
5360 InstructionCost MulCost =
5361 TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind);
5362 InstructionCost Ext2Cost =
5363 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType,
5365
5366 InstructionCost RedCost = TTI.getMulAccReductionCost(
5367 IsUnsigned, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), ExtType,
5368 CostKind);
5369
5370 if (RedCost.isValid() &&
5371 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
5372 return I == RetI ? RedCost : 0;
5373 } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) &&
5374 !TheLoop->isLoopInvariant(RedOp)) {
5375 // Matched reduce(ext(A))
5376 bool IsUnsigned = isa<ZExtInst>(RedOp);
5377 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
5378 InstructionCost RedCost = TTI.getExtendedReductionCost(
5379 RdxDesc.getOpcode(), IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
5380 RdxDesc.getFastMathFlags(), CostKind);
5381
5382 InstructionCost ExtCost =
5383 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
5385 if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
5386 return I == RetI ? RedCost : 0;
5387 } else if (RedOp && RdxDesc.getOpcode() == Instruction::Add &&
5388 match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) {
5389 if (match(Op0, m_ZExtOrSExt(m_Value())) &&
5390 Op0->getOpcode() == Op1->getOpcode() &&
5391 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
5392 bool IsUnsigned = isa<ZExtInst>(Op0);
5393 Type *Op0Ty = Op0->getOperand(0)->getType();
5394 Type *Op1Ty = Op1->getOperand(0)->getType();
5395 Type *LargestOpTy =
5396 Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty
5397 : Op0Ty;
5398 auto *ExtType = VectorType::get(LargestOpTy, VectorTy);
5399
5400 // Matched reduce.add(mul(ext(A), ext(B))), where the two ext may be of
5401 // different sizes. We take the largest type as the ext to reduce, and add
5402 // the remaining cost as if the pattern were, for example, reduce(mul(ext(ext(A)), ext(B))).
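      // Illustrative example (types assumed): for reduce.add(mul(sext(i8 A),
      // sext(i16 B))) the larger operand type i16 becomes ExtType, and the
      // cost of extending the i8 operand up to i16 is added back as
      // ExtraExtCost.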
5403 InstructionCost ExtCost0 = TTI.getCastInstrCost(
5404 Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy),
5406 InstructionCost ExtCost1 = TTI.getCastInstrCost(
5407 Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy),
5409 InstructionCost MulCost =
5410 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
5411
5412 InstructionCost RedCost = TTI.getMulAccReductionCost(
5413 IsUnsigned, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), ExtType,
5414 CostKind);
5415 InstructionCost ExtraExtCost = 0;
5416 if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
5417 Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;
5418 ExtraExtCost = TTI.getCastInstrCost(
5419 ExtraExtOp->getOpcode(), ExtType,
5420 VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy),
5422 }
5423
5424 if (RedCost.isValid() &&
5425 (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost))
5426 return I == RetI ? RedCost : 0;
5427 } else if (!match(I, m_ZExtOrSExt(m_Value()))) {
5428 // Matched reduce.add(mul())
5429 InstructionCost MulCost =
5430 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
5431
5432 InstructionCost RedCost = TTI.getMulAccReductionCost(
5433 true, RdxDesc.getOpcode(), RdxDesc.getRecurrenceType(), VectorTy,
5434 CostKind);
5435
5436 if (RedCost.isValid() && RedCost < MulCost + BaseCost)
5437 return I == RetI ? RedCost : 0;
5438 }
5439 }
5440
5441 return I == RetI ? std::optional<InstructionCost>(BaseCost) : std::nullopt;
5442}
5443
5445LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
5446 ElementCount VF) {
5447 // Calculate scalar cost only. Vectorization cost should be ready at this
5448 // moment.
5449 if (VF.isScalar()) {
5450 Type *ValTy = getLoadStoreType(I);
5452 const Align Alignment = getLoadStoreAlignment(I);
5453 unsigned AS = getLoadStoreAddressSpace(I);
5454
5455 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
5456 return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
5457 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, CostKind,
5458 OpInfo, I);
5459 }
5460 return getWideningCost(I, VF);
5461}
5462
5464LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
5465 ElementCount VF) const {
5466
5467 // There is no mechanism yet to create a scalable scalarization loop,
5468 // so this is currently Invalid.
5469 if (VF.isScalable())
5471
5472 if (VF.isScalar())
5473 return 0;
5474
5476 Type *RetTy = toVectorizedTy(I->getType(), VF);
5477 if (!RetTy->isVoidTy() &&
5478 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) {
5479
5481 if (isa<LoadInst>(I))
5483 else if (isa<StoreInst>(I))
5485
5486 for (Type *VectorTy : getContainedTypes(RetTy)) {
5487 Cost += TTI.getScalarizationOverhead(
5489 /*Insert=*/true, /*Extract=*/false, CostKind,
5490 /*ForPoisonSrc=*/true, {}, VIC);
5491 }
5492 }
5493
5494 // Some targets keep addresses scalar.
5495 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
5496 return Cost;
5497
5498 // Some targets support efficient element stores.
5499 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
5500 return Cost;
5501
5502 // Collect operands to consider.
5503 CallInst *CI = dyn_cast<CallInst>(I);
5504 Instruction::op_range Ops = CI ? CI->args() : I->operands();
5505
5506 // Skip operands that do not require extraction/scalarization and do not incur
5507 // any overhead.
5509 for (auto *V : filterExtractingOperands(Ops, VF))
5510 Tys.push_back(maybeVectorizeType(V->getType(), VF));
5511
5515 return Cost + TTI.getOperandsScalarizationOverhead(Tys, CostKind, OperandVIC);
5516}
5517
5519 if (VF.isScalar())
5520 return;
5521 NumPredStores = 0;
5522 for (BasicBlock *BB : TheLoop->blocks()) {
5523 // For each instruction in the old loop.
5524 for (Instruction &I : *BB) {
5526 if (!Ptr)
5527 continue;
5528
5529 // TODO: We should generate better code and update the cost model for
5530 // predicated uniform stores. Today they are treated as any other
5531 // predicated store (see added test cases in
5532 // invariant-store-vectorization.ll).
5534 NumPredStores++;
5535
5536 if (Legal->isUniformMemOp(I, VF)) {
5537 auto IsLegalToScalarize = [&]() {
5538 if (!VF.isScalable())
5539 // Scalarization of fixed length vectors "just works".
5540 return true;
5541
5542 // We have dedicated lowering for unpredicated uniform loads and
5543 // stores. Note that even with tail folding we know that at least
5544 // one lane is active (i.e. generalized predication is not possible
5545 // here), and the logic below depends on this fact.
5546 if (!foldTailByMasking())
5547 return true;
5548
5549 // For scalable vectors, a uniform memop load is always
5550 // uniform-by-parts and we know how to scalarize that.
5551 if (isa<LoadInst>(I))
5552 return true;
5553
5554 // A uniform store isn't necessarily uniform-by-parts,
5555 // so we can't assume scalarization.
5556 auto &SI = cast<StoreInst>(I);
5557 return TheLoop->isLoopInvariant(SI.getValueOperand());
5558 };
5559
5560 const InstructionCost GatherScatterCost =
5562 getGatherScatterCost(&I, VF) : InstructionCost::getInvalid();
5563
5564 // Load: Scalar load + broadcast
5565 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
5566 // FIXME: This cost is a significant under-estimate for tail folded
5567 // memory ops.
5568 const InstructionCost ScalarizationCost =
5569 IsLegalToScalarize() ? getUniformMemOpCost(&I, VF)
5571
5572 // Choose the better solution for the current VF. Note that invalid
5573 // costs compare as maximally large. If both are invalid, we get a
5574 // scalable invalid cost, which signals a failure and a vectorization abort.
5575 if (GatherScatterCost < ScalarizationCost)
5576 setWideningDecision(&I, VF, CM_GatherScatter, GatherScatterCost);
5577 else
5578 setWideningDecision(&I, VF, CM_Scalarize, ScalarizationCost);
5579 continue;
5580 }
5581
5582 // We assume that widening is the best solution when possible.
5583 if (memoryInstructionCanBeWidened(&I, VF)) {
5584 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
5585 int ConsecutiveStride = Legal->isConsecutivePtr(
5587 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5588 "Expected consecutive stride.");
5589 InstWidening Decision =
5590 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
5591 setWideningDecision(&I, VF, Decision, Cost);
5592 continue;
5593 }
5594
5595 // Choose between Interleaving, Gather/Scatter or Scalarization.
5597 unsigned NumAccesses = 1;
5598 if (isAccessInterleaved(&I)) {
5599 const auto *Group = getInterleavedAccessGroup(&I);
5600 assert(Group && "Fail to get an interleaved access group.");
5601
5602 // Make one decision for the whole group.
5603 if (getWideningDecision(&I, VF) != CM_Unknown)
5604 continue;
5605
5606 NumAccesses = Group->getNumMembers();
5608 InterleaveCost = getInterleaveGroupCost(&I, VF);
5609 }
5610
5611 InstructionCost GatherScatterCost =
5613 ? getGatherScatterCost(&I, VF) * NumAccesses
5615
5616 InstructionCost ScalarizationCost =
5617 getMemInstScalarizationCost(&I, VF) * NumAccesses;
5618
5619 // Choose the better solution for the current VF,
5620 // write down this decision, and use it during vectorization.
5622 InstWidening Decision;
5623 if (InterleaveCost <= GatherScatterCost &&
5624 InterleaveCost < ScalarizationCost) {
5625 Decision = CM_Interleave;
5626 Cost = InterleaveCost;
5627 } else if (GatherScatterCost < ScalarizationCost) {
5628 Decision = CM_GatherScatter;
5629 Cost = GatherScatterCost;
5630 } else {
5631 Decision = CM_Scalarize;
5632 Cost = ScalarizationCost;
5633 }
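 // Illustrative example (costs assumed): with InterleaveCost = 8,
 // GatherScatterCost = 12 and ScalarizationCost = 20, interleaving is no worse
 // than gather/scatter and strictly cheaper than scalarization, so
 // CM_Interleave is recorded with a cost of 8.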
5634 // If the instruction belongs to an interleave group, the whole group
5635 // receives the same decision. The cost covers the whole group, but
5636 // it will actually be assigned to a single instruction.
5637 if (const auto *Group = getInterleavedAccessGroup(&I)) {
5638 if (Decision == CM_Scalarize) {
5639 for (unsigned Idx = 0; Idx < Group->getFactor(); ++Idx) {
5640 if (auto *I = Group->getMember(Idx)) {
5641 setWideningDecision(I, VF, Decision,
5642 getMemInstScalarizationCost(I, VF));
5643 }
5644 }
5645 } else {
5646 setWideningDecision(Group, VF, Decision, Cost);
5647 }
5648 } else
5649 setWideningDecision(&I, VF, Decision, Cost);
5650 }
5651 }
5652
5653 // Make sure that any load of an address and any other address computation
5654 // remains scalar unless there is gather/scatter support. This avoids
5655 // inevitable extracts into address registers, and also has the benefit of
5656 // activating LSR more, since that pass can't optimize vectorized
5657 // addresses.
5658 if (TTI.prefersVectorizedAddressing())
5659 return;
5660
5661 // Start with all scalar pointer uses.
5663 for (BasicBlock *BB : TheLoop->blocks())
5664 for (Instruction &I : *BB) {
5665 Instruction *PtrDef =
5667 if (PtrDef && TheLoop->contains(PtrDef) &&
5669 AddrDefs.insert(PtrDef);
5670 }
5671
5672 // Add all instructions used to generate the addresses.
5674 append_range(Worklist, AddrDefs);
5675 while (!Worklist.empty()) {
5676 Instruction *I = Worklist.pop_back_val();
5677 for (auto &Op : I->operands())
5678 if (auto *InstOp = dyn_cast<Instruction>(Op))
5679 if (TheLoop->contains(InstOp) && !isa<PHINode>(InstOp) &&
5680 AddrDefs.insert(InstOp).second)
5681 Worklist.push_back(InstOp);
5682 }
5683
5684 auto UpdateMemOpUserCost = [this, VF](LoadInst *LI) {
5685 // If there are direct memory op users of the newly scalarized load,
5686 // their cost may have changed because there's no scalarization
5687 // overhead for the operand. Update it.
5688 for (User *U : LI->users()) {
5690 continue;
5692 continue;
5695 getMemInstScalarizationCost(cast<Instruction>(U), VF));
5696 }
5697 };
5698 for (auto *I : AddrDefs) {
5699 if (isa<LoadInst>(I)) {
5700 // Setting the desired widening decision should ideally be handled
5701 // by cost functions, but since this involves the task of finding out
5702 // if the loaded register is involved in an address computation, it is
5703 // instead changed here when we know this is the case.
5704 InstWidening Decision = getWideningDecision(I, VF);
5705 if (!isPredicatedInst(I) &&
5706 (Decision == CM_Widen || Decision == CM_Widen_Reverse ||
5707 (!Legal->isUniformMemOp(*I, VF) && Decision == CM_Scalarize))) {
5708 // Scalarize a widened load of an address or update the cost of a scalar
5709 // load of an address.
5711 I, VF, CM_Scalarize,
5712 (VF.getKnownMinValue() *
5713 getMemoryInstructionCost(I, ElementCount::getFixed(1))));
5714 UpdateMemOpUserCost(cast<LoadInst>(I));
5715 } else if (const auto *Group = getInterleavedAccessGroup(I)) {
5716 // Scalarize all members of this interleaved group when any member
5717 // is used as an address. The address-used load skips scalarization
5718 // overhead, other members include it.
5719 for (unsigned Idx = 0; Idx < Group->getFactor(); ++Idx) {
5720 if (Instruction *Member = Group->getMember(Idx)) {
5722 AddrDefs.contains(Member)
5723 ? (VF.getKnownMinValue() *
5724 getMemoryInstructionCost(Member,
5726 : getMemInstScalarizationCost(Member, VF);
5728 UpdateMemOpUserCost(cast<LoadInst>(Member));
5729 }
5730 }
5731 }
5732 } else {
5733 // Cannot scalarize fixed-order recurrence phis at the moment.
5734 if (isa<PHINode>(I) && Legal->isFixedOrderRecurrence(cast<PHINode>(I)))
5735 continue;
5736
5737 // Make sure I gets scalarized and gets a cost estimate without
5738 // scalarization overhead.
5739 ForcedScalars[VF].insert(I);
5740 }
5741 }
5742}
5743
5745 assert(!VF.isScalar() &&
5746 "Trying to set a vectorization decision for a scalar VF");
5747
5748 auto ForcedScalar = ForcedScalars.find(VF);
5749 for (BasicBlock *BB : TheLoop->blocks()) {
5750 // For each instruction in the old loop.
5751 for (Instruction &I : *BB) {
5753
5754 if (!CI)
5755 continue;
5756
5760 Function *ScalarFunc = CI->getCalledFunction();
5761 Type *ScalarRetTy = CI->getType();
5762 SmallVector<Type *, 4> Tys, ScalarTys;
5763 for (auto &ArgOp : CI->args())
5764 ScalarTys.push_back(ArgOp->getType());
5765
5766 // Estimate cost of scalarized vector call. The source operands are
5767 // assumed to be vectors, so we need to extract individual elements from
5768 // there, execute VF scalar calls, and then gather the result into the
5769 // vector return value.
5770 if (VF.isFixed()) {
5771 InstructionCost ScalarCallCost =
5772 TTI.getCallInstrCost(ScalarFunc, ScalarRetTy, ScalarTys, CostKind);
5773
5774 // Compute costs of unpacking argument values for the scalar calls and
5775 // packing the return values to a vector.
5776 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
5777 ScalarCost = ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
5778 } else {
5779 // There is no point attempting to calculate the scalar cost for a
5780 // scalable VF as we know it will be Invalid.
5781 assert(!getScalarizationOverhead(CI, VF).isValid() &&
5782 "Unexpected valid cost for scalarizing scalable vectors");
5783 ScalarCost = InstructionCost::getInvalid();
5784 }
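      // Illustrative example (costs assumed): for a fixed VF of 4, a scalar
      // call costing 10 and a scalarization overhead of 6, ScalarCost is
      // 10 * 4 + 6 = 46; for a scalable VF the scalar cost is simply Invalid.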
5785
5786 // Honor ForcedScalars and UniformAfterVectorization decisions.
5787 // TODO: For calls, it might still be more profitable to widen. Use
5788 // VPlan-based cost model to compare different options.
5789 if (VF.isVector() && ((ForcedScalar != ForcedScalars.end() &&
5790 ForcedScalar->second.contains(CI)) ||
5791 isUniformAfterVectorization(CI, VF))) {
5792 setCallWideningDecision(CI, VF, CM_Scalarize, nullptr,
5793 Intrinsic::not_intrinsic, std::nullopt,
5794 ScalarCost);
5795 continue;
5796 }
5797
5798 bool MaskRequired = isMaskRequired(CI);
5799 // Compute corresponding vector type for return value and arguments.
5800 Type *RetTy = toVectorizedTy(ScalarRetTy, VF);
5801 for (Type *ScalarTy : ScalarTys)
5802 Tys.push_back(toVectorizedTy(ScalarTy, VF));
5803
5804 // An in-loop reduction using an fmuladd intrinsic is a special case;
5805 // we don't want the normal cost for that intrinsic.
5807 if (auto RedCost = getReductionPatternCost(CI, VF, RetTy)) {
5810 std::nullopt, *RedCost);
5811 continue;
5812 }
5813
5814 // Find the cost of vectorizing the call, if we can find a suitable
5815 // vector variant of the function.
5816 VFInfo FuncInfo;
5817 Function *VecFunc = nullptr;
5818 // Search through any available variants for one we can use at this VF.
5819 for (VFInfo &Info : VFDatabase::getMappings(*CI)) {
5820 // Must match requested VF.
5821 if (Info.Shape.VF != VF)
5822 continue;
5823
5824 // Must take a mask argument if one is required
5825 if (MaskRequired && !Info.isMasked())
5826 continue;
5827
5828 // Check that all parameter kinds are supported
5829 bool ParamsOk = true;
5830 for (VFParameter Param : Info.Shape.Parameters) {
5831 switch (Param.ParamKind) {
5833 break;
5835 Value *ScalarParam = CI->getArgOperand(Param.ParamPos);
5836 // Make sure the scalar parameter in the loop is invariant.
5837 if (!PSE.getSE()->isLoopInvariant(PSE.getSCEV(ScalarParam),
5838 TheLoop))
5839 ParamsOk = false;
5840 break;
5841 }
5843 Value *ScalarParam = CI->getArgOperand(Param.ParamPos);
5844 // Find the stride for the scalar parameter in this loop and see if
5845 // it matches the stride for the variant.
5846 // TODO: do we need to figure out the cost of an extract to get the
5847 // first lane? Or do we hope that it will be folded away?
5848 ScalarEvolution *SE = PSE.getSE();
5849 if (!match(SE->getSCEV(ScalarParam),
5851 m_SCEV(), m_scev_SpecificSInt(Param.LinearStepOrPos),
5853 ParamsOk = false;
5854 break;
5855 }
5857 break;
5858 default:
5859 ParamsOk = false;
5860 break;
5861 }
5862 }
5863
5864 if (!ParamsOk)
5865 continue;
5866
5867 // Found a suitable candidate, stop here.
5868 VecFunc = CI->getModule()->getFunction(Info.VectorName);
5869 FuncInfo = Info;
5870 break;
5871 }
5872
5873 if (TLI && VecFunc && !CI->isNoBuiltin())
5874 VectorCost = TTI.getCallInstrCost(nullptr, RetTy, Tys, CostKind);
5875
5876 // Find the cost of an intrinsic; some targets may have instructions that
5877 // perform the operation without needing an actual call.
5879 if (IID != Intrinsic::not_intrinsic)
5881
5882 InstructionCost Cost = ScalarCost;
5883 InstWidening Decision = CM_Scalarize;
5884
5885 if (VectorCost.isValid() && VectorCost <= Cost) {
5886 Cost = VectorCost;
5887 Decision = CM_VectorCall;
5888 }
5889
5890 if (IntrinsicCost.isValid() && IntrinsicCost <= Cost) {
5892 Decision = CM_IntrinsicCall;
5893 }
5894
5895 setCallWideningDecision(CI, VF, Decision, VecFunc, IID,
5897 }
5898 }
5899}
5900
5902 if (!Legal->isInvariant(Op))
5903 return false;
5904 // Consider Op invariant only if neither it nor its operands are predicated
5905 // instructions in the loop; otherwise it is not trivially hoistable.
5906 auto *OpI = dyn_cast<Instruction>(Op);
5907 return !OpI || !TheLoop->contains(OpI) ||
5908 (!isPredicatedInst(OpI) &&
5909 (!isa<PHINode>(OpI) || OpI->getParent() != TheLoop->getHeader()) &&
5910 all_of(OpI->operands(),
5911 [this](Value *Op) { return shouldConsiderInvariant(Op); }));
5912}
5913
5916 ElementCount VF) {
5917 // If we know that this instruction will remain uniform, check the cost of
5918 // the scalar version.
5920 VF = ElementCount::getFixed(1);
5921
5922 if (VF.isVector() && isProfitableToScalarize(I, VF))
5923 return InstsToScalarize[VF][I];
5924
5925 // Forced scalars do not have any scalarization overhead.
5926 auto ForcedScalar = ForcedScalars.find(VF);
5927 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
5928 auto InstSet = ForcedScalar->second;
5929 if (InstSet.count(I))
5931 VF.getKnownMinValue();
5932 }
5933
5934 Type *RetTy = I->getType();
5936 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
5937 auto *SE = PSE.getSE();
5938
5939 Type *VectorTy;
5940 if (isScalarAfterVectorization(I, VF)) {
5941 [[maybe_unused]] auto HasSingleCopyAfterVectorization =
5942 [this](Instruction *I, ElementCount VF) -> bool {
5943 if (VF.isScalar())
5944 return true;
5945
5946 auto Scalarized = InstsToScalarize.find(VF);
5947 assert(Scalarized != InstsToScalarize.end() &&
5948 "VF not yet analyzed for scalarization profitability");
5949 return !Scalarized->second.count(I) &&
5950 llvm::all_of(I->users(), [&](User *U) {
5951 auto *UI = cast<Instruction>(U);
5952 return !Scalarized->second.count(UI);
5953 });
5954 };
5955
5956 // With the exception of GEPs and PHIs, after scalarization there should
5957 // only be one copy of the instruction generated in the loop. This is
5958 // because the VF is either 1, or any instructions that need scalarizing
5959 // have already been dealt with by the time we get here. As a result,
5960 // it means we don't have to multiply the instruction cost by VF.
5961 assert(I->getOpcode() == Instruction::GetElementPtr ||
5962 I->getOpcode() == Instruction::PHI ||
5963 (I->getOpcode() == Instruction::BitCast &&
5964 I->getType()->isPointerTy()) ||
5965 HasSingleCopyAfterVectorization(I, VF));
5966 VectorTy = RetTy;
5967 } else
5968 VectorTy = toVectorizedTy(RetTy, VF);
5969
5970 if (VF.isVector() && VectorTy->isVectorTy() &&
5971 !TTI.getNumberOfParts(VectorTy))
5973
5974 // TODO: We need to estimate the cost of intrinsic calls.
5975 switch (I->getOpcode()) {
5976 case Instruction::GetElementPtr:
5977 // We mark this instruction as zero-cost because the cost of GEPs in
5978 // vectorized code depends on whether the corresponding memory instruction
5979 // is scalarized or not. Therefore, we handle GEPs with the memory
5980 // instruction cost.
5981 return 0;
5982 case Instruction::UncondBr:
5983 case Instruction::CondBr: {
5984 // In cases of scalarized and predicated instructions, there will be VF
5985 // predicated blocks in the vectorized loop. Each branch around these
5986 // blocks also requires an extract of its vector compare i1 element.
5987 // Note that the conditional branch from the loop latch will be replaced by
5988 // a single branch controlling the loop, so there is no extra overhead from
5989 // scalarization.
5990 bool ScalarPredicatedBB = false;
5992 if (VF.isVector() && BI &&
5993 (PredicatedBBsAfterVectorization[VF].count(BI->getSuccessor(0)) ||
5994 PredicatedBBsAfterVectorization[VF].count(BI->getSuccessor(1))) &&
5995 BI->getParent() != TheLoop->getLoopLatch())
5996 ScalarPredicatedBB = true;
5997
5998 if (ScalarPredicatedBB) {
5999 // Not possible to scalarize scalable vector with predicated instructions.
6000 if (VF.isScalable())
6002 // Return cost for branches around scalarized and predicated blocks.
6003 auto *VecI1Ty =
6005 return (TTI.getScalarizationOverhead(
6006 VecI1Ty, APInt::getAllOnes(VF.getFixedValue()),
6007 /*Insert*/ false, /*Extract*/ true, CostKind) +
6008 (TTI.getCFInstrCost(Instruction::CondBr, CostKind) *
6009 VF.getFixedValue()));
6010 }
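 // Illustrative example (VF assumed): for a fixed VF of 4 this amounts to the
 // overhead of extracting four i1 mask elements plus four conditional branches
 // around the scalarized, predicated blocks.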
6011
6012 if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
6013 // The back-edge branch will remain, as will all scalar branches.
6014 return TTI.getCFInstrCost(Instruction::UncondBr, CostKind);
6015
6016 // This branch will be eliminated by if-conversion.
6017 return 0;
6018 // Note: We currently assume zero cost for an unconditional branch inside
6019 // a predicated block since it will become a fall-through, although we
6020 // may decide in the future to call TTI for all branches.
6021 }
6022 case Instruction::Switch: {
6023 if (VF.isScalar())
6024 return TTI.getCFInstrCost(Instruction::Switch, CostKind);
6025 auto *Switch = cast<SwitchInst>(I);
6026 return Switch->getNumCases() *
6027 TTI.getCmpSelInstrCost(
6028 Instruction::ICmp,
6029 toVectorTy(Switch->getCondition()->getType(), VF),
6030 toVectorTy(Type::getInt1Ty(I->getContext()), VF),
6032 }
6033 case Instruction::PHI: {
6034 auto *Phi = cast<PHINode>(I);
6035
6036 // First-order recurrences are replaced by vector shuffles inside the loop.
6037 if (VF.isVector() && Legal->isFixedOrderRecurrence(Phi)) {
6039 std::iota(Mask.begin(), Mask.end(), VF.getKnownMinValue() - 1);
6040 return TTI.getShuffleCost(TargetTransformInfo::SK_Splice,
6041 cast<VectorType>(VectorTy),
6042 cast<VectorType>(VectorTy), Mask, CostKind,
6043 VF.getKnownMinValue() - 1);
6044 }
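 // Illustrative example (VF assumed): for a fixed VF of 4 the splice mask is
 // {3, 4, 5, 6}, i.e. the last element of the previous iteration's vector
 // followed by the first three elements of the current one.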
6045
6046 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
6047 // converted into select instructions. We require N - 1 selects per phi
6048 // node, where N is the number of incoming values.
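 // Illustrative example: a phi merging values from three incoming blocks is
 // costed as two vector selects (N - 1 with N = 3) on an i1 mask vector.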
6049 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) {
6050 Type *ResultTy = Phi->getType();
6051
6052 // All instructions in an Any-of reduction chain are narrowed to bool.
6053 // Check if that is the case for this phi node.
6054 auto *HeaderUser = cast_if_present<PHINode>(
6055 find_singleton<User>(Phi->users(), [this](User *U, bool) -> User * {
6056 auto *Phi = dyn_cast<PHINode>(U);
6057 if (Phi && Phi->getParent() == TheLoop->getHeader())
6058 return Phi;
6059 return nullptr;
6060 }));
6061 if (HeaderUser) {
6062 auto &ReductionVars = Legal->getReductionVars();
6063 auto Iter = ReductionVars.find(HeaderUser);
6064 if (Iter != ReductionVars.end() &&
6066 Iter->second.getRecurrenceKind()))
6067 ResultTy = Type::getInt1Ty(Phi->getContext());
6068 }
6069 return (Phi->getNumIncomingValues() - 1) *
6070 TTI.getCmpSelInstrCost(
6071 Instruction::Select, toVectorTy(ResultTy, VF),
6072 toVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
6074 }
6075
6076 // When tail folding with EVL, if the phi is part of an out of loop
6077 // reduction then it will be transformed into a wide vp_merge.
6078 if (VF.isVector() && foldTailWithEVL() &&
6079 Legal->getReductionVars().contains(Phi) && !isInLoopReduction(Phi)) {
6081 Intrinsic::vp_merge, toVectorTy(Phi->getType(), VF),
6082 {toVectorTy(Type::getInt1Ty(Phi->getContext()), VF)});
6083 return TTI.getIntrinsicInstrCost(ICA, CostKind);
6084 }
6085
6086 return TTI.getCFInstrCost(Instruction::PHI, CostKind);
6087 }
6088 case Instruction::UDiv:
6089 case Instruction::SDiv:
6090 case Instruction::URem:
6091 case Instruction::SRem:
6092 if (VF.isVector() && isPredicatedInst(I)) {
6093 const auto [ScalarCost, SafeDivisorCost] = getDivRemSpeculationCost(I, VF);
6094 return isDivRemScalarWithPredication(ScalarCost, SafeDivisorCost) ?
6095 ScalarCost : SafeDivisorCost;
6096 }
6097 // We've proven all lanes safe to speculate, fall through.
6098 [[fallthrough]];
6099 case Instruction::Add:
6100 case Instruction::Sub: {
6101 auto Info = Legal->getHistogramInfo(I);
6102 if (Info && VF.isVector()) {
6103 const HistogramInfo *HGram = Info.value();
6104 // Assume that a non-constant update value (or a constant != 1) requires
6105 // a multiply, and add that into the cost.
6106 InstructionCost MulCost = TTI::TCC_Free;
6107 ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1));
6108 if (!RHS || RHS->getZExtValue() != 1)
6109 MulCost =
6110 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
6111
6112 // Find the cost of the histogram operation itself.
6113 Type *PtrTy = VectorType::get(HGram->Load->getPointerOperandType(), VF);
6114 Type *ScalarTy = I->getType();
6115 Type *MaskTy = VectorType::get(Type::getInt1Ty(I->getContext()), VF);
6116 IntrinsicCostAttributes ICA(Intrinsic::experimental_vector_histogram_add,
6117 Type::getVoidTy(I->getContext()),
6118 {PtrTy, ScalarTy, MaskTy});
6119
6120 // Add the costs together with the add/sub operation.
6121 return TTI.getIntrinsicInstrCost(ICA, CostKind) + MulCost +
6122 TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, CostKind);
6123 }
6124 [[fallthrough]];
6125 }
6126 case Instruction::FAdd:
6127 case Instruction::FSub:
6128 case Instruction::Mul:
6129 case Instruction::FMul:
6130 case Instruction::FDiv:
6131 case Instruction::FRem:
6132 case Instruction::Shl:
6133 case Instruction::LShr:
6134 case Instruction::AShr:
6135 case Instruction::And:
6136 case Instruction::Or:
6137 case Instruction::Xor: {
6138 // If we're speculating on the stride being 1, the multiplication may
6139 // fold away. We can generalize this for all operations using the notion
6140 // of neutral elements. (TODO)
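// Illustrative sketch: if %stride is speculated to be 1 via an SCEV
// predicate, 'mul i64 %i, %stride' simplifies to %i after versioning, so
// the multiply is costed as free here.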
6141 if (I->getOpcode() == Instruction::Mul &&
6142 ((TheLoop->isLoopInvariant(I->getOperand(0)) &&
6143 PSE.getSCEV(I->getOperand(0))->isOne()) ||
6144 (TheLoop->isLoopInvariant(I->getOperand(1)) &&
6145 PSE.getSCEV(I->getOperand(1))->isOne())))
6146 return 0;
6147
6148 // Detect reduction patterns
6149 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy))
6150 return *RedCost;
6151
6152 // Certain instructions can be cheaper to vectorize if they have a constant
6153 // second vector operand. One example of this is shifts on x86.
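// Illustrative sketch: on x86, 'shl <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>'
// is typically cheaper than a shift by a vector of unknown amounts, which is
// why the operand-info query below distinguishes the two cases.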
6154 Value *Op2 = I->getOperand(1);
6155 if (!isa<Constant>(Op2) && TheLoop->isLoopInvariant(Op2) &&
6156 PSE.getSE()->isSCEVable(Op2->getType()) &&
6157 isa<SCEVConstant>(PSE.getSCEV(Op2))) {
6158 Op2 = cast<SCEVConstant>(PSE.getSCEV(Op2))->getValue();
6159 }
6160 auto Op2Info = TTI.getOperandInfo(Op2);
6161 if (Op2Info.Kind == TargetTransformInfo::OK_AnyValue &&
6162 Legal->isInvariant(Op2))
6163 Op2Info.Kind = TargetTransformInfo::OK_UniformValue;
6164
6165 SmallVector<const Value *, 4> Operands(I->operand_values());
6166 return TTI.getArithmeticInstrCost(
6167 I->getOpcode(), VectorTy, CostKind,
6168 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6169 Op2Info, Operands, I, TLI);
6170 }
6171 case Instruction::FNeg: {
6172 return TTI.getArithmeticInstrCost(
6173 I->getOpcode(), VectorTy, CostKind,
6174 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6175 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
6176 I->getOperand(0), I);
6177 }
6178 case Instruction::Select: {
6179 SelectInst *SI = cast<SelectInst>(I);
6180 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
6181 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
6182
6183 const Value *Op0, *Op1;
6184 using namespace llvm::PatternMatch;
6185 if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
6186 match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
6187 // select x, y, false --> x & y
6188 // select x, true, y --> x | y
6189 const auto [Op1VK, Op1VP] = TTI::getOperandInfo(Op0);
6190 const auto [Op2VK, Op2VP] = TTI::getOperandInfo(Op1);
6191 assert(Op0->getType()->getScalarSizeInBits() == 1 &&
6192 Op1->getType()->getScalarSizeInBits() == 1);
6193
6194 return TTI.getArithmeticInstrCost(
6195 match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And,
6196 VectorTy, CostKind, {Op1VK, Op1VP}, {Op2VK, Op2VP}, {Op0, Op1}, I);
6197 }
6198
6199 Type *CondTy = SI->getCondition()->getType();
6200 if (!ScalarCond)
6201 CondTy = VectorType::get(CondTy, VF);
6202
6203 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
6204 if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
6205 Pred = Cmp->getPredicate();
6206 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred,
6207 CostKind, {TTI::OK_AnyValue, TTI::OP_None},
6208 {TTI::OK_AnyValue, TTI::OP_None}, I);
6209 }
6210 case Instruction::ICmp:
6211 case Instruction::FCmp: {
6212 Type *ValTy = I->getOperand(0)->getType();
6213
6214 if (canTruncateToMinimalBitwidth(I, VF)) {
6215 [[maybe_unused]] Instruction *Op0AsInstruction =
6216 dyn_cast<Instruction>(I->getOperand(0));
6217 assert((!canTruncateToMinimalBitwidth(Op0AsInstruction, VF) ||
6218 MinBWs[I] == MinBWs[Op0AsInstruction]) &&
6219 "if both the operand and the compare are marked for "
6220 "truncation, they must have the same bitwidth");
6221 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[I]);
6222 }
6223
6224 VectorTy = toVectorTy(ValTy, VF);
6225 return TTI.getCmpSelInstrCost(
6226 I->getOpcode(), VectorTy, CmpInst::makeCmpResultType(VectorTy),
6227 cast<CmpInst>(I)->getPredicate(), CostKind,
6228 {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None}, I);
6229 }
6230 case Instruction::Store:
6231 case Instruction::Load: {
6232 ElementCount Width = VF;
6233 if (Width.isVector()) {
6234 InstWidening Decision = getWideningDecision(I, Width);
6235 assert(Decision != CM_Unknown &&
6236 "CM decision should be taken at this point");
6237 if (getWideningCost(I, VF) == InstructionCost::getInvalid())
6238 return InstructionCost::getInvalid();
6239 if (Decision == CM_Scalarize)
6240 Width = ElementCount::getFixed(1);
6241 }
6242 VectorTy = toVectorTy(getLoadStoreType(I), Width);
6243 return getMemoryInstructionCost(I, VF);
6244 }
6245 case Instruction::BitCast:
6246 if (I->getType()->isPointerTy())
6247 return 0;
6248 [[fallthrough]];
6249 case Instruction::ZExt:
6250 case Instruction::SExt:
6251 case Instruction::FPToUI:
6252 case Instruction::FPToSI:
6253 case Instruction::FPExt:
6254 case Instruction::PtrToInt:
6255 case Instruction::IntToPtr:
6256 case Instruction::SIToFP:
6257 case Instruction::UIToFP:
6258 case Instruction::Trunc:
6259 case Instruction::FPTrunc: {
6260 // Computes the CastContextHint from a Load/Store instruction.
6261 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
6263 "Expected a load or a store!");
6264
6265 if (VF.isScalar() || !TheLoop->contains(I))
6266 return TTI::CastContextHint::Normal;
6267
6268 switch (getWideningDecision(I, VF)) {
6280 llvm_unreachable("Instr did not go through cost modelling?");
6283 llvm_unreachable_internal("Instr has invalid widening decision");
6284 }
6285
6286 llvm_unreachable("Unhandled case!");
6287 };
6288
6289 unsigned Opcode = I->getOpcode();
6290 TTI::CastContextHint CCH = TTI::CastContextHint::None;
6291 // For Trunc, the context is the only user, which must be a StoreInst.
6292 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
6293 if (I->hasOneUse())
6294 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
6295 CCH = ComputeCCH(Store);
6296 }
6297 // For Z/Sext, the context is the operand, which must be a LoadInst.
6298 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
6299 Opcode == Instruction::FPExt) {
6300 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
6301 CCH = ComputeCCH(Load);
6302 }
6303
6304 // We optimize the truncation of induction variables having constant
6305 // integer steps. The cost of these truncations is the same as the scalar
6306 // operation.
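// Illustrative sketch: 'trunc i64 %iv to i32' of an induction with a
// constant integer step can be generated directly as a narrow i32
// induction, so only the scalar truncate cost is charged below.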
6307 if (isOptimizableIVTruncate(I, VF)) {
6308 auto *Trunc = cast<TruncInst>(I);
6309 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
6310 Trunc->getSrcTy(), CCH, CostKind, Trunc);
6311 }
6312
6313 // Detect reduction patterns
6314 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy))
6315 return *RedCost;
6316
6317 Type *SrcScalarTy = I->getOperand(0)->getType();
6318 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
6319 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
6320 SrcScalarTy =
6321 IntegerType::get(SrcScalarTy->getContext(), MinBWs[Op0AsInstruction]);
6322 Type *SrcVecTy =
6323 VectorTy->isVectorTy() ? toVectorTy(SrcScalarTy, VF) : SrcScalarTy;
6324
6325 if (canTruncateToMinimalBitwidth(I, VF)) {
6326 // If the result type is <= the source type, there will be no extend
6327 // after truncating the users to the minimal required bitwidth.
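// Illustrative sketch: if an i32 zext of an i16 value only feeds users
// that were themselves narrowed back to i16, the extend becomes a no-op
// after truncation and is modeled as free.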
6328 if (VectorTy->getScalarSizeInBits() <= SrcVecTy->getScalarSizeInBits() &&
6329 (I->getOpcode() == Instruction::ZExt ||
6330 I->getOpcode() == Instruction::SExt))
6331 return 0;
6332 }
6333
6334 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
6335 }
6336 case Instruction::Call:
6337 return getVectorCallCost(cast<CallInst>(I), VF);
6338 case Instruction::ExtractValue:
6339 return TTI.getInstructionCost(I, CostKind);
6340 case Instruction::Alloca:
6341 // We cannot easily widen alloca to a scalable alloca, as
6342 // the result would need to be a vector of pointers.
6343 if (VF.isScalable())
6344 return InstructionCost::getInvalid();
6345 return TTI.getArithmeticInstrCost(Instruction::Mul, RetTy, CostKind);
6346 default:
6347 // This opcode is unknown. Assume that it is the same as 'mul'.
6348 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
6349 } // end of switch.
6350}
6351
6352void LoopVectorizationCostModel::collectValuesToIgnore() {
6353 // Ignore ephemeral values.
6354 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
6355
6356 SmallVector<Value *, 4> DeadInterleavePointerOps;
6357 SmallVector<Value *, 4> DeadOps;
6358
6359 // If a scalar epilogue is required, users outside the loop won't use
6360 // live-outs from the vector loop but from the scalar epilogue. Ignore them if
6361 // that is the case.
6362 bool RequiresScalarEpilogue = requiresScalarEpilogue(true);
6363 auto IsLiveOutDead = [this, RequiresScalarEpilogue](User *U) {
6364 return RequiresScalarEpilogue &&
6365 !TheLoop->contains(cast<Instruction>(U)->getParent());
6366 };
6367
6368 LoopBlocksDFS DFS(TheLoop);
6369 DFS.perform(LI);
6370 for (BasicBlock *BB : reverse(make_range(DFS.beginRPO(), DFS.endRPO())))
6371 for (Instruction &I : reverse(*BB)) {
6372 if (VecValuesToIgnore.contains(&I) || ValuesToIgnore.contains(&I))
6373 continue;
6374
6375 // Add instructions that would be trivially dead and are only used by
6376 // values already ignored to DeadOps to seed worklist.
6377 if (wouldInstructionBeTriviallyDead(&I, TLI) &&
6378 all_of(I.users(), [this, IsLiveOutDead](User *U) {
6379 return VecValuesToIgnore.contains(U) ||
6380 ValuesToIgnore.contains(U) || IsLiveOutDead(U);
6381 }))
6382 DeadOps.push_back(&I);
6383
6384 // For interleave groups, we only create a pointer for the start of the
6385 // interleave group. Queue up addresses of group members except the insert
6386 // position for further processing.
6387 if (isAccessInterleaved(&I)) {
6388 auto *Group = getInterleavedAccessGroup(&I);
6389 if (Group->getInsertPos() == &I)
6390 continue;
6391 Value *PointerOp = getLoadStorePointerOperand(&I);
6392 DeadInterleavePointerOps.push_back(PointerOp);
6393 }
6394
6395 // Queue branches for analysis. They are dead, if their successors only
6396 // contain dead instructions.
6397 if (isa<CondBrInst>(&I))
6398 DeadOps.push_back(&I);
6399 }
6400
6401 // Mark ops feeding interleave group members as free, if they are only used
6402 // by other dead computations.
6403 for (unsigned I = 0; I != DeadInterleavePointerOps.size(); ++I) {
6404 auto *Op = dyn_cast<Instruction>(DeadInterleavePointerOps[I]);
6405 if (!Op || !TheLoop->contains(Op) || any_of(Op->users(), [this](User *U) {
6406 Instruction *UI = cast<Instruction>(U);
6407 return !VecValuesToIgnore.contains(U) &&
6408 (!isAccessInterleaved(UI) ||
6409 getInterleavedAccessGroup(UI)->getInsertPos() == UI);
6410 }))
6411 continue;
6412 VecValuesToIgnore.insert(Op);
6413 append_range(DeadInterleavePointerOps, Op->operands());
6414 }
6415
6416 // Mark ops that would be trivially dead and are only used by ignored
6417 // instructions as free.
6418 BasicBlock *Header = TheLoop->getHeader();
6419
6420 // Returns true if the block contains only dead instructions. Such blocks will
6421 // be removed by VPlan-to-VPlan transforms and won't be considered by the
6422 // VPlan-based cost model, so skip them in the legacy cost-model as well.
6423 auto IsEmptyBlock = [this](BasicBlock *BB) {
6424 return all_of(*BB, [this](Instruction &I) {
6425 return ValuesToIgnore.contains(&I) || VecValuesToIgnore.contains(&I) ||
6427 });
6428 };
6429 for (unsigned I = 0; I != DeadOps.size(); ++I) {
6430 auto *Op = dyn_cast<Instruction>(DeadOps[I]);
6431
6432 // Check if the branch should be considered dead.
6433 if (auto *Br = dyn_cast_or_null<CondBrInst>(Op)) {
6434 BasicBlock *ThenBB = Br->getSuccessor(0);
6435 BasicBlock *ElseBB = Br->getSuccessor(1);
6436 // Don't consider branches leaving the loop for simplification.
6437 if (!TheLoop->contains(ThenBB) || !TheLoop->contains(ElseBB))
6438 continue;
6439 bool ThenEmpty = IsEmptyBlock(ThenBB);
6440 bool ElseEmpty = IsEmptyBlock(ElseBB);
6441 if ((ThenEmpty && ElseEmpty) ||
6442 (ThenEmpty && ThenBB->getSingleSuccessor() == ElseBB &&
6443 ElseBB->phis().empty()) ||
6444 (ElseEmpty && ElseBB->getSingleSuccessor() == ThenBB &&
6445 ThenBB->phis().empty())) {
6446 VecValuesToIgnore.insert(Br);
6447 DeadOps.push_back(Br->getCondition());
6448 }
6449 continue;
6450 }
6451
6452 // Skip any op that shouldn't be considered dead.
6453 if (!Op || !TheLoop->contains(Op) ||
6454 (isa<PHINode>(Op) && Op->getParent() == Header) ||
6455 !wouldInstructionBeTriviallyDead(Op, TLI) ||
6456 any_of(Op->users(), [this, IsLiveOutDead](User *U) {
6457 return !VecValuesToIgnore.contains(U) &&
6458 !ValuesToIgnore.contains(U) && !IsLiveOutDead(U);
6459 }))
6460 continue;
6461
6462 // If all of Op's users are in ValuesToIgnore, add it to ValuesToIgnore
6463 // which applies for both scalar and vector versions. Otherwise it is only
6464 // dead in vector versions, so only add it to VecValuesToIgnore.
6465 if (all_of(Op->users(),
6466 [this](User *U) { return ValuesToIgnore.contains(U); }))
6467 ValuesToIgnore.insert(Op);
6468
6469 VecValuesToIgnore.insert(Op);
6470 append_range(DeadOps, Op->operands());
6471 }
6472
6473 // Ignore type-promoting instructions we identified during reduction
6474 // detection.
6475 for (const auto &Reduction : Legal->getReductionVars()) {
6476 const RecurrenceDescriptor &RedDes = Reduction.second;
6477 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
6478 VecValuesToIgnore.insert_range(Casts);
6479 }
6480 // Ignore type-casting instructions we identified during induction
6481 // detection.
6482 for (const auto &Induction : Legal->getInductionVars()) {
6483 const InductionDescriptor &IndDes = Induction.second;
6484 VecValuesToIgnore.insert_range(IndDes.getCastInsts());
6485 }
6486}
6487
6488void LoopVectorizationCostModel::collectInLoopReductions() {
6489 // Avoid duplicating work finding in-loop reductions.
6490 if (!InLoopReductions.empty())
6491 return;
6492
6493 for (const auto &Reduction : Legal->getReductionVars()) {
6494 PHINode *Phi = Reduction.first;
6495 const RecurrenceDescriptor &RdxDesc = Reduction.second;
6496
6497 // Multi-use reductions (e.g., used in FindLastIV patterns) are handled
6498 // separately and should not be considered for in-loop reductions.
6499 if (RdxDesc.hasUsesOutsideReductionChain())
6500 continue;
6501
6502 // We don't collect reductions that are type promoted (yet).
6503 if (RdxDesc.getRecurrenceType() != Phi->getType())
6504 continue;
6505
6506 // In-loop AnyOf and FindIV reductions are not yet supported.
6507 RecurKind Kind = RdxDesc.getRecurrenceKind();
6511 continue;
6512
6513 // If the target would prefer this reduction to happen "in-loop", then we
6514 // want to record it as such.
6515 if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
6516 !TTI.preferInLoopReduction(Kind, Phi->getType()))
6517 continue;
6518
6519 // Check that we can correctly put the reductions into the loop, by
6520 // finding the chain of operations that leads from the phi to the loop
6521 // exit value.
6522 SmallVector<Instruction *, 4> ReductionOperations =
6523 RdxDesc.getReductionOpChain(Phi, TheLoop);
6524 bool InLoop = !ReductionOperations.empty();
6525
6526 if (InLoop) {
6527 InLoopReductions.insert(Phi);
6528 // Add the elements to InLoopReductionImmediateChains for cost modelling.
6529 Instruction *LastChain = Phi;
6530 for (auto *I : ReductionOperations) {
6531 InLoopReductionImmediateChains[I] = LastChain;
6532 LastChain = I;
6533 }
6534 }
6535 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
6536 << " reduction for phi: " << *Phi << "\n");
6537 }
6538}
6539
6540// This function will select a scalable VF if the target supports scalable
6541// vectors and a fixed one otherwise.
6542// TODO: we could return a pair of values that specify the max VF and
6543// min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
6544// `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment
6545// doesn't have a cost model that can choose which plan to execute if
6546// more than one is generated.
6547static ElementCount determineVPlanVF(const TargetTransformInfo &TTI,
6548 LoopVectorizationCostModel &CM) {
6549 unsigned WidestType;
6550 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
6551
6552 TargetTransformInfo::RegisterKind RegKind =
6553 TTI.enableScalableVectorization()
6554 ? TargetTransformInfo::RGK_ScalableVector
6555 : TargetTransformInfo::RGK_FixedWidthVector;
6556
6557 TypeSize RegSize = TTI.getRegisterBitWidth(RegKind);
6558 unsigned N = RegSize.getKnownMinValue() / WidestType;
6559 return ElementCount::get(N, RegSize.isScalable());
6560}
6561
6562VectorizationFactor
6563LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
6564 ElementCount VF = UserVF;
6565 // Outer loop handling: They may require CFG and instruction level
6566 // transformations before even evaluating whether vectorization is profitable.
6567 // Since we cannot modify the incoming IR, we need to build VPlan upfront in
6568 // the vectorization pipeline.
6569 if (!OrigLoop->isInnermost()) {
6570 // If the user doesn't provide a vectorization factor, determine a
6571 // reasonable one.
6572 if (UserVF.isZero()) {
6573 VF = determineVPlanVF(TTI, CM);
6574 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
6575
6576 // Make sure we have a VF > 1 for stress testing.
6577 if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
6578 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
6579 << "overriding computed VF.\n");
6580 VF = ElementCount::getFixed(4);
6581 }
6582 } else if (UserVF.isScalable() && !TTI.supportsScalableVectors() &&
6584 LLVM_DEBUG(dbgs() << "LV: Not vectorizing. Scalable VF requested, but "
6585 << "not supported by the target.\n");
6587 "Scalable vectorization requested but not supported by the target",
6588 "the scalable user-specified vectorization width for outer-loop "
6589 "vectorization cannot be used because the target does not support "
6590 "scalable vectors.",
6591 "ScalableVFUnfeasible", ORE, OrigLoop);
6593 }
6594 assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
6596 "VF needs to be a power of two");
6597 LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
6598 << "VF " << VF << " to build VPlans.\n");
6599 buildVPlans(VF, VF);
6600
6601 if (VPlans.empty())
6602 return VectorizationFactor::Disabled();
6603
6604 // For VPlan build stress testing, we bail out after VPlan construction.
6605 if (VPlanBuildStressTest)
6606 return VectorizationFactor::Disabled();
6607
6608 return {VF, 0 /*Cost*/, 0 /* ScalarCost */};
6609 }
6610
6611 LLVM_DEBUG(
6612 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
6613 "VPlan-native path.\n");
6614 return VectorizationFactor::Disabled();
6615}
6616
6617void LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
6618 assert(OrigLoop->isInnermost() && "Inner loop expected.");
6619 CM.collectValuesToIgnore();
6620 CM.collectElementTypesForWidening();
6621
6622 FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
6623 if (!MaxFactors) // Cases that should not be vectorized nor interleaved.
6624 return;
6625
6626 // Invalidate interleave groups if all blocks of loop will be predicated.
6627 if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) &&
6629 LLVM_DEBUG(
6630 dbgs()
6631 << "LV: Invalidate all interleaved groups due to fold-tail by masking "
6632 "which requires masked-interleaved support.\n");
6633 if (CM.InterleaveInfo.invalidateGroups())
6634 // Invalidating interleave groups also requires invalidating all decisions
6635 // based on them, which includes widening decisions and uniform and scalar
6636 // values.
6637 CM.invalidateCostModelingDecisions();
6638 }
6639
6640 if (CM.foldTailByMasking())
6641 Legal->prepareToFoldTailByMasking();
6642
6643 ElementCount MaxUserVF =
6644 UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
6645 if (UserVF) {
6646 if (!ElementCount::isKnownLE(UserVF, MaxUserVF)) {
6648 "UserVF ignored because it may be larger than the maximal safe VF",
6649 "InvalidUserVF", ORE, OrigLoop);
6650 } else {
6652 "VF needs to be a power of two");
6653 // Collect the instructions (and their associated costs) that will be more
6654 // profitable to scalarize.
6655 CM.collectInLoopReductions();
6656 if (CM.selectUserVectorizationFactor(UserVF)) {
6657 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
6658 ElementCount EpilogueUserVF =
6660 if (EpilogueUserVF.isVector() &&
6661 ElementCount::isKnownLT(EpilogueUserVF, UserVF) &&
6662 CM.selectUserVectorizationFactor(EpilogueUserVF)) {
6663 // Build a separate plan for the forced epilogue VF.
6664 buildVPlansWithVPRecipes(EpilogueUserVF, EpilogueUserVF);
6665 }
6666 buildVPlansWithVPRecipes(UserVF, UserVF);
6668 return;
6669 }
6670 reportVectorizationInfo("UserVF ignored because of invalid costs.",
6671 "InvalidCost", ORE, OrigLoop);
6672 }
6673 }
6674
6675 // Collect the Vectorization Factor Candidates.
6676 SmallVector<ElementCount> VFCandidates;
6677 for (auto VF = ElementCount::getFixed(1);
6678 ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
6679 VFCandidates.push_back(VF);
6680 for (auto VF = ElementCount::getScalable(1);
6681 ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
6682 VFCandidates.push_back(VF);
6683
6684 CM.collectInLoopReductions();
6685 for (const auto &VF : VFCandidates) {
6686 // Collect Uniform and Scalar instructions after vectorization with VF.
6687 CM.collectNonVectorizedAndSetWideningDecisions(VF);
6688 }
6689
6690 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
6691 buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);
6692
6694}
6695
6696InstructionCost VPCostContext::getLegacyCost(Instruction *UI,
6697 ElementCount VF) const {
6698 InstructionCost Cost = CM.getInstructionCost(UI, VF);
6699 if (Cost.isValid() && ForceTargetInstructionCost.getNumOccurrences())
6700 return InstructionCost(ForceTargetInstructionCost.getValue());
6701 return Cost;
6702}
6703
6704bool VPCostContext::skipCostComputation(Instruction *UI, bool IsVector) const {
6705 return CM.ValuesToIgnore.contains(UI) ||
6706 (IsVector && CM.VecValuesToIgnore.contains(UI)) ||
6707 SkipCostComputation.contains(UI);
6708}
6709
6711 return CM.getPredBlockCostDivisor(CostKind, BB);
6712}
6713
6714InstructionCost
6715LoopVectorizationPlanner::precomputeCosts(VPlan &Plan, ElementCount VF,
6716 VPCostContext &CostCtx) const {
6717 InstructionCost Cost;
6718 // Cost modeling for inductions is inaccurate in the legacy cost model
6719 // compared to the recipes that are generated. To match here initially during
6720 // VPlan cost model bring up directly use the induction costs from the legacy
6721 // cost model. Note that we do this as pre-processing; the VPlan may not have
6722 // any recipes associated with the original induction increment instruction
6723 // and may replace truncates with VPWidenIntOrFpInductionRecipe. We precompute
6724 // the cost of induction phis and increments (both that are represented by
6725 // recipes and those that are not), to avoid distinguishing between them here,
6726 // and skip all recipes that represent induction phis and increments (the
6727 // former case) later on, if they exist, to avoid counting them twice.
6728 // Similarly we pre-compute the cost of any optimized truncates.
6729 // TODO: Switch to more accurate costing based on VPlan.
6730 for (const auto &[IV, IndDesc] : Legal->getInductionVars()) {
6731 Instruction *IVInc = cast<Instruction>(
6732 IV->getIncomingValueForBlock(OrigLoop->getLoopLatch()));
6733 SmallVector<Instruction *> IVInsts = {IVInc};
6734 for (unsigned I = 0; I != IVInsts.size(); I++) {
6735 for (Value *Op : IVInsts[I]->operands()) {
6736 auto *OpI = dyn_cast<Instruction>(Op);
6737 if (Op == IV || !OpI || !OrigLoop->contains(OpI) || !Op->hasOneUse())
6738 continue;
6739 IVInsts.push_back(OpI);
6740 }
6741 }
6742 IVInsts.push_back(IV);
6743 for (User *U : IV->users()) {
6744 auto *CI = cast<Instruction>(U);
6745 if (!CostCtx.CM.isOptimizableIVTruncate(CI, VF))
6746 continue;
6747 IVInsts.push_back(CI);
6748 }
6749
6750 // If the vector loop gets executed exactly once with the given VF, ignore
6751 // the costs of comparison and induction instructions, as they'll get
6752 // simplified away.
6753 // TODO: Remove this code after stepping away from the legacy cost model and
6754 // adding code to simplify VPlans before calculating their costs.
6755 auto TC = getSmallConstantTripCount(PSE.getSE(), OrigLoop);
6756 if (TC == VF && !CM.foldTailByMasking())
6757 addFullyUnrolledInstructionsToIgnore(OrigLoop, Legal->getInductionVars(),
6758 CostCtx.SkipCostComputation);
6759
6760 for (Instruction *IVInst : IVInsts) {
6761 if (CostCtx.skipCostComputation(IVInst, VF.isVector()))
6762 continue;
6763 InstructionCost InductionCost = CostCtx.getLegacyCost(IVInst, VF);
6764 LLVM_DEBUG({
6765 dbgs() << "Cost of " << InductionCost << " for VF " << VF
6766 << ": induction instruction " << *IVInst << "\n";
6767 });
6768 Cost += InductionCost;
6769 CostCtx.SkipCostComputation.insert(IVInst);
6770 }
6771 }
6772
6773 /// Compute the cost of all exiting conditions of the loop using the legacy
6774 /// cost model. This is to match the legacy behavior, which adds the cost of
6775 /// all exit conditions. Note that this over-estimates the cost, as there will
6776 /// be a single condition to control the vector loop.
6777 SmallVector<BasicBlock *> Exiting;
6778 CM.TheLoop->getExitingBlocks(Exiting);
6779 SetVector<Instruction *> ExitInstrs;
6780 // Collect all exit conditions.
6781 for (BasicBlock *EB : Exiting) {
6782 auto *Term = dyn_cast<CondBrInst>(EB->getTerminator());
6783 if (!Term || CostCtx.skipCostComputation(Term, VF.isVector()))
6784 continue;
6785 if (auto *CondI = dyn_cast<Instruction>(Term->getOperand(0))) {
6786 ExitInstrs.insert(CondI);
6787 }
6788 }
6789 // Compute the cost of all instructions only feeding the exit conditions.
6790 for (unsigned I = 0; I != ExitInstrs.size(); ++I) {
6791 Instruction *CondI = ExitInstrs[I];
6792 if (!OrigLoop->contains(CondI) ||
6793 !CostCtx.SkipCostComputation.insert(CondI).second)
6794 continue;
6795 InstructionCost CondICost = CostCtx.getLegacyCost(CondI, VF);
6796 LLVM_DEBUG({
6797 dbgs() << "Cost of " << CondICost << " for VF " << VF
6798 << ": exit condition instruction " << *CondI << "\n";
6799 });
6800 Cost += CondICost;
6801 for (Value *Op : CondI->operands()) {
6802 auto *OpI = dyn_cast<Instruction>(Op);
6803 if (!OpI || CostCtx.skipCostComputation(OpI, VF.isVector()) ||
6804 any_of(OpI->users(), [&ExitInstrs](User *U) {
6805 return !ExitInstrs.contains(cast<Instruction>(U));
6806 }))
6807 continue;
6808 ExitInstrs.insert(OpI);
6809 }
6810 }
6811
6812 // Pre-compute the costs for branches except for the backedge, as the number
6813 // of replicate regions in a VPlan may not directly match the number of
6814 // branches, which would lead to different decisions.
6815 // TODO: Compute cost of branches for each replicate region in the VPlan,
6816 // which is more accurate than the legacy cost model.
6817 for (BasicBlock *BB : OrigLoop->blocks()) {
6818 if (CostCtx.skipCostComputation(BB->getTerminator(), VF.isVector()))
6819 continue;
6820 CostCtx.SkipCostComputation.insert(BB->getTerminator());
6821 if (BB == OrigLoop->getLoopLatch())
6822 continue;
6823 auto BranchCost = CostCtx.getLegacyCost(BB->getTerminator(), VF);
6824 Cost += BranchCost;
6825 }
6826
6827 // Don't apply special costs when instruction cost is forced to make sure the
6828 // forced cost is used for each recipe.
6829 if (ForceTargetInstructionCost.getNumOccurrences())
6830 return Cost;
6831
6832 // Pre-compute costs for instructions that are forced-scalar or profitable to
6833 // scalarize. For most such instructions, their scalarization costs are
6834 // accounted for here using the legacy cost model. However, some opcodes
6835 // are excluded from these precomputed scalarization costs and are instead
6836 // modeled later by the VPlan cost model (see UseVPlanCostModel below).
6837 for (Instruction *ForcedScalar : CM.ForcedScalars[VF]) {
6838 if (CostCtx.skipCostComputation(ForcedScalar, VF.isVector()))
6839 continue;
6840 CostCtx.SkipCostComputation.insert(ForcedScalar);
6841 InstructionCost ForcedCost = CostCtx.getLegacyCost(ForcedScalar, VF);
6842 LLVM_DEBUG({
6843 dbgs() << "Cost of " << ForcedCost << " for VF " << VF
6844 << ": forced scalar " << *ForcedScalar << "\n";
6845 });
6846 Cost += ForcedCost;
6847 }
6848
6849 auto UseVPlanCostModel = [](Instruction *I) -> bool {
6850 switch (I->getOpcode()) {
6851 case Instruction::SDiv:
6852 case Instruction::UDiv:
6853 case Instruction::SRem:
6854 case Instruction::URem:
6855 return true;
6856 default:
6857 return false;
6858 }
6859 };
6860 for (const auto &[Scalarized, ScalarCost] : CM.InstsToScalarize[VF]) {
6861 if (UseVPlanCostModel(Scalarized) ||
6862 CostCtx.skipCostComputation(Scalarized, VF.isVector()))
6863 continue;
6864 CostCtx.SkipCostComputation.insert(Scalarized);
6865 LLVM_DEBUG({
6866 dbgs() << "Cost of " << ScalarCost << " for VF " << VF
6867 << ": profitable to scalarize " << *Scalarized << "\n";
6868 });
6869 Cost += ScalarCost;
6870 }
6871
6872 return Cost;
6873}
6874
6875InstructionCost LoopVectorizationPlanner::cost(VPlan &Plan, ElementCount VF,
6876 VPRegisterUsage *RU) const {
6877 VPCostContext CostCtx(CM.TTI, *CM.TLI, Plan, CM, CM.CostKind, PSE, OrigLoop);
6878 InstructionCost Cost = precomputeCosts(Plan, VF, CostCtx);
6879
6880 // Now compute and add the VPlan-based cost.
6881 Cost += Plan.cost(VF, CostCtx);
6882
6883 // Add the cost of spills due to excess register usage
6884 if (CM.shouldConsiderRegPressureForVF(VF))
6885 Cost += RU->spillCost(CostCtx, ForceTargetNumVectorRegs);
6886
6887#ifndef NDEBUG
6888 unsigned EstimatedWidth = estimateElementCount(VF, CM.getVScaleForTuning());
6889 LLVM_DEBUG(dbgs() << "Cost for VF " << VF << ": " << Cost
6890 << " (Estimated cost per lane: ");
6891 if (Cost.isValid()) {
6892 double CostPerLane = double(Cost.getValue()) / EstimatedWidth;
6893 LLVM_DEBUG(dbgs() << format("%.1f", CostPerLane));
6894 } else /* No point dividing an invalid cost - it will still be invalid */
6895 LLVM_DEBUG(dbgs() << "Invalid");
6896 LLVM_DEBUG(dbgs() << ")\n");
6897#endif
6898 return Cost;
6899}
6900
6901std::pair<VectorizationFactor, VPlan *>
6903 if (VPlans.empty())
6904 return {VectorizationFactor::Disabled(), nullptr};
6905 // If there is a single VPlan with a single VF, return it directly.
6906 VPlan &FirstPlan = *VPlans[0];
6907 ElementCount UserVF = Hints.getWidth();
6908 if (hasPlanWithVF(UserVF)) {
6909 if (VPlans.size() == 1) {
6910 assert(FirstPlan.getSingleVF() == UserVF &&
6911 "UserVF must match single VF");
6912 return {VectorizationFactor(FirstPlan.getSingleVF(), 0, 0), &FirstPlan};
6913 }
6915 assert(VPlans.size() == 2 && "Must have exactly 2 VPlans built");
6916 assert(VPlans[0]->getSingleVF() ==
6918 "expected first plan to be for the forced epilogue VF");
6919 assert(VPlans[1]->getSingleVF() == UserVF &&
6920 "expected second plan to be for the forced UserVF");
6921 return {VectorizationFactor(UserVF, 0, 0), VPlans[1].get()};
6922 }
6923 }
6924
6925 LLVM_DEBUG(dbgs() << "LV: Computing best VF using cost kind: "
6926 << (CM.CostKind == TTI::TCK_RecipThroughput
6927 ? "Reciprocal Throughput\n"
6928 : CM.CostKind == TTI::TCK_Latency
6929 ? "Instruction Latency\n"
6930 : CM.CostKind == TTI::TCK_CodeSize ? "Code Size\n"
6931 : CM.CostKind == TTI::TCK_SizeAndLatency
6932 ? "Code Size and Latency\n"
6933 : "Unknown\n"));
6934
6935 ElementCount ScalarVF = ElementCount::getFixed(1);
6936 assert(FirstPlan.hasVF(ScalarVF) &&
6937 "More than a single plan/VF w/o any plan having scalar VF");
6938
6939 // TODO: Compute scalar cost using VPlan-based cost model.
6940 InstructionCost ScalarCost = CM.expectedCost(ScalarVF);
6941 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ScalarCost << ".\n");
6942 VectorizationFactor ScalarFactor(ScalarVF, ScalarCost, ScalarCost);
6943 VectorizationFactor BestFactor = ScalarFactor;
6944
6945 bool ForceVectorization = Hints.getForce() == LoopVectorizeHints::FK_Enabled;
6946 if (ForceVectorization) {
6947 // Ignore scalar width, because the user explicitly wants vectorization.
6948 // Initialize cost to max so that VF = 2 is, at least, chosen during cost
6949 // evaluation.
6950 BestFactor.Cost = InstructionCost::getMax();
6951 }
6952
6953 VPlan *PlanForBestVF = &FirstPlan;
6954
6955 for (auto &P : VPlans) {
6956 ArrayRef<ElementCount> VFs(P->vectorFactors().begin(),
6957 P->vectorFactors().end());
6958
6960 bool ConsiderRegPressure = any_of(VFs, [this](ElementCount VF) {
6961 return CM.shouldConsiderRegPressureForVF(VF);
6962 });
6963 if (ConsiderRegPressure)
6964 RUs = calculateRegisterUsageForPlan(*P, VFs, TTI, CM.ValuesToIgnore);
6965
6966 for (unsigned I = 0; I < VFs.size(); I++) {
6967 ElementCount VF = VFs[I];
6968 if (VF.isScalar())
6969 continue;
6970 if (!ForceVectorization && !willGenerateVectors(*P, VF, TTI)) {
6971 LLVM_DEBUG(
6972 dbgs()
6973 << "LV: Not considering vector loop of width " << VF
6974 << " because it will not generate any vector instructions.\n");
6975 continue;
6976 }
6977 if (CM.OptForSize && !ForceVectorization && hasReplicatorRegion(*P)) {
6978 LLVM_DEBUG(
6979 dbgs()
6980 << "LV: Not considering vector loop of width " << VF
6981 << " because it would cause replicated blocks to be generated,"
6982 << " which isn't allowed when optimizing for size.\n");
6983 continue;
6984 }
6985
6986 InstructionCost Cost =
6987 cost(*P, VF, ConsiderRegPressure ? &RUs[I] : nullptr);
6988 VectorizationFactor CurrentFactor(VF, Cost, ScalarCost);
6989
6990 if (isMoreProfitable(CurrentFactor, BestFactor, P->hasScalarTail())) {
6991 BestFactor = CurrentFactor;
6992 PlanForBestVF = P.get();
6993 }
6994
6995 // If profitable add it to ProfitableVF list.
6996 if (isMoreProfitable(CurrentFactor, ScalarFactor, P->hasScalarTail()))
6997 ProfitableVFs.push_back(CurrentFactor);
6998 }
6999 }
7000
7001 VPlan &BestPlan = *PlanForBestVF;
7002
7003 assert((BestFactor.Width.isScalar() || BestFactor.ScalarCost > 0) &&
7004 "when vectorizing, the scalar cost must be computed.");
7005
7006 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << BestFactor.Width << ".\n");
7007 return {BestFactor, &BestPlan};
7008}
7009
7011 ElementCount BestVF, unsigned BestUF, VPlan &BestVPlan,
7013 EpilogueVectorizationKind EpilogueVecKind) {
7014 assert(BestVPlan.hasVF(BestVF) &&
7015 "Trying to execute plan with unsupported VF");
7016 assert(BestVPlan.hasUF(BestUF) &&
7017 "Trying to execute plan with unsupported UF");
7018 if (BestVPlan.hasEarlyExit())
7019 ++LoopsEarlyExitVectorized;
7020 // TODO: Move to VPlan transform stage once the transition to the VPlan-based
7021 // cost model is complete for better cost estimates.
7022 RUN_VPLAN_PASS(VPlanTransforms::unrollByUF, BestVPlan, BestUF);
7026 bool HasBranchWeights =
7027 hasBranchWeightMD(*OrigLoop->getLoopLatch()->getTerminator());
7028 if (HasBranchWeights) {
7029 std::optional<unsigned> VScale = CM.getVScaleForTuning();
7031 BestVPlan, BestVF, VScale);
7032 }
7033
7034 // Retrieving VectorPH now when it's easier while VPlan still has Regions.
7035 VPBasicBlock *VectorPH = cast<VPBasicBlock>(BestVPlan.getVectorPreheader());
7036
7038 PSE);
7039 VPlanTransforms::optimizeForVFAndUF(BestVPlan, BestVF, BestUF, PSE);
7041 if (EpilogueVecKind == EpilogueVectorizationKind::None)
7043 if (BestVPlan.getEntry()->getSingleSuccessor() ==
7044 BestVPlan.getScalarPreheader()) {
7045 // TODO: The vector loop would be dead, should not even try to vectorize.
7046 ORE->emit([&]() {
7047 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationDead",
7048 OrigLoop->getStartLoc(),
7049 OrigLoop->getHeader())
7050 << "Created vector loop never executes due to insufficient trip "
7051 "count.";
7052 });
7054 }
7055
7057
7059 // Convert the exit condition to AVLNext == 0 for EVL tail folded loops.
7061 // Regions are dissolved after optimizing for VF and UF, which completely
7062 // removes unneeded loop regions first.
7064 // Expand BranchOnTwoConds after dissolution, when latch has direct access to
7065 // its successors.
7067 // Convert loops with variable-length stepping after regions are dissolved.
7069 // Remove dead back-edges for single-iteration loops with BranchOnCond(true).
7070 // Only process loop latches to avoid removing edges from the middle block,
7071 // which may be needed for epilogue vectorization.
7072 VPlanTransforms::removeBranchOnConst(BestVPlan, /*OnlyLatches=*/true);
7075 BestVPlan, VectorPH, CM.foldTailByMasking(),
7076 CM.requiresScalarEpilogue(BestVF.isVector()), &BestVPlan.getVFxUF());
7077 VPlanTransforms::materializeFactors(BestVPlan, VectorPH, BestVF);
7078 VPlanTransforms::cse(BestVPlan);
7080 VPlanTransforms::simplifyKnownEVL(BestVPlan, BestVF, PSE);
7081
7082 // 0. Generate SCEV-dependent code in the entry, including TripCount, before
7083 // making any changes to the CFG.
7084 DenseMap<const SCEV *, Value *> ExpandedSCEVs =
7085 VPlanTransforms::expandSCEVs(BestVPlan, *PSE.getSE());
7086
7087 // Perform the actual loop transformation.
7088 VPTransformState State(&TTI, BestVF, LI, DT, ILV.AC, ILV.Builder, &BestVPlan,
7089 OrigLoop->getParentLoop(),
7090 Legal->getWidestInductionType());
7091
7092#ifdef EXPENSIVE_CHECKS
7093 assert(DT->verify(DominatorTree::VerificationLevel::Fast));
7094#endif
7095
7096 // 1. Set up the skeleton for vectorization, including vector pre-header and
7097 // middle block. The vector loop is created during VPlan execution.
7098 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
7100 State.CFG.PrevBB->getSingleSuccessor(), &BestVPlan);
7102
7103 assert(verifyVPlanIsValid(BestVPlan) && "final VPlan is invalid");
7104
7105 // After vectorization, the exit blocks of the original loop will have
7106 // additional predecessors. Invalidate SCEVs for the exit phis in case SE
7107 // looked through single-entry phis.
7108 ScalarEvolution &SE = *PSE.getSE();
7109 for (VPIRBasicBlock *Exit : BestVPlan.getExitBlocks()) {
7110 if (!Exit->hasPredecessors())
7111 continue;
7112 for (VPRecipeBase &PhiR : Exit->phis())
7113 SE.forgetLcssaPhiWithNewPredecessor(OrigLoop,
7114 &cast<VPIRPhi>(PhiR).getIRPhi());
7115 }
7116 // Forget the original loop and block dispositions.
7117 SE.forgetLoop(OrigLoop);
7118 SE.forgetBlockAndLoopDispositions();
7119
7121
7122 //===------------------------------------------------===//
7123 //
7124 // Notice: any optimization or new instruction that go
7125 // into the code below should also be implemented in
7126 // the cost-model.
7127 //
7128 //===------------------------------------------------===//
7129
7130 // Retrieve loop information before executing the plan, which may remove the
7131 // original loop, if it becomes unreachable.
7132 MDNode *LID = OrigLoop->getLoopID();
7133 unsigned OrigLoopInvocationWeight = 0;
7134 std::optional<unsigned> OrigAverageTripCount =
7135 getLoopEstimatedTripCount(OrigLoop, &OrigLoopInvocationWeight);
7136
7137 BestVPlan.execute(&State);
7138
7139 // 2.6. Maintain Loop Hints
7140 // Keep all loop hints from the original loop on the vector loop (we'll
7141 // replace the vectorizer-specific hints below).
7142 VPBasicBlock *HeaderVPBB = vputils::getFirstLoopHeader(BestVPlan, State.VPDT);
7143 // Add metadata to disable runtime unrolling a scalar loop when there
7144 // are no runtime checks about strides and memory. A scalar loop that is
7145 // rarely used is not worth unrolling.
7146 bool DisableRuntimeUnroll = !ILV.RTChecks.hasChecks() && !BestVF.isScalar();
7148 HeaderVPBB ? LI->getLoopFor(State.CFG.VPBB2IRBB.lookup(HeaderVPBB))
7149 : nullptr,
7150 HeaderVPBB, BestVPlan,
7151 EpilogueVecKind == EpilogueVectorizationKind::Epilogue, LID,
7152 OrigAverageTripCount, OrigLoopInvocationWeight,
7153 estimateElementCount(BestVF * BestUF, CM.getVScaleForTuning()),
7154 DisableRuntimeUnroll);
7155
7156 // 3. Fix the vectorized code: take care of header phi's, live-outs,
7157 // predication, updating analyses.
7158 ILV.fixVectorizedLoop(State);
7159
7161
7162 return ExpandedSCEVs;
7163}
7164
7165//===--------------------------------------------------------------------===//
7166// EpilogueVectorizerMainLoop
7167//===--------------------------------------------------------------------===//
7168
7170 LLVM_DEBUG({
7171 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
7172 << "Main Loop VF:" << EPI.MainLoopVF
7173 << ", Main Loop UF:" << EPI.MainLoopUF
7174 << ", Epilogue Loop VF:" << EPI.EpilogueVF
7175 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7176 });
7177}
7178
7181 dbgs() << "intermediate fn:\n"
7182 << *OrigLoop->getHeader()->getParent() << "\n";
7183 });
7184}
7185
7186//===--------------------------------------------------------------------===//
7187// EpilogueVectorizerEpilogueLoop
7188//===--------------------------------------------------------------------===//
7189
7190/// This function creates a new scalar preheader, using the previous one as
7191/// entry block to the epilogue VPlan. The minimum iteration check is being
7192/// represented in VPlan.
7194 BasicBlock *NewScalarPH = createScalarPreheader("vec.epilog.");
7195 BasicBlock *OriginalScalarPH = NewScalarPH->getSinglePredecessor();
7196 OriginalScalarPH->setName("vec.epilog.iter.check");
7197 VPIRBasicBlock *NewEntry = Plan.createVPIRBasicBlock(OriginalScalarPH);
7198 VPBasicBlock *OldEntry = Plan.getEntry();
7199 for (auto &R : make_early_inc_range(*OldEntry)) {
7200 // Skip moving VPIRInstructions (including VPIRPhis), which are unmovable by
7201 // definition.
7202 if (isa<VPIRInstruction>(&R))
7203 continue;
7204 R.moveBefore(*NewEntry, NewEntry->end());
7205 }
7206
7207 VPBlockUtils::reassociateBlocks(OldEntry, NewEntry);
7208 Plan.setEntry(NewEntry);
7209 // OldEntry is now dead and will be cleaned up when the plan gets destroyed.
7210
7211 return OriginalScalarPH;
7212}
7213
7215 LLVM_DEBUG({
7216 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
7217 << "Epilogue Loop VF:" << EPI.EpilogueVF
7218 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7219 });
7220}
7221
7224 dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n";
7225 });
7226}
7227
7229 VFRange &Range) {
7230 assert((VPI->getOpcode() == Instruction::Load ||
7231 VPI->getOpcode() == Instruction::Store) &&
7232 "Must be called with either a load or store");
7233 Instruction *I = VPI->getUnderlyingInstr();
7234
7235 auto WillWiden = [&](ElementCount VF) -> bool {
7236 LoopVectorizationCostModel::InstWidening Decision =
7237 CM.getWideningDecision(I, VF);
7238 assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
7239 "CM decision should be taken at this point.");
7240 if (Decision == LoopVectorizationCostModel::CM_Interleave)
7241 return true;
7242 if (CM.isScalarAfterVectorization(I, VF) ||
7243 CM.isProfitableToScalarize(I, VF))
7244 return false;
7245 return Decision != LoopVectorizationCostModel::CM_Scalarize;
7246 };
7247
7248 if (!LoopVectorizationPlanner::getDecisionAndClampRange(WillWiden, Range))
7249 return nullptr;
7250
7251 // If a mask is not required, drop it - use unmasked version for safe loads.
7252 // TODO: Determine if mask is needed in VPlan.
7253 VPValue *Mask = CM.isMaskRequired(I) ? VPI->getMask() : nullptr;
7254
7255 // Determine if the pointer operand of the access is either consecutive or
7256 // reverse consecutive.
7257 LoopVectorizationCostModel::InstWidening Decision =
7258 CM.getWideningDecision(I, Range.Start);
7259 bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse;
7260 bool Consecutive =
7261 Reverse || Decision == LoopVectorizationCostModel::CM_Widen;
7262
7263 VPValue *Ptr = VPI->getOpcode() == Instruction::Load ? VPI->getOperand(0)
7264 : VPI->getOperand(1);
7265 if (Consecutive) {
7268 VPSingleDefRecipe *VectorPtr;
7269 if (Reverse) {
7270 // When folding the tail, we may compute an address that we don't in the
7271 // original scalar loop: drop the GEP no-wrap flags in this case.
7272 // Otherwise preserve existing flags without no-unsigned-wrap, as we will
7273 // emit negative indices.
7274 GEPNoWrapFlags Flags =
7275 CM.foldTailByMasking() || !GEP
7277 : GEP->getNoWrapFlags().withoutNoUnsignedWrap();
7278 VectorPtr = new VPVectorEndPointerRecipe(
7279 Ptr, &Plan.getVF(), getLoadStoreType(I),
7280 /*Stride*/ -1, Flags, VPI->getDebugLoc());
7281 } else {
7282 VectorPtr = new VPVectorPointerRecipe(Ptr, getLoadStoreType(I),
7283 GEP ? GEP->getNoWrapFlags()
7285 VPI->getDebugLoc());
7286 }
7287 Builder.setInsertPoint(VPI);
7288 Builder.insert(VectorPtr);
7289 Ptr = VectorPtr;
7290 }
7291
7292 if (VPI->getOpcode() == Instruction::Load) {
7293 auto *Load = cast<LoadInst>(I);
7294 auto *LoadR = new VPWidenLoadRecipe(*Load, Ptr, Mask, Consecutive, Reverse,
7295 *VPI, Load->getDebugLoc());
7296 if (Reverse) {
7297 Builder.insert(LoadR);
7298 return new VPInstruction(VPInstruction::Reverse, LoadR, {}, {},
7299 LoadR->getDebugLoc());
7300 }
7301 return LoadR;
7302 }
7303
7304 StoreInst *Store = cast<StoreInst>(I);
7305 VPValue *StoredVal = VPI->getOperand(0);
7306 if (Reverse)
7307 StoredVal = Builder.createNaryOp(VPInstruction::Reverse, StoredVal,
7308 Store->getDebugLoc());
7309 return new VPWidenStoreRecipe(*Store, Ptr, StoredVal, Mask, Consecutive,
7310 Reverse, *VPI, Store->getDebugLoc());
7311}
7312
7314VPRecipeBuilder::tryToOptimizeInductionTruncate(VPInstruction *VPI,
7315 VFRange &Range) {
7316 auto *I = cast<TruncInst>(VPI->getUnderlyingInstr());
7317 // Optimize the special case where the source is a constant integer
7318 // induction variable. Notice that we can only optimize the 'trunc' case
7319 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
7320 // (c) other casts depend on pointer size.
7321
7322 // Determine whether \p K is a truncation based on an induction variable that
7323 // can be optimized.
7326 I),
7327 Range))
7328 return nullptr;
7329
7330 auto *WidenIV = cast<VPWidenIntOrFpInductionRecipe>(
7331 VPI->getOperand(0)->getDefiningRecipe());
7332 PHINode *Phi = WidenIV->getPHINode();
7333 VPIRValue *Start = WidenIV->getStartValue();
7334 const InductionDescriptor &IndDesc = WidenIV->getInductionDescriptor();
7335
7336 // Wrap flags from the original induction do not apply to the truncated type,
7337 // so do not propagate them.
7338 VPIRFlags Flags = VPIRFlags::WrapFlagsTy(false, false);
7339 VPValue *Step =
7342 Phi, Start, Step, &Plan.getVF(), IndDesc, I, Flags, VPI->getDebugLoc());
7343}
7344
7345VPSingleDefRecipe *VPRecipeBuilder::tryToWidenCall(VPInstruction *VPI,
7346 VFRange &Range) {
7347 CallInst *CI = cast<CallInst>(VPI->getUnderlyingInstr());
7348 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
7349 [this, CI](ElementCount VF) {
7350 return CM.isScalarWithPredication(CI, VF);
7351 },
7352 Range);
7353
7354 if (IsPredicated)
7355 return nullptr;
7356
7357 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
7358 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
7359 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
7360 ID == Intrinsic::pseudoprobe ||
7361 ID == Intrinsic::experimental_noalias_scope_decl))
7362 return nullptr;
7363
7364 SmallVector<VPValue *, 4> Ops(VPI->op_begin(),
7365 VPI->op_begin() + CI->arg_size());
7366
7367 // Is it beneficial to perform intrinsic call compared to lib call?
7368 bool ShouldUseVectorIntrinsic =
7370 [&](ElementCount VF) -> bool {
7371 return CM.getCallWideningDecision(CI, VF).Kind ==
7372 LoopVectorizationCostModel::CM_IntrinsicCall;
7373 },
7374 Range);
7375 if (ShouldUseVectorIntrinsic)
7376 return new VPWidenIntrinsicRecipe(*CI, ID, Ops, CI->getType(), *VPI, *VPI,
7377 VPI->getDebugLoc());
7378
7379 Function *Variant = nullptr;
7380 std::optional<unsigned> MaskPos;
7381 // Is it better to call a vectorized version of the function than to scalarize
7382 // the call?
7383 auto ShouldUseVectorCall = LoopVectorizationPlanner::getDecisionAndClampRange(
7384 [&](ElementCount VF) -> bool {
7385 // The following case may be scalarized depending on the VF.
7386 // The flag shows whether we can use a usual Call for the vectorized
7387 // version of the instruction.
7388
7389 // If we've found a variant at a previous VF, then stop looking. A
7390 // vectorized variant of a function expects input in a certain shape
7391 // -- basically the number of input registers, the number of lanes
7392 // per register, and whether there's a mask required.
7393 // We store a pointer to the variant in the VPWidenCallRecipe, so
7394 // once we have an appropriate variant it's only valid for that VF.
7395 // This will force a different vplan to be generated for each VF that
7396 // finds a valid variant.
7397 if (Variant)
7398 return false;
7399 LoopVectorizationCostModel::CallWideningDecision Decision =
7400 CM.getCallWideningDecision(CI, VF);
7402 Variant = Decision.Variant;
7403 MaskPos = Decision.MaskPos;
7404 return true;
7405 }
7406
7407 return false;
7408 },
7409 Range);
7410 if (ShouldUseVectorCall) {
7411 if (MaskPos.has_value()) {
7412 // We have 2 cases that would require a mask:
7413 // 1) The call needs to be predicated, either due to a conditional
7414 // in the scalar loop or use of an active lane mask with
7415 // tail-folding, and we use the appropriate mask for the block.
7416 // 2) No mask is required for the call instruction, but the only
7417 // available vector variant at this VF requires a mask, so we
7418 // synthesize an all-true mask.
7419 VPValue *Mask = VPI->isMasked() ? VPI->getMask() : Plan.getTrue();
7420
7421 Ops.insert(Ops.begin() + *MaskPos, Mask);
7422 }
7423
7424 Ops.push_back(VPI->getOperand(VPI->getNumOperandsWithoutMask() - 1));
7425 return new VPWidenCallRecipe(CI, Variant, Ops, *VPI, *VPI,
7426 VPI->getDebugLoc());
7427 }
7428
7429 return nullptr;
7430}
7431
7432bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
7434 "Instruction should have been handled earlier");
7435 // Instruction should be widened, unless it is scalar after vectorization,
7436 // scalarization is profitable or it is predicated.
7437 auto WillScalarize = [this, I](ElementCount VF) -> bool {
7438 return CM.isScalarAfterVectorization(I, VF) ||
7439 CM.isProfitableToScalarize(I, VF) ||
7440 CM.isScalarWithPredication(I, VF);
7441 };
7442 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
7443 Range);
7444}
7445
7446VPWidenRecipe *VPRecipeBuilder::tryToWiden(VPInstruction *VPI) {
7447 auto *I = VPI->getUnderlyingInstr();
7448 switch (VPI->getOpcode()) {
7449 default:
7450 return nullptr;
7451 case Instruction::SDiv:
7452 case Instruction::UDiv:
7453 case Instruction::SRem:
7454 case Instruction::URem: {
7455 // If not provably safe, use a select to form a safe divisor before widening the
7456 // div/rem operation itself. Otherwise fall through to general handling below.
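// Illustrative sketch (placeholder names): a predicated 'udiv %x, %d'
// becomes
//   %safe = select <VF x i1> %mask, <VF x iN> %d, <VF x iN> splat(1)
//   %res  = udiv <VF x iN> %x, %safe
// so masked-off lanes divide by 1 and can never trap.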
7457 if (CM.isPredicatedInst(I)) {
7458 SmallVector<VPValue *> Ops(VPI->operandsWithoutMask());
7459 VPValue *Mask = VPI->getMask();
7460 VPValue *One = Plan.getConstantInt(I->getType(), 1u);
7461 auto *SafeRHS =
7462 Builder.createSelect(Mask, Ops[1], One, VPI->getDebugLoc());
7463 Ops[1] = SafeRHS;
7464 return new VPWidenRecipe(*I, Ops, *VPI, *VPI, VPI->getDebugLoc());
7465 }
7466 [[fallthrough]];
7467 }
7468 case Instruction::Add:
7469 case Instruction::And:
7470 case Instruction::AShr:
7471 case Instruction::FAdd:
7472 case Instruction::FCmp:
7473 case Instruction::FDiv:
7474 case Instruction::FMul:
7475 case Instruction::FNeg:
7476 case Instruction::FRem:
7477 case Instruction::FSub:
7478 case Instruction::ICmp:
7479 case Instruction::LShr:
7480 case Instruction::Mul:
7481 case Instruction::Or:
7482 case Instruction::Select:
7483 case Instruction::Shl:
7484 case Instruction::Sub:
7485 case Instruction::Xor:
7486 case Instruction::Freeze:
7487 return new VPWidenRecipe(*I, VPI->operandsWithoutMask(), *VPI, *VPI,
7488 VPI->getDebugLoc());
7489 case Instruction::ExtractValue: {
7491 auto *EVI = cast<ExtractValueInst>(I);
7492 assert(EVI->getNumIndices() == 1 && "Expected one extractvalue index");
7493 unsigned Idx = EVI->getIndices()[0];
7494 NewOps.push_back(Plan.getConstantInt(32, Idx));
7495 return new VPWidenRecipe(*I, NewOps, *VPI, *VPI, VPI->getDebugLoc());
7496 }
7497 };
7498}
7499
7501 if (VPI->getOpcode() != Instruction::Store)
7502 return nullptr;
7503
7504 auto HistInfo =
7505 Legal->getHistogramInfo(cast<StoreInst>(VPI->getUnderlyingInstr()));
7506 if (!HistInfo)
7507 return nullptr;
7508
7509 const HistogramInfo *HI = *HistInfo;
7510 // FIXME: Support other operations.
7511 unsigned Opcode = HI->Update->getOpcode();
7512 assert((Opcode == Instruction::Add || Opcode == Instruction::Sub) &&
7513 "Histogram update operation must be an Add or Sub");
7514
7515 SmallVector<VPValue *, 3> HGramOps;
7516 // Bucket address.
7517 HGramOps.push_back(VPI->getOperand(1));
7518 // Increment value.
7519 HGramOps.push_back(getVPValueOrAddLiveIn(HI->Update->getOperand(1)));
7520
7521 // In case of predicated execution (due to tail-folding, or conditional
7522 // execution, or both), pass the relevant mask.
7523 if (CM.isMaskRequired(HI->Store))
7524 HGramOps.push_back(VPI->getMask());
7525
7526 return new VPHistogramRecipe(Opcode, HGramOps, VPI->getDebugLoc());
7527}
7528
7530 VPInstruction *VPI, VPBuilder &FinalRedStoresBuilder) {
7531 StoreInst *SI;
7532 if ((SI = dyn_cast<StoreInst>(VPI->getUnderlyingInstr())) &&
7533 Legal->isInvariantAddressOfReduction(SI->getPointerOperand())) {
7534 // Only create recipe for the final invariant store of the reduction.
7535 if (Legal->isInvariantStoreOfReduction(SI)) {
7536 auto *Recipe = new VPReplicateRecipe(
7537 SI, VPI->operandsWithoutMask(), true /* IsUniform */,
7538 nullptr /*Mask*/, *VPI, *VPI, VPI->getDebugLoc());
7539 FinalRedStoresBuilder.insert(Recipe);
7540 }
7541 VPI->eraseFromParent();
7542 return true;
7543 }
7544
7545 return false;
7546}
7547
7549 VFRange &Range) {
7550 auto *I = VPI->getUnderlyingInstr();
7552 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
7553 Range);
7554
7555 bool IsPredicated = CM.isPredicatedInst(I);
7556
7557 // Even if the instruction is not marked as uniform, there are certain
7558 // intrinsic calls that can be effectively treated as such, so we check for
7559 // them here. Conservatively, we only do this for scalable vectors, since
7560 // for fixed-width VFs we can always fall back on full scalarization.
7561 if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
7562 switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
7563 case Intrinsic::assume:
7564 case Intrinsic::lifetime_start:
7565 case Intrinsic::lifetime_end:
7566 // For scalable vectors if one of the operands is variant then we still
7567 // want to mark as uniform, which will generate one instruction for just
7568 // the first lane of the vector. We can't scalarize the call in the same
7569 // way as for fixed-width vectors because we don't know how many lanes
7570 // there are.
7571 //
7572 // The reasons for doing it this way for scalable vectors are:
7573 // 1. For the assume intrinsic generating the instruction for the first
7574 // lane is still better than not generating any at all. For
7575 // example, the input may be a splat across all lanes.
7576 // 2. For the lifetime start/end intrinsics the pointer operand only
7577 // does anything useful when the input comes from a stack object,
7578 // which suggests it should always be uniform. For non-stack objects
7579 // the effect is to poison the object, which still allows us to
7580 // remove the call.
7581 IsUniform = true;
7582 break;
7583 default:
7584 break;
7585 }
7586 }
7587 VPValue *BlockInMask = nullptr;
7588 if (!IsPredicated) {
7589 // Finalize the recipe for Instr, first if it is not predicated.
7590 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
7591 } else {
7592 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
7593 // Instructions marked for predication are replicated and a mask operand is
7594 // added initially. Masked replicate recipes will later be placed under an
7595 // if-then construct to prevent side-effects. Generate recipes to compute
7596 // the block mask for this region.
7597 BlockInMask = VPI->getMask();
7598 }
7599
7600 // Note that there is some custom logic to mark some intrinsics as uniform
7601 // manually above for scalable vectors, which this assert needs to account for
7602 // as well.
7603 assert((Range.Start.isScalar() || !IsUniform || !IsPredicated ||
7604 (Range.Start.isScalable() && isa<IntrinsicInst>(I))) &&
7605 "Should not predicate a uniform recipe");
7606 auto *Recipe =
7607 new VPReplicateRecipe(I, VPI->operandsWithoutMask(), IsUniform,
7608 BlockInMask, *VPI, *VPI, VPI->getDebugLoc());
7609 return Recipe;
7610}
7611
7614 VFRange &Range) {
7615 assert(!R->isPhi() && "phis must be handled earlier");
7616 // First, check for specific widening recipes that deal with optimizing
7617 // truncates, calls and memory operations.
7618
7619 VPRecipeBase *Recipe;
7620 auto *VPI = cast<VPInstruction>(R);
7621 if (VPI->getOpcode() == Instruction::Trunc &&
7622 (Recipe = tryToOptimizeInductionTruncate(VPI, Range)))
7623 return Recipe;
7624
7625 // All widen recipes below deal only with VF > 1.
7627 [&](ElementCount VF) { return VF.isScalar(); }, Range))
7628 return nullptr;
7629
7630 if (VPI->getOpcode() == Instruction::Call)
7631 return tryToWidenCall(VPI, Range);
7632
7633 Instruction *Instr = R->getUnderlyingInstr();
7634 assert(!is_contained({Instruction::Load, Instruction::Store},
7635 VPI->getOpcode()) &&
7636 "Should have been handled prior to this!");
7637
7638 if (!shouldWiden(Instr, Range))
7639 return nullptr;
7640
7641 if (VPI->getOpcode() == Instruction::GetElementPtr)
7642 return new VPWidenGEPRecipe(cast<GetElementPtrInst>(Instr),
7643 VPI->operandsWithoutMask(), *VPI,
7644 VPI->getDebugLoc());
7645
7646 if (Instruction::isCast(VPI->getOpcode())) {
7647 auto *CI = cast<CastInst>(Instr);
7648 auto *CastR = cast<VPInstructionWithType>(VPI);
7649 return new VPWidenCastRecipe(CI->getOpcode(), VPI->getOperand(0),
7650 CastR->getResultType(), CI, *VPI, *VPI,
7651 VPI->getDebugLoc());
7652 }
7653
7654 return tryToWiden(VPI);
7655}
7656
7657// To allow RUN_VPLAN_PASS to print the VPlan after VF/UF independent
7658// optimizations.
7660
7661void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
7662 ElementCount MaxVF) {
7663 if (ElementCount::isKnownGT(MinVF, MaxVF))
7664 return;
7665
7666 assert(OrigLoop->isInnermost() && "Inner loop expected.");
7667
7668 const LoopAccessInfo *LAI = Legal->getLAI();
7669 LoopVersioning LVer(*LAI, LAI->getRuntimePointerChecking()->getChecks(),
7670 OrigLoop, LI, DT, PSE.getSE());
7671 if (!LAI->getRuntimePointerChecking()->getChecks().empty() &&
7673 // Only use noalias metadata when using memory checks guaranteeing no
7674 // overlap across all iterations.
7675 LVer.prepareNoAliasMetadata();
7676 }
7677
7678 // Create initial base VPlan0, to serve as common starting point for all
7679 // candidates built later for specific VF ranges.
7680 auto VPlan0 = VPlanTransforms::buildVPlan0(
7681 OrigLoop, *LI, Legal->getWidestInductionType(),
7682 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()), PSE, &LVer);
7683
7684 // Create recipes for header phis.
7686 *VPlan0, PSE, *OrigLoop, Legal->getInductionVars(),
7687 Legal->getReductionVars(), Legal->getFixedOrderRecurrences(),
7688 CM.getInLoopReductions(), Hints.allowReordering());
7689
7691 // If we're vectorizing a loop with an uncountable exit, make sure that the
7692 // recipes are safe to handle.
7693 // TODO: Remove this once we can properly check the VPlan itself for both
7694 // the presence of an uncountable exit and the presence of stores in
7695 // the loop inside handleEarlyExits itself.
7697 if (Legal->hasUncountableEarlyExit())
7698 EEStyle = Legal->hasUncountableExitWithSideEffects()
7701
7702 if (!VPlanTransforms::handleEarlyExits(*VPlan0, EEStyle, OrigLoop, PSE, *DT,
7703 Legal->getAssumptionCache()))
7704 return;
7705 VPlanTransforms::addMiddleCheck(*VPlan0, CM.foldTailByMasking());
7707 if (CM.foldTailByMasking())
7710 *VPlan0);
7711
7712 auto MaxVFTimes2 = MaxVF * 2;
7713 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFTimes2);) {
7714 VFRange SubRange = {VF, MaxVFTimes2};
7715 if (auto Plan = tryToBuildVPlanWithVPRecipes(
7716 std::unique_ptr<VPlan>(VPlan0->duplicate()), SubRange, &LVer)) {
7717 // Now optimize the initial VPlan.
7718 VPlanTransforms::hoistPredicatedLoads(*Plan, PSE, OrigLoop);
7719 VPlanTransforms::sinkPredicatedStores(*Plan, PSE, OrigLoop);
7721 CM.getMinimalBitwidths());
7723 // TODO: try to put addExplicitVectorLength close to addActiveLaneMask
7724 if (CM.foldTailWithEVL()) {
7726 CM.getMaxSafeElements());
7728 }
7729
7730 if (auto P = VPlanTransforms::narrowInterleaveGroups(*Plan, TTI))
7731 VPlans.push_back(std::move(P));
7732
7734 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
7735 VPlans.push_back(std::move(Plan));
7736 }
7737 VF = SubRange.End;
7738 }
7739}
7740
7741VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(
7742 VPlanPtr Plan, VFRange &Range, LoopVersioning *LVer) {
7743
7744 using namespace llvm::VPlanPatternMatch;
7745 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
7746
7747 // ---------------------------------------------------------------------------
7748 // Build initial VPlan: Scan the body of the loop in a topological order to
7749 // visit each basic block after having visited its predecessor basic blocks.
7750 // ---------------------------------------------------------------------------
7751
7752 bool RequiresScalarEpilogueCheck =
7754 [this](ElementCount VF) {
7755 return !CM.requiresScalarEpilogue(VF.isVector());
7756 },
7757 Range);
7758 // Update the branch in the middle block if a scalar epilogue is required.
7759 VPBasicBlock *MiddleVPBB = Plan->getMiddleBlock();
7760 if (!RequiresScalarEpilogueCheck && MiddleVPBB->getNumSuccessors() == 2) {
7761 auto *BranchOnCond = cast<VPInstruction>(MiddleVPBB->getTerminator());
7762 assert(MiddleVPBB->getSuccessors()[1] == Plan->getScalarPreheader() &&
7763 "second successor must be scalar preheader");
7764 BranchOnCond->setOperand(0, Plan->getFalse());
7765 }
7766
7767 // Don't use getDecisionAndClampRange here, because we don't know the UF,
7768 // so it is better for this function to be conservative rather than to split
7769 // the range up into different VPlans.
7770 // TODO: Consider using getDecisionAndClampRange here to split up VPlans.
7771 bool IVUpdateMayOverflow = false;
7772 for (ElementCount VF : Range)
7773 IVUpdateMayOverflow |= !isIndvarOverflowCheckKnownFalse(&CM, VF);
7774
7775 TailFoldingStyle Style = CM.getTailFoldingStyle();
7776 // Use NUW for the induction increment if we proved that it won't overflow in
7777 // the vector loop or when not folding the tail. In the latter case, we know
7778 // that the canonical induction increment will not overflow as the vector trip
7779 // count is >= increment and a multiple of the increment.
7780 VPRegionBlock *LoopRegion = Plan->getVectorLoopRegion();
7781 bool HasNUW = !IVUpdateMayOverflow || Style == TailFoldingStyle::None;
7782 if (!HasNUW) {
7783 auto *IVInc =
7784 LoopRegion->getExitingBasicBlock()->getTerminator()->getOperand(0);
7785 assert(match(IVInc,
7786 m_VPInstruction<Instruction::Add>(
7787 m_Specific(LoopRegion->getCanonicalIV()), m_VPValue())) &&
7788 "Did not find the canonical IV increment");
7789 cast<VPRecipeWithIRFlags>(IVInc)->dropPoisonGeneratingFlags();
7790 }
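  // Illustrative example with hypothetical numbers: without tail folding and
  // VF * UF = 8, the vector trip count is a multiple of 8, so the canonical IV
  // steps 0, 8, 16, ... and stops exactly at the vector trip count, and the
  // increment can keep NUW. With tail folding, e.g. an i8 IV with a trip count
  // of 255 and VF * UF = 8, the final increment would compute 256 and wrap,
  // which is why the poison-generating flags are dropped above.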
7791
7792 // ---------------------------------------------------------------------------
7793 // Pre-construction: record ingredients whose recipes we'll need to further
7794 // process after constructing the initial VPlan.
7795 // ---------------------------------------------------------------------------
7796
7797 // For each interleave group which is relevant for this (possibly trimmed)
7798 // Range, add it to the set of groups to be later applied to the VPlan and add
7799 // placeholders for its members' Recipes which we'll be replacing with a
7800 // single VPInterleaveRecipe.
7801 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
7802 auto ApplyIG = [IG, this](ElementCount VF) -> bool {
7803 bool Result = (VF.isVector() && // Query is illegal for VF == 1
7804 CM.getWideningDecision(IG->getInsertPos(), VF) ==
7806 // For scalable vectors, the interleave factors must be <= 8 since we
7807 // require the (de)interleaveN intrinsics instead of shufflevectors.
7808 assert((!Result || !VF.isScalable() || IG->getFactor() <= 8) &&
7809 "Unsupported interleave factor for scalable vectors");
7810 return Result;
7811 };
7812 if (!getDecisionAndClampRange(ApplyIG, Range))
7813 continue;
7814 InterleaveGroups.insert(IG);
7815 }
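  // As an illustration (assumed access pattern, not necessarily present in
  // this loop): loads of a[2*i] and a[2*i+1] form an interleave group with
  // factor 2; their individual widened memory recipes are later replaced by a
  // single wide load covering both streams followed by a de-interleave
  // (shufflevectors for fixed-width VFs, the (de)interleaveN intrinsics for
  // scalable VFs).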
7816
7817 // ---------------------------------------------------------------------------
7818 // Construct wide recipes and apply predication for original scalar
7819 // VPInstructions in the loop.
7820 // ---------------------------------------------------------------------------
7821 VPRecipeBuilder RecipeBuilder(*Plan, TLI, Legal, CM, Builder);
7822
7823 // Scan the body of the loop in a topological order to visit each basic block
7824 // after having visited its predecessor basic blocks.
7825 VPBasicBlock *HeaderVPBB = LoopRegion->getEntryBasicBlock();
7826 ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>> RPOT(
7827 HeaderVPBB);
7828
7829 // Collect blocks that need predication for in-loop reduction recipes.
7830 DenseSet<BasicBlock *> BlocksNeedingPredication;
7831 for (BasicBlock *BB : OrigLoop->blocks())
7832 if (CM.blockNeedsPredicationForAnyReason(BB))
7833 BlocksNeedingPredication.insert(BB);
7834
7835 VPlanTransforms::createInLoopReductionRecipes(*Plan, BlocksNeedingPredication,
7836 Range.Start);
7837
7838 VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind, CM.PSE,
7839 OrigLoop);
7840
7842 Range, RecipeBuilder);
7843
7844 // Now process all other blocks and instructions.
7845 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) {
7846 // Convert input VPInstructions to widened recipes.
7847 for (VPRecipeBase &R : make_early_inc_range(
7848 make_range(VPBB->getFirstNonPhi(), VPBB->end()))) {
7849 // Skip recipes that do not need transforming or have already been
7850 // transformed.
7851 if (isa<VPWidenCanonicalIVRecipe, VPBlendRecipe, VPReductionRecipe,
7852 VPReplicateRecipe, VPWidenLoadRecipe, VPWidenStoreRecipe,
7853 VPVectorPointerRecipe, VPVectorEndPointerRecipe,
7854 VPHistogramRecipe>(&R))
7855 continue;
7856 auto *VPI = cast<VPInstruction>(&R);
7857 if (!VPI->getUnderlyingValue())
7858 continue;
7859
7860 // TODO: Gradually replace uses of underlying instruction by analyses on
7861 // VPlan. Migrate code relying on the underlying instruction from VPlan0
7862 // to construct recipes below to not use the underlying instruction.
7864 Builder.setInsertPoint(VPI);
7865
7866 VPRecipeBase *Recipe =
7867 RecipeBuilder.tryToCreateWidenNonPhiRecipe(VPI, Range);
7868 if (!Recipe)
7869 Recipe =
7870 RecipeBuilder.handleReplication(cast<VPInstruction>(VPI), Range);
7871
7872 RecipeBuilder.setRecipe(Instr, Recipe);
7873 if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) && isa<TruncInst>(Instr)) {
7874 // Optimized a truncate to VPWidenIntOrFpInductionRecipe. It needs to be
7875 // moved to the phi section in the header.
7876 Recipe->insertBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi());
7877 } else {
7878 Builder.insert(Recipe);
7879 }
7880 if (Recipe->getNumDefinedValues() == 1) {
7881 VPI->replaceAllUsesWith(Recipe->getVPSingleValue());
7882 } else {
7883 assert(Recipe->getNumDefinedValues() == 0 &&
7884 "Unexpected multidef recipe");
7885 }
7886 R.eraseFromParent();
7887 }
7888 }
7889
7890 assert(isa<VPRegionBlock>(LoopRegion) &&
7891 !LoopRegion->getEntryBasicBlock()->empty() &&
7892 "entry block must be set to a VPRegionBlock having a non-empty entry "
7893 "VPBasicBlock");
7894
7895 // TODO: We can't call runPass on these transforms yet, due to verifier
7896 // failures.
7898
7899 // ---------------------------------------------------------------------------
7900 // Transform initial VPlan: Apply previously taken decisions, in order, to
7901 // bring the VPlan to its final state.
7902 // ---------------------------------------------------------------------------
7903
7904 addReductionResultComputation(Plan, RecipeBuilder, Range.Start);
7905
7906 // Optimize FindIV reductions to use sentinel-based approach when possible.
7908 *OrigLoop);
7910 CM.foldTailByMasking());
7911
7912 // Apply mandatory transformation to handle reductions with multiple in-loop
7913 // uses if possible, bail out otherwise.
7915 OrigLoop))
7916 return nullptr;
7917 // Apply mandatory transformation to handle FP maxnum/minnum reduction with
7918 // NaNs if possible, bail out otherwise.
7920 return nullptr;
7921
7922 // Create whole-vector selects for find-last recurrences.
7924 return nullptr;
7925
7926 // Create partial reduction recipes for scaled reductions and transform
7927 // recipes to abstract recipes if it is legal and beneficial and clamp the
7928 // range for better cost estimation.
7929 // TODO: Enable following transform when the EVL-version of extended-reduction
7930 // and mulacc-reduction are implemented.
7931 if (!CM.foldTailWithEVL()) {
7933 Range);
7935 Range);
7936 }
7937
7938 for (ElementCount VF : Range)
7939 Plan->addVF(VF);
7940 Plan->setName("Initial VPlan");
7941
7942 // Interleave memory: for each Interleave Group we marked earlier as relevant
7943 // for this VPlan, replace the Recipes widening its memory instructions with a
7944 // single VPInterleaveRecipe at its insertion point.
7946 InterleaveGroups, RecipeBuilder, CM.isScalarEpilogueAllowed());
7947
7948 // Replace VPValues for known constant strides.
7950 Legal->getLAI()->getSymbolicStrides());
7951
7952 auto BlockNeedsPredication = [this](BasicBlock *BB) {
7953 return Legal->blockNeedsPredication(BB);
7954 };
7956 BlockNeedsPredication);
7957
7958 // Sink users of fixed-order recurrence past the recipe defining the previous
7959 // value and introduce FirstOrderRecurrenceSplice VPInstructions.
7961 Builder))
7962 return nullptr;
7963
7964 if (useActiveLaneMask(Style)) {
7965 // TODO: Move checks to VPlanTransforms::addActiveLaneMask once
7966 // TailFoldingStyle is visible there.
7967 bool ForControlFlow = useActiveLaneMaskForControlFlow(Style);
7968 VPlanTransforms::addActiveLaneMask(*Plan, ForControlFlow);
7969 }
7970
7971 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
7972 return Plan;
7973}
7974
7975VPlanPtr LoopVectorizationPlanner::tryToBuildVPlan(VFRange &Range) {
7976 // Outer loop handling: They may require CFG and instruction level
7977 // transformations before even evaluating whether vectorization is profitable.
7978 // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7979 // the vectorization pipeline.
7980 assert(!OrigLoop->isInnermost());
7981 assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7982
7983 auto Plan = VPlanTransforms::buildVPlan0(
7984 OrigLoop, *LI, Legal->getWidestInductionType(),
7985 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()), PSE);
7986
7988 *Plan, PSE, *OrigLoop, Legal->getInductionVars(),
7989 MapVector<PHINode *, RecurrenceDescriptor>(),
7990 SmallPtrSet<const PHINode *, 1>(), SmallPtrSet<PHINode *, 1>(),
7991 /*AllowReordering=*/false);
7992 [[maybe_unused]] bool CanHandleExits = VPlanTransforms::handleEarlyExits(
7993 *Plan, UncountableExitStyle::NoUncountableExit, OrigLoop, PSE, *DT,
7994 Legal->getAssumptionCache());
7995 assert(CanHandleExits &&
7996 "early-exits are not supported in VPlan-native path");
7997 VPlanTransforms::addMiddleCheck(*Plan, /*TailFolded*/ false);
7998
8000
8001 for (ElementCount VF : Range)
8002 Plan->addVF(VF);
8003
8005 return nullptr;
8006
8007 // Optimize induction live-out users to use precomputed end values.
8009 /*FoldTail=*/false);
8010
8011 assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
8012 return Plan;
8013}
8014
8015void LoopVectorizationPlanner::addReductionResultComputation(
8016 VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder, ElementCount MinVF) {
8017 using namespace VPlanPatternMatch;
8018 VPTypeAnalysis TypeInfo(*Plan);
8019 VPRegionBlock *VectorLoopRegion = Plan->getVectorLoopRegion();
8020 VPBasicBlock *MiddleVPBB = Plan->getMiddleBlock();
8022 VPBasicBlock *LatchVPBB = VectorLoopRegion->getExitingBasicBlock();
8023 Builder.setInsertPoint(&*std::prev(std::prev(LatchVPBB->end())));
8024 VPBasicBlock::iterator IP = MiddleVPBB->getFirstNonPhi();
8025 for (VPRecipeBase &R :
8026 Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
8027 VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
8028 // TODO: Remove check for constant incoming value once removeDeadRecipes is
8029 // used on VPlan0.
8030 if (!PhiR || isa<VPIRValue>(PhiR->getOperand(1)))
8031 continue;
8032
8033 RecurKind RecurrenceKind = PhiR->getRecurrenceKind();
8034 const RecurrenceDescriptor &RdxDesc = Legal->getRecurrenceDescriptor(
8036 Type *PhiTy = TypeInfo.inferScalarType(PhiR);
8037 // If tail is folded by masking, introduce selects between the phi
8038 // and the users outside the vector region of each reduction, at the
8039 // beginning of the dedicated latch block.
8040 auto *OrigExitingVPV = PhiR->getBackedgeValue();
8041 auto *NewExitingVPV = PhiR->getBackedgeValue();
8042 // Don't output selects for partial reductions because they have an output
8043 // with fewer lanes than the VF. So the operands of the select would have
8044 // different numbers of lanes. Partial reductions mask the input instead.
8045 auto *RR = dyn_cast<VPReductionRecipe>(OrigExitingVPV->getDefiningRecipe());
8046 if (!PhiR->isInLoop() && CM.foldTailByMasking() &&
8047 (!RR || !RR->isPartialReduction())) {
8048 VPValue *Cond = vputils::findHeaderMask(*Plan);
8049 NewExitingVPV =
8050 Builder.createSelect(Cond, OrigExitingVPV, PhiR, {}, "", *PhiR);
8051 OrigExitingVPV->replaceUsesWithIf(NewExitingVPV, [](VPUser &U, unsigned) {
8052 using namespace VPlanPatternMatch;
8053 return match(
8054 &U, m_CombineOr(
8055 m_VPInstruction<VPInstruction::ComputeAnyOfResult>(),
8056 m_VPInstruction<VPInstruction::ComputeReductionResult>()));
8057 });
8058
8059 if (CM.usePredicatedReductionSelect(RecurrenceKind))
8060 PhiR->setOperand(1, NewExitingVPV);
8061 }
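    // Sketch of the resulting pattern, assuming a simple add reduction with
    // header mask %mask: the exiting value becomes
    //   %sel = select %mask, %rdx.next, %rdx.phi
    // so lanes disabled by the mask keep the value from the previous
    // iteration; %sel feeds the final reduction computation and, if predicated
    // reduction selects are used, the reduction phi as well.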
8062
8063 // We want code in the middle block to appear to execute on the location of
8064 // the scalar loop's latch terminator because: (a) it is all compiler
8065 // generated, (b) these instructions are always executed after evaluating
8066 // the latch conditional branch, and (c) other passes may add new
8067 // predecessors which terminate on this line. This is the easiest way to
8068 // ensure we don't accidentally cause an extra step back into the loop while
8069 // debugging.
8070 DebugLoc ExitDL = OrigLoop->getLoopLatch()->getTerminator()->getDebugLoc();
8071
8072 // TODO: At the moment ComputeReductionResult also drives creation of the
8073 // bc.merge.rdx phi nodes, hence it needs to be created unconditionally here
8074 // even for in-loop reductions, until the reduction resume value handling is
8075 // also modeled in VPlan.
8076 VPInstruction *FinalReductionResult;
8077 VPBuilder::InsertPointGuard Guard(Builder);
8078 Builder.setInsertPoint(MiddleVPBB, IP);
8079 // For AnyOf reductions, find the select among PhiR's users. This is used
8080 // both to find NewVal for ComputeAnyOfResult and to adjust the reduction.
8081 VPRecipeBase *AnyOfSelect = nullptr;
8082 if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RecurrenceKind)) {
8083 AnyOfSelect = cast<VPRecipeBase>(*find_if(PhiR->users(), [](VPUser *U) {
8084 return match(U, m_Select(m_VPValue(), m_VPValue(), m_VPValue()));
8085 }));
8086 }
8087 if (AnyOfSelect) {
8088 VPValue *Start = PhiR->getStartValue();
8089 // NewVal is the non-phi operand of the select.
8090 VPValue *NewVal = AnyOfSelect->getOperand(1) == PhiR
8091 ? AnyOfSelect->getOperand(2)
8092 : AnyOfSelect->getOperand(1);
8093 VPIRFlags OrFlags(RecurKind::Or, /*IsOrdered=*/false,
8094 /*IsInLoop=*/false, FastMathFlags());
8095 auto *OrReduce =
8096 Builder.createNaryOp(VPInstruction::ComputeReductionResult,
8097 {NewExitingVPV}, OrFlags, ExitDL);
8098 FinalReductionResult = Builder.createNaryOp(
8099 VPInstruction::ComputeAnyOfResult, {Start, NewVal, OrReduce}, ExitDL);
8100 } else {
8101 VPIRFlags Flags(RecurrenceKind, PhiR->isOrdered(), PhiR->isInLoop(),
8102 PhiR->getFastMathFlags());
8103 FinalReductionResult =
8104 Builder.createNaryOp(VPInstruction::ComputeReductionResult,
8105 {NewExitingVPV}, Flags, ExitDL);
8106 }
8107 // If the vector reduction can be performed in a smaller type, we truncate
8108 // then extend the loop exit value to enable InstCombine to evaluate the
8109 // entire expression in the smaller type.
8110 if (MinVF.isVector() && PhiTy != RdxDesc.getRecurrenceType() &&
8112 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
8114 "Unexpected truncated min-max recurrence!");
8115 Type *RdxTy = RdxDesc.getRecurrenceType();
8116 VPWidenCastRecipe *Trunc;
8117 Instruction::CastOps ExtendOpc =
8118 RdxDesc.isSigned() ? Instruction::SExt : Instruction::ZExt;
8119 VPWidenCastRecipe *Extnd;
8120 {
8121 VPBuilder::InsertPointGuard Guard(Builder);
8122 Builder.setInsertPoint(
8123 NewExitingVPV->getDefiningRecipe()->getParent(),
8124 std::next(NewExitingVPV->getDefiningRecipe()->getIterator()));
8125 Trunc =
8126 Builder.createWidenCast(Instruction::Trunc, NewExitingVPV, RdxTy);
8127 Extnd = Builder.createWidenCast(ExtendOpc, Trunc, PhiTy);
8128 }
8129 if (PhiR->getOperand(1) == NewExitingVPV)
8130 PhiR->setOperand(1, Extnd->getVPSingleValue());
8131
8132 // Update ComputeReductionResult with the truncated exiting value and
8133 // extend its result. Operand 0 provides the values to be reduced.
8134 FinalReductionResult->setOperand(0, Trunc);
8135 FinalReductionResult =
8136 Builder.createScalarCast(ExtendOpc, FinalReductionResult, PhiTy, {});
8137 }
8138
8139 // Update all users outside the vector region. Also replace redundant
8140 // extracts.
8141 for (auto *U : to_vector(OrigExitingVPV->users())) {
8142 auto *Parent = cast<VPRecipeBase>(U)->getParent();
8143 if (FinalReductionResult == U || Parent->getParent())
8144 continue;
8145 // Skip ComputeReductionResult and FindIV reductions when they are not the
8146 // final result.
8147 if (match(U, m_VPInstruction<VPInstruction::ComputeReductionResult>()) ||
8149 match(U, m_VPInstruction<Instruction::ICmp>())))
8150 continue;
8151 U->replaceUsesOfWith(OrigExitingVPV, FinalReductionResult);
8152
8153 // Look through ExtractLastPart.
8155 U = cast<VPInstruction>(U)->getSingleUser();
8156
8159 cast<VPInstruction>(U)->replaceAllUsesWith(FinalReductionResult);
8160 }
8161
8162 // Adjust AnyOf reductions; replace the reduction phi for the selected value
8163 // with a boolean reduction phi node to check if the condition is true in
8164 // any iteration. The final value is selected by the final
8165 // ComputeReductionResult.
8166 if (AnyOfSelect) {
8167 VPValue *Cmp = AnyOfSelect->getOperand(0);
8168 // If the compare is checking the reduction PHI node, adjust it to check
8169 // the start value.
8170 if (VPRecipeBase *CmpR = Cmp->getDefiningRecipe())
8171 CmpR->replaceUsesOfWith(PhiR, PhiR->getStartValue());
8172 Builder.setInsertPoint(AnyOfSelect);
8173
8174 // If the true value of the select is the reduction phi, the new value is
8175 // selected if the negated condition is true in any iteration.
8176 if (AnyOfSelect->getOperand(1) == PhiR)
8177 Cmp = Builder.createNot(Cmp);
8178 VPValue *Or = Builder.createOr(PhiR, Cmp);
8179 AnyOfSelect->getVPSingleValue()->replaceAllUsesWith(Or);
8180 // Delete AnyOfSelect now that it has invalid types.
8181 ToDelete.push_back(AnyOfSelect);
8182
8183 // Convert the reduction phi to operate on bools.
8184 PhiR->setOperand(0, Plan->getFalse());
8185 continue;
8186 }
8187
8188 RecurKind RK = PhiR->getRecurrenceKind();
8193 VPBuilder PHBuilder(Plan->getVectorPreheader());
8194 VPValue *Iden = Plan->getOrAddLiveIn(
8195 getRecurrenceIdentity(RK, PhiTy, PhiR->getFastMathFlags()));
8196 auto *ScaleFactorVPV = Plan->getConstantInt(32, 1);
8197 VPValue *StartV = PHBuilder.createNaryOp(
8199 {PhiR->getStartValue(), Iden, ScaleFactorVPV}, *PhiR);
8200 PhiR->setOperand(0, StartV);
8201 }
8202 }
8203 for (VPRecipeBase *R : ToDelete)
8204 R->eraseFromParent();
8205
8207}
8208
8210 VPlan &Plan, GeneratedRTChecks &RTChecks, bool HasBranchWeights) const {
8211 const auto &[SCEVCheckCond, SCEVCheckBlock] = RTChecks.getSCEVChecks();
8212 if (SCEVCheckBlock && SCEVCheckBlock->hasNPredecessors(0)) {
8213 assert((!CM.OptForSize ||
8214 CM.Hints->getForce() == LoopVectorizeHints::FK_Enabled) &&
8215 "Cannot SCEV check stride or overflow when optimizing for size");
8216 VPlanTransforms::attachCheckBlock(Plan, SCEVCheckCond, SCEVCheckBlock,
8217 HasBranchWeights);
8218 }
8219 const auto &[MemCheckCond, MemCheckBlock] = RTChecks.getMemRuntimeChecks();
8220 if (MemCheckBlock && MemCheckBlock->hasNPredecessors(0)) {
8221 // VPlan-native path does not do any analysis for runtime checks
8222 // currently.
8223 assert((!EnableVPlanNativePath || OrigLoop->isInnermost()) &&
8224 "Runtime checks are not supported for outer loops yet");
8225
8226 if (CM.OptForSize) {
8227 assert(
8228 CM.Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
8229 "Cannot emit memory checks when optimizing for size, unless forced "
8230 "to vectorize.");
8231 ORE->emit([&]() {
8232 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
8233 OrigLoop->getStartLoc(),
8234 OrigLoop->getHeader())
8235 << "Code-size may be reduced by not forcing "
8236 "vectorization, or by source-code modifications "
8237 "eliminating the need for runtime checks "
8238 "(e.g., adding 'restrict').";
8239 });
8240 }
8241 VPlanTransforms::attachCheckBlock(Plan, MemCheckCond, MemCheckBlock,
8242 HasBranchWeights);
8243 }
8244}
8245
8247 VPlan &Plan, ElementCount VF, unsigned UF,
8248 ElementCount MinProfitableTripCount) const {
8249 const uint32_t *BranchWeights =
8250 hasBranchWeightMD(*OrigLoop->getLoopLatch()->getTerminator())
8252 : nullptr;
8254 Plan, VF, UF, MinProfitableTripCount,
8255 CM.requiresScalarEpilogue(VF.isVector()), CM.foldTailByMasking(),
8256 OrigLoop, BranchWeights,
8257 OrigLoop->getLoopPredecessor()->getTerminator()->getDebugLoc(), PSE);
8258}
8259
8260// Determine how to lower the scalar epilogue, which depends on 1) optimising
8261// for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
8262// predication, and 4) a TTI hook that analyses whether the loop is suitable
8263// for predication.
8265 Function *F, Loop *L, LoopVectorizeHints &Hints, bool OptForSize,
8268 // 1) OptSize takes precedence over all other options, i.e. if this is set,
8269 // don't look at hints or options, and don't request a scalar epilogue.
8270 if (F->hasOptSize() ||
8271 (OptForSize && Hints.getForce() != LoopVectorizeHints::FK_Enabled))
8273
8274 // 2) If set, obey the directives
8275 if (PreferPredicateOverEpilogue.getNumOccurrences()) {
8283 };
8284 }
8285
8286 // 3) If set, obey the hints
8287 switch (Hints.getPredicate()) {
8292 };
8293
8294 // 4) if the TTI hook indicates this is profitable, request predication.
8295 TailFoldingInfo TFI(TLI, &LVL, IAI);
8296 if (TTI->preferPredicateOverEpilogue(&TFI))
8298
8300}
8301
8302// Process the loop in the VPlan-native vectorization path. This path builds
8303// VPlan upfront in the vectorization pipeline, which allows to apply
8304// VPlan-to-VPlan transformations from the very beginning without modifying the
8305// input LLVM IR.
8311 std::function<BlockFrequencyInfo &()> GetBFI, bool OptForSize,
8312 LoopVectorizeHints &Hints, LoopVectorizationRequirements &Requirements) {
8313
8315 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
8316 return false;
8317 }
8318 assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
8319 Function *F = L->getHeader()->getParent();
8320 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
8321
8323 getScalarEpilogueLowering(F, L, Hints, OptForSize, TTI, TLI, *LVL, &IAI);
8324
8325 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE,
8326 GetBFI, F, &Hints, IAI, OptForSize);
8327 // Use the planner for outer loop vectorization.
8328 // TODO: CM is not used at this point inside the planner. Turn CM into an
8329 // optional argument if we don't need it in the future.
8330 LoopVectorizationPlanner LVP(L, LI, DT, TLI, *TTI, LVL, CM, IAI, PSE, Hints,
8331 ORE);
8332
8333 // Get user vectorization factor.
8334 ElementCount UserVF = Hints.getWidth();
8335
8337
8338 // Plan how to best vectorize, return the best VF and its cost.
8339 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
8340
8341 // If we are stress testing VPlan builds, do not attempt to generate vector
8342 // code. Masked vector code generation support will follow soon.
8343 // Also, do not attempt to vectorize if no vector code will be produced.
8345 return false;
8346
8347 VPlan &BestPlan = LVP.getPlanFor(VF.Width);
8348
8349 {
8350 GeneratedRTChecks Checks(PSE, DT, LI, TTI, CM.CostKind);
8351 InnerLoopVectorizer LB(L, PSE, LI, DT, TTI, AC, VF.Width, /*UF=*/1, &CM,
8352 Checks, BestPlan);
8353 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \"" << F->getName()
8354 << "\"\n");
8355 LVP.addMinimumIterationCheck(BestPlan, VF.Width, /*UF=*/1,
8357 bool HasBranchWeights =
8358 hasBranchWeightMD(*L->getLoopLatch()->getTerminator());
8359 LVP.attachRuntimeChecks(BestPlan, Checks, HasBranchWeights);
8360
8361 reportVectorization(ORE, L, VF, 1);
8362
8363 LVP.executePlan(VF.Width, /*UF=*/1, BestPlan, LB, DT);
8364 }
8365
8366 assert(!verifyFunction(*F, &dbgs()));
8367 return true;
8368}
8369
8370// Emit a remark if there are stores to floats that required a floating point
8371// extension. If the vectorized loop was generated with mixed floating point precision there
8372// will be a performance penalty from the conversion overhead and the change in
8373// the vector width.
8376 for (BasicBlock *BB : L->getBlocks()) {
8377 for (Instruction &Inst : *BB) {
8378 if (auto *S = dyn_cast<StoreInst>(&Inst)) {
8379 if (S->getValueOperand()->getType()->isFloatTy())
8380 Worklist.push_back(S);
8381 }
8382 }
8383 }
8384
8385 // Traverse the floating point stores upwards, searching for floating point
8386 // conversions.
8389 while (!Worklist.empty()) {
8390 auto *I = Worklist.pop_back_val();
8391 if (!L->contains(I))
8392 continue;
8393 if (!Visited.insert(I).second)
8394 continue;
8395
8396 // Emit a remark if the floating point store required a floating
8397 // point conversion.
8398 // TODO: More work could be done to identify the root cause such as a
8399 // constant or a function return type and point the user to it.
8400 if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
8401 ORE->emit([&]() {
8402 return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
8403 I->getDebugLoc(), L->getHeader())
8404 << "floating point conversion changes vector width. "
8405 << "Mixed floating point precision requires an up/down "
8406 << "cast that will negatively impact performance.";
8407 });
8408
8409 for (Use &Op : I->operands())
8410 if (auto *OpI = dyn_cast<Instruction>(Op))
8411 Worklist.push_back(OpI);
8412 }
8413}
8414
8415/// For loops with uncountable early exits, find the cost of doing work when
8416/// exiting the loop early, such as calculating the final exit values of
8417/// variables used outside the loop.
8418/// TODO: This is currently overly pessimistic because the loop may not take
8419/// the early exit, but better to keep this conservative for now. In future,
8420/// it might be possible to relax this by using branch probabilities.
8422 VPlan &Plan, ElementCount VF) {
8423 InstructionCost Cost = 0;
8424 for (auto *ExitVPBB : Plan.getExitBlocks()) {
8425 for (auto *PredVPBB : ExitVPBB->getPredecessors()) {
8426 // If the predecessor is not the middle.block, then it must be the
8427 // vector.early.exit block, which may contain work to calculate the exit
8428 // values of variables used outside the loop.
8429 if (PredVPBB != Plan.getMiddleBlock()) {
8430 LLVM_DEBUG(dbgs() << "Calculating cost of work in exit block "
8431 << PredVPBB->getName() << ":\n");
8432 Cost += PredVPBB->cost(VF, CostCtx);
8433 }
8434 }
8435 }
8436 return Cost;
8437}
8438
8439/// This function determines whether or not it's still profitable to vectorize
8440/// the loop given the extra work we have to do outside of the loop:
8441/// 1. Perform the runtime checks before entering the loop to ensure it's safe
8442/// to vectorize.
8443/// 2. In the case of loops with uncountable early exits, we may have to do
8444/// extra work when exiting the loop early, such as calculating the final
8445/// exit values of variables used outside the loop.
8446/// 3. The middle block.
8447static bool isOutsideLoopWorkProfitable(GeneratedRTChecks &Checks,
8448 VectorizationFactor &VF, Loop *L,
8450 VPCostContext &CostCtx, VPlan &Plan,
8452 std::optional<unsigned> VScale) {
8453 InstructionCost RtC = Checks.getCost();
8454 if (!RtC.isValid())
8455 return false;
8456
8457 // When interleaving only, the scalar and vector costs will be equal, which
8458 // in turn would lead to a divide by 0. Fall back to a hard threshold.
8459 if (VF.Width.isScalar()) {
8460 // TODO: Should we rename VectorizeMemoryCheckThreshold?
8462 LLVM_DEBUG(
8463 dbgs()
8464 << "LV: Interleaving only is not profitable due to runtime checks\n");
8465 return false;
8466 }
8467 return true;
8468 }
8469
8470 // The scalar cost should only be 0 when vectorizing with a user specified
8471 // VF/IC. In those cases, runtime checks should always be generated.
8472 uint64_t ScalarC = VF.ScalarCost.getValue();
8473 if (ScalarC == 0)
8474 return true;
8475
8476 InstructionCost TotalCost = RtC;
8477 // Add on the cost of any work required in the vector early exit block, if
8478 // one exists.
8479 TotalCost += calculateEarlyExitCost(CostCtx, Plan, VF.Width);
8480 TotalCost += Plan.getMiddleBlock()->cost(VF.Width, CostCtx);
8481
8482 // First, compute the minimum iteration count required so that the vector
8483 // loop outperforms the scalar loop.
8484 // The total cost of the scalar loop is
8485 // ScalarC * TC
8486 // where
8487 // * TC is the actual trip count of the loop.
8488 // * ScalarC is the cost of a single scalar iteration.
8489 //
8490 // The total cost of the vector loop is
8491 // TotalCost + VecC * (TC / VF) + EpiC
8492 // where
8493 //  * TotalCost is the sum of the costs of
8494 // - the generated runtime checks, i.e. RtC
8495 // - performing any additional work in the vector.early.exit block for
8496 // loops with uncountable early exits.
8497 // - the middle block, if ExpectedTC <= VF.Width.
8498 // * VecC is the cost of a single vector iteration.
8499 // * TC is the actual trip count of the loop
8500 // * VF is the vectorization factor
8501 //  * EpiC is the cost of the generated epilogue, including the cost
8502 // of the remaining scalar operations.
8503 //
8504 // Vectorization is profitable once the total vector cost is less than the
8505 // total scalar cost:
8506 // TotalCost + VecC * (TC / VF) + EpiC < ScalarC * TC
8507 //
8508 // Now we can compute the minimum required trip count TC as
8509 // VF * (TotalCost + EpiC) / (ScalarC * VF - VecC) < TC
8510 //
8511 // For now we assume the epilogue cost EpiC = 0 for simplicity. Note that
8512 // the computations use integer division with the result rounded up
8513 // (divideCeil), hence we get an upper estimate of the TC.
8514 unsigned IntVF = estimateElementCount(VF.Width, VScale);
8515 uint64_t Div = ScalarC * IntVF - VF.Cost.getValue();
8516 uint64_t MinTC1 =
8517 Div == 0 ? 0 : divideCeil(TotalCost.getValue() * IntVF, Div);
8518
8519 // Second, compute a minimum iteration count so that the cost of the
8520 // runtime checks is only a fraction of the total scalar loop cost. This
8521 // adds a loop-dependent bound on the overhead incurred if the runtime
8522 // checks fail. In case the runtime checks fail, the cost is RtC + ScalarC
8523 // * TC. To bound the runtime check to be a fraction 1/X of the scalar
8524 // cost, compute
8525 // RtC < ScalarC * TC * (1 / X) ==> RtC * X / ScalarC < TC
8526 uint64_t MinTC2 = divideCeil(RtC.getValue() * 10, ScalarC);
8527
8528 // Now pick the larger minimum. If it is not a multiple of VF and a scalar
8529 // epilogue is allowed, choose the next closest multiple of VF. This should
8530 // partly compensate for ignoring the epilogue cost.
8531 uint64_t MinTC = std::max(MinTC1, MinTC2);
8532 if (SEL == CM_ScalarEpilogueAllowed)
8533 MinTC = alignTo(MinTC, IntVF);
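  // Worked example with purely illustrative costs: ScalarC = 4, VecC = 10,
  // IntVF = 4, RtC = 20 and TotalCost = 24. Then
  //   Div    = 4 * 4 - 10         = 6
  //   MinTC1 = ceil(24 * 4 / 6)   = 16
  //   MinTC2 = ceil(20 * 10 / 4)  = 50
  // and MinTC = max(16, 50) = 50, rounded up to 52 (the next multiple of
  // IntVF) when a scalar epilogue is allowed.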
8535
8536 LLVM_DEBUG(
8537 dbgs() << "LV: Minimum required TC for runtime checks to be profitable:"
8538 << VF.MinProfitableTripCount << "\n");
8539
8540 // Skip vectorization if the expected trip count is less than the minimum
8541 // required trip count.
8542 if (auto ExpectedTC = getSmallBestKnownTC(PSE, L)) {
8543 if (ElementCount::isKnownLT(*ExpectedTC, VF.MinProfitableTripCount)) {
8544 LLVM_DEBUG(dbgs() << "LV: Vectorization is not beneficial: expected "
8545 "trip count < minimum profitable VF ("
8546 << *ExpectedTC << " < " << VF.MinProfitableTripCount
8547 << ")\n");
8548
8549 return false;
8550 }
8551 }
8552 return true;
8553}
8554
8556 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
8558 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
8560
8561/// Prepare \p MainPlan for vectorizing the main vector loop during epilogue
8562/// vectorization.
8565 using namespace VPlanPatternMatch;
8566 // When vectorizing the epilogue, FindFirstIV & FindLastIV reductions can
8567 // introduce multiple uses of undef/poison. If the reduction start value may
8568 // be undef or poison it needs to be frozen and the frozen start has to be
8569 // used when computing the reduction result. We also need to use the frozen
8570 // value in the resume phi generated by the main vector loop, as this is also
8571 // used to compute the reduction result after the epilogue vector loop.
8572 auto AddFreezeForFindLastIVReductions = [](VPlan &Plan,
8573 bool UpdateResumePhis) {
8574 VPBuilder Builder(Plan.getEntry());
8575 for (VPRecipeBase &R : *Plan.getMiddleBlock()) {
8576 auto *VPI = dyn_cast<VPInstruction>(&R);
8577 if (!VPI)
8578 continue;
8579 VPValue *OrigStart;
8580 if (!matchFindIVResult(VPI, m_VPValue(), m_VPValue(OrigStart)))
8581 continue;
8583 continue;
8584 VPInstruction *Freeze =
8585 Builder.createNaryOp(Instruction::Freeze, {OrigStart}, {}, "fr");
8586 VPI->setOperand(2, Freeze);
8587 if (UpdateResumePhis)
8588 OrigStart->replaceUsesWithIf(Freeze, [Freeze](VPUser &U, unsigned) {
8589 return Freeze != &U && isa<VPPhi>(&U);
8590 });
8591 }
8592 };
8593 AddFreezeForFindLastIVReductions(MainPlan, true);
8594 AddFreezeForFindLastIVReductions(EpiPlan, false);
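  // Illustrative reasoning (hypothetical): if the FindLastIV start value were
  // poison, the main loop's reduction-result computation and the epilogue's
  // resume phi would each be free to choose a different concrete value for it.
  // Freezing the start value once and reusing the frozen value in both places
  // keeps them consistent.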
8595
8596 VPValue *VectorTC = nullptr;
8597 auto *Term =
8599 [[maybe_unused]] bool MatchedTC =
8600 match(Term, m_BranchOnCount(m_VPValue(), m_VPValue(VectorTC)));
8601 assert(MatchedTC && "must match vector trip count");
8602
8603 // If there is a suitable resume value for the canonical induction in the
8604 // scalar (which will become vector) epilogue loop, use it and move it to the
8605 // beginning of the scalar preheader. Otherwise create it below.
8606 VPBasicBlock *MainScalarPH = MainPlan.getScalarPreheader();
8607 auto ResumePhiIter =
8608 find_if(MainScalarPH->phis(), [VectorTC](VPRecipeBase &R) {
8609 return match(&R, m_VPInstruction<Instruction::PHI>(m_Specific(VectorTC),
8610 m_ZeroInt()));
8611 });
8612 VPPhi *ResumePhi = nullptr;
8613 if (ResumePhiIter == MainScalarPH->phis().end()) {
8614 using namespace llvm::VPlanPatternMatch;
8615 assert(
8617 m_ZeroInt()) &&
8618 "canonical IV must start at 0");
8619 Type *Ty = VPTypeAnalysis(MainPlan).inferScalarType(VectorTC);
8620 VPBuilder ScalarPHBuilder(MainScalarPH, MainScalarPH->begin());
8621 ResumePhi = ScalarPHBuilder.createScalarPhi(
8622 {VectorTC, MainPlan.getZero(Ty)}, {}, "vec.epilog.resume.val");
8623 } else {
8624 ResumePhi = cast<VPPhi>(&*ResumePhiIter);
8625 ResumePhi->setName("vec.epilog.resume.val");
8626 if (&MainScalarPH->front() != ResumePhi)
8627 ResumePhi->moveBefore(*MainScalarPH, MainScalarPH->begin());
8628 }
8629
8630 // Create a ResumeForEpilogue for the canonical IV resume as the
8631 // first non-phi, to keep it alive for the epilogue.
8632 VPBuilder ResumeBuilder(MainScalarPH);
8633 ResumeBuilder.createNaryOp(VPInstruction::ResumeForEpilogue, ResumePhi);
8634
8635 // Create ResumeForEpilogue instructions for the resume phis of the
8636 // VPIRPhis in the scalar header of the main plan and return them so they can
8637 // be used as resume values when vectorizing the epilogue.
8638 return to_vector(
8639 map_range(MainPlan.getScalarHeader()->phis(), [&](VPRecipeBase &R) {
8640 assert(isa<VPIRPhi>(R) &&
8641 "only VPIRPhis expected in the scalar header");
8642 return ResumeBuilder.createNaryOp(VPInstruction::ResumeForEpilogue,
8643 R.getOperand(0));
8644 }));
8645}
8646
8647/// Prepare \p Plan for vectorizing the epilogue loop. That is, re-use expanded
8648/// SCEVs from \p ExpandedSCEVs and set resume values for header recipes. Some
8649/// reductions require creating new instructions to compute the resume values.
8650/// They are collected in a vector and returned. They must be moved to the
8651/// preheader of the vector epilogue loop, after created by the execution of \p
8652/// Plan.
8654 VPlan &Plan, Loop *L, const SCEV2ValueTy &ExpandedSCEVs,
8656 ScalarEvolution &SE) {
8657 VPRegionBlock *VectorLoop = Plan.getVectorLoopRegion();
8658 VPBasicBlock *Header = VectorLoop->getEntryBasicBlock();
8659 Header->setName("vec.epilog.vector.body");
8660
8661 VPCanonicalIVPHIRecipe *IV = VectorLoop->getCanonicalIV();
8662 // When vectorizing the epilogue loop, the canonical induction needs to start
8663 // at the resume value from the main vector loop. Find the resume value
8664 // created during execution of the main VPlan. It must be the first phi in the
8665 // loop preheader. Add this resume value as an offset to the canonical IV of
8666 // the epilogue loop.
8667 using namespace llvm::PatternMatch;
8668 PHINode *EPResumeVal = &*L->getLoopPreheader()->phis().begin();
8669 for (Value *Inc : EPResumeVal->incoming_values()) {
8670 if (match(Inc, m_SpecificInt(0)))
8671 continue;
8672 assert(!EPI.VectorTripCount &&
8673 "Must only have a single non-zero incoming value");
8674 EPI.VectorTripCount = Inc;
8675 }
8676 // If we didn't find a non-zero vector trip count, all incoming values
8677 // must be zero, which also means the vector trip count is zero. Pick the
8678 // first zero as vector trip count.
8679   // TODO: We should not choose a VF * UF that makes the main vector loop
8680   // known to be dead.
8681 if (!EPI.VectorTripCount) {
8682 assert(EPResumeVal->getNumIncomingValues() > 0 &&
8683 all_of(EPResumeVal->incoming_values(),
8684 [](Value *Inc) { return match(Inc, m_SpecificInt(0)); }) &&
8685 "all incoming values must be 0");
8686 EPI.VectorTripCount = EPResumeVal->getOperand(0);
8687 }
8688 VPValue *VPV = Plan.getOrAddLiveIn(EPResumeVal);
8689 assert(all_of(IV->users(),
8690 [](const VPUser *U) {
8691 return isa<VPScalarIVStepsRecipe>(U) ||
8692 isa<VPDerivedIVRecipe>(U) ||
8693 cast<VPRecipeBase>(U)->isScalarCast() ||
8694 cast<VPInstruction>(U)->getOpcode() ==
8695 Instruction::Add;
8696 }) &&
8697 "the canonical IV should only be used by its increment or "
8698 "ScalarIVSteps when resetting the start value");
8699 VPBuilder Builder(Header, Header->getFirstNonPhi());
8700 VPInstruction *Add = Builder.createAdd(IV, VPV);
8701 // Replace all users of the canonical IV and its increment with the offset
8702 // version, except for the Add itself and the canonical IV increment.
8703 auto *Increment = cast<VPInstruction>(IV->getBackedgeValue());
8704 IV->replaceUsesWithIf(Add, [Add, Increment](VPUser &U, unsigned) {
8705 return &U != Add && &U != Increment;
8706 });
8707 VPInstruction *OffsetIVInc =
8709 Increment->replaceUsesWithIf(OffsetIVInc,
8710 [IV](VPUser &U, unsigned) { return &U != IV; });
8711 OffsetIVInc->setOperand(0, Increment);
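  // For illustration with hypothetical numbers: if the main vector loop
  // covered 96 of 103 scalar iterations, EPResumeVal is 96, so the Add above
  // offsets the epilogue's canonical IV to effectively start at 96 while the
  // canonical IV recipe itself still starts at 0, and the rewired increment
  // continues from there.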
8712
8714 SmallVector<Instruction *> InstsToMove;
8715 // Ensure that the start values for all header phi recipes are updated before
8716 // vectorizing the epilogue loop. Skip the canonical IV, which has been
8717 // handled above.
8718 for (VPRecipeBase &R : drop_begin(Header->phis())) {
8719 Value *ResumeV = nullptr;
8720 // TODO: Move setting of resume values to prepareToExecute.
8721 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) {
8722 // Find the reduction result by searching users of the phi or its backedge
8723 // value.
8724 using namespace VPlanPatternMatch;
8725 auto IsReductionResult = [](VPRecipeBase *R) {
8726 auto *VPI = dyn_cast<VPInstruction>(R);
8727 if (!VPI)
8728 return false;
8730 return true;
8731 // ComputeReductionResult is also considered, unless it is used for the
8732       // Or reduction in AnyOf reductions and feeds a ComputeAnyOfResult,
8733 // in which case the latter will be considered instead.
8735 return false;
8736 return !any_of(VPI->users(), [](VPUser *U) {
8737 return match(U, m_VPInstruction<VPInstruction::ComputeAnyOfResult>());
8738 });
8739 };
8740 auto *RdxResult = cast<VPInstruction>(
8741 vputils::findRecipe(ReductionPhi->getBackedgeValue(), IsReductionResult));
8742 assert(RdxResult && "expected to find reduction result");
8743
8744 ResumeV = cast<PHINode>(ReductionPhi->getUnderlyingInstr())
8745 ->getIncomingValueForBlock(L->getLoopPreheader());
8746
8747 // Check for FindIV pattern by looking for icmp user of RdxResult.
8748 // The pattern is: select(icmp ne RdxResult, Sentinel), RdxResult, Start
8749 using namespace VPlanPatternMatch;
8750 VPValue *SentinelVPV = nullptr;
8751 bool IsFindIV = any_of(RdxResult->users(), [&](VPUser *U) {
8752 return match(U, VPlanPatternMatch::m_SpecificICmp(
8753 ICmpInst::ICMP_NE, m_Specific(RdxResult),
8754 m_VPValue(SentinelVPV)));
8755 });
8756
8757 if (RdxResult->getOpcode() == VPInstruction::ComputeAnyOfResult) {
8758 Value *StartV = RdxResult->getOperand(0)->getLiveInIRValue();
8759 // VPReductionPHIRecipes for AnyOf reductions expect a boolean as
8760 // start value; compare the final value from the main vector loop
8761 // to the start value.
8762 BasicBlock *PBB = cast<Instruction>(ResumeV)->getParent();
8763 IRBuilder<> Builder(PBB, PBB->getFirstNonPHIIt());
8764 ResumeV = Builder.CreateICmpNE(ResumeV, StartV);
8765 if (auto *I = dyn_cast<Instruction>(ResumeV))
8766 InstsToMove.push_back(I);
8767 } else if (IsFindIV) {
8768 assert(SentinelVPV && "expected to find icmp using RdxResult");
8769
8770 // Get the frozen start value from the main loop.
8771 Value *FrozenStartV = cast<PHINode>(ResumeV)->getIncomingValueForBlock(
8773 if (auto *FreezeI = dyn_cast<FreezeInst>(FrozenStartV))
8774 ToFrozen[FreezeI->getOperand(0)] = FrozenStartV;
8775
8776 // Adjust resume: select(icmp eq ResumeV, FrozenStartV), Sentinel,
8777 // ResumeV
8778 BasicBlock *ResumeBB = cast<Instruction>(ResumeV)->getParent();
8779 IRBuilder<> Builder(ResumeBB, ResumeBB->getFirstNonPHIIt());
8780 Value *Cmp = Builder.CreateICmpEQ(ResumeV, FrozenStartV);
8781 if (auto *I = dyn_cast<Instruction>(Cmp))
8782 InstsToMove.push_back(I);
8783 ResumeV =
8784 Builder.CreateSelect(Cmp, SentinelVPV->getLiveInIRValue(), ResumeV);
8785 if (auto *I = dyn_cast<Instruction>(ResumeV))
8786 InstsToMove.push_back(I);
8787 } else {
8788 VPValue *StartVal = Plan.getOrAddLiveIn(ResumeV);
8789 auto *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
8790 if (auto *VPI = dyn_cast<VPInstruction>(PhiR->getStartValue())) {
8792 "unexpected start value");
8793 // Partial sub-reductions always start at 0 and account for the
8794 // reduction start value in a final subtraction. Update it to use the
8795 // resume value from the main vector loop.
8796 if (PhiR->getVFScaleFactor() > 1 &&
8797 PhiR->getRecurrenceKind() == RecurKind::Sub) {
8798 auto *Sub = cast<VPInstruction>(RdxResult->getSingleUser());
8799 assert(Sub->getOpcode() == Instruction::Sub && "Unexpected opcode");
8800 assert(isa<VPIRValue>(Sub->getOperand(0)) &&
8801 "Expected operand to match the original start value of the "
8802 "reduction");
8805 "Expected start value for partial sub-reduction to start at "
8806 "zero");
8807 Sub->setOperand(0, StartVal);
8808 } else
8809 VPI->setOperand(0, StartVal);
8810 continue;
8811 }
8812 }
8813 } else {
8814 // Retrieve the induction resume values for wide inductions from
8815 // their original phi nodes in the scalar loop.
8816 PHINode *IndPhi = cast<VPWidenInductionRecipe>(&R)->getPHINode();
8817       // Hook up to the PHINode generated by a ResumePhi recipe of the main
8818       // loop's VPlan, which feeds the scalar loop.
8819 ResumeV = IndPhi->getIncomingValueForBlock(L->getLoopPreheader());
8820 }
8821 assert(ResumeV && "Must have a resume value");
8822 VPValue *StartVal = Plan.getOrAddLiveIn(ResumeV);
8823 cast<VPHeaderPHIRecipe>(&R)->setStartValue(StartVal);
8824 }
8825
8826 // For some VPValues in the epilogue plan we must re-use the generated IR
8827 // values from the main plan. Replace them with live-in VPValues.
8828 // TODO: This is a workaround needed for epilogue vectorization and it
8829 // should be removed once induction resume value creation is done
8830 // directly in VPlan.
8831 for (auto &R : make_early_inc_range(*Plan.getEntry())) {
8832 // Re-use frozen values from the main plan for Freeze VPInstructions in the
8833 // epilogue plan. This ensures all users use the same frozen value.
8834 auto *VPI = dyn_cast<VPInstruction>(&R);
8835 if (VPI && VPI->getOpcode() == Instruction::Freeze) {
8837 ToFrozen.lookup(VPI->getOperand(0)->getLiveInIRValue())));
8838 continue;
8839 }
8840
8841 // Re-use the trip count and steps expanded for the main loop, as
8842 // skeleton creation needs it as a value that dominates both the scalar
8843 // and vector epilogue loops
8844 auto *ExpandR = dyn_cast<VPExpandSCEVRecipe>(&R);
8845 if (!ExpandR)
8846 continue;
8847 VPValue *ExpandedVal =
8848 Plan.getOrAddLiveIn(ExpandedSCEVs.lookup(ExpandR->getSCEV()));
8849 ExpandR->replaceAllUsesWith(ExpandedVal);
8850 if (Plan.getTripCount() == ExpandR)
8851 Plan.resetTripCount(ExpandedVal);
8852 ExpandR->eraseFromParent();
8853 }
8854
8855 auto VScale = CM.getVScaleForTuning();
8856 unsigned MainLoopStep =
8857 estimateElementCount(EPI.MainLoopVF * EPI.MainLoopUF, VScale);
8858 unsigned EpilogueLoopStep =
8859 estimateElementCount(EPI.EpilogueVF * EPI.EpilogueUF, VScale);
8861 Plan, EPI.VectorTripCount,
8863 EPI.EpilogueUF, MainLoopStep, EpilogueLoopStep, SE);
8864
8865 return InstsToMove;
8866}
8867
8868static void
8870 VPlan &BestEpiPlan,
8871 ArrayRef<VPInstruction *> ResumeValues) {
8872 // Fix resume values from the additional bypass block.
8873 BasicBlock *PH = L->getLoopPreheader();
8874 for (auto *Pred : predecessors(PH)) {
8875 for (PHINode &Phi : PH->phis()) {
8876 if (Phi.getBasicBlockIndex(Pred) != -1)
8877 continue;
8878 Phi.addIncoming(Phi.getIncomingValueForBlock(BypassBlock), Pred);
8879 }
8880 }
8881 auto *ScalarPH = cast<VPIRBasicBlock>(BestEpiPlan.getScalarPreheader());
8882 if (ScalarPH->hasPredecessors()) {
8883 // Fix resume values for inductions and reductions from the additional
8884 // bypass block using the incoming values from the main loop's resume phis.
8885 // ResumeValues correspond 1:1 with the scalar loop header phis.
8886 for (auto [ResumeV, HeaderPhi] :
8887 zip(ResumeValues, BestEpiPlan.getScalarHeader()->phis())) {
8888 auto *HeaderPhiR = cast<VPIRPhi>(&HeaderPhi);
8889 auto *EpiResumePhi =
8890 cast<PHINode>(HeaderPhiR->getIRPhi().getIncomingValueForBlock(PH));
8891 if (EpiResumePhi->getBasicBlockIndex(BypassBlock) == -1)
8892 continue;
8893 auto *MainResumePhi = cast<PHINode>(ResumeV->getUnderlyingValue());
8894 EpiResumePhi->setIncomingValueForBlock(
8895 BypassBlock, MainResumePhi->getIncomingValueForBlock(BypassBlock));
8896 }
8897 }
8898}
8899
8900/// Connect the epilogue vector loop generated for \p EpiPlan to the main vector
8901/// loop, after both plans have executed, updating branches from the iteration-count
8902/// and runtime checks of the main loop, as well as updating various phis. \p
8903/// InstsToMove contains instructions that need to be moved to the preheader of
8904/// the epilogue vector loop.
8905static void connectEpilogueVectorLoop(VPlan &EpiPlan, Loop *L,
8907 DominatorTree *DT,
8908 GeneratedRTChecks &Checks,
8909 ArrayRef<Instruction *> InstsToMove,
8910 ArrayRef<VPInstruction *> ResumeValues) {
8911 BasicBlock *VecEpilogueIterationCountCheck =
8912 cast<VPIRBasicBlock>(EpiPlan.getEntry())->getIRBasicBlock();
8913
8914 BasicBlock *VecEpiloguePreHeader =
8915 cast<CondBrInst>(VecEpilogueIterationCountCheck->getTerminator())
8916 ->getSuccessor(1);
8917 // Adjust the control flow taking the state info from the main loop
8918 // vectorization into account.
8920 "expected this to be saved from the previous pass.");
8921 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
8923 VecEpilogueIterationCountCheck, VecEpiloguePreHeader);
8924
8926 VecEpilogueIterationCountCheck},
8928 VecEpiloguePreHeader}});
8929
8930 BasicBlock *ScalarPH =
8931 cast<VPIRBasicBlock>(EpiPlan.getScalarPreheader())->getIRBasicBlock();
8933 VecEpilogueIterationCountCheck, ScalarPH);
8934 DTU.applyUpdates(
8936 VecEpilogueIterationCountCheck},
8938
8939 // Adjust the terminators of runtime check blocks and phis using them.
8940 BasicBlock *SCEVCheckBlock = Checks.getSCEVChecks().second;
8941 BasicBlock *MemCheckBlock = Checks.getMemRuntimeChecks().second;
8942 if (SCEVCheckBlock) {
8943 SCEVCheckBlock->getTerminator()->replaceUsesOfWith(
8944 VecEpilogueIterationCountCheck, ScalarPH);
8945 DTU.applyUpdates({{DominatorTree::Delete, SCEVCheckBlock,
8946 VecEpilogueIterationCountCheck},
8947 {DominatorTree::Insert, SCEVCheckBlock, ScalarPH}});
8948 }
8949 if (MemCheckBlock) {
8950 MemCheckBlock->getTerminator()->replaceUsesOfWith(
8951 VecEpilogueIterationCountCheck, ScalarPH);
8952 DTU.applyUpdates(
8953 {{DominatorTree::Delete, MemCheckBlock, VecEpilogueIterationCountCheck},
8954 {DominatorTree::Insert, MemCheckBlock, ScalarPH}});
8955 }
8956
8957 // The vec.epilog.iter.check block may contain Phi nodes from inductions
8958 // or reductions which merge control-flow from the latch block and the
8959 // middle block. Update the incoming values here and move the Phi into the
8960 // preheader.
8961 SmallVector<PHINode *, 4> PhisInBlock(
8962 llvm::make_pointer_range(VecEpilogueIterationCountCheck->phis()));
8963
8964 for (PHINode *Phi : PhisInBlock) {
8965 Phi->moveBefore(VecEpiloguePreHeader->getFirstNonPHIIt());
8966 Phi->replaceIncomingBlockWith(
8967 VecEpilogueIterationCountCheck->getSinglePredecessor(),
8968 VecEpilogueIterationCountCheck);
8969
8970 // If the phi doesn't have an incoming value from the
8971 // EpilogueIterationCountCheck, we are done. Otherwise remove the
8972 // incoming value and also those from other check blocks. This is needed
8973 // for reduction phis only.
8974 if (none_of(Phi->blocks(), [&](BasicBlock *IncB) {
8975 return EPI.EpilogueIterationCountCheck == IncB;
8976 }))
8977 continue;
8978 Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck);
8979 if (SCEVCheckBlock)
8980 Phi->removeIncomingValue(SCEVCheckBlock);
8981 if (MemCheckBlock)
8982 Phi->removeIncomingValue(MemCheckBlock);
8983 }
8984
8985 auto IP = VecEpiloguePreHeader->getFirstNonPHIIt();
8986 for (auto *I : InstsToMove)
8987 I->moveBefore(IP);
8988
8989 // VecEpilogueIterationCountCheck conditionally skips over the epilogue loop
8990 // after executing the main loop. We need to update the resume values of
8991 // inductions and reductions during epilogue vectorization.
8992 fixScalarResumeValuesFromBypass(VecEpilogueIterationCountCheck, L, EpiPlan,
8993 ResumeValues);
8994
8995 // Remove dead phis that were moved to the epilogue preheader but are unused
8996 // (e.g., resume phis for inductions not widened in the epilogue vector loop).
8997 for (PHINode &Phi : make_early_inc_range(VecEpiloguePreHeader->phis()))
8998 if (Phi.use_empty())
8999 Phi.eraseFromParent();
9000}
9001
9003 assert((EnableVPlanNativePath || L->isInnermost()) &&
9004 "VPlan-native path is not enabled. Only process inner loops.");
9005
9006 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in '"
9007 << L->getHeader()->getParent()->getName() << "' from "
9008 << L->getLocStr() << "\n");
9009
9010 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI);
9011
9012 LLVM_DEBUG(
9013 dbgs() << "LV: Loop hints:"
9014 << " force="
9016 ? "disabled"
9018 ? "enabled"
9019 : "?"))
9020 << " width=" << Hints.getWidth()
9021 << " interleave=" << Hints.getInterleave() << "\n");
9022
9023 // Function containing loop
9024 Function *F = L->getHeader()->getParent();
9025
9026 // Looking at the diagnostic output is the only way to determine if a loop
9027 // was vectorized (other than looking at the IR or machine code), so it
9028 // is important to generate an optimization remark for each loop. Most of
9029 // these messages are generated as OptimizationRemarkAnalysis. Remarks
9030 // generated as OptimizationRemark and OptimizationRemarkMissed are
9031   // less verbose and report vectorized loops and unvectorized loops that may
9032 // benefit from vectorization, respectively.
9033
9034 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
9035 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
9036 return false;
9037 }
9038
9039 PredicatedScalarEvolution PSE(*SE, *L);
9040
9041 // Query this against the original loop and save it here because the profile
9042 // of the original loop header may change as the transformation happens.
9043 bool OptForSize = llvm::shouldOptimizeForSize(
9044 L->getHeader(), PSI,
9045 PSI && PSI->hasProfileSummary() ? &GetBFI() : nullptr,
9047
9048 // Check if it is legal to vectorize the loop.
9049 LoopVectorizationRequirements Requirements;
9050 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, F, *LAIs, LI, ORE,
9051 &Requirements, &Hints, DB, AC,
9052 /*AllowRuntimeSCEVChecks=*/!OptForSize, AA);
9054 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
9055 Hints.emitRemarkWithHints();
9056 return false;
9057 }
9058
9059 if (LVL.hasUncountableEarlyExit()) {
9061 reportVectorizationFailure("Auto-vectorization of loops with uncountable "
9062 "early exit is not enabled",
9063 "UncountableEarlyExitLoopsDisabled", ORE, L);
9064 return false;
9065 }
9066 }
9067
9068 // Entrance to the VPlan-native vectorization path. Outer loops are processed
9069 // here. They may require CFG and instruction level transformations before
9070 // even evaluating whether vectorization is profitable. Since we cannot modify
9071 // the incoming IR, we need to build VPlan upfront in the vectorization
9072 // pipeline.
9073 if (!L->isInnermost())
9074 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
9075 ORE, GetBFI, OptForSize, Hints,
9076 Requirements);
9077
9078 assert(L->isInnermost() && "Inner loop expected.");
9079
9080 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
9081 bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
9082
9083 // If an override option has been passed in for interleaved accesses, use it.
9084 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
9085 UseInterleaved = EnableInterleavedMemAccesses;
9086
9087 // Analyze interleaved memory accesses.
9088 if (UseInterleaved)
9089 IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
9090
9091 if (LVL.hasUncountableEarlyExit()) {
9092 BasicBlock *LoopLatch = L->getLoopLatch();
9093 if (IAI.requiresScalarEpilogue() ||
9094 any_of(LVL.getCountableExitingBlocks(),
9095 [LoopLatch](BasicBlock *BB) { return BB != LoopLatch; })) {
9096 reportVectorizationFailure("Auto-vectorization of early exit loops "
9097 "requiring a scalar epilogue is unsupported",
9098 "UncountableEarlyExitUnsupported", ORE, L);
9099 return false;
9100 }
9101 }
9102
9103 // Check the function attributes and profiles to find out if this function
9104 // should be optimized for size.
9105 ScalarEpilogueLowering SEL =
9106 getScalarEpilogueLowering(F, L, Hints, OptForSize, TTI, TLI, LVL, &IAI);
9107
9108 // Check the loop for a trip count threshold: vectorize loops with a tiny trip
9109 // count by optimizing for size, to minimize overheads.
9110 auto ExpectedTC = getSmallBestKnownTC(PSE, L);
9111 if (ExpectedTC && ExpectedTC->isFixed() &&
9112 ExpectedTC->getFixedValue() < TinyTripCountVectorThreshold) {
9113 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
9114 << "This loop is worth vectorizing only if no scalar "
9115 << "iteration overheads are incurred.");
9116 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
9117 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
9118 else {
9119 LLVM_DEBUG(dbgs() << "\n");
9120 // Predicated, tail-folded loops are efficient even when the loop
9121 // iteration count is low. However, setting the epilogue policy to
9122 // `CM_ScalarEpilogueNotAllowedLowTripLoop` prevents vectorizing loops
9123 // with runtime checks. It's more effective to let
9124 // `isOutsideLoopWorkProfitable` determine if vectorization is
9125 // beneficial for the loop.
9126 if (SEL != CM_ScalarEpilogueNotNeededUsePredicate)
9127 SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
9128 }
9129 }
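// --- Illustrative note (not part of LoopVectorize.cpp; names below are made
// up for illustration) -------------------------------------------------------
// A minimal sketch of the case handled above: with the default
// vectorizer-min-trip-count of 16, a loop such as
//
//   void scale8(float *a) {
//   #pragma clang loop vectorize(enable)   // emits llvm.loop.vectorize.enable
//     for (int i = 0; i < 8; ++i)
//       a[i] *= 2.0f;
//   }
//
// has a known trip count below the threshold. Unless vectorization is forced
// (e.g. by the pragma above), the epilogue policy is tightened so the loop is
// only vectorized when no scalar-iteration overheads are incurred.
// -----------------------------------------------------------------------------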
9130
9131 // Check the function attributes to see if implicit floats or vectors are
9132 // allowed.
9133 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
9135 "Can't vectorize when the NoImplicitFloat attribute is used",
9136 "loop not vectorized due to NoImplicitFloat attribute",
9137 "NoImplicitFloat", ORE, L);
9138 Hints.emitRemarkWithHints();
9139 return false;
9140 }
9141
9142 // Check if the target supports potentially unsafe FP vectorization.
9143 // FIXME: Add a check for the type of safety issue (denormal, signaling)
9144 // for the target we're vectorizing for, to make sure none of the
9145 // additional fp-math flags can help.
9146 if (Hints.isPotentiallyUnsafe() &&
9147 TTI->isFPVectorizationPotentiallyUnsafe()) {
9149 "Potentially unsafe FP op prevents vectorization",
9150 "loop not vectorized due to unsafe FP support.",
9151 "UnsafeFP", ORE, L);
9152 Hints.emitRemarkWithHints();
9153 return false;
9154 }
9155
9156 bool AllowOrderedReductions;
9157 // If the flag is set, use that instead and override the TTI behaviour.
9158 if (ForceOrderedReductions.getNumOccurrences() > 0)
9159 AllowOrderedReductions = ForceOrderedReductions;
9160 else
9161 AllowOrderedReductions = TTI->enableOrderedReductions();
9162 if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
9163 ORE->emit([&]() {
9164 auto *ExactFPMathInst = Requirements.getExactFPInst();
9165 return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
9166 ExactFPMathInst->getDebugLoc(),
9167 ExactFPMathInst->getParent())
9168 << "loop not vectorized: cannot prove it is safe to reorder "
9169 "floating-point operations";
9170 });
9171 LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
9172 "reorder floating-point operations\n");
9173 Hints.emitRemarkWithHints();
9174 return false;
9175 }
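// --- Illustrative note (not part of LoopVectorize.cpp; names below are made
// up for illustration) -------------------------------------------------------
// Example of a loop that trips the check above: a strict floating-point
// reduction such as
//
//   float sum(const float *a, int n) {
//     float s = 0.0f;
//     for (int i = 0; i < n; ++i)
//       s += a[i];               // FP additions may not be reassociated
//     return s;
//   }
//
// can only be vectorized if the target supports in-order (strict) reductions
// (see -force-ordered-reductions) or the user permits reassociation, e.g. via
// fast-math flags; otherwise the remark "cannot prove it is safe to reorder
// floating-point operations" is emitted here.
// -----------------------------------------------------------------------------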
9176
9177 // Use the cost model.
9178 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
9179 GetBFI, F, &Hints, IAI, OptForSize);
9180 // Use the planner for vectorization.
9181 LoopVectorizationPlanner LVP(L, LI, DT, TLI, *TTI, &LVL, CM, IAI, PSE, Hints,
9182 ORE);
9183
9184 // Get user vectorization factor and interleave count.
9185 ElementCount UserVF = Hints.getWidth();
9186 unsigned UserIC = Hints.getInterleave();
9187 if (UserIC > 1 && !LVL.isSafeForAnyVectorWidth())
9188 UserIC = 1;
9189
9190 // Plan how to best vectorize.
9191 LVP.plan(UserVF, UserIC);
9192 auto [VF, BestPlanPtr] = LVP.computeBestVF();
9193 unsigned IC = 1;
9194
9195 if (ORE->allowExtraAnalysis(LV_NAME))
9196 LVP.emitInvalidCostRemarks(ORE);
9197
9198 GeneratedRTChecks Checks(PSE, DT, LI, TTI, CM.CostKind);
9199 if (LVP.hasPlanWithVF(VF.Width)) {
9200 // Select the interleave count.
9201 IC = LVP.selectInterleaveCount(*BestPlanPtr, VF.Width, VF.Cost);
9202
9203 unsigned SelectedIC = std::max(IC, UserIC);
9204 // Optimistically generate runtime checks if they are needed. Drop them if
9205 // they turn out to not be profitable.
9206 if (VF.Width.isVector() || SelectedIC > 1) {
9207 Checks.create(L, *LVL.getLAI(), PSE.getPredicate(), VF.Width, SelectedIC,
9208 *ORE);
9209
9210 // Bail out early if either the SCEV or memory runtime checks are known to
9211 // fail. In that case, the vector loop would never execute.
9212 using namespace llvm::PatternMatch;
9213 if (Checks.getSCEVChecks().first &&
9214 match(Checks.getSCEVChecks().first, m_One()))
9215 return false;
9216 if (Checks.getMemRuntimeChecks().first &&
9217 match(Checks.getMemRuntimeChecks().first, m_One()))
9218 return false;
9219 }
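// --- Illustrative note (not part of LoopVectorize.cpp; names below are made
// up for illustration) -------------------------------------------------------
// Example of where the runtime checks created above come from: in
//
//   void axpy(float *x, float *y, int n) {
//     for (int i = 0; i < n; ++i)
//       y[i] += 2.0f * x[i];     // x and y may alias
//   }
//
// memory runtime checks must prove at run time that the accessed ranges of x
// and y do not overlap before the vector loop may execute. If the SCEV or
// memory check condition folds to constant true (m_One() above), the checks
// are known to fail and vectorization is abandoned.
// -----------------------------------------------------------------------------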
9220
9221 // Check if it is profitable to vectorize with runtime checks.
9222 bool ForceVectorization =
9223 Hints.getForce() == LoopVectorizeHints::FK_Enabled;
9224 VPCostContext CostCtx(CM.TTI, *CM.TLI, *BestPlanPtr, CM, CM.CostKind,
9225 CM.PSE, L);
9226 if (!ForceVectorization &&
9227 !isOutsideLoopWorkProfitable(Checks, VF, L, PSE, CostCtx, *BestPlanPtr,
9228 SEL, CM.getVScaleForTuning())) {
9229 ORE->emit([&]() {
9230 return OptimizationRemarkAnalysisAliasing(
9231 DEBUG_TYPE, "CantReorderMemOps", L->getStartLoc(),
9232 L->getHeader())
9233 << "loop not vectorized: cannot prove it is safe to reorder "
9234 "memory operations";
9235 });
9236 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
9237 Hints.emitRemarkWithHints();
9238 return false;
9239 }
9240 }
9241
9242 // Identify the diagnostic messages that should be produced.
9243 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
9244 bool VectorizeLoop = true, InterleaveLoop = true;
9245 if (VF.Width.isScalar()) {
9246 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
9247 VecDiagMsg = {
9248 "VectorizationNotBeneficial",
9249 "the cost-model indicates that vectorization is not beneficial"};
9250 VectorizeLoop = false;
9251 }
9252
9253 if (UserIC == 1 && Hints.getInterleave() > 1) {
9255 "UserIC should only be ignored due to unsafe dependencies");
9256 LLVM_DEBUG(dbgs() << "LV: Ignoring user-specified interleave count.\n");
9257 IntDiagMsg = {"InterleavingUnsafe",
9258 "Ignoring user-specified interleave count due to possibly "
9259 "unsafe dependencies in the loop."};
9260 InterleaveLoop = false;
9261 } else if (!LVP.hasPlanWithVF(VF.Width) && UserIC > 1) {
9262 // Tell the user interleaving was avoided up-front, despite being explicitly
9263 // requested.
9264 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
9265 "interleaving should be avoided up front\n");
9266 IntDiagMsg = {"InterleavingAvoided",
9267 "Ignoring UserIC, because interleaving was avoided up front"};
9268 InterleaveLoop = false;
9269 } else if (IC == 1 && UserIC <= 1) {
9270 // Tell the user interleaving is not beneficial.
9271 LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
9272 IntDiagMsg = {
9273 "InterleavingNotBeneficial",
9274 "the cost-model indicates that interleaving is not beneficial"};
9275 InterleaveLoop = false;
9276 if (UserIC == 1) {
9277 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
9278 IntDiagMsg.second +=
9279 " and is explicitly disabled or interleave count is set to 1";
9280 }
9281 } else if (IC > 1 && UserIC == 1) {
9282 // Tell the user interleaving is beneficial, but it is explicitly disabled.
9283 LLVM_DEBUG(dbgs() << "LV: Interleaving is beneficial but is explicitly "
9284 "disabled.\n");
9285 IntDiagMsg = {"InterleavingBeneficialButDisabled",
9286 "the cost-model indicates that interleaving is beneficial "
9287 "but is explicitly disabled or interleave count is set to 1"};
9288 InterleaveLoop = false;
9289 }
9290
9291 // If there is a histogram in the loop, do not just interleave without
9292 // vectorizing. The order of operations will be incorrect without the
9293 // histogram intrinsics, which are only used for recipes with VF > 1.
9294 if (!VectorizeLoop && InterleaveLoop && LVL.hasHistograms()) {
9295 LLVM_DEBUG(dbgs() << "LV: Not interleaving without vectorization due "
9296 << "to histogram operations.\n");
9297 IntDiagMsg = {
9298 "HistogramPreventsScalarInterleaving",
9299 "Unable to interleave without vectorization due to constraints on "
9300 "the order of histogram operations"};
9301 InterleaveLoop = false;
9302 }
9303
9304 // Override IC if user provided an interleave count.
9305 IC = UserIC > 0 ? UserIC : IC;
9306
9307 // Emit diagnostic messages, if any.
9308 const char *VAPassName = Hints.vectorizeAnalysisPassName();
9309 if (!VectorizeLoop && !InterleaveLoop) {
9310 // Do not vectorize or interleave the loop.
9311 ORE->emit([&]() {
9312 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
9313 L->getStartLoc(), L->getHeader())
9314 << VecDiagMsg.second;
9315 });
9316 ORE->emit([&]() {
9317 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
9318 L->getStartLoc(), L->getHeader())
9319 << IntDiagMsg.second;
9320 });
9321 return false;
9322 }
9323
9324 if (!VectorizeLoop && InterleaveLoop) {
9325 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
9326 ORE->emit([&]() {
9327 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
9328 L->getStartLoc(), L->getHeader())
9329 << VecDiagMsg.second;
9330 });
9331 } else if (VectorizeLoop && !InterleaveLoop) {
9332 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
9333 << ") in " << L->getLocStr() << '\n');
9334 ORE->emit([&]() {
9335 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
9336 L->getStartLoc(), L->getHeader())
9337 << IntDiagMsg.second;
9338 });
9339 } else if (VectorizeLoop && InterleaveLoop) {
9340 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
9341 << ") in " << L->getLocStr() << '\n');
9342 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
9343 }
9344
9345 // Report the vectorization decision.
9346 if (VF.Width.isScalar()) {
9347 using namespace ore;
9348 assert(IC > 1);
9349 ORE->emit([&]() {
9350 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
9351 L->getHeader())
9352 << "interleaved loop (interleaved count: "
9353 << NV("InterleaveCount", IC) << ")";
9354 });
9355 } else {
9356 // Report the vectorization decision.
9357 reportVectorization(ORE, L, VF, IC);
9358 }
9359 if (ORE->allowExtraAnalysis(LV_NAME))
9360 checkMixedPrecision(L, ORE);
9361
9362 // If we decided that it is *legal* to interleave or vectorize the loop, then
9363 // do it.
9364
9365 VPlan &BestPlan = *BestPlanPtr;
9366 // Consider vectorizing the epilogue too if it's profitable.
9367 std::unique_ptr<VPlan> EpiPlan =
9368 LVP.selectBestEpiloguePlan(BestPlan, VF.Width, IC);
9369 bool HasBranchWeights =
9370 hasBranchWeightMD(*L->getLoopLatch()->getTerminator());
9371 if (EpiPlan) {
9372 VPlan &BestEpiPlan = *EpiPlan;
9373 VPlan &BestMainPlan = BestPlan;
9374 ElementCount EpilogueVF = BestEpiPlan.getSingleVF();
9375
9376 // The first pass vectorizes the main loop and creates a scalar epilogue
9377 // to be vectorized by executing the plan (potentially with a different
9378 // factor) again shortly afterwards.
9379 BestEpiPlan.getMiddleBlock()->setName("vec.epilog.middle.block");
9380 BestEpiPlan.getVectorPreheader()->setName("vec.epilog.ph");
9381 SmallVector<VPInstruction *> ResumeValues =
9382 preparePlanForMainVectorLoop(BestMainPlan, BestEpiPlan);
9383 EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF, 1, BestEpiPlan);
9384
9385 // Add minimum iteration check for the epilogue plan, followed by runtime
9386 // checks for the main plan.
9387 LVP.addMinimumIterationCheck(BestMainPlan, EPI.EpilogueVF, EPI.EpilogueUF,
9389 LVP.attachRuntimeChecks(BestMainPlan, Checks, HasBranchWeights);
9391 BestMainPlan, EPI.MainLoopVF, EPI.MainLoopUF,
9393 HasBranchWeights ? MinItersBypassWeights : nullptr,
9394 L->getLoopPredecessor()->getTerminator()->getDebugLoc(), PSE);
9395
9396 EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TTI, AC, EPI, &CM,
9397 Checks, BestMainPlan);
9398 auto ExpandedSCEVs = LVP.executePlan(
9399 EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV, DT,
9400 /*VectorizingEpilogue=*/false);
9401 ++LoopsVectorized;
9402
9403 // Derive EPI fields from VPlan-generated IR.
9404 BasicBlock *EntryBB =
9405 cast<VPIRBasicBlock>(BestMainPlan.getEntry())->getIRBasicBlock();
9406 EntryBB->setName("iter.check");
9407 EPI.EpilogueIterationCountCheck = EntryBB;
9408 // The check chain is: Entry -> [SCEV] -> [Mem] -> MainCheck -> VecPH.
9409 // MainCheck is the non-bypass successor of the last runtime check block
9410 // (or Entry if there are no runtime checks).
9411 BasicBlock *LastCheck = EntryBB;
9412 if (BasicBlock *MemBB = Checks.getMemRuntimeChecks().second)
9413 LastCheck = MemBB;
9414 else if (BasicBlock *SCEVBB = Checks.getSCEVChecks().second)
9415 LastCheck = SCEVBB;
9416 BasicBlock *ScalarPH = L->getLoopPreheader();
9417 auto *BI = cast<CondBrInst>(LastCheck->getTerminator());
9418 EPI.MainLoopIterationCountCheck =
9419 BI->getSuccessor(BI->getSuccessor(0) == ScalarPH);
9420
9421 // Second pass vectorizes the epilogue and adjusts the control flow
9422 // edges from the first pass.
9423 EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TTI, AC, EPI, &CM,
9424 Checks, BestEpiPlan);
9425 SmallVector<Instruction *> InstsToMove = preparePlanForEpilogueVectorLoop(
9426 BestEpiPlan, L, ExpandedSCEVs, EPI, CM, *PSE.getSE());
9427 LVP.attachRuntimeChecks(BestEpiPlan, Checks, HasBranchWeights);
9428 LVP.executePlan(
9429 EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV, DT,
9430 /*VectorizingEpilogue=*/true);
9431 connectEpilogueVectorLoop(BestEpiPlan, L, EPI, DT, Checks, InstsToMove,
9432 ResumeValues);
9433 ++LoopsEpilogueVectorized;
9434 } else {
9435 InnerLoopVectorizer LB(L, PSE, LI, DT, TTI, AC, VF.Width, IC, &CM, Checks,
9436 BestPlan);
9437 LVP.addMinimumIterationCheck(BestPlan, VF.Width, IC,
9438 VF.MinProfitableTripCount);
9439 LVP.attachRuntimeChecks(BestPlan, Checks, HasBranchWeights);
9440
9441 LVP.executePlan(VF.Width, IC, BestPlan, LB, DT);
9442 ++LoopsVectorized;
9443 }
9444
9445 assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
9446 "DT not preserved correctly");
9447 assert(!verifyFunction(*F, &dbgs()));
9448
9449 return true;
9450}
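// --- Illustrative note (not part of LoopVectorize.cpp) ----------------------
// The remarks emitted through ORE in processLoop() can be surfaced from clang
// with the standard remark flags, for example:
//
//   clang -O2 -Rpass=loop-vectorize -Rpass-missed=loop-vectorize \
//         -Rpass-analysis=loop-vectorize file.c
//
// Successful vectorization/interleaving is reported via OptimizationRemark,
// loops that were not transformed via OptimizationRemarkMissed, and the more
// verbose cost-model explanations via OptimizationRemarkAnalysis.
// -----------------------------------------------------------------------------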
9451
9452LoopVectorizeResult LoopVectorizePass::runImpl(Function &F) {
9453
9454 // Don't attempt if
9455 // 1. the target claims to have no vector registers, and
9456 // 2. interleaving won't help ILP.
9457 //
9458 // The second condition is necessary because, even if the target has no
9459 // vector registers, loop vectorization may still enable scalar
9460 // interleaving.
9461 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
9462 TTI->getMaxInterleaveFactor(ElementCount::getFixed(1)) < 2)
9463 return LoopVectorizeResult(false, false);
9464
9465 bool Changed = false, CFGChanged = false;
9466
9467 // The vectorizer requires loops to be in simplified form.
9468 // Since simplification may add new inner loops, it has to run before the
9469 // legality and profitability checks. This means running the loop vectorizer
9470 // will simplify all loops, regardless of whether anything ends up being
9471 // vectorized.
9472 for (const auto &L : *LI)
9473 Changed |= CFGChanged |=
9474 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
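 // Note (added for this listing, not in the original source): loop-simplify
 // form guarantees a preheader, a single backedge, and dedicated exit blocks,
 // which the legality checks and skeleton creation rely on.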
9475
9476 // Build up a worklist of inner-loops to vectorize. This is necessary as
9477 // the act of vectorizing or partially unrolling a loop creates new loops
9478 // and can invalidate iterators across the loops.
9479 SmallVector<Loop *, 8> Worklist;
9480
9481 for (Loop *L : *LI)
9482 collectSupportedLoops(*L, LI, ORE, Worklist);
9483
9484 LoopsAnalyzed += Worklist.size();
9485
9486 // Now walk the identified inner loops.
9487 while (!Worklist.empty()) {
9488 Loop *L = Worklist.pop_back_val();
9489
9490 // For the inner loops we actually process, form LCSSA to simplify the
9491 // transform.
9492 Changed |= formLCSSARecursively(*L, *DT, LI, SE);
9493
9494 Changed |= CFGChanged |= processLoop(L);
9495
9496 if (Changed) {
9497 LAIs->clear();
9498
9499#ifndef NDEBUG
9500 if (VerifySCEV)
9501 SE->verify();
9502#endif
9503 }
9504 }
9505
9506 // Process each loop nest in the function.
9507 return LoopVectorizeResult(Changed, CFGChanged);
9508}
9509
9510PreservedAnalyses LoopVectorizePass::run(Function &F,
9511 FunctionAnalysisManager &AM) {
9512 LI = &AM.getResult<LoopAnalysis>(F);
9513 // There are no loops in the function. Return before computing other
9514 // expensive analyses.
9515 if (LI->empty())
9516 return PreservedAnalyses::all();
9525 AA = &AM.getResult<AAManager>(F);
9526
9527 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
9528 PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
9529 GetBFI = [&AM, &F]() -> BlockFrequencyInfo & {
9530 return AM.getResult<BlockFrequencyAnalysis>(F);
9531 };
9532 LoopVectorizeResult Result = runImpl(F);
9533 if (!Result.MadeAnyChange)
9534 return PreservedAnalyses::all();
9535 PreservedAnalyses PA;
9536
9537 if (isAssignmentTrackingEnabled(*F.getParent())) {
9538 for (auto &BB : F)
9539 RemoveRedundantDbgInstrs(&BB);
9540 }
9541
9542 PA.preserve<LoopAnalysis>();
9543 PA.preserve<DominatorTreeAnalysis>();
9544 PA.preserve<ScalarEvolutionAnalysis>();
9545 PA.preserve<LoopAccessAnalysis>();
9546
9547 if (Result.MadeCFGChange) {
9548 // Making CFG changes likely means a loop got vectorized. Indicate that
9549 // extra simplification passes should be run.
9550 // TODO: MadeCFGChanges is not a perfect proxy. Extra passes should only
9551 // be run if runtime checks have been added.
9552 AM.getResult<ShouldRunExtraVectorPasses>(F);
9553 PA.preserve<ShouldRunExtraVectorPasses>();
9554 } else {
9555 PA.preserveSet<CFGAnalyses>();
9556 }
9557 return PA;
9558}
9559
9561 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
9562 static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
9563 OS, MapClassName2PassName);
9564
9565 OS << '<';
9566 OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
9567 OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
9568 OS << '>';
9569}
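// --- Illustrative note (not part of LoopVectorize.cpp) ----------------------
// The textual pipeline form printed above is the same one PassBuilder parses,
// so the two parameters can be set explicitly when invoking opt, for example:
//
//   opt -passes='loop-vectorize<interleave-forced-only;vectorize-forced-only>' \
//       -S input.ll
//
// Omitting a parameter (or using the no- prefix) keeps the default behaviour
// of vectorizing/interleaving loops even without explicit hints.
// -----------------------------------------------------------------------------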
for(const MachineOperand &MO :llvm::drop_begin(OldMI.operands(), Desc.getNumOperands()))
static unsigned getIntrinsicID(const SDNode *N)
unsigned RegSize
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Lower Kernel Arguments
This file implements a class to represent arbitrary precision integral constant values and operations...
@ PostInc
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static bool isEqual(const Function &Caller, const Function &Callee)
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This is the interface for LLVM's primary stateless and local alias analysis.
#define X(NUM, ENUM, NAME)
Definition ELF.h:851
static bool IsEmptyBlock(MachineBasicBlock *MBB)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
static cl::opt< IntrinsicCostStrategy > IntrinsicCost("intrinsic-cost-strategy", cl::desc("Costing strategy for intrinsic instructions"), cl::init(IntrinsicCostStrategy::InstructionCost), cl::values(clEnumValN(IntrinsicCostStrategy::InstructionCost, "instruction-cost", "Use TargetTransformInfo::getInstructionCost"), clEnumValN(IntrinsicCostStrategy::IntrinsicCost, "intrinsic-cost", "Use TargetTransformInfo::getIntrinsicInstrCost"), clEnumValN(IntrinsicCostStrategy::TypeBasedIntrinsicCost, "type-based-intrinsic-cost", "Calculate the intrinsic cost based only on argument types")))
static InstructionCost getCost(Instruction &Inst, TTI::TargetCostKind CostKind, TargetTransformInfo &TTI)
Definition CostModel.cpp:73
This file defines DenseMapInfo traits for DenseMap.
This file defines the DenseMap class.
#define DEBUG_TYPE
This is the interface for a simple mod/ref and alias analysis over globals.
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This defines the Use class.
static bool hasNoUnsignedWrap(BinaryOperator &I)
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static cl::opt< unsigned, true > VectorizationFactor("force-vector-width", cl::Hidden, cl::desc("Sets the SIMD width. Zero is autoselect."), cl::location(VectorizerParams::VectorizationFactor))
This header provides classes for managing per-loop analyses.
static const char * VerboseDebug
#define LV_NAME
This file defines the LoopVectorizationLegality class.
This file provides a LoopVectorizationPlanner class.
static void collectSupportedLoops(Loop &L, LoopInfo *LI, OptimizationRemarkEmitter *ORE, SmallVectorImpl< Loop * > &V)
static cl::opt< unsigned > EpilogueVectorizationMinVF("epilogue-vectorization-minimum-VF", cl::Hidden, cl::desc("Only loops with vectorization factor equal to or larger than " "the specified value are considered for epilogue vectorization."))
static cl::opt< unsigned > EpilogueVectorizationForceVF("epilogue-vectorization-force-VF", cl::init(1), cl::Hidden, cl::desc("When epilogue vectorization is enabled, and a value greater than " "1 is specified, forces the given VF for all applicable epilogue " "loops."))
static unsigned getMaxTCFromNonZeroRange(PredicatedScalarEvolution &PSE, Loop *L)
Get the maximum trip count for L from the SCEV unsigned range, excluding zero from the range.
static Type * maybeVectorizeType(Type *Ty, ElementCount VF)
static ElementCount determineVPlanVF(const TargetTransformInfo &TTI, LoopVectorizationCostModel &CM)
static ElementCount getSmallConstantTripCount(ScalarEvolution *SE, const Loop *L)
A version of ScalarEvolution::getSmallConstantTripCount that returns an ElementCount to include loops...
static bool hasUnsupportedHeaderPhiRecipe(VPlan &Plan)
Returns true if the VPlan contains header phi recipes that are not currently supported for epilogue v...
static cl::opt< unsigned > VectorizeMemoryCheckThreshold("vectorize-memory-check-threshold", cl::init(128), cl::Hidden, cl::desc("The maximum allowed number of runtime memory checks"))
static void connectEpilogueVectorLoop(VPlan &EpiPlan, Loop *L, EpilogueLoopVectorizationInfo &EPI, DominatorTree *DT, GeneratedRTChecks &Checks, ArrayRef< Instruction * > InstsToMove, ArrayRef< VPInstruction * > ResumeValues)
Connect the epilogue vector loop generated for EpiPlan to the main vector loop, after both plans have...
static cl::opt< unsigned > TinyTripCountVectorThreshold("vectorizer-min-trip-count", cl::init(16), cl::Hidden, cl::desc("Loops with a constant trip count that is smaller than this " "value are vectorized only if no scalar iteration overheads " "are incurred."))
Loops with a known constant trip count below this number are vectorized only if no scalar iteration o...
static void debugVectorizationMessage(const StringRef Prefix, const StringRef DebugMsg, Instruction *I)
Write a DebugMsg about vectorization to the debug output stream.
static void legacyCSE(BasicBlock *BB)
FIXME: This legacy common-subexpression-elimination routine is scheduled for removal,...
static VPIRBasicBlock * replaceVPBBWithIRVPBB(VPBasicBlock *VPBB, BasicBlock *IRBB, VPlan *Plan=nullptr)
Replace VPBB with a VPIRBasicBlock wrapping IRBB.
static DebugLoc getDebugLocFromInstOrOperands(Instruction *I)
Look for a meaningful debug location on the instruction or its operands.
static cl::opt< bool > ForceTargetSupportsScalableVectors("force-target-supports-scalable-vectors", cl::init(false), cl::Hidden, cl::desc("Pretend that scalable vectors are supported, even if the target does " "not support them. This flag should only be used for testing."))
static bool useActiveLaneMaskForControlFlow(TailFoldingStyle Style)
static cl::opt< bool > EnableEarlyExitVectorization("enable-early-exit-vectorization", cl::init(true), cl::Hidden, cl::desc("Enable vectorization of early exit loops with uncountable exits."))
static bool processLoopInVPlanNativePath(Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, std::function< BlockFrequencyInfo &()> GetBFI, bool OptForSize, LoopVectorizeHints &Hints, LoopVectorizationRequirements &Requirements)
static cl::opt< bool > ConsiderRegPressure("vectorizer-consider-reg-pressure", cl::init(false), cl::Hidden, cl::desc("Discard VFs if their register pressure is too high."))
static unsigned estimateElementCount(ElementCount VF, std::optional< unsigned > VScale)
This function attempts to return a value that represents the ElementCount at runtime.
static constexpr uint32_t MinItersBypassWeights[]
static cl::opt< unsigned > ForceTargetNumScalarRegs("force-target-num-scalar-regs", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's number of scalar registers."))
static cl::opt< bool > UseWiderVFIfCallVariantsPresent("vectorizer-maximize-bandwidth-for-vector-calls", cl::init(true), cl::Hidden, cl::desc("Try wider VFs if they enable the use of vector variants"))
static std::optional< unsigned > getMaxVScale(const Function &F, const TargetTransformInfo &TTI)
static SmallVector< VPInstruction * > preparePlanForMainVectorLoop(VPlan &MainPlan, VPlan &EpiPlan)
Prepare MainPlan for vectorizing the main vector loop during epilogue vectorization.
static cl::opt< unsigned > SmallLoopCost("small-loop-cost", cl::init(20), cl::Hidden, cl::desc("The cost of a loop that is considered 'small' by the interleaver."))
static cl::opt< unsigned > ForceTargetNumVectorRegs("force-target-num-vector-regs", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's number of vector registers."))
static bool isExplicitVecOuterLoop(Loop *OuterLp, OptimizationRemarkEmitter *ORE)
static cl::opt< bool > EnableIndVarRegisterHeur("enable-ind-var-reg-heur", cl::init(true), cl::Hidden, cl::desc("Count the induction variable only once when interleaving"))
static cl::opt< TailFoldingStyle > ForceTailFoldingStyle("force-tail-folding-style", cl::desc("Force the tail folding style"), cl::init(TailFoldingStyle::None), cl::values(clEnumValN(TailFoldingStyle::None, "none", "Disable tail folding"), clEnumValN(TailFoldingStyle::Data, "data", "Create lane mask for data only, using active.lane.mask intrinsic"), clEnumValN(TailFoldingStyle::DataWithoutLaneMask, "data-without-lane-mask", "Create lane mask with compare/stepvector"), clEnumValN(TailFoldingStyle::DataAndControlFlow, "data-and-control", "Create lane mask using active.lane.mask intrinsic, and use " "it for both data and control flow"), clEnumValN(TailFoldingStyle::DataWithEVL, "data-with-evl", "Use predicated EVL instructions for tail folding. If EVL " "is unsupported, fallback to data-without-lane-mask.")))
static void printOptimizedVPlan(VPlan &)
static ScalarEpilogueLowering getScalarEpilogueLowering(Function *F, Loop *L, LoopVectorizeHints &Hints, bool OptForSize, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, LoopVectorizationLegality &LVL, InterleavedAccessInfo *IAI)
static cl::opt< bool > EnableEpilogueVectorization("enable-epilogue-vectorization", cl::init(true), cl::Hidden, cl::desc("Enable vectorization of epilogue loops."))
static cl::opt< bool > PreferPredicatedReductionSelect("prefer-predicated-reduction-select", cl::init(false), cl::Hidden, cl::desc("Prefer predicating a reduction operation over an after loop select."))
static cl::opt< bool > PreferInLoopReductions("prefer-inloop-reductions", cl::init(false), cl::Hidden, cl::desc("Prefer in-loop vector reductions, " "overriding the targets preference."))
static std::optional< ElementCount > getSmallBestKnownTC(PredicatedScalarEvolution &PSE, Loop *L, bool CanUseConstantMax=true, bool CanExcludeZeroTrips=false)
Returns "best known" trip count, which is either a valid positive trip count or std::nullopt when an ...
static SmallVector< Instruction * > preparePlanForEpilogueVectorLoop(VPlan &Plan, Loop *L, const SCEV2ValueTy &ExpandedSCEVs, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel &CM, ScalarEvolution &SE)
Prepare Plan for vectorizing the epilogue loop.
static const SCEV * getAddressAccessSCEV(Value *Ptr, PredicatedScalarEvolution &PSE, const Loop *TheLoop)
Gets the address access SCEV for Ptr, if it should be used for cost modeling according to isAddressSC...
static cl::opt< bool > EnableLoadStoreRuntimeInterleave("enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, cl::desc("Enable runtime interleaving until load/store ports are saturated"))
static cl::opt< bool > VPlanBuildStressTest("vplan-build-stress-test", cl::init(false), cl::Hidden, cl::desc("Build VPlan for every supported loop nest in the function and bail " "out right after the build (stress test the VPlan H-CFG construction " "in the VPlan-native vectorization path)."))
static bool hasIrregularType(Type *Ty, const DataLayout &DL)
A helper function that returns true if the given type is irregular.
static cl::opt< bool > LoopVectorizeWithBlockFrequency("loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, cl::desc("Enable the use of the block frequency analysis to access PGO " "heuristics minimizing code growth in cold regions and being more " "aggressive in hot regions."))
static bool useActiveLaneMask(TailFoldingStyle Style)
static bool hasReplicatorRegion(VPlan &Plan)
static bool isIndvarOverflowCheckKnownFalse(const LoopVectorizationCostModel *Cost, ElementCount VF, std::optional< unsigned > UF=std::nullopt)
For the given VF and UF and maximum trip count computed for the loop, return whether the induction va...
static void addFullyUnrolledInstructionsToIgnore(Loop *L, const LoopVectorizationLegality::InductionList &IL, SmallPtrSetImpl< Instruction * > &InstsToIgnore)
Knowing that loop L executes a single vector iteration, add instructions that will get simplified and...
static cl::opt< PreferPredicateTy::Option > PreferPredicateOverEpilogue("prefer-predicate-over-epilogue", cl::init(PreferPredicateTy::ScalarEpilogue), cl::Hidden, cl::desc("Tail-folding and predication preferences over creating a scalar " "epilogue loop."), cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue, "scalar-epilogue", "Don't tail-predicate loops, create scalar epilogue"), clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue, "predicate-else-scalar-epilogue", "prefer tail-folding, create scalar epilogue if tail " "folding fails."), clEnumValN(PreferPredicateTy::PredicateOrDontVectorize, "predicate-dont-vectorize", "prefers tail-folding, don't attempt vectorization if " "tail-folding fails.")))
static bool hasFindLastReductionPhi(VPlan &Plan)
Returns true if the VPlan contains a VPReductionPHIRecipe with FindLast recurrence kind.
static cl::opt< bool > EnableInterleavedMemAccesses("enable-interleaved-mem-accesses", cl::init(false), cl::Hidden, cl::desc("Enable vectorization on interleaved memory accesses in a loop"))
static cl::opt< bool > EnableMaskedInterleavedMemAccesses("enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden, cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"))
An interleave-group may need masking if it resides in a block that needs predication,...
static cl::opt< bool > ForceOrderedReductions("force-ordered-reductions", cl::init(false), cl::Hidden, cl::desc("Enable the vectorisation of loops with in-order (strict) " "FP reductions"))
static cl::opt< cl::boolOrDefault > ForceSafeDivisor("force-widen-divrem-via-safe-divisor", cl::Hidden, cl::desc("Override cost based safe divisor widening for div/rem instructions"))
static InstructionCost calculateEarlyExitCost(VPCostContext &CostCtx, VPlan &Plan, ElementCount VF)
For loops with uncountable early exits, find the cost of doing work when exiting the loop early,...
static cl::opt< unsigned > ForceTargetMaxVectorInterleaveFactor("force-target-max-vector-interleave", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's max interleave factor for " "vectorized loops."))
static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI)
cl::opt< unsigned > NumberOfStoresToPredicate("vectorize-num-stores-pred", cl::init(1), cl::Hidden, cl::desc("Max number of stores to be predicated behind an if."))
The number of stores in a loop that are allowed to need predication.
static void fixScalarResumeValuesFromBypass(BasicBlock *BypassBlock, Loop *L, VPlan &BestEpiPlan, ArrayRef< VPInstruction * > ResumeValues)
static cl::opt< unsigned > MaxNestedScalarReductionIC("max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, cl::desc("The maximum interleave count to use when interleaving a scalar " "reduction in a nested loop."))
static cl::opt< unsigned > ForceTargetMaxScalarInterleaveFactor("force-target-max-scalar-interleave", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's max interleave factor for " "scalar loops."))
static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE)
static bool willGenerateVectors(VPlan &Plan, ElementCount VF, const TargetTransformInfo &TTI)
Check if any recipe of Plan will generate a vector value, which will be assigned a vector register.
static cl::opt< bool > ForceTargetSupportsMaskedMemoryOps("force-target-supports-masked-memory-ops", cl::init(false), cl::Hidden, cl::desc("Assume the target supports masked memory operations (used for " "testing)."))
Note: This currently only applies to llvm.masked.load and llvm.masked.store.
static bool isOutsideLoopWorkProfitable(GeneratedRTChecks &Checks, VectorizationFactor &VF, Loop *L, PredicatedScalarEvolution &PSE, VPCostContext &CostCtx, VPlan &Plan, ScalarEpilogueLowering SEL, std::optional< unsigned > VScale)
This function determines whether or not it's still profitable to vectorize the loop given the extra w...
static cl::opt< bool > MaximizeBandwidth("vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden, cl::desc("Maximize bandwidth when selecting vectorization factor which " "will be determined by the smallest type in loop."))
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop, Instruction *I, DebugLoc DL={})
Create an analysis remark that explains why vectorization failed.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
This file implements a map that provides insertion order iteration.
This file contains the declarations for metadata subclasses.
#define T
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
#define P(N)
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static BinaryOperator * CreateMul(Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore, Value *FlagsOp)
static BinaryOperator * CreateAdd(Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore, Value *FlagsOp)
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static InstructionCost getScalarizationOverhead(const TargetTransformInfo &TTI, Type *ScalarTy, VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={})
This is similar to TargetTransformInfo::getScalarizationOverhead, but if ScalarTy is a FixedVectorTyp...
This file contains some templates that are useful if you are working with the STL at all.
#define OP(OPC)
Definition Instruction.h:46
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
#define DEBUG_WITH_TYPE(TYPE,...)
DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.
Definition Debug.h:72
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
This pass exposes codegen information to IR-level passes.
LocallyHashedType DenseMapInfo< LocallyHashedType >::Empty
This file implements the TypeSwitch template, which mimics a switch() statement whose cases are type ...
This file contains the declarations of different VPlan-related auxiliary helpers.
This file provides utility VPlan to VPlan transformations.
#define RUN_VPLAN_PASS(PASS,...)
#define RUN_VPLAN_PASS_NO_VERIFY(PASS,...)
This file declares the class VPlanVerifier, which contains utility functions to check the consistency...
This file contains the declarations of the Vectorization Plan base classes:
static const char PassName[]
Value * RHS
Value * LHS
static const uint32_t IV[8]
Definition blake3_impl.h:83
A manager for alias analyses.
Class for arbitrary precision integers.
Definition APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:235
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1555
unsigned getActiveBits() const
Compute the number of active bits in the value.
Definition APInt.h:1527
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:381
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
A function analysis which provides an AssumptionCache.
A cache of @llvm.assume calls within a function.
LLVM_ABI unsigned getVScaleRangeMin() const
Returns the minimum value for the vscale_range attribute.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:530
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
LLVM_ABI const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
BinaryOps getOpcode() const
Definition InstrTypes.h:374
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
bool isNoBuiltin() const
Return true if the call should not be treated as a call to a builtin.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Value * getArgOperand(unsigned i) const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition InstrTypes.h:986
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
Conditional Branch instruction.
BasicBlock * getSuccessor(unsigned i) const
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
This class represents a range of values.
LLVM_ABI APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
A debug info location.
Definition DebugLoc.h:123
static DebugLoc getTemporary()
Definition DebugLoc.h:160
static DebugLoc getUnknown()
Definition DebugLoc.h:161
An analysis that produces DemandedBits for a function.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:256
bool empty() const
Definition DenseMap.h:109
iterator end()
Definition DenseMap.h:81
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition DenseMap.h:169
void insert_range(Range &&R)
Inserts range of 'std::pair<KeyT, ValueT>' values into the map.
Definition DenseMap.h:294
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
Analysis pass which computes a DominatorTree.
Definition Dominators.h:278
void changeImmediateDominator(DomTreeNodeBase< NodeT > *N, DomTreeNodeBase< NodeT > *NewIDom)
changeImmediateDominator - This method is used to update the dominator tree information when a node's...
void eraseNode(NodeT *BB)
eraseNode - Removes a node from the dominator tree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
constexpr bool isVector() const
One or more elements.
Definition TypeSize.h:324
static constexpr ElementCount getScalable(ScalarTy MinVal)
Definition TypeSize.h:312
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
Definition TypeSize.h:315
constexpr bool isScalar() const
Exactly one element.
Definition TypeSize.h:320
EpilogueVectorizerEpilogueLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, GeneratedRTChecks &Checks, VPlan &Plan)
BasicBlock * createVectorizedLoopSkeleton() final
Implements the interface for creating a vectorized skeleton using the epilogue loop strategy (i....
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
A specialized derived class of inner loop vectorizer that performs vectorization of main loops in the...
void printDebugTracesAtStart() override
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
EpilogueVectorizerMainLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, GeneratedRTChecks &Check, VPlan &Plan)
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:23
Class to represent function types.
param_iterator param_begin() const
param_iterator param_end() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:211
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition Function.cpp:763
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:728
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags none()
void applyUpdates(ArrayRef< UpdateT > Updates)
Submit updates to all available trees.
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2812
A struct for saving information about induction variables.
const SCEV * getStep() const
ArrayRef< Instruction * > getCastInsts() const
Returns an ArrayRef to the type cast instructions in the induction update chain, that are redundant w...
InductionKind
This enum represents the kinds of inductions that we support.
@ IK_NoInduction
Not an induction variable.
@ IK_FpInduction
Floating point induction variable.
@ IK_PtrInduction
Pointer induction var. Step = C.
@ IK_IntInduction
Integer induction variable. Step = C.
InnerLoopAndEpilogueVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel *CM, GeneratedRTChecks &Checks, VPlan &Plan, ElementCount VecWidth, ElementCount MinProfitableTripCount, unsigned UnrollFactor)
EpilogueLoopVectorizationInfo & EPI
Holds and updates state information required to vectorize the main loop and its epilogue in two separ...
InnerLoopVectorizer vectorizes loops which contain only one basic block to a specified vectorization ...
virtual void printDebugTracesAtStart()
Allow subclasses to override and print debug traces before/after vplan execution, when trace informat...
const TargetTransformInfo * TTI
Target Transform Info.
LoopVectorizationCostModel * Cost
The profitablity analysis.
friend class LoopVectorizationPlanner
InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, const TargetTransformInfo *TTI, AssumptionCache *AC, ElementCount VecWidth, unsigned UnrollFactor, LoopVectorizationCostModel *CM, GeneratedRTChecks &RTChecks, VPlan &Plan)
PredicatedScalarEvolution & PSE
A wrapper around ScalarEvolution used to add runtime SCEV checks.
LoopInfo * LI
Loop Info.
DominatorTree * DT
Dominator Tree.
void fixVectorizedLoop(VPTransformState &State)
Fix the vectorized code, taking care of header phi's, and more.
virtual BasicBlock * createVectorizedLoopSkeleton()
Creates a basic block for the scalar preheader.
virtual void printDebugTracesAtEnd()
AssumptionCache * AC
Assumption Cache.
IRBuilder Builder
The builder that we use.
void fixNonInductionPHIs(VPTransformState &State)
Fix the non-induction PHIs in Plan.
VPBasicBlock * VectorPHVPBB
The vector preheader block of Plan, used as target for check blocks introduced during skeleton creati...
unsigned UF
The vectorization unroll factor to use.
GeneratedRTChecks & RTChecks
Structure to hold information about generated runtime checks, responsible for cleaning the checks,...
virtual ~InnerLoopVectorizer()=default
ElementCount VF
The vectorization SIMD factor to use.
Loop * OrigLoop
The original loop.
BasicBlock * createScalarPreheader(StringRef Prefix)
Create and return a new IR basic block for the scalar preheader whose name is prefixed with Prefix.
static InstructionCost getInvalid(CostType Val=0)
static InstructionCost getMax()
CostType getValue() const
This function is intended to be used as sparingly as possible, since the class provides the full rang...
bool isCast() const
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
const char * getOpcodeName() const
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:354
LLVM_ABI APInt getMask() const
For example, this is 0xFF for an 8 bit integer, 0xFFFF for i16, etc.
Definition Type.cpp:378
The group of interleaved loads/stores sharing the same stride and close to each other.
uint32_t getFactor() const
InstTy * getMember(uint32_t Index) const
Get the member with the given index Index.
InstTy * getInsertPos() const
uint32_t getNumMembers() const
Drive the analysis of interleaved memory accesses in the loop.
bool requiresScalarEpilogue() const
Returns true if an interleaved group that may access memory out-of-bounds requires a scalar epilogue ...
LLVM_ABI void analyzeInterleaving(bool EnableMaskedInterleavedGroup)
Analyze the interleaved accesses and collect them in interleave groups.
An instruction for reading from memory.
Type * getPointerOperandType() const
This analysis provides dependence information for the memory accesses of a loop.
const RuntimePointerChecking * getRuntimePointerChecking() const
unsigned getNumRuntimePointerChecks() const
Number of memchecks required to prove independence of otherwise may-alias pointers.
Analysis pass that exposes the LoopInfo for a function.
Definition LoopInfo.h:569
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
void getExitingBlocks(SmallVectorImpl< BlockT * > &ExitingBlocks) const
Return all blocks inside the loop that have successors outside of the loop.
BlockT * getHeader() const
iterator_range< block_iterator > blocks() const
ArrayRef< BlockT * > getBlocks() const
Get a list of the basic blocks which make up this loop.
Store the result of a depth first search within basic blocks contained by a single loop.
RPOIterator beginRPO() const
Reverse iterate over the cached postorder blocks.
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
RPOIterator endRPO() const
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
void perform(const LoopInfo *LI)
Traverse the loop blocks and store the DFS result.
void removeBlock(BlockT *BB)
This method completely removes BB from all data structures, including all of the Loop objects it is n...
LoopVectorizationCostModel - estimates the expected speedups due to vectorization.
SmallPtrSet< Type *, 16 > ElementTypesInLoop
All element types found in the loop.
bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment, unsigned AddressSpace) const
Returns true if the target machine supports masked load operation for the given DataType and kind of ...
void collectElementTypesForWidening()
Collect all element types in the loop for which widening is needed.
bool canVectorizeReductions(ElementCount VF) const
Returns true if the target machine supports all of the reduction variables found for the given VF.
bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment, unsigned AddressSpace) const
Returns true if the target machine supports masked store operation for the given DataType and kind of...
bool isEpilogueVectorizationProfitable(const ElementCount VF, const unsigned IC) const
Returns true if epilogue vectorization is considered profitable, and false otherwise.
bool useWideActiveLaneMask() const
Returns true if the use of wide lane masks is requested and the loop is using tail-folding with a lan...
bool isPredicatedInst(Instruction *I) const
Returns true if I is an instruction that needs to be predicated at runtime.
void collectValuesToIgnore()
Collect values we want to ignore in the cost model.
BlockFrequencyInfo * BFI
The BlockFrequencyInfo returned from GetBFI.
void collectInLoopReductions()
Split reductions into those that happen in the loop, and those that happen outside.
BlockFrequencyInfo & getBFI()
Returns the BlockFrequencyInfo for the function if cached, otherwise fetches it via GetBFI.
std::pair< unsigned, unsigned > getSmallestAndWidestTypes()
bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be uniform after vectorization.
bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF)
Returns true if an artificially high cost for emulated masked memrefs should be used.
void collectNonVectorizedAndSetWideningDecisions(ElementCount VF)
Collect values that will not be widened, including Uniforms, Scalars, and Instructions to Scalarize f...
bool isMaskRequired(Instruction *I) const
Wrapper function for LoopVectorizationLegality::isMaskRequired, that passes the Instruction I and if ...
PredicatedScalarEvolution & PSE
Predicated scalar evolution analysis.
const LoopVectorizeHints * Hints
Loop Vectorize Hint.
std::optional< unsigned > getMaxSafeElements() const
Return maximum safe number of elements to be processed per vector iteration, which do not prevent sto...
const TargetTransformInfo & TTI
Vector target information.
LoopVectorizationLegality * Legal
Vectorization legality.
uint64_t getPredBlockCostDivisor(TargetTransformInfo::TargetCostKind CostKind, const BasicBlock *BB)
A helper function that returns how much we should divide the cost of a predicated block by.
std::optional< InstructionCost > getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy) const
Return the cost of instructions in an inloop reduction pattern, if I is part of that pattern.
InstructionCost getInstructionCost(Instruction *I, ElementCount VF)
Returns the execution time cost of an instruction for a given vector width.
DemandedBits * DB
Demanded bits analysis.
bool interleavedAccessCanBeWidened(Instruction *I, ElementCount VF) const
Returns true if I is a memory instruction in an interleaved-group of memory accesses that can be vect...
const TargetLibraryInfo * TLI
Target Library Info.
bool memoryInstructionCanBeWidened(Instruction *I, ElementCount VF)
Returns true if I is a memory instruction with consecutive memory access that can be widened.
const InterleaveGroup< Instruction > * getInterleavedAccessGroup(Instruction *Instr) const
Get the interleaved access group that Instr belongs to.
InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const
Estimate cost of an intrinsic call instruction CI if it were vectorized with factor VF.
bool OptForSize
Whether this loop should be optimized for size based on function attribute or profile information.
bool useMaxBandwidth(TargetTransformInfo::RegisterKind RegKind)
bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const
Returns true if I is known to be scalar after vectorization.
bool isOptimizableIVTruncate(Instruction *I, ElementCount VF)
Return True if instruction I is an optimizable truncate whose operand is an induction variable.
FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC)
bool shouldConsiderRegPressureForVF(ElementCount VF)
Loop * TheLoop
The loop that we evaluate.
TTI::TargetCostKind CostKind
The kind of cost that we are calculating.
InterleavedAccessInfo & InterleaveInfo
The interleave access information contains groups of interleaved accesses with the same stride and cl...
SmallPtrSet< const Value *, 16 > ValuesToIgnore
Values to ignore in the cost model.
void setVectorizedCallDecision(ElementCount VF)
A call may be vectorized in different ways depending on whether we have vectorized variants available...
void invalidateCostModelingDecisions()
Invalidates decisions already taken by the cost model.
bool isAccessInterleaved(Instruction *Instr) const
Check if Instr belongs to any interleaved access group.
bool selectUserVectorizationFactor(ElementCount UserVF)
Setup cost-based decisions for user vectorization factor.
std::optional< unsigned > getVScaleForTuning() const
Return the value of vscale used for tuning the cost model.
void setTailFoldingStyle(bool IsScalableVF, unsigned UserIC)
Selects and saves TailFoldingStyle.
OptimizationRemarkEmitter * ORE
Interface to emit optimization remarks.
bool preferPredicatedLoop() const
Returns true if tail-folding is preferred over a scalar epilogue.
LoopInfo * LI
Loop Info analysis.
bool requiresScalarEpilogue(bool IsVectorizing) const
Returns true if we're required to use a scalar epilogue for at least the final iteration of the origi...
SmallPtrSet< const Value *, 16 > VecValuesToIgnore
Values to ignore in the cost model when VF > 1.
bool isInLoopReduction(PHINode *Phi) const
Returns true if the Phi is part of an inloop reduction.
bool isProfitableToScalarize(Instruction *I, ElementCount VF) const
void setWideningDecision(const InterleaveGroup< Instruction > *Grp, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for interleaving group Grp and vector ...
const MapVector< Instruction *, uint64_t > & getMinimalBitwidths() const
CallWideningDecision getCallWideningDecision(CallInst *CI, ElementCount VF) const
bool isLegalGatherOrScatter(Value *V, ElementCount VF)
Returns true if the target machine can represent V as a masked gather or scatter operation.
bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const
bool shouldConsiderInvariant(Value *Op)
Returns true if Op should be considered invariant and if it is trivially hoistable.
bool foldTailByMasking() const
Returns true if all loop blocks should be masked to fold tail loop.
bool foldTailWithEVL() const
Returns true if VP intrinsics with explicit vector length support should be generated in the tail fol...
bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const
Returns true if the instructions in this block require predication for any reason,...
void setCallWideningDecision(CallInst *CI, ElementCount VF, InstWidening Kind, Function *Variant, Intrinsic::ID IID, std::optional< unsigned > MaskPos, InstructionCost Cost)
AssumptionCache * AC
Assumption cache.
void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W, InstructionCost Cost)
Save vectorization decision W and Cost taken by the cost model for instruction I and vector width VF.
InstWidening
Decision that was taken during cost calculation for a memory instruction.
bool usePredicatedReductionSelect(RecurKind RecurrenceKind) const
Returns true if the predicated reduction select should be used to set the incoming value for the redu...
std::pair< InstructionCost, InstructionCost > getDivRemSpeculationCost(Instruction *I, ElementCount VF)
Return the costs for our two available strategies for lowering a div/rem operation which requires spe...
InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF) const
Estimate cost of a call instruction CI if it were vectorized with factor VF.
bool isScalarWithPredication(Instruction *I, ElementCount VF)
Returns true if I is an instruction which requires predication and for which our chosen predication s...
bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) const
Returns true if we should use strict in-order reductions for the given RdxDesc.
bool isDivRemScalarWithPredication(InstructionCost ScalarCost, InstructionCost SafeDivisorCost) const
Given costs for both strategies, return true if the scalar predication lowering should be used for di...
std::function< BlockFrequencyInfo &()> GetBFI
A function to lazily fetch BlockFrequencyInfo.
LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, LoopVectorizationLegality *Legal, const TargetTransformInfo &TTI, const TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, std::function< BlockFrequencyInfo &()> GetBFI, const Function *F, const LoopVectorizeHints *Hints, InterleavedAccessInfo &IAI, bool OptForSize)
InstructionCost expectedCost(ElementCount VF)
Returns the expected execution cost.
void setCostBasedWideningDecision(ElementCount VF)
A memory access instruction may be vectorized in more than one way.
InstWidening getWideningDecision(Instruction *I, ElementCount VF) const
Return the cost model decision for the given instruction I and vector width VF.
FixedScalableVFPair MaxPermissibleVFWithoutMaxBW
The highest VF possible for this loop, without using MaxBandwidth.
const SmallPtrSetImpl< PHINode * > & getInLoopReductions() const
Returns the set of in-loop reduction PHIs.
bool isScalarEpilogueAllowed() const
Returns true if a scalar epilogue is allowed (e.g., not prevented by optsize or a loop hint annotati...
InstructionCost getWideningCost(Instruction *I, ElementCount VF)
Return the vectorization cost for the given instruction I and vector width VF.
TailFoldingStyle getTailFoldingStyle() const
Returns the TailFoldingStyle that is best for the current loop.
void collectInstsToScalarize(ElementCount VF)
Collects the instructions to scalarize for each predicated instruction in the loop.
LoopVectorizationLegality checks if it is legal to vectorize a loop, and to what vectorization factor...
MapVector< PHINode *, InductionDescriptor > InductionList
InductionList saves induction variables and maps them to the induction descriptor.
bool canVectorize(bool UseVPlanNativePath)
Returns true if it is legal to vectorize this loop.
bool canVectorizeFPMath(bool EnableStrictReductions)
Returns true if it is legal to vectorize the FP math operations in this loop.
const SmallVector< BasicBlock *, 4 > & getCountableExitingBlocks() const
Returns all exiting blocks with a countable exit, i.e.
bool hasUncountableEarlyExit() const
Returns true if the loop has uncountable early exits, i.e.
bool hasHistograms() const
Returns true if the loop contains any known histogram operations.
const LoopAccessInfo * getLAI() const
Planner drives the vectorization process after having passed Legality checks.
DenseMap< const SCEV *, Value * > executePlan(ElementCount VF, unsigned UF, VPlan &BestPlan, InnerLoopVectorizer &LB, DominatorTree *DT, EpilogueVectorizationKind EpilogueVecKind=EpilogueVectorizationKind::None)
Generate the IR code for the vectorized loop captured in VPlan BestPlan according to the best selecte...
EpilogueVectorizationKind
@ MainLoop
Vectorizing the main loop of epilogue vectorization.
VPlan & getPlanFor(ElementCount VF) const
Return the VPlan for VF.
Definition VPlan.cpp:1653
VectorizationFactor planInVPlanNativePath(ElementCount UserVF)
Use the VPlan-native path to plan how to best vectorize, return the best VF and its cost.
void updateLoopMetadataAndProfileInfo(Loop *VectorLoop, VPBasicBlock *HeaderVPBB, const VPlan &Plan, bool VectorizingEpilogue, MDNode *OrigLoopID, std::optional< unsigned > OrigAverageTripCount, unsigned OrigLoopInvocationWeight, unsigned EstimatedVFxUF, bool DisableRuntimeUnroll)
Update loop metadata and profile info for both the scalar remainder loop and VectorLoop,...
Definition VPlan.cpp:1704
void buildVPlans(ElementCount MinVF, ElementCount MaxVF)
Build VPlans for power-of-2 VF's between MinVF and MaxVF inclusive, according to the information gath...
Definition VPlan.cpp:1637
void attachRuntimeChecks(VPlan &Plan, GeneratedRTChecks &RTChecks, bool HasBranchWeights) const
Attach the runtime checks of RTChecks to Plan.
unsigned selectInterleaveCount(VPlan &Plan, ElementCount VF, InstructionCost LoopCost)
void emitInvalidCostRemarks(OptimizationRemarkEmitter *ORE)
Emit remarks for recipes with invalid costs in the available VPlans.
static bool getDecisionAndClampRange(const std::function< bool(ElementCount)> &Predicate, VFRange &Range)
Test a Predicate on a Range of VF's.
Definition VPlan.cpp:1618
void printPlans(raw_ostream &O)
Definition VPlan.cpp:1798
void plan(ElementCount UserVF, unsigned UserIC)
Build VPlans for the specified UserVF and UserIC if they are non-zero or all applicable candidate VFs...
std::unique_ptr< VPlan > selectBestEpiloguePlan(VPlan &MainPlan, ElementCount MainLoopVF, unsigned IC)
void addMinimumIterationCheck(VPlan &Plan, ElementCount VF, unsigned UF, ElementCount MinProfitableTripCount) const
Add a check to Plan to see if the vector loop should be executed based on its trip count.
bool hasPlanWithVF(ElementCount VF) const
Look through the existing plans and return true if we have one with vectorization factor VF.
std::pair< VectorizationFactor, VPlan * > computeBestVF()
Compute and return the most profitable vectorization factor and the corresponding best VPlan.
This holds vectorization requirements that must be verified late in the process.
Utility class for getting and setting loop vectorizer hints in the form of loop metadata.
bool allowVectorization(Function *F, Loop *L, bool VectorizeOnlyWhenForced) const
void emitRemarkWithHints() const
Dumps all the hint information.
const char * vectorizeAnalysisPassName() const
If hints are provided that force vectorization, use the AlwaysPrint pass name to force the frontend t...
This class emits a version of the loop where run-time checks ensure that may-alias pointers can't ove...
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
bool hasLoopInvariantOperands(const Instruction *I) const
Return true if all the operands of the specified instruction are loop invariant.
Definition LoopInfo.cpp:73
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
Definition LoopInfo.cpp:653
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
Definition LoopInfo.cpp:67
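A minimal usage sketch of the loop-invariance queries above; the Loop and Instruction pointers are assumed to come from an enclosing pass and are not taken from this file.

// Minimal sketch (assumed context: a Loop *L and an Instruction *I).
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Instructions.h"

static bool allOperandsOutsideLoop(const llvm::Loop *L, const llvm::Instruction *I) {
  // hasLoopInvariantOperands() is a convenience wrapper that checks
  // isLoopInvariant() for every operand of I.
  return L->hasLoopInvariantOperands(I);
}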
Metadata node.
Definition Metadata.h:1080
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition MapVector.h:124
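For illustration, a minimal sketch of MapVector, the map type used above for the minimal-bitwidths map and the induction list: lookups are map-like, while iteration order is deterministic insertion order. The key type and function name here are illustrative only.

// Minimal sketch of MapVector iteration (insertion-ordered, deterministic).
#include "llvm/ADT/MapVector.h"
#include <cstdint>

static uint64_t sumWidths(const llvm::MapVector<int, uint64_t> &MinBWs) {
  uint64_t Sum = 0;
  for (const auto &Entry : MinBWs) // Entry is a std::pair<Key, Value>
    Sum += Entry.second;
  return Sum;
}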
Function * getFunction(StringRef Name) const
Look up the specified function in the module symbol table.
Definition Module.cpp:235
Diagnostic information for optimization analysis remarks related to pointer aliasing.
Diagnostic information for optimization analysis remarks related to floating-point non-commutativity.
Diagnostic information for optimization analysis remarks.
The optimization diagnostic interface.
LLVM_ABI void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for missed-optimization remarks.
Diagnostic information for applied optimization remarks.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
Value * getIncomingValueForBlock(const BasicBlock *BB) const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
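A minimal sketch of walking a PHI's incoming values with the accessors above; the PHINode and predecessor block are assumed to be supplied by the caller.

// Minimal sketch (assumed context: PHINode *Phi, BasicBlock *Pred).
#include "llvm/IR/Instructions.h"

static llvm::Value *findIncomingFrom(llvm::PHINode *Phi, llvm::BasicBlock *Pred) {
  for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
    if (Phi->getIncomingBlock(I) == Pred)
      return Phi->getIncomingValue(I);
  // getIncomingValueForBlock() asserts that Pred is listed; the manual loop
  // above is the non-asserting variant.
  return nullptr;
}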
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
LLVM_ABI const SCEVPredicate & getPredicate() const
LLVM_ABI unsigned getSmallConstantMaxTripCount()
Returns the upper bound of the loop trip count as a normal unsigned value, or 0 if the trip count is ...
LLVM_ABI const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
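A minimal sketch of how a transform pass commonly reports preserved analyses using the API above; the specific preserved set chosen here is illustrative, not what LoopVectorize itself reports.

// Minimal sketch of constructing a PreservedAnalyses result.
#include "llvm/IR/Analysis.h"

static llvm::PreservedAnalyses reportResult(bool Changed) {
  if (!Changed)
    return llvm::PreservedAnalyses::all();
  llvm::PreservedAnalyses PA;
  // Mark a whole set of analyses (here: those keyed on the CFG) as preserved.
  PA.preserveSet<llvm::CFGAnalyses>();
  return PA;
}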
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
The RecurrenceDescriptor is used to identify recurrence variables in a loop.
static bool isFMulAddIntrinsic(Instruction *I)
Returns true if the instruction is a call to the llvm.fmuladd intrinsic.
FastMathFlags getFastMathFlags() const
static LLVM_ABI unsigned getOpcode(RecurKind Kind)
Returns the opcode corresponding to the RecurrenceKind.
Type * getRecurrenceType() const
Returns the type of the recurrence.
bool hasUsesOutsideReductionChain() const
Returns true if the reduction PHI has any uses outside the reduction chain.
const SmallPtrSet< Instruction *, 8 > & getCastInsts() const
Returns a reference to the instructions used for type-promoting the recurrence.
static bool isFindLastRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
unsigned getMinWidthCastToRecurrenceTypeInBits() const
Returns the minimum width used by the recurrence in bits.
LLVM_ABI SmallVector< Instruction *, 4 > getReductionOpChain(PHINode *Phi, Loop *L) const
Attempts to find a chain of operations from Phi to LoopExitInst that can be treated as a set of reduc...
static bool isAnyOfRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
bool isSigned() const
Returns true if all source operands of the recurrence are SExtInsts.
RecurKind getRecurrenceKind() const
bool isOrdered() const
Expose an ordered FP reduction to the instance users.
static bool isFindIVRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is of the form select(cmp(),x,y) where one of (x,...
static bool isMinMaxRecurrenceKind(RecurKind Kind)
Returns true if the recurrence kind is any min/max kind.
std::optional< ArrayRef< PointerDiffInfo > > getDiffChecks() const
const SmallVectorImpl< RuntimePointerCheck > & getChecks() const
Returns the checks that generateChecks created.
This class uses information about analyzed scalars to rewrite expressions in canonical form.
ScalarEvolution * getSE()
bool isInsertedInstruction(Instruction *I) const
Return true if the specified instruction was inserted by the code rewriter.
LLVM_ABI Value * expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc)
Generates a code sequence that evaluates this predicate.
void eraseDeadInstructions(Value *Root)
Remove inserted instructions that are dead, e.g.
virtual bool isAlwaysTrue() const =0
Returns true if the predicate is always true.
This class represents an analyzed expression in the program.
LLVM_ABI bool isZero() const
Return true if the expression is a constant zero.
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
The main scalar evolution driver.
LLVM_ABI const SCEV * getURemExpr(SCEVUse LHS, SCEVUse RHS)
Represents an unsigned remainder expression based on unsigned division.
LLVM_ABI const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...
LLVM_ABI const SCEV * getConstant(ConstantInt *V)
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
LLVM_ABI const SCEV * getTripCountFromExitCount(const SCEV *ExitCount)
A version of getTripCountFromExitCount below which always picks an evaluation type which can not resu...
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
LLVM_ABI void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may affect Scalar...
LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
LLVM_ABI const SCEV * getElementCount(Type *Ty, ElementCount EC, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
ConstantRange getUnsignedRange(const SCEV *S)
Determine the unsigned range for a particular SCEV.
LLVM_ABI void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may affect its v...
LLVM_ABI void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
const SCEV * getMinusOne(Type *Ty)
Return a SCEV for the constant -1 of a specific type.
LLVM_ABI void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be...
LLVM_ABI const SCEV * getMulExpr(SmallVectorImpl< SCEVUse > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
LLVM_ABI unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
APInt getUnsignedRangeMax(const SCEV *S)
Determine the max of the unsigned range for a particular SCEV.
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< SCEVUse > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, SCEVUse LHS, SCEVUse RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
LLVM_ABI const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
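A minimal sketch of the trip-count queries listed above, assuming a ScalarEvolution reference and a Loop pointer supplied by the caller; the function name is illustrative.

// Minimal sketch of ScalarEvolution trip-count queries.
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Support/Debug.h"

static void reportTripCount(llvm::ScalarEvolution &SE, const llvm::Loop *L) {
  // Exact constant trip count, or 0 if unknown / not a small constant.
  if (unsigned TC = SE.getSmallConstantTripCount(L))
    llvm::dbgs() << "constant trip count: " << TC << "\n";
  // Symbolic backedge-taken count; SCEVCouldNotCompute if unknown.
  const llvm::SCEV *BTC = SE.getBackedgeTakenCount(L);
  if (!llvm::isa<llvm::SCEVCouldNotCompute>(BTC))
    BTC->print(llvm::dbgs());
}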
This class represents the LLVM 'select' instruction.
A vector that has set insertion semantics.
Definition SetVector.h:57
size_type size() const
Determine the number of elements in the SetVector.
Definition SetVector.h:103
void insert_range(Range &&R)
Definition SetVector.h:176
size_type count(const_arg_type key) const
Count the number of elements of a given key in the SetVector.
Definition SetVector.h:262
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:339
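A minimal sketch of the set containers referenced above; ValuesToIgnore in the cost model is one such SmallPtrSet. The variable names are illustrative only.

// Minimal sketch of SmallPtrSet and SetVector usage.
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/Value.h"

static void demoSets(llvm::Value *A, llvm::Value *B) {
  llvm::SmallPtrSet<const llvm::Value *, 16> Ignore;
  Ignore.insert(A);            // returns {iterator, bool inserted}
  bool Seen = Ignore.count(A); // 1 if present, 0 otherwise
  (void)Seen;

  llvm::SetVector<llvm::Value *> Ordered; // set semantics, deterministic order
  Ordered.insert(B);
  Ordered.insert(B); // duplicate insert is a no-op
}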
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
LLVM_ABI std::optional< unsigned > getVScaleForTuning() const
VectorInstrContext
Represents a hint about the context in which an insert/extract is used.
@ None
The insert/extract is not used with a load/store.
@ Load
The value being inserted comes from a load (InsertElement only).
@ Store
The extracted value is stored (ExtractElement only).
LLVM_ABI TypeSize getRegisterBitWidth(RegisterKind K) const
LLVM_ABI bool preferFixedOverScalableIfEqualCost(bool IsEpilogue) const
static LLVM_ABI OperandValueInfo getOperandInfo(const Value *V)
Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
LLVM_ABI bool isElementTypeLegalForScalableVector(Type *Ty) const
LLVM_ABI ElementCount getMinimumVF(unsigned ElemWidth, bool IsScalable) const
TargetCostKind
The kind of cost model.
@ TCK_RecipThroughput
Reciprocal throughput.
@ TCK_CodeSize
Instruction code size.
@ TCK_SizeAndLatency
The weighted sum of size and latency.
@ TCK_Latency
The latency of an instruction.
LLVM_ABI bool supportsScalableVectors() const
@ TCC_Free
Expected to fold away in lowering.
LLVM_ABI InstructionCost getInstructionCost(const User *U, ArrayRef< const Value * > Operands, TargetCostKind CostKind) const
Estimate the cost of a given IR user when lowered.
@ SK_Splice
Concatenates elements from the first input vector with elements of the second input vector.
@ SK_Broadcast
Broadcast element 0 to all other elements.
@ SK_Reverse
Reverse the order of the vector.
CastContextHint
Represents a hint about the context in which a cast is used.
@ Reversed
The cast is used with a reversed load/store.
@ Masked
The cast is used with a masked load/store.
@ Normal
The cast is used with a normal load/store.
@ Interleave
The cast is used with an interleaved load/store.
@ GatherScatter
The cast is used with a gather/scatter.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
This class implements a switch-like dispatch statement for a value of 'T' using dyn_cast functionalit...
Definition TypeSwitch.h:89
TypeSwitch< T, ResultT > & Case(CallableT &&caseFn)
Add a case on the given type.
Definition TypeSwitch.h:98
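A minimal sketch of llvm::TypeSwitch, dispatching on the dynamic type of an instruction; the categories chosen here are purely illustrative.

// Minimal sketch of TypeSwitch-based dispatch.
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/IR/Instructions.h"

static llvm::StringRef classify(llvm::Instruction *I) {
  return llvm::TypeSwitch<llvm::Instruction *, llvm::StringRef>(I)
      .Case<llvm::LoadInst, llvm::StoreInst>([](auto *) { return "memory"; })
      .Case<llvm::PHINode>([](llvm::PHINode *) { return "phi"; })
      .Default([](llvm::Instruction *) { return "other"; });
}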
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
LLVM_ABI unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:290
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:286
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:370
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:130
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:236
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:310
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:186
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:257
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:141
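A minimal sketch of the Type queries above, in the spirit of the bitwidth-minimization checks; the threshold and function name are illustrative.

// Minimal sketch of Type inspection.
#include "llvm/IR/Type.h"

static bool isNarrowInteger(llvm::Type *Ty) {
  // getScalarType() looks through vector types, so this works for both
  // scalar and vector operands.
  llvm::Type *ScalarTy = Ty->getScalarType();
  return ScalarTy->isIntegerTy() && ScalarTy->getScalarSizeInBits() < 32;
}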
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
op_range operands()
Definition User.h:267
iterator_range< op_iterator > op_range
Definition User.h:256
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:25
Value * getOperand(unsigned i) const
Definition User.h:207
static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)
Retrieve all the VFInfo instances associated to the CallInst CI.
Definition VectorUtils.h:74
VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph.
Definition VPlan.h:4253
RecipeListTy::iterator iterator
Instruction iterators...
Definition VPlan.h:4280
iterator end()
Definition VPlan.h:4290
iterator begin()
Recipe iterator methods.
Definition VPlan.h:4288
iterator_range< iterator > phis()
Returns an iterator range over the PHI-like recipes in the block.
Definition VPlan.h:4341
InstructionCost cost(ElementCount VF, VPCostContext &Ctx) override
Return the cost of this VPBasicBlock.
Definition VPlan.cpp:778
iterator getFirstNonPhi()
Return the position of the first non-phi node recipe in the block.
Definition VPlan.cpp:232
const VPRecipeBase & front() const
Definition VPlan.h:4300
VPRecipeBase * getTerminator()
If the block has multiple successors, return the branch recipe terminating the block.
Definition VPlan.cpp:644
bool empty() const
Definition VPlan.h:4299
const VPBasicBlock * getExitingBasicBlock() const
Definition VPlan.cpp:202
void setName(const Twine &newName)
Definition VPlan.h:183
VPlan * getPlan()
Definition VPlan.cpp:177
const VPBasicBlock * getEntryBasicBlock() const
Definition VPlan.cpp:182
VPBlockBase * getSingleSuccessor() const
Definition VPlan.h:231
static void reassociateBlocks(VPBlockBase *Old, VPBlockBase *New)
Reassociate all the blocks connected to Old so that they now point to New.
Definition VPlanUtils.h:244
static auto blocksOnly(T &&Range)
Return an iterator range over Range which only includes BlockTy blocks.
Definition VPlanUtils.h:272
VPlan-based builder utility analogous to IRBuilder.
VPInstruction * createAdd(VPValue *LHS, VPValue *RHS, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="", VPRecipeWithIRFlags::WrapFlagsTy WrapFlags={false, false})
void insert(VPRecipeBase *R)
Insert R at the current insertion point.
static VPBuilder getToInsertAfter(VPRecipeBase *R)
Create a VPBuilder to insert after R.
VPPhi * createScalarPhi(ArrayRef< VPValue * > IncomingValues, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="", const VPIRFlags &Flags={})
VPInstruction * createNaryOp(unsigned Opcode, ArrayRef< VPValue * > Operands, Instruction *Inst=nullptr, const VPIRFlags &Flags={}, const VPIRMetadata &MD={}, DebugLoc DL=DebugLoc::getUnknown(), const Twine &Name="")
Create an N-ary operation with Opcode, Operands and set Inst as its underlying Instruction.
Canonical scalar induction phi of the vector loop.
Definition VPlan.h:3831
VPIRValue * getStartValue() const
Returns the start value of the canonical induction.
Definition VPlan.h:3853
unsigned getNumDefinedValues() const
Returns the number of values defined by the VPDef.
Definition VPlanValue.h:465
VPValue * getVPSingleValue()
Returns the only VPValue defined by the VPDef.
Definition VPlanValue.h:438
A pure virtual base class for all recipes modeling header phis, including phis for first order recurr...
Definition VPlan.h:2306
virtual VPValue * getBackedgeValue()
Returns the incoming value from the loop backedge.
Definition VPlan.h:2348
VPValue * getStartValue()
Returns the start value of the phi, if one is set.
Definition VPlan.h:2337
A recipe representing a sequence of load -> update -> store as part of a histogram operation.
Definition VPlan.h:2048
A special type of VPBasicBlock that wraps an existing IR basic block.
Definition VPlan.h:4406
Class to record and manage LLVM IR flags.
Definition VPlan.h:690
LLVM_ABI_FOR_TEST FastMathFlags getFastMathFlags() const
This is a concrete Recipe that models a single VPlan-level instruction.
Definition VPlan.h:1225
unsigned getNumOperandsWithoutMask() const
Returns the number of operands, excluding the mask if the VPInstruction is masked.
Definition VPlan.h:1456
iterator_range< operand_iterator > operandsWithoutMask()
Returns an iterator range over the operands excluding the mask operand if present.
Definition VPlan.h:1476
@ ComputeAnyOfResult
Compute the final result of a AnyOf reduction with select(cmp(),x,y), where one of (x,...
Definition VPlan.h:1272
@ ResumeForEpilogue
Explicit user for the resume phi of the canonical induction in the main VPlan, used by the epilogue v...
Definition VPlan.h:1330
@ ReductionStartVector
Start vector for reductions with 3 operands: the original start value, the identity value for the red...
Definition VPlan.h:1321
unsigned getOpcode() const
Definition VPlan.h:1405
void setName(StringRef NewName)
Set the symbolic name for the VPInstruction.
Definition VPlan.h:1504
VPValue * getMask() const
Returns the mask for the VPInstruction.
Definition VPlan.h:1470
bool isMasked() const
Returns true if the VPInstruction has a mask operand.
Definition VPlan.h:1446
VPInterleaveRecipe is a recipe for transforming an interleave group of loads or stores into one wide l...
Definition VPlan.h:2970
detail::zippy< llvm::detail::zip_first, VPUser::const_operand_range, const_incoming_blocks_range > incoming_values_and_blocks() const
Returns an iterator range over pairs of incoming values and corresponding incoming blocks.
Definition VPlan.h:1633
VPRecipeBase is a base class modeling a sequence of one or more output IR instructions.
Definition VPlan.h:406
DebugLoc getDebugLoc() const
Returns the debug location of the recipe.
Definition VPlan.h:555
void moveBefore(VPBasicBlock &BB, iplist< VPRecipeBase >::iterator I)
Unlink this recipe and insert into BB before I.
void insertBefore(VPRecipeBase *InsertPos)
Insert an unlinked recipe into a basic block immediately before the specified recipe.
iplist< VPRecipeBase >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Helper class to create VPRecipes from IR instructions.
VPRecipeBase * tryToCreateWidenNonPhiRecipe(VPSingleDefRecipe *R, VFRange &Range)
Create and return a widened recipe for a non-phi recipe R if one can be created within the given VF R...
VPHistogramRecipe * widenIfHistogram(VPInstruction *VPI)
If VPI represents a histogram operation (as determined by LoopVectorizationLegality) make that safe f...
VPValue * getVPValueOrAddLiveIn(Value *V)
VPRecipeBase * tryToWidenMemory(VPInstruction *VPI, VFRange &Range)
Check if the load or store instruction VPI should be widened for Range.Start and potentially masked.
bool replaceWithFinalIfReductionStore(VPInstruction *VPI, VPBuilder &FinalRedStoresBuilder)
If VPI is a store of a reduction into an invariant address, delete it.
VPReplicateRecipe * handleReplication(VPInstruction *VPI, VFRange &Range)
Build a VPReplicateRecipe for VPI.
bool isOrdered() const
Returns true, if the phi is part of an ordered reduction.
Definition VPlan.h:2761
unsigned getVFScaleFactor() const
Get the factor that the VF of this recipe's output should be scaled by, or 1 if it isn't scaled.
Definition VPlan.h:2740
bool isInLoop() const
Returns true if the phi is part of an in-loop reduction.
Definition VPlan.h:2764
RecurKind getRecurrenceKind() const
Returns the recurrence kind of the reduction.
Definition VPlan.h:2758
A recipe to represent inloop, ordered or partial reduction operations.
Definition VPlan.h:3063
VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks which form a Single-Entry-S...
Definition VPlan.h:4441
const VPBlockBase * getEntry() const
Definition VPlan.h:4477
VPCanonicalIVPHIRecipe * getCanonicalIV()
Returns the canonical induction recipe of the region.
Definition VPlan.h:4539
VPReplicateRecipe replicates a given instruction producing multiple scalar copies of the original sca...
Definition VPlan.h:3217
VPSingleDef is a base class for recipes for modeling a sequence of one or more output IR that define ...
Definition VPlan.h:607
Instruction * getUnderlyingInstr()
Returns the underlying instruction.
Definition VPlan.h:675
An analysis for type-inference for VPValues.
Type * inferScalarType(const VPValue *V)
Infer the type of V. Returns the scalar type of V.
This class augments VPValue with operands which provide the inverse def-use edges from VPValue's user...
Definition VPlanValue.h:296
void setOperand(unsigned I, VPValue *New)
Definition VPlanValue.h:340
operand_iterator op_begin()
Definition VPlanValue.h:360
VPValue * getOperand(unsigned N) const
Definition VPlanValue.h:335
This is the base class of the VPlan Def/Use graph, used for modeling the data flow into,...
Definition VPlanValue.h:46
Value * getLiveInIRValue() const
Return the underlying IR value for a VPIRValue.
Definition VPlan.cpp:137
VPRecipeBase * getDefiningRecipe()
Returns the recipe defining this VPValue or nullptr if it is not defined by a recipe,...
Definition VPlan.cpp:127
Value * getUnderlyingValue() const
Return the underlying Value attached to this VPValue.
Definition VPlanValue.h:70
void replaceAllUsesWith(VPValue *New)
Definition VPlan.cpp:1449
void replaceUsesWithIf(VPValue *New, llvm::function_ref< bool(VPUser &U, unsigned Idx)> ShouldReplace)
Go through the uses list for this VPValue and make each use point to New if the callback ShouldReplac...
Definition VPlan.cpp:1455
user_range users()
Definition VPlanValue.h:149
A recipe to compute a pointer to the last element of each part of a widened memory access for widened...
Definition VPlan.h:2154
A recipe to compute the pointers for widened memory accesses of SourceElementTy.
Definition VPlan.h:2227
VPWidenCastRecipe is a recipe to create vector cast instructions.
Definition VPlan.h:1840
A recipe for handling GEP instructions.
Definition VPlan.h:2090
A recipe for handling phi nodes of integer and floating-point inductions, producing their vector valu...
Definition VPlan.h:2454
A recipe for widened phis.
Definition VPlan.h:2590
VPWidenRecipe is a recipe for producing a widened instruction using the opcode and operands of the re...
Definition VPlan.h:1784
VPlan models a candidate for vectorization, encoding various decisions taken to produce efficient outp...
Definition VPlan.h:4571
bool hasVF(ElementCount VF) const
Definition VPlan.h:4784
ElementCount getSingleVF() const
Returns the single VF of the plan, asserting that the plan has exactly one VF.
Definition VPlan.h:4797
VPBasicBlock * getEntry()
Definition VPlan.h:4663
VPValue * getTripCount() const
The trip count of the original loop.
Definition VPlan.h:4721
VPSymbolicValue & getVFxUF()
Returns VF * UF of the vector loop region.
Definition VPlan.h:4760
bool hasUF(unsigned UF) const
Definition VPlan.h:4809
ArrayRef< VPIRBasicBlock * > getExitBlocks() const
Return an ArrayRef containing VPIRBasicBlocks wrapping the exit blocks of the original scalar loop.
Definition VPlan.h:4711
VPIRValue * getOrAddLiveIn(Value *V)
Gets the live-in VPIRValue for V or adds a new live-in (if none exists yet) for V.
Definition VPlan.h:4834
VPIRValue * getZero(Type *Ty)
Return a VPIRValue wrapping the null value of type Ty.
Definition VPlan.h:4860
LLVM_ABI_FOR_TEST VPRegionBlock * getVectorLoopRegion()
Returns the VPRegionBlock of the vector loop.
Definition VPlan.cpp:1067
bool hasEarlyExit() const
Returns true if the VPlan is based on a loop with an early exit.
Definition VPlan.h:4955
InstructionCost cost(ElementCount VF, VPCostContext &Ctx)
Return the cost of this plan.
Definition VPlan.cpp:1049
void resetTripCount(VPValue *NewTripCount)
Resets the trip count for the VPlan.
Definition VPlan.h:4735
VPBasicBlock * getMiddleBlock()
Returns the 'middle' block of the plan, that is the block that selects whether to execute the scalar ...
Definition VPlan.h:4688
VPSymbolicValue & getUF()
Returns the UF of the vector loop region.
Definition VPlan.h:4757
VPBasicBlock * getScalarPreheader() const
Return the VPBasicBlock for the preheader of the scalar loop.
Definition VPlan.h:4702
void execute(VPTransformState *State)
Generate the IR code for this VPlan.
Definition VPlan.cpp:922
VPIRBasicBlock * getScalarHeader() const
Return the VPIRBasicBlock wrapping the header of the scalar loop.
Definition VPlan.h:4707
VPBasicBlock * getVectorPreheader()
Returns the preheader of the vector loop region, if one exists, or null otherwise.
Definition VPlan.h:4668
VPSymbolicValue & getVF()
Returns the VF of the vector loop region.
Definition VPlan.h:4753
LLVM_ABI_FOR_TEST VPlan * duplicate()
Clone the current VPlan, update all VPValues of the new VPlan and cloned recipes to refer to the clon...
Definition VPlan.cpp:1215
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition Value.cpp:162
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:393
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:549
iterator_range< user_iterator > users()
Definition Value.h:426
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:709
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:318
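A minimal sketch of the Value APIs above: renaming a value and redirecting all of its uses to a replacement. OldV and NewV are assumed to have the same type; the helper name is illustrative.

// Minimal sketch of replaceAllUsesWith and setName.
#include "llvm/IR/Value.h"

static void replaceAndRename(llvm::Value *OldV, llvm::Value *NewV) {
  NewV->setName(OldV->getName());
  // Every user of OldV now refers to NewV instead.
  OldV->replaceAllUsesWith(NewV);
}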
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:202
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition DenseSet.h:175
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:230
constexpr bool isNonZero() const
Definition TypeSize.h:155
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:216
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr bool isFixed() const
Returns true if the quantity is not scaled by vscale.
Definition TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
constexpr bool isZero() const
Definition TypeSize.h:153
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:223
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
Definition TypeSize.h:252
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:237
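A minimal sketch of fixed versus scalable element counts, the quantities in which the vectorization-factor decisions above are expressed.

// Minimal sketch of ElementCount (a FixedOrScalableQuantity).
#include "llvm/Support/TypeSize.h"

static void demoElementCount() {
  llvm::ElementCount Fixed4 = llvm::ElementCount::getFixed(4);    // <4 x ty>
  llvm::ElementCount Scal4 = llvm::ElementCount::getScalable(4);  // <vscale x 4 x ty>
  // For scalable quantities only the minimum value is known at compile time.
  unsigned Min = Scal4.getKnownMinValue();
  (void)Min;
  // Comparisons mixing fixed and scalable counts are only "known" when they
  // hold for every possible vscale.
  bool KnownLE = llvm::ElementCount::isKnownLE(Fixed4, Scal4);
  (void)KnownLE;
}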
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
IteratorT end() const
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
Changed
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Entry
Definition COFF.h:862
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition CallingConv.h:76
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
Definition DwarfDebug.h:190
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
match_combine_or< Ty... > m_CombineOr(const Ty &...Ps)
Combine pattern matchers matching any of Ps patterns.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
auto m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
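A minimal sketch of the IR pattern matchers above; the value V is assumed to be supplied by the caller and the matched shape is illustrative.

// Minimal sketch of llvm::PatternMatch usage.
#include "llvm/IR/PatternMatch.h"

static bool isAddOfMul(llvm::Value *V) {
  using namespace llvm::PatternMatch;
  llvm::Value *A, *B, *C;
  // Matches (A * B) + C, capturing the operands on success.
  return match(V, m_Add(m_Mul(m_Value(A), m_Value(B)), m_Value(C)));
}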
bind_cst_ty m_scev_APInt(const APInt *&C)
Match an SCEV constant and bind it to an APInt.
specificloop_ty m_SpecificLoop(const Loop *L)
cst_pred_ty< is_specific_signed_cst > m_scev_SpecificSInt(int64_t V)
Match an SCEV constant with a plain signed integer (sign-extended value will be matched)
bind_ty< const SCEVMulExpr > m_scev_Mul(const SCEVMulExpr *&V)
bool match(const SCEV *S, const Pattern &P)
SCEVAffineAddRec_match< Op0_t, Op1_t, match_isa< const Loop > > m_scev_AffineAddRec(const Op0_t &Op0, const Op1_t &Op1)
SCEVBinaryExpr_match< SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagAnyWrap, true > m_scev_c_Mul(const Op0_t &Op0, const Op1_t &Op1)
int_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
bool matchFindIVResult(VPInstruction *VPI, Op0_t ReducedIV, Op1_t Start)
Match FindIV result pattern: select(icmp ne ComputeReductionResult(ReducedIV), Sentinel),...
VPInstruction_match< VPInstruction::ExtractLastLane, Op0_t > m_ExtractLastLane(const Op0_t &Op0)
VPInstruction_match< VPInstruction::BranchOnCount > m_BranchOnCount()
auto m_VPValue()
Match an arbitrary VPValue and ignore it.
VPInstruction_match< VPInstruction::ExtractLastPart, Op0_t > m_ExtractLastPart(const Op0_t &Op0)
bool match(Val *V, const Pattern &P)
VPInstruction_match< VPInstruction::ExtractLane, Op0_t, Op1_t > m_ExtractLane(const Op0_t &Op0, const Op1_t &Op1)
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
Add a small namespace to avoid name clashes with the classes used in the streaming interface.
DiagnosticInfoOptimizationBase::Argument NV
NodeAddr< InstrNode * > Instr
Definition RDFGraph.h:389
friend class Instruction
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
VPValue * getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr)
Get or create a VPValue that corresponds to the expansion of Expr.
VPBasicBlock * getFirstLoopHeader(VPlan &Plan, VPDominatorTree &VPDT)
Returns the header block of the first, top-level loop, or null if none exist.
bool isAddressSCEVForCost(const SCEV *Addr, ScalarEvolution &SE, const Loop *L)
Returns true if Addr is an address SCEV that can be passed to TTI::getAddressComputationCost,...
VPRecipeBase * findRecipe(VPValue *Start, PredT Pred)
Search Start's users for a recipe satisfying Pred, looking through recipes with definitions.
Definition VPlanUtils.h:111
VPSingleDefRecipe * findHeaderMask(VPlan &Plan)
Collect the header mask with the pattern: (ICMP_ULE, WideCanonicalIV, backedge-taken-count) TODO: Int...
const SCEV * getSCEVExprForVPValue(const VPValue *V, PredicatedScalarEvolution &PSE, const Loop *L=nullptr)
Return the SCEV expression for V.
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE, AssumptionCache *AC, MemorySSAUpdater *MSSAU, bool PreserveLCSSA)
Simplify each loop in a loop nest recursively.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Offset
Definition DWP.cpp:532
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:831
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ABI Value * addRuntimeChecks(Instruction *Loc, Loop *TheLoop, const SmallVectorImpl< RuntimePointerCheck > &PointerChecks, SCEVExpander &Expander, bool HoistRuntimeChecks=false)
Add code that checks at runtime if the accessed arrays in PointerChecks overlap.
auto cast_if_present(const Y &Val)
cast_if_present<X> - Functionally identical to cast, except that a null value is accepted.
Definition Casting.h:683
LLVM_ABI bool RemoveRedundantDbgInstrs(BasicBlock *BB)
Try to remove redundant dbg.value instructions from given basic block.
LLVM_ABI_FOR_TEST cl::opt< bool > VerifyEachVPlan
LLVM_ABI std::optional< unsigned > getLoopEstimatedTripCount(Loop *L, unsigned *EstimatedLoopInvocationWeight=nullptr)
Return either:
static void reportVectorization(OptimizationRemarkEmitter *ORE, Loop *TheLoop, VectorizationFactor VF, unsigned IC)
Report successful vectorization of the loop.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
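A minimal sketch of llvm::all_of, the range-based wrapper used throughout the vectorizer for legality and uniformity checks; the predicate here is illustrative.

// Minimal sketch of llvm::all_of over an instruction's operands.
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Instructions.h"

static bool allOperandsAreIntegers(const llvm::Instruction *I) {
  return llvm::all_of(I->operands(), [](const llvm::Use &U) {
    return U->getType()->isIntegerTy();
  });
}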
unsigned getLoadStoreAddressSpace(const Value *I)
A helper function that returns the address space of the pointer operand of load or store instruction.
LLVM_ABI Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)
Returns the min/max intrinsic used when expanding a min/max reduction.
LLVM_ABI Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
InstructionCost Cost
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
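A minimal sketch of the load/store helper functions referenced in this index (getLoadStorePointerOperand and friends), assuming I is known to be a load or store instruction.

// Minimal sketch of the load/store accessor helpers.
#include "llvm/IR/Instructions.h"

static void describeMemOp(llvm::Instruction *I) {
  const llvm::Value *Ptr = llvm::getLoadStorePointerOperand(I);
  llvm::Type *AccessTy = llvm::getLoadStoreType(I);
  llvm::Align Alignment = llvm::getLoadStoreAlignment(I);
  (void)Ptr;
  (void)AccessTy;
  (void)Alignment;
}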
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
Value * getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF)
Return the runtime value for VF.
LLVM_ABI bool formLCSSARecursively(Loop &L, const DominatorTree &DT, const LoopInfo *LI, ScalarEvolution *SE)
Put a loop nest into LCSSA form.
Definition LCSSA.cpp:449
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2208
LLVM_ABI bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:634
Align getLoadStoreAlignment(const Value *I)
A helper function that returns the alignment of load or store instruction.
iterator_range< df_iterator< VPBlockShallowTraversalWrapper< VPBlockBase * > > > vp_depth_first_shallow(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order.
Definition VPlanCFG.h:253
LLVM_ABI bool VerifySCEV
LLVM_ABI_FOR_TEST cl::opt< bool > VPlanPrintAfterAll
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
iterator_range< df_iterator< VPBlockDeepTraversalWrapper< VPBlockBase * > > > vp_depth_first_deep(VPBlockBase *G)
Returns an iterator range to traverse the graph starting at G in depth-first order while traversing t...
Definition VPlanCFG.h:279
SmallVector< VPRegisterUsage, 8 > calculateRegisterUsageForPlan(VPlan &Plan, ArrayRef< ElementCount > VFs, const TargetTransformInfo &TTI, const SmallPtrSetImpl< const Value * > &ValuesToIgnore)
Estimate the register usage for Plan and vectorization factors in VFs by calculating the highest numb...
auto map_range(ContainerTy &&C, FuncTy F)
Return a range that applies F to the elements of C.
Definition STLExtras.h:366
constexpr auto bind_front(FnT &&Fn, BindArgsT &&...BindArgs)
C++20 bind_front.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1746
void collectEphemeralRecipesForVPlan(VPlan &Plan, DenseSet< VPRecipeBase * > &EphRecipes)
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
bool containsIrreducibleCFG(RPOTraversalT &RPOTraversal, const LoopInfoT &LI)
Return true if the control flow in RPOTraversal is irreducible.
Definition CFG.h:154
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1636
LLVM_ABI_FOR_TEST cl::opt< bool > EnableWideActiveLaneMask
UncountableExitStyle
Different methods of handling early exits.
Definition VPlan.h:83
@ ReadOnly
No side effects to worry about, so we can process any uncountable exits in the loop and branch either...
Definition VPlan.h:88
@ MaskedHandleExitInScalarLoop
All memory operations other than the load(s) required to determine whether an uncountable exit occurr...
Definition VPlan.h:93
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1753
LLVM_ABI cl::opt< bool > EnableLoopVectorization
constexpr uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
LLVM_ABI_FOR_TEST cl::list< std::string > VPlanPrintAfterPasses
LLVM_ABI bool wouldInstructionBeTriviallyDead(const Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction would have no side effects if it was not used.
Definition Local.cpp:422
SmallVector< ValueTypeFromRangeType< R >, Size > to_vector(R &&Range)
Given a range of type R, iterate the entire range and return a SmallVector with elements of the vecto...
Type * toVectorizedTy(Type *Ty, ElementCount EC)
A helper for converting to vectorized types.
LLVM_ABI void llvm_unreachable_internal(const char *msg=nullptr, const char *file=nullptr, unsigned line=0)
This function calls abort(), and prints the optional message to stderr.
T * find_singleton(R &&Range, Predicate P, bool AllowRepeats=false)
Return the single value in Range that satisfies P(<member of Range> *, AllowRepeats)->T * returning n...
Definition STLExtras.h:1837
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
cl::opt< unsigned > ForceTargetInstructionCost
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
format_object< Ts... > format(const char *Fmt, const Ts &... Vals)
These are helper functions used to produce formatted output.
Definition Format.h:129
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition MathExtras.h:394
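A minimal sketch of the integer math helpers above, in the style of rounding a trip count up to a multiple of VF*UF; the function name is illustrative.

// Minimal sketch of divideCeil-based rounding.
#include "llvm/Support/MathExtras.h"

static unsigned roundedIterations(unsigned TripCount, unsigned VFxUF) {
  // divideCeil(10, 4) == 3, so roundedIterations(10, 4) == 12.
  return llvm::divideCeil(TripCount, VFxUF) * VFxUF;
}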
bool canVectorizeTy(Type *Ty)
Returns true if Ty is a valid vector element type, void, or an unpacked literal struct where all elem...
TargetTransformInfo TTI
static void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I=nullptr, DebugLoc DL={})
Reports an informative message: print Msg for debugging purposes as well as an optimization remark.
LLVM_ABI bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
RecurKind
These are the kinds of recurrences that we support.
@ Or
Bitwise or logical OR of integers.
@ FMulAdd
Sum of float products with llvm.fmuladd(a * b + sum).
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
LLVM_ABI Value * getRecurrenceIdentity(RecurKind K, Type *Tp, FastMathFlags FMF)
Given information about a recurrence kind, return the identity for the @llvm.vector....
LLVM_ABI BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="")
Split the specified block at the specified instruction.
LLVM_ABI void reportVectorizationFailure(const StringRef DebugMsg, const StringRef OREMsg, const StringRef ORETag, OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I=nullptr)
Reports a vectorization failure: print DebugMsg for debugging purposes along with the corresponding o...
DWARFExpression::Operation Op
ScalarEpilogueLowering
@ CM_ScalarEpilogueNotAllowedLowTripLoop
@ CM_ScalarEpilogueNotNeededUsePredicate
@ CM_ScalarEpilogueNotAllowedOptSize
@ CM_ScalarEpilogueAllowed
@ CM_ScalarEpilogueNotAllowedUsePredicate
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
ArrayRef(const T &OneElt) -> ArrayRef< T >
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1772
Value * emitTransformedIndex(IRBuilderBase &B, Value *Index, Value *StartValue, Value *Step, InductionDescriptor::InductionKind InductionKind, const BinaryOperator *InductionBinOp)
Compute the transformed value of Index at offset StartValue using step StepValue.
auto predecessors(const MachineBasicBlock *BB)
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition iterator.h:368
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
cl::opt< bool > EnableVPlanNativePath
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
ArrayRef< Type * > getContainedTypes(Type *const &Ty)
Returns the types contained in Ty.
LLVM_ABI Value * addDiffRuntimeChecks(Instruction *Loc, ArrayRef< PointerDiffInfo > Checks, SCEVExpander &Expander, function_ref< Value *(IRBuilderBase &, unsigned)> GetVF, unsigned IC)
bool pred_empty(const BasicBlock *BB)
Definition CFG.h:119
@ None
Don't use tail folding.
@ DataWithEVL
Use predicated EVL instructions for tail-folding.
@ DataAndControlFlow
Use predicate to control both data and control flow.
@ DataWithoutLaneMask
Same as Data, but avoids using the get.active.lane.mask intrinsic to calculate the mask and instead i...
@ Data
Use predicate only to mask operations on data in the loop.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI bool hasBranchWeightMD(const Instruction &I)
Checks if an instruction has Branch Weight Metadata.
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
Definition Hashing.h:592
@ Increment
Incrementally increasing token ID.
Definition AllocToken.h:26
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
Definition bit.h:345
Type * toVectorTy(Type *Scalar, ElementCount EC)
A helper function for converting Scalar types to vector types.
std::unique_ptr< VPlan > VPlanPtr
Definition VPlan.h:78
constexpr detail::IsaCheckPredicate< Types... > IsaPred
Function object wrapper for the llvm::isa type check.
Definition Casting.h:866
LLVM_ABI_FOR_TEST bool verifyVPlanIsValid(const VPlan &Plan)
Verify invariants for general VPlans.
LLVM_ABI MapVector< Instruction *, uint64_t > computeMinimumValueSizes(ArrayRef< BasicBlock * > Blocks, DemandedBits &DB, const TargetTransformInfo *TTI=nullptr)
Compute a map of integer instructions to their minimum legal type size.
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Definition Hashing.h:466
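An illustrative use of the iterator-pair overload shown above: hash an entire operand list in one call.
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/SmallVector.h"

llvm::hash_code hashAll(const llvm::SmallVectorImpl<unsigned> &Ops) {
  return llvm::hash_combine_range(Ops.begin(), Ops.end());
}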
LLVM_ABI_FOR_TEST cl::opt< bool > VPlanPrintVectorRegionScope
LLVM_ABI cl::opt< bool > EnableLoopInterleaving
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
A special type used by analysis passes to provide an address that identifies that particular analysis pass type.
Definition Analysis.h:29
static LLVM_ABI void collectEphemeralValues(const Loop *L, AssumptionCache *AC, SmallPtrSetImpl< const Value * > &EphValues)
Collect a loop's ephemeral values (those used only by an assume or similar intrinsics in the loop).
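An illustrative use (wrapper name is hypothetical): gather values that only feed llvm.assume and similar intrinsics in a loop so a cost model can skip them.
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/LoopInfo.h"

// Illustrative: Eph receives values that can be ignored when costing the loop.
void collectIgnorableValues(const llvm::Loop *L, llvm::AssumptionCache *AC,
                            llvm::SmallPtrSetImpl<const llvm::Value *> &Eph) {
  llvm::CodeMetrics::collectEphemeralValues(L, AC, Eph);
}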
An information struct used to provide DenseMap with the various necessary components for a given value type T.
Encapsulate information regarding vectorization of a loop and its epilogue.
EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF, ElementCount EVF, unsigned EUF, VPlan &EpiloguePlan)
A class that represents two vectorization factors (initialized with 0 by default).
static FixedScalableVFPair getNone()
This holds details about a histogram operation – a load -> update -> store sequence where each lane i...
TargetLibraryInfo * TLI
LLVM_ABI LoopVectorizeResult runImpl(Function &F)
LLVM_ABI bool processLoop(Loop *L)
ProfileSummaryInfo * PSI
LoopAccessInfoManager * LAIs
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI LoopVectorizePass(LoopVectorizeOptions Opts={})
ScalarEvolution * SE
AssumptionCache * AC
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
OptimizationRemarkEmitter * ORE
std::function< BlockFrequencyInfo &()> GetBFI
TargetTransformInfo * TTI
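A hedged sketch of driving the pass from a standalone tool via the new pass manager; this is standard PassBuilder boilerplate with error handling omitted, not how the pass is wired up inside the default pipelines.
#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Transforms/Vectorize/LoopVectorize.h"

void runLoopVectorizeOn(llvm::Function &F) {
  llvm::PassBuilder PB;
  llvm::LoopAnalysisManager LAM;
  llvm::FunctionAnalysisManager FAM;
  llvm::CGSCCAnalysisManager CGAM;
  llvm::ModuleAnalysisManager MAM;
  // Register all analyses and cross-register the proxies so loop and function
  // analyses can be queried from the function pass.
  PB.registerModuleAnalyses(MAM);
  PB.registerCGSCCAnalyses(CGAM);
  PB.registerFunctionAnalyses(FAM);
  PB.registerLoopAnalyses(LAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

  llvm::FunctionPassManager FPM;
  FPM.addPass(llvm::LoopVectorizePass()); // default LoopVectorizeOptions
  llvm::PreservedAnalyses PA = FPM.run(F, FAM);
  (void)PA;
}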
Storage for information about the changes made.
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70
A marker analysis to determine if extra passes should be run after loop vectorization.
static LLVM_ABI AnalysisKey Key
Holds the VFShape for a specific scalar to vector function mapping.
std::optional< unsigned > getParamIndexForOptionalMask() const
Instruction Set Architecture.
Encapsulates information needed to describe a parameter.
A range of powers-of-2 vectorization factors with fixed start and adjustable end.
ElementCount End
Struct to hold various analysis needed for cost computations.
LoopVectorizationCostModel & CM
bool skipCostComputation(Instruction *UI, bool IsVector) const
Return true if the cost for UI shouldn't be computed, e.g. because it has already been pre-computed.
InstructionCost getLegacyCost(Instruction *UI, ElementCount VF) const
Return the cost for UI with VF using the legacy cost model as fallback until computing the cost of all recipes migrates to VPlan.
uint64_t getPredBlockCostDivisor(BasicBlock *BB) const
TargetTransformInfo::TargetCostKind CostKind
SmallPtrSet< Instruction *, 8 > SkipCostComputation
A VPValue representing a live-in from the input IR or a constant.
Definition VPlanValue.h:207
A struct that represents some properties of the register usage of a loop.
InstructionCost spillCost(VPCostContext &Ctx, unsigned OverrideMaxNumRegs=0) const
Calculate the estimated cost of any spills due to using more registers than the number available for ...
VPTransformState holds information passed down when "executing" a VPlan, needed for generating the output IR.
A recipe for widening load operations, using the address to load from and an optional mask.
Definition VPlan.h:3619
A recipe for widening store operations, using the stored value, the address to store to and an option...
Definition VPlan.h:3702
static LLVM_ABI_FOR_TEST bool tryToConvertVPInstructionsToVPRecipes(VPlan &Plan, const TargetLibraryInfo &TLI)
Replaces the VPInstructions in Plan with corresponding widen recipes.
static void makeMemOpWideningDecisions(VPlan &Plan, VFRange &Range, VPRecipeBuilder &RecipeBuilder)
Convert load/store VPInstructions in Plan into widened or replicate recipes.
static void addMinimumIterationCheck(VPlan &Plan, ElementCount VF, unsigned UF, ElementCount MinProfitableTripCount, bool RequiresScalarEpilogue, bool TailFolded, Loop *OrigLoop, const uint32_t *MinItersBypassWeights, DebugLoc DL, PredicatedScalarEvolution &PSE, VPBasicBlock *CheckBlock=nullptr)
static void materializeBroadcasts(VPlan &Plan)
Add explicit broadcasts for live-ins and VPValues defined in Plan's entry block if they are used as vectors.
static void materializePacksAndUnpacks(VPlan &Plan)
Add explicit Build[Struct]Vector recipes to Pack multiple scalar values into vectors and Unpack recip...
static LLVM_ABI_FOR_TEST std::unique_ptr< VPlan > buildVPlan0(Loop *TheLoop, LoopInfo &LI, Type *InductionTy, DebugLoc IVDL, PredicatedScalarEvolution &PSE, LoopVersioning *LVer=nullptr)
Create a base VPlan0, serving as the common starting point for all later candidates.
static bool simplifyKnownEVL(VPlan &Plan, ElementCount VF, PredicatedScalarEvolution &PSE)
Try to simplify VPInstruction::ExplicitVectorLength recipes when the AVL is known to be <= VF,...
static void removeBranchOnConst(VPlan &Plan, bool OnlyLatches=false)
Remove BranchOnCond recipes with true or false conditions together with removing dead edges to their successors.
static void introduceMasksAndLinearize(VPlan &Plan)
Predicate and linearize the control-flow in the only loop region of Plan.
static void materializeFactors(VPlan &Plan, VPBasicBlock *VectorPH, ElementCount VF)
Materialize UF, VF and VFxUF to be computed explicitly using VPInstructions.
static void createInLoopReductionRecipes(VPlan &Plan, const DenseSet< BasicBlock * > &BlocksNeedingPredication, ElementCount MinVF)
Create VPReductionRecipes for in-loop reductions.
static void foldTailByMasking(VPlan &Plan)
Adapts the vector loop region for tail folding by introducing a header mask and conditionally executi...
static void materializeBackedgeTakenCount(VPlan &Plan, VPBasicBlock *VectorPH)
Materialize the backedge-taken count to be computed explicitly using VPInstructions.
static void addMinimumVectorEpilogueIterationCheck(VPlan &Plan, Value *VectorTripCount, bool RequiresScalarEpilogue, ElementCount EpilogueVF, unsigned EpilogueUF, unsigned MainLoopStep, unsigned EpilogueLoopStep, ScalarEvolution &SE)
Add a check to Plan to see if the epilogue vector loop should be executed.
static void addActiveLaneMask(VPlan &Plan, bool UseActiveLaneMaskForControlFlow)
Replace (ICMP_ULE, wide canonical IV, backedge-taken-count) checks with an (active-lane-mask recipe,...
static bool handleMultiUseReductions(VPlan &Plan, OptimizationRemarkEmitter *ORE, Loop *TheLoop)
Try to legalize reductions with multiple in-loop uses.
static void dropPoisonGeneratingRecipes(VPlan &Plan, const std::function< bool(BasicBlock *)> &BlockNeedsPredication)
Drop poison flags from recipes that may generate a poison value that is used after vectorization,...
static void convertToVariableLengthStep(VPlan &Plan)
Transform loops with variable-length stepping after region dissolution.
static void createInterleaveGroups(VPlan &Plan, const SmallPtrSetImpl< const InterleaveGroup< Instruction > * > &InterleaveGroups, VPRecipeBuilder &RecipeBuilder, const bool &ScalarEpilogueAllowed)
static void addBranchWeightToMiddleTerminator(VPlan &Plan, ElementCount VF, std::optional< unsigned > VScaleForTuning)
Add branch weight metadata, if the Plan's middle block is terminated by a BranchOnCond recipe.
static std::unique_ptr< VPlan > narrowInterleaveGroups(VPlan &Plan, const TargetTransformInfo &TTI)
Try to find a single VF among Plan's VFs for which all interleave groups (with known minimum VF eleme...
static bool handleFindLastReductions(VPlan &Plan)
Check if Plan contains any FindLast reductions.
static void unrollByUF(VPlan &Plan, unsigned UF)
Explicitly unroll Plan by UF.
static DenseMap< const SCEV *, Value * > expandSCEVs(VPlan &Plan, ScalarEvolution &SE)
Expand VPExpandSCEVRecipes in Plan's entry block.
static void convertToConcreteRecipes(VPlan &Plan)
Lower abstract recipes to concrete ones, that can be codegen'd.
static void expandBranchOnTwoConds(VPlan &Plan)
Expand BranchOnTwoConds instructions into explicit CFG with BranchOnCond instructions.
static void hoistPredicatedLoads(VPlan &Plan, PredicatedScalarEvolution &PSE, const Loop *L)
Hoist predicated loads from the same address to the loop entry block, if they are guaranteed to execu...
static void optimizeFindIVReductions(VPlan &Plan, PredicatedScalarEvolution &PSE, Loop &L)
Optimize FindLast reductions selecting IVs (or expressions of IVs) by converting them to FindIV reduc...
static void convertToAbstractRecipes(VPlan &Plan, VPCostContext &Ctx, VFRange &Range)
This function converts initial recipes to the abstract recipes and clamps Range based on cost model f...
static void materializeConstantVectorTripCount(VPlan &Plan, ElementCount BestVF, unsigned BestUF, PredicatedScalarEvolution &PSE)
static void addExitUsersForFirstOrderRecurrences(VPlan &Plan, VFRange &Range)
Handle users in the exit block for first-order recurrences in the original exit block.
static void createHeaderPhiRecipes(VPlan &Plan, PredicatedScalarEvolution &PSE, Loop &OrigLoop, const MapVector< PHINode *, InductionDescriptor > &Inductions, const MapVector< PHINode *, RecurrenceDescriptor > &Reductions, const SmallPtrSetImpl< const PHINode * > &FixedOrderRecurrences, const SmallPtrSetImpl< PHINode * > &InLoopReductions, bool AllowReordering)
Replace VPPhi recipes in Plan's header with corresponding VPHeaderPHIRecipe subclasses for inductions...
static void addExplicitVectorLength(VPlan &Plan, const std::optional< unsigned > &MaxEVLSafeElements)
Add a VPCurrentIterationPHIRecipe and related recipes to Plan and replaces all uses except the canoni...
static void optimizeEVLMasks(VPlan &Plan)
Optimize recipes which use an EVL-based header mask to VP intrinsics, for example:
static LLVM_ABI_FOR_TEST bool handleEarlyExits(VPlan &Plan, UncountableExitStyle Style, Loop *TheLoop, PredicatedScalarEvolution &PSE, DominatorTree &DT, AssumptionCache *AC)
Update Plan to account for all early exits.
static void replaceSymbolicStrides(VPlan &Plan, PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &StridesMap)
Replace symbolic strides from StridesMap in Plan with constants when possible.
static bool handleMaxMinNumReductions(VPlan &Plan)
Check if Plan contains any FMaxNum or FMinNum reductions.
static LLVM_ABI_FOR_TEST void createLoopRegions(VPlan &Plan)
Replace loops in Plan's flat CFG with VPRegionBlocks, turning Plan's flat CFG into a hierarchical CFG...
static void removeDeadRecipes(VPlan &Plan)
Remove dead recipes from Plan.
static void attachCheckBlock(VPlan &Plan, Value *Cond, BasicBlock *CheckBlock, bool AddBranchWeights)
Wrap runtime check block CheckBlock in a VPIRBB and Cond in a VPValue and connect the block to Plan,...
static void simplifyRecipes(VPlan &Plan)
Perform instcombine-like simplifications on recipes in Plan.
static void sinkPredicatedStores(VPlan &Plan, PredicatedScalarEvolution &PSE, const Loop *L)
Sink predicated stores to the same address with complementary predicates (P and NOT P) to an unconditional store.
static void replicateByVF(VPlan &Plan, ElementCount VF)
Replace replicating VPReplicateRecipe, VPScalarIVStepsRecipe and VPInstruction in Plan with VF single...
static void addIterationCountCheckBlock(VPlan &Plan, ElementCount VF, unsigned UF, bool RequiresScalarEpilogue, Loop *OrigLoop, const uint32_t *MinItersBypassWeights, DebugLoc DL, PredicatedScalarEvolution &PSE)
Add a new check block before the vector preheader to Plan to check if the main vector loop should be executed.
static void clearReductionWrapFlags(VPlan &Plan)
Clear NSW/NUW flags from reduction instructions if necessary.
static void optimizeInductionLiveOutUsers(VPlan &Plan, PredicatedScalarEvolution &PSE, bool FoldTail)
If there's a single exit block, optimize its phi recipes that use exiting IV values by feeding them p...
static void createPartialReductions(VPlan &Plan, VPCostContext &CostCtx, VFRange &Range)
Detect and create partial reduction recipes for scaled reductions in Plan.
static void cse(VPlan &Plan)
Perform common-subexpression-elimination on Plan.
static void materializeVectorTripCount(VPlan &Plan, VPBasicBlock *VectorPHVPBB, bool TailByMasking, bool RequiresScalarEpilogue, VPValue *Step)
Materialize vector trip count computations to a set of VPInstructions.
static LLVM_ABI_FOR_TEST void optimize(VPlan &Plan)
Apply VPlan-to-VPlan optimizations to Plan, including induction recipe optimizations,...
static void dissolveLoopRegions(VPlan &Plan)
Replace loop regions with explicit CFG.
static void truncateToMinimalBitwidths(VPlan &Plan, const MapVector< Instruction *, uint64_t > &MinBWs)
Insert truncates and extends for any truncated recipe.
static bool adjustFixedOrderRecurrences(VPlan &Plan, VPBuilder &Builder)
Try to have all users of fixed-order recurrences appear after the recipe defining their previous valu...
static void optimizeForVFAndUF(VPlan &Plan, ElementCount BestVF, unsigned BestUF, PredicatedScalarEvolution &PSE)
Optimize Plan based on BestVF and BestUF.
static void convertEVLExitCond(VPlan &Plan)
Replaces the exit condition from (branch-on-cond eq CanonicalIVInc, VectorTripCount) to (branch-on-co...
static LLVM_ABI_FOR_TEST void addMiddleCheck(VPlan &Plan, bool TailFolded)
If a check is needed to guard executing the scalar epilogue loop, it will be added to the middle block.
TODO: The following VectorizationFactor was pulled out of the LoopVectorizationCostModel class.
InstructionCost Cost
Cost of the loop with that width.
ElementCount MinProfitableTripCount
The minimum trip count required to make vectorization profitable, e.g. due to runtime checks.
ElementCount Width
Vector width with best cost.
InstructionCost ScalarCost
Cost of the scalar loop.
static VectorizationFactor Disabled()
Width 1 means no vectorization, cost 0 means uncomputed cost.
static LLVM_ABI bool HoistRuntimeChecks