LoopVectorizationLegality.h
//===- llvm/Transforms/Vectorize/LoopVectorizationLegality.h ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file defines the LoopVectorizationLegality class. Original code
/// in the Loop Vectorizer has been moved out into its own file for modularity
/// and reusability.
///
/// Currently, it works for innermost loop vectorization. Extending this to
/// outer loop vectorization is a TODO item.
///
/// Also provides:
/// 1) LoopVectorizeHints class which keeps a number of loop annotations
/// locally for easy look up. It has the ability to write them back as
/// loop metadata, upon request.
/// 2) LoopVectorizationRequirements class for lazy bail-out, used to report
/// a useful failure-to-vectorize message.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZATIONLEGALITY_H
#define LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZATIONLEGALITY_H

#include "llvm/ADT/MapVector.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Support/TypeSize.h"
#include "llvm/Transforms/Utils/LoopUtils.h"

namespace llvm {
class AssumptionCache;
class BasicBlock;
class BlockFrequencyInfo;
class DemandedBits;
class DominatorTree;
class Function;
class Loop;
class LoopInfo;
class Metadata;
class OptimizationRemarkEmitter;
class PredicatedScalarEvolution;
class ProfileSummaryInfo;
class TargetLibraryInfo;
class TargetTransformInfo;
class Type;

/// Utility class for getting and setting loop vectorizer hints in the form
/// of loop metadata.
/// This class keeps a number of loop annotations locally (as member variables)
/// and can, upon request, write them back as metadata on the loop. It will
/// initially scan the loop for existing metadata, and will update the local
/// values based on information in the loop.
/// We cannot write all values to metadata, as the mere presence of some info,
/// for example 'force', means a decision has been made. So, we need to be
/// careful NOT to add them if the user hasn't specifically asked for it.
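///
/// As an illustrative sketch (not exhaustive), a frontend pragma such as the
/// one below is typically what produces the loop metadata these hints are
/// read from:
/// \code
///   void saxpy(float *X, float *Y, float A, int N) {
///     #pragma clang loop vectorize_width(4) interleave_count(2)
///     for (int I = 0; I < N; ++I)
///       Y[I] = A * X[I] + Y[I];
///   }
///   // Typically lowered to loop metadata along the lines of:
///   //   !{!"llvm.loop.vectorize.width", i32 4}
///   //   !{!"llvm.loop.interleave.count", i32 2}
/// \endcode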
class LoopVectorizeHints {
  enum HintKind {
    HK_WIDTH,
    HK_INTERLEAVE,
    HK_FORCE,
    HK_ISVECTORIZED,
    HK_PREDICATE,
    HK_SCALABLE
  };

  /// Hint - associates name and validation with the hint value.
  struct Hint {
    const char *Name;
    unsigned Value; // This may have to change for non-numeric values.
    HintKind Kind;

    Hint(const char *Name, unsigned Value, HintKind Kind)
        : Name(Name), Value(Value), Kind(Kind) {}

    bool validate(unsigned Val);
  };

  /// Vectorization width.
  Hint Width;

  /// Vectorization interleave factor.
  Hint Interleave;

  /// Vectorization forced
  Hint Force;

  /// Already Vectorized
  Hint IsVectorized;

  /// Vector Predicate
  Hint Predicate;

  /// Says whether we should use fixed width or scalable vectorization.
  Hint Scalable;

  /// Return the loop metadata prefix.
  static StringRef Prefix() { return "llvm.loop."; }

  /// True if there is any unsafe math in the loop.
  bool PotentiallyUnsafe = false;

public:
  enum ForceKind {
    FK_Undefined = -1, ///< Not selected.
    FK_Disabled = 0,   ///< Forcing disabled.
    FK_Enabled = 1,    ///< Forcing enabled.
  };

  enum ScalableForceKind {
    /// Not selected.
    SK_Unspecified = -1,
    /// Disables vectorization with scalable vectors.
    SK_FixedWidthOnly = 0,
    /// Vectorize loops using scalable vectors or fixed-width vectors, but favor
    /// scalable vectors when the cost-model is inconclusive. This is the
    /// default when the scalable.enable hint is enabled through a pragma.
    SK_PreferScalable = 1
  };

  LoopVectorizeHints(const Loop *L, bool InterleaveOnlyWhenForced,
                     OptimizationRemarkEmitter &ORE,
                     const TargetTransformInfo *TTI = nullptr);

  /// Mark the loop L as already vectorized by setting the width to 1.
  void setAlreadyVectorized();

  bool allowVectorization(Function *F, Loop *L,
                          bool VectorizeOnlyWhenForced) const;

  /// Dumps all the hint information.
  void emitRemarkWithHints() const;

  ElementCount getWidth() const {
    return ElementCount::get(Width.Value, (ScalableForceKind)Scalable.Value ==
                                              SK_PreferScalable);
  }
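
  // Illustrative example (an assumption about typical frontend behaviour):
  // if the hints were parsed from "#pragma clang loop vectorize_width(4,
  // scalable)", Width.Value is 4 and Scalable.Value is SK_PreferScalable, so
  // getWidth() returns ElementCount::get(4, /*Scalable=*/true), i.e. a
  // vectorization factor of "vscale x 4".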

  unsigned getInterleave() const {
    if (Interleave.Value)
      return Interleave.Value;
    // If interleaving is not explicitly set, assume that if we do not want
    // unrolling, we also don't want any interleaving.
    if (hasUnrollTransformation(TheLoop) & TM_Disable)
      return 1;
    return 0;
  }
  unsigned getIsVectorized() const { return IsVectorized.Value; }
  unsigned getPredicate() const { return Predicate.Value; }
  enum ForceKind getForce() const {
    if ((ForceKind)Force.Value == FK_Undefined &&
        hasDisableAllTransformsHint(TheLoop))
      return FK_Disabled;
    return (ForceKind)Force.Value;
  }

  /// \return true if scalable vectorization has been explicitly disabled.
  bool isScalableVectorizationDisabled() const {
    return (ScalableForceKind)Scalable.Value == SK_FixedWidthOnly;
  }

  /// If hints are provided that force vectorization, use the AlwaysPrint
  /// pass name to force the frontend to print the diagnostic.
  const char *vectorizeAnalysisPassName() const;

  /// When enabling loop hints are provided, we allow the vectorizer to change
  /// the order of operations that is given by the scalar loop. This is not
  /// enabled by default because it can be unsafe or inefficient. For example,
  /// reordering floating-point operations will change the way round-off
  /// error accumulates in the loop.
  bool allowReordering() const;

  bool isPotentiallyUnsafe() const {
    // Avoid FP vectorization if the target is unsure about proper support.
    // This may be related to the SIMD unit in the target not handling
    // IEEE 754 FP ops properly, or bad single-to-double promotions.
    // Otherwise, a sequence of vectorized loops, even without reduction,
    // could lead to different end results on the destination vectors.
    return getForce() != LoopVectorizeHints::FK_Enabled && PotentiallyUnsafe;
  }

  void setPotentiallyUnsafe() { PotentiallyUnsafe = true; }

private:
  /// Find hints specified in the loop metadata and update local values.
  void getHintsFromMetadata();

  /// Checks string hint with one operand and set value if valid.
  void setHint(StringRef Name, Metadata *Arg);

  /// The loop these hints belong to.
  const Loop *TheLoop;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter &ORE;
};

/// This holds vectorization requirements that must be verified late in
/// the process. The requirements are set by legalize and costmodel. Once
/// vectorization has been determined to be possible and profitable the
/// requirements can be verified by looking for metadata or compiler options.
/// For example, some loops require FP commutativity which is only allowed if
/// vectorization is explicitly specified or if the fast-math compiler option
/// has been provided.
/// Late evaluation of these requirements allows helpful diagnostics to be
/// composed that tell the user what needs to be done to vectorize the loop.
/// For example, by specifying #pragma clang loop vectorize or -ffast-math.
/// Late evaluation should be used only when diagnostics can be generated that
/// can be followed by a non-expert user.
class LoopVectorizationRequirements {
public:
  /// Track the 1st floating-point instruction that can not be reassociated.
  void addExactFPMathInst(Instruction *I) {
    if (I && !ExactFPMathInst)
      ExactFPMathInst = I;
  }

  Instruction *getExactFPInst() { return ExactFPMathInst; }

private:
  Instruction *ExactFPMathInst = nullptr;
};

/// This holds details about a histogram operation -- a load -> update -> store
/// sequence where each lane in a vector might be updating the same element as
/// another lane.
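///
/// For illustration (a sketch, not part of the original header), the canonical
/// pattern this describes is a counting loop such as:
/// \code
///   void countBuckets(unsigned *Buckets, const unsigned *Indices, unsigned N) {
///     for (unsigned I = 0; I < N; ++I)
///       Buckets[Indices[I]]++; // load, add 1, store to a data-dependent address
///   }
/// \endcode
/// When vectorized, several lanes may refer to the same bucket, so the
/// load/update/store triple has to be handled specially.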
struct HistogramInfo {
  LoadInst *Load;
  Instruction *Update;
  StoreInst *Store;

  HistogramInfo(LoadInst *Load, Instruction *Update, StoreInst *Store)
      : Load(Load), Update(Update), Store(Store) {}
};

/// LoopVectorizationLegality checks if it is legal to vectorize a loop, and
/// to what vectorization factor.
/// This class does not look at the profitability of vectorization, only the
/// legality. This class has two main kinds of checks:
/// * Memory checks - The code in canVectorizeMemory checks if vectorization
/// will change the order of memory accesses in a way that will change the
/// correctness of the program.
/// * Scalars checks - The code in canVectorizeInstrs and canVectorizeMemory
/// checks for a number of different conditions, such as the availability of a
/// single induction variable, that all types are supported and vectorize-able,
/// etc. This code reflects the capabilities of InnerLoopVectorizer.
/// This class is also used by InnerLoopVectorizer for identifying the
/// induction variables and the different reduction variables.
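///
/// A rough usage sketch (assuming the analysis results are already available,
/// e.g. from the pass manager; the local variable names are illustrative):
/// \code
///   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, F, LAIs, LI, ORE,
///                                 &Requirements, &Hints, DB, AC, BFI, PSI);
///   if (LVL.canVectorize(/*UseVPlanNativePath=*/false)) {
///     // Legal to vectorize; profitability is decided by the cost model.
///   }
/// \endcode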
class LoopVectorizationLegality {
public:
  LoopVectorizationLegality(Loop *L, PredicatedScalarEvolution &PSE,
                            DominatorTree *DT, TargetTransformInfo *TTI,
                            TargetLibraryInfo *TLI, Function *F,
                            LoopAccessInfoManager &LAIs, LoopInfo *LI,
                            OptimizationRemarkEmitter *ORE,
                            LoopVectorizationRequirements *R,
                            LoopVectorizeHints *H, DemandedBits *DB,
                            AssumptionCache *AC, BlockFrequencyInfo *BFI,
                            ProfileSummaryInfo *PSI)
      : TheLoop(L), LI(LI), PSE(PSE), TTI(TTI), TLI(TLI), DT(DT), LAIs(LAIs),
        ORE(ORE), Requirements(R), Hints(H), DB(DB), AC(AC), BFI(BFI),
        PSI(PSI) {}

  /// ReductionList contains the reduction descriptors for all
  /// of the reductions that were found in the loop.
  using ReductionList = MapVector<PHINode *, RecurrenceDescriptor>;

  /// InductionList saves induction variables and maps them to the
  /// induction descriptor.
  using InductionList = MapVector<PHINode *, InductionDescriptor>;

  /// RecurrenceSet contains the phi nodes that are recurrences other than
  /// inductions and reductions.
  using RecurrenceSet = SmallPtrSet<const PHINode *, 8>;

  /// Returns true if it is legal to vectorize this loop.
  /// This does not mean that it is profitable to vectorize this
  /// loop, only that it is legal to do so.
  /// Temporarily taking UseVPlanNativePath parameter. If true, take
  /// the new code path being implemented for outer loop vectorization
  /// (should be functional for inner loop vectorization) based on VPlan.
  /// If false, good old LV code.
  bool canVectorize(bool UseVPlanNativePath);

  /// Returns true if it is legal to vectorize the FP math operations in this
  /// loop. Vectorizing is legal if we allow reordering of FP operations, or if
  /// we can use in-order reductions.
  bool canVectorizeFPMath(bool EnableStrictReductions);

  /// Return true if we can vectorize this loop while folding its tail by
  /// masking.
  bool canFoldTailByMasking() const;

  /// Mark all respective loads/stores for masking. Must only be called when
  /// tail-folding is possible.
  void prepareToFoldTailByMasking();

  /// Returns the primary induction variable.
  PHINode *getPrimaryInduction() { return PrimaryInduction; }

  /// Returns the reduction variables found in the loop.
  const ReductionList &getReductionVars() const { return Reductions; }

  /// Returns the recurrence descriptor associated with a given phi node \p PN,
  /// expecting one to exist.
  const RecurrenceDescriptor &getRecurrenceDescriptor(PHINode *PN) const {
    assert(isReductionVariable(PN) &&
           "only reductions have recurrence descriptors");
    return Reductions.find(PN)->second;
  }

  /// Returns the induction variables found in the loop.
  const InductionList &getInductionVars() const { return Inductions; }

  /// Return the fixed-order recurrences found in the loop.
  RecurrenceSet &getFixedOrderRecurrences() { return FixedOrderRecurrences; }

  /// Returns the widest induction type.
  IntegerType *getWidestInductionType() { return WidestIndTy; }

  /// Returns True if given store is a final invariant store of one of the
  /// reductions found in the loop.
  bool isInvariantStoreOfReduction(StoreInst *SI);

  /// Returns True if given address is invariant and is used to store recurrent
  /// expression.
  bool isInvariantAddressOfReduction(Value *V);

  /// Returns True if V is a Phi node of an induction variable in this loop.
  bool isInductionPhi(const Value *V) const;

  /// Returns a pointer to the induction descriptor, if \p Phi is an integer or
  /// floating point induction.
  const InductionDescriptor *getIntOrFpInductionDescriptor(PHINode *Phi) const;

  /// Returns a pointer to the induction descriptor, if \p Phi is pointer
  /// induction.
  const InductionDescriptor *getPointerInductionDescriptor(PHINode *Phi) const;

  /// Returns True if V is a cast that is part of an induction def-use chain,
  /// and had been proven to be redundant under a runtime guard (in other
  /// words, the cast has the same SCEV expression as the induction phi).
  bool isCastedInductionVariable(const Value *V) const;

  /// Returns True if V can be considered as an induction variable in this
  /// loop. V can be the induction phi, or some redundant cast in the def-use
  /// chain of the induction phi.
  bool isInductionVariable(const Value *V) const;

  /// Returns True if PN is a reduction variable in this loop.
  bool isReductionVariable(PHINode *PN) const { return Reductions.count(PN); }

  /// Returns True if Phi is a fixed-order recurrence in this loop.
  bool isFixedOrderRecurrence(const PHINode *Phi) const;

  /// Return true if the block BB needs to be predicated in order for the loop
  /// to be vectorized.
  bool blockNeedsPredication(BasicBlock *BB) const;

  /// Check if this pointer is consecutive when vectorizing. This happens
  /// when the last index of the GEP is the induction variable, or that the
  /// pointer itself is an induction variable.
  /// This check allows us to vectorize A[idx] into a wide load/store.
  /// Returns:
  /// 0 - Stride is unknown or non-consecutive.
  /// 1 - Address is consecutive.
  /// -1 - Address is consecutive, and decreasing.
  /// NOTE: This method must only be used before modifying the original scalar
  /// loop. Do not use after invoking 'createVectorizedLoopSkeleton' (PR34965).
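  ///
  /// As an illustrative sketch (accesses in a loop over induction variable I,
  /// assuming unit-sized elements):
  /// \code
  ///   for (int I = 0; I < N; ++I) {
  ///     A[I]     = 0; // consecutive, increasing  -> returns  1
  ///     B[N - I] = 0; // consecutive, decreasing  -> returns -1
  ///     C[2 * I] = 0; // strided, not consecutive -> returns  0
  ///   }
  /// \endcode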
  int isConsecutivePtr(Type *AccessTy, Value *Ptr) const;

  /// Returns true if \p V is invariant across all loop iterations according to
  /// SCEV.
  bool isInvariant(Value *V) const;

  /// Returns true if value V is uniform across \p VF lanes, when \p VF is
  /// provided, and otherwise if \p V is invariant across all loop iterations.
  bool isUniform(Value *V, ElementCount VF) const;

  /// A uniform memory op is a load or store which accesses the same memory
  /// location on all \p VF lanes, if \p VF is provided and otherwise if the
  /// memory location is invariant.
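  ///
  /// For example (an illustrative sketch), in
  /// \code
  ///   for (int I = 0; I < N; ++I)
  ///     Sum += *Ptr; // Ptr does not depend on I
  /// \endcode
  /// the load of *Ptr reads the same location on every lane, so it is a
  /// uniform memory op.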
  bool isUniformMemOp(Instruction &I, ElementCount VF) const;

  /// Returns the information that we collected about runtime memory check.
  const RuntimePointerChecking *getRuntimePointerChecking() const {
    return LAI->getRuntimePointerChecking();
  }

  const LoopAccessInfo *getLAI() const { return LAI; }

  bool isSafeForAnyVectorWidth() const {
    return LAI->getDepChecker().isSafeForAnyVectorWidth() &&
           LAI->getDepChecker().isSafeForAnyStoreLoadForwardDistances();
  }

  uint64_t getMaxSafeVectorWidthInBits() const {
    return LAI->getDepChecker().getMaxSafeVectorWidthInBits();
  }

  /// Returns true if the loop has exactly one uncountable early exit, i.e. an
  /// uncountable exit that isn't the latch block.
  bool hasUncountableEarlyExit() const { return UncountableExitingBB; }

  /// Returns the uncountable early exiting block, if there is exactly one.
  BasicBlock *getUncountableEarlyExitingBlock() const {
    return UncountableExitingBB;
  }

  /// Return true if there are no store-load forwarding dependencies
  /// restricting the safe distance.
  bool isSafeForAnyStoreLoadForwardDistances() const {
    return LAI->getDepChecker().isSafeForAnyStoreLoadForwardDistances();
  }

  /// Return safe power-of-2 number of elements, which do not prevent store-load
  /// forwarding and are safe to operate on simultaneously.
  uint64_t getMaxStoreLoadForwardSafeDistanceInBits() const {
    return LAI->getDepChecker().getStoreLoadForwardSafeDistanceInBits();
  }

  /// Returns true if vector representation of the instruction \p I
  /// requires mask.
  bool isMaskRequired(const Instruction *I) const {
    return MaskedOp.contains(I);
  }

  /// Returns true if there is at least one function call in the loop which
  /// has a vectorized variant available.
  bool hasVectorCallVariants() const { return VecCallVariantsFound; }

  unsigned getNumStores() const { return LAI->getNumStores(); }
  unsigned getNumLoads() const { return LAI->getNumLoads(); }

  /// Returns a HistogramInfo* for the given instruction if it was determined
  /// to be part of a load -> update -> store sequence where multiple lanes
  /// may be working on the same memory address.
  std::optional<const HistogramInfo *> getHistogramInfo(Instruction *I) const {
    for (const HistogramInfo &HGram : Histograms)
      if (HGram.Load == I || HGram.Update == I || HGram.Store == I)
        return &HGram;

    return std::nullopt;
  }

  /// Returns true if the loop contains any known histogram operations.
  bool hasHistograms() const { return !Histograms.empty(); }

  /// Returns potentially faulting loads.
  const SmallPtrSetImpl<const Instruction *> &
  getPotentiallyFaultingLoads() const {
    return PotentiallyFaultingLoads;
  }

  PredicatedScalarEvolution *getPredicatedScalarEvolution() const {
    return &PSE;
  }

  Loop *getLoop() const { return TheLoop; }

  LoopInfo *getLoopInfo() const { return LI; }

  AssumptionCache *getAssumptionCache() const { return AC; }

  ScalarEvolution *getScalarEvolution() const { return PSE.getSE(); }

  DominatorTree *getDominatorTree() const { return DT; }

  /// Returns all exiting blocks with a countable exit, i.e. the
  /// exit-not-taken count is known exactly at compile time.
  const SmallVector<BasicBlock *, 4> &getCountableExitingBlocks() const {
    return CountableExitingBlocks;
  }

private:
  /// Return true if the pre-header, exiting and latch blocks of \p Lp and all
  /// its nested loops are considered legal for vectorization. These legal
  /// checks are common for inner and outer loop vectorization.
  /// Temporarily taking UseVPlanNativePath parameter. If true, take
  /// the new code path being implemented for outer loop vectorization
  /// (should be functional for inner loop vectorization) based on VPlan.
  /// If false, good old LV code.
  bool canVectorizeLoopNestCFG(Loop *Lp, bool UseVPlanNativePath);

  /// Set up outer loop inductions by checking Phis in outer loop header for
  /// supported inductions (int inductions). Return false if any of these Phis
  /// is not a supported induction or if we fail to find an induction.
  bool setupOuterLoopInductions();

  /// Return true if the pre-header, exiting and latch blocks of \p Lp
  /// (non-recursive) are considered legal for vectorization.
  /// Temporarily taking UseVPlanNativePath parameter. If true, take
  /// the new code path being implemented for outer loop vectorization
  /// (should be functional for inner loop vectorization) based on VPlan.
  /// If false, good old LV code.
  bool canVectorizeLoopCFG(Loop *Lp, bool UseVPlanNativePath);

  /// Check if a single basic block loop is vectorizable.
  /// At this point we know that this is a loop with a constant trip count
  /// and we only need to check individual instructions.
  bool canVectorizeInstrs();

  /// Check if an individual instruction is vectorizable.
  bool canVectorizeInstr(Instruction &I);

  /// When we vectorize loops we may change the order in which
  /// we read and write from memory. This method checks if it is
  /// legal to vectorize the code, considering only memory constraints.
  /// Returns true if the loop is vectorizable.
  bool canVectorizeMemory();

  /// If LAA cannot determine whether all dependences are safe, we may be able
  /// to further analyse some IndirectUnsafe dependences and if they match a
  /// certain pattern (like a histogram) then we may still be able to vectorize.
  bool canVectorizeIndirectUnsafeDependences();

  /// Return true if we can vectorize this loop using the IF-conversion
  /// transformation.
  bool canVectorizeWithIfConvert();

  /// Return true if we can vectorize this outer loop. The method performs
  /// specific checks for outer loop vectorization.
  bool canVectorizeOuterLoop();

  /// Returns true if this is an early exit loop that can be vectorized.
  /// Currently, a loop with an uncountable early exit is considered
  /// vectorizable if:
  /// 1. There are no writes to memory in the loop.
  /// 2. The loop has only one early uncountable exit.
  /// 3. The early exit block dominates the latch block.
  /// 4. The latch block has an exact exit count.
  /// 5. The loop does not contain reductions or recurrences.
  /// 6. We can prove at compile-time that loops will not contain faulting
  /// loads.
  /// 7. It is safe to speculatively execute instructions such as divide or
  /// call instructions.
  /// The list above is not based on theoretical limitations of vectorization,
  /// but simply a statement that more work is needed to support these
  /// additional cases safely.
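  ///
  /// A sketch of the kind of loop this targets (illustrative only):
  /// \code
  ///   int findFirst(const int *A, int N, int X) {
  ///     for (int I = 0; I < N; ++I) // countable latch exit
  ///       if (A[I] == X)            // uncountable early exit
  ///         return I;
  ///     return -1;
  ///   }
  /// \endcode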
  bool isVectorizableEarlyExitLoop();

  /// Return true if all of the instructions in the block can be speculatively
  /// executed, and record the loads/stores that require masking.
  /// \p SafePtrs is a list of addresses that are known to be legal and we know
  /// that we can read from them without segfault.
  /// \p MaskedOp is a list of instructions that have to be transformed into
  /// calls to the appropriate masked intrinsic when the loop is vectorized
  /// or dropped if the instruction is a conditional assume intrinsic.
  bool
  blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs,
                       SmallPtrSetImpl<const Instruction *> &MaskedOp) const;

  /// Updates the vectorization state by adding \p Phi to the inductions list.
  /// This can set \p Phi as the main induction of the loop if \p Phi is a
  /// better choice for the main induction than the existing one.
  void addInductionPhi(PHINode *Phi, const InductionDescriptor &ID,
                       SmallPtrSetImpl<Value *> &AllowedExit);

  /// The loop that we evaluate.
  Loop *TheLoop;

  /// Loop Info analysis.
  LoopInfo *LI;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks.
  /// Applies dynamic knowledge to simplify SCEV expressions in the context
  /// of existing SCEV assumptions. The analysis will also add a minimal set
  /// of new predicates if this is required to enable vectorization and
  /// unrolling.
  PredicatedScalarEvolution &PSE;

  /// Target Transform Info.
  TargetTransformInfo *TTI;

  /// Target Library Info.
  TargetLibraryInfo *TLI;

  /// Dominator Tree.
  DominatorTree *DT;

  // LoopAccess analysis.
  LoopAccessInfoManager &LAIs;

  const LoopAccessInfo *LAI = nullptr;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  // --- vectorization state --- //

  /// Holds the primary induction variable. This is the counter of the
  /// loop.
  PHINode *PrimaryInduction = nullptr;

  /// Holds the reduction variables.
  ReductionList Reductions;

  /// Holds all of the induction variables that we found in the loop.
  /// Notice that inductions don't need to start at zero and that induction
  /// variables can be pointers.
  InductionList Inductions;

  /// Holds all the casts that participate in the update chain of the induction
  /// variables, and that have been proven to be redundant (possibly under a
  /// runtime guard). These casts can be ignored when creating the vectorized
  /// loop body.
  SmallPtrSet<Instruction *, 4> InductionCastsToIgnore;

  /// Holds the phi nodes that are fixed-order recurrences.
  RecurrenceSet FixedOrderRecurrences;

  /// Holds the widest induction type encountered.
  IntegerType *WidestIndTy = nullptr;

  /// Allowed outside users. This holds the variables that can be accessed from
  /// outside the loop.
  SmallPtrSet<Value *, 4> AllowedExit;

  /// Vectorization requirements that will go through late-evaluation.
  LoopVectorizationRequirements *Requirements;

  /// Used to emit an analysis of any legality issues.
  LoopVectorizeHints *Hints;

  /// The demanded bits analysis is used to compute the minimum type size in
  /// which a reduction can be computed.
  DemandedBits *DB;

  /// The assumption cache analysis is used to compute the minimum type size in
  /// which a reduction can be computed.
  AssumptionCache *AC;

  /// While vectorizing these instructions we have to generate a
  /// call to the appropriate masked intrinsic or drop them in case of
  /// conditional assumes.
  SmallPtrSet<const Instruction *, 8> MaskedOp;

  /// Contains all identified histogram operations, which are sequences of
  /// load -> update -> store instructions where multiple lanes in a vector
  /// may work on the same memory location.
  SmallVector<HistogramInfo, 1> Histograms;

  /// Hold potentially faulting loads.
  SmallPtrSet<const Instruction *, 4> PotentiallyFaultingLoads;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  /// If we discover function calls within the loop which have a valid
  /// vectorized variant, record that fact so that LoopVectorize can
  /// (potentially) make a better decision on the maximum VF and enable
  /// the use of those function variants.
  bool VecCallVariantsFound = false;

  /// Keep track of all the countable and uncountable exiting blocks if
  /// the exact backedge taken count is not computable.
  SmallVector<BasicBlock *, 4> CountableExitingBlocks;

  /// Keep track of an uncountable exiting block, if there is exactly one early
  /// exit.
  BasicBlock *UncountableExitingBB = nullptr;
};

} // namespace llvm

#endif // LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZATIONLEGALITY_H