LLVM 23.0.0git
LoopVectorizationLegality.h
Go to the documentation of this file.
1//===- llvm/Transforms/Vectorize/LoopVectorizationLegality.h ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file defines the LoopVectorizationLegality class. Original code
11/// in Loop Vectorizer has been moved out to its own file for modularity
12/// and reusability.
13///
14/// Currently, it works for innermost loop vectorization. Extending this to
15/// outer loop vectorization is a TODO item.
16///
17/// Also provides:
18/// 1) LoopVectorizeHints class which keeps a number of loop annotations
19/// locally for easy look up. It has the ability to write them back as
20/// loop metadata, upon request.
21/// 2) LoopVectorizationRequirements class for lazy bail out for the purpose
22/// of reporting useful failure to vectorize message.
23//
24//===----------------------------------------------------------------------===//
25
26#ifndef LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZATIONLEGALITY_H
27#define LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZATIONLEGALITY_H
28
29#include "llvm/ADT/MapVector.h"
33
34namespace llvm {
35class AssumptionCache;
36class BasicBlock;
38class DemandedBits;
39class DominatorTree;
40class Function;
41class Loop;
42class LoopInfo;
43class Metadata;
49class Type;
50
51/// Utility class for getting and setting loop vectorizer hints in the form
52/// of loop metadata.
53/// This class keeps a number of loop annotations locally (as member variables)
54/// and can, upon request, write them back as metadata on the loop. It will
55/// initially scan the loop for existing metadata, and will update the local
56/// values based on information in the loop.
57/// We cannot write all values to metadata, as the mere presence of some info,
58/// for example 'force', means a decision has been made. So, we need to be
59/// careful NOT to add them if the user hasn't specifically asked so.
61 enum HintKind {
62 HK_WIDTH,
63 HK_INTERLEAVE,
64 HK_FORCE,
65 HK_ISVECTORIZED,
66 HK_PREDICATE,
67 HK_SCALABLE
68 };
69
70 /// Hint - associates name and validation with the hint value.
71 struct Hint {
72 const char *Name;
73 unsigned Value; // This may have to change for non-numeric values.
74 HintKind Kind;
75
76 Hint(const char *Name, unsigned Value, HintKind Kind)
77 : Name(Name), Value(Value), Kind(Kind) {}
78
79 bool validate(unsigned Val);
80 };
81
82 /// Vectorization width.
83 Hint Width;
84
85 /// Vectorization interleave factor.
86 Hint Interleave;
87
88 /// Vectorization forced
89 Hint Force;
90
91 /// Already Vectorized
92 Hint IsVectorized;
93
94 /// Vector Predicate
95 Hint Predicate;
96
97 /// Says whether we should use fixed width or scalable vectorization.
98 Hint Scalable;
99
100 /// Return the loop metadata prefix.
101 static StringRef Prefix() { return "llvm.loop."; }
102
103 /// True if there is any unsafe math in the loop.
104 bool PotentiallyUnsafe = false;
105
106public:
108 FK_Undefined = -1, ///< Not selected.
109 FK_Disabled = 0, ///< Forcing disabled.
110 FK_Enabled = 1, ///< Forcing enabled.
111 };
112
114 /// Not selected.
116 /// Disables vectorization with scalable vectors.
118 /// Vectorize loops using scalable vectors or fixed-width vectors, but favor
119 /// scalable vectors when the cost-model is inconclusive. This is the
120 /// default when the scalable.enable hint is enabled through a pragma.
122 /// Always vectorize loops using scalable vectors if feasible (i.e. the plan
123 /// has a valid cost and is not restricted by fixed-length dependence
124 /// distances).
126 };
127
128 LoopVectorizeHints(const Loop *L, bool InterleaveOnlyWhenForced,
130 const TargetTransformInfo *TTI = nullptr);
131
132 /// Mark the loop L as already vectorized by setting the width to 1.
134
136 bool VectorizeOnlyWhenForced) const;
137
138 /// Dumps all the hint information.
139 void emitRemarkWithHints() const;
140
142 return ElementCount::get(
143 Width.Value,
144 (ScalableForceKind)Scalable.Value == SK_PreferScalable ||
145 (ScalableForceKind)Scalable.Value == SK_AlwaysScalable);
146 }
147
148 unsigned getInterleave() const {
149 if (Interleave.Value)
150 return Interleave.Value;
151 // If interleaving is not explicitly set, assume that if we do not want
152 // unrolling, we also don't want any interleaving.
154 return 1;
155 return 0;
156 }
157 unsigned getIsVectorized() const { return IsVectorized.Value; }
158 unsigned getPredicate() const { return Predicate.Value; }
159 enum ForceKind getForce() const {
160 if ((ForceKind)Force.Value == FK_Undefined &&
162 return FK_Disabled;
163 return (ForceKind)Force.Value;
164 }
165
166 /// \return true if scalable vectorization has been explicitly disabled.
168 return (ScalableForceKind)Scalable.Value == SK_FixedWidthOnly;
169 }
170
171 /// \return true if scalable vectorization is always preferred over
172 /// fixed-length when feasible, regardless of cost.
174 return (ScalableForceKind)Scalable.Value == SK_AlwaysScalable;
175 }
176
177 /// If hints are provided that force vectorization, use the AlwaysPrint
178 /// pass name to force the frontend to print the diagnostic.
179 const char *vectorizeAnalysisPassName() const;
180
181 /// When enabling loop hints are provided we allow the vectorizer to change
182 /// the order of operations that is given by the scalar loop. This is not
183 /// enabled by default because can be unsafe or inefficient. For example,
184 /// reordering floating-point operations will change the way round-off
185 /// error accumulates in the loop.
186 bool allowReordering() const;
187
188 bool isPotentiallyUnsafe() const {
189 // Avoid FP vectorization if the target is unsure about proper support.
190 // This may be related to the SIMD unit in the target not handling
191 // IEEE 754 FP ops properly, or bad single-to-double promotions.
192 // Otherwise, a sequence of vectorized loops, even without reduction,
193 // could lead to different end results on the destination vectors.
194 return getForce() != LoopVectorizeHints::FK_Enabled && PotentiallyUnsafe;
195 }
196
197 void setPotentiallyUnsafe() { PotentiallyUnsafe = true; }
198
199private:
200 /// Find hints specified in the loop metadata and update local values.
201 void getHintsFromMetadata();
202
203 /// Checks string hint with one operand and set value if valid.
204 void setHint(StringRef Name, Metadata *Arg);
205
206 /// The loop these hints belong to.
207 const Loop *TheLoop;
208
209 /// Interface to emit optimization remarks.
211
212 /// Reports a condition where loop vectorization is disallowed: prints
213 /// \p DebugMsg for debugging purposes along with the corresponding
214 /// optimization remark \p RemarkName, with \p RemarkMsg as the user-facing
215 /// message. The loop \p L is used for the location of the remark.
216 void reportDisallowedVectorization(const StringRef DebugMsg,
217 const StringRef RemarkName,
218 const StringRef RemarkMsg,
219 const Loop *L) const;
220};
221
222/// This holds vectorization requirements that must be verified late in
223/// the process. The requirements are set by legalize and costmodel. Once
224/// vectorization has been determined to be possible and profitable the
225/// requirements can be verified by looking for metadata or compiler options.
226/// For example, some loops require FP commutativity which is only allowed if
227/// vectorization is explicitly specified or if the fast-math compiler option
228/// has been provided.
229/// Late evaluation of these requirements allows helpful diagnostics to be
230/// composed that tells the user what need to be done to vectorize the loop. For
231/// example, by specifying #pragma clang loop vectorize or -ffast-math. Late
232/// evaluation should be used only when diagnostics can generated that can be
233/// followed by a non-expert user.
235public:
236 /// Track the 1st floating-point instruction that can not be reassociated.
238 if (I && !ExactFPMathInst)
239 ExactFPMathInst = I;
240 }
241
242 Instruction *getExactFPInst() { return ExactFPMathInst; }
243
244private:
245 Instruction *ExactFPMathInst = nullptr;
246};
247
248/// This holds details about a histogram operation -- a load -> update -> store
249/// sequence where each lane in a vector might be updating the same element as
250/// another lane.
259
260/// Indicates the characteristics of a loop with an uncountable exit.
261/// * None -- No uncountable exit present.
262/// * ReadOnly -- At least one uncountable exit in a readonly loop.
263/// * ReadWrite -- At least one uncountable exit in a loop with side effects
264/// that may require masking.
266
267/// LoopVectorizationLegality checks if it is legal to vectorize a loop, and
268/// to what vectorization factor.
269/// This class does not look at the profitability of vectorization, only the
270/// legality. This class has two main kinds of checks:
271/// * Memory checks - The code in canVectorizeMemory checks if vectorization
272/// will change the order of memory accesses in a way that will change the
273/// correctness of the program.
274/// * Scalars checks - The code in canVectorizeInstrs and canVectorizeMemory
275/// checks for a number of different conditions, such as the availability of a
276/// single induction variable, that all types are supported and vectorize-able,
277/// etc. This code reflects the capabilities of InnerLoopVectorizer.
278/// This class is also used by InnerLoopVectorizer for identifying
279/// induction variable and the different reduction variables.
281public:
287 AssumptionCache *AC, bool AllowRuntimeSCEVChecks, AAResults *AA)
288 : TheLoop(L), LI(LI), PSE(PSE), TTI(TTI), TLI(TLI), DT(DT), LAIs(LAIs),
289 ORE(ORE), Requirements(R), Hints(H), DB(DB), AC(AC),
290 AllowRuntimeSCEVChecks(AllowRuntimeSCEVChecks), AA(AA) {}
291
292 /// ReductionList contains the reduction descriptors for all
293 /// of the reductions that were found in the loop.
295
296 /// InductionList saves induction variables and maps them to the
297 /// induction descriptor.
299
300 /// RecurrenceSet contains the phi nodes that are recurrences other than
301 /// inductions and reductions.
303
304 /// Returns true if it is legal to vectorize this loop.
305 /// This does not mean that it is profitable to vectorize this
306 /// loop, only that it is legal to do so.
307 /// Temporarily taking UseVPlanNativePath parameter. If true, take
308 /// the new code path being implemented for outer loop vectorization
309 /// (should be functional for inner loop vectorization) based on VPlan.
310 /// If false, good old LV code.
311 bool canVectorize(bool UseVPlanNativePath);
312
313 /// Returns true if it is legal to vectorize the FP math operations in this
314 /// loop. Vectorizing is legal if we allow reordering of FP operations, or if
315 /// we can use in-order reductions.
316 bool canVectorizeFPMath(bool EnableStrictReductions);
317
318 /// Return true if we can vectorize this loop while folding its tail by
319 /// masking.
320 bool canFoldTailByMasking() const;
321
322 /// Mark all respective loads/stores for masking. Must only be called when
323 /// tail-folding is possible.
325
326 /// Returns the primary induction variable.
327 PHINode *getPrimaryInduction() { return PrimaryInduction; }
328
329 /// Returns the reduction variables found in the loop.
330 const ReductionList &getReductionVars() const { return Reductions; }
331
332 /// Returns the recurrence descriptor associated with a given phi node \p PN,
333 /// expecting one to exist.
336 "only reductions have recurrence descriptors");
337 return Reductions.find(PN)->second;
338 }
339
340 /// Returns the induction variables found in the loop.
341 const InductionList &getInductionVars() const { return Inductions; }
342
343 /// Return the fixed-order recurrences found in the loop.
344 RecurrenceSet &getFixedOrderRecurrences() { return FixedOrderRecurrences; }
345
346 /// Returns the widest induction type.
347 IntegerType *getWidestInductionType() { return WidestIndTy; }
348
349 /// Returns True if given store is a final invariant store of one of the
350 /// reductions found in the loop.
352
353 /// Returns True if given address is invariant and is used to store recurrent
354 /// expression
356
357 /// Returns True if V is a Phi node of an induction variable in this loop.
358 bool isInductionPhi(const Value *V) const;
359
360 /// Returns a pointer to the induction descriptor, if \p Phi is an integer or
361 /// floating point induction.
363
364 /// Returns a pointer to the induction descriptor, if \p Phi is pointer
365 /// induction.
367
368 /// Returns True if V is a cast that is part of an induction def-use chain,
369 /// and had been proven to be redundant under a runtime guard (in other
370 /// words, the cast has the same SCEV expression as the induction phi).
371 bool isCastedInductionVariable(const Value *V) const;
372
373 /// Returns True if V can be considered as an induction variable in this
374 /// loop. V can be the induction phi, or some redundant cast in the def-use
375 /// chain of the inducion phi.
376 bool isInductionVariable(const Value *V) const;
377
378 /// Returns True if PN is a reduction variable in this loop.
379 bool isReductionVariable(PHINode *PN) const { return Reductions.count(PN); }
380
381 /// Returns True if Phi is a fixed-order recurrence in this loop.
382 bool isFixedOrderRecurrence(const PHINode *Phi) const;
383
384 /// Return true if the block BB needs to be predicated in order for the loop
385 /// to be vectorized.
386 bool blockNeedsPredication(const BasicBlock *BB) const;
387
388 /// Check if this pointer is consecutive when vectorizing. This happens
389 /// when the last index of the GEP is the induction variable, or that the
390 /// pointer itself is an induction variable.
391 /// This check allows us to vectorize A[idx] into a wide load/store.
392 /// Returns:
393 /// 0 - Stride is unknown or non-consecutive.
394 /// 1 - Address is consecutive.
395 /// -1 - Address is consecutive, and decreasing.
396 /// NOTE: This method must only be used before modifying the original scalar
397 /// loop. Do not use after invoking 'createVectorizedLoopSkeleton' (PR34965).
398 int isConsecutivePtr(Type *AccessTy, Value *Ptr) const;
399
400 /// Returns true if \p V is invariant across all loop iterations according to
401 /// SCEV.
402 bool isInvariant(Value *V) const;
403
404 /// Returns true if value V is uniform across \p VF lanes, when \p VF is
405 /// provided, and otherwise if \p V is invariant across all loop iterations.
406 bool isUniform(Value *V, ElementCount VF) const;
407
408 /// A uniform memory op is a load or store which accesses the same memory
409 /// location on all \p VF lanes, if \p VF is provided and otherwise if the
410 /// memory location is invariant.
411 bool isUniformMemOp(Instruction &I, ElementCount VF) const;
412
413 /// Returns the information that we collected about runtime memory check.
415 return LAI->getRuntimePointerChecking();
416 }
417
418 const LoopAccessInfo *getLAI() const { return LAI; }
419
421 return LAI->getDepChecker().isSafeForAnyVectorWidth() &&
422 LAI->getDepChecker().isSafeForAnyStoreLoadForwardDistances();
423 }
424
426 return LAI->getDepChecker().getMaxSafeVectorWidthInBits();
427 }
428
429 /// Returns information about whether this loop contains at least one
430 /// uncountable early exit, and if so, if it also contains instructions (such
431 /// as stores) that cause side-effects.
433 return UncountableExitType;
434 }
435
436 /// Returns true if the loop has uncountable early exits, i.e. uncountable
437 /// exits that aren't the latch block.
441
442 /// Returns true if this is an early exit loop with state-changing or
443 /// potentially-faulting operations and the condition for the uncountable
444 /// exit must be determined before any of the state changes or potentially
445 /// faulting operations take place.
449
450 /// Return true if there is store-load forwarding dependencies.
452 return LAI->getDepChecker().isSafeForAnyStoreLoadForwardDistances();
453 }
454
455 /// Return safe power-of-2 number of elements, which do not prevent store-load
456 /// forwarding and safe to operate simultaneously.
458 return LAI->getDepChecker().getStoreLoadForwardSafeDistanceInBits();
459 }
460
461 /// Returns true if instruction \p I requires a mask for vectorization.
462 /// This accounts for both control flow masking (conditionally executed
463 /// blocks) and tail-folding masking (predicated loop vectorization).
464 bool isMaskRequired(const Instruction *I, bool TailFolded) const {
465 if (TailFolded)
466 return TailFoldedMaskedOp.contains(I);
467 return ConditionallyExecutedOps.contains(I);
468 }
469
470 /// Returns true if there is at least one function call in the loop which
471 /// has a vectorized variant available.
472 bool hasVectorCallVariants() const { return VecCallVariantsFound; }
473
474 unsigned getNumStores() const { return LAI->getNumStores(); }
475 unsigned getNumLoads() const { return LAI->getNumLoads(); }
476
477 /// Returns a HistogramInfo* for the given instruction if it was determined
478 /// to be part of a load -> update -> store sequence where multiple lanes
479 /// may be working on the same memory address.
480 std::optional<const HistogramInfo *> getHistogramInfo(Instruction *I) const {
481 for (const HistogramInfo &HGram : Histograms)
482 if (HGram.Load == I || HGram.Update == I || HGram.Store == I)
483 return &HGram;
484
485 return std::nullopt;
486 }
487
488 /// Returns a list of all known histogram operations in the loop.
489 bool hasHistograms() const { return !Histograms.empty(); }
490
494
495 Loop *getLoop() const { return TheLoop; }
496
497 LoopInfo *getLoopInfo() const { return LI; }
498
499 AssumptionCache *getAssumptionCache() const { return AC; }
500
501 ScalarEvolution *getScalarEvolution() const { return PSE.getSE(); }
502
503 DominatorTree *getDominatorTree() const { return DT; }
504
505 /// Returns all exiting blocks with a countable exit, i.e. the
506 /// exit-not-taken count is known exactly at compile time.
508 return CountableExitingBlocks;
509 }
510
511private:
512 /// Return true if the pre-header, exiting and latch blocks of \p Lp and all
513 /// its nested loops are considered legal for vectorization. These legal
514 /// checks are common for inner and outer loop vectorization.
515 /// Temporarily taking UseVPlanNativePath parameter. If true, take
516 /// the new code path being implemented for outer loop vectorization
517 /// (should be functional for inner loop vectorization) based on VPlan.
518 /// If false, good old LV code.
519 bool canVectorizeLoopNestCFG(Loop *Lp, bool UseVPlanNativePath);
520
521 /// Set up outer loop inductions by checking Phis in outer loop header for
522 /// supported inductions (int inductions). Return false if any of these Phis
523 /// is not a supported induction or if we fail to find an induction.
524 bool setupOuterLoopInductions();
525
526 /// Return true if the pre-header, exiting and latch blocks of \p Lp
527 /// (non-recursive) are considered legal for vectorization.
528 /// Temporarily taking UseVPlanNativePath parameter. If true, take
529 /// the new code path being implemented for outer loop vectorization
530 /// (should be functional for inner loop vectorization) based on VPlan.
531 /// If false, good old LV code.
532 bool canVectorizeLoopCFG(Loop *Lp, bool UseVPlanNativePath);
533
534 /// Check if a single basic block loop is vectorizable.
535 /// At this point we know that this is a loop with a constant trip count
536 /// and we only need to check individual instructions.
537 bool canVectorizeInstrs();
538
539 /// Check if an individual instruction is vectorizable.
540 bool canVectorizeInstr(Instruction &I);
541
542 /// When we vectorize loops we may change the order in which
543 /// we read and write from memory. This method checks if it is
544 /// legal to vectorize the code, considering only memory constrains.
545 /// Returns true if the loop is vectorizable
546 bool canVectorizeMemory();
547
548 /// If LAA cannot determine whether all dependences are safe, we may be able
549 /// to further analyse some IndirectUnsafe dependences and if they match a
550 /// certain pattern (like a histogram) then we may still be able to vectorize.
551 bool canVectorizeIndirectUnsafeDependences();
552
553 /// Return true if we can vectorize this loop using the IF-conversion
554 /// transformation.
555 bool canVectorizeWithIfConvert();
556
557 /// Return true if we can vectorize this outer loop. The method performs
558 /// specific checks for outer loop vectorization.
559 bool canVectorizeOuterLoop();
560
561 /// Returns true if this is an early exit loop that can be vectorized.
562 /// Currently, a loop with an uncountable early exit is considered
563 /// vectorizable if:
564 /// 1. Writes to memory will access different underlying objects than
565 /// any load used as part of the uncountable exit condition.
566 /// 2. The loop has only one early uncountable exit
567 /// 3. The early exit block dominates the latch block.
568 /// 4. The latch block has an exact exit count.
569 /// 5. The loop does not contain reductions or recurrences.
570 /// 6. We can prove at compile-time that loops will not contain faulting
571 /// loads, or that any faulting loads would also occur in a purely
572 /// scalar loop.
573 /// 7. It is safe to speculatively execute instructions such as divide or
574 /// call instructions.
575 /// The list above is not based on theoretical limitations of vectorization,
576 /// but simply a statement that more work is needed to support these
577 /// additional cases safely.
578 bool isVectorizableEarlyExitLoop();
579
580 /// When vectorizing an early exit loop containing side effects, we need to
581 /// determine whether an uncounted exit will be taken before any operation
582 /// that has side effects.
583 ///
584 /// Consider a loop like the following:
585 /// for (int i = 0; i < N; ++i) {
586 /// a[i] = b[i];
587 /// if (c[i] == 0)
588 /// break;
589 /// }
590 ///
591 /// We have both a load and a store operation occurring before the condition
592 /// is checked for early termination. We could potentially restrict
593 /// vectorization to cases where we know all addresses are guaranteed to be
594 /// dereferenceable, which would allow the load before the condition check to
595 /// be vectorized.
596 ///
597 /// The store, however, should not execute across all lanes if early
598 /// termination occurs before the end of the vector. We must only store to the
599 /// locations that would have been stored to by a scalar loop. So we need to
600 /// know what the result of 'c[i] == 0' is before performing the vector store,
601 /// with or without masking.
602 ///
603 /// We can either do this by moving the condition load to the top of the
604 /// vector body and using the comparison to create masks for other operations
605 /// in the loop, or by looking ahead one vector iteration and bailing out to
606 /// the scalar loop if an exit would occur.
607 ///
608 /// Using the latter approach (applicable to more targets), we need to hoist
609 /// the first load (of c[0]) out of the loop then rotate the load within the
610 /// loop to the next iteration, remembering to adjust the vector trip count.
611 /// Something like the following:
612 ///
613 /// vec.ph:
614 /// %ci.0 = load <4 x i32>, ptr %c
615 /// %cmp.0 = icmp eq <4 x i32> %ci.0, zeroinitializer
616 /// %any.of.0 = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> %cmp.0)
617 /// br i1 %any.of.0, label %scalar.ph, label %vec.body
618 /// vec.body:
619 /// %iv = phi...
620 /// phi for c[i] if used elsewhere in the loop...
621 /// other operations in the loop...
622 /// %iv.next = add i64 %iv, 4
623 /// %addr.next = getelementptr i32, ptr %c, i64 %iv.next
624 /// %ci.next = load <4 x i32>, ptr %addr.next
625 /// %cmp.next = icmp eq <4 x i32> %ci.next, zeroinitializer
626 /// %any.of.next = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> %cmp.next)
627 /// iv.next compared with shortened vector tripcount...
628 /// uncountable condition combined with counted condition...
629 /// br...
630 ///
631 /// Doing this means the last few iterations will always be performed by a
632 /// scalar loop regardless of which exit is taken, and so vector iterations
633 /// will never execute a memory operation to a location that the scalar loop
634 /// would not have.
635 ///
636 /// This means we must ensure that it is safe to move the load for 'c[i]'
637 /// before other memory operations (or any other observable side effects) in
638 /// the loop.
639 ///
640 /// Currently, c[i] must have only one user (the comparison used for the
641 /// uncountable exit) since we would otherwise need to introduce a PHI node
642 /// for it.
643 bool canUncountableExitConditionLoadBeMoved(BasicBlock *ExitingBlock);
644
645 /// Return true if all of the instructions in the block can be speculatively
646 /// executed, and record the loads/stores that require masking.
647 /// \p SafePtrs is a list of addresses that are known to be legal and we know
648 /// that we can read from them without segfault.
649 /// \p MaskedOp is a list of instructions that have to be transformed into
650 /// calls to the appropriate masked intrinsic when the loop is vectorized
651 /// or dropped if the instruction is a conditional assume intrinsic.
652 bool
653 blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs,
655
656 /// Updates the vectorization state by adding \p Phi to the inductions list.
657 /// This can set \p Phi as the main induction of the loop if \p Phi is a
658 /// better choice for the main induction than the existing one.
659 void addInductionPhi(PHINode *Phi, const InductionDescriptor &ID,
660 SmallPtrSetImpl<Value *> &AllowedExit);
661
662 /// The loop that we evaluate.
663 Loop *TheLoop;
664
665 /// Loop Info analysis.
666 LoopInfo *LI;
667
668 /// A wrapper around ScalarEvolution used to add runtime SCEV checks.
669 /// Applies dynamic knowledge to simplify SCEV expressions in the context
670 /// of existing SCEV assumptions. The analysis will also add a minimal set
671 /// of new predicates if this is required to enable vectorization and
672 /// unrolling.
674
675 /// Target Transform Info.
677
678 /// Target Library Info.
680
681 /// Dominator Tree.
682 DominatorTree *DT;
683
684 // LoopAccess analysis.
686
687 const LoopAccessInfo *LAI = nullptr;
688
689 /// Interface to emit optimization remarks.
691
692 // --- vectorization state --- //
693
694 /// Holds the primary induction variable. This is the counter of the
695 /// loop.
696 PHINode *PrimaryInduction = nullptr;
697
698 /// Holds the reduction variables.
700
701 /// Holds all of the induction variables that we found in the loop.
702 /// Notice that inductions don't need to start at zero and that induction
703 /// variables can be pointers.
704 InductionList Inductions;
705
706 /// Holds all the casts that participate in the update chain of the induction
707 /// variables, and that have been proven to be redundant (possibly under a
708 /// runtime guard). These casts can be ignored when creating the vectorized
709 /// loop body.
710 SmallPtrSet<Instruction *, 4> InductionCastsToIgnore;
711
712 /// Holds the phi nodes that are fixed-order recurrences.
713 RecurrenceSet FixedOrderRecurrences;
714
715 /// Holds the widest induction type encountered.
716 IntegerType *WidestIndTy = nullptr;
717
718 /// Allowed outside users. This holds the variables that can be accessed from
719 /// outside the loop.
720 SmallPtrSet<Value *, 4> AllowedExit;
721
722 /// Vectorization requirements that will go through late-evaluation.
723 LoopVectorizationRequirements *Requirements;
724
725 /// Used to emit an analysis of any legality issues.
726 LoopVectorizeHints *Hints;
727
728 /// The demanded bits analysis is used to compute the minimum type size in
729 /// which a reduction can be computed.
730 DemandedBits *DB;
731
732 /// The assumption cache analysis is used to compute the minimum type size in
733 /// which a reduction can be computed.
734 AssumptionCache *AC;
735
736 /// Instructions that require masking because they are in source-level
737 /// conditionally executed blocks.
738 SmallPtrSet<const Instruction *, 8> ConditionallyExecutedOps;
739 /// Instructions that require masking only due to tail-folding predication.
740 SmallPtrSet<const Instruction *, 8> TailFoldedMaskedOp;
741
742 /// Contains all identified histogram operations, which are sequences of
743 /// load -> update -> store instructions where multiple lanes in a vector
744 /// may work on the same memory location.
746
747 /// Whether or not creating SCEV predicates is allowed.
748 bool AllowRuntimeSCEVChecks;
749
750 // Alias Analysis results used to check for possible aliasing with loads
751 // used in uncountable exit conditions.
752 AAResults *AA;
753
754 /// If we discover function calls within the loop which have a valid
755 /// vectorized variant, record that fact so that LoopVectorize can
756 /// (potentially) make a better decision on the maximum VF and enable
757 /// the use of those function variants.
758 bool VecCallVariantsFound = false;
759
760 /// Keep track of all the countable and uncountable exiting blocks if
761 /// the exact backedge taken count is not computable.
762 SmallVector<BasicBlock *, 4> CountableExitingBlocks;
763
764 /// Records whether we have an uncountable early exit in a loop that's
765 /// either read-only or read-write.
767};
768
769} // namespace llvm
770
771#endif // LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZATIONLEGALITY_H
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define H(x, y, z)
Definition MD5.cpp:56
This file implements a map that provides insertion order iteration.
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
Definition TypeSize.h:315
A struct for saving information about induction variables.
Class to represent integer types.
An instruction for reading from memory.
Drive the analysis of memory accesses in the loop.
MapVector< PHINode *, InductionDescriptor > InductionList
InductionList saves induction variables and maps them to the induction descriptor.
bool isInvariantStoreOfReduction(StoreInst *SI)
Returns True if given store is a final invariant store of one of the reductions found in the loop.
bool hasVectorCallVariants() const
Returns true if there is at least one function call in the loop which has a vectorized variant availa...
const RecurrenceDescriptor & getRecurrenceDescriptor(PHINode *PN) const
Returns the recurrence descriptor associated with a given phi node PN, expecting one to exist.
RecurrenceSet & getFixedOrderRecurrences()
Return the fixed-order recurrences found in the loop.
uint64_t getMaxStoreLoadForwardSafeDistanceInBits() const
Return safe power-of-2 number of elements, which do not prevent store-load forwarding and safe to ope...
bool isInvariantAddressOfReduction(Value *V)
Returns True if given address is invariant and is used to store recurrent expression.
bool canVectorize(bool UseVPlanNativePath)
Returns true if it is legal to vectorize this loop.
PredicatedScalarEvolution * getPredicatedScalarEvolution() const
bool blockNeedsPredication(const BasicBlock *BB) const
Return true if the block BB needs to be predicated in order for the loop to be vectorized.
int isConsecutivePtr(Type *AccessTy, Value *Ptr) const
Check if this pointer is consecutive when vectorizing.
AssumptionCache * getAssumptionCache() const
std::optional< const HistogramInfo * > getHistogramInfo(Instruction *I) const
Returns a HistogramInfo* for the given instruction if it was determined to be part of a load -> updat...
SmallPtrSet< const PHINode *, 8 > RecurrenceSet
RecurrenceSet contains the phi nodes that are recurrences other than inductions and reductions.
bool hasUncountableExitWithSideEffects() const
Returns true if this is an early exit loop with state-changing or potentially-faulting operations and...
bool canVectorizeFPMath(bool EnableStrictReductions)
Returns true if it is legal to vectorize the FP math operations in this loop.
bool isReductionVariable(PHINode *PN) const
Returns True if PN is a reduction variable in this loop.
bool isFixedOrderRecurrence(const PHINode *Phi) const
Returns True if Phi is a fixed-order recurrence in this loop.
IntegerType * getWidestInductionType()
Returns the widest induction type.
const InductionDescriptor * getPointerInductionDescriptor(PHINode *Phi) const
Returns a pointer to the induction descriptor, if Phi is pointer induction.
const InductionDescriptor * getIntOrFpInductionDescriptor(PHINode *Phi) const
Returns a pointer to the induction descriptor, if Phi is an integer or floating point induction.
bool isInductionPhi(const Value *V) const
Returns True if V is a Phi node of an induction variable in this loop.
PHINode * getPrimaryInduction()
Returns the primary induction variable.
UncountableExitTrait getUncountableExitTrait() const
Returns information about whether this loop contains at least one uncountable early exit,...
const SmallVector< BasicBlock *, 4 > & getCountableExitingBlocks() const
Returns all exiting blocks with a countable exit, i.e.
bool isUniform(Value *V, ElementCount VF) const
Returns true if value V is uniform across VF lanes, when VF is provided, and otherwise if V is invari...
const InductionList & getInductionVars() const
Returns the induction variables found in the loop.
bool isInvariant(Value *V) const
Returns true if V is invariant across all loop iterations according to SCEV.
const ReductionList & getReductionVars() const
Returns the reduction variables found in the loop.
bool isSafeForAnyStoreLoadForwardDistances() const
Return true if there are store-load forwarding dependencies.
bool canFoldTailByMasking() const
Return true if we can vectorize this loop while folding its tail by masking.
void prepareToFoldTailByMasking()
Mark all respective loads/stores for masking.
bool hasUncountableEarlyExit() const
Returns true if the loop has uncountable early exits, i.e.
bool hasHistograms() const
Returns true if at least one known histogram operation was found in the loop.
const LoopAccessInfo * getLAI() const
MapVector< PHINode *, RecurrenceDescriptor > ReductionList
ReductionList contains the reduction descriptors for all of the reductions that were found in the loo...
ScalarEvolution * getScalarEvolution() const
bool isUniformMemOp(Instruction &I, ElementCount VF) const
A uniform memory op is a load or store which accesses the same memory location on all VF lanes,...
bool isMaskRequired(const Instruction *I, bool TailFolded) const
Returns true if instruction I requires a mask for vectorization.
LoopVectorizationLegality(Loop *L, PredicatedScalarEvolution &PSE, DominatorTree *DT, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, Function *F, LoopAccessInfoManager &LAIs, LoopInfo *LI, OptimizationRemarkEmitter *ORE, LoopVectorizationRequirements *R, LoopVectorizeHints *H, DemandedBits *DB, AssumptionCache *AC, bool AllowRuntimeSCEVChecks, AAResults *AA)
const RuntimePointerChecking * getRuntimePointerChecking() const
Returns the information that we collected about runtime memory check.
bool isInductionVariable(const Value *V) const
Returns True if V can be considered as an induction variable in this loop.
bool isCastedInductionVariable(const Value *V) const
Returns True if V is a cast that is part of an induction def-use chain, and had been proven to be red...
This holds vectorization requirements that must be verified late in the process.
void addExactFPMathInst(Instruction *I)
Track the 1st floating-point instruction that can not be reassociated.
Utility class for getting and setting loop vectorizer hints in the form of loop metadata.
@ SK_PreferScalable
Vectorize loops using scalable vectors or fixed-width vectors, but favor scalable vectors when the co...
@ SK_AlwaysScalable
Always vectorize loops using scalable vectors if feasible (i.e.
@ SK_FixedWidthOnly
Disables vectorization with scalable vectors.
bool allowVectorization(Function *F, Loop *L, bool VectorizeOnlyWhenForced) const
bool allowReordering() const
When enabling loop hints are provided, we allow the vectorizer to change the order of operations that ...
void emitRemarkWithHints() const
Dumps all the hint information.
void setAlreadyVectorized()
Mark the loop L as already vectorized by setting the width to 1.
LoopVectorizeHints(const Loop *L, bool InterleaveOnlyWhenForced, OptimizationRemarkEmitter &ORE, const TargetTransformInfo *TTI=nullptr)
const char * vectorizeAnalysisPassName() const
If hints are provided that force vectorization, use the AlwaysPrint pass name to force the frontend t...
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
Root of the metadata hierarchy.
Definition Metadata.h:64
The optimization diagnostic interface.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
Analysis providing profile information.
The RecurrenceDescriptor is used to identify recurrences variables in a loop.
Holds information about the memory runtime legality checks to verify that a group of pointers do not ...
The main scalar evolution driver.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Provides information about what library functions are available for the current target.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
LLVM Value Representation.
Definition Value.h:75
Abstract Attribute helper functions.
Definition Attributor.h:165
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
This is an optimization pass for GlobalISel generic memory operations.
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
UncountableExitTrait
Indicates the characteristics of a loop with an uncountable exit.
LLVM_ABI bool hasDisableAllTransformsHint(const Loop *L)
Look for the loop attribute that disables all transformation heuristic.
LLVM_ABI TransformationMode hasUnrollTransformation(const Loop *L)
TargetTransformInfo TTI
@ TM_Disable
The transformation should not be applied.
Definition LoopUtils.h:292
This holds details about a histogram operation – a load -> update -> store sequence where each lane i...
HistogramInfo(LoadInst *Load, Instruction *Update, StoreInst *Store)