1//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
// The implementation of the loop memory dependence analysis that was
// originally developed for the loop vectorizer.
11//
12//===----------------------------------------------------------------------===//
13
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/ADT/APInt.h"
16#include "llvm/ADT/DenseMap.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SetVector.h"
23#include "llvm/ADT/SmallSet.h"
38#include "llvm/IR/BasicBlock.h"
39#include "llvm/IR/Constants.h"
40#include "llvm/IR/DataLayout.h"
41#include "llvm/IR/DebugLoc.h"
44#include "llvm/IR/Dominators.h"
45#include "llvm/IR/Function.h"
47#include "llvm/IR/InstrTypes.h"
48#include "llvm/IR/Instruction.h"
50#include "llvm/IR/Operator.h"
51#include "llvm/IR/PassManager.h"
53#include "llvm/IR/Type.h"
54#include "llvm/IR/Value.h"
55#include "llvm/IR/ValueHandle.h"
57#include "llvm/Pass.h"
60#include "llvm/Support/Debug.h"
63#include <algorithm>
64#include <cassert>
65#include <cstdint>
66#include <iterator>
67#include <utility>
68#include <vector>
69
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "loop-accesses"

static cl::opt<unsigned, true>
VectorizationFactor("force-vector-width", cl::Hidden,
                    cl::desc("Sets the SIMD width. Zero is autoselect."),
                    cl::location(VectorizerParams::VectorizationFactor));

static cl::opt<unsigned, true>
VectorizationInterleave("force-vector-interleave", cl::Hidden,
                        cl::desc("Sets the vectorization interleave count. "
                                 "Zero is autoselect."),
                        cl::location(VectorizerParams::VectorizationInterleave));

static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
    "runtime-memory-check-threshold", cl::Hidden,
    cl::desc("When performing memory disambiguation checks at runtime do not "
             "generate more than this number of comparisons (default = 8)."),
    cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));

/// The maximum iterations used to merge memory checks.
static cl::opt<unsigned> MemoryCheckMergeThreshold(
    "memory-check-merge-threshold", cl::Hidden,
    cl::desc("Maximum number of comparisons done when trying to merge "
             "runtime memory checks. (default = 100)"),
    cl::init(100));

/// Maximum SIMD width.
const unsigned VectorizerParams::MaxVectorWidth = 64;

/// We collect dependences up to this threshold.
static cl::opt<unsigned>
    MaxDependences("max-dependences", cl::Hidden,
                   cl::desc("Maximum number of dependences collected by "
                            "loop-access analysis (default = 100)"),
                   cl::init(100));

/// This enables versioning on the strides of symbolically striding memory
/// accesses in code like the following.
///   for (i = 0; i < N; ++i)
///     A[i * Stride1] += B[i * Stride2] ...
///
/// Will be roughly translated to
///    if (Stride1 == 1 && Stride2 == 1) {
///      for (i = 0; i < N; i+=4)
///        A[i:i+3] += ...
///    } else
///      ...
static cl::opt<bool> EnableMemAccessVersioning(
    "enable-mem-access-versioning", cl::init(true), cl::Hidden,
    cl::desc("Enable symbolic stride memory access versioning"));

/// Enable store-to-load forwarding conflict detection. This option can
/// be disabled for correctness testing.
static cl::opt<bool> EnableForwardingConflictDetection(
    "store-to-load-forwarding-conflict-detection", cl::Hidden,
    cl::desc("Enable conflict detection in loop-access analysis"),
    cl::init(true));

static cl::opt<unsigned> MaxForkedSCEVDepth(
    "max-forked-scev-depth", cl::Hidden,
    cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"),
    cl::init(5));

static cl::opt<bool> SpeculateUnitStride(
    "laa-speculate-unit-stride", cl::Hidden,
    cl::desc("Speculate that non-constant strides are unit in LAA"),
    cl::init(true));

bool VectorizerParams::isInterleaveForced() {
  return ::VectorizationInterleave.getNumOccurrences() > 0;
}
148
const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
                                            const DenseMap<Value *, const SCEV *> &PtrToStride,
                                            Value *Ptr) {
  const SCEV *OrigSCEV = PSE.getSCEV(Ptr);

  // If there is an entry in the map return the SCEV of the pointer with the
  // symbolic stride replaced by one.
  DenseMap<Value *, const SCEV *>::const_iterator SI = PtrToStride.find(Ptr);
  if (SI == PtrToStride.end())
    // For a non-symbolic stride, just return the original expression.
    return OrigSCEV;

  const SCEV *StrideSCEV = SI->second;
  // Note: This assert is both overly strong and overly weak. The actual
  // invariant here is that StrideSCEV should be loop invariant. The only
  // such invariant strides we happen to speculate right now are unknowns
  // and thus this is a reasonable proxy of the actual invariant.
  assert(isa<SCEVUnknown>(StrideSCEV) && "shouldn't be in map");

  ScalarEvolution *SE = PSE.getSE();
  const auto *CT = SE->getOne(StrideSCEV->getType());
  PSE.addPredicate(*SE->getEqualPredicate(StrideSCEV, CT));
  auto *Expr = PSE.getSCEV(Ptr);

  LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
                    << " by: " << *Expr << "\n");
  return Expr;
}
177
RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
    unsigned Index, RuntimePointerChecking &RtCheck)
    : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),
      AddressSpace(RtCheck.Pointers[Index]
                       .PointerValue->getType()
                       ->getPointerAddressSpace()),
      NeedsFreeze(RtCheck.Pointers[Index].NeedsFreeze) {
  Members.push_back(Index);
}
187
188/// Calculate Start and End points of memory access.
189/// Let's assume A is the first access and B is a memory access on N-th loop
190/// iteration. Then B is calculated as:
191/// B = A + Step*N .
192/// Step value may be positive or negative.
193/// N is a calculated back-edge taken count:
194/// N = (TripCount > 0) ? RoundDown(TripCount -1 , VF) : 0
195/// Start and End points are calculated in the following way:
196/// Start = UMIN(A, B) ; End = UMAX(A, B) + SizeOfElt,
197/// where SizeOfElt is the size of single memory access in bytes.
198///
199/// There is no conflict when the intervals are disjoint:
200/// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
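///
/// For illustration, take an i32 access A[i] with A = %a, Step = 4 and a
/// backedge-taken count N = 99: B = %a + 396, giving
///   Start = %a ; End = %a + 396 + 4 = %a + 400,
/// i.e. the half-open byte range [%a, %a + 400) covered by the access.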
void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
                                    Type *AccessTy, bool WritePtr,
                                    unsigned DepSetId, unsigned ASId,
                                    PredicatedScalarEvolution &PSE,
                                    bool NeedsFreeze) {
206 ScalarEvolution *SE = PSE.getSE();
207
208 const SCEV *ScStart;
209 const SCEV *ScEnd;
210
211 if (SE->isLoopInvariant(PtrExpr, Lp)) {
212 ScStart = ScEnd = PtrExpr;
213 } else {
214 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr);
215 assert(AR && "Invalid addrec expression");
216 const SCEV *Ex = PSE.getBackedgeTakenCount();
217
218 ScStart = AR->getStart();
219 ScEnd = AR->evaluateAtIteration(Ex, *SE);
220 const SCEV *Step = AR->getStepRecurrence(*SE);
221
222 // For expressions with negative step, the upper bound is ScStart and the
223 // lower bound is ScEnd.
224 if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
225 if (CStep->getValue()->isNegative())
226 std::swap(ScStart, ScEnd);
227 } else {
228 // Fallback case: the step is not constant, but we can still
229 // get the upper and lower bounds of the interval by using min/max
230 // expressions.
231 ScStart = SE->getUMinExpr(ScStart, ScEnd);
232 ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
233 }
234 }
235 // Add the size of the pointed element to ScEnd.
236 auto &DL = Lp->getHeader()->getModule()->getDataLayout();
237 Type *IdxTy = DL.getIndexType(Ptr->getType());
238 const SCEV *EltSizeSCEV = SE->getStoreSizeOfExpr(IdxTy, AccessTy);
239 ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);
240
241 Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, PtrExpr,
242 NeedsFreeze);
243}
244
245void RuntimePointerChecking::tryToCreateDiffCheck(
246 const RuntimeCheckingPtrGroup &CGI, const RuntimeCheckingPtrGroup &CGJ) {
247 if (!CanUseDiffCheck)
248 return;
249
250 // If either group contains multiple different pointers, bail out.
251 // TODO: Support multiple pointers by using the minimum or maximum pointer,
252 // depending on src & sink.
253 if (CGI.Members.size() != 1 || CGJ.Members.size() != 1) {
254 CanUseDiffCheck = false;
255 return;
256 }
257
258 PointerInfo *Src = &Pointers[CGI.Members[0]];
259 PointerInfo *Sink = &Pointers[CGJ.Members[0]];
260
261 // If either pointer is read and written, multiple checks may be needed. Bail
262 // out.
263 if (!DC.getOrderForAccess(Src->PointerValue, !Src->IsWritePtr).empty() ||
264 !DC.getOrderForAccess(Sink->PointerValue, !Sink->IsWritePtr).empty()) {
265 CanUseDiffCheck = false;
266 return;
267 }
268
269 ArrayRef<unsigned> AccSrc =
270 DC.getOrderForAccess(Src->PointerValue, Src->IsWritePtr);
271 ArrayRef<unsigned> AccSink =
272 DC.getOrderForAccess(Sink->PointerValue, Sink->IsWritePtr);
273 // If either pointer is accessed multiple times, there may not be a clear
274 // src/sink relation. Bail out for now.
275 if (AccSrc.size() != 1 || AccSink.size() != 1) {
276 CanUseDiffCheck = false;
277 return;
278 }
279 // If the sink is accessed before src, swap src/sink.
280 if (AccSink[0] < AccSrc[0])
281 std::swap(Src, Sink);
282
283 auto *SrcAR = dyn_cast<SCEVAddRecExpr>(Src->Expr);
284 auto *SinkAR = dyn_cast<SCEVAddRecExpr>(Sink->Expr);
285 if (!SrcAR || !SinkAR || SrcAR->getLoop() != DC.getInnermostLoop() ||
286 SinkAR->getLoop() != DC.getInnermostLoop()) {
287 CanUseDiffCheck = false;
288 return;
289 }
290
  SmallVector<Instruction *, 4> SrcInsts =
      DC.getInstructionsForAccess(Src->PointerValue, Src->IsWritePtr);
  SmallVector<Instruction *, 4> SinkInsts =
      DC.getInstructionsForAccess(Sink->PointerValue, Sink->IsWritePtr);
295 Type *SrcTy = getLoadStoreType(SrcInsts[0]);
296 Type *DstTy = getLoadStoreType(SinkInsts[0]);
297 if (isa<ScalableVectorType>(SrcTy) || isa<ScalableVectorType>(DstTy)) {
298 CanUseDiffCheck = false;
299 return;
300 }
301 const DataLayout &DL =
302 SinkAR->getLoop()->getHeader()->getModule()->getDataLayout();
303 unsigned AllocSize =
304 std::max(DL.getTypeAllocSize(SrcTy), DL.getTypeAllocSize(DstTy));
305
306 // Only matching constant steps matching the AllocSize are supported at the
307 // moment. This simplifies the difference computation. Can be extended in the
308 // future.
309 auto *Step = dyn_cast<SCEVConstant>(SinkAR->getStepRecurrence(*SE));
310 if (!Step || Step != SrcAR->getStepRecurrence(*SE) ||
311 Step->getAPInt().abs() != AllocSize) {
312 CanUseDiffCheck = false;
313 return;
314 }
315
316 IntegerType *IntTy =
317 IntegerType::get(Src->PointerValue->getContext(),
318 DL.getPointerSizeInBits(CGI.AddressSpace));
319
320 // When counting down, the dependence distance needs to be swapped.
321 if (Step->getValue()->isNegative())
322 std::swap(SinkAR, SrcAR);
323
324 const SCEV *SinkStartInt = SE->getPtrToIntExpr(SinkAR->getStart(), IntTy);
325 const SCEV *SrcStartInt = SE->getPtrToIntExpr(SrcAR->getStart(), IntTy);
326 if (isa<SCEVCouldNotCompute>(SinkStartInt) ||
327 isa<SCEVCouldNotCompute>(SrcStartInt)) {
328 CanUseDiffCheck = false;
329 return;
330 }
331 DiffChecks.emplace_back(SrcStartInt, SinkStartInt, AllocSize,
332 Src->NeedsFreeze || Sink->NeedsFreeze);
333}
334
SmallVector<RuntimePointerCheck, 4> RuntimePointerChecking::generateChecks() {
  SmallVector<RuntimePointerCheck, 4> Checks;

  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
      const RuntimeCheckingPtrGroup &CGI = CheckingGroups[I];
      const RuntimeCheckingPtrGroup &CGJ = CheckingGroups[J];

      if (needsChecking(CGI, CGJ)) {
        tryToCreateDiffCheck(CGI, CGJ);
        Checks.push_back(std::make_pair(&CGI, &CGJ));
      }
    }
  }
  return Checks;
}
351
352void RuntimePointerChecking::generateChecks(
353 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
354 assert(Checks.empty() && "Checks is not empty");
355 groupChecks(DepCands, UseDependencies);
356 Checks = generateChecks();
357}
358
bool RuntimePointerChecking::needsChecking(
    const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
361 for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I)
362 for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J)
363 if (needsChecking(M.Members[I], N.Members[J]))
364 return true;
365 return false;
366}
367
368/// Compare \p I and \p J and return the minimum.
369/// Return nullptr in case we couldn't find an answer.
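///
/// For example, for I = %base and J = %base + 16, Diff = 16 is a
/// non-negative constant, so I is returned; if the difference is not a
/// compile-time constant, nullptr signals that no ordering is known.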
370static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
371 ScalarEvolution *SE) {
372 const SCEV *Diff = SE->getMinusSCEV(J, I);
373 const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff);
374
375 if (!C)
376 return nullptr;
377 if (C->getValue()->isNegative())
378 return J;
379 return I;
380}
381
bool RuntimeCheckingPtrGroup::addPointer(unsigned Index,
                                         RuntimePointerChecking &RtCheck) {
384 return addPointer(
385 Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,
386 RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
387 RtCheck.Pointers[Index].NeedsFreeze, *RtCheck.SE);
388}
389
bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, const SCEV *Start,
                                         const SCEV *End, unsigned AS,
                                         bool NeedsFreeze,
                                         ScalarEvolution &SE) {
394 assert(AddressSpace == AS &&
395 "all pointers in a checking group must be in the same address space");
396
397 // Compare the starts and ends with the known minimum and maximum
398 // of this set. We need to know how we compare against the min/max
399 // of the set in order to be able to emit memchecks.
400 const SCEV *Min0 = getMinFromExprs(Start, Low, &SE);
401 if (!Min0)
402 return false;
403
404 const SCEV *Min1 = getMinFromExprs(End, High, &SE);
405 if (!Min1)
406 return false;
407
408 // Update the low bound expression if we've found a new min value.
409 if (Min0 == Start)
410 Low = Start;
411
412 // Update the high bound expression if we've found a new max value.
413 if (Min1 != End)
414 High = End;
415
  Members.push_back(Index);
  this->NeedsFreeze |= NeedsFreeze;
418 return true;
419}
420
421void RuntimePointerChecking::groupChecks(
422 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
423 // We build the groups from dependency candidates equivalence classes
424 // because:
425 // - We know that pointers in the same equivalence class share
426 // the same underlying object and therefore there is a chance
427 // that we can compare pointers
428 // - We wouldn't be able to merge two pointers for which we need
429 // to emit a memcheck. The classes in DepCands are already
430 // conveniently built such that no two pointers in the same
431 // class need checking against each other.
432
433 // We use the following (greedy) algorithm to construct the groups
434 // For every pointer in the equivalence class:
435 // For each existing group:
436 // - if the difference between this pointer and the min/max bounds
437 // of the group is a constant, then make the pointer part of the
438 // group and update the min/max bounds of that group as required.
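  //
  // For example, if one equivalence class contains %p, %p + 4 and %p + 64,
  // all pairwise differences are constants, so the three pointers end up in
  // a single group whose bounds are roughly [%p, %p + 64 + EltSize).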
439
440 CheckingGroups.clear();
441
442 // If we need to check two pointers to the same underlying object
443 // with a non-constant difference, we shouldn't perform any pointer
444 // grouping with those pointers. This is because we can easily get
445 // into cases where the resulting check would return false, even when
446 // the accesses are safe.
447 //
448 // The following example shows this:
449 // for (i = 0; i < 1000; ++i)
450 // a[5000 + i * m] = a[i] + a[i + 9000]
451 //
452 // Here grouping gives a check of (5000, 5000 + 1000 * m) against
453 // (0, 10000) which is always false. However, if m is 1, there is no
454 // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
455 // us to perform an accurate check in this case.
456 //
457 // The above case requires that we have an UnknownDependence between
458 // accesses to the same underlying object. This cannot happen unless
459 // FoundNonConstantDistanceDependence is set, and therefore UseDependencies
460 // is also false. In this case we will use the fallback path and create
461 // separate checking groups for all pointers.
462
463 // If we don't have the dependency partitions, construct a new
464 // checking pointer group for each pointer. This is also required
465 // for correctness, because in this case we can have checking between
466 // pointers to the same underlying object.
467 if (!UseDependencies) {
468 for (unsigned I = 0; I < Pointers.size(); ++I)
469 CheckingGroups.push_back(RuntimeCheckingPtrGroup(I, *this));
470 return;
471 }
472
473 unsigned TotalComparisons = 0;
474
  DenseMap<Value *, SmallVector<unsigned>> PositionMap;
  for (unsigned Index = 0; Index < Pointers.size(); ++Index) {
477 auto Iter = PositionMap.insert({Pointers[Index].PointerValue, {}});
478 Iter.first->second.push_back(Index);
479 }
480
481 // We need to keep track of what pointers we've already seen so we
482 // don't process them twice.
  SmallSet<unsigned, 2> Seen;

485 // Go through all equivalence classes, get the "pointer check groups"
486 // and add them to the overall solution. We use the order in which accesses
487 // appear in 'Pointers' to enforce determinism.
488 for (unsigned I = 0; I < Pointers.size(); ++I) {
489 // We've seen this pointer before, and therefore already processed
490 // its equivalence class.
491 if (Seen.count(I))
492 continue;
493
494 MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
495 Pointers[I].IsWritePtr);
496
    SmallVector<RuntimeCheckingPtrGroup, 2> Groups;
    auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));
499
500 // Because DepCands is constructed by visiting accesses in the order in
501 // which they appear in alias sets (which is deterministic) and the
502 // iteration order within an equivalence class member is only dependent on
503 // the order in which unions and insertions are performed on the
504 // equivalence class, the iteration order is deterministic.
505 for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
506 MI != ME; ++MI) {
507 auto PointerI = PositionMap.find(MI->getPointer());
508 assert(PointerI != PositionMap.end() &&
509 "pointer in equivalence class not found in PositionMap");
510 for (unsigned Pointer : PointerI->second) {
511 bool Merged = false;
512 // Mark this pointer as seen.
513 Seen.insert(Pointer);
514
515 // Go through all the existing sets and see if we can find one
516 // which can include this pointer.
517 for (RuntimeCheckingPtrGroup &Group : Groups) {
518 // Don't perform more than a certain amount of comparisons.
519 // This should limit the cost of grouping the pointers to something
520 // reasonable. If we do end up hitting this threshold, the algorithm
521 // will create separate groups for all remaining pointers.
522 if (TotalComparisons > MemoryCheckMergeThreshold)
523 break;
524
525 TotalComparisons++;
526
527 if (Group.addPointer(Pointer, *this)) {
528 Merged = true;
529 break;
530 }
531 }
532
533 if (!Merged)
534 // We couldn't add this pointer to any existing set or the threshold
535 // for the number of comparisons has been reached. Create a new group
536 // to hold the current pointer.
537 Groups.push_back(RuntimeCheckingPtrGroup(Pointer, *this));
538 }
539 }
540
541 // We've computed the grouped checks for this partition.
542 // Save the results and continue with the next one.
543 llvm::copy(Groups, std::back_inserter(CheckingGroups));
544 }
545}
546
bool RuntimePointerChecking::arePointersInSamePartition(
    const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
549 unsigned PtrIdx2) {
550 return (PtrToPartition[PtrIdx1] != -1 &&
551 PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
552}
553
554bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
555 const PointerInfo &PointerI = Pointers[I];
556 const PointerInfo &PointerJ = Pointers[J];
557
558 // No need to check if two readonly pointers intersect.
559 if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
560 return false;
561
562 // Only need to check pointers between two different dependency sets.
563 if (PointerI.DependencySetId == PointerJ.DependencySetId)
564 return false;
565
566 // Only need to check pointers in the same alias set.
567 if (PointerI.AliasSetId != PointerJ.AliasSetId)
568 return false;
569
570 return true;
571}
572
void RuntimePointerChecking::printChecks(
    raw_ostream &OS, const SmallVectorImpl<RuntimePointerCheck> &Checks,
    unsigned Depth) const {
576 unsigned N = 0;
577 for (const auto &Check : Checks) {
578 const auto &First = Check.first->Members, &Second = Check.second->Members;
579
580 OS.indent(Depth) << "Check " << N++ << ":\n";
581
582 OS.indent(Depth + 2) << "Comparing group (" << Check.first << "):\n";
583 for (unsigned K = 0; K < First.size(); ++K)
584 OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n";
585
586 OS.indent(Depth + 2) << "Against group (" << Check.second << "):\n";
587 for (unsigned K = 0; K < Second.size(); ++K)
588 OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n";
589 }
590}
591
void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {

594 OS.indent(Depth) << "Run-time memory checks:\n";
595 printChecks(OS, Checks, Depth);
596
597 OS.indent(Depth) << "Grouped accesses:\n";
598 for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
599 const auto &CG = CheckingGroups[I];
600
601 OS.indent(Depth + 2) << "Group " << &CG << ":\n";
602 OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
603 << ")\n";
604 for (unsigned J = 0; J < CG.Members.size(); ++J) {
605 OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr
606 << "\n";
607 }
608 }
609}
610
611namespace {
612
613/// Analyses memory accesses in a loop.
614///
615/// Checks whether run time pointer checks are needed and builds sets for data
616/// dependence checking.
617class AccessAnalysis {
618public:
619 /// Read or write access location.
620 typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
621 typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;
622
  AccessAnalysis(Loop *TheLoop, AAResults *AA, LoopInfo *LI,
                 MemoryDepChecker::DepCandidates &DA,
                 PredicatedScalarEvolution &PSE)
      : TheLoop(TheLoop), BAA(*AA), AST(BAA), LI(LI), DepCands(DA), PSE(PSE) {
627 // We're analyzing dependences across loop iterations.
628 BAA.enableCrossIterationMode();
629 }
630
631 /// Register a load and whether it is only read from.
632 void addLoad(MemoryLocation &Loc, Type *AccessTy, bool IsReadOnly) {
633 Value *Ptr = const_cast<Value*>(Loc.Ptr);
635 Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy);
636 if (IsReadOnly)
637 ReadOnlyPtr.insert(Ptr);
638 }
639
640 /// Register a store.
641 void addStore(MemoryLocation &Loc, Type *AccessTy) {
642 Value *Ptr = const_cast<Value*>(Loc.Ptr);
644 Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy);
645 }
646
647 /// Check if we can emit a run-time no-alias check for \p Access.
648 ///
649 /// Returns true if we can emit a run-time no alias check for \p Access.
650 /// If we can check this access, this also adds it to a dependence set and
651 /// adds a run-time to check for it to \p RtCheck. If \p Assume is true,
652 /// we will attempt to use additional run-time checks in order to get
653 /// the bounds of the pointer.
654 bool createCheckForAccess(RuntimePointerChecking &RtCheck,
655 MemAccessInfo Access, Type *AccessTy,
656 const DenseMap<Value *, const SCEV *> &Strides,
                            DenseMap<Value *, unsigned> &DepSetId,
                            Loop *TheLoop, unsigned &RunningDepId,
659 unsigned ASId, bool ShouldCheckStride, bool Assume);
660
661 /// Check whether we can check the pointers at runtime for
662 /// non-intersection.
663 ///
664 /// Returns true if we need no check or if we do and we can generate them
665 /// (i.e. the pointers have computable bounds).
666 bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
667 Loop *TheLoop, const DenseMap<Value *, const SCEV *> &Strides,
668 Value *&UncomputablePtr, bool ShouldCheckWrap = false);
669
670 /// Goes over all memory accesses, checks whether a RT check is needed
671 /// and builds sets of dependent accesses.
672 void buildDependenceSets() {
673 processMemAccesses();
674 }
675
676 /// Initial processing of memory accesses determined that we need to
677 /// perform dependency checking.
678 ///
679 /// Note that this can later be cleared if we retry memcheck analysis without
680 /// dependency checking (i.e. FoundNonConstantDistanceDependence).
681 bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }
682
683 /// We decided that no dependence analysis would be used. Reset the state.
684 void resetDepChecks(MemoryDepChecker &DepChecker) {
685 CheckDeps.clear();
686 DepChecker.clearDependences();
687 }
688
689 MemAccessInfoList &getDependenciesToCheck() { return CheckDeps; }
690
691private:
  typedef MapVector<MemAccessInfo, SmallSetVector<Type *, 1>> PtrAccessMap;

694 /// Go over all memory access and check whether runtime pointer checks
695 /// are needed and build sets of dependency check candidates.
696 void processMemAccesses();
697
698 /// Map of all accesses. Values are the types used to access memory pointed to
699 /// by the pointer.
700 PtrAccessMap Accesses;
701
702 /// The loop being checked.
703 const Loop *TheLoop;
704
705 /// List of accesses that need a further dependence check.
706 MemAccessInfoList CheckDeps;
707
708 /// Set of pointers that are read only.
709 SmallPtrSet<Value*, 16> ReadOnlyPtr;
710
711 /// Batched alias analysis results.
712 BatchAAResults BAA;
713
  /// An alias set tracker to partition the access set by underlying object and
  /// intrinsic property (such as TBAA metadata).
716 AliasSetTracker AST;
717
718 LoopInfo *LI;
719
720 /// Sets of potentially dependent accesses - members of one set share an
  /// underlying pointer. The set "CheckDeps" identifies which sets really
  /// need a dependence check.
  MemoryDepChecker::DepCandidates &DepCands;
725 /// Initial processing of memory accesses determined that we may need
726 /// to add memchecks. Perform the analysis to determine the necessary checks.
727 ///
728 /// Note that, this is different from isDependencyCheckNeeded. When we retry
729 /// memcheck analysis without dependency checking
730 /// (i.e. FoundNonConstantDistanceDependence), isDependencyCheckNeeded is
731 /// cleared while this remains set if we have potentially dependent accesses.
732 bool IsRTCheckAnalysisNeeded = false;
733
734 /// The SCEV predicate containing all the SCEV-related assumptions.
  PredicatedScalarEvolution &PSE;
};
737
738} // end anonymous namespace
739
740/// Check whether a pointer can participate in a runtime bounds check.
741/// If \p Assume, try harder to prove that we can compute the bounds of \p Ptr
742/// by adding run-time checks (overflow checks) if necessary.
static bool hasComputableBounds(PredicatedScalarEvolution &PSE, Value *Ptr,
                                const SCEV *PtrScev, Loop *L, bool Assume) {
745 // The bounds for loop-invariant pointer is trivial.
746 if (PSE.getSE()->isLoopInvariant(PtrScev, L))
747 return true;
748
749 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
750
751 if (!AR && Assume)
752 AR = PSE.getAsAddRec(Ptr);
753
754 if (!AR)
755 return false;
756
757 return AR->isAffine();
758}
759
760/// Check whether a pointer address cannot wrap.
static bool isNoWrap(PredicatedScalarEvolution &PSE,
                     const DenseMap<Value *, const SCEV *> &Strides,
                     Value *Ptr, Type *AccessTy, Loop *L) {
764 const SCEV *PtrScev = PSE.getSCEV(Ptr);
765 if (PSE.getSE()->isLoopInvariant(PtrScev, L))
766 return true;
767
768 int64_t Stride = getPtrStride(PSE, AccessTy, Ptr, L, Strides).value_or(0);
769 if (Stride == 1 || PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
770 return true;
771
772 return false;
773}
774
775static void visitPointers(Value *StartPtr, const Loop &InnermostLoop,
776 function_ref<void(Value *)> AddPointer) {
  SmallPtrSet<Value *, 8> Visited;
  SmallVector<Value *> WorkList;
779 WorkList.push_back(StartPtr);
780
781 while (!WorkList.empty()) {
782 Value *Ptr = WorkList.pop_back_val();
783 if (!Visited.insert(Ptr).second)
784 continue;
785 auto *PN = dyn_cast<PHINode>(Ptr);
786 // SCEV does not look through non-header PHIs inside the loop. Such phis
787 // can be analyzed by adding separate accesses for each incoming pointer
788 // value.
789 if (PN && InnermostLoop.contains(PN->getParent()) &&
790 PN->getParent() != InnermostLoop.getHeader()) {
791 for (const Use &Inc : PN->incoming_values())
792 WorkList.push_back(Inc);
793 } else
794 AddPointer(Ptr);
795 }
796}
797
798// Walk back through the IR for a pointer, looking for a select like the
799// following:
800//
801// %offset = select i1 %cmp, i64 %a, i64 %b
802// %addr = getelementptr double, double* %base, i64 %offset
803// %ld = load double, double* %addr, align 8
804//
805// We won't be able to form a single SCEVAddRecExpr from this since the
806// address for each loop iteration depends on %cmp. We could potentially
807// produce multiple valid SCEVAddRecExprs, though, and check all of them for
808// memory safety/aliasing if needed.
809//
810// If we encounter some IR we don't yet handle, or something obviously fine
811// like a constant, then we just add the SCEV for that term to the list passed
812// in by the caller. If we have a node that may potentially yield a valid
813// SCEVAddRecExpr then we decompose it into parts and build the SCEV terms
814// ourselves before adding to the list.
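//
// For the select example above, assuming %a and %b are loop-invariant, the
// two candidate SCEVs would be roughly (%base + 8 * %a) and (%base + 8 * %b);
// each candidate can then be bounds-checked separately instead of giving up
// on the select.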
815static void findForkedSCEVs(
816 ScalarEvolution *SE, const Loop *L, Value *Ptr,
    SmallVectorImpl<PointerIntPair<const SCEV *, 1, bool>> &ScevList,
    unsigned Depth) {
819 // If our Value is a SCEVAddRecExpr, loop invariant, not an instruction, or
820 // we've exceeded our limit on recursion, just return whatever we have
821 // regardless of whether it can be used for a forked pointer or not, along
822 // with an indication of whether it might be a poison or undef value.
823 const SCEV *Scev = SE->getSCEV(Ptr);
824 if (isa<SCEVAddRecExpr>(Scev) || L->isLoopInvariant(Ptr) ||
825 !isa<Instruction>(Ptr) || Depth == 0) {
826 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
827 return;
828 }
829
830 Depth--;
831
832 auto UndefPoisonCheck = [](PointerIntPair<const SCEV *, 1, bool> S) {
833 return get<1>(S);
834 };
835
836 auto GetBinOpExpr = [&SE](unsigned Opcode, const SCEV *L, const SCEV *R) {
837 switch (Opcode) {
838 case Instruction::Add:
839 return SE->getAddExpr(L, R);
840 case Instruction::Sub:
841 return SE->getMinusSCEV(L, R);
842 default:
843 llvm_unreachable("Unexpected binary operator when walking ForkedPtrs");
844 }
845 };
846
847 Instruction *I = cast<Instruction>(Ptr);
848 unsigned Opcode = I->getOpcode();
849 switch (Opcode) {
850 case Instruction::GetElementPtr: {
851 GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
852 Type *SourceTy = GEP->getSourceElementType();
853 // We only handle base + single offset GEPs here for now.
854 // Not dealing with preexisting gathers yet, so no vectors.
855 if (I->getNumOperands() != 2 || SourceTy->isVectorTy()) {
856 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(GEP));
857 break;
858 }
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> BaseScevs;
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> OffsetScevs;
    findForkedSCEVs(SE, L, I->getOperand(0), BaseScevs, Depth);
    findForkedSCEVs(SE, L, I->getOperand(1), OffsetScevs, Depth);
863
864 // See if we need to freeze our fork...
865 bool NeedsFreeze = any_of(BaseScevs, UndefPoisonCheck) ||
866 any_of(OffsetScevs, UndefPoisonCheck);
867
868 // Check that we only have a single fork, on either the base or the offset.
869 // Copy the SCEV across for the one without a fork in order to generate
870 // the full SCEV for both sides of the GEP.
871 if (OffsetScevs.size() == 2 && BaseScevs.size() == 1)
872 BaseScevs.push_back(BaseScevs[0]);
873 else if (BaseScevs.size() == 2 && OffsetScevs.size() == 1)
874 OffsetScevs.push_back(OffsetScevs[0]);
875 else {
876 ScevList.emplace_back(Scev, NeedsFreeze);
877 break;
878 }
879
880 // Find the pointer type we need to extend to.
881 Type *IntPtrTy = SE->getEffectiveSCEVType(
882 SE->getSCEV(GEP->getPointerOperand())->getType());
883
884 // Find the size of the type being pointed to. We only have a single
885 // index term (guarded above) so we don't need to index into arrays or
886 // structures, just get the size of the scalar value.
887 const SCEV *Size = SE->getSizeOfExpr(IntPtrTy, SourceTy);
888
889 // Scale up the offsets by the size of the type, then add to the bases.
890 const SCEV *Scaled1 = SE->getMulExpr(
891 Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[0]), IntPtrTy));
892 const SCEV *Scaled2 = SE->getMulExpr(
893 Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[1]), IntPtrTy));
894 ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[0]), Scaled1),
895 NeedsFreeze);
896 ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[1]), Scaled2),
897 NeedsFreeze);
898 break;
899 }
900 case Instruction::Select: {
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
    // A select means we've found a forked pointer, but we currently only
903 // support a single select per pointer so if there's another behind this
904 // then we just bail out and return the generic SCEV.
905 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
906 findForkedSCEVs(SE, L, I->getOperand(2), ChildScevs, Depth);
907 if (ChildScevs.size() == 2) {
908 ScevList.push_back(ChildScevs[0]);
909 ScevList.push_back(ChildScevs[1]);
910 } else
911 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
912 break;
913 }
914 case Instruction::Add:
915 case Instruction::Sub: {
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> LScevs;
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> RScevs;
    findForkedSCEVs(SE, L, I->getOperand(0), LScevs, Depth);
    findForkedSCEVs(SE, L, I->getOperand(1), RScevs, Depth);
920
921 // See if we need to freeze our fork...
922 bool NeedsFreeze =
923 any_of(LScevs, UndefPoisonCheck) || any_of(RScevs, UndefPoisonCheck);
924
925 // Check that we only have a single fork, on either the left or right side.
926 // Copy the SCEV across for the one without a fork in order to generate
927 // the full SCEV for both sides of the BinOp.
928 if (LScevs.size() == 2 && RScevs.size() == 1)
929 RScevs.push_back(RScevs[0]);
930 else if (RScevs.size() == 2 && LScevs.size() == 1)
931 LScevs.push_back(LScevs[0]);
932 else {
933 ScevList.emplace_back(Scev, NeedsFreeze);
934 break;
935 }
936
937 ScevList.emplace_back(
938 GetBinOpExpr(Opcode, get<0>(LScevs[0]), get<0>(RScevs[0])),
939 NeedsFreeze);
940 ScevList.emplace_back(
941 GetBinOpExpr(Opcode, get<0>(LScevs[1]), get<0>(RScevs[1])),
942 NeedsFreeze);
943 break;
944 }
945 default:
946 // Just return the current SCEV if we haven't handled the instruction yet.
947 LLVM_DEBUG(dbgs() << "ForkedPtr unhandled instruction: " << *I << "\n");
948 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
949 break;
950 }
951}
952
static SmallVector<PointerIntPair<const SCEV *, 1, bool>>
findForkedPointer(PredicatedScalarEvolution &PSE,
                  const DenseMap<Value *, const SCEV *> &StridesMap,
                  Value *Ptr, const Loop *L) {
957 ScalarEvolution *SE = PSE.getSE();
958 assert(SE->isSCEVable(Ptr->getType()) && "Value is not SCEVable!");
  SmallVector<PointerIntPair<const SCEV *, 1, bool>> Scevs;
  findForkedSCEVs(SE, L, Ptr, Scevs, MaxForkedSCEVDepth);
961
962 // For now, we will only accept a forked pointer with two possible SCEVs
963 // that are either SCEVAddRecExprs or loop invariant.
964 if (Scevs.size() == 2 &&
965 (isa<SCEVAddRecExpr>(get<0>(Scevs[0])) ||
966 SE->isLoopInvariant(get<0>(Scevs[0]), L)) &&
967 (isa<SCEVAddRecExpr>(get<0>(Scevs[1])) ||
968 SE->isLoopInvariant(get<0>(Scevs[1]), L))) {
969 LLVM_DEBUG(dbgs() << "LAA: Found forked pointer: " << *Ptr << "\n");
970 LLVM_DEBUG(dbgs() << "\t(1) " << *get<0>(Scevs[0]) << "\n");
971 LLVM_DEBUG(dbgs() << "\t(2) " << *get<0>(Scevs[1]) << "\n");
972 return Scevs;
973 }
974
975 return {{replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false}};
976}
977
978bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
979 MemAccessInfo Access, Type *AccessTy,
980 const DenseMap<Value *, const SCEV *> &StridesMap,
                                          DenseMap<Value *, unsigned> &DepSetId,
                                          Loop *TheLoop, unsigned &RunningDepId,
983 unsigned ASId, bool ShouldCheckWrap,
984 bool Assume) {
985 Value *Ptr = Access.getPointer();
986
  SmallVector<PointerIntPair<const SCEV *, 1, bool>> TranslatedPtrs =
      findForkedPointer(PSE, StridesMap, Ptr, TheLoop);
989
990 for (auto &P : TranslatedPtrs) {
991 const SCEV *PtrExpr = get<0>(P);
992 if (!hasComputableBounds(PSE, Ptr, PtrExpr, TheLoop, Assume))
993 return false;
994
995 // When we run after a failing dependency check we have to make sure
996 // we don't have wrapping pointers.
997 if (ShouldCheckWrap) {
998 // Skip wrap checking when translating pointers.
999 if (TranslatedPtrs.size() > 1)
1000 return false;
1001
1002 if (!isNoWrap(PSE, StridesMap, Ptr, AccessTy, TheLoop)) {
1003 auto *Expr = PSE.getSCEV(Ptr);
1004 if (!Assume || !isa<SCEVAddRecExpr>(Expr))
1005 return false;
        PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
      }
1008 }
1009 // If there's only one option for Ptr, look it up after bounds and wrap
1010 // checking, because assumptions might have been added to PSE.
1011 if (TranslatedPtrs.size() == 1)
1012 TranslatedPtrs[0] = {replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr),
1013 false};
1014 }
1015
1016 for (auto [PtrExpr, NeedsFreeze] : TranslatedPtrs) {
1017 // The id of the dependence set.
1018 unsigned DepId;
1019
1020 if (isDependencyCheckNeeded()) {
1021 Value *Leader = DepCands.getLeaderValue(Access).getPointer();
1022 unsigned &LeaderId = DepSetId[Leader];
1023 if (!LeaderId)
1024 LeaderId = RunningDepId++;
1025 DepId = LeaderId;
1026 } else
1027 // Each access has its own dependence set.
1028 DepId = RunningDepId++;
1029
1030 bool IsWrite = Access.getInt();
1031 RtCheck.insert(TheLoop, Ptr, PtrExpr, AccessTy, IsWrite, DepId, ASId, PSE,
1032 NeedsFreeze);
1033 LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
1034 }
1035
1036 return true;
1037}
1038
1039bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
1040 ScalarEvolution *SE, Loop *TheLoop,
1041 const DenseMap<Value *, const SCEV *> &StridesMap,
1042 Value *&UncomputablePtr, bool ShouldCheckWrap) {
1043 // Find pointers with computable bounds. We are going to use this information
1044 // to place a runtime bound check.
1045 bool CanDoRT = true;
1046
1047 bool MayNeedRTCheck = false;
1048 if (!IsRTCheckAnalysisNeeded) return true;
1049
1050 bool IsDepCheckNeeded = isDependencyCheckNeeded();
1051
1052 // We assign a consecutive id to access from different alias sets.
1053 // Accesses between different groups doesn't need to be checked.
1054 unsigned ASId = 0;
1055 for (auto &AS : AST) {
1056 int NumReadPtrChecks = 0;
1057 int NumWritePtrChecks = 0;
1058 bool CanDoAliasSetRT = true;
1059 ++ASId;
1060
1061 // We assign consecutive id to access from different dependence sets.
1062 // Accesses within the same set don't need a runtime check.
1063 unsigned RunningDepId = 1;
    DenseMap<Value *, unsigned> DepSetId;

    SmallVector<std::pair<MemAccessInfo, Type *>, 4> Retries;

1068 // First, count how many write and read accesses are in the alias set. Also
1069 // collect MemAccessInfos for later.
    SmallVector<MemAccessInfo, 4> AccessInfos;
    for (const auto &A : AS) {
1072 Value *Ptr = A.getValue();
1073 bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
1074
1075 if (IsWrite)
1076 ++NumWritePtrChecks;
1077 else
1078 ++NumReadPtrChecks;
1079 AccessInfos.emplace_back(Ptr, IsWrite);
1080 }
1081
1082 // We do not need runtime checks for this alias set, if there are no writes
1083 // or a single write and no reads.
1084 if (NumWritePtrChecks == 0 ||
1085 (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
1086 assert((AS.size() <= 1 ||
1087 all_of(AS,
1088 [this](auto AC) {
1089 MemAccessInfo AccessWrite(AC.getValue(), true);
1090 return DepCands.findValue(AccessWrite) == DepCands.end();
1091 })) &&
1092 "Can only skip updating CanDoRT below, if all entries in AS "
1093 "are reads or there is at most 1 entry");
1094 continue;
1095 }
1096
1097 for (auto &Access : AccessInfos) {
1098 for (const auto &AccessTy : Accesses[Access]) {
1099 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1100 DepSetId, TheLoop, RunningDepId, ASId,
1101 ShouldCheckWrap, false)) {
1102 LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
1103 << *Access.getPointer() << '\n');
1104 Retries.push_back({Access, AccessTy});
1105 CanDoAliasSetRT = false;
1106 }
1107 }
1108 }
1109
1110 // Note that this function computes CanDoRT and MayNeedRTCheck
1111 // independently. For example CanDoRT=false, MayNeedRTCheck=false means that
1112 // we have a pointer for which we couldn't find the bounds but we don't
1113 // actually need to emit any checks so it does not matter.
1114 //
1115 // We need runtime checks for this alias set, if there are at least 2
1116 // dependence sets (in which case RunningDepId > 2) or if we need to re-try
1117 // any bound checks (because in that case the number of dependence sets is
1118 // incomplete).
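    //
    // For example, an alias set whose accesses fall into two dependence sets
    // leaves RunningDepId at 3 (it starts at 1 and is bumped once per set),
    // so NeedsAliasSetRTCheck is true even when every pointer had computable
    // bounds.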
1119 bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();
1120
1121 // We need to perform run-time alias checks, but some pointers had bounds
1122 // that couldn't be checked.
1123 if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
1124 // Reset the CanDoSetRt flag and retry all accesses that have failed.
1125 // We know that we need these checks, so we can now be more aggressive
1126 // and add further checks if required (overflow checks).
1127 CanDoAliasSetRT = true;
1128 for (auto Retry : Retries) {
1129 MemAccessInfo Access = Retry.first;
1130 Type *AccessTy = Retry.second;
1131 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1132 DepSetId, TheLoop, RunningDepId, ASId,
1133 ShouldCheckWrap, /*Assume=*/true)) {
1134 CanDoAliasSetRT = false;
1135 UncomputablePtr = Access.getPointer();
1136 break;
1137 }
1138 }
1139 }
1140
1141 CanDoRT &= CanDoAliasSetRT;
1142 MayNeedRTCheck |= NeedsAliasSetRTCheck;
1143 ++ASId;
1144 }
1145
1146 // If the pointers that we would use for the bounds comparison have different
1147 // address spaces, assume the values aren't directly comparable, so we can't
1148 // use them for the runtime check. We also have to assume they could
1149 // overlap. In the future there should be metadata for whether address spaces
1150 // are disjoint.
1151 unsigned NumPointers = RtCheck.Pointers.size();
1152 for (unsigned i = 0; i < NumPointers; ++i) {
1153 for (unsigned j = i + 1; j < NumPointers; ++j) {
1154 // Only need to check pointers between two different dependency sets.
1155 if (RtCheck.Pointers[i].DependencySetId ==
1156 RtCheck.Pointers[j].DependencySetId)
1157 continue;
1158 // Only need to check pointers in the same alias set.
1159 if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
1160 continue;
1161
1162 Value *PtrI = RtCheck.Pointers[i].PointerValue;
1163 Value *PtrJ = RtCheck.Pointers[j].PointerValue;
1164
1165 unsigned ASi = PtrI->getType()->getPointerAddressSpace();
1166 unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
1167 if (ASi != ASj) {
1168 LLVM_DEBUG(
1169 dbgs() << "LAA: Runtime check would require comparison between"
1170 " different address spaces\n");
1171 return false;
1172 }
1173 }
1174 }
1175
1176 if (MayNeedRTCheck && CanDoRT)
1177 RtCheck.generateChecks(DepCands, IsDepCheckNeeded);
1178
1179 LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
1180 << " pointer comparisons.\n");
1181
1182 // If we can do run-time checks, but there are no checks, no runtime checks
1183 // are needed. This can happen when all pointers point to the same underlying
1184 // object for example.
1185 RtCheck.Need = CanDoRT ? RtCheck.getNumberOfChecks() != 0 : MayNeedRTCheck;
1186
1187 bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
1188 if (!CanDoRTIfNeeded)
1189 RtCheck.reset();
1190 return CanDoRTIfNeeded;
1191}
1192
1193void AccessAnalysis::processMemAccesses() {
1194 // We process the set twice: first we process read-write pointers, last we
1195 // process read-only pointers. This allows us to skip dependence tests for
1196 // read-only pointers.
1197
1198 LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
1199 LLVM_DEBUG(dbgs() << " AST: "; AST.dump());
1200 LLVM_DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
1201 LLVM_DEBUG({
1202 for (auto A : Accesses)
1203 dbgs() << "\t" << *A.first.getPointer() << " ("
1204 << (A.first.getInt()
1205 ? "write"
1206 : (ReadOnlyPtr.count(A.first.getPointer()) ? "read-only"
1207 : "read"))
1208 << ")\n";
1209 });
1210
1211 // The AliasSetTracker has nicely partitioned our pointers by metadata
1212 // compatibility and potential for underlying-object overlap. As a result, we
1213 // only need to check for potential pointer dependencies within each alias
1214 // set.
1215 for (const auto &AS : AST) {
1216 // Note that both the alias-set tracker and the alias sets themselves used
1217 // linked lists internally and so the iteration order here is deterministic
1218 // (matching the original instruction order within each set).
1219
1220 bool SetHasWrite = false;
1221
1222 // Map of pointers to last access encountered.
1223 typedef DenseMap<const Value*, MemAccessInfo> UnderlyingObjToAccessMap;
1224 UnderlyingObjToAccessMap ObjToLastAccess;
1225
1226 // Set of access to check after all writes have been processed.
1227 PtrAccessMap DeferredAccesses;
1228
1229 // Iterate over each alias set twice, once to process read/write pointers,
1230 // and then to process read-only pointers.
1231 for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
1232 bool UseDeferred = SetIteration > 0;
1233 PtrAccessMap &S = UseDeferred ? DeferredAccesses : Accesses;
1234
1235 for (const auto &AV : AS) {
1236 Value *Ptr = AV.getValue();
1237
1238 // For a single memory access in AliasSetTracker, Accesses may contain
1239 // both read and write, and they both need to be handled for CheckDeps.
1240 for (const auto &AC : S) {
1241 if (AC.first.getPointer() != Ptr)
1242 continue;
1243
1244 bool IsWrite = AC.first.getInt();
1245
1246 // If we're using the deferred access set, then it contains only
1247 // reads.
1248 bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
1249 if (UseDeferred && !IsReadOnlyPtr)
1250 continue;
1251 // Otherwise, the pointer must be in the PtrAccessSet, either as a
1252 // read or a write.
1253 assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
1254 S.count(MemAccessInfo(Ptr, false))) &&
1255 "Alias-set pointer not in the access set?");
1256
1257 MemAccessInfo Access(Ptr, IsWrite);
1258 DepCands.insert(Access);
1259
1260 // Memorize read-only pointers for later processing and skip them in
1261 // the first round (they need to be checked after we have seen all
1262 // write pointers). Note: we also mark pointer that are not
1263 // consecutive as "read-only" pointers (so that we check
1264 // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
1265 if (!UseDeferred && IsReadOnlyPtr) {
1266 // We only use the pointer keys, the types vector values don't
1267 // matter.
1268 DeferredAccesses.insert({Access, {}});
1269 continue;
1270 }
1271
1272 // If this is a write - check other reads and writes for conflicts. If
1273 // this is a read only check other writes for conflicts (but only if
1274 // there is no other write to the ptr - this is an optimization to
1275 // catch "a[i] = a[i] + " without having to do a dependence check).
1276 if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
1277 CheckDeps.push_back(Access);
1278 IsRTCheckAnalysisNeeded = true;
1279 }
1280
1281 if (IsWrite)
1282 SetHasWrite = true;
1283
1284 // Create sets of pointers connected by a shared alias set and
1285 // underlying object.
1286 typedef SmallVector<const Value *, 16> ValueVector;
1287 ValueVector TempObjects;
1288
1289 getUnderlyingObjects(Ptr, TempObjects, LI);
          LLVM_DEBUG(dbgs()
                     << "Underlying objects for pointer " << *Ptr << "\n");
1292 for (const Value *UnderlyingObj : TempObjects) {
1293 // nullptr never alias, don't join sets for pointer that have "null"
1294 // in their UnderlyingObjects list.
1295 if (isa<ConstantPointerNull>(UnderlyingObj) &&
                !NullPointerIsDefined(
                    TheLoop->getHeader()->getParent(),
1298 UnderlyingObj->getType()->getPointerAddressSpace()))
1299 continue;
1300
1301 UnderlyingObjToAccessMap::iterator Prev =
1302 ObjToLastAccess.find(UnderlyingObj);
1303 if (Prev != ObjToLastAccess.end())
1304 DepCands.unionSets(Access, Prev->second);
1305
1306 ObjToLastAccess[UnderlyingObj] = Access;
1307 LLVM_DEBUG(dbgs() << " " << *UnderlyingObj << "\n");
1308 }
1309 }
1310 }
1311 }
1312 }
1313}
1314
1315/// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
1316/// i.e. monotonically increasing/decreasing.
1317static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
1318 PredicatedScalarEvolution &PSE, const Loop *L) {
1319
  // FIXME: This should probably only return true for NUW.
  if (AR->getNoWrapFlags(SCEV::NoWrapMask))
    return true;

  if (PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
    return true;
1326
1327 // Scalar evolution does not propagate the non-wrapping flags to values that
1328 // are derived from a non-wrapping induction variable because non-wrapping
1329 // could be flow-sensitive.
1330 //
1331 // Look through the potentially overflowing instruction to try to prove
1332 // non-wrapping for the *specific* value of Ptr.
1333
1334 // The arithmetic implied by an inbounds GEP can't overflow.
1335 auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
1336 if (!GEP || !GEP->isInBounds())
1337 return false;
1338
1339 // Make sure there is only one non-const index and analyze that.
1340 Value *NonConstIndex = nullptr;
1341 for (Value *Index : GEP->indices())
1342 if (!isa<ConstantInt>(Index)) {
1343 if (NonConstIndex)
1344 return false;
1345 NonConstIndex = Index;
1346 }
1347 if (!NonConstIndex)
1348 // The recurrence is on the pointer, ignore for now.
1349 return false;
1350
1351 // The index in GEP is signed. It is non-wrapping if it's derived from a NSW
1352 // AddRec using a NSW operation.
1353 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
1354 if (OBO->hasNoSignedWrap() &&
1355 // Assume constant for other the operand so that the AddRec can be
1356 // easily found.
1357 isa<ConstantInt>(OBO->getOperand(1))) {
1358 auto *OpScev = PSE.getSCEV(OBO->getOperand(0));
1359
1360 if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
1361 return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
1362 }
1363
1364 return false;
1365}
1366
1367/// Check whether the access through \p Ptr has a constant stride.
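///
/// For illustration, an i32 access whose address recurrence steps by 8 bytes
/// per iteration has stride 8 / 4 = 2; a step of 6 bytes would not divide the
/// element size evenly, so no stride would be returned.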
std::optional<int64_t>
llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr,
                   const Loop *Lp,
                   const DenseMap<Value *, const SCEV *> &StridesMap,
                   bool Assume, bool ShouldCheckWrap) {
1373 Type *Ty = Ptr->getType();
1374 assert(Ty->isPointerTy() && "Unexpected non-ptr");
1375
1376 if (isa<ScalableVectorType>(AccessTy)) {
1377 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
1378 << "\n");
1379 return std::nullopt;
1380 }
1381
1382 const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);
1383
1384 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
1385 if (Assume && !AR)
1386 AR = PSE.getAsAddRec(Ptr);
1387
1388 if (!AR) {
1389 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
1390 << " SCEV: " << *PtrScev << "\n");
1391 return std::nullopt;
1392 }
1393
1394 // The access function must stride over the innermost loop.
1395 if (Lp != AR->getLoop()) {
1396 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
1397 << *Ptr << " SCEV: " << *AR << "\n");
1398 return std::nullopt;
1399 }
1400
1401 // Check the step is constant.
1402 const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
1403
1404 // Calculate the pointer stride and check if it is constant.
1405 const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
1406 if (!C) {
1407 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
1408 << " SCEV: " << *AR << "\n");
1409 return std::nullopt;
1410 }
1411
1412 auto &DL = Lp->getHeader()->getModule()->getDataLayout();
1413 TypeSize AllocSize = DL.getTypeAllocSize(AccessTy);
1414 int64_t Size = AllocSize.getFixedValue();
1415 const APInt &APStepVal = C->getAPInt();
1416
1417 // Huge step value - give up.
1418 if (APStepVal.getBitWidth() > 64)
1419 return std::nullopt;
1420
1421 int64_t StepVal = APStepVal.getSExtValue();
1422
1423 // Strided access.
1424 int64_t Stride = StepVal / Size;
1425 int64_t Rem = StepVal % Size;
1426 if (Rem)
1427 return std::nullopt;
1428
1429 if (!ShouldCheckWrap)
1430 return Stride;
1431
1432 // The address calculation must not wrap. Otherwise, a dependence could be
1433 // inverted.
1434 if (isNoWrapAddRec(Ptr, AR, PSE, Lp))
1435 return Stride;
1436
  // An inbounds getelementptr that is an AddRec with a unit stride
1438 // cannot wrap per definition. If it did, the result would be poison
1439 // and any memory access dependent on it would be immediate UB
1440 // when executed.
1441 if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
1442 GEP && GEP->isInBounds() && (Stride == 1 || Stride == -1))
1443 return Stride;
1444
  // If the null pointer is undefined, then an access sequence which would
1446 // otherwise access it can be assumed not to unsigned wrap. Note that this
1447 // assumes the object in memory is aligned to the natural alignment.
1448 unsigned AddrSpace = Ty->getPointerAddressSpace();
1449 if (!NullPointerIsDefined(Lp->getHeader()->getParent(), AddrSpace) &&
1450 (Stride == 1 || Stride == -1))
1451 return Stride;
1452
1453 if (Assume) {
    PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
    LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap:\n"
1456 << "LAA: Pointer: " << *Ptr << "\n"
1457 << "LAA: SCEV: " << *AR << "\n"
1458 << "LAA: Added an overflow assumption\n");
1459 return Stride;
1460 }
1461 LLVM_DEBUG(
1462 dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
1463 << *Ptr << " SCEV: " << *AR << "\n");
1464 return std::nullopt;
1465}
1466
1467std::optional<int> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA,
1468 Type *ElemTyB, Value *PtrB,
1469 const DataLayout &DL,
1470 ScalarEvolution &SE, bool StrictCheck,
1471 bool CheckType) {
1472 assert(PtrA && PtrB && "Expected non-nullptr pointers.");
1473 assert(cast<PointerType>(PtrA->getType())
1474 ->isOpaqueOrPointeeTypeMatches(ElemTyA) && "Wrong PtrA type");
1475 assert(cast<PointerType>(PtrB->getType())
1476 ->isOpaqueOrPointeeTypeMatches(ElemTyB) && "Wrong PtrB type");
1477
1478 // Make sure that A and B are different pointers.
1479 if (PtrA == PtrB)
1480 return 0;
1481
1482 // Make sure that the element types are the same if required.
1483 if (CheckType && ElemTyA != ElemTyB)
1484 return std::nullopt;
1485
1486 unsigned ASA = PtrA->getType()->getPointerAddressSpace();
1487 unsigned ASB = PtrB->getType()->getPointerAddressSpace();
1488
1489 // Check that the address spaces match.
1490 if (ASA != ASB)
1491 return std::nullopt;
1492 unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
1493
1494 APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
1495 Value *PtrA1 = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
1496 Value *PtrB1 = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);
1497
1498 int Val;
1499 if (PtrA1 == PtrB1) {
1500 // Retrieve the address space again as pointer stripping now tracks through
1501 // `addrspacecast`.
1502 ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
1503 ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
1504 // Check that the address spaces match and that the pointers are valid.
1505 if (ASA != ASB)
1506 return std::nullopt;
1507
1508 IdxWidth = DL.getIndexSizeInBits(ASA);
1509 OffsetA = OffsetA.sextOrTrunc(IdxWidth);
1510 OffsetB = OffsetB.sextOrTrunc(IdxWidth);
1511
1512 OffsetB -= OffsetA;
1513 Val = OffsetB.getSExtValue();
1514 } else {
1515 // Otherwise compute the distance with SCEV between the base pointers.
1516 const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
1517 const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
1518 const auto *Diff =
1519 dyn_cast<SCEVConstant>(SE.getMinusSCEV(PtrSCEVB, PtrSCEVA));
1520 if (!Diff)
1521 return std::nullopt;
1522 Val = Diff->getAPInt().getSExtValue();
1523 }
1524 int Size = DL.getTypeStoreSize(ElemTyA);
1525 int Dist = Val / Size;
1526
1527 // Ensure that the calculated distance matches the type-based one after all
1528 // the bitcasts removal in the provided pointers.
1529 if (!StrictCheck || Dist * Size == Val)
1530 return Dist;
1531 return std::nullopt;
1532}
1533
bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
                           const DataLayout &DL, ScalarEvolution &SE,
                           SmallVectorImpl<unsigned> &SortedIndices) {
  assert(llvm::all_of(
             VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
         "Expected list of pointer operands.");
1540 // Walk over the pointers, and map each of them to an offset relative to
1541 // first pointer in the array.
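  //
  // For example, for VL = {a, a + 2, a + 1} (element-sized steps), the
  // relative offsets are {0, 2, 1}; the sorted memory order is a, a + 1,
  // a + 2, so SortedIndices becomes {0, 2, 1}.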
1542 Value *Ptr0 = VL[0];
1543
1544 using DistOrdPair = std::pair<int64_t, int>;
1545 auto Compare = llvm::less_first();
1546 std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
1547 Offsets.emplace(0, 0);
1548 int Cnt = 1;
1549 bool IsConsecutive = true;
1550 for (auto *Ptr : VL.drop_front()) {
1551 std::optional<int> Diff = getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
1552 /*StrictCheck=*/true);
1553 if (!Diff)
1554 return false;
1555
1556 // Check if the pointer with the same offset is found.
1557 int64_t Offset = *Diff;
1558 auto Res = Offsets.emplace(Offset, Cnt);
1559 if (!Res.second)
1560 return false;
1561 // Consecutive order if the inserted element is the last one.
1562 IsConsecutive = IsConsecutive && std::next(Res.first) == Offsets.end();
1563 ++Cnt;
1564 }
1565 SortedIndices.clear();
1566 if (!IsConsecutive) {
1567 // Fill SortedIndices array only if it is non-consecutive.
1568 SortedIndices.resize(VL.size());
1569 Cnt = 0;
1570 for (const std::pair<int64_t, int> &Pair : Offsets) {
1571 SortedIndices[Cnt] = Pair.second;
1572 ++Cnt;
1573 }
1574 }
1575 return true;
1576}
1577
1578/// Returns true if the memory operations \p A and \p B are consecutive.
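/// For example, two i32 loads from &A[i] and &A[i + 1] yield a pointer
/// difference of exactly one element and are reported as consecutive, while
/// &A[i] and &A[i + 2] are not.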
bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                               ScalarEvolution &SE, bool CheckType) {
  Value *PtrA = getLoadStorePointerOperand(A);
  Value *PtrB = getLoadStorePointerOperand(B);
  if (!PtrA || !PtrB)
    return false;
1585 Type *ElemTyA = getLoadStoreType(A);
1586 Type *ElemTyB = getLoadStoreType(B);
1587 std::optional<int> Diff =
1588 getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
1589 /*StrictCheck=*/true, CheckType);
1590 return Diff && *Diff == 1;
1591}
1592
void MemoryDepChecker::addAccess(StoreInst *SI) {
  visitPointers(SI->getPointerOperand(), *InnermostLoop,
1595 [this, SI](Value *Ptr) {
1596 Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
1597 InstMap.push_back(SI);
1598 ++AccessIdx;
1599 });
1600}
1601
void MemoryDepChecker::addAccess(LoadInst *LI) {
  visitPointers(LI->getPointerOperand(), *InnermostLoop,
1604 [this, LI](Value *Ptr) {
1605 Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
1606 InstMap.push_back(LI);
1607 ++AccessIdx;
1608 });
1609}
1610
MemoryDepChecker::VectorizationSafetyStatus
MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
  switch (Type) {
  case NoDep:
  case Forward:
  case BackwardVectorizable:
    return VectorizationSafetyStatus::Safe;

  case Unknown:
    return VectorizationSafetyStatus::PossiblySafeWithRtChecks;
  case ForwardButPreventsForwarding:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return VectorizationSafetyStatus::Unsafe;
  }
  llvm_unreachable("unexpected DepType!");
}
1628
bool MemoryDepChecker::Dependence::isBackward() const {
  switch (Type) {
1631 case NoDep:
1632 case Forward:
1633 case ForwardButPreventsForwarding:
1634 case Unknown:
1635 return false;
1636
1637 case BackwardVectorizable:
1638 case Backward:
1639 case BackwardVectorizableButPreventsForwarding:
1640 return true;
1641 }
1642 llvm_unreachable("unexpected DepType!");
1643}
1644
1645bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
1646 return isBackward() || Type == Unknown;
1647}
1648
1649bool MemoryDepChecker::Dependence::isForward() const {
1650 switch (Type) {
1651 case Forward:
1652 case ForwardButPreventsForwarding:
1653 return true;
1654
1655 case NoDep:
1656 case Unknown:
1657 case BackwardVectorizable:
1658 case Backward:
1659 case BackwardVectorizableButPreventsForwarding:
1660 return false;
1661 }
1662 llvm_unreachable("unexpected DepType!");
1663}
1664
1665bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
1666 uint64_t TypeByteSize) {
1667 // If loads occur at a distance that is not a multiple of a feasible vector
1668 // factor, store-load forwarding does not take place.
1669 // Positive dependences might cause trouble because vectorizing them might
1670 // prevent store-load forwarding, making vectorized code run a lot slower.
1671 // a[i] = a[i-3] ^ a[i-8];
1672 // The stores to a[i:i+1] don't align with the loads from a[i-3:i-2], and
1673 // hence on your typical architecture store-load forwarding does not take
1674 // place. Vectorizing in such cases does not make sense.
1675 // Store-load forwarding distance.
1676
1677 // After this many iterations store-to-load forwarding conflicts should not
1678 // cause any slowdowns.
1679 const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
1680 // Maximum vector factor.
1681 uint64_t MaxVFWithoutSLForwardIssues = std::min(
1682 VectorizerParams::MaxVectorWidth * TypeByteSize, MaxSafeDepDistBytes);
1683
1684 // Compute the smallest VF at which the store and load would be misaligned.
1685 for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
1686 VF *= 2) {
1687 // If the number of vector iterations between the store and the load is
1688 // small, we could incur conflicts.
1689 if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
1690 MaxVFWithoutSLForwardIssues = (VF >> 1);
1691 break;
1692 }
1693 }
1694
1695 if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
1696 LLVM_DEBUG(
1697 dbgs() << "LAA: Distance " << Distance
1698 << " that could cause a store-load forwarding conflict\n");
1699 return true;
1700 }
1701
1702 if (MaxVFWithoutSLForwardIssues < MaxSafeDepDistBytes &&
1703 MaxVFWithoutSLForwardIssues !=
1704 VectorizerParams::MaxVectorWidth * TypeByteSize)
1705 MaxSafeDepDistBytes = MaxVFWithoutSLForwardIssues;
1706 return false;
1707}
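// ---------------------------------------------------------------------------
// Illustrative sketch (not part of LoopAccessAnalysis.cpp): the search above
// can be read as "find the largest power-of-two vector width (in bytes) whose
// store/load groups stay aligned with the dependence distance". A standalone
// version of that arithmetic, with hypothetical names and a fixed 64-element cap
// standing in for MaxVectorWidth and MaxSafeDepDistBytes:
static uint64_t exampleMaxVFBytesWithoutForwardingStall(uint64_t Distance,
                                                        uint64_t TypeByteSize) {
  const uint64_t NumItersForForwarding = 8 * TypeByteSize;
  uint64_t MaxVFBytes = 64 * TypeByteSize;
  for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFBytes; VF *= 2) {
    // A distance that is misaligned with VF and "close" (few vector iterations
    // away) stalls store-to-load forwarding; halve the candidate width and stop.
    if (Distance % VF && Distance / VF < NumItersForForwarding) {
      MaxVFBytes = VF / 2;
      break;
    }
  }
  return MaxVFBytes;
}
// E.g. Distance = 12 with 4-byte elements: VF = 8 already fails (12 % 8 != 0 and
// 12 / 8 < 32), so the largest stall-free width is 4 bytes, i.e. a single element.
// ---------------------------------------------------------------------------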
1708
1709void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
1710 if (Status < S)
1711 Status = S;
1712}
1713
1714/// Given a dependence-distance \p Dist between two
1715/// memory accesses, that have the same stride whose absolute value is given
1716/// in \p Stride, and that have the same type size \p TypeByteSize,
1717 /// in a loop whose backedge-taken count is \p BackedgeTakenCount, check if it is
1718/// possible to prove statically that the dependence distance is larger
1719/// than the range that the accesses will travel through the execution of
1720/// the loop. If so, return true; false otherwise. This is useful for
1721/// example in loops such as the following (PR31098):
1722/// for (i = 0; i < D; ++i) {
1723/// = out[i];
1724/// out[i+D] =
1725/// }
1726static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
1727 const SCEV &BackedgeTakenCount,
1728 const SCEV &Dist, uint64_t Stride,
1729 uint64_t TypeByteSize) {
1730
1731 // If we can prove that
1732 // (**) |Dist| > BackedgeTakenCount * Step
1733 // where Step is the absolute stride of the memory accesses in bytes,
1734 // then there is no dependence.
1735 //
1736 // Rationale:
1737 // We basically want to check if the absolute distance (|Dist/Step|)
1738 // is >= the loop iteration count (or > BackedgeTakenCount).
1739 // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
1740 // Section 4.2.1); Note, that for vectorization it is sufficient to prove
1741 // that the dependence distance is >= VF; This is checked elsewhere.
1742 // But in some cases we can prune dependence distances early, and
1743 // even before selecting the VF, and without a runtime test, by comparing
1744 // the distance against the loop iteration count. Since the vectorized code
1745 // will be executed only if LoopCount >= VF, proving distance >= LoopCount
1746 // also guarantees that distance >= VF.
1747 //
1748 const uint64_t ByteStride = Stride * TypeByteSize;
1749 const SCEV *Step = SE.getConstant(BackedgeTakenCount.getType(), ByteStride);
1750 const SCEV *Product = SE.getMulExpr(&BackedgeTakenCount, Step);
1751
1752 const SCEV *CastedDist = &Dist;
1753 const SCEV *CastedProduct = Product;
1754 uint64_t DistTypeSizeBits = DL.getTypeSizeInBits(Dist.getType());
1755 uint64_t ProductTypeSizeBits = DL.getTypeSizeInBits(Product->getType());
1756
1757 // The dependence distance can be positive/negative, so we sign extend Dist;
1758 // The multiplication of the absolute stride in bytes and the
1759 // backedgeTakenCount is non-negative, so we zero extend Product.
1760 if (DistTypeSizeBits > ProductTypeSizeBits)
1761 CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
1762 else
1763 CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());
1764
1765 // Is Dist - (BackedgeTakenCount * Step) > 0 ?
1766 // (If so, then we have proven (**) because |Dist| >= Dist)
1767 const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
1768 if (SE.isKnownPositive(Minus))
1769 return true;
1770
1771 // Second try: Is -Dist - (BackedgeTakenCount * Step) > 0 ?
1772 // (If so, then we have proven (**) because |Dist| >= -1*Dist)
1773 const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
1774 Minus = SE.getMinusSCEV(NegDist, CastedProduct);
1775 if (SE.isKnownPositive(Minus))
1776 return true;
1777
1778 return false;
1779}
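// ---------------------------------------------------------------------------
// Illustrative sketch (not part of LoopAccessAnalysis.cpp): with constant values
// the SCEV reasoning above reduces to comparing |Dist| with the total number of
// bytes the accesses stride over during the loop. The helper name is
// hypothetical; the real code proves the inequality symbolically via SCEV.
static bool exampleIsSafeDependenceDistance(int64_t Dist,
                                            uint64_t BackedgeTakenCount,
                                            uint64_t Stride,
                                            uint64_t TypeByteSize) {
  // (**) |Dist| > BackedgeTakenCount * Stride * TypeByteSize
  uint64_t AbsDist =
      Dist < 0 ? 0 - static_cast<uint64_t>(Dist) : static_cast<uint64_t>(Dist);
  return AbsDist > BackedgeTakenCount * Stride * TypeByteSize;
}
// For the PR31098-style loop "out[i + D] = ... out[i] ..." with unit stride and
// 4-byte elements, Dist = 4 * D and BackedgeTakenCount = D - 1, so
// 4 * D > 4 * (D - 1) holds and the two accesses cannot overlap inside the loop.
// ---------------------------------------------------------------------------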
1780
1781/// Check the dependence for two accesses with the same stride \p Stride.
1782/// \p Distance is the positive distance and \p TypeByteSize is type size in
1783/// bytes.
1784///
1785/// \returns true if they are independent.
1786static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
1787 uint64_t TypeByteSize) {
1788 assert(Stride > 1 && "The stride must be greater than 1");
1789 assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
1790 assert(Distance > 0 && "The distance must be non-zero");
1791
1792 // Skip if the distance is not a multiple of the type byte size.
1793 if (Distance % TypeByteSize)
1794 return false;
1795
1796 uint64_t ScaledDist = Distance / TypeByteSize;
1797
1798 // No dependence if the scaled distance is not a multiple of the stride.
1799 // E.g.
1800 // for (i = 0; i < 1024 ; i += 4)
1801 // A[i+2] = A[i] + 1;
1802 //
1803 // Two accesses in memory (scaled distance is 2, stride is 4):
1804 // | A[0] | | | | A[4] | | | |
1805 // | | | A[2] | | | | A[6] | |
1806 //
1807 // E.g.
1808 // for (i = 0; i < 1024 ; i += 3)
1809 // A[i+4] = A[i] + 1;
1810 //
1811 // Two accesses in memory (scaled distance is 4, stride is 3):
1812 // | A[0] | | | A[3] | | | A[6] | | |
1813 // | | | | | A[4] | | | A[7] | |
1814 return ScaledDist % Stride;
1815}
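// ---------------------------------------------------------------------------
// Illustrative sketch (not part of LoopAccessAnalysis.cpp): the independence
// test above is pure modular arithmetic; a standalone version with a
// hypothetical name makes the two worked examples from the comment easy to
// replay.
static bool exampleStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
                                              uint64_t TypeByteSize) {
  if (Distance % TypeByteSize) // Distance must be a whole number of elements.
    return false;
  uint64_t ScaledDist = Distance / TypeByteSize;
  // Independent exactly when the element distance never lands on the stride.
  return ScaledDist % Stride != 0;
}
// With 4-byte elements: "A[i+2] = A[i]" stepping by 4 gives ScaledDist = 2 and
// Stride = 4, so 2 % 4 != 0 and the accesses are independent; "A[i+4] = A[i]"
// stepping by 3 gives ScaledDist = 4 and Stride = 3, again independent.
// ---------------------------------------------------------------------------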
1816
1817MemoryDepChecker::Dependence::DepType
1818MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
1819 const MemAccessInfo &B, unsigned BIdx,
1820 const DenseMap<Value *, const SCEV *> &Strides) {
1821 assert (AIdx < BIdx && "Must pass arguments in program order");
1822
1823 auto [APtr, AIsWrite] = A;
1824 auto [BPtr, BIsWrite] = B;
1825 Type *ATy = getLoadStoreType(InstMap[AIdx]);
1826 Type *BTy = getLoadStoreType(InstMap[BIdx]);
1827
1828 // Two reads are independent.
1829 if (!AIsWrite && !BIsWrite)
1830 return Dependence::NoDep;
1831
1832 // We cannot check pointers in different address spaces.
1833 if (APtr->getType()->getPointerAddressSpace() !=
1834 BPtr->getType()->getPointerAddressSpace())
1835 return Dependence::Unknown;
1836
1837 int64_t StrideAPtr =
1838 getPtrStride(PSE, ATy, APtr, InnermostLoop, Strides, true).value_or(0);
1839 int64_t StrideBPtr =
1840 getPtrStride(PSE, BTy, BPtr, InnermostLoop, Strides, true).value_or(0);
1841
1842 const SCEV *Src = PSE.getSCEV(APtr);
1843 const SCEV *Sink = PSE.getSCEV(BPtr);
1844
1845 // If the induction step is negative we have to invert source and sink of the
1846 // dependence.
1847 if (StrideAPtr < 0) {
1848 std::swap(APtr, BPtr);
1849 std::swap(ATy, BTy);
1850 std::swap(Src, Sink);
1851 std::swap(AIsWrite, BIsWrite);
1852 std::swap(AIdx, BIdx);
1853 std::swap(StrideAPtr, StrideBPtr);
1854 }
1855
1856 ScalarEvolution &SE = *PSE.getSE();
1857 const SCEV *Dist = SE.getMinusSCEV(Sink, Src);
1858
1859 LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
1860 << "(Induction step: " << StrideAPtr << ")\n");
1861 LLVM_DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to "
1862 << *InstMap[BIdx] << ": " << *Dist << "\n");
1863
1864 // Need accesses with constant stride. We don't want to vectorize
1865 // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap in
1866 // the address space.
1867 if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr){
1868 LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
1869 return Dependence::Unknown;
1870 }
1871
1872 auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
1873 uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
1874 bool HasSameSize =
1875 DL.getTypeStoreSizeInBits(ATy) == DL.getTypeStoreSizeInBits(BTy);
1876 uint64_t Stride = std::abs(StrideAPtr);
1877
1878 if (!isa<SCEVCouldNotCompute>(Dist) && HasSameSize &&
1879 isSafeDependenceDistance(DL, SE, *(PSE.getBackedgeTakenCount()), *Dist,
1880 Stride, TypeByteSize))
1881 return Dependence::NoDep;
1882
1883 const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
1884 if (!C) {
1885 LLVM_DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
1886 FoundNonConstantDistanceDependence = true;
1887 return Dependence::Unknown;
1888 }
1889
1890 const APInt &Val = C->getAPInt();
1891 int64_t Distance = Val.getSExtValue();
1892
1893 // Attempt to prove strided accesses independent.
1894 if (std::abs(Distance) > 0 && Stride > 1 && HasSameSize &&
1895 areStridedAccessesIndependent(std::abs(Distance), Stride, TypeByteSize)) {
1896 LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
1897 return Dependence::NoDep;
1898 }
1899
1900 // Negative distances are not plausible dependencies.
1901 if (Val.isNegative()) {
1902 bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
1903 if (IsTrueDataDependence && EnableForwardingConflictDetection &&
1904 (couldPreventStoreLoadForward(Val.abs().getZExtValue(), TypeByteSize) ||
1905 !HasSameSize)) {
1906 LLVM_DEBUG(dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
1907 return Dependence::ForwardButPreventsForwarding;
1908 }
1909
1910 LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
1911 return Dependence::Forward;
1912 }
1913
1914 // Write to the same location with the same size.
1915 if (Val == 0) {
1916 if (HasSameSize)
1917 return Dependence::Forward;
1918 LLVM_DEBUG(
1919 dbgs() << "LAA: Zero dependence difference but different type sizes\n");
1920 return Dependence::Unknown;
1921 }
1922
1923 assert(Val.isStrictlyPositive() && "Expect a positive value");
1924
1925 if (!HasSameSize) {
1926 LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "
1927 "different type sizes\n");
1928 return Dependence::Unknown;
1929 }
1930
1931 // Bail out early if passed-in parameters make vectorization not feasible.
1932 unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
1933 VectorizerParams::VectorizationFactor : 1);
1934 unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
1935 VectorizerParams::VectorizationInterleave : 1);
1936 // The minimum number of iterations for a vectorized/unrolled version.
1937 unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
1938
1939 // It's not vectorizable if the distance is smaller than the minimum distance
1940 // needed for a vectorized/unrolled version. Vectorizing one iteration in
1941 // front needs TypeByteSize * Stride. Vectorizing the last iteration needs
1942 // TypeByteSize (no need to add the last gap distance).
1943 //
1944 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
1945 // foo(int *A) {
1946 // int *B = (int *)((char *)A + 14);
1947 // for (i = 0 ; i < 1024 ; i += 2)
1948 // B[i] = A[i] + 1;
1949 // }
1950 //
1951 // Two accesses in memory (stride is 2):
1952 // | A[0] | | A[2] | | A[4] | | A[6] | |
1953 // | B[0] | | B[2] | | B[4] |
1954 //
1955 // Distance needed for vectorizing iterations except the last iteration:
1956 // 4 * 2 * (MinNumIter - 1). Distance needed for the last iteration: 4.
1957 // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
1958 //
1959 // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
1960 // 12, which is less than the distance of 14.
1961 //
1962 // If MinNumIter is 4 (Say if a user forces the vectorization factor to be 4),
1963 // the minimum distance needed is 28, which is greater than distance. It is
1964 // not safe to do vectorization.
1965 uint64_t MinDistanceNeeded =
1966 TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
1967 if (MinDistanceNeeded > static_cast<uint64_t>(Distance)) {
1968 LLVM_DEBUG(dbgs() << "LAA: Failure because of positive distance "
1969 << Distance << '\n');
1970 return Dependence::Backward;
1971 }
1972
1973 // Unsafe if the minimum distance needed is greater than max safe distance.
1974 if (MinDistanceNeeded > MaxSafeDepDistBytes) {
1975 LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
1976 << MinDistanceNeeded << " size in bytes\n");
1977 return Dependence::Backward;
1978 }
1979
1980 // Positive distance bigger than max vectorization factor.
1981 // FIXME: Should use max factor instead of max distance in bytes, which
1982 // cannot handle different types.
1983 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
1984 // void foo (int *A, char *B) {
1985 // for (unsigned i = 0; i < 1024; i++) {
1986 // A[i+2] = A[i] + 1;
1987 // B[i+2] = B[i] + 1;
1988 // }
1989 // }
1990 //
1991 // This case is currently unsafe according to the max safe distance. If we
1992 // analyze the two accesses on array B, the max safe dependence distance
1993 // is 2. Then we analyze the accesses on array A, the minimum distance needed
1994 // is 8, which is greater than 2 and forbids vectorization. But actually
1995 // both A and B could be vectorized by 2 iterations.
1996 MaxSafeDepDistBytes =
1997 std::min(static_cast<uint64_t>(Distance), MaxSafeDepDistBytes);
1998
1999 bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
2000 if (IsTrueDataDependence && EnableForwardingConflictDetection &&
2001 couldPreventStoreLoadForward(Distance, TypeByteSize))
2003
2004 uint64_t MaxVF = MaxSafeDepDistBytes / (TypeByteSize * Stride);
2005 LLVM_DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue()
2006 << " with max VF = " << MaxVF << '\n');
2007 uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
2008 MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);
2010}
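// ---------------------------------------------------------------------------
// Illustrative sketch (not part of LoopAccessAnalysis.cpp): the "minimum
// distance needed" computation above, written out standalone so the worked
// example from the comment can be checked directly. The helper name is
// hypothetical.
static bool exampleBackwardDepIsVectorizable(uint64_t Distance, uint64_t Stride,
                                             uint64_t TypeByteSize,
                                             unsigned MinNumIter) {
  // Vectorizing all but the last iteration needs TypeByteSize * Stride bytes
  // per iteration; the last iteration only needs one more element.
  uint64_t MinDistanceNeeded =
      TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
  return MinDistanceNeeded <= Distance;
}
// For the example above (4-byte int elements, stride 2, Distance = 14):
//   MinNumIter = 2 -> 4 * 2 * 1 + 4 = 12 <= 14, vectorizable;
//   MinNumIter = 4 -> 4 * 2 * 3 + 4 = 28 >  14, not vectorizable.
// ---------------------------------------------------------------------------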
2011
2012bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets,
2013 MemAccessInfoList &CheckDeps,
2014 const DenseMap<Value *, const SCEV *> &Strides) {
2015
2016 MaxSafeDepDistBytes = -1;
2018 for (MemAccessInfo CurAccess : CheckDeps) {
2019 if (Visited.count(CurAccess))
2020 continue;
2021
2022 // Get the relevant memory access set.
2024 AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));
2025
2026 // Check accesses within this set.
2028 AccessSets.member_begin(I);
2030 AccessSets.member_end();
2031
2032 // Check every access pair.
2033 while (AI != AE) {
2034 Visited.insert(*AI);
2035 bool AIIsWrite = AI->getInt();
2036 // Check loads only against the next equivalence class, but stores also
2037 // against other stores in the same equivalence class - to the same address.
2039 (AIIsWrite ? AI : std::next(AI));
2040 while (OI != AE) {
2041 // Check every accessing instruction pair in program order.
2042 for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
2043 I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
2044 // Scan all accesses of another equivalence class, but only the next
2045 // accesses of the same equivalence class.
2046 for (std::vector<unsigned>::iterator
2047 I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
2048 I2E = (OI == AI ? I1E : Accesses[*OI].end());
2049 I2 != I2E; ++I2) {
2050 auto A = std::make_pair(&*AI, *I1);
2051 auto B = std::make_pair(&*OI, *I2);
2052
2053 assert(*I1 != *I2);
2054 if (*I1 > *I2)
2055 std::swap(A, B);
2056
2058 isDependent(*A.first, A.second, *B.first, B.second, Strides);
2060
2061 // Gather dependences unless we accumulated MaxDependences
2062 // dependences. In that case return as soon as we find the first
2063 // unsafe dependence. This puts a limit on this quadratic
2064 // algorithm.
2065 if (RecordDependences) {
2066 if (Type != Dependence::NoDep)
2067 Dependences.push_back(Dependence(A.second, B.second, Type));
2068
2069 if (Dependences.size() >= MaxDependences) {
2070 RecordDependences = false;
2071 Dependences.clear();
2073 << "Too many dependences, stopped recording\n");
2074 }
2075 }
2076 if (!RecordDependences && !isSafeForVectorization())
2077 return false;
2078 }
2079 ++OI;
2080 }
2081 AI++;
2082 }
2083 }
2084
2085 LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
2086 return isSafeForVectorization();
2087}
2088
2089SmallVector<Instruction *, 4>
2090MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const {
2091 MemAccessInfo Access(Ptr, isWrite);
2092 auto &IndexVector = Accesses.find(Access)->second;
2093
2095 transform(IndexVector,
2096 std::back_inserter(Insts),
2097 [&](unsigned Idx) { return this->InstMap[Idx]; });
2098 return Insts;
2099}
2100
2102 "NoDep", "Unknown", "Forward", "ForwardButPreventsForwarding", "Backward",
2103 "BackwardVectorizable", "BackwardVectorizableButPreventsForwarding"};
2104
2106 raw_ostream &OS, unsigned Depth,
2107 const SmallVectorImpl<Instruction *> &Instrs) const {
2108 OS.indent(Depth) << DepName[Type] << ":\n";
2109 OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
2110 OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
2111}
2112
2113bool LoopAccessInfo::canAnalyzeLoop() {
2114 // We need to have a loop header.
2115 LLVM_DEBUG(dbgs() << "LAA: Found a loop in "
2116 << TheLoop->getHeader()->getParent()->getName() << ": "
2117 << TheLoop->getHeader()->getName() << '\n');
2118
2119 // We can only analyze innermost loops.
2120 if (!TheLoop->isInnermost()) {
2121 LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
2122 recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
2123 return false;
2124 }
2125
2126 // We must have a single backedge.
2127 if (TheLoop->getNumBackEdges() != 1) {
2128 LLVM_DEBUG(
2129 dbgs() << "LAA: loop control flow is not understood by analyzer\n");
2130 recordAnalysis("CFGNotUnderstood")
2131 << "loop control flow is not understood by analyzer";
2132 return false;
2133 }
2134
2135 // ScalarEvolution needs to be able to find the exit count.
2136 const SCEV *ExitCount = PSE->getBackedgeTakenCount();
2137 if (isa<SCEVCouldNotCompute>(ExitCount)) {
2138 recordAnalysis("CantComputeNumberOfIterations")
2139 << "could not determine number of loop iterations";
2140 LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
2141 return false;
2142 }
2143
2144 return true;
2145}
2146
2147void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
2148 const TargetLibraryInfo *TLI,
2149 DominatorTree *DT) {
2150 // Holds the Load and Store instructions.
2153
2154 // Holds all the different accesses in the loop.
2155 unsigned NumReads = 0;
2156 unsigned NumReadWrites = 0;
2157
2158 bool HasComplexMemInst = false;
2159
2160 // A runtime check is only legal to insert if there are no convergent calls.
2161 HasConvergentOp = false;
2162
2163 PtrRtChecking->Pointers.clear();
2164 PtrRtChecking->Need = false;
2165
2166 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
2167
2168 const bool EnableMemAccessVersioningOfLoop =
2170 !TheLoop->getHeader()->getParent()->hasOptSize();
2171
2172 // Traverse blocks in fixed RPOT order, regardless of their storage in the
2173 // loop info, as it may be arbitrary.
2174 LoopBlocksRPO RPOT(TheLoop);
2175 RPOT.perform(LI);
2176 for (BasicBlock *BB : RPOT) {
2177 // Scan the BB and collect legal loads and stores. Also detect any
2178 // convergent instructions.
2179 for (Instruction &I : *BB) {
2180 if (auto *Call = dyn_cast<CallBase>(&I)) {
2181 if (Call->isConvergent())
2182 HasConvergentOp = true;
2183 }
2184
2185 // If this loop contains both a non-vectorizable memory instruction and a
2186 // convergent operation, there is no reason to continue the search.
2187 if (HasComplexMemInst && HasConvergentOp) {
2188 CanVecMem = false;
2189 return;
2190 }
2191
2192 // Avoid hitting recordAnalysis multiple times.
2193 if (HasComplexMemInst)
2194 continue;
2195
2196 // If this is a load, save it. If this instruction can read from memory
2197 // but is not a load, then we quit. Notice that we don't handle function
2198 // calls that read or write.
2199 if (I.mayReadFromMemory()) {
2200 // Many math library functions read the rounding mode. We will only
2201 // vectorize a loop if it contains known function calls that don't set
2202 // the flag. Therefore, it is safe to ignore this read from memory.
2203 auto *Call = dyn_cast<CallInst>(&I);
2204 if (Call && getVectorIntrinsicIDForCall(Call, TLI))
2205 continue;
2206
2207 // If the function has an explicit vectorized counterpart, we can safely
2208 // assume that it can be vectorized.
2209 if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
2210 !VFDatabase::getMappings(*Call).empty())
2211 continue;
2212
2213 auto *Ld = dyn_cast<LoadInst>(&I);
2214 if (!Ld) {
2215 recordAnalysis("CantVectorizeInstruction", Ld)
2216 << "instruction cannot be vectorized";
2217 HasComplexMemInst = true;
2218 continue;
2219 }
2220 if (!Ld->isSimple() && !IsAnnotatedParallel) {
2221 recordAnalysis("NonSimpleLoad", Ld)
2222 << "read with atomic ordering or volatile read";
2223 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
2224 HasComplexMemInst = true;
2225 continue;
2226 }
2227 NumLoads++;
2228 Loads.push_back(Ld);
2229 DepChecker->addAccess(Ld);
2230 if (EnableMemAccessVersioningOfLoop)
2231 collectStridedAccess(Ld);
2232 continue;
2233 }
2234
2235 // Save 'store' instructions. Abort if other instructions write to memory.
2236 if (I.mayWriteToMemory()) {
2237 auto *St = dyn_cast<StoreInst>(&I);
2238 if (!St) {
2239 recordAnalysis("CantVectorizeInstruction", St)
2240 << "instruction cannot be vectorized";
2241 HasComplexMemInst = true;
2242 continue;
2243 }
2244 if (!St->isSimple() && !IsAnnotatedParallel) {
2245 recordAnalysis("NonSimpleStore", St)
2246 << "write with atomic ordering or volatile write";
2247 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
2248 HasComplexMemInst = true;
2249 continue;
2250 }
2251 NumStores++;
2252 Stores.push_back(St);
2253 DepChecker->addAccess(St);
2254 if (EnableMemAccessVersioningOfLoop)
2255 collectStridedAccess(St);
2256 }
2257 } // Next instr.
2258 } // Next block.
2259
2260 if (HasComplexMemInst) {
2261 CanVecMem = false;
2262 return;
2263 }
2264
2265 // Now we have two lists that hold the loads and the stores.
2266 // Next, we find the pointers that they use.
2267
2268 // Check if we see any stores. If there are no stores, then we don't
2269 // care if the pointers are *restrict*.
2270 if (!Stores.size()) {
2271 LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
2272 CanVecMem = true;
2273 return;
2274 }
2275
2276 MemoryDepChecker::DepCandidates DependentAccesses;
2277 AccessAnalysis Accesses(TheLoop, AA, LI, DependentAccesses, *PSE);
2278
2279 // Holds the analyzed pointers. We don't want to call getUnderlyingObjects
2280 // multiple times on the same object. If the ptr is accessed twice, once
2281 // for read and once for write, it will only appear once (on the write
2282 // list). This is okay, since we are going to check for conflicts between
2283 // writes and between reads and writes, but not between reads and reads.
2285
2286 // Record uniform store addresses to identify if we have multiple stores
2287 // to the same address.
2288 SmallPtrSet<Value *, 16> UniformStores;
2289
2290 for (StoreInst *ST : Stores) {
2291 Value *Ptr = ST->getPointerOperand();
2292
2293 if (isInvariant(Ptr)) {
2294 // Record store instructions to loop invariant addresses
2295 StoresToInvariantAddresses.push_back(ST);
2296 HasDependenceInvolvingLoopInvariantAddress |=
2297 !UniformStores.insert(Ptr).second;
2298 }
2299
2300 // If we did *not* see this pointer before, insert it into the read-write
2301 // list. At this phase it is only a 'write' list.
2302 Type *AccessTy = getLoadStoreType(ST);
2303 if (Seen.insert({Ptr, AccessTy}).second) {
2304 ++NumReadWrites;
2305
2306 MemoryLocation Loc = MemoryLocation::get(ST);
2307 // The TBAA metadata could have a control dependency on the predication
2308 // condition, so we cannot rely on it when determining whether or not we
2309 // need runtime pointer checks.
2310 if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
2311 Loc.AATags.TBAA = nullptr;
2312
2313 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2314 [&Accesses, AccessTy, Loc](Value *Ptr) {
2315 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2316 Accesses.addStore(NewLoc, AccessTy);
2317 });
2318 }
2319 }
2320
2321 if (IsAnnotatedParallel) {
2322 LLVM_DEBUG(
2323 dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
2324 << "checks.\n");
2325 CanVecMem = true;
2326 return;
2327 }
2328
2329 for (LoadInst *LD : Loads) {
2330 Value *Ptr = LD->getPointerOperand();
2331 // If we did *not* see this pointer before, insert it into the
2332 // read list. If we *did* see it before, then it is already in
2333 // the read-write list. This allows us to vectorize expressions
2334 // such as A[i] += x, because the address of A[i] is a read-write
2335 // pointer. This only works if the index of A[i] is consecutive.
2336 // If the address of i is unknown (for example A[B[i]]) then we may
2337 // read a few words, modify, and write a few words, and some of the
2338 // words may be written to the same address.
2339 bool IsReadOnlyPtr = false;
2340 Type *AccessTy = getLoadStoreType(LD);
2341 if (Seen.insert({Ptr, AccessTy}).second ||
2342 !getPtrStride(*PSE, LD->getType(), Ptr, TheLoop, SymbolicStrides).value_or(0)) {
2343 ++NumReads;
2344 IsReadOnlyPtr = true;
2345 }
2346
2347 // See if there is an unsafe dependency between a load from a uniform
2348 // address and a store to the same uniform address.
2349 if (UniformStores.count(Ptr)) {
2350 LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
2351 "load and uniform store to the same address!\n");
2352 HasDependenceInvolvingLoopInvariantAddress = true;
2353 }
2354
2355 MemoryLocation Loc = MemoryLocation::get(LD);
2356 // The TBAA metadata could have a control dependency on the predication
2357 // condition, so we cannot rely on it when determining whether or not we
2358 // need runtime pointer checks.
2359 if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
2360 Loc.AATags.TBAA = nullptr;
2361
2362 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2363 [&Accesses, AccessTy, Loc, IsReadOnlyPtr](Value *Ptr) {
2364 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2365 Accesses.addLoad(NewLoc, AccessTy, IsReadOnlyPtr);
2366 });
2367 }
2368
2369 // If we write (or read-write) to a single destination and there are no
2370 // other reads in this loop then it is safe to vectorize.
2371 if (NumReadWrites == 1 && NumReads == 0) {
2372 LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
2373 CanVecMem = true;
2374 return;
2375 }
2376
2377 // Build dependence sets and check whether we need a runtime pointer bounds
2378 // check.
2379 Accesses.buildDependenceSets();
2380
2381 // Find pointers with computable bounds. We are going to use this information
2382 // to place a runtime bound check.
2383 Value *UncomputablePtr = nullptr;
2384 bool CanDoRTIfNeeded =
2385 Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(), TheLoop,
2386 SymbolicStrides, UncomputablePtr, false);
2387 if (!CanDoRTIfNeeded) {
2388 auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2389 recordAnalysis("CantIdentifyArrayBounds", I)
2390 << "cannot identify array bounds";
2391 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
2392 << "the array bounds.\n");
2393 CanVecMem = false;
2394 return;
2395 }
2396
2397 LLVM_DEBUG(
2398 dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");
2399
2400 CanVecMem = true;
2401 if (Accesses.isDependencyCheckNeeded()) {
2402 LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
2403 CanVecMem = DepChecker->areDepsSafe(
2404 DependentAccesses, Accesses.getDependenciesToCheck(), SymbolicStrides);
2405 MaxSafeDepDistBytes = DepChecker->getMaxSafeDepDistBytes();
2406
2407 if (!CanVecMem && DepChecker->shouldRetryWithRuntimeCheck()) {
2408 LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");
2409
2410 // Clear the dependency checks. We assume they are not needed.
2411 Accesses.resetDepChecks(*DepChecker);
2412
2413 PtrRtChecking->reset();
2414 PtrRtChecking->Need = true;
2415
2416 auto *SE = PSE->getSE();
2417 UncomputablePtr = nullptr;
2418 CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(
2419 *PtrRtChecking, SE, TheLoop, SymbolicStrides, UncomputablePtr, true);
2420
2421 // Check that we found the bounds for the pointer.
2422 if (!CanDoRTIfNeeded) {
2423 auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2424 recordAnalysis("CantCheckMemDepsAtRunTime", I)
2425 << "cannot check memory dependencies at runtime";
2426 LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
2427 CanVecMem = false;
2428 return;
2429 }
2430
2431 CanVecMem = true;
2432 }
2433 }
2434
2435 if (HasConvergentOp) {
2436 recordAnalysis("CantInsertRuntimeCheckWithConvergent")
2437 << "cannot add control dependency to convergent operation";
2438 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
2439 "would be needed with a convergent operation\n");
2440 CanVecMem = false;
2441 return;
2442 }
2443
2444 if (CanVecMem)
2445 LLVM_DEBUG(
2446 dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
2447 << (PtrRtChecking->Need ? "" : " don't")
2448 << " need runtime memory checks.\n");
2449 else
2450 emitUnsafeDependenceRemark();
2451}
2452
2453void LoopAccessInfo::emitUnsafeDependenceRemark() {
2454 auto Deps = getDepChecker().getDependences();
2455 if (!Deps)
2456 return;
2457 auto Found = llvm::find_if(*Deps, [](const MemoryDepChecker::Dependence &D) {
2460 });
2461 if (Found == Deps->end())
2462 return;
2463 MemoryDepChecker::Dependence Dep = *Found;
2464
2465 LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
2466
2467 // Emit remark for first unsafe dependence
2469 recordAnalysis("UnsafeDep", Dep.getDestination(*this))
2470 << "unsafe dependent memory operations in loop. Use "
2471 "#pragma loop distribute(enable) to allow loop distribution "
2472 "to attempt to isolate the offending operations into a separate "
2473 "loop";
2474
2475 switch (Dep.Type) {
2479 llvm_unreachable("Unexpected dependence");
2481 R << "\nBackward loop carried data dependence.";
2482 break;
2484 R << "\nForward loop carried data dependence that prevents "
2485 "store-to-load forwarding.";
2486 break;
2488 R << "\nBackward loop carried data dependence that prevents "
2489 "store-to-load forwarding.";
2490 break;
2492 R << "\nUnknown data dependence.";
2493 break;
2494 }
2495
2496 if (Instruction *I = Dep.getSource(*this)) {
2497 DebugLoc SourceLoc = I->getDebugLoc();
2498 if (auto *DD = dyn_cast_or_null<Instruction>(getPointerOperand(I)))
2499 SourceLoc = DD->getDebugLoc();
2500 if (SourceLoc)
2501 R << " Memory location is the same as accessed at "
2502 << ore::NV("Location", SourceLoc);
2503 }
2504}
2505
2506bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
2507 DominatorTree *DT) {
2508 assert(TheLoop->contains(BB) && "Unknown block used");
2509
2510 // Blocks that do not dominate the latch need predication.
2511 BasicBlock* Latch = TheLoop->getLoopLatch();
2512 return !DT->dominates(BB, Latch);
2513}
2514
2515OptimizationRemarkAnalysis &LoopAccessInfo::recordAnalysis(StringRef RemarkName,
2516 Instruction *I) {
2517 assert(!Report && "Multiple reports generated");
2518
2519 Value *CodeRegion = TheLoop->getHeader();
2520 DebugLoc DL = TheLoop->getStartLoc();
2521
2522 if (I) {
2523 CodeRegion = I->getParent();
2524 // If there is no debug location attached to the instruction, fall back to
2525 // using the loop's.
2526 if (I->getDebugLoc())
2527 DL = I->getDebugLoc();
2528 }
2529
2530 Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName, DL,
2531 CodeRegion);
2532 return *Report;
2533}
2534
2535bool LoopAccessInfo::isInvariant(Value *V) const {
2536 auto *SE = PSE->getSE();
2537 // TODO: Is this really what we want? Even without FP SCEV, we may want some
2538 // trivially loop-invariant FP values to be considered invariant.
2539 if (!SE->isSCEVable(V->getType()))
2540 return false;
2541 const SCEV *S = SE->getSCEV(V);
2542 return SE->isLoopInvariant(S, TheLoop);
2543}
2544
2545/// Find the operand of the GEP that should be checked for consecutive
2546/// stores. This ignores trailing indices that have no effect on the final
2547/// pointer.
2548static unsigned getGEPInductionOperand(const GetElementPtrInst *Gep) {
2549 const DataLayout &DL = Gep->getModule()->getDataLayout();
2550 unsigned LastOperand = Gep->getNumOperands() - 1;
2551 TypeSize GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());
2552
2553 // Walk backwards and try to peel off zeros.
2554 while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
2555 // Find the type we're currently indexing into.
2556 gep_type_iterator GEPTI = gep_type_begin(Gep);
2557 std::advance(GEPTI, LastOperand - 2);
2558
2559 // If it's a type with the same allocation size as the result of the GEP we
2560 // can peel off the zero index.
2561 if (DL.getTypeAllocSize(GEPTI.getIndexedType()) != GEPAllocSize)
2562 break;
2563 --LastOperand;
2564 }
2565
2566 return LastOperand;
2567}
2568
2569/// If the argument is a GEP, then returns the operand identified by
2570/// getGEPInductionOperand. However, if there is some other non-loop-invariant
2571/// operand, it returns that instead.
2572static Value *stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
2573 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
2574 if (!GEP)
2575 return Ptr;
2576
2577 unsigned InductionOperand = getGEPInductionOperand(GEP);
2578
2579 // Check that all of the gep indices are uniform except for our induction
2580 // operand.
2581 for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i)
2582 if (i != InductionOperand &&
2583 !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(i)), Lp))
2584 return Ptr;
2585 return GEP->getOperand(InductionOperand);
2586}
2587
2588/// If a value has only one user that is a CastInst, return it.
2589static Value *getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) {
2590 Value *UniqueCast = nullptr;
2591 for (User *U : Ptr->users()) {
2592 CastInst *CI = dyn_cast<CastInst>(U);
2593 if (CI && CI->getType() == Ty) {
2594 if (!UniqueCast)
2595 UniqueCast = CI;
2596 else
2597 return nullptr;
2598 }
2599 }
2600 return UniqueCast;
2601}
2602
2603/// Get the stride of a pointer access in a loop. Looks for symbolic
2604/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
2605static const SCEV *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
2606 auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
2607 if (!PtrTy || PtrTy->isAggregateType())
2608 return nullptr;
2609
2610 // Try to remove a GEP instruction to make the pointer (actually, the index
2611 // at this point) easier to analyze. If OrigPtr is equal to Ptr we are
2612 // analyzing the pointer; otherwise we are analyzing the index.
2613 Value *OrigPtr = Ptr;
2614
2615 // The size of the pointer access.
2616 int64_t PtrAccessSize = 1;
2617
2618 Ptr = stripGetElementPtr(Ptr, SE, Lp);
2619 const SCEV *V = SE->getSCEV(Ptr);
2620
2621 if (Ptr != OrigPtr)
2622 // Strip off casts.
2623 while (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V))
2624 V = C->getOperand();
2625
2626 const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
2627 if (!S)
2628 return nullptr;
2629
2630 // If the pointer is invariant then there is no stride and it makes no
2631 // sense to add it here.
2632 if (Lp != S->getLoop())
2633 return nullptr;
2634
2635 V = S->getStepRecurrence(*SE);
2636 if (!V)
2637 return nullptr;
2638
2639 // Strip off the size of access multiplication if we are still analyzing the
2640 // pointer.
2641 if (OrigPtr == Ptr) {
2642 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
2643 if (M->getOperand(0)->getSCEVType() != scConstant)
2644 return nullptr;
2645
2646 const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();
2647
2648 // Huge step value - give up.
2649 if (APStepVal.getBitWidth() > 64)
2650 return nullptr;
2651
2652 int64_t StepVal = APStepVal.getSExtValue();
2653 if (PtrAccessSize != StepVal)
2654 return nullptr;
2655 V = M->getOperand(1);
2656 }
2657 }
2658
2659 // Note that the restrictions after this loop invariant check are only
2660 // profitability restrictions.
2661 if (!SE->isLoopInvariant(V, Lp))
2662 return nullptr;
2663
2664 // Look for the loop invariant symbolic value.
2665 const SCEVUnknown *U = dyn_cast<SCEVUnknown>(V);
2666 if (!U) {
2667 const auto *C = dyn_cast<SCEVIntegralCastExpr>(V);
2668 if (!C)
2669 return nullptr;
2670 U = dyn_cast<SCEVUnknown>(C->getOperand());
2671 if (!U)
2672 return nullptr;
2673
2674 // Match legacy behavior - this is not needed for correctness
2675 if (!getUniqueCastUse(U->getValue(), Lp, V->getType()))
2676 return nullptr;
2677 }
2678
2679 return V;
2680}
2681
2682void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
2683 Value *Ptr = getLoadStorePointerOperand(MemAccess);
2684 if (!Ptr)
2685 return;
2686
2687 // Note: getStrideFromPointer is a *profitability* heuristic. We
2688 // could broaden the scope of values returned here - to anything
2689 // which happens to be loop invariant and contributes to the
2690 // computation of an interesting IV - but we chose not to as we
2691 // don't have a cost model here, and broadening the scope exposes
2692 // far too many unprofitable cases.
2693 const SCEV *StrideExpr = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
2694 if (!StrideExpr)
2695 return;
2696
2697 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
2698 "versioning:");
2699 LLVM_DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *StrideExpr << "\n");
2700
2701 if (!SpeculateUnitStride) {
2702 LLVM_DEBUG(dbgs() << " Chose not to due to -laa-speculate-unit-stride\n");
2703 return;
2704 }
2705
2706 // Avoid adding the "Stride == 1" predicate when we know that
2707 // Stride >= Trip-Count. Such a predicate will effectively optimize a single
2708 // or zero iteration loop, as Trip-Count <= Stride == 1.
2709 //
2710 // TODO: We are currently not making a very informed decision on when it is
2711 // beneficial to apply stride versioning. It might make more sense that the
2712 // users of this analysis (such as the vectorizer) will trigger it, based on
2713 // their specific cost considerations; For example, in cases where stride
2714 // versioning does not help resolving memory accesses/dependences, the
2715 // vectorizer should evaluate the cost of the runtime test, and the benefit
2716 // of various possible stride specializations, considering the alternatives
2717 // of using gather/scatters (if available).
2718
2719 const SCEV *BETakenCount = PSE->getBackedgeTakenCount();
2720
2721 // Match the types so we can compare the stride and the BETakenCount.
2722 // The Stride can be positive/negative, so we sign extend Stride;
2723 // The backedgeTakenCount is non-negative, so we zero extend BETakenCount.
2724 const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout();
2725 uint64_t StrideTypeSizeBits = DL.getTypeSizeInBits(StrideExpr->getType());
2726 uint64_t BETypeSizeBits = DL.getTypeSizeInBits(BETakenCount->getType());
2727 const SCEV *CastedStride = StrideExpr;
2728 const SCEV *CastedBECount = BETakenCount;
2729 ScalarEvolution *SE = PSE->getSE();
2730 if (BETypeSizeBits >= StrideTypeSizeBits)
2731 CastedStride = SE->getNoopOrSignExtend(StrideExpr, BETakenCount->getType());
2732 else
2733 CastedBECount = SE->getZeroExtendExpr(BETakenCount, StrideExpr->getType());
2734 const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
2735 // Since TripCount == BackEdgeTakenCount + 1, checking:
2736 // "Stride >= TripCount" is equivalent to checking:
2737 // Stride - BETakenCount > 0
2738 if (SE->isKnownPositive(StrideMinusBETaken)) {
2739 LLVM_DEBUG(
2740 dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
2741 "Stride==1 predicate will imply that the loop executes "
2742 "at most once.\n");
2743 return;
2744 }
2745 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");
2746
2747 // Strip back off the integer cast, and check that our result is a
2748 // SCEVUnknown as we expect.
2749 const SCEV *StrideBase = StrideExpr;
2750 if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(StrideBase))
2751 StrideBase = C->getOperand();
2752 SymbolicStrides[Ptr] = cast<SCEVUnknown>(StrideBase);
2753}
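// ---------------------------------------------------------------------------
// Illustrative sketch (not part of LoopAccessAnalysis.cpp): the guard above
// skips versioning when Stride >= TripCount, which (since TripCount is
// BackedgeTakenCount + 1) is tested as Stride - BackedgeTakenCount > 0. With
// constant values the SCEV check reduces to this arithmetic; the helper name is
// hypothetical.
static bool exampleStrideVersioningIsPointless(int64_t Stride,
                                               uint64_t BackedgeTakenCount) {
  // If Stride >= TripCount, a "Stride == 1" predicate would restrict the loop
  // to at most one iteration, so versioning on it cannot pay off.
  return Stride > 0 && static_cast<uint64_t>(Stride) > BackedgeTakenCount;
}
// E.g. Stride = 100 with BackedgeTakenCount = 15 (16 iterations): versioning is
// pointless; Stride = 100 with BackedgeTakenCount = 1000: versioning may help.
// ---------------------------------------------------------------------------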
2754
2755LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
2756 const TargetLibraryInfo *TLI, AAResults *AA,
2757 DominatorTree *DT, LoopInfo *LI)
2758 : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
2759 PtrRtChecking(nullptr),
2760 DepChecker(std::make_unique<MemoryDepChecker>(*PSE, L)), TheLoop(L) {
2761 PtrRtChecking = std::make_unique<RuntimePointerChecking>(*DepChecker, SE);
2762 if (canAnalyzeLoop()) {
2763 analyzeLoop(AA, LI, TLI, DT);
2764 }
2765}
2766
2767void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
2768 if (CanVecMem) {
2769 OS.indent(Depth) << "Memory dependences are safe";
2770 if (MaxSafeDepDistBytes != -1ULL)
2771 OS << " with a maximum dependence distance of " << MaxSafeDepDistBytes
2772 << " bytes";
2773 if (PtrRtChecking->Need)
2774 OS << " with run-time checks";
2775 OS << "\n";
2776 }
2777
2778 if (HasConvergentOp)
2779 OS.indent(Depth) << "Has convergent operation in loop\n";
2780
2781 if (Report)
2782 OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";
2783
2784 if (auto *Dependences = DepChecker->getDependences()) {
2785 OS.indent(Depth) << "Dependences:\n";
2786 for (const auto &Dep : *Dependences) {
2787 Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
2788 OS << "\n";
2789 }
2790 } else
2791 OS.indent(Depth) << "Too many dependences, not recorded\n";
2792
2793 // List the pairs of accesses that need run-time checks to prove independence.
2794 PtrRtChecking->print(OS, Depth);
2795 OS << "\n";
2796
2797 OS.indent(Depth) << "Non vectorizable stores to invariant address were "
2798 << (HasDependenceInvolvingLoopInvariantAddress ? "" : "not ")
2799 << "found in loop.\n";
2800
2801 OS.indent(Depth) << "SCEV assumptions:\n";
2802 PSE->getPredicate().print(OS, Depth);
2803
2804 OS << "\n";
2805
2806 OS.indent(Depth) << "Expressions re-written:\n";
2807 PSE->print(OS, Depth);
2808}
2809
2810const LoopAccessInfo &LoopAccessInfoManager::getInfo(Loop &L) {
2811 auto I = LoopAccessInfoMap.insert({&L, nullptr});
2812
2813 if (I.second)
2814 I.first->second =
2815 std::make_unique<LoopAccessInfo>(&L, &SE, TLI, &AA, &DT, &LI);
2816
2817 return *I.first->second;
2818}
2819
2822}
2823
2824bool LoopAccessLegacyAnalysis::runOnFunction(Function &F) {
2825 auto &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2826 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2827 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
2828 auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2829 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2830 auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2831 LAIs = std::make_unique<LoopAccessInfoManager>(SE, AA, DT, LI, TLI);
2832 return false;
2833}
2834
2840
2841 AU.setPreservesAll();
2842}
2843
2845 Function &F, const PreservedAnalyses &PA,
2847 // Check whether our analysis is preserved.
2848 auto PAC = PA.getChecker<LoopAccessAnalysis>();
2849 if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
2850 // If not, give up now.
2851 return true;
2852
2853 // Check whether the analyses we depend on became invalid for any reason.
2854 // Skip checking TargetLibraryAnalysis as it is immutable and can't become
2855 // invalid.
2856 return Inv.invalidate<AAManager>(F, PA) ||
2858 Inv.invalidate<LoopAnalysis>(F, PA) ||
2860}
2861
2865 auto &AA = FAM.getResult<AAManager>(F);
2866 auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
2867 auto &LI = FAM.getResult<LoopAnalysis>(F);
2868 auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
2869 return LoopAccessInfoManager(SE, AA, DT, LI, &TLI);
2870}
2871
2873static const char laa_name[] = "Loop Access Analysis";
2874#define LAA_NAME "loop-accesses"
2875
2882
2884
2885namespace llvm {
2886
2888 return new LoopAccessLegacyAnalysis();
2889 }
2890
2891} // end namespace llvm
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file implements a class to represent arbitrary precision integral constant values and operations...
basic Basic Alias true
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
uint64_t Size
bool End
Definition: ELF_riscv.cpp:464
Generic implementation of equivalence classes through the use Tarjan's efficient union-find algorithm...
#define DEBUG_TYPE
Hexagon Common GEP
IRTranslator LLVM IR MI
#define Check(C,...)
Definition: Lint.cpp:168
static cl::opt< unsigned > MaxDependences("max-dependences", cl::Hidden, cl::desc("Maximum number of dependences collected by " "loop-access analysis (default = 100)"), cl::init(100))
We collect dependences up to this threshold.
static cl::opt< bool > EnableForwardingConflictDetection("store-to-load-forwarding-conflict-detection", cl::Hidden, cl::desc("Enable conflict detection in loop-access analysis"), cl::init(true))
Enable store-to-load forwarding conflict detection.
static void findForkedSCEVs(ScalarEvolution *SE, const Loop *L, Value *Ptr, SmallVectorImpl< PointerIntPair< const SCEV *, 1, bool > > &ScevList, unsigned Depth)
static bool hasComputableBounds(PredicatedScalarEvolution &PSE, Value *Ptr, const SCEV *PtrScev, Loop *L, bool Assume)
Check whether a pointer can participate in a runtime bounds check.
static cl::opt< unsigned > MemoryCheckMergeThreshold("memory-check-merge-threshold", cl::Hidden, cl::desc("Maximum number of comparisons done when trying to merge " "runtime memory checks. (default = 100)"), cl::init(100))
The maximum iterations used to merge memory checks.
static bool isNoWrap(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &Strides, Value *Ptr, Type *AccessTy, Loop *L)
Check whether a pointer address cannot wrap.
static const SCEV * getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp)
Get the stride of a pointer access in a loop.
static unsigned getGEPInductionOperand(const GetElementPtrInst *Gep)
Find the operand of the GEP that should be checked for consecutive stores.
static cl::opt< unsigned, true > VectorizationInterleave("force-vector-interleave", cl::Hidden, cl::desc("Sets the vectorization interleave count. " "Zero is autoselect."), cl::location(VectorizerParams::VectorizationInterleave))
static Value * getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty)
If a value has only one user that is a CastInst, return it.
#define LAA_NAME
static cl::opt< unsigned, true > VectorizationFactor("force-vector-width", cl::Hidden, cl::desc("Sets the SIMD width. Zero is autoselect."), cl::location(VectorizerParams::VectorizationFactor))
static const char laa_name[]
static cl::opt< unsigned, true > RuntimeMemoryCheckThreshold("runtime-memory-check-threshold", cl::Hidden, cl::desc("When performing memory disambiguation checks at runtime do not " "generate more than this number of comparisons (default = 8)."), cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8))
static void visitPointers(Value *StartPtr, const Loop &InnermostLoop, function_ref< void(Value *)> AddPointer)
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR, PredicatedScalarEvolution &PSE, const Loop *L)
Return true if an AddRec pointer Ptr is unsigned non-wrapping, i.e.
static Value * stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp)
If the argument is a GEP, then returns the operand identified by getGEPInductionOperand.
static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE, const SCEV &BackedgeTakenCount, const SCEV &Dist, uint64_t Stride, uint64_t TypeByteSize)
Given a dependence-distance Dist between two memory accesses, that have the same stride whose absolut...
static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride, uint64_t TypeByteSize)
Check the dependence for two accesses with the same stride Stride.
static const SCEV * getMinFromExprs(const SCEV *I, const SCEV *J, ScalarEvolution *SE)
Compare I and J and return the minimum.
static cl::opt< unsigned > MaxForkedSCEVDepth("max-forked-scev-depth", cl::Hidden, cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"), cl::init(5))
static cl::opt< bool > SpeculateUnitStride("laa-speculate-unit-stride", cl::Hidden, cl::desc("Speculate that non-constant strides are unit in LAA"), cl::init(true))
static SmallVector< PointerIntPair< const SCEV *, 1, bool > > findForkedPointer(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &StridesMap, Value *Ptr, const Loop *L)
static cl::opt< bool > EnableMemAccessVersioning("enable-mem-access-versioning", cl::init(true), cl::Hidden, cl::desc("Enable symbolic stride memory access versioning"))
This enables versioning on the strides of symbolically striding memory accesses in code like the foll...
This header provides classes for managing per-loop analyses.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file provides utility analysis objects describing memory locations.
uint64_t High
#define P(N)
FunctionAnalysisManager FAM
This header defines various interfaces for pass management in LLVM.
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition: PassSupport.h:55
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:59
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:52
This file defines the PointerIntPair class.
@ SI
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckType(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N, const TargetLowering *TLI, const DataLayout &DL)
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
static const X86InstrFMA3Group Groups[]
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
Class for arbitrary precision integers.
Definition: APInt.h:75
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1498
APInt abs() const
Get the absolute value.
Definition: APInt.h:1746
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1443
bool isNegative() const
Determine sign of this APInt.
Definition: APInt.h:312
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition: APInt.cpp:1001
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
Definition: APInt.h:339
int64_t getSExtValue() const
Get sign extended value.
Definition: APInt.h:1520
This templated class represents "all analyses that operate over <a particular IR unit>" (e....
Definition: PassManager.h:90
API to communicate dependencies between analyses during invalidation.
Definition: PassManager.h:661
bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA)
Trigger the invalidation of some other analysis pass if not already handled and return whether it was...
Definition: PassManager.h:679
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:620
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:774
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
AnalysisUsage & addRequiredTransitive()
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition: ArrayRef.h:202
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:163
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:158
LLVM Basic Block Representation.
Definition: BasicBlock.h:56
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:112
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Definition: BasicBlock.cpp:145
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:428
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
A debug info location.
Definition: DebugLoc.h:33
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
iterator end()
Definition: DenseMap.h:84
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:220
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:279
Legacy analysis pass which computes a DominatorTree.
Definition: Dominators.h:314
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:166
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
EquivalenceClasses - This represents a collection of equivalence classes and supports three efficient...
iterator findValue(const ElemTy &V) const
findValue - Return an iterator to the specified value.
iterator insert(const ElemTy &Data)
insert - Insert a new value into the union/find set, ignoring the request if the value already exists...
member_iterator member_end() const
typename std::set< ECValue, ECValueComparator >::const_iterator iterator
iterator* - Provides a way to iterate over all values in the set.
member_iterator member_begin(iterator I) const
member_iterator unionSets(const ElemTy &V1, const ElemTy &V2)
union - Merge the two equivalence sets for the specified values, inserting them if they do not alread...
const ElemTy & getLeaderValue(const ElemTy &V) const
getLeaderValue - Return the leader for the specified value that is in the set.
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:311
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
Definition: Function.h:649
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:940
Type * getResultElementType() const
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:290
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
Definition: Instruction.cpp:70
Class to represent integer types.
Definition: DerivedTypes.h:40
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:339
An instruction for reading from memory.
Definition: Instructions.h:177
Value * getPointerOperand()
Definition: Instructions.h:264
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
This analysis provides dependence information for the memory accesses of a loop.
Result run(Function &F, FunctionAnalysisManager &AM)
bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
const LoopAccessInfo & getInfo(Loop &L)
Drive the analysis of memory accesses in the loop.
bool isInvariant(Value *V) const
Returns true if value V is loop invariant.
void print(raw_ostream &OS, unsigned Depth=0) const
Print the information about the memory accesses in the loop.
LoopAccessInfo(Loop *L, ScalarEvolution *SE, const TargetLibraryInfo *TLI, AAResults *AA, DominatorTree *DT, LoopInfo *LI)
static bool blockNeedsPredication(BasicBlock *BB, Loop *TheLoop, DominatorTree *DT)
Return true if the block BB needs to be predicated in order for the loop to be vectorized.
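A minimal sketch, assuming a new-pass-manager function pass, of how a client obtains per-loop results from this analysis (the pass body is hypothetical and shown as a free function for brevity):
  #include "llvm/Analysis/LoopAccessAnalysis.h"
  #include "llvm/Analysis/LoopInfo.h"
  #include "llvm/IR/PassManager.h"
  #include "llvm/Support/raw_ostream.h"

  llvm::PreservedAnalyses runExample(llvm::Function &F,
                                     llvm::FunctionAnalysisManager &AM) {
    llvm::LoopAccessInfoManager &LAIs = AM.getResult<llvm::LoopAccessAnalysis>(F);
    llvm::LoopInfo &LI = AM.getResult<llvm::LoopAnalysis>(F);
    for (llvm::Loop *L : LI.getLoopsInPreorder())
      if (L->isInnermost())                          // LAA is intended for innermost loops
        LAIs.getInfo(*L).print(llvm::errs());        // dump dependences and runtime checks
    return llvm::PreservedAnalyses::all();
  }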
This analysis provides dependence information for the memory accesses of a loop.
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do their job.
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overridden by subclasses to do the per-function processing of the pass.
Analysis pass that exposes the LoopInfo for a function.
Definition: LoopInfo.h:569
bool contains(const LoopT *L) const
Return true if the specified loop is contained within this loop.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
unsigned getNumBackEdges() const
Calculate the number of back edges to the loop header.
BlockT * getHeader() const
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse post-order traversal of the blocks in a loop body.
Definition: LoopIterator.h:172
The legacy pass manager's analysis pass to compute loop information.
Definition: LoopInfo.h:594
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:47
bool isAnnotatedParallel() const
Returns true if the loop is annotated parallel.
Definition: LoopInfo.cpp:564
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
Definition: LoopInfo.cpp:631
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:37
Checks memory dependences among accesses to the same underlying object to determine whether vectorization is legal (and at which vectorization factor).
ArrayRef< unsigned > getOrderForAccess(Value *Ptr, bool IsWrite) const
Return the program order indices for the access location (Ptr, IsWrite).
const Loop * getInnermostLoop() const
bool isSafeForVectorization() const
No memory dependence was encountered that would inhibit vectorization.
bool areDepsSafe(DepCandidates &AccessSets, MemAccessInfoList &CheckDeps, const DenseMap< Value *, const SCEV * > &Strides)
Check whether the dependencies between the accesses are safe.
SmallVector< Instruction *, 4 > getInstructionsForAccess(Value *Ptr, bool isWrite) const
Find the set of instructions that read or write via Ptr.
VectorizationSafetyStatus
Type to keep track of the status of the dependence check.
void addAccess(StoreInst *SI)
Register the location (instructions are given increasing numbers) of a write access.
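A hedged sketch of consuming the dependence checker through LoopAccessInfo rather than driving areDepsSafe directly (the function name is illustrative):
  #include "llvm/Analysis/LoopAccessAnalysis.h"

  void inspectDependences(const llvm::LoopAccessInfo &LAI) {
    const llvm::MemoryDepChecker &DepChecker = LAI.getDepChecker();
    if (!DepChecker.isSafeForVectorization())
      return;                                        // some dependence inhibits vectorization
    if (const auto *Deps = DepChecker.getDependences())
      for (const llvm::MemoryDepChecker::Dependence &D : *Deps)
        (void)D.getSource(LAI);                      // source instruction of this dependence
  }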
PointerIntPair< Value *, 1, bool > MemAccessInfo
Representation for a specific memory location.
static MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
AAMDNodes AATags
The metadata nodes which describe the aliasing of the location (each member is null if that kind of metadata is unavailable).
const Value * Ptr
The address of the start of the location.
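For illustration, a small hedged example of building a MemoryLocation for a load and reading the fields listed above:
  #include "llvm/Analysis/MemoryLocation.h"
  #include "llvm/IR/Instructions.h"

  void describeLoad(const llvm::LoadInst *LI) {
    llvm::MemoryLocation Loc = llvm::MemoryLocation::get(LI);
    const llvm::Value *Addr = Loc.Ptr;    // start address of the access
    llvm::AAMDNodes Tags = Loc.AATags;    // aliasing metadata; members may be null
    (void)Addr;
    (void)Tags;
  }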
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.cpp:398
Diagnostic information for optimization analysis remarks.
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at application startup.
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:94
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of existing predicates.
void addPredicate(const SCEVPredicate &Pred)
Adds a new predicate.
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
const SCEVPredicate & getPredicate() const
bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Returns true if we've proved that V doesn't wrap by means of a SCEV predicate.
void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Proves that V doesn't overflow by adding SCEV predicate.
void print(raw_ostream &OS, unsigned Depth) const
Print the SCEV mappings done by the Predicated Scalar Evolution.
const SCEVAddRecExpr * getAsAddRec(Value *V)
Attempts to produce an AddRecExpr for V by adding additional SCEV predicates.
const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
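A minimal sketch of the PredicatedScalarEvolution workflow these members describe; the parameter names and the chosen wrap flag are illustrative:
  #include "llvm/Analysis/ScalarEvolution.h"
  #include "llvm/Analysis/ScalarEvolutionExpressions.h"

  void analyzePointer(llvm::PredicatedScalarEvolution &PSE, llvm::Value *Ptr) {
    const llvm::SCEV *PtrSCEV = PSE.getSCEV(Ptr);          // SCEV under the current predicates
    if (const llvm::SCEVAddRecExpr *AR = PSE.getAsAddRec(Ptr)) {
      (void)AR;                                            // pointer strides as an add-recurrence
      PSE.setNoOverflow(Ptr, llvm::SCEVWrapPredicate::IncrementNUSW); // record a wrap predicate
    }
    const llvm::SCEV *BTC = PSE.getBackedgeTakenCount();   // predicated backedge-taken count
    (void)PtrSCEV;
    (void)BTC;
  }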
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:152
PreservedAnalysisChecker getChecker() const
Build a checker for this PreservedAnalyses and the specified analysis type.
Definition: PassManager.h:310
Holds information about the memory runtime legality checks to verify that a group of pointers do not overlap.
bool Need
This flag indicates if we need to add the runtime check.
void reset()
Reset the state of the pointer runtime information.
unsigned getNumberOfChecks() const
Returns the number of run-time checks required according to needsChecking.
void printChecks(raw_ostream &OS, const SmallVectorImpl< RuntimePointerCheck > &Checks, unsigned Depth=0) const
Print Checks.
bool needsChecking(const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const
Decide if we need to add a check between two groups of pointers, according to needsChecking.
void print(raw_ostream &OS, unsigned Depth=0) const
Print the list of run-time memory checks necessary.
SmallVector< RuntimeCheckingPtrGroup, 2 > CheckingGroups
Holds a partitioning of pointers into "check groups".
void generateChecks(MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies)
Generate the checks and store them.
static bool arePointersInSamePartition(const SmallVectorImpl< int > &PtrToPartition, unsigned PtrIdx1, unsigned PtrIdx2)
Check if pointers are in the same partition.
SmallVector< PointerInfo, 2 > Pointers
Information about the pointers that may require checking.
void insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr, Type *AccessTy, bool WritePtr, unsigned DepSetId, unsigned ASId, PredicatedScalarEvolution &PSE, bool NeedsFreeze)
Insert a pointer and calculate the start and end SCEVs.
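An illustrative, hedged example of reading these results from a computed LoopAccessInfo (the threshold parameter is hypothetical):
  #include "llvm/Analysis/LoopAccessAnalysis.h"

  bool wantsRuntimeChecks(const llvm::LoopAccessInfo &LAI, unsigned Threshold) {
    const llvm::RuntimePointerChecking *RtChecking = LAI.getRuntimePointerChecking();
    if (!RtChecking->Need)
      return false;                                  // aliasing was disproved statically
    return RtChecking->getNumberOfChecks() <= Threshold;
  }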
This node represents a polynomial recurrence on the trip count of the specified loop.
const SCEV * evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const
Return the value of this chain of recurrences at the specified iteration number.
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
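A short hedged sketch of using an affine add-recurrence the way the bound computations in this file do (names are illustrative):
  #include "llvm/Analysis/ScalarEvolutionExpressions.h"

  const llvm::SCEV *valueAtLastIteration(const llvm::SCEVAddRecExpr *AR,
                                         const llvm::SCEV *BackedgeTakenCount,
                                         llvm::ScalarEvolution &SE) {
    if (!AR->isAffine())
      return nullptr;                                   // only handle A + B*x recurrences
    const llvm::SCEV *Step = AR->getStepRecurrence(SE); // the per-iteration increment B
    (void)Step;
    return AR->evaluateAtIteration(BackedgeTakenCount, SE); // value on the final iteration
  }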
This class represents a constant integer value.
This is the base class for unary integral cast operator classes.
This node represents multiplication of some number of SCEVs.
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
virtual void print(raw_ostream &OS, unsigned Depth=0) const =0
Prints a textual representation of this predicate with an indentation of Depth.
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM Value.
This class represents an analyzed expression in the program.
Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
The main scalar evolution driver.
const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
const SCEV * getUMaxExpr(const SCEV *LHS, const SCEV *RHS)
const SCEVPredicate * getEqualPredicate(const SCEV *LHS, const SCEV *RHS)
const SCEV * getConstant(ConstantInt *V)
const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
const SCEV * getPtrToIntExpr(const SCEV *Op, Type *Ty)
bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
bool isKnownPositive(const SCEV *S)
Test if the given expression is known to be positive.
const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the given type, for which isSCEVable must return true.
const SCEV * getUMinExpr(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
const SCEV * getStoreSizeOfExpr(Type *IntTy, Type *StoreTy)
Return an expression for the store size of StoreTy that is type IntTy.
const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
const SCEV * getSizeOfExpr(Type *IntTy, TypeSize Size)
Return an expression for a TypeSize.
const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
const SCEV * getTruncateOrSignExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
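A hedged sketch of composing these builder calls into an access upper bound, roughly in the spirit of the runtime-check insertion; it assumes Start, Step and BTC are already integer SCEVs of type IntTy:
  #include "llvm/Analysis/ScalarEvolution.h"

  const llvm::SCEV *accessEnd(llvm::ScalarEvolution &SE, const llvm::SCEV *Start,
                              const llvm::SCEV *Step, const llvm::SCEV *BTC,
                              llvm::Type *IntTy, llvm::Type *StoreTy) {
    const llvm::SCEV *Span = SE.getMulExpr(Step, BTC);              // distance covered by the loop
    const llvm::SCEV *Size = SE.getStoreSizeOfExpr(IntTy, StoreTy); // size of one access
    return SE.getAddExpr(SE.getAddExpr(Start, Span), Size);         // one-past-the-end bound
  }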
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:383
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:365
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:450
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less than N).
Definition: SmallSet.h:135
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:166
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
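For reference, a one-line hedged sketch of the visited-set idiom these containers enable (wrapper name hypothetical):
  #include "llvm/ADT/SmallPtrSet.h"
  #include "llvm/IR/Value.h"

  // insert().second is true only the first time a value is seen.
  bool markVisited(llvm::SmallPtrSet<const llvm::Value *, 16> &Visited,
                   const llvm::Value *V) {
    return Visited.insert(V).second;
  }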
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
Definition: SmallVector.h:577
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:941
void resize(size_type N)
Definition: SmallVector.h:642
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
An instruction for storing to memory.
Definition: Instructions.h:301
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition: StringRef.h:50
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:265
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:256
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
Value * getOperand(unsigned i) const
Definition: User.h:169
unsigned getNumOperands() const
Definition: User.h:191
static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)
Retrieve all the VFInfo instances associated to the CallInst CI.
Definition: VectorUtils.h:266
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to false.
Definition: Value.h:724
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:182
An efficient, type-erasing, non-owning reference to a callable.
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
raw_ostream & indent(unsigned NumSpaces)
indent - Insert 'NumSpaces' spaces.
friend const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:236
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
Definition: PatternMatch.h:537
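A tiny hedged example of the PatternMatch idiom referenced here (function name hypothetical):
  #include "llvm/IR/PatternMatch.h"

  bool isZeroLike(llvm::Value *V) {
    using namespace llvm::PatternMatch;
    return match(V, m_Zero());   // null constants and vectors with all elements equal to 0
  }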
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:445
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:465
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
std::optional< int > getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB, Value *PtrB, const DataLayout &DL, ScalarEvolution &SE, bool StrictCheck=false, bool CheckType=true)
Returns the distance between the pointers PtrA and PtrB iff they are compatible and it is possible to compute the distance between them.
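A hedged usage sketch; the helper name is hypothetical, and a difference of exactly one element means PtrB immediately follows PtrA:
  #include <optional>
  #include "llvm/Analysis/LoopAccessAnalysis.h"

  bool areAdjacent(llvm::Type *ElemTy, llvm::Value *PtrA, llvm::Value *PtrB,
                   const llvm::DataLayout &DL, llvm::ScalarEvolution &SE) {
    std::optional<int> Diff = llvm::getPointersDiff(ElemTy, PtrA, ElemTy, PtrB, DL, SE);
    return Diff && *Diff == 1;                 // PtrB is one ElemTy element past PtrA
  }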
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
@ Offset
Definition: DWP.cpp:440
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1819
Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
AddressSpace
Definition: NVPTXBaseInfo.h:21
void initializeLoopAccessLegacyAnalysisPass(PassRegistry &)
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
Definition: STLExtras.h:2025
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1826
Pass * createLAAPass()
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an address space.
Definition: Function.cpp:2102
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
std::optional< int64_t > getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr, const Loop *Lp, const DenseMap< Value *, const SCEV * > &StridesMap=DenseMap< Value *, const SCEV * >(), bool Assume=false, bool ShouldCheckWrap=true)
If the pointer has a constant stride return it in units of the access type size.
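An illustrative, hedged use of getPtrStride; a stride of +1 access-type unit is the consecutive case the vectorizer looks for (wrapper name hypothetical):
  #include <optional>
  #include "llvm/Analysis/LoopAccessAnalysis.h"

  bool hasUnitStride(llvm::PredicatedScalarEvolution &PSE, llvm::Type *AccessTy,
                     llvm::Value *Ptr, const llvm::Loop *Lp) {
    std::optional<int64_t> Stride = llvm::getPtrStride(PSE, AccessTy, Ptr, Lp);
    return Stride && *Stride == 1;             // std::nullopt means no constant stride was found
  }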
bool sortPtrAccesses(ArrayRef< Value * > VL, Type *ElemTy, const DataLayout &DL, ScalarEvolution &SE, SmallVectorImpl< unsigned > &SortedIndices)
Attempt to sort the pointers in VL and return the sorted indices in SortedIndices, if reordering is required.
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instructions and return multiple objects.
const SCEV * replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &PtrToStride, Value *Ptr)
Return the SCEV corresponding to a pointer with the symbolic stride replaced with constant one, assuming the SCEV predicate associated with PSE is true.
bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL, ScalarEvolution &SE, bool CheckType=true)
Returns true if the memory operations A and B are consecutive.
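Hedged sketches of the two helpers above (the wrapper names are hypothetical): sortPtrAccesses orders a bundle of raw pointers by address, while isConsecutiveAccess answers the pairwise question for two memory instructions:
  #include "llvm/Analysis/LoopAccessAnalysis.h"
  #include "llvm/IR/Instructions.h"

  // Order a bundle of pointers by address; on success Order[i] is the i-th smallest.
  bool orderBundle(llvm::ArrayRef<llvm::Value *> Ptrs, llvm::Type *ElemTy,
                   const llvm::DataLayout &DL, llvm::ScalarEvolution &SE,
                   llvm::SmallVectorImpl<unsigned> &Order) {
    return llvm::sortPtrAccesses(Ptrs, ElemTy, DL, SE, Order);
  }

  // Pairwise check for two memory instructions (loads or stores).
  bool loadsAreConsecutive(llvm::LoadInst *L0, llvm::LoadInst *L1,
                           const llvm::DataLayout &DL, llvm::ScalarEvolution &SE) {
    return llvm::isConsecutiveAccess(L0, L1, DL, SE, /*CheckType=*/true);
  }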
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
OutputIt copy(R &&Range, OutputIt Out)
Definition: STLExtras.h:1921
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1846
gep_type_iterator gep_type_begin(const User *GEP)
Type * getLoadStoreType(Value *I)
A helper function that returns the type of a load or store instruction.
Definition: BitVector.h:858
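A small hedged example combining the load/store helper functions listed in this index:
  #include "llvm/IR/Instructions.h"

  void describeAccess(llvm::Instruction *I) {
    if (const llvm::Value *Addr = llvm::getLoadStorePointerOperand(I)) { // null for non-memory ops
      llvm::Type *AccessTy = llvm::getLoadStoreType(I);  // type actually read or written
      (void)Addr;
      (void)AccessTy;
    }
  }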
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
MDNode * TBAA
The tag for type-based alias analysis.
Definition: Metadata.h:668
A special type used by analysis passes to provide an address that identifies that particular analysis pass type.
Definition: PassManager.h:69
Dependence between memory access instructions.
DepType Type
The type of the dependence.
bool isPossiblyBackward() const
May be a lexically backward dependence type (includes Unknown).
bool isForward() const
Lexically forward dependence.
bool isBackward() const
Lexically backward dependence.
void print(raw_ostream &OS, unsigned Depth, const SmallVectorImpl< Instruction * > &Instrs) const
Print the dependence.
Instruction * getDestination(const LoopAccessInfo &LAI) const
Return the destination instruction of the dependence.
Instruction * getSource(const LoopAccessInfo &LAI) const
Return the source instruction of the dependence.
DepType
The type of the dependence.
static const char * DepName[]
String version of the types.
static VectorizationSafetyStatus isSafeForVectorization(DepType Type)
Dependence types that don't prevent vectorization.
unsigned AddressSpace
Address space of the involved pointers.
bool addPointer(unsigned Index, RuntimePointerChecking &RtCheck)
Tries to add the pointer recorded in RtCheck at index Index to this pointer checking group.
bool NeedsFreeze
Whether the pointer needs to be frozen after expansion, e.g. because it may be poison outside the loop.
const SCEV * High
The SCEV expression which represents the upper bound of all the pointers in this group.
SmallVector< unsigned, 2 > Members
Indices of all the pointers that constitute this grouping.
RuntimeCheckingPtrGroup(unsigned Index, RuntimePointerChecking &RtCheck)
Create a new pointer checking group containing a single pointer, with index Index in RtCheck.
const SCEV * Low
The SCEV expression which represents the lower bound of all the pointers in this group.
bool IsWritePtr
Holds the information if this pointer is used for writing to memory.
unsigned DependencySetId
Holds the id of the set of pointers that could be dependent because of a shared underlying object.
unsigned AliasSetId
Holds the id of the disjoint alias set to which this pointer belongs.
static const unsigned MaxVectorWidth
Maximum SIMD width.
static unsigned VectorizationFactor
VF as overridden by the user.
static unsigned RuntimeMemoryCheckThreshold
When performing memory disambiguation checks at runtime, do not make more than this number of comparisons.
static bool isInterleaveForced()
True if force-vector-interleave was specified by the user.
static unsigned VectorizationInterleave
Interleave factor as overridden by the user.
Function object to check whether the first component of a container supported by std::get (like std::pair and std::tuple) compares less than the first component of another container.
Definition: STLExtras.h:1537