1//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// The implementation of the loop memory dependence analysis that was
10// originally developed for the loop vectorizer.
11//
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/APInt.h"
16#include "llvm/ADT/DenseMap.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/SetVector.h"
22#include "llvm/ADT/SmallSet.h"
37#include "llvm/IR/BasicBlock.h"
38#include "llvm/IR/Constants.h"
39#include "llvm/IR/DataLayout.h"
40#include "llvm/IR/DebugLoc.h"
43#include "llvm/IR/Dominators.h"
44#include "llvm/IR/Function.h"
46#include "llvm/IR/InstrTypes.h"
47#include "llvm/IR/Instruction.h"
49#include "llvm/IR/Operator.h"
50#include "llvm/IR/PassManager.h"
52#include "llvm/IR/Type.h"
53#include "llvm/IR/Value.h"
54#include "llvm/IR/ValueHandle.h"
57#include "llvm/Support/Debug.h"
60#include <algorithm>
61#include <cassert>
62#include <cstdint>
63#include <iterator>
64#include <utility>
65#include <variant>
66#include <vector>
67
68using namespace llvm;
69using namespace llvm::PatternMatch;
70
71#define DEBUG_TYPE "loop-accesses"
72
73static cl::opt<unsigned, true>
74VectorizationFactor("force-vector-width", cl::Hidden,
75 cl::desc("Sets the SIMD width. Zero is autoselect."),
78
79static cl::opt<unsigned, true>
80VectorizationInterleave("force-vector-interleave", cl::Hidden,
81 cl::desc("Sets the vectorization interleave count. "
82 "Zero is autoselect."),
86
88 "runtime-memory-check-threshold", cl::Hidden,
89 cl::desc("When performing memory disambiguation checks at runtime do not "
90 "generate more than this number of comparisons (default = 8)."),
93
94/// The maximum iterations used to merge memory checks
96 "memory-check-merge-threshold", cl::Hidden,
97 cl::desc("Maximum number of comparisons done when trying to merge "
98 "runtime memory checks. (default = 100)"),
99 cl::init(100));
100
101/// Maximum SIMD width.
102const unsigned VectorizerParams::MaxVectorWidth = 64;
103
104/// We collect dependences up to this threshold.
105static cl::opt<unsigned>
106 MaxDependences("max-dependences", cl::Hidden,
107 cl::desc("Maximum number of dependences collected by "
108 "loop-access analysis (default = 100)"),
109 cl::init(100));
110
111/// This enables versioning on the strides of symbolically striding memory
112/// accesses in code like the following.
113/// for (i = 0; i < N; ++i)
114/// A[i * Stride1] += B[i * Stride2] ...
115///
116/// Will be roughly translated to
117/// if (Stride1 == 1 && Stride2 == 1) {
118/// for (i = 0; i < N; i+=4)
119/// A[i:i+3] += ...
120/// } else
121/// ...
123 "enable-mem-access-versioning", cl::init(true), cl::Hidden,
124 cl::desc("Enable symbolic stride memory access versioning"));
125
126/// Enable store-to-load forwarding conflict detection. This option can
127/// be disabled for correctness testing.
129 "store-to-load-forwarding-conflict-detection", cl::Hidden,
130 cl::desc("Enable conflict detection in loop-access analysis"),
131 cl::init(true));
132
134 "max-forked-scev-depth", cl::Hidden,
135 cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"),
136 cl::init(5));
137
139 "laa-speculate-unit-stride", cl::Hidden,
140 cl::desc("Speculate that non-constant strides are unit in LAA"),
141 cl::init(true));
142
144 "hoist-runtime-checks", cl::Hidden,
145 cl::desc(
146 "Hoist inner loop runtime memory checks to outer loop if possible"),
149
150bool VectorizerParams::isInterleaveForced() {
151 return ::VectorizationInterleave.getNumOccurrences() > 0;
152}
153
154const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
155 const DenseMap<Value *, const SCEV *> &PtrToStride,
156 Value *Ptr) {
157 const SCEV *OrigSCEV = PSE.getSCEV(Ptr);
158
159 // If there is an entry in the map return the SCEV of the pointer with the
160 // symbolic stride replaced by one.
161 DenseMap<Value *, const SCEV *>::const_iterator SI = PtrToStride.find(Ptr);
162 if (SI == PtrToStride.end())
163 // For a non-symbolic stride, just return the original expression.
164 return OrigSCEV;
165
166 const SCEV *StrideSCEV = SI->second;
167 // Note: This assert is both overly strong and overly weak. The actual
168 // invariant here is that StrideSCEV should be loop invariant. The only
169 // such invariant strides we happen to speculate right now are unknowns
170 // and thus this is a reasonable proxy of the actual invariant.
171 assert(isa<SCEVUnknown>(StrideSCEV) && "shouldn't be in map");
172
173 ScalarEvolution *SE = PSE.getSE();
174 const auto *CT = SE->getOne(StrideSCEV->getType());
175 PSE.addPredicate(*SE->getEqualPredicate(StrideSCEV, CT));
176 auto *Expr = PSE.getSCEV(Ptr);
177
178 LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
179 << " by: " << *Expr << "\n");
180 return Expr;
181}
182
183RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
184 unsigned Index, RuntimePointerChecking &RtCheck)
185 : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),
186 AddressSpace(RtCheck.Pointers[Index]
187 .PointerValue->getType()
188 ->getPointerAddressSpace()),
189 NeedsFreeze(RtCheck.Pointers[Index].NeedsFreeze) {
190 Members.push_back(Index);
191}
192
193/// Calculate Start and End points of memory access.
194/// Let's assume A is the first access and B is a memory access on N-th loop
195/// iteration. Then B is calculated as:
196/// B = A + Step*N .
197/// Step value may be positive or negative.
198/// N is a calculated back-edge taken count:
199/// N = (TripCount > 0) ? RoundDown(TripCount - 1, VF) : 0
200/// Start and End points are calculated in the following way:
201/// Start = UMIN(A, B) ; End = UMAX(A, B) + SizeOfElt,
202/// where SizeOfElt is the size of single memory access in bytes.
203///
204/// There is no conflict when the intervals are disjoint:
205/// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
206void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
207 Type *AccessTy, bool WritePtr,
208 unsigned DepSetId, unsigned ASId,
209 PredicatedScalarEvolution &PSE,
210 bool NeedsFreeze) {
211 ScalarEvolution *SE = PSE.getSE();
212
213 const SCEV *ScStart;
214 const SCEV *ScEnd;
215
216 if (SE->isLoopInvariant(PtrExpr, Lp)) {
217 ScStart = ScEnd = PtrExpr;
218 } else {
219 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr);
220 assert(AR && "Invalid addrec expression");
221 const SCEV *Ex = PSE.getBackedgeTakenCount();
222
223 ScStart = AR->getStart();
224 ScEnd = AR->evaluateAtIteration(Ex, *SE);
225 const SCEV *Step = AR->getStepRecurrence(*SE);
226
227 // For expressions with negative step, the upper bound is ScStart and the
228 // lower bound is ScEnd.
229 if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
230 if (CStep->getValue()->isNegative())
231 std::swap(ScStart, ScEnd);
232 } else {
233 // Fallback case: the step is not constant, but we can still
234 // get the upper and lower bounds of the interval by using min/max
235 // expressions.
236 ScStart = SE->getUMinExpr(ScStart, ScEnd);
237 ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
238 }
239 }
240 assert(SE->isLoopInvariant(ScStart, Lp) && "ScStart needs to be invariant");
241 assert(SE->isLoopInvariant(ScEnd, Lp) && "ScEnd needs to be invariant");
242
243 // Add the size of the pointed element to ScEnd.
244 auto &DL = Lp->getHeader()->getModule()->getDataLayout();
245 Type *IdxTy = DL.getIndexType(Ptr->getType());
246 const SCEV *EltSizeSCEV = SE->getStoreSizeOfExpr(IdxTy, AccessTy);
247 ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);
248
249 Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, PtrExpr,
250 NeedsFreeze);
251}
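// Illustrative worked example of the interval computation above (not part of
// the upstream implementation): assume a 4-byte access A[i] with ScStart = %A,
// Step = +4 and a backedge-taken count of 99. Then:
//   B     = %A + 4 * 99
//   Start = umin(%A, B)     = %A
//   End   = umax(%A, B) + 4 = %A + 400
// Two such intervals need a runtime check unless
//   (P2.Start >= P1.End) || (P1.Start >= P2.End) holds.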
252
253bool RuntimePointerChecking::tryToCreateDiffCheck(
254 const RuntimeCheckingPtrGroup &CGI, const RuntimeCheckingPtrGroup &CGJ) {
255 // If either group contains multiple different pointers, bail out.
256 // TODO: Support multiple pointers by using the minimum or maximum pointer,
257 // depending on src & sink.
258 if (CGI.Members.size() != 1 || CGJ.Members.size() != 1)
259 return false;
260
261 PointerInfo *Src = &Pointers[CGI.Members[0]];
262 PointerInfo *Sink = &Pointers[CGJ.Members[0]];
263
264 // If either pointer is read and written, multiple checks may be needed. Bail
265 // out.
266 if (!DC.getOrderForAccess(Src->PointerValue, !Src->IsWritePtr).empty() ||
267 !DC.getOrderForAccess(Sink->PointerValue, !Sink->IsWritePtr).empty())
268 return false;
269
270 ArrayRef<unsigned> AccSrc =
271 DC.getOrderForAccess(Src->PointerValue, Src->IsWritePtr);
272 ArrayRef<unsigned> AccSink =
273 DC.getOrderForAccess(Sink->PointerValue, Sink->IsWritePtr);
274 // If either pointer is accessed multiple times, there may not be a clear
275 // src/sink relation. Bail out for now.
276 if (AccSrc.size() != 1 || AccSink.size() != 1)
277 return false;
278
279 // If the sink is accessed before src, swap src/sink.
280 if (AccSink[0] < AccSrc[0])
281 std::swap(Src, Sink);
282
283 auto *SrcAR = dyn_cast<SCEVAddRecExpr>(Src->Expr);
284 auto *SinkAR = dyn_cast<SCEVAddRecExpr>(Sink->Expr);
285 if (!SrcAR || !SinkAR || SrcAR->getLoop() != DC.getInnermostLoop() ||
286 SinkAR->getLoop() != DC.getInnermostLoop())
287 return false;
288
289 SmallVector<Instruction *, 4> SrcInsts =
290 DC.getInstructionsForAccess(Src->PointerValue, Src->IsWritePtr);
291 SmallVector<Instruction *, 4> SinkInsts =
292 DC.getInstructionsForAccess(Sink->PointerValue, Sink->IsWritePtr);
293 Type *SrcTy = getLoadStoreType(SrcInsts[0]);
294 Type *DstTy = getLoadStoreType(SinkInsts[0]);
295 if (isa<ScalableVectorType>(SrcTy) || isa<ScalableVectorType>(DstTy))
296 return false;
297
298 const DataLayout &DL =
299 SinkAR->getLoop()->getHeader()->getModule()->getDataLayout();
300 unsigned AllocSize =
301 std::max(DL.getTypeAllocSize(SrcTy), DL.getTypeAllocSize(DstTy));
302
303 // Only constant steps matching the AllocSize are supported at the
304 // moment. This simplifies the difference computation. Can be extended in the
305 // future.
306 auto *Step = dyn_cast<SCEVConstant>(SinkAR->getStepRecurrence(*SE));
307 if (!Step || Step != SrcAR->getStepRecurrence(*SE) ||
308 Step->getAPInt().abs() != AllocSize)
309 return false;
310
311 IntegerType *IntTy =
312 IntegerType::get(Src->PointerValue->getContext(),
313 DL.getPointerSizeInBits(CGI.AddressSpace));
314
315 // When counting down, the dependence distance needs to be swapped.
316 if (Step->getValue()->isNegative())
317 std::swap(SinkAR, SrcAR);
318
319 const SCEV *SinkStartInt = SE->getPtrToIntExpr(SinkAR->getStart(), IntTy);
320 const SCEV *SrcStartInt = SE->getPtrToIntExpr(SrcAR->getStart(), IntTy);
321 if (isa<SCEVCouldNotCompute>(SinkStartInt) ||
322 isa<SCEVCouldNotCompute>(SrcStartInt))
323 return false;
324
325 const Loop *InnerLoop = SrcAR->getLoop();
326 // If the start values for both Src and Sink also vary according to an outer
327 // loop, then it's probably better to avoid creating diff checks because
328 // they may not be hoisted. We should instead let llvm::addRuntimeChecks
329 // do the expanded full range overlap checks, which can be hoisted.
330 if (HoistRuntimeChecks && InnerLoop->getParentLoop() &&
331 isa<SCEVAddRecExpr>(SinkStartInt) && isa<SCEVAddRecExpr>(SrcStartInt)) {
332 auto *SrcStartAR = cast<SCEVAddRecExpr>(SrcStartInt);
333 auto *SinkStartAR = cast<SCEVAddRecExpr>(SinkStartInt);
334 const Loop *StartARLoop = SrcStartAR->getLoop();
335 if (StartARLoop == SinkStartAR->getLoop() &&
336 StartARLoop == InnerLoop->getParentLoop() &&
337 // If the diff check would already be loop invariant (due to the
338 // recurrences being the same), then we prefer to keep the diff checks
339 // because they are cheaper.
340 SrcStartAR->getStepRecurrence(*SE) !=
341 SinkStartAR->getStepRecurrence(*SE)) {
342 LLVM_DEBUG(dbgs() << "LAA: Not creating diff runtime check, since these "
343 "cannot be hoisted out of the outer loop\n");
344 return false;
345 }
346 }
347
348 LLVM_DEBUG(dbgs() << "LAA: Creating diff runtime check for:\n"
349 << "SrcStart: " << *SrcStartInt << '\n'
350 << "SinkStartInt: " << *SinkStartInt << '\n');
351 DiffChecks.emplace_back(SrcStartInt, SinkStartInt, AllocSize,
352 Src->NeedsFreeze || Sink->NeedsFreeze);
353 return true;
354}
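// For illustration (a sketch, not upstream code): a loop like
//   for (i = 0; i < n; ++i)
//     A[i] = B[i] * 2;
// has a single read and a single write with identical constant steps equal to
// the element size, so instead of a full (Start, End) overlap check per pair
// the vectorizer can emit one cheaper check based on the pointer difference,
// roughly "abs(A - B) >= VF * sizeof(element)".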
355
356SmallVector<RuntimePointerCheck, 4> RuntimePointerChecking::generateChecks() {
357 SmallVector<RuntimePointerCheck, 4> Checks;
358
359 for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
360 for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
361 const RuntimeCheckingPtrGroup &CGI = CheckingGroups[I];
362 const RuntimeCheckingPtrGroup &CGJ = CheckingGroups[J];
363
364 if (needsChecking(CGI, CGJ)) {
365 CanUseDiffCheck = CanUseDiffCheck && tryToCreateDiffCheck(CGI, CGJ);
366 Checks.push_back(std::make_pair(&CGI, &CGJ));
367 }
368 }
369 }
370 return Checks;
371}
372
373void RuntimePointerChecking::generateChecks(
374 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
375 assert(Checks.empty() && "Checks is not empty");
376 groupChecks(DepCands, UseDependencies);
377 Checks = generateChecks();
378}
379
381 const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
382 for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I)
383 for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J)
384 if (needsChecking(M.Members[I], N.Members[J]))
385 return true;
386 return false;
387}
388
389/// Compare \p I and \p J and return the minimum.
390/// Return nullptr in case we couldn't find an answer.
391static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
392 ScalarEvolution *SE) {
393 const SCEV *Diff = SE->getMinusSCEV(J, I);
394 const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff);
395
396 if (!C)
397 return nullptr;
398 if (C->getValue()->isNegative())
399 return J;
400 return I;
401}
402
404 RuntimePointerChecking &RtCheck) {
405 return addPointer(
406 Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,
407 RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
408 RtCheck.Pointers[Index].NeedsFreeze, *RtCheck.SE);
409}
410
412 const SCEV *End, unsigned AS,
413 bool NeedsFreeze,
414 ScalarEvolution &SE) {
415 assert(AddressSpace == AS &&
416 "all pointers in a checking group must be in the same address space");
417
418 // Compare the starts and ends with the known minimum and maximum
419 // of this set. We need to know how we compare against the min/max
420 // of the set in order to be able to emit memchecks.
421 const SCEV *Min0 = getMinFromExprs(Start, Low, &SE);
422 if (!Min0)
423 return false;
424
425 const SCEV *Min1 = getMinFromExprs(End, High, &SE);
426 if (!Min1)
427 return false;
428
429 // Update the low bound expression if we've found a new min value.
430 if (Min0 == Start)
431 Low = Start;
432
433 // Update the high bound expression if we've found a new max value.
434 if (Min1 != End)
435 High = End;
436
438 this->NeedsFreeze |= NeedsFreeze;
439 return true;
440}
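// Worked example (illustrative only): suppose the group currently spans
// [Low = %A + 16, High = %A + 64] and we add a pointer with
// [Start = %A, End = %A + 32]. Both differences are constants, so the pointer
// is merged and the group becomes [Low = %A, High = %A + 64].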
441
442void RuntimePointerChecking::groupChecks(
443 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
444 // We build the groups from dependency candidates equivalence classes
445 // because:
446 // - We know that pointers in the same equivalence class share
447 // the same underlying object and therefore there is a chance
448 // that we can compare pointers
449 // - We wouldn't be able to merge two pointers for which we need
450 // to emit a memcheck. The classes in DepCands are already
451 // conveniently built such that no two pointers in the same
452 // class need checking against each other.
453
454 // We use the following (greedy) algorithm to construct the groups
455 // For every pointer in the equivalence class:
456 // For each existing group:
457 // - if the difference between this pointer and the min/max bounds
458 // of the group is a constant, then make the pointer part of the
459 // group and update the min/max bounds of that group as required.
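  //
  // For example (illustrative only), pointers with expressions %A, %A + 400
  // and %A + 800 all differ from each other by constants and end up in one
  // group whose bounds are [%A, %A + 800 + ElementSize), whereas a pointer
  // like %A + %i * %m (non-constant difference) starts its own group.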
460
461 CheckingGroups.clear();
462
463 // If we need to check two pointers to the same underlying object
464 // with a non-constant difference, we shouldn't perform any pointer
465 // grouping with those pointers. This is because we can easily get
466 // into cases where the resulting check would return false, even when
467 // the accesses are safe.
468 //
469 // The following example shows this:
470 // for (i = 0; i < 1000; ++i)
471 // a[5000 + i * m] = a[i] + a[i + 9000]
472 //
473 // Here grouping gives a check of (5000, 5000 + 1000 * m) against
474 // (0, 10000) which is always false. However, if m is 1, there is no
475 // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
476 // us to perform an accurate check in this case.
477 //
478 // The above case requires that we have an UnknownDependence between
479 // accesses to the same underlying object. This cannot happen unless
480 // FoundNonConstantDistanceDependence is set, and therefore UseDependencies
481 // is also false. In this case we will use the fallback path and create
482 // separate checking groups for all pointers.
483
484 // If we don't have the dependency partitions, construct a new
485 // checking pointer group for each pointer. This is also required
486 // for correctness, because in this case we can have checking between
487 // pointers to the same underlying object.
488 if (!UseDependencies) {
489 for (unsigned I = 0; I < Pointers.size(); ++I)
490 CheckingGroups.push_back(RuntimeCheckingPtrGroup(I, *this));
491 return;
492 }
493
494 unsigned TotalComparisons = 0;
495
497 for (unsigned Index = 0; Index < Pointers.size(); ++Index) {
498 auto Iter = PositionMap.insert({Pointers[Index].PointerValue, {}});
499 Iter.first->second.push_back(Index);
500 }
501
502 // We need to keep track of what pointers we've already seen so we
503 // don't process them twice.
505
506 // Go through all equivalence classes, get the "pointer check groups"
507 // and add them to the overall solution. We use the order in which accesses
508 // appear in 'Pointers' to enforce determinism.
509 for (unsigned I = 0; I < Pointers.size(); ++I) {
510 // We've seen this pointer before, and therefore already processed
511 // its equivalence class.
512 if (Seen.count(I))
513 continue;
514
515 MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
516 Pointers[I].IsWritePtr);
517
519 auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));
520
521 // Because DepCands is constructed by visiting accesses in the order in
522 // which they appear in alias sets (which is deterministic) and the
523 // iteration order within an equivalence class member is only dependent on
524 // the order in which unions and insertions are performed on the
525 // equivalence class, the iteration order is deterministic.
526 for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
527 MI != ME; ++MI) {
528 auto PointerI = PositionMap.find(MI->getPointer());
529 assert(PointerI != PositionMap.end() &&
530 "pointer in equivalence class not found in PositionMap");
531 for (unsigned Pointer : PointerI->second) {
532 bool Merged = false;
533 // Mark this pointer as seen.
534 Seen.insert(Pointer);
535
536 // Go through all the existing sets and see if we can find one
537 // which can include this pointer.
538 for (RuntimeCheckingPtrGroup &Group : Groups) {
539 // Don't perform more than a certain amount of comparisons.
540 // This should limit the cost of grouping the pointers to something
541 // reasonable. If we do end up hitting this threshold, the algorithm
542 // will create separate groups for all remaining pointers.
543 if (TotalComparisons > MemoryCheckMergeThreshold)
544 break;
545
546 TotalComparisons++;
547
548 if (Group.addPointer(Pointer, *this)) {
549 Merged = true;
550 break;
551 }
552 }
553
554 if (!Merged)
555 // We couldn't add this pointer to any existing set or the threshold
556 // for the number of comparisons has been reached. Create a new group
557 // to hold the current pointer.
558 Groups.push_back(RuntimeCheckingPtrGroup(Pointer, *this));
559 }
560 }
561
562 // We've computed the grouped checks for this partition.
563 // Save the results and continue with the next one.
564 llvm::copy(Groups, std::back_inserter(CheckingGroups));
565 }
566}
567
569 const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
570 unsigned PtrIdx2) {
571 return (PtrToPartition[PtrIdx1] != -1 &&
572 PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
573}
574
575bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
576 const PointerInfo &PointerI = Pointers[I];
577 const PointerInfo &PointerJ = Pointers[J];
578
579 // No need to check if two readonly pointers intersect.
580 if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
581 return false;
582
583 // Only need to check pointers between two different dependency sets.
584 if (PointerI.DependencySetId == PointerJ.DependencySetId)
585 return false;
586
587 // Only need to check pointers in the same alias set.
588 if (PointerI.AliasSetId != PointerJ.AliasSetId)
589 return false;
590
591 return true;
592}
593
596 unsigned Depth) const {
597 unsigned N = 0;
598 for (const auto &Check : Checks) {
599 const auto &First = Check.first->Members, &Second = Check.second->Members;
600
601 OS.indent(Depth) << "Check " << N++ << ":\n";
602
603 OS.indent(Depth + 2) << "Comparing group (" << Check.first << "):\n";
604 for (unsigned K = 0; K < First.size(); ++K)
605 OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n";
606
607 OS.indent(Depth + 2) << "Against group (" << Check.second << "):\n";
608 for (unsigned K = 0; K < Second.size(); ++K)
609 OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n";
610 }
611}
612
614
615 OS.indent(Depth) << "Run-time memory checks:\n";
616 printChecks(OS, Checks, Depth);
617
618 OS.indent(Depth) << "Grouped accesses:\n";
619 for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
620 const auto &CG = CheckingGroups[I];
621
622 OS.indent(Depth + 2) << "Group " << &CG << ":\n";
623 OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
624 << ")\n";
625 for (unsigned J = 0; J < CG.Members.size(); ++J) {
626 OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr
627 << "\n";
628 }
629 }
630}
631
632namespace {
633
634/// Analyses memory accesses in a loop.
635///
636/// Checks whether run time pointer checks are needed and builds sets for data
637/// dependence checking.
638class AccessAnalysis {
639public:
640 /// Read or write access location.
641 typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
642 typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;
643
644 AccessAnalysis(Loop *TheLoop, AAResults *AA, LoopInfo *LI,
647 SmallPtrSetImpl<MDNode *> &LoopAliasScopes)
648 : TheLoop(TheLoop), BAA(*AA), AST(BAA), LI(LI), DepCands(DA), PSE(PSE),
649 LoopAliasScopes(LoopAliasScopes) {
650 // We're analyzing dependences across loop iterations.
651 BAA.enableCrossIterationMode();
652 }
653
654 /// Register a load and whether it is only read from.
655 void addLoad(MemoryLocation &Loc, Type *AccessTy, bool IsReadOnly) {
656 Value *Ptr = const_cast<Value *>(Loc.Ptr);
657 AST.add(adjustLoc(Loc));
658 Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy);
659 if (IsReadOnly)
660 ReadOnlyPtr.insert(Ptr);
661 }
662
663 /// Register a store.
664 void addStore(MemoryLocation &Loc, Type *AccessTy) {
665 Value *Ptr = const_cast<Value *>(Loc.Ptr);
666 AST.add(adjustLoc(Loc));
667 Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy);
668 }
669
670 /// Check if we can emit a run-time no-alias check for \p Access.
671 ///
672 /// Returns true if we can emit a run-time no alias check for \p Access.
673 /// If we can check this access, this also adds it to a dependence set and
674 /// adds a run-time check for it to \p RtCheck. If \p Assume is true,
675 /// we will attempt to use additional run-time checks in order to get
676 /// the bounds of the pointer.
677 bool createCheckForAccess(RuntimePointerChecking &RtCheck,
678 MemAccessInfo Access, Type *AccessTy,
679 const DenseMap<Value *, const SCEV *> &Strides,
681 Loop *TheLoop, unsigned &RunningDepId,
682 unsigned ASId, bool ShouldCheckStride, bool Assume);
683
684 /// Check whether we can check the pointers at runtime for
685 /// non-intersection.
686 ///
687 /// Returns true if we need no check or if we do and we can generate them
688 /// (i.e. the pointers have computable bounds).
689 bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
690 Loop *TheLoop, const DenseMap<Value *, const SCEV *> &Strides,
691 Value *&UncomputablePtr, bool ShouldCheckWrap = false);
692
693 /// Goes over all memory accesses, checks whether a RT check is needed
694 /// and builds sets of dependent accesses.
695 void buildDependenceSets() {
696 processMemAccesses();
697 }
698
699 /// Initial processing of memory accesses determined that we need to
700 /// perform dependency checking.
701 ///
702 /// Note that this can later be cleared if we retry memcheck analysis without
703 /// dependency checking (i.e. FoundNonConstantDistanceDependence).
704 bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }
705
706 /// We decided that no dependence analysis would be used. Reset the state.
707 void resetDepChecks(MemoryDepChecker &DepChecker) {
708 CheckDeps.clear();
709 DepChecker.clearDependences();
710 }
711
712 MemAccessInfoList &getDependenciesToCheck() { return CheckDeps; }
713
716 return UnderlyingObjects;
717 }
718
719private:
721
722 /// Adjust the MemoryLocation so that it represents accesses to this
723 /// location across all iterations, rather than a single one.
724 MemoryLocation adjustLoc(MemoryLocation Loc) const {
725 // The accessed location varies within the loop, but remains within the
726 // underlying object.
728 Loc.AATags.Scope = adjustAliasScopeList(Loc.AATags.Scope);
729 Loc.AATags.NoAlias = adjustAliasScopeList(Loc.AATags.NoAlias);
730 return Loc;
731 }
732
733 /// Drop alias scopes that are only valid within a single loop iteration.
734 MDNode *adjustAliasScopeList(MDNode *ScopeList) const {
735 if (!ScopeList)
736 return nullptr;
737
738 // For the sake of simplicity, drop the whole scope list if any scope is
739 // iteration-local.
740 if (any_of(ScopeList->operands(), [&](Metadata *Scope) {
741 return LoopAliasScopes.contains(cast<MDNode>(Scope));
742 }))
743 return nullptr;
744
745 return ScopeList;
746 }
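  // For illustration (assumed IR shape, not upstream code): a scope declared
  // inside the loop body via
  //   call void @llvm.experimental.noalias.scope.decl(metadata !scope)
  // only constrains aliasing within a single iteration, so any location
  // carrying that scope has it dropped here before cross-iteration queries.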
747
748 /// Go over all memory access and check whether runtime pointer checks
749 /// are needed and build sets of dependency check candidates.
750 void processMemAccesses();
751
752 /// Map of all accesses. Values are the types used to access memory pointed to
753 /// by the pointer.
754 PtrAccessMap Accesses;
755
756 /// The loop being checked.
757 const Loop *TheLoop;
758
759 /// List of accesses that need a further dependence check.
760 MemAccessInfoList CheckDeps;
761
762 /// Set of pointers that are read only.
763 SmallPtrSet<Value*, 16> ReadOnlyPtr;
764
765 /// Batched alias analysis results.
766 BatchAAResults BAA;
767
768 /// An alias set tracker to partition the access set by underlying object and
769 /// intrinsic property (such as TBAA metadata).
770 AliasSetTracker AST;
771
772 LoopInfo *LI;
773
774 /// Sets of potentially dependent accesses - members of one set share an
775 /// underlying pointer. The set "CheckDeps" identifies which sets really need a
776 /// dependence check.
777 MemoryDepChecker::DepCandidates &DepCands;
778
779 /// Initial processing of memory accesses determined that we may need
780 /// to add memchecks. Perform the analysis to determine the necessary checks.
781 ///
782 /// Note that, this is different from isDependencyCheckNeeded. When we retry
783 /// memcheck analysis without dependency checking
784 /// (i.e. FoundNonConstantDistanceDependence), isDependencyCheckNeeded is
785 /// cleared while this remains set if we have potentially dependent accesses.
786 bool IsRTCheckAnalysisNeeded = false;
787
788 /// The SCEV predicate containing all the SCEV-related assumptions.
790
792
793 /// Alias scopes that are declared inside the loop, and as such not valid
794 /// across iterations.
795 SmallPtrSetImpl<MDNode *> &LoopAliasScopes;
796};
797
798} // end anonymous namespace
799
800/// Check whether a pointer can participate in a runtime bounds check.
801/// If \p Assume, try harder to prove that we can compute the bounds of \p Ptr
802/// by adding run-time checks (overflow checks) if necessary.
803static bool hasComputableBounds(PredicatedScalarEvolution &PSE, Value *Ptr,
804 const SCEV *PtrScev, Loop *L, bool Assume) {
805 // The bounds for a loop-invariant pointer are trivial.
806 if (PSE.getSE()->isLoopInvariant(PtrScev, L))
807 return true;
808
809 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
810
811 if (!AR && Assume)
812 AR = PSE.getAsAddRec(Ptr);
813
814 if (!AR)
815 return false;
816
817 return AR->isAffine();
818}
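// Example (illustrative): a pointer whose SCEV is the affine AddRec
// {%A,+,4}<%loop> has computable bounds over the whole loop, so a runtime
// check can be emitted for it. A pointer reloaded from memory on every
// iteration has no such AddRec form and fails this test unless PSE can add a
// predicate that makes it one.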
819
820/// Check whether a pointer address cannot wrap.
821static bool isNoWrap(PredicatedScalarEvolution &PSE,
822 const DenseMap<Value *, const SCEV *> &Strides, Value *Ptr, Type *AccessTy,
823 Loop *L) {
824 const SCEV *PtrScev = PSE.getSCEV(Ptr);
825 if (PSE.getSE()->isLoopInvariant(PtrScev, L))
826 return true;
827
828 int64_t Stride = getPtrStride(PSE, AccessTy, Ptr, L, Strides).value_or(0);
829 if (Stride == 1 || PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
830 return true;
831
832 return false;
833}
834
835static void visitPointers(Value *StartPtr, const Loop &InnermostLoop,
836 function_ref<void(Value *)> AddPointer) {
838 SmallVector<Value *> WorkList;
839 WorkList.push_back(StartPtr);
840
841 while (!WorkList.empty()) {
842 Value *Ptr = WorkList.pop_back_val();
843 if (!Visited.insert(Ptr).second)
844 continue;
845 auto *PN = dyn_cast<PHINode>(Ptr);
846 // SCEV does not look through non-header PHIs inside the loop. Such phis
847 // can be analyzed by adding separate accesses for each incoming pointer
848 // value.
849 if (PN && InnermostLoop.contains(PN->getParent()) &&
850 PN->getParent() != InnermostLoop.getHeader()) {
851 for (const Use &Inc : PN->incoming_values())
852 WorkList.push_back(Inc);
853 } else
854 AddPointer(Ptr);
855 }
856}
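// Example of the non-header phi case handled above (illustrative IR sketch):
//
//   if.then:                                      ; inside the loop
//     br label %latch
//   if.else:
//     br label %latch
//   latch:
//     %p = phi ptr [ %a, %if.then ], [ %b, %if.else ]
//     %v = load i32, ptr %p
//
// SCEV cannot look through %p, so visitPointers registers %a and %b as two
// separate accesses instead.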
857
858// Walk back through the IR for a pointer, looking for a select like the
859// following:
860//
861// %offset = select i1 %cmp, i64 %a, i64 %b
862// %addr = getelementptr double, double* %base, i64 %offset
863// %ld = load double, double* %addr, align 8
864//
865// We won't be able to form a single SCEVAddRecExpr from this since the
866// address for each loop iteration depends on %cmp. We could potentially
867// produce multiple valid SCEVAddRecExprs, though, and check all of them for
868// memory safety/aliasing if needed.
869//
870// If we encounter some IR we don't yet handle, or something obviously fine
871// like a constant, then we just add the SCEV for that term to the list passed
872// in by the caller. If we have a node that may potentially yield a valid
873// SCEVAddRecExpr then we decompose it into parts and build the SCEV terms
874// ourselves before adding to the list.
875static void findForkedSCEVs(
876 ScalarEvolution *SE, const Loop *L, Value *Ptr,
878 unsigned Depth) {
879 // If our Value is a SCEVAddRecExpr, loop invariant, not an instruction, or
880 // we've exceeded our limit on recursion, just return whatever we have
881 // regardless of whether it can be used for a forked pointer or not, along
882 // with an indication of whether it might be a poison or undef value.
883 const SCEV *Scev = SE->getSCEV(Ptr);
884 if (isa<SCEVAddRecExpr>(Scev) || L->isLoopInvariant(Ptr) ||
885 !isa<Instruction>(Ptr) || Depth == 0) {
886 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
887 return;
888 }
889
890 Depth--;
891
892 auto UndefPoisonCheck = [](PointerIntPair<const SCEV *, 1, bool> S) {
893 return get<1>(S);
894 };
895
896 auto GetBinOpExpr = [&SE](unsigned Opcode, const SCEV *L, const SCEV *R) {
897 switch (Opcode) {
898 case Instruction::Add:
899 return SE->getAddExpr(L, R);
900 case Instruction::Sub:
901 return SE->getMinusSCEV(L, R);
902 default:
903 llvm_unreachable("Unexpected binary operator when walking ForkedPtrs");
904 }
905 };
906
907 Instruction *I = cast<Instruction>(Ptr);
908 unsigned Opcode = I->getOpcode();
909 switch (Opcode) {
910 case Instruction::GetElementPtr: {
911 GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
912 Type *SourceTy = GEP->getSourceElementType();
913 // We only handle base + single offset GEPs here for now.
914 // Not dealing with preexisting gathers yet, so no vectors.
915 if (I->getNumOperands() != 2 || SourceTy->isVectorTy()) {
916 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(GEP));
917 break;
918 }
921 findForkedSCEVs(SE, L, I->getOperand(0), BaseScevs, Depth);
922 findForkedSCEVs(SE, L, I->getOperand(1), OffsetScevs, Depth);
923
924 // See if we need to freeze our fork...
925 bool NeedsFreeze = any_of(BaseScevs, UndefPoisonCheck) ||
926 any_of(OffsetScevs, UndefPoisonCheck);
927
928 // Check that we only have a single fork, on either the base or the offset.
929 // Copy the SCEV across for the one without a fork in order to generate
930 // the full SCEV for both sides of the GEP.
931 if (OffsetScevs.size() == 2 && BaseScevs.size() == 1)
932 BaseScevs.push_back(BaseScevs[0]);
933 else if (BaseScevs.size() == 2 && OffsetScevs.size() == 1)
934 OffsetScevs.push_back(OffsetScevs[0]);
935 else {
936 ScevList.emplace_back(Scev, NeedsFreeze);
937 break;
938 }
939
940 // Find the pointer type we need to extend to.
941 Type *IntPtrTy = SE->getEffectiveSCEVType(
942 SE->getSCEV(GEP->getPointerOperand())->getType());
943
944 // Find the size of the type being pointed to. We only have a single
945 // index term (guarded above) so we don't need to index into arrays or
946 // structures, just get the size of the scalar value.
947 const SCEV *Size = SE->getSizeOfExpr(IntPtrTy, SourceTy);
948
949 // Scale up the offsets by the size of the type, then add to the bases.
950 const SCEV *Scaled1 = SE->getMulExpr(
951 Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[0]), IntPtrTy));
952 const SCEV *Scaled2 = SE->getMulExpr(
953 Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[1]), IntPtrTy));
954 ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[0]), Scaled1),
955 NeedsFreeze);
956 ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[1]), Scaled2),
957 NeedsFreeze);
958 break;
959 }
960 case Instruction::Select: {
962 // A select means we've found a forked pointer, but we currently only
963 // support a single select per pointer so if there's another behind this
964 // then we just bail out and return the generic SCEV.
965 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
966 findForkedSCEVs(SE, L, I->getOperand(2), ChildScevs, Depth);
967 if (ChildScevs.size() == 2) {
968 ScevList.push_back(ChildScevs[0]);
969 ScevList.push_back(ChildScevs[1]);
970 } else
971 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
972 break;
973 }
974 case Instruction::PHI: {
976 // A phi means we've found a forked pointer, but we currently only
977 // support a single phi per pointer so if there's another behind this
978 // then we just bail out and return the generic SCEV.
979 if (I->getNumOperands() == 2) {
980 findForkedSCEVs(SE, L, I->getOperand(0), ChildScevs, Depth);
981 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
982 }
983 if (ChildScevs.size() == 2) {
984 ScevList.push_back(ChildScevs[0]);
985 ScevList.push_back(ChildScevs[1]);
986 } else
987 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
988 break;
989 }
990 case Instruction::Add:
991 case Instruction::Sub: {
994 findForkedSCEVs(SE, L, I->getOperand(0), LScevs, Depth);
995 findForkedSCEVs(SE, L, I->getOperand(1), RScevs, Depth);
996
997 // See if we need to freeze our fork...
998 bool NeedsFreeze =
999 any_of(LScevs, UndefPoisonCheck) || any_of(RScevs, UndefPoisonCheck);
1000
1001 // Check that we only have a single fork, on either the left or right side.
1002 // Copy the SCEV across for the one without a fork in order to generate
1003 // the full SCEV for both sides of the BinOp.
1004 if (LScevs.size() == 2 && RScevs.size() == 1)
1005 RScevs.push_back(RScevs[0]);
1006 else if (RScevs.size() == 2 && LScevs.size() == 1)
1007 LScevs.push_back(LScevs[0]);
1008 else {
1009 ScevList.emplace_back(Scev, NeedsFreeze);
1010 break;
1011 }
1012
1013 ScevList.emplace_back(
1014 GetBinOpExpr(Opcode, get<0>(LScevs[0]), get<0>(RScevs[0])),
1015 NeedsFreeze);
1016 ScevList.emplace_back(
1017 GetBinOpExpr(Opcode, get<0>(LScevs[1]), get<0>(RScevs[1])),
1018 NeedsFreeze);
1019 break;
1020 }
1021 default:
1022 // Just return the current SCEV if we haven't handled the instruction yet.
1023 LLVM_DEBUG(dbgs() << "ForkedPtr unhandled instruction: " << *I << "\n");
1024 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
1025 break;
1026 }
1027}
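// Continuing the select example from the comment above (illustrative): with
//   %offset = select i1 %cmp, i64 %a, i64 %b
//   %addr   = getelementptr double, ptr %base, i64 %offset
// the GetElementPtr case produces two SCEV candidates, roughly
//   (%base + 8 * %a) and (%base + 8 * %b),
// each of which can then be checked for bounds and aliasing on its own.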
1028
1031 const DenseMap<Value *, const SCEV *> &StridesMap, Value *Ptr,
1032 const Loop *L) {
1033 ScalarEvolution *SE = PSE.getSE();
1034 assert(SE->isSCEVable(Ptr->getType()) && "Value is not SCEVable!");
1036 findForkedSCEVs(SE, L, Ptr, Scevs, MaxForkedSCEVDepth);
1037
1038 // For now, we will only accept a forked pointer with two possible SCEVs
1039 // that are either SCEVAddRecExprs or loop invariant.
1040 if (Scevs.size() == 2 &&
1041 (isa<SCEVAddRecExpr>(get<0>(Scevs[0])) ||
1042 SE->isLoopInvariant(get<0>(Scevs[0]), L)) &&
1043 (isa<SCEVAddRecExpr>(get<0>(Scevs[1])) ||
1044 SE->isLoopInvariant(get<0>(Scevs[1]), L))) {
1045 LLVM_DEBUG(dbgs() << "LAA: Found forked pointer: " << *Ptr << "\n");
1046 LLVM_DEBUG(dbgs() << "\t(1) " << *get<0>(Scevs[0]) << "\n");
1047 LLVM_DEBUG(dbgs() << "\t(2) " << *get<0>(Scevs[1]) << "\n");
1048 return Scevs;
1049 }
1050
1051 return {{replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false}};
1052}
1053
1054bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
1055 MemAccessInfo Access, Type *AccessTy,
1056 const DenseMap<Value *, const SCEV *> &StridesMap,
1058 Loop *TheLoop, unsigned &RunningDepId,
1059 unsigned ASId, bool ShouldCheckWrap,
1060 bool Assume) {
1061 Value *Ptr = Access.getPointer();
1062
1064 findForkedPointer(PSE, StridesMap, Ptr, TheLoop);
1065
1066 for (auto &P : TranslatedPtrs) {
1067 const SCEV *PtrExpr = get<0>(P);
1068 if (!hasComputableBounds(PSE, Ptr, PtrExpr, TheLoop, Assume))
1069 return false;
1070
1071 // When we run after a failing dependency check we have to make sure
1072 // we don't have wrapping pointers.
1073 if (ShouldCheckWrap) {
1074 // Skip wrap checking when translating pointers.
1075 if (TranslatedPtrs.size() > 1)
1076 return false;
1077
1078 if (!isNoWrap(PSE, StridesMap, Ptr, AccessTy, TheLoop)) {
1079 auto *Expr = PSE.getSCEV(Ptr);
1080 if (!Assume || !isa<SCEVAddRecExpr>(Expr))
1081 return false;
1083 }
1084 }
1085 // If there's only one option for Ptr, look it up after bounds and wrap
1086 // checking, because assumptions might have been added to PSE.
1087 if (TranslatedPtrs.size() == 1)
1088 TranslatedPtrs[0] = {replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr),
1089 false};
1090 }
1091
1092 for (auto [PtrExpr, NeedsFreeze] : TranslatedPtrs) {
1093 // The id of the dependence set.
1094 unsigned DepId;
1095
1096 if (isDependencyCheckNeeded()) {
1097 Value *Leader = DepCands.getLeaderValue(Access).getPointer();
1098 unsigned &LeaderId = DepSetId[Leader];
1099 if (!LeaderId)
1100 LeaderId = RunningDepId++;
1101 DepId = LeaderId;
1102 } else
1103 // Each access has its own dependence set.
1104 DepId = RunningDepId++;
1105
1106 bool IsWrite = Access.getInt();
1107 RtCheck.insert(TheLoop, Ptr, PtrExpr, AccessTy, IsWrite, DepId, ASId, PSE,
1108 NeedsFreeze);
1109 LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
1110 }
1111
1112 return true;
1113}
1114
1115bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
1116 ScalarEvolution *SE, Loop *TheLoop,
1117 const DenseMap<Value *, const SCEV *> &StridesMap,
1118 Value *&UncomputablePtr, bool ShouldCheckWrap) {
1119 // Find pointers with computable bounds. We are going to use this information
1120 // to place a runtime bound check.
1121 bool CanDoRT = true;
1122
1123 bool MayNeedRTCheck = false;
1124 if (!IsRTCheckAnalysisNeeded) return true;
1125
1126 bool IsDepCheckNeeded = isDependencyCheckNeeded();
1127
1128 // We assign a consecutive id to accesses from different alias sets.
1129 // Accesses between different groups don't need to be checked.
1130 unsigned ASId = 0;
1131 for (auto &AS : AST) {
1132 int NumReadPtrChecks = 0;
1133 int NumWritePtrChecks = 0;
1134 bool CanDoAliasSetRT = true;
1135 ++ASId;
1136 auto ASPointers = AS.getPointers();
1137
1138 // We assign consecutive ids to accesses from different dependence sets.
1139 // Accesses within the same set don't need a runtime check.
1140 unsigned RunningDepId = 1;
1142
1144
1145 // First, count how many write and read accesses are in the alias set. Also
1146 // collect MemAccessInfos for later.
1148 for (const Value *Ptr_ : ASPointers) {
1149 Value *Ptr = const_cast<Value *>(Ptr_);
1150 bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
1151 if (IsWrite)
1152 ++NumWritePtrChecks;
1153 else
1154 ++NumReadPtrChecks;
1155 AccessInfos.emplace_back(Ptr, IsWrite);
1156 }
1157
1158 // We do not need runtime checks for this alias set, if there are no writes
1159 // or a single write and no reads.
1160 if (NumWritePtrChecks == 0 ||
1161 (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
1162 assert((ASPointers.size() <= 1 ||
1163 all_of(ASPointers,
1164 [this](const Value *Ptr) {
1165 MemAccessInfo AccessWrite(const_cast<Value *>(Ptr),
1166 true);
1167 return DepCands.findValue(AccessWrite) == DepCands.end();
1168 })) &&
1169 "Can only skip updating CanDoRT below, if all entries in AS "
1170 "are reads or there is at most 1 entry");
1171 continue;
1172 }
1173
1174 for (auto &Access : AccessInfos) {
1175 for (const auto &AccessTy : Accesses[Access]) {
1176 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1177 DepSetId, TheLoop, RunningDepId, ASId,
1178 ShouldCheckWrap, false)) {
1179 LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
1180 << *Access.getPointer() << '\n');
1181 Retries.push_back({Access, AccessTy});
1182 CanDoAliasSetRT = false;
1183 }
1184 }
1185 }
1186
1187 // Note that this function computes CanDoRT and MayNeedRTCheck
1188 // independently. For example CanDoRT=false, MayNeedRTCheck=false means that
1189 // we have a pointer for which we couldn't find the bounds but we don't
1190 // actually need to emit any checks so it does not matter.
1191 //
1192 // We need runtime checks for this alias set, if there are at least 2
1193 // dependence sets (in which case RunningDepId > 2) or if we need to re-try
1194 // any bound checks (because in that case the number of dependence sets is
1195 // incomplete).
1196 bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();
1197
1198 // We need to perform run-time alias checks, but some pointers had bounds
1199 // that couldn't be checked.
1200 if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
1201 // Reset the CanDoSetRt flag and retry all accesses that have failed.
1202 // We know that we need these checks, so we can now be more aggressive
1203 // and add further checks if required (overflow checks).
1204 CanDoAliasSetRT = true;
1205 for (auto Retry : Retries) {
1206 MemAccessInfo Access = Retry.first;
1207 Type *AccessTy = Retry.second;
1208 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1209 DepSetId, TheLoop, RunningDepId, ASId,
1210 ShouldCheckWrap, /*Assume=*/true)) {
1211 CanDoAliasSetRT = false;
1212 UncomputablePtr = Access.getPointer();
1213 break;
1214 }
1215 }
1216 }
1217
1218 CanDoRT &= CanDoAliasSetRT;
1219 MayNeedRTCheck |= NeedsAliasSetRTCheck;
1220 ++ASId;
1221 }
1222
1223 // If the pointers that we would use for the bounds comparison have different
1224 // address spaces, assume the values aren't directly comparable, so we can't
1225 // use them for the runtime check. We also have to assume they could
1226 // overlap. In the future there should be metadata for whether address spaces
1227 // are disjoint.
1228 unsigned NumPointers = RtCheck.Pointers.size();
1229 for (unsigned i = 0; i < NumPointers; ++i) {
1230 for (unsigned j = i + 1; j < NumPointers; ++j) {
1231 // Only need to check pointers between two different dependency sets.
1232 if (RtCheck.Pointers[i].DependencySetId ==
1233 RtCheck.Pointers[j].DependencySetId)
1234 continue;
1235 // Only need to check pointers in the same alias set.
1236 if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
1237 continue;
1238
1239 Value *PtrI = RtCheck.Pointers[i].PointerValue;
1240 Value *PtrJ = RtCheck.Pointers[j].PointerValue;
1241
1242 unsigned ASi = PtrI->getType()->getPointerAddressSpace();
1243 unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
1244 if (ASi != ASj) {
1245 LLVM_DEBUG(
1246 dbgs() << "LAA: Runtime check would require comparison between"
1247 " different address spaces\n");
1248 return false;
1249 }
1250 }
1251 }
1252
1253 if (MayNeedRTCheck && CanDoRT)
1254 RtCheck.generateChecks(DepCands, IsDepCheckNeeded);
1255
1256 LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
1257 << " pointer comparisons.\n");
1258
1259 // If we can do run-time checks, but there are no checks, no runtime checks
1260 // are needed. This can happen when all pointers point to the same underlying
1261 // object for example.
1262 RtCheck.Need = CanDoRT ? RtCheck.getNumberOfChecks() != 0 : MayNeedRTCheck;
1263
1264 bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
1265 if (!CanDoRTIfNeeded)
1266 RtCheck.reset();
1267 return CanDoRTIfNeeded;
1268}
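// Illustrative summary (not upstream code): for
//   for (i) A[i] = B[i] + C[i];
// A, B and C typically land in the same alias set but different dependence
// sets, so checks are generated for the pairs (A,B) and (A,C) but not for the
// read-only pair (B,C); if any required pointer has uncomputable bounds even
// with added predicates, the function reports that checks cannot be generated.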
1269
1270void AccessAnalysis::processMemAccesses() {
1271 // We process the set twice: first we process read-write pointers, last we
1272 // process read-only pointers. This allows us to skip dependence tests for
1273 // read-only pointers.
1274
1275 LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
1276 LLVM_DEBUG(dbgs() << " AST: "; AST.dump());
1277 LLVM_DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
1278 LLVM_DEBUG({
1279 for (auto A : Accesses)
1280 dbgs() << "\t" << *A.first.getPointer() << " ("
1281 << (A.first.getInt()
1282 ? "write"
1283 : (ReadOnlyPtr.count(A.first.getPointer()) ? "read-only"
1284 : "read"))
1285 << ")\n";
1286 });
1287
1288 // The AliasSetTracker has nicely partitioned our pointers by metadata
1289 // compatibility and potential for underlying-object overlap. As a result, we
1290 // only need to check for potential pointer dependencies within each alias
1291 // set.
1292 for (const auto &AS : AST) {
1293 // Note that both the alias-set tracker and the alias sets themselves use
1294 // ordered collections internally and so the iteration order here is
1295 // deterministic.
1296 auto ASPointers = AS.getPointers();
1297
1298 bool SetHasWrite = false;
1299
1300 // Map of pointers to last access encountered.
1301 typedef DenseMap<const Value*, MemAccessInfo> UnderlyingObjToAccessMap;
1302 UnderlyingObjToAccessMap ObjToLastAccess;
1303
1304 // Set of access to check after all writes have been processed.
1305 PtrAccessMap DeferredAccesses;
1306
1307 // Iterate over each alias set twice, once to process read/write pointers,
1308 // and then to process read-only pointers.
1309 for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
1310 bool UseDeferred = SetIteration > 0;
1311 PtrAccessMap &S = UseDeferred ? DeferredAccesses : Accesses;
1312
1313 for (const Value *Ptr_ : ASPointers) {
1314 Value *Ptr = const_cast<Value *>(Ptr_);
1315
1316 // For a single memory access in AliasSetTracker, Accesses may contain
1317 // both read and write, and they both need to be handled for CheckDeps.
1318 for (const auto &AC : S) {
1319 if (AC.first.getPointer() != Ptr)
1320 continue;
1321
1322 bool IsWrite = AC.first.getInt();
1323
1324 // If we're using the deferred access set, then it contains only
1325 // reads.
1326 bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
1327 if (UseDeferred && !IsReadOnlyPtr)
1328 continue;
1329 // Otherwise, the pointer must be in the PtrAccessSet, either as a
1330 // read or a write.
1331 assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
1332 S.count(MemAccessInfo(Ptr, false))) &&
1333 "Alias-set pointer not in the access set?");
1334
1335 MemAccessInfo Access(Ptr, IsWrite);
1336 DepCands.insert(Access);
1337
1338 // Memorize read-only pointers for later processing and skip them in
1339 // the first round (they need to be checked after we have seen all
1340 // write pointers). Note: we also mark pointers that are not
1341 // consecutive as "read-only" pointers (so that we check
1342 // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
1343 if (!UseDeferred && IsReadOnlyPtr) {
1344 // We only use the pointer keys, the types vector values don't
1345 // matter.
1346 DeferredAccesses.insert({Access, {}});
1347 continue;
1348 }
1349
1350 // If this is a write - check other reads and writes for conflicts. If
1351 // this is a read only check other writes for conflicts (but only if
1352 // there is no other write to the ptr - this is an optimization to
1353 // catch "a[i] = a[i] + " without having to do a dependence check).
1354 if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
1355 CheckDeps.push_back(Access);
1356 IsRTCheckAnalysisNeeded = true;
1357 }
1358
1359 if (IsWrite)
1360 SetHasWrite = true;
1361
1362 // Create sets of pointers connected by a shared alias set and
1363 // underlying object.
1364 typedef SmallVector<const Value *, 16> ValueVector;
1365 ValueVector TempObjects;
1366
1367 UnderlyingObjects[Ptr] = {};
1368 SmallVector<const Value *, 16> &UOs = UnderlyingObjects[Ptr];
1369 ::getUnderlyingObjects(Ptr, UOs, LI);
1371 << "Underlying objects for pointer " << *Ptr << "\n");
1372 for (const Value *UnderlyingObj : UOs) {
1373 // nullptr never aliases anything; don't join sets for pointers that have "null"
1374 // in their UnderlyingObjects list.
1375 if (isa<ConstantPointerNull>(UnderlyingObj) &&
1377 TheLoop->getHeader()->getParent(),
1378 UnderlyingObj->getType()->getPointerAddressSpace()))
1379 continue;
1380
1381 UnderlyingObjToAccessMap::iterator Prev =
1382 ObjToLastAccess.find(UnderlyingObj);
1383 if (Prev != ObjToLastAccess.end())
1384 DepCands.unionSets(Access, Prev->second);
1385
1386 ObjToLastAccess[UnderlyingObj] = Access;
1387 LLVM_DEBUG(dbgs() << " " << *UnderlyingObj << "\n");
1388 }
1389 }
1390 }
1391 }
1392 }
1393}
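// Example of the two-pass processing above (illustrative): in
//   for (i) a[i] = b[i] + c[i];
// the pointers b and c are only read, so they are deferred to the second pass
// and only considered against the write to a; no read-vs-read dependence test
// is ever queued for the (b, c) pair.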
1394
1395/// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
1396/// i.e. monotonically increasing/decreasing.
1397static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
1398 PredicatedScalarEvolution &PSE, const Loop *L) {
1399
1400 // FIXME: This should probably only return true for NUW.
1401 if (AR->getNoWrapFlags(SCEV::NoWrapMask))
1402 return true;
1403
1404 if (PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
1405 return true;
1406
1407 // Scalar evolution does not propagate the non-wrapping flags to values that
1408 // are derived from a non-wrapping induction variable because non-wrapping
1409 // could be flow-sensitive.
1410 //
1411 // Look through the potentially overflowing instruction to try to prove
1412 // non-wrapping for the *specific* value of Ptr.
1413
1414 // The arithmetic implied by an inbounds GEP can't overflow.
1415 auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
1416 if (!GEP || !GEP->isInBounds())
1417 return false;
1418
1419 // Make sure there is only one non-const index and analyze that.
1420 Value *NonConstIndex = nullptr;
1421 for (Value *Index : GEP->indices())
1422 if (!isa<ConstantInt>(Index)) {
1423 if (NonConstIndex)
1424 return false;
1425 NonConstIndex = Index;
1426 }
1427 if (!NonConstIndex)
1428 // The recurrence is on the pointer, ignore for now.
1429 return false;
1430
1431 // The index in GEP is signed. It is non-wrapping if it's derived from a NSW
1432 // AddRec using a NSW operation.
1433 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
1434 if (OBO->hasNoSignedWrap() &&
1435 // Assume the other operand is constant so that the AddRec can be
1436 // easily found.
1437 isa<ConstantInt>(OBO->getOperand(1))) {
1438 auto *OpScev = PSE.getSCEV(OBO->getOperand(0));
1439
1440 if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
1441 return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
1442 }
1443
1444 return false;
1445}
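// Illustrative IR for the GEP case above (assumed shape, not upstream code):
//
//   %idx = add nsw i32 %i, 5            ; %i is an NSW AddRec of this loop
//   %gep = getelementptr inbounds i32, ptr %A, i32 %idx
//
// The single non-constant index is derived from an NSW AddRec via an NSW add
// with a constant, so the address computed by the inbounds GEP is known not
// to wrap.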
1446
1447/// Check whether the access through \p Ptr has a constant stride.
1448std::optional<int64_t> llvm::getPtrStride(PredicatedScalarEvolution &PSE,
1449 Type *AccessTy, Value *Ptr,
1450 const Loop *Lp,
1451 const DenseMap<Value *, const SCEV *> &StridesMap,
1452 bool Assume, bool ShouldCheckWrap) {
1453 Type *Ty = Ptr->getType();
1454 assert(Ty->isPointerTy() && "Unexpected non-ptr");
1455
1456 if (isa<ScalableVectorType>(AccessTy)) {
1457 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
1458 << "\n");
1459 return std::nullopt;
1460 }
1461
1462 const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);
1463
1464 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
1465 if (Assume && !AR)
1466 AR = PSE.getAsAddRec(Ptr);
1467
1468 if (!AR) {
1469 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
1470 << " SCEV: " << *PtrScev << "\n");
1471 return std::nullopt;
1472 }
1473
1474 // The access function must stride over the innermost loop.
1475 if (Lp != AR->getLoop()) {
1476 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
1477 << *Ptr << " SCEV: " << *AR << "\n");
1478 return std::nullopt;
1479 }
1480
1481 // Check the step is constant.
1482 const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
1483
1484 // Calculate the pointer stride and check if it is constant.
1485 const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
1486 if (!C) {
1487 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
1488 << " SCEV: " << *AR << "\n");
1489 return std::nullopt;
1490 }
1491
1492 auto &DL = Lp->getHeader()->getModule()->getDataLayout();
1493 TypeSize AllocSize = DL.getTypeAllocSize(AccessTy);
1494 int64_t Size = AllocSize.getFixedValue();
1495 const APInt &APStepVal = C->getAPInt();
1496
1497 // Huge step value - give up.
1498 if (APStepVal.getBitWidth() > 64)
1499 return std::nullopt;
1500
1501 int64_t StepVal = APStepVal.getSExtValue();
1502
1503 // Strided access.
1504 int64_t Stride = StepVal / Size;
1505 int64_t Rem = StepVal % Size;
1506 if (Rem)
1507 return std::nullopt;
1508
1509 if (!ShouldCheckWrap)
1510 return Stride;
1511
1512 // The address calculation must not wrap. Otherwise, a dependence could be
1513 // inverted.
1514 if (isNoWrapAddRec(Ptr, AR, PSE, Lp))
1515 return Stride;
1516
1517 // An inbounds getelementptr that is an AddRec with a unit stride
1518 // cannot wrap by definition. If it did, the result would be poison
1519 // and any memory access dependent on it would be immediate UB
1520 // when executed.
1521 if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
1522 GEP && GEP->isInBounds() && (Stride == 1 || Stride == -1))
1523 return Stride;
1524
1525 // If the null pointer is undefined, then an access sequence which would
1526 // otherwise access it can be assumed not to unsigned wrap. Note that this
1527 // assumes the object in memory is aligned to the natural alignment.
1528 unsigned AddrSpace = Ty->getPointerAddressSpace();
1529 if (!NullPointerIsDefined(Lp->getHeader()->getParent(), AddrSpace) &&
1530 (Stride == 1 || Stride == -1))
1531 return Stride;
1532
1533 if (Assume) {
1534 PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
1535 LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap:\n"
1536 << "LAA: Pointer: " << *Ptr << "\n"
1537 << "LAA: SCEV: " << *AR << "\n"
1538 << "LAA: Added an overflow assumption\n");
1539 return Stride;
1540 }
1541 LLVM_DEBUG(
1542 dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
1543 << *Ptr << " SCEV: " << *AR << "\n");
1544 return std::nullopt;
1545}
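// Worked example (illustrative): for an access A[2 * i] with i32 elements the
// pointer's AddRec step is +8 bytes and the allocation size is 4, so the
// returned stride is 2; for A[i] the step is +4 and the stride is 1. A step of
// +6 with a 4-byte element leaves a non-zero remainder and yields std::nullopt.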
1546
1547std::optional<int> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA,
1548 Type *ElemTyB, Value *PtrB,
1549 const DataLayout &DL,
1550 ScalarEvolution &SE, bool StrictCheck,
1551 bool CheckType) {
1552 assert(PtrA && PtrB && "Expected non-nullptr pointers.");
1553
1554 // Make sure that A and B are different pointers.
1555 if (PtrA == PtrB)
1556 return 0;
1557
1558 // Make sure that the element types are the same if required.
1559 if (CheckType && ElemTyA != ElemTyB)
1560 return std::nullopt;
1561
1562 unsigned ASA = PtrA->getType()->getPointerAddressSpace();
1563 unsigned ASB = PtrB->getType()->getPointerAddressSpace();
1564
1565 // Check that the address spaces match.
1566 if (ASA != ASB)
1567 return std::nullopt;
1568 unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
1569
1570 APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
1571 Value *PtrA1 = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
1572 Value *PtrB1 = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);
1573
1574 int Val;
1575 if (PtrA1 == PtrB1) {
1576 // Retrieve the address space again as pointer stripping now tracks through
1577 // `addrspacecast`.
1578 ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
1579 ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
1580 // Check that the address spaces match and that the pointers are valid.
1581 if (ASA != ASB)
1582 return std::nullopt;
1583
1584 IdxWidth = DL.getIndexSizeInBits(ASA);
1585 OffsetA = OffsetA.sextOrTrunc(IdxWidth);
1586 OffsetB = OffsetB.sextOrTrunc(IdxWidth);
1587
1588 OffsetB -= OffsetA;
1589 Val = OffsetB.getSExtValue();
1590 } else {
1591 // Otherwise compute the distance with SCEV between the base pointers.
1592 const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
1593 const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
1594 const auto *Diff =
1595 dyn_cast<SCEVConstant>(SE.getMinusSCEV(PtrSCEVB, PtrSCEVA));
1596 if (!Diff)
1597 return std::nullopt;
1598 Val = Diff->getAPInt().getSExtValue();
1599 }
1600 int Size = DL.getTypeStoreSize(ElemTyA);
1601 int Dist = Val / Size;
1602
1603 // Ensure that the calculated distance matches the type-based one after all
1604 // the bitcasts have been removed from the provided pointers.
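 // For example, two i32 pointers at byte offsets 0 and 8 from the same base
 // give Val = 8 and Dist = 2; with StrictCheck, a byte difference of 6 would
 // be rejected because 1 * 4 != 6.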
1605 if (!StrictCheck || Dist * Size == Val)
1606 return Dist;
1607 return std::nullopt;
1608}
1609
1610bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
1611 const DataLayout &DL, ScalarEvolution &SE,
1612 SmallVectorImpl<unsigned> &SortedIndices) {
1613 assert(llvm::all_of(
1614 VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
1615 "Expected list of pointer operands.");
1616 // Walk over the pointers, and map each of them to an offset relative to
1617 // the first pointer in the array.
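 // For example, i32 pointers at byte offsets {0, 8, 4} from VL[0] map to
 // element distances {0, 2, 1}; since that order is not consecutive,
 // SortedIndices becomes {0, 2, 1}.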
1618 Value *Ptr0 = VL[0];
1619
1620 using DistOrdPair = std::pair<int64_t, int>;
1621 auto Compare = llvm::less_first();
1622 std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
1623 Offsets.emplace(0, 0);
1624 int Cnt = 1;
1625 bool IsConsecutive = true;
1626 for (auto *Ptr : VL.drop_front()) {
1627 std::optional<int> Diff = getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
1628 /*StrictCheck=*/true);
1629 if (!Diff)
1630 return false;
1631
1632 // Check if a pointer with the same offset has already been seen.
1633 int64_t Offset = *Diff;
1634 auto Res = Offsets.emplace(Offset, Cnt);
1635 if (!Res.second)
1636 return false;
1637 // Consecutive order if the inserted element is the last one.
1638 IsConsecutive = IsConsecutive && std::next(Res.first) == Offsets.end();
1639 ++Cnt;
1640 }
1641 SortedIndices.clear();
1642 if (!IsConsecutive) {
1643 // Fill SortedIndices array only if it is non-consecutive.
1644 SortedIndices.resize(VL.size());
1645 Cnt = 0;
1646 for (const std::pair<int64_t, int> &Pair : Offsets) {
1647 SortedIndices[Cnt] = Pair.second;
1648 ++Cnt;
1649 }
1650 }
1651 return true;
1652}
1653
1654/// Returns true if the memory operations \p A and \p B are consecutive.
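/// For example, two i32 loads from A[7] and A[8] have a pointer difference of
/// exactly one element and are therefore consecutive.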
1655bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
1656 ScalarEvolution &SE, bool CheckType) {
1657 Value *PtrA = getLoadStorePointerOperand(A);
1658 Value *PtrB = getLoadStorePointerOperand(B);
1659 if (!PtrA || !PtrB)
1660 return false;
1661 Type *ElemTyA = getLoadStoreType(A);
1662 Type *ElemTyB = getLoadStoreType(B);
1663 std::optional<int> Diff =
1664 getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
1665 /*StrictCheck=*/true, CheckType);
1666 return Diff && *Diff == 1;
1667}
1668
1669void MemoryDepChecker::addAccess(StoreInst *SI) {
1670 visitPointers(SI->getPointerOperand(), *InnermostLoop,
1671 [this, SI](Value *Ptr) {
1672 Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
1673 InstMap.push_back(SI);
1674 ++AccessIdx;
1675 });
1676}
1677
1678void MemoryDepChecker::addAccess(LoadInst *LI) {
1679 visitPointers(LI->getPointerOperand(), *InnermostLoop,
1680 [this, LI](Value *Ptr) {
1681 Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
1682 InstMap.push_back(LI);
1683 ++AccessIdx;
1684 });
1685}
1686
1689 switch (Type) {
1690 case NoDep:
1691 case Forward:
1694
1695 case Unknown:
1698 case Backward:
1700 case IndirectUnsafe:
1702 }
1703 llvm_unreachable("unexpected DepType!");
1704}
1705
1707 switch (Type) {
1708 case NoDep:
1709 case Forward:
1710 case ForwardButPreventsForwarding:
1711 case Unknown:
1712 case IndirectUnsafe:
1713 return false;
1714
1715 case BackwardVectorizable:
1716 case Backward:
1717 case BackwardVectorizableButPreventsForwarding:
1718 return true;
1719 }
1720 llvm_unreachable("unexpected DepType!");
1721}
1722
1724 return isBackward() || Type == Unknown;
1725}
1726
1728 switch (Type) {
1729 case Forward:
1730 case ForwardButPreventsForwarding:
1731 return true;
1732
1733 case NoDep:
1734 case Unknown:
1735 case BackwardVectorizable:
1736 case Backward:
1737 case BackwardVectorizableButPreventsForwarding:
1738 case IndirectUnsafe:
1739 return false;
1740 }
1741 llvm_unreachable("unexpected DepType!");
1742}
1743
1744bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
1745 uint64_t TypeByteSize) {
1746 // If loads occur at a distance that is not a multiple of a feasible vector
1747 // factor, store-load forwarding does not take place.
1748 // Positive dependences might cause trouble because vectorizing them might
1749 // prevent store-load forwarding, making vectorized code run a lot slower.
1750 // a[i] = a[i-3] ^ a[i-8];
1751 // The stores to a[i:i+1] don't align with the loads from a[i-3:i-2] and
1752 // hence on your typical architecture store-load forwarding does not take
1753 // place. Vectorizing in such cases does not make sense.
1754 // Store-load forwarding distance.
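 // For example, with TypeByteSize = 4 and Distance = 12 bytes: VF = 8 bytes
 // does not divide 12 and 12 / 8 = 1 is below the 8 * 4 = 32 iteration
 // threshold, so the maximum conflict-free VF drops to 4 bytes, which is less
 // than 2 * TypeByteSize, and the distance is reported as a forwarding
 // conflict.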
1755
1756 // After this many iterations store-to-load forwarding conflicts should not
1757 // cause any slowdowns.
1758 const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
1759 // Maximum vector factor.
1760 uint64_t MaxVFWithoutSLForwardIssues = std::min(
1761 VectorizerParams::MaxVectorWidth * TypeByteSize, MinDepDistBytes);
1762
1763 // Compute the smallest VF at which the store and load would be misaligned.
1764 for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
1765 VF *= 2) {
1766 // If the number of vector iterations between the store and the load is
1767 // small, we could incur conflicts.
1768 if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
1769 MaxVFWithoutSLForwardIssues = (VF >> 1);
1770 break;
1771 }
1772 }
1773
1774 if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
1775 LLVM_DEBUG(
1776 dbgs() << "LAA: Distance " << Distance
1777 << " that could cause a store-load forwarding conflict\n");
1778 return true;
1779 }
1780
1781 if (MaxVFWithoutSLForwardIssues < MinDepDistBytes &&
1782 MaxVFWithoutSLForwardIssues !=
1783 VectorizerParams::MaxVectorWidth * TypeByteSize)
1784 MinDepDistBytes = MaxVFWithoutSLForwardIssues;
1785 return false;
1786}
1787
1788void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
1789 if (Status < S)
1790 Status = S;
1791}
1792
1793/// Given a dependence-distance \p Dist between two
1794/// memory accesses that have strides in the same direction, whose absolute
1795/// value of the maximum stride is given in \p MaxStride, and that have the same
1796/// type size \p TypeByteSize, in a loop whose backedge-taken count is \p
1797/// BackedgeTakenCount, check if it is possible to prove statically that the
1798/// dependence distance is larger than the range that the accesses will travel
1799/// through the execution of the loop. If so, return true; false otherwise. This
1800/// is useful for example in loops such as the following (PR31098):
1801/// for (i = 0; i < D; ++i) {
1802/// = out[i];
1803/// out[i+D] =
1804/// }
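/// For example, with i32 elements and D = 100: |Dist| is 400 bytes while
/// BackedgeTakenCount * Step = 99 * 4 = 396 bytes, so the distance exceeds the
/// range the accesses travel and they are independent.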
1805static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
1806 const SCEV &BackedgeTakenCount,
1807 const SCEV &Dist, uint64_t MaxStride,
1808 uint64_t TypeByteSize) {
1809
1810 // If we can prove that
1811 // (**) |Dist| > BackedgeTakenCount * Step
1812 // where Step is the absolute stride of the memory accesses in bytes,
1813 // then there is no dependence.
1814 //
1815 // Rationale:
1816 // We basically want to check if the absolute distance (|Dist/Step|)
1817 // is >= the loop iteration count (or > BackedgeTakenCount).
1818 // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
1819 // Section 4.2.1); Note, that for vectorization it is sufficient to prove
1820 // that the dependence distance is >= VF; This is checked elsewhere.
1821 // But in some cases we can prune dependence distances early, and
1822 // even before selecting the VF, and without a runtime test, by comparing
1823 // the distance against the loop iteration count. Since the vectorized code
1824 // will be executed only if LoopCount >= VF, proving distance >= LoopCount
1825 // also guarantees that distance >= VF.
1826 //
1827 const uint64_t ByteStride = MaxStride * TypeByteSize;
1828 const SCEV *Step = SE.getConstant(BackedgeTakenCount.getType(), ByteStride);
1829 const SCEV *Product = SE.getMulExpr(&BackedgeTakenCount, Step);
1830
1831 const SCEV *CastedDist = &Dist;
1832 const SCEV *CastedProduct = Product;
1833 uint64_t DistTypeSizeBits = DL.getTypeSizeInBits(Dist.getType());
1834 uint64_t ProductTypeSizeBits = DL.getTypeSizeInBits(Product->getType());
1835
1836 // The dependence distance can be positive/negative, so we sign extend Dist;
1837 // The multiplication of the absolute stride in bytes and the
1838 // backedgeTakenCount is non-negative, so we zero extend Product.
1839 if (DistTypeSizeBits > ProductTypeSizeBits)
1840 CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
1841 else
1842 CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());
1843
1844 // Is Dist - (BackedgeTakenCount * Step) > 0 ?
1845 // (If so, then we have proven (**) because |Dist| >= Dist)
1846 const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
1847 if (SE.isKnownPositive(Minus))
1848 return true;
1849
1850 // Second try: Is -Dist - (BackedgeTakenCount * Step) > 0 ?
1851 // (If so, then we have proven (**) because |Dist| >= -1*Dist)
1852 const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
1853 Minus = SE.getMinusSCEV(NegDist, CastedProduct);
1854 if (SE.isKnownPositive(Minus))
1855 return true;
1856
1857 return false;
1858}
1859
1860/// Check the dependence for two accesses with the same stride \p Stride.
1861/// \p Distance is the positive distance and \p TypeByteSize is the type size
1862/// in bytes.
1863///
1864/// \returns true if they are independent.
1865static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
1866 uint64_t TypeByteSize) {
1867 assert(Stride > 1 && "The stride must be greater than 1");
1868 assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
1869 assert(Distance > 0 && "The distance must be non-zero");
1870
1871 // Skip if the distance is not a multiple of the type byte size.
1872 if (Distance % TypeByteSize)
1873 return false;
1874
1875 uint64_t ScaledDist = Distance / TypeByteSize;
1876
1877 // No dependence if the scaled distance is not a multiple of the stride.
1878 // E.g.
1879 // for (i = 0; i < 1024 ; i += 4)
1880 // A[i+2] = A[i] + 1;
1881 //
1882 // Two accesses in memory (scaled distance is 2, stride is 4):
1883 // | A[0] | | | | A[4] | | | |
1884 // | | | A[2] | | | | A[6] | |
1885 //
1886 // E.g.
1887 // for (i = 0; i < 1024 ; i += 3)
1888 // A[i+4] = A[i] + 1;
1889 //
1890 // Two accesses in memory (scaled distance is 4, stride is 3):
1891 // | A[0] | | | A[3] | | | A[6] | | |
1892 // | | | | | A[4] | | | A[7] | |
1893 return ScaledDist % Stride;
1894}
1895
1896/// Returns true if any of the underlying objects has a loop varying address,
1897/// i.e. may change in \p L.
1898static bool
1899isLoopVariantIndirectAddress(ArrayRef<const Value *> UnderlyingObjects,
1900 ScalarEvolution &SE, const Loop *L) {
1901 return any_of(UnderlyingObjects, [&SE, L](const Value *UO) {
1902 return !SE.isLoopInvariant(SE.getSCEV(const_cast<Value *>(UO)), L);
1903 });
1904}
1905
1906namespace {
1907struct DepDistanceStrideAndSizeInfo {
1908 const SCEV *Dist;
1909 uint64_t StrideA;
1910 uint64_t StrideB;
1911 uint64_t TypeByteSize;
1912 bool AIsWrite;
1913 bool BIsWrite;
1914
1915 DepDistanceStrideAndSizeInfo(const SCEV *Dist, uint64_t StrideA,
1916 uint64_t StrideB, uint64_t TypeByteSize,
1917 bool AIsWrite, bool BIsWrite)
1918 : Dist(Dist), StrideA(StrideA), StrideB(StrideB),
1919 TypeByteSize(TypeByteSize), AIsWrite(AIsWrite), BIsWrite(BIsWrite) {}
1920};
1921} // namespace
1922
1923// Get the dependence distance, strides, type size and whether it is a write for
1924// the dependence between A and B. Returns a DepType if we can prove there's
1925// no dependence or the analysis fails. Outlined to a lambda to limit the scope
1926// of various temporary variables, like A/BPtr, StrideA/BPtr and others.
1927// Returns either the dependence result, if it could already be determined, or a
1928// struct containing (Distance, Stride, TypeSize, AIsWrite, BIsWrite).
1929static std::variant<MemoryDepChecker::Dependence::DepType,
1930 DepDistanceStrideAndSizeInfo>
1931getDependenceDistanceStrideAndSize(
1932 const AccessAnalysis::MemAccessInfo &A, Instruction *AInst,
1933 const AccessAnalysis::MemAccessInfo &B, Instruction *BInst,
1934 const DenseMap<Value *, const SCEV *> &Strides,
1935 const DenseMap<Value *, SmallVector<const Value *, 16>> &UnderlyingObjects,
1936 PredicatedScalarEvolution &PSE, const Loop *InnermostLoop) {
1937 auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
1938 auto &SE = *PSE.getSE();
1939 auto [APtr, AIsWrite] = A;
1940 auto [BPtr, BIsWrite] = B;
1941
1942 // Two reads are independent.
1943 if (!AIsWrite && !BIsWrite)
1945
1946 Type *ATy = getLoadStoreType(AInst);
1947 Type *BTy = getLoadStoreType(BInst);
1948
1949 // We cannot check pointers in different address spaces.
1950 if (APtr->getType()->getPointerAddressSpace() !=
1951 BPtr->getType()->getPointerAddressSpace())
1953
1954 int64_t StrideAPtr =
1955 getPtrStride(PSE, ATy, APtr, InnermostLoop, Strides, true).value_or(0);
1956 int64_t StrideBPtr =
1957 getPtrStride(PSE, BTy, BPtr, InnermostLoop, Strides, true).value_or(0);
1958
1959 const SCEV *Src = PSE.getSCEV(APtr);
1960 const SCEV *Sink = PSE.getSCEV(BPtr);
1961
1962 // If the induction step is negative we have to invert source and sink of the
1963 // dependence when measuring the distance between them. We should not swap
1964 // AIsWrite with BIsWrite, as their uses expect them in program order.
1965 if (StrideAPtr < 0) {
1966 std::swap(Src, Sink);
1967 std::swap(AInst, BInst);
1968 }
1969
1970 const SCEV *Dist = SE.getMinusSCEV(Sink, Src);
1971
1972 LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
1973 << "(Induction step: " << StrideAPtr << ")\n");
1974 LLVM_DEBUG(dbgs() << "LAA: Distance for " << *AInst << " to " << *BInst
1975 << ": " << *Dist << "\n");
1976
1977 // Needs accesses where the addresses of the accessed underlying objects do
1978 // not change within the loop.
1979 if (isLoopVariantIndirectAddress(UnderlyingObjects.find(APtr)->second, SE,
1980 InnermostLoop) ||
1981 isLoopVariantIndirectAddress(UnderlyingObjects.find(BPtr)->second, SE,
1982 InnermostLoop))
1984
1985 // Need accesses with constant strides and the same direction. We don't want
1986 // to vectorize "A[B[i]] += ..." and similar code or pointer arithmetic that
1987 // could wrap in the address space.
1988 if (!StrideAPtr || !StrideBPtr || (StrideAPtr > 0 && StrideBPtr < 0) ||
1989 (StrideAPtr < 0 && StrideBPtr > 0)) {
1990 LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
1992 }
1993
1994 uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
1995 bool HasSameSize =
1996 DL.getTypeStoreSizeInBits(ATy) == DL.getTypeStoreSizeInBits(BTy);
1997 if (!HasSameSize)
1998 TypeByteSize = 0;
1999 return DepDistanceStrideAndSizeInfo(Dist, std::abs(StrideAPtr),
2000 std::abs(StrideBPtr), TypeByteSize,
2001 AIsWrite, BIsWrite);
2002}
2003
2004MemoryDepChecker::Dependence::DepType MemoryDepChecker::isDependent(
2005 const MemAccessInfo &A, unsigned AIdx, const MemAccessInfo &B,
2006 unsigned BIdx, const DenseMap<Value *, const SCEV *> &Strides,
2008 &UnderlyingObjects) {
2009 assert(AIdx < BIdx && "Must pass arguments in program order");
2010
2011 // Get the dependence distance, stride, type size and whether each access is
2012 // a write, for the dependence between A and B.
2014 A, InstMap[AIdx], B, InstMap[BIdx], Strides, UnderlyingObjects, PSE,
2015 InnermostLoop);
2016 if (std::holds_alternative<Dependence::DepType>(Res))
2017 return std::get<Dependence::DepType>(Res);
2018
2019 auto &[Dist, StrideA, StrideB, TypeByteSize, AIsWrite, BIsWrite] =
2020 std::get<DepDistanceStrideAndSizeInfo>(Res);
2021 bool HasSameSize = TypeByteSize > 0;
2022
2023 std::optional<uint64_t> CommonStride =
2024 StrideA == StrideB ? std::make_optional(StrideA) : std::nullopt;
2025 if (isa<SCEVCouldNotCompute>(Dist)) {
2026 // TODO: Relax requirement that there is a common stride to retry with
2027 // non-constant distance dependencies.
2028 FoundNonConstantDistanceDependence |= !!CommonStride;
2029 LLVM_DEBUG(dbgs() << "LAA: Dependence because of uncomputable distance.\n");
2030 return Dependence::Unknown;
2031 }
2032
2033 ScalarEvolution &SE = *PSE.getSE();
2034 auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
2035 uint64_t MaxStride = std::max(StrideA, StrideB);
2036
2037 // If the distance between the accesses is larger than their maximum absolute
2038 // stride multiplied by the backedge taken count, the accesses are independent,
2039 // i.e. they are far enough apart that they won't access the same
2040 // location across all loop iterations.
2041 if (HasSameSize &&
2043 MaxStride, TypeByteSize))
2044 return Dependence::NoDep;
2045
2046 const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
2047
2048 // Attempt to prove strided accesses independent.
2049 if (C) {
2050 const APInt &Val = C->getAPInt();
2051 int64_t Distance = Val.getSExtValue();
2052
2053 // If the distance between accesses and their strides are known constants,
2054 // check whether the accesses interlace each other.
2055 if (std::abs(Distance) > 0 && CommonStride && *CommonStride > 1 &&
2056 HasSameSize &&
2057 areStridedAccessesIndependent(std::abs(Distance), *CommonStride,
2058 TypeByteSize)) {
2059 LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
2060 return Dependence::NoDep;
2061 }
2062 } else
2063 Dist = SE.applyLoopGuards(Dist, InnermostLoop);
2064
2065 // Negative distances are not plausible dependencies.
2066 if (SE.isKnownNonPositive(Dist)) {
2067 if (SE.isKnownNonNegative(Dist)) {
2068 if (HasSameSize) {
2069 // Write to the same location with the same size.
2070 return Dependence::Forward;
2071 } else {
2072 LLVM_DEBUG(dbgs() << "LAA: possibly zero dependence difference but "
2073 "different type sizes\n");
2074 return Dependence::Unknown;
2075 }
2076 }
2077
2078 bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
2079 // Check if the first access writes to a location that is read in a later
2080 // iteration, where the distance between them is not a multiple of a vector
2081 // factor and relatively small.
2082 //
2083 // NOTE: There is no need to update MaxSafeVectorWidthInBits after call to
2084 // couldPreventStoreLoadForward, even if it changed MinDepDistBytes, since a
2085 // forward dependency will allow vectorization using any width.
2086
2087 if (IsTrueDataDependence && EnableForwardingConflictDetection) {
2088 if (!C) {
2089 // TODO: FoundNonConstantDistanceDependence is used as a necessary
2090 // condition to consider retrying with runtime checks. Historically, we
2091 // did not set it when strides were different but there is no inherent
2092 // reason to.
2093 FoundNonConstantDistanceDependence |= CommonStride.has_value();
2094 return Dependence::Unknown;
2095 }
2096 if (!HasSameSize ||
2097 couldPreventStoreLoadForward(C->getAPInt().abs().getZExtValue(),
2098 TypeByteSize)) {
2099 LLVM_DEBUG(
2100 dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
2102 }
2103 }
2104
2105 LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
2106 return Dependence::Forward;
2107 }
2108
2109 int64_t MinDistance = SE.getSignedRangeMin(Dist).getSExtValue();
2110 // Below we only handle strictly positive distances.
2111 if (MinDistance <= 0) {
2112 FoundNonConstantDistanceDependence |= CommonStride.has_value();
2113 return Dependence::Unknown;
2114 }
2115
2116 if (!isa<SCEVConstant>(Dist)) {
2117 // Previously this case would be treated as Unknown, possibly setting
2118 // FoundNonConstantDistanceDependence to force re-trying with runtime
2119 // checks. Until the TODO below is addressed, set it here to preserve
2120 // original behavior w.r.t. re-trying with runtime checks.
2121 // TODO: FoundNonConstantDistanceDependence is used as a necessary
2122 // condition to consider retrying with runtime checks. Historically, we
2123 // did not set it when strides were different but there is no inherent
2124 // reason to.
2125 FoundNonConstantDistanceDependence |= CommonStride.has_value();
2126 }
2127
2128 if (!HasSameSize) {
2129 LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "
2130 "different type sizes\n");
2131 return Dependence::Unknown;
2132 }
2133
2134 if (!CommonStride)
2135 return Dependence::Unknown;
2136
2137 // Bail out early if passed-in parameters make vectorization not feasible.
2138 unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
2139 VectorizerParams::VectorizationFactor : 1);
2140 unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
2141 VectorizerParams::VectorizationInterleave : 1);
2142 // The minimum number of iterations for a vectorized/unrolled version.
2143 unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
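 // For example, -force-vector-width=4 with -force-vector-interleave=2 gives
 // MinNumIter = 8; without forced values it defaults to 2.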
2144
2145 // It's not vectorizable if the distance is smaller than the minimum distance
2146 // needed for a vectorized/unrolled version. Vectorizing one iteration in
2147 // front needs TypeByteSize * Stride. Vectorizing the last iteration needs
2148 // TypeByteSize (no need to add the last gap distance).
2149 //
2150 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
2151 // foo(int *A) {
2152 // int *B = (int *)((char *)A + 14);
2153 // for (i = 0 ; i < 1024 ; i += 2)
2154 // B[i] = A[i] + 1;
2155 // }
2156 //
2157 // Two accesses in memory (stride is 2):
2158 // | A[0] | | A[2] | | A[4] | | A[6] | |
2159 // | B[0] | | B[2] | | B[4] |
2160 //
2161 // The minimum distance needed for vectorizing iterations except the last one:
2162 // 4 * 2 * (MinNumIter - 1). The minimum distance needed for the last iteration: 4.
2163 // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
2164 //
2165 // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
2166 // 12, which is less than distance.
2167 //
2168 // If MinNumIter is 4 (Say if a user forces the vectorization factor to be 4),
2169 // the minimum distance needed is 28, which is greater than distance. It is
2170 // not safe to do vectorization.
2171
2172 // We know that Dist is positive, but it may not be constant. Use the signed
2173 // minimum for computations below, as this ensures we compute the closest
2174 // possible dependence distance.
2175 uint64_t MinDistanceNeeded =
2176 TypeByteSize * *CommonStride * (MinNumIter - 1) + TypeByteSize;
2177 if (MinDistanceNeeded > static_cast<uint64_t>(MinDistance)) {
2178 if (!isa<SCEVConstant>(Dist)) {
2179 // For non-constant distances, we checked the lower bound of the
2180 // dependence distance and the distance may be larger at runtime (and safe
2181 // for vectorization). Classify it as Unknown, so we re-try with runtime
2182 // checks.
2183 return Dependence::Unknown;
2184 }
2185 LLVM_DEBUG(dbgs() << "LAA: Failure because of positive minimum distance "
2186 << MinDistance << '\n');
2187 return Dependence::Backward;
2188 }
2189
2190 // Unsafe if the minimum distance needed is greater than the smallest dependence
2191 // distance.
2192 if (MinDistanceNeeded > MinDepDistBytes) {
2193 LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
2194 << MinDistanceNeeded << " size in bytes\n");
2195 return Dependence::Backward;
2196 }
2197
2198 // Positive distance bigger than max vectorization factor.
2199 // FIXME: Should use max factor instead of max distance in bytes, which could
2200 // not handle different types.
2201 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
2202 // void foo (int *A, char *B) {
2203 // for (unsigned i = 0; i < 1024; i++) {
2204 // A[i+2] = A[i] + 1;
2205 // B[i+2] = B[i] + 1;
2206 // }
2207 // }
2208 //
2209 // This case is currently unsafe according to the max safe distance. If we
2210 // analyze the two accesses on array B, the max safe dependence distance
2211 // is 2. Then we analyze the accesses on array A, where the minimum distance
2212 // needed is 8, which is greater than 2, so vectorization is forbidden. But
2213 // actually both A and B could be vectorized with a factor of 2.
2214 MinDepDistBytes =
2215 std::min(static_cast<uint64_t>(MinDistance), MinDepDistBytes);
2216
2217 bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
2218 uint64_t MinDepDistBytesOld = MinDepDistBytes;
2219 if (IsTrueDataDependence && EnableForwardingConflictDetection &&
2220 isa<SCEVConstant>(Dist) &&
2221 couldPreventStoreLoadForward(MinDistance, TypeByteSize)) {
2222 // Sanity check that we didn't update MinDepDistBytes when calling
2223 // couldPreventStoreLoadForward
2224 assert(MinDepDistBytes == MinDepDistBytesOld &&
2225 "An update to MinDepDistBytes requires an update to "
2226 "MaxSafeVectorWidthInBits");
2227 (void)MinDepDistBytesOld;
2229 }
2230
2231 // An update to MinDepDistBytes requires an update to MaxSafeVectorWidthInBits
2232 // since there is a backwards dependency.
2233 uint64_t MaxVF = MinDepDistBytes / (TypeByteSize * *CommonStride);
2234 LLVM_DEBUG(dbgs() << "LAA: Positive min distance " << MinDistance
2235 << " with max VF = " << MaxVF << '\n');
2236
2237 uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
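 // For example, MinDepDistBytes = 32 with 4-byte elements and a common stride
 // of 1 gives MaxVF = 8 elements, i.e. MaxVFInBits = 256 bits.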
2238 if (!isa<SCEVConstant>(Dist) && MaxVFInBits < MaxTargetVectorWidthInBits) {
2239 // For non-constant distances, we checked the lower bound of the dependence
2240 // distance and the distance may be larger at runtime (and safe for
2241 // vectorization). Classify it as Unknown, so we re-try with runtime checks.
2242 return Dependence::Unknown;
2243 }
2244
2245 MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);
2247}
2248
2249bool MemoryDepChecker::areDepsSafe(
2250 DepCandidates &AccessSets, MemAccessInfoList &CheckDeps,
2251 const DenseMap<Value *, const SCEV *> &Strides,
2253 &UnderlyingObjects) {
2254
2255 MinDepDistBytes = -1;
2257 for (MemAccessInfo CurAccess : CheckDeps) {
2258 if (Visited.count(CurAccess))
2259 continue;
2260
2261 // Get the relevant memory access set.
2263 AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));
2264
2265 // Check accesses within this set.
2267 AccessSets.member_begin(I);
2269 AccessSets.member_end();
2270
2271 // Check every access pair.
2272 while (AI != AE) {
2273 Visited.insert(*AI);
2274 bool AIIsWrite = AI->getInt();
2275 // Check loads only against the next equivalence class, but stores also against
2276 // other stores in the same equivalence class - to the same address.
2278 (AIIsWrite ? AI : std::next(AI));
2279 while (OI != AE) {
2280 // Check every accessing instruction pair in program order.
2281 for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
2282 I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
2283 // Scan all accesses of another equivalence class, but only the next
2284 // accesses of the same equivalence class.
2285 for (std::vector<unsigned>::iterator
2286 I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
2287 I2E = (OI == AI ? I1E : Accesses[*OI].end());
2288 I2 != I2E; ++I2) {
2289 auto A = std::make_pair(&*AI, *I1);
2290 auto B = std::make_pair(&*OI, *I2);
2291
2292 assert(*I1 != *I2);
2293 if (*I1 > *I2)
2294 std::swap(A, B);
2295
2297 isDependent(*A.first, A.second, *B.first, B.second, Strides,
2298 UnderlyingObjects);
2300
2301 // Gather dependences unless we accumulated MaxDependences
2302 // dependences. In that case return as soon as we find the first
2303 // unsafe dependence. This puts a limit on this quadratic
2304 // algorithm.
2305 if (RecordDependences) {
2306 if (Type != Dependence::NoDep)
2307 Dependences.push_back(Dependence(A.second, B.second, Type));
2308
2309 if (Dependences.size() >= MaxDependences) {
2310 RecordDependences = false;
2311 Dependences.clear();
2313 << "Too many dependences, stopped recording\n");
2314 }
2315 }
2316 if (!RecordDependences && !isSafeForVectorization())
2317 return false;
2318 }
2319 ++OI;
2320 }
2321 AI++;
2322 }
2323 }
2324
2325 LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
2326 return isSafeForVectorization();
2327}
2328
2331 MemAccessInfo Access(Ptr, isWrite);
2332 auto &IndexVector = Accesses.find(Access)->second;
2333
2335 transform(IndexVector,
2336 std::back_inserter(Insts),
2337 [&](unsigned Idx) { return this->InstMap[Idx]; });
2338 return Insts;
2339}
2340
2342 "NoDep",
2343 "Unknown",
2344 "IndirectUnsafe",
2345 "Forward",
2346 "ForwardButPreventsForwarding",
2347 "Backward",
2348 "BackwardVectorizable",
2349 "BackwardVectorizableButPreventsForwarding"};
2350
2352 raw_ostream &OS, unsigned Depth,
2353 const SmallVectorImpl<Instruction *> &Instrs) const {
2354 OS.indent(Depth) << DepName[Type] << ":\n";
2355 OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
2356 OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
2357}
2358
2359bool LoopAccessInfo::canAnalyzeLoop() {
2360 // We need to have a loop header.
2361 LLVM_DEBUG(dbgs() << "LAA: Found a loop in "
2362 << TheLoop->getHeader()->getParent()->getName() << ": "
2363 << TheLoop->getHeader()->getName() << '\n');
2364
2365 // We can only analyze innermost loops.
2366 if (!TheLoop->isInnermost()) {
2367 LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
2368 recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
2369 return false;
2370 }
2371
2372 // We must have a single backedge.
2373 if (TheLoop->getNumBackEdges() != 1) {
2374 LLVM_DEBUG(
2375 dbgs() << "LAA: loop control flow is not understood by analyzer\n");
2376 recordAnalysis("CFGNotUnderstood")
2377 << "loop control flow is not understood by analyzer";
2378 return false;
2379 }
2380
2381 // ScalarEvolution needs to be able to find the exit count.
2382 const SCEV *ExitCount = PSE->getBackedgeTakenCount();
2383 if (isa<SCEVCouldNotCompute>(ExitCount)) {
2384 recordAnalysis("CantComputeNumberOfIterations")
2385 << "could not determine number of loop iterations";
2386 LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
2387 return false;
2388 }
2389
2390 return true;
2391}
2392
2393void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
2394 const TargetLibraryInfo *TLI,
2395 DominatorTree *DT) {
2396 // Holds the Load and Store instructions.
2399 SmallPtrSet<MDNode *, 8> LoopAliasScopes;
2400
2401 // Holds all the different accesses in the loop.
2402 unsigned NumReads = 0;
2403 unsigned NumReadWrites = 0;
2404
2405 bool HasComplexMemInst = false;
2406
2407 // A runtime check is only legal to insert if there are no convergent calls.
2408 HasConvergentOp = false;
2409
2410 PtrRtChecking->Pointers.clear();
2411 PtrRtChecking->Need = false;
2412
2413 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
2414
2415 const bool EnableMemAccessVersioningOfLoop =
2417 !TheLoop->getHeader()->getParent()->hasOptSize();
2418
2419 // Traverse blocks in fixed RPOT order, regardless of their storage in the
2420 // loop info, as it may be arbitrary.
2421 LoopBlocksRPO RPOT(TheLoop);
2422 RPOT.perform(LI);
2423 for (BasicBlock *BB : RPOT) {
2424 // Scan the BB and collect legal loads and stores. Also detect any
2425 // convergent instructions.
2426 for (Instruction &I : *BB) {
2427 if (auto *Call = dyn_cast<CallBase>(&I)) {
2428 if (Call->isConvergent())
2429 HasConvergentOp = true;
2430 }
2431
2432 // If both a non-vectorizable memory instruction and a convergent
2433 // operation are found in this loop, there is no reason to continue the search.
2434 if (HasComplexMemInst && HasConvergentOp) {
2435 CanVecMem = false;
2436 return;
2437 }
2438
2439 // Avoid hitting recordAnalysis multiple times.
2440 if (HasComplexMemInst)
2441 continue;
2442
2443 // Record alias scopes defined inside the loop.
2444 if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
2445 for (Metadata *Op : Decl->getScopeList()->operands())
2446 LoopAliasScopes.insert(cast<MDNode>(Op));
2447
2448 // Many math library functions read the rounding mode. We will only
2449 // vectorize a loop if it contains known function calls that don't set
2450 // the flag. Therefore, it is safe to ignore this read from memory.
2451 auto *Call = dyn_cast<CallInst>(&I);
2452 if (Call && getVectorIntrinsicIDForCall(Call, TLI))
2453 continue;
2454
2455 // If this is a load, save it. If this instruction can read from memory
2456 // but is not a load, then we quit. Notice that we don't handle function
2457 // calls that read or write.
2458 if (I.mayReadFromMemory()) {
2459 // If the function has an explicit vectorized counterpart, we can safely
2460 // assume that it can be vectorized.
2461 if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
2462 !VFDatabase::getMappings(*Call).empty())
2463 continue;
2464
2465 auto *Ld = dyn_cast<LoadInst>(&I);
2466 if (!Ld) {
2467 recordAnalysis("CantVectorizeInstruction", Ld)
2468 << "instruction cannot be vectorized";
2469 HasComplexMemInst = true;
2470 continue;
2471 }
2472 if (!Ld->isSimple() && !IsAnnotatedParallel) {
2473 recordAnalysis("NonSimpleLoad", Ld)
2474 << "read with atomic ordering or volatile read";
2475 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
2476 HasComplexMemInst = true;
2477 continue;
2478 }
2479 NumLoads++;
2480 Loads.push_back(Ld);
2481 DepChecker->addAccess(Ld);
2482 if (EnableMemAccessVersioningOfLoop)
2483 collectStridedAccess(Ld);
2484 continue;
2485 }
2486
2487 // Save 'store' instructions. Abort if other instructions write to memory.
2488 if (I.mayWriteToMemory()) {
2489 auto *St = dyn_cast<StoreInst>(&I);
2490 if (!St) {
2491 recordAnalysis("CantVectorizeInstruction", St)
2492 << "instruction cannot be vectorized";
2493 HasComplexMemInst = true;
2494 continue;
2495 }
2496 if (!St->isSimple() && !IsAnnotatedParallel) {
2497 recordAnalysis("NonSimpleStore", St)
2498 << "write with atomic ordering or volatile write";
2499 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
2500 HasComplexMemInst = true;
2501 continue;
2502 }
2503 NumStores++;
2504 Stores.push_back(St);
2505 DepChecker->addAccess(St);
2506 if (EnableMemAccessVersioningOfLoop)
2507 collectStridedAccess(St);
2508 }
2509 } // Next instr.
2510 } // Next block.
2511
2512 if (HasComplexMemInst) {
2513 CanVecMem = false;
2514 return;
2515 }
2516
2517 // Now we have two lists that hold the loads and the stores.
2518 // Next, we find the pointers that they use.
2519
2520 // Check if we see any stores. If there are no stores, then we don't
2521 // care if the pointers are *restrict*.
2522 if (!Stores.size()) {
2523 LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
2524 CanVecMem = true;
2525 return;
2526 }
2527
2528 MemoryDepChecker::DepCandidates DependentAccesses;
2529 AccessAnalysis Accesses(TheLoop, AA, LI, DependentAccesses, *PSE,
2530 LoopAliasScopes);
2531
2532 // Holds the analyzed pointers. We don't want to call getUnderlyingObjects
2533 // multiple times on the same object. If the ptr is accessed twice, once
2534 // for read and once for write, it will only appear once (on the write
2535 // list). This is okay, since we are going to check for conflicts between
2536 // writes and between reads and writes, but not between reads and reads.
2538
2539 // Record uniform store addresses to identify if we have multiple stores
2540 // to the same address.
2541 SmallPtrSet<Value *, 16> UniformStores;
2542
2543 for (StoreInst *ST : Stores) {
2544 Value *Ptr = ST->getPointerOperand();
2545
2546 if (isInvariant(Ptr)) {
2547 // Record store instructions to loop invariant addresses
2548 StoresToInvariantAddresses.push_back(ST);
2549 HasStoreStoreDependenceInvolvingLoopInvariantAddress |=
2550 !UniformStores.insert(Ptr).second;
2551 }
2552
2553 // If we did *not* see this pointer before, insert it to the read-write
2554 // list. At this phase it is only a 'write' list.
2555 Type *AccessTy = getLoadStoreType(ST);
2556 if (Seen.insert({Ptr, AccessTy}).second) {
2557 ++NumReadWrites;
2558
2560 // The TBAA metadata could have a control dependency on the predication
2561 // condition, so we cannot rely on it when determining whether or not we
2562 // need runtime pointer checks.
2563 if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
2564 Loc.AATags.TBAA = nullptr;
2565
2566 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2567 [&Accesses, AccessTy, Loc](Value *Ptr) {
2568 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2569 Accesses.addStore(NewLoc, AccessTy);
2570 });
2571 }
2572 }
2573
2574 if (IsAnnotatedParallel) {
2575 LLVM_DEBUG(
2576 dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
2577 << "checks.\n");
2578 CanVecMem = true;
2579 return;
2580 }
2581
2582 for (LoadInst *LD : Loads) {
2583 Value *Ptr = LD->getPointerOperand();
2584 // If we did *not* see this pointer before, insert it to the
2585 // read list. If we *did* see it before, then it is already in
2586 // the read-write list. This allows us to vectorize expressions
2587 // such as A[i] += x, because the address of A[i] is a read-write
2588 // pointer. This only works if the index of A[i] is consecutive.
2589 // If the address of i is unknown (for example A[B[i]]) then we may
2590 // read a few words, modify, and write a few words, and some of the
2591 // words may be written to the same address.
2592 bool IsReadOnlyPtr = false;
2593 Type *AccessTy = getLoadStoreType(LD);
2594 if (Seen.insert({Ptr, AccessTy}).second ||
2595 !getPtrStride(*PSE, LD->getType(), Ptr, TheLoop, SymbolicStrides).value_or(0)) {
2596 ++NumReads;
2597 IsReadOnlyPtr = true;
2598 }
2599
2600 // See if there is an unsafe dependency between a load from a uniform address
2601 // and a store to the same uniform address.
2602 if (UniformStores.count(Ptr)) {
2603 LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
2604 "load and uniform store to the same address!\n");
2605 HasLoadStoreDependenceInvolvingLoopInvariantAddress = true;
2606 }
2607
2609 // The TBAA metadata could have a control dependency on the predication
2610 // condition, so we cannot rely on it when determining whether or not we
2611 // need runtime pointer checks.
2612 if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
2613 Loc.AATags.TBAA = nullptr;
2614
2615 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2616 [&Accesses, AccessTy, Loc, IsReadOnlyPtr](Value *Ptr) {
2617 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2618 Accesses.addLoad(NewLoc, AccessTy, IsReadOnlyPtr);
2619 });
2620 }
2621
2622 // If we write (or read-write) to a single destination and there are no
2623 // other reads in this loop then it is safe to vectorize.
2624 if (NumReadWrites == 1 && NumReads == 0) {
2625 LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
2626 CanVecMem = true;
2627 return;
2628 }
2629
2630 // Build dependence sets and check whether we need a runtime pointer bounds
2631 // check.
2632 Accesses.buildDependenceSets();
2633
2634 // Find pointers with computable bounds. We are going to use this information
2635 // to place a runtime bound check.
2636 Value *UncomputablePtr = nullptr;
2637 bool CanDoRTIfNeeded =
2638 Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(), TheLoop,
2639 SymbolicStrides, UncomputablePtr, false);
2640 if (!CanDoRTIfNeeded) {
2641 auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2642 recordAnalysis("CantIdentifyArrayBounds", I)
2643 << "cannot identify array bounds";
2644 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
2645 << "the array bounds.\n");
2646 CanVecMem = false;
2647 return;
2648 }
2649
2650 LLVM_DEBUG(
2651 dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");
2652
2653 CanVecMem = true;
2654 if (Accesses.isDependencyCheckNeeded()) {
2655 LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
2656 CanVecMem = DepChecker->areDepsSafe(
2657 DependentAccesses, Accesses.getDependenciesToCheck(), SymbolicStrides,
2658 Accesses.getUnderlyingObjects());
2659
2660 if (!CanVecMem && DepChecker->shouldRetryWithRuntimeCheck()) {
2661 LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");
2662
2663 // Clear the dependency checks. We assume they are not needed.
2664 Accesses.resetDepChecks(*DepChecker);
2665
2666 PtrRtChecking->reset();
2667 PtrRtChecking->Need = true;
2668
2669 auto *SE = PSE->getSE();
2670 UncomputablePtr = nullptr;
2671 CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(
2672 *PtrRtChecking, SE, TheLoop, SymbolicStrides, UncomputablePtr, true);
2673
2674 // Check that we found the bounds for the pointer.
2675 if (!CanDoRTIfNeeded) {
2676 auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2677 recordAnalysis("CantCheckMemDepsAtRunTime", I)
2678 << "cannot check memory dependencies at runtime";
2679 LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
2680 CanVecMem = false;
2681 return;
2682 }
2683
2684 CanVecMem = true;
2685 }
2686 }
2687
2688 if (HasConvergentOp) {
2689 recordAnalysis("CantInsertRuntimeCheckWithConvergent")
2690 << "cannot add control dependency to convergent operation";
2691 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
2692 "would be needed with a convergent operation\n");
2693 CanVecMem = false;
2694 return;
2695 }
2696
2697 if (CanVecMem)
2698 LLVM_DEBUG(
2699 dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
2700 << (PtrRtChecking->Need ? "" : " don't")
2701 << " need runtime memory checks.\n");
2702 else
2703 emitUnsafeDependenceRemark();
2704}
2705
2706void LoopAccessInfo::emitUnsafeDependenceRemark() {
2707 auto Deps = getDepChecker().getDependences();
2708 if (!Deps)
2709 return;
2710 auto Found = llvm::find_if(*Deps, [](const MemoryDepChecker::Dependence &D) {
2713 });
2714 if (Found == Deps->end())
2715 return;
2716 MemoryDepChecker::Dependence Dep = *Found;
2717
2718 LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
2719
2720 // Emit remark for first unsafe dependence
2721 bool HasForcedDistribution = false;
2722 std::optional<const MDOperand *> Value =
2723 findStringMetadataForLoop(TheLoop, "llvm.loop.distribute.enable");
2724 if (Value) {
2725 const MDOperand *Op = *Value;
2726 assert(Op && mdconst::hasa<ConstantInt>(*Op) && "invalid metadata");
2727 HasForcedDistribution = mdconst::extract<ConstantInt>(*Op)->getZExtValue();
2728 }
2729
2730 const std::string Info =
2731 HasForcedDistribution
2732 ? "unsafe dependent memory operations in loop."
2733 : "unsafe dependent memory operations in loop. Use "
2734 "#pragma clang loop distribute(enable) to allow loop distribution "
2735 "to attempt to isolate the offending operations into a separate "
2736 "loop";
2738 recordAnalysis("UnsafeDep", Dep.getDestination(getDepChecker())) << Info;
2739
2740 switch (Dep.Type) {
2744 llvm_unreachable("Unexpected dependence");
2746 R << "\nBackward loop carried data dependence.";
2747 break;
2749 R << "\nForward loop carried data dependence that prevents "
2750 "store-to-load forwarding.";
2751 break;
2753 R << "\nBackward loop carried data dependence that prevents "
2754 "store-to-load forwarding.";
2755 break;
2757 R << "\nUnsafe indirect dependence.";
2758 break;
2760 R << "\nUnknown data dependence.";
2761 break;
2762 }
2763
2764 if (Instruction *I = Dep.getSource(getDepChecker())) {
2765 DebugLoc SourceLoc = I->getDebugLoc();
2766 if (auto *DD = dyn_cast_or_null<Instruction>(getPointerOperand(I)))
2767 SourceLoc = DD->getDebugLoc();
2768 if (SourceLoc)
2769 R << " Memory location is the same as accessed at "
2770 << ore::NV("Location", SourceLoc);
2771 }
2772}
2773
2775 DominatorTree *DT) {
2776 assert(TheLoop->contains(BB) && "Unknown block used");
2777
2778 // Blocks that do not dominate the latch need predication.
2779 BasicBlock* Latch = TheLoop->getLoopLatch();
2780 return !DT->dominates(BB, Latch);
2781}
2782
2783OptimizationRemarkAnalysis &LoopAccessInfo::recordAnalysis(StringRef RemarkName,
2784 Instruction *I) {
2785 assert(!Report && "Multiple reports generated");
2786
2787 Value *CodeRegion = TheLoop->getHeader();
2788 DebugLoc DL = TheLoop->getStartLoc();
2789
2790 if (I) {
2791 CodeRegion = I->getParent();
2792 // If there is no debug location attached to the instruction, fall back to
2793 // using the loop's.
2794 if (I->getDebugLoc())
2795 DL = I->getDebugLoc();
2796 }
2797
2798 Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName, DL,
2799 CodeRegion);
2800 return *Report;
2801}
2802
2804 auto *SE = PSE->getSE();
2805 // TODO: Is this really what we want? Even without FP SCEV, we may want some
2806 // trivially loop-invariant FP values to be considered invariant.
2807 if (!SE->isSCEVable(V->getType()))
2808 return false;
2809 const SCEV *S = SE->getSCEV(V);
2810 return SE->isLoopInvariant(S, TheLoop);
2811}
2812
2813/// Find the operand of the GEP that should be checked for consecutive
2814/// stores. This ignores trailing indices that have no effect on the final
2815/// pointer.
2816static unsigned getGEPInductionOperand(const GetElementPtrInst *Gep) {
2817 const DataLayout &DL = Gep->getModule()->getDataLayout();
2818 unsigned LastOperand = Gep->getNumOperands() - 1;
2819 TypeSize GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());
2820
2821 // Walk backwards and try to peel off zeros.
2822 while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
2823 // Find the type we're currently indexing into.
2824 gep_type_iterator GEPTI = gep_type_begin(Gep);
2825 std::advance(GEPTI, LastOperand - 2);
2826
2827 // If it's a type with the same allocation size as the result of the GEP we
2828 // can peel off the zero index.
2829 TypeSize ElemSize = GEPTI.isStruct()
2830 ? DL.getTypeAllocSize(GEPTI.getIndexedType())
2832 if (ElemSize != GEPAllocSize)
2833 break;
2834 --LastOperand;
2835 }
2836
2837 return LastOperand;
2838}
2839
2840/// If the argument is a GEP, then returns the operand identified by
2841/// getGEPInductionOperand. However, if there is some other non-loop-invariant
2842/// operand, it returns that instead.
2844 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
2845 if (!GEP)
2846 return Ptr;
2847
2848 unsigned InductionOperand = getGEPInductionOperand(GEP);
2849
2850 // Check that all of the gep indices are uniform except for our induction
2851 // operand.
2852 for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i)
2853 if (i != InductionOperand &&
2854 !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(i)), Lp))
2855 return Ptr;
2856 return GEP->getOperand(InductionOperand);
2857}
2858
2859/// If a value has only one user that is a CastInst, return it.
2861 Value *UniqueCast = nullptr;
2862 for (User *U : Ptr->users()) {
2863 CastInst *CI = dyn_cast<CastInst>(U);
2864 if (CI && CI->getType() == Ty) {
2865 if (!UniqueCast)
2866 UniqueCast = CI;
2867 else
2868 return nullptr;
2869 }
2870 }
2871 return UniqueCast;
2872}
2873
2874/// Get the stride of a pointer access in a loop. Looks for symbolic
2875/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
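/// For example, for a GEP computing &A[i * Stride] where i is the canonical
/// {0,+,1} induction and %Stride is loop invariant, the stripped index has
/// the SCEV {0,+,%Stride}, and that step is returned as the symbolic stride.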
2876static const SCEV *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
2877 auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
2878 if (!PtrTy || PtrTy->isAggregateType())
2879 return nullptr;
2880
2881 // Try to remove a gep instruction to make the pointer (actually the index at
2882 // this point) easier to analyze. If OrigPtr is equal to Ptr we are analyzing
2883 // the pointer; otherwise, we are analyzing the index.
2884 Value *OrigPtr = Ptr;
2885
2886 // The size of the pointer access.
2887 int64_t PtrAccessSize = 1;
2888
2889 Ptr = stripGetElementPtr(Ptr, SE, Lp);
2890 const SCEV *V = SE->getSCEV(Ptr);
2891
2892 if (Ptr != OrigPtr)
2893 // Strip off casts.
2894 while (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V))
2895 V = C->getOperand();
2896
2897 const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
2898 if (!S)
2899 return nullptr;
2900
2901 // If the pointer is invariant then there is no stride and it makes no
2902 // sense to add it here.
2903 if (Lp != S->getLoop())
2904 return nullptr;
2905
2906 V = S->getStepRecurrence(*SE);
2907 if (!V)
2908 return nullptr;
2909
2910 // Strip off the size of access multiplication if we are still analyzing the
2911 // pointer.
2912 if (OrigPtr == Ptr) {
2913 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
2914 if (M->getOperand(0)->getSCEVType() != scConstant)
2915 return nullptr;
2916
2917 const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();
2918
2919 // Huge step value - give up.
2920 if (APStepVal.getBitWidth() > 64)
2921 return nullptr;
2922
2923 int64_t StepVal = APStepVal.getSExtValue();
2924 if (PtrAccessSize != StepVal)
2925 return nullptr;
2926 V = M->getOperand(1);
2927 }
2928 }
2929
2930 // Note that the restrictions after this loop invariant check are only
2931 // profitability restrictions.
2932 if (!SE->isLoopInvariant(V, Lp))
2933 return nullptr;
2934
2935 // Look for the loop invariant symbolic value.
2936 const SCEVUnknown *U = dyn_cast<SCEVUnknown>(V);
2937 if (!U) {
2938 const auto *C = dyn_cast<SCEVIntegralCastExpr>(V);
2939 if (!C)
2940 return nullptr;
2941 U = dyn_cast<SCEVUnknown>(C->getOperand());
2942 if (!U)
2943 return nullptr;
2944
2945 // Match legacy behavior - this is not needed for correctness
2946 if (!getUniqueCastUse(U->getValue(), Lp, V->getType()))
2947 return nullptr;
2948 }
2949
2950 return V;
2951}
2952
2953void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
2954 Value *Ptr = getLoadStorePointerOperand(MemAccess);
2955 if (!Ptr)
2956 return;
2957
2958 // Note: getStrideFromPointer is a *profitability* heuristic. We
2959 // could broaden the scope of values returned here - to anything
2960 // which happens to be loop invariant and contributes to the
2961 // computation of an interesting IV - but we chose not to as we
2962 // don't have a cost model here, and broadening the scope exposes
2963 // far too many unprofitable cases.
2964 const SCEV *StrideExpr = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
2965 if (!StrideExpr)
2966 return;
2967
2968 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
2969 "versioning:");
2970 LLVM_DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *StrideExpr << "\n");
2971
2972 if (!SpeculateUnitStride) {
2973 LLVM_DEBUG(dbgs() << " Chose not to due to -laa-speculate-unit-stride\n");
2974 return;
2975 }
2976
2977 // Avoid adding the "Stride == 1" predicate when we know that
2978 // Stride >= Trip-Count. Such a predicate will effectively optimize a single
2979 // or zero iteration loop, as Trip-Count <= Stride == 1.
2980 //
2981 // TODO: We are currently not making a very informed decision on when it is
2982 // beneficial to apply stride versioning. It might make more sense that the
2983 // users of this analysis (such as the vectorizer) will trigger it, based on
2984 // their specific cost considerations; For example, in cases where stride
2985 // versioning does not help resolving memory accesses/dependences, the
2986 // vectorizer should evaluate the cost of the runtime test, and the benefit
2987 // of various possible stride specializations, considering the alternatives
2988 // of using gather/scatters (if available).
2989
2990 const SCEV *BETakenCount = PSE->getBackedgeTakenCount();
2991
2992 // Match the types so we can compare the stride and the BETakenCount.
2993 // The Stride can be positive/negative, so we sign extend Stride;
2994 // The backedgeTakenCount is non-negative, so we zero extend BETakenCount.
2995 const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout();
2996 uint64_t StrideTypeSizeBits = DL.getTypeSizeInBits(StrideExpr->getType());
2997 uint64_t BETypeSizeBits = DL.getTypeSizeInBits(BETakenCount->getType());
2998 const SCEV *CastedStride = StrideExpr;
2999 const SCEV *CastedBECount = BETakenCount;
3000 ScalarEvolution *SE = PSE->getSE();
3001 if (BETypeSizeBits >= StrideTypeSizeBits)
3002 CastedStride = SE->getNoopOrSignExtend(StrideExpr, BETakenCount->getType());
3003 else
3004 CastedBECount = SE->getZeroExtendExpr(BETakenCount, StrideExpr->getType());
3005 const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
3006 // Since TripCount == BackEdgeTakenCount + 1, checking:
3007 // "Stride >= TripCount" is equivalent to checking:
3008 // Stride - BETakenCount > 0
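 // For example, with BETakenCount = 99 (TripCount = 100) and Stride = 100,
 // Stride - BETakenCount = 1 > 0, so versioning on Stride == 1 would only
 // cover a loop that executes at most once and is skipped.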
3009 if (SE->isKnownPositive(StrideMinusBETaken)) {
3010 LLVM_DEBUG(
3011 dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
3012 "Stride==1 predicate will imply that the loop executes "
3013 "at most once.\n");
3014 return;
3015 }
3016 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");
3017
3018 // Strip back off the integer cast, and check that our result is a
3019 // SCEVUnknown as we expect.
3020 const SCEV *StrideBase = StrideExpr;
3021 if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(StrideBase))
3022 StrideBase = C->getOperand();
3023 SymbolicStrides[Ptr] = cast<SCEVUnknown>(StrideBase);
3024}
3025
3027 const TargetTransformInfo *TTI,
3028 const TargetLibraryInfo *TLI, AAResults *AA,
3029 DominatorTree *DT, LoopInfo *LI)
3030 : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
3031 PtrRtChecking(nullptr), TheLoop(L) {
3032 unsigned MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
3033 if (TTI) {
3034 TypeSize FixedWidth =
3036 if (FixedWidth.isNonZero()) {
3037 // Scale the vector width by 2 as a rough estimate to also consider
3038 // interleaving.
3039 MaxTargetVectorWidthInBits = FixedWidth.getFixedValue() * 2;
3040 }
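 // For example, a target with 256-bit fixed-width vector registers yields
 // MaxTargetVectorWidthInBits = 512, while any scalable vector support resets
 // it to the maximum unsigned value below.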
3041
3042 TypeSize ScalableWidth =
3044 if (ScalableWidth.isNonZero())
3045 MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
3046 }
3047 DepChecker =
3048 std::make_unique<MemoryDepChecker>(*PSE, L, MaxTargetVectorWidthInBits);
3049 PtrRtChecking = std::make_unique<RuntimePointerChecking>(*DepChecker, SE);
3050 if (canAnalyzeLoop()) {
3051 analyzeLoop(AA, LI, TLI, DT);
3052 }
3053}
3054
3056 if (CanVecMem) {
3057 OS.indent(Depth) << "Memory dependences are safe";
3058 const MemoryDepChecker &DC = getDepChecker();
3059 if (!DC.isSafeForAnyVectorWidth())
3060 OS << " with a maximum safe vector width of "
3061 << DC.getMaxSafeVectorWidthInBits() << " bits";
3062 if (PtrRtChecking->Need)
3063 OS << " with run-time checks";
3064 OS << "\n";
3065 }
3066
3067 if (HasConvergentOp)
3068 OS.indent(Depth) << "Has convergent operation in loop\n";
3069
3070 if (Report)
3071 OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";
3072
3073 if (auto *Dependences = DepChecker->getDependences()) {
3074 OS.indent(Depth) << "Dependences:\n";
3075 for (const auto &Dep : *Dependences) {
3076 Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
3077 OS << "\n";
3078 }
3079 } else
3080 OS.indent(Depth) << "Too many dependences, not recorded\n";
3081
3082 // List the pairs of accesses that need run-time checks to prove independence.
3083 PtrRtChecking->print(OS, Depth);
3084 OS << "\n";
3085
3086 OS.indent(Depth)
3087 << "Non vectorizable stores to invariant address were "
3088 << (HasStoreStoreDependenceInvolvingLoopInvariantAddress ||
3089 HasLoadStoreDependenceInvolvingLoopInvariantAddress
3090 ? ""
3091 : "not ")
3092 << "found in loop.\n";
3093
3094 OS.indent(Depth) << "SCEV assumptions:\n";
3095 PSE->getPredicate().print(OS, Depth);
3096
3097 OS << "\n";
3098
3099 OS.indent(Depth) << "Expressions re-written:\n";
3100 PSE->print(OS, Depth);
3101}
3102
3104 auto I = LoopAccessInfoMap.insert({&L, nullptr});
3105
3106 if (I.second)
3107 I.first->second =
3108 std::make_unique<LoopAccessInfo>(&L, &SE, TTI, TLI, &AA, &DT, &LI);
3109
3110 return *I.first->second;
3111}
3112
3114 Function &F, const PreservedAnalyses &PA,
3116 // Check whether our analysis is preserved.
3117 auto PAC = PA.getChecker<LoopAccessAnalysis>();
3118 if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
3119 // If not, give up now.
3120 return true;
3121
3122 // Check whether the analyses we depend on became invalid for any reason.
3123 // Skip checking TargetLibraryAnalysis as it is immutable and can't become
3124 // invalid.
3125 return Inv.invalidate<AAManager>(F, PA) ||
3127 Inv.invalidate<LoopAnalysis>(F, PA) ||
3129}
3130
3134 auto &AA = FAM.getResult<AAManager>(F);
3135 auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
3136 auto &LI = FAM.getResult<LoopAnalysis>(F);
3138 auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
3139 return LoopAccessInfoManager(SE, AA, DT, LI, &TTI, &TLI);
3140}
3141
3142AnalysisKey LoopAccessAnalysis::Key;
Return true if an AddRec pointer Ptr is unsigned non-wrapping, i.e.
static Value * stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp)
If the argument is a GEP, then returns the operand identified by getGEPInductionOperand.
static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride, uint64_t TypeByteSize)
Check the dependence for two accesses with the same stride Stride.
static const SCEV * getMinFromExprs(const SCEV *I, const SCEV *J, ScalarEvolution *SE)
Compare I and J and return the minimum.
static cl::opt< unsigned > MaxForkedSCEVDepth("max-forked-scev-depth", cl::Hidden, cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"), cl::init(5))
static cl::opt< bool > SpeculateUnitStride("laa-speculate-unit-stride", cl::Hidden, cl::desc("Speculate that non-constant strides are unit in LAA"), cl::init(true))
static SmallVector< PointerIntPair< const SCEV *, 1, bool > > findForkedPointer(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &StridesMap, Value *Ptr, const Loop *L)
static cl::opt< bool > EnableMemAccessVersioning("enable-mem-access-versioning", cl::init(true), cl::Hidden, cl::desc("Enable symbolic stride memory access versioning"))
This enables versioning on the strides of symbolically striding memory accesses in code like the foll...
This header provides classes for managing per-loop analyses.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file provides utility analysis objects describing memory locations.
uint64_t High
#define P(N)
FunctionAnalysisManager FAM
This header defines various interfaces for pass management in LLVM.
This file defines the PointerIntPair class.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckType(MVT::SimpleValueType VT, SDValue N, const TargetLowering *TLI, const DataLayout &DL)
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
This pass exposes codegen information to IR-level passes.
static const X86InstrFMA3Group Groups[]
A manager for alias analyses.
Class for arbitrary precision integers.
Definition: APInt.h:76
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1446
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition: APInt.cpp:1010
int64_t getSExtValue() const
Get sign extended value.
Definition: APInt.h:1520
This templated class represents "all analyses that operate over <a particular IR unit>" (e....
Definition: Analysis.h:47
API to communicate dependencies between analyses during invalidation.
Definition: PassManager.h:360
bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA)
Trigger the invalidation of some other analysis pass if not already handled and return whether it was...
Definition: PassManager.h:378
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:321
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:473
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition: ArrayRef.h:204
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:206
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Definition: BasicBlock.cpp:289
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:601
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
A debug info location.
Definition: DebugLoc.h:33
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
iterator end()
Definition: DenseMap.h:84
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:220
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:279
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
EquivalenceClasses - This represents a collection of equivalence classes and supports three efficient...
iterator findValue(const ElemTy &V) const
findValue - Return an iterator to the specified value.
iterator insert(const ElemTy &Data)
insert - Insert a new value into the union/find set, ignoring the request if the value already exists...
member_iterator member_end() const
typename std::set< ECValue, ECValueComparator >::const_iterator iterator
iterator* - Provides a way to iterate over all values in the set.
member_iterator member_begin(iterator I) const
member_iterator unionSets(const ElemTy &V1, const ElemTy &V2)
union - Merge the two equivalence sets for the specified values, inserting them if they do not alread...
const ElemTy & getLeaderValue(const ElemTy &V) const
getLeaderValue - Return the leader for the specified value that is in the set.
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
Definition: Function.h:685
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:973
Type * getResultElementType() const
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:293
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
Definition: Instruction.cpp:83
Class to represent integer types.
Definition: DerivedTypes.h:40
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:278
An instruction for reading from memory.
Definition: Instructions.h:184
Value * getPointerOperand()
Definition: Instructions.h:280
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
This analysis provides dependence information for the memory accesses of a loop.
Result run(Function &F, FunctionAnalysisManager &AM)
bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
const LoopAccessInfo & getInfo(Loop &L)
Drive the analysis of memory accesses in the loop.
const MemoryDepChecker & getDepChecker() const
the Memory Dependence Checker which can determine the loop-independent and loop-carried dependences b...
bool isInvariant(Value *V) const
Returns true if value V is loop invariant.
void print(raw_ostream &OS, unsigned Depth=0) const
Print the information about the memory accesses in the loop.
static bool blockNeedsPredication(BasicBlock *BB, Loop *TheLoop, DominatorTree *DT)
Return true if the block BB needs to be predicated in order for the loop to be vectorized.
LoopAccessInfo(Loop *L, ScalarEvolution *SE, const TargetTransformInfo *TTI, const TargetLibraryInfo *TLI, AAResults *AA, DominatorTree *DT, LoopInfo *LI)
Analysis pass that exposes the LoopInfo for a function.
Definition: LoopInfo.h:566
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
unsigned getNumBackEdges() const
Calculate the number of back edges to the loop header.
BlockT * getHeader() const
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
Definition: LoopIterator.h:172
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:44
bool isAnnotatedParallel() const
Returns true if the loop is annotated parallel.
Definition: LoopInfo.cpp:564
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
Definition: LoopInfo.cpp:631
Metadata node.
Definition: Metadata.h:1067
ArrayRef< MDOperand > operands() const
Definition: Metadata.h:1426
Tracking metadata reference owned by Metadata.
Definition: Metadata.h:889
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
Checks memory dependences among accesses to the same underlying object to determine whether there vec...
ArrayRef< unsigned > getOrderForAccess(Value *Ptr, bool IsWrite) const
Return the program order indices for the access location (Ptr, IsWrite).
bool isSafeForAnyVectorWidth() const
Return true if the number of elements that are safe to operate on simultaneously is not bounded.
bool areDepsSafe(DepCandidates &AccessSets, MemAccessInfoList &CheckDeps, const DenseMap< Value *, const SCEV * > &Strides, const DenseMap< Value *, SmallVector< const Value *, 16 > > &UnderlyingObjects)
Check whether the dependencies between the accesses are safe.
const SmallVectorImpl< Instruction * > & getMemoryInstructions() const
The vector of memory access instructions.
const Loop * getInnermostLoop() const
uint64_t getMaxSafeVectorWidthInBits() const
Return the number of elements that are safe to operate on simultaneously, multiplied by the size of t...
bool isSafeForVectorization() const
No memory dependence was encountered that would inhibit vectorization.
const SmallVectorImpl< Dependence > * getDependences() const
Returns the memory dependences.
SmallVector< Instruction *, 4 > getInstructionsForAccess(Value *Ptr, bool isWrite) const
Find the set of instructions that read or write via Ptr.
VectorizationSafetyStatus
Type to keep track of the status of the dependence check.
bool shouldRetryWithRuntimeCheck() const
In same cases when the dependency check fails we can still vectorize the loop with a dynamic array ac...
void addAccess(StoreInst *SI)
Register the location (instructions are given increasing numbers) of a write access.
PointerIntPair< Value *, 1, bool > MemAccessInfo
Representation for a specific memory location.
static MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
LocationSize Size
The maximum size of the location, in address-units, or UnknownSize if the size is not known.
AAMDNodes AATags
The metadata nodes which describes the aliasing of the location (each member is null if that kind of ...
const Value * Ptr
The address of the start of the location.
Root of the metadata hierarchy.
Definition: Metadata.h:62
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.h:293
Diagnostic information for optimization analysis remarks.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
void addPredicate(const SCEVPredicate &Pred)
Adds a new predicate.
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
const SCEVPredicate & getPredicate() const
bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Returns true if we've proved that V doesn't wrap by means of a SCEV predicate.
void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Proves that V doesn't overflow by adding SCEV predicate.
void print(raw_ostream &OS, unsigned Depth) const
Print the SCEV mappings done by the Predicated Scalar Evolution.
const SCEVAddRecExpr * getAsAddRec(Value *V)
Attempts to produce an AddRecExpr for V by adding additional SCEV predicates.
const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:109
PreservedAnalysisChecker getChecker() const
Build a checker for this PreservedAnalyses and the specified analysis type.
Definition: Analysis.h:264
Holds information about the memory runtime legality checks to verify that a group of pointers do not ...
bool Need
This flag indicates if we need to add the runtime check.
void reset()
Reset the state of the pointer runtime information.
unsigned getNumberOfChecks() const
Returns the number of run-time checks required according to needsChecking.
void printChecks(raw_ostream &OS, const SmallVectorImpl< RuntimePointerCheck > &Checks, unsigned Depth=0) const
Print Checks.
bool needsChecking(const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const
Decide if we need to add a check between two groups of pointers, according to needsChecking.
void print(raw_ostream &OS, unsigned Depth=0) const
Print the list run-time memory checks necessary.
SmallVector< RuntimeCheckingPtrGroup, 2 > CheckingGroups
Holds a partitioning of pointers into "check groups".
void generateChecks(MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies)
Generate the checks and store it.
static bool arePointersInSamePartition(const SmallVectorImpl< int > &PtrToPartition, unsigned PtrIdx1, unsigned PtrIdx2)
Check if pointers are in the same partition.
SmallVector< PointerInfo, 2 > Pointers
Information about the pointers that may require checking.
void insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr, Type *AccessTy, bool WritePtr, unsigned DepSetId, unsigned ASId, PredicatedScalarEvolution &PSE, bool NeedsFreeze)
Insert a pointer and calculate the start and end SCEVs.
This node represents a polynomial recurrence on the trip count of the specified loop.
const SCEV * evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const
Return the value of this chain of recurrences at the specified iteration number.
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
This class represents a constant integer value.
This is the base class for unary integral cast operator classes.
This node represents multiplication of some number of SCEVs.
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
virtual void print(raw_ostream &OS, unsigned Depth=0) const =0
Prints a textual representation of this predicate with an indentation of Depth.
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...
This class represents an analyzed expression in the program.
Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
The main scalar evolution driver.
bool isKnownNonNegative(const SCEV *S)
Test if the given expression is known to be non-negative.
const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
bool isKnownNonPositive(const SCEV *S)
Test if the given expression is known to be non-positive.
const SCEV * getUMaxExpr(const SCEV *LHS, const SCEV *RHS)
const SCEVPredicate * getEqualPredicate(const SCEV *LHS, const SCEV *RHS)
const SCEV * getConstant(ConstantInt *V)
const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
const SCEV * getPtrToIntExpr(const SCEV *Op, Type *Ty)
bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
bool isKnownPositive(const SCEV *S)
Test if the given expression is known to be positive.
const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
const SCEV * getUMinExpr(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
APInt getSignedRangeMin(const SCEV *S)
Determine the min of the signed range for a particular SCEV.
const SCEV * getStoreSizeOfExpr(Type *IntTy, Type *StoreTy)
Return an expression for the store size of StoreTy that is type IntTy.
const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
const SCEV * getSizeOfExpr(Type *IntTy, TypeSize Size)
Return an expression for a TypeSize.
const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
const SCEV * getTruncateOrSignExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:321
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:360
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:342
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:427
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:166
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
void resize(size_type N)
Definition: SmallVector.h:651
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
An instruction for storing to memory.
Definition: Instructions.h:317
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
TypeSize getRegisterBitWidth(RegisterKind K) const
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:265
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:255
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
Value * getOperand(unsigned i) const
Definition: User.h:169
unsigned getNumOperands() const
Definition: User.h:191
static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)
Retrieve all the VFInfo instances associated to the CallInst CI.
Definition: VectorUtils.h:70
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
Definition: Value.h:736
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:199
constexpr bool isNonZero() const
Definition: TypeSize.h:158
An efficient, type-erasing, non-owning reference to a callable.
TypeSize getSequentialElementStride(const DataLayout &DL) const
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
raw_ostream & indent(unsigned NumSpaces)
indent - Insert 'NumSpaces' spaces.
friend const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:236
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
Definition: PatternMatch.h:612
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:450
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:470
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
std::optional< int > getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB, Value *PtrB, const DataLayout &DL, ScalarEvolution &SE, bool StrictCheck=false, bool CheckType=true)
Returns the distance between the pointers PtrA and PtrB iff they are compatible and it is possible to...
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
@ Offset
Definition: DWP.cpp:456
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
unsigned getPointerAddressSpace(const Type *T)
Definition: SPIRVUtils.h:126
std::optional< const MDOperand * > findStringMetadataForLoop(const Loop *TheLoop, StringRef Name)
Find string metadata for loop.
Definition: LoopInfo.cpp:1053
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
AddressSpace
Definition: NVPTXBaseInfo.h:21
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
Definition: STLExtras.h:1928
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Definition: Function.cpp:2058
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool isPointerTy(const Type *T)
Definition: SPIRVUtils.h:120
std::optional< int64_t > getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr, const Loop *Lp, const DenseMap< Value *, const SCEV * > &StridesMap=DenseMap< Value *, const SCEV * >(), bool Assume=false, bool ShouldCheckWrap=true)
If the pointer has a constant stride return it in units of the access type size.
bool sortPtrAccesses(ArrayRef< Value * > VL, Type *ElemTy, const DataLayout &DL, ScalarEvolution &SE, SmallVectorImpl< unsigned > &SortedIndices)
Attempt to sort the pointers in VL and return the sorted indices in SortedIndices,...
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
const SCEV * replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &PtrToStride, Value *Ptr)
Return the SCEV corresponding to a pointer with the symbolic stride replaced with constant one,...
bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL, ScalarEvolution &SE, bool CheckType=true)
Returns true if the memory operations A and B are consecutive.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
OutputIt copy(R &&Range, OutputIt Out)
Definition: STLExtras.h:1824
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1749
gep_type_iterator gep_type_begin(const User *GEP)
Type * getLoadStoreType(Value *I)
A helper function that returns the type of a load or store instruction.
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
MDNode * Scope
The tag for alias scope specification (used with noalias).
Definition: Metadata.h:783
MDNode * TBAA
The tag for type-based alias analysis.
Definition: Metadata.h:777
MDNode * NoAlias
The tag specifying the noalias scope.
Definition: Metadata.h:786
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition: Analysis.h:26
Dependece between memory access instructions.
Instruction * getDestination(const MemoryDepChecker &DepChecker) const
Return the destination instruction of the dependence.
DepType Type
The type of the dependence.
bool isPossiblyBackward() const
May be a lexically backward dependence type (includes Unknown).
Instruction * getSource(const MemoryDepChecker &DepChecker) const
Return the source instruction of the dependence.
bool isForward() const
Lexically forward dependence.
bool isBackward() const
Lexically backward dependence.
void print(raw_ostream &OS, unsigned Depth, const SmallVectorImpl< Instruction * > &Instrs) const
Print the dependence.
DepType
The type of the dependence.
static const char * DepName[]
String version of the types.
static VectorizationSafetyStatus isSafeForVectorization(DepType Type)
Dependence types that don't prevent vectorization.
unsigned AddressSpace
Address space of the involved pointers.
bool addPointer(unsigned Index, RuntimePointerChecking &RtCheck)
Tries to add the pointer recorded in RtCheck at index Index to this pointer checking group.
bool NeedsFreeze
Whether the pointer needs to be frozen after expansion, e.g.
const SCEV * High
The SCEV expression which represents the upper bound of all the pointers in this group.
SmallVector< unsigned, 2 > Members
Indices of all the pointers that constitute this grouping.
RuntimeCheckingPtrGroup(unsigned Index, RuntimePointerChecking &RtCheck)
Create a new pointer checking group containing a single pointer, with index Index in RtCheck.
const SCEV * Low
The SCEV expression which represents the lower bound of all the pointers in this group.
bool IsWritePtr
Holds the information if this pointer is used for writing to memory.
unsigned DependencySetId
Holds the id of the set of pointers that could be dependent because of a shared underlying object.
unsigned AliasSetId
Holds the id of the disjoint alias set to which this pointer belongs.
static const unsigned MaxVectorWidth
Maximum SIMD width.
static unsigned VectorizationFactor
VF as overridden by the user.
static unsigned RuntimeMemoryCheckThreshold
\When performing memory disambiguation checks at runtime do not make more than this number of compari...
static bool isInterleaveForced()
True if force-vector-interleave was specified by the user.
static unsigned VectorizationInterleave
Interleave factor as overridden by the user.
Function object to check whether the first component of a container supported by std::get (like std::...
Definition: STLExtras.h:1450