LoopAccessAnalysis.cpp
1//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// The implementation of the loop memory dependence analysis that was
10// originally developed for the loop vectorizer.
11//
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/APInt.h"
16#include "llvm/ADT/DenseMap.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/SetVector.h"
22#include "llvm/ADT/SmallSet.h"
37#include "llvm/IR/BasicBlock.h"
38#include "llvm/IR/Constants.h"
39#include "llvm/IR/DataLayout.h"
40#include "llvm/IR/DebugLoc.h"
43#include "llvm/IR/Dominators.h"
44#include "llvm/IR/Function.h"
46#include "llvm/IR/InstrTypes.h"
47#include "llvm/IR/Instruction.h"
49#include "llvm/IR/Operator.h"
50#include "llvm/IR/PassManager.h"
52#include "llvm/IR/Type.h"
53#include "llvm/IR/Value.h"
54#include "llvm/IR/ValueHandle.h"
57#include "llvm/Support/Debug.h"
60#include <algorithm>
61#include <cassert>
62#include <cstdint>
63#include <iterator>
64#include <utility>
65#include <variant>
66#include <vector>
67
68using namespace llvm;
69using namespace llvm::PatternMatch;
70
71#define DEBUG_TYPE "loop-accesses"
72
74VectorizationFactor("force-vector-width", cl::Hidden,
75 cl::desc("Sets the SIMD width. Zero is autoselect."),
78
80VectorizationInterleave("force-vector-interleave", cl::Hidden,
81 cl::desc("Sets the vectorization interleave count. "
82 "Zero is autoselect."),
86
88 "runtime-memory-check-threshold", cl::Hidden,
89 cl::desc("When performing memory disambiguation checks at runtime do not "
90 "generate more than this number of comparisons (default = 8)."),
93
94/// The maximum number of iterations used to merge memory checks.
96 "memory-check-merge-threshold", cl::Hidden,
97 cl::desc("Maximum number of comparisons done when trying to merge "
98 "runtime memory checks. (default = 100)"),
99 cl::init(100));
100
101/// Maximum SIMD width.
102const unsigned VectorizerParams::MaxVectorWidth = 64;
103
104/// We collect dependences up to this threshold.
106 MaxDependences("max-dependences", cl::Hidden,
107 cl::desc("Maximum number of dependences collected by "
108 "loop-access analysis (default = 100)"),
109 cl::init(100));
110
111/// This enables versioning on the strides of symbolically striding memory
112/// accesses in code like the following.
113/// for (i = 0; i < N; ++i)
114/// A[i * Stride1] += B[i * Stride2] ...
115///
116/// Will be roughly translated to
117/// if (Stride1 == 1 && Stride2 == 1) {
118/// for (i = 0; i < N; i+=4)
119/// A[i:i+3] += ...
120/// } else
121/// ...
123 "enable-mem-access-versioning", cl::init(true), cl::Hidden,
124 cl::desc("Enable symbolic stride memory access versioning"));
125
126/// Enable store-to-load forwarding conflict detection. This option can
127/// be disabled for correctness testing.
129 "store-to-load-forwarding-conflict-detection", cl::Hidden,
130 cl::desc("Enable conflict detection in loop-access analysis"),
131 cl::init(true));
132
134 "max-forked-scev-depth", cl::Hidden,
135 cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"),
136 cl::init(5));
137
139 "laa-speculate-unit-stride", cl::Hidden,
140 cl::desc("Speculate that non-constant strides are unit in LAA"),
141 cl::init(true));
142
144 "hoist-runtime-checks", cl::Hidden,
145 cl::desc(
146 "Hoist inner loop runtime memory checks to outer loop if possible"),
149
151 return ::VectorizationInterleave.getNumOccurrences() > 0;
152}
153
155 const DenseMap<Value *, const SCEV *> &PtrToStride,
156 Value *Ptr) {
157 const SCEV *OrigSCEV = PSE.getSCEV(Ptr);
158
159 // If there is an entry in the map, return the SCEV of the pointer with the
160 // symbolic stride replaced by one.
162 if (SI == PtrToStride.end())
163 // For a non-symbolic stride, just return the original expression.
164 return OrigSCEV;
165
166 const SCEV *StrideSCEV = SI->second;
167 // Note: This assert is both overly strong and overly weak. The actual
168 // invariant here is that StrideSCEV should be loop invariant. The only
169 // such invariant strides we happen to speculate right now are unknowns
170 // and thus this is a reasonable proxy of the actual invariant.
171 assert(isa<SCEVUnknown>(StrideSCEV) && "shouldn't be in map");
172
173 ScalarEvolution *SE = PSE.getSE();
174 const SCEV *CT = SE->getOne(StrideSCEV->getType());
175 PSE.addPredicate(*SE->getEqualPredicate(StrideSCEV, CT));
176 const SCEV *Expr = PSE.getSCEV(Ptr);
177
178 LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
179 << " by: " << *Expr << "\n");
180 return Expr;
181}
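// Illustrative sketch (not part of the original source): for an access such
// as A[i * Stride] with i32 elements, the original pointer SCEV is roughly
// {%A,+,(4 * %Stride)}<%loop>. If %Stride appears in PtrToStride, the
// predicate "%Stride == 1" is added to PSE and the rewritten expression
// becomes {%A,+,4}<%loop>, i.e. a unit-stride access. A transform that
// versions the loop can then guard the optimized copy with that predicate.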
182
184 unsigned Index, const RuntimePointerChecking &RtCheck)
185 : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),
186 AddressSpace(RtCheck.Pointers[Index]
187 .PointerValue->getType()
189 NeedsFreeze(RtCheck.Pointers[Index].NeedsFreeze) {
190 Members.push_back(Index);
191}
192
193/// Calculate Start and End points of memory access.
194/// Let's assume A is the first access and B is a memory access on the N-th loop
195/// iteration. Then B is calculated as:
196/// B = A + Step*N .
197/// Step value may be positive or negative.
198/// N is a calculated back-edge taken count:
199/// N = (TripCount > 0) ? RoundDown(TripCount - 1, VF) : 0
200/// Start and End points are calculated in the following way:
201/// Start = UMIN(A, B) ; End = UMAX(A, B) + SizeOfElt,
202/// where SizeOfElt is the size of single memory access in bytes.
203///
204/// There is no conflict when the intervals are disjoint:
205/// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
206static std::pair<const SCEV *, const SCEV *> getStartAndEndForAccess(
207 const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy,
209 DenseMap<std::pair<const SCEV *, Type *>,
210 std::pair<const SCEV *, const SCEV *>> &PointerBounds) {
211 ScalarEvolution *SE = PSE.getSE();
212
213 auto [Iter, Ins] = PointerBounds.insert(
214 {{PtrExpr, AccessTy},
215 {SE->getCouldNotCompute(), SE->getCouldNotCompute()}});
216 if (!Ins)
217 return Iter->second;
218
219 const SCEV *ScStart;
220 const SCEV *ScEnd;
221
222 if (SE->isLoopInvariant(PtrExpr, Lp)) {
223 ScStart = ScEnd = PtrExpr;
224 } else if (auto *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr)) {
225 const SCEV *Ex = PSE.getSymbolicMaxBackedgeTakenCount();
226
227 ScStart = AR->getStart();
228 ScEnd = AR->evaluateAtIteration(Ex, *SE);
229 const SCEV *Step = AR->getStepRecurrence(*SE);
230
231 // For expressions with negative step, the upper bound is ScStart and the
232 // lower bound is ScEnd.
233 if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
234 if (CStep->getValue()->isNegative())
235 std::swap(ScStart, ScEnd);
236 } else {
237 // Fallback case: the step is not constant, but we can still
238 // get the upper and lower bounds of the interval by using min/max
239 // expressions.
240 ScStart = SE->getUMinExpr(ScStart, ScEnd);
241 ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
242 }
243 } else
244 return {SE->getCouldNotCompute(), SE->getCouldNotCompute()};
245
246 assert(SE->isLoopInvariant(ScStart, Lp) && "ScStart needs to be invariant");
247 assert(SE->isLoopInvariant(ScEnd, Lp) && "ScEnd needs to be invariant");
248
249 // Add the size of the pointed element to ScEnd.
250 auto &DL = Lp->getHeader()->getDataLayout();
251 Type *IdxTy = DL.getIndexType(PtrExpr->getType());
252 const SCEV *EltSizeSCEV = SE->getStoreSizeOfExpr(IdxTy, AccessTy);
253 ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);
254
255 Iter->second = {ScStart, ScEnd};
256 return Iter->second;
257}
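// Worked example (hypothetical values): for the affine AddRec {%a,+,4}<%loop>
// accessing i32 elements with a symbolic maximum backedge-taken count of 99,
// ScStart is %a, the last accessed address is %a + 4*99 = %a + 396, and after
// adding the element size the cached range is [%a, %a + 400). For a negative
// step such as {%a,+,-4}, the evaluated end lies below the start, so the two
// values are swapped before the element size is added.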
258
259/// Calculate Start and End points of memory access using
260/// getStartAndEndForAccess.
262 Type *AccessTy, bool WritePtr,
263 unsigned DepSetId, unsigned ASId,
265 bool NeedsFreeze) {
266 const auto &[ScStart, ScEnd] = getStartAndEndForAccess(
267 Lp, PtrExpr, AccessTy, PSE, DC.getPointerBounds());
268 assert(!isa<SCEVCouldNotCompute>(ScStart) &&
269 !isa<SCEVCouldNotCompute>(ScEnd) &&
270 "must be able to compute both start and end expressions");
271 Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, PtrExpr,
272 NeedsFreeze);
273}
274
275bool RuntimePointerChecking::tryToCreateDiffCheck(
276 const RuntimeCheckingPtrGroup &CGI, const RuntimeCheckingPtrGroup &CGJ) {
277 // If either group contains multiple different pointers, bail out.
278 // TODO: Support multiple pointers by using the minimum or maximum pointer,
279 // depending on src & sink.
280 if (CGI.Members.size() != 1 || CGJ.Members.size() != 1)
281 return false;
282
283 const PointerInfo *Src = &Pointers[CGI.Members[0]];
284 const PointerInfo *Sink = &Pointers[CGJ.Members[0]];
285
286 // If either pointer is read and written, multiple checks may be needed. Bail
287 // out.
288 if (!DC.getOrderForAccess(Src->PointerValue, !Src->IsWritePtr).empty() ||
289 !DC.getOrderForAccess(Sink->PointerValue, !Sink->IsWritePtr).empty())
290 return false;
291
292 ArrayRef<unsigned> AccSrc =
293 DC.getOrderForAccess(Src->PointerValue, Src->IsWritePtr);
294 ArrayRef<unsigned> AccSink =
295 DC.getOrderForAccess(Sink->PointerValue, Sink->IsWritePtr);
296 // If either pointer is accessed multiple times, there may not be a clear
297 // src/sink relation. Bail out for now.
298 if (AccSrc.size() != 1 || AccSink.size() != 1)
299 return false;
300
301 // If the sink is accessed before src, swap src/sink.
302 if (AccSink[0] < AccSrc[0])
303 std::swap(Src, Sink);
304
305 auto *SrcAR = dyn_cast<SCEVAddRecExpr>(Src->Expr);
306 auto *SinkAR = dyn_cast<SCEVAddRecExpr>(Sink->Expr);
307 if (!SrcAR || !SinkAR || SrcAR->getLoop() != DC.getInnermostLoop() ||
308 SinkAR->getLoop() != DC.getInnermostLoop())
309 return false;
310
312 DC.getInstructionsForAccess(Src->PointerValue, Src->IsWritePtr);
314 DC.getInstructionsForAccess(Sink->PointerValue, Sink->IsWritePtr);
315 Type *SrcTy = getLoadStoreType(SrcInsts[0]);
316 Type *DstTy = getLoadStoreType(SinkInsts[0]);
317 if (isa<ScalableVectorType>(SrcTy) || isa<ScalableVectorType>(DstTy))
318 return false;
319
320 const DataLayout &DL =
321 SinkAR->getLoop()->getHeader()->getDataLayout();
322 unsigned AllocSize =
323 std::max(DL.getTypeAllocSize(SrcTy), DL.getTypeAllocSize(DstTy));
324
325 // Only constant steps matching the AllocSize are supported at the
326 // moment. This simplifies the difference computation. Can be extended in the
327 // future.
328 auto *Step = dyn_cast<SCEVConstant>(SinkAR->getStepRecurrence(*SE));
329 if (!Step || Step != SrcAR->getStepRecurrence(*SE) ||
330 Step->getAPInt().abs() != AllocSize)
331 return false;
332
333 IntegerType *IntTy =
334 IntegerType::get(Src->PointerValue->getContext(),
335 DL.getPointerSizeInBits(CGI.AddressSpace));
336
337 // When counting down, the dependence distance needs to be swapped.
338 if (Step->getValue()->isNegative())
339 std::swap(SinkAR, SrcAR);
340
341 const SCEV *SinkStartInt = SE->getPtrToIntExpr(SinkAR->getStart(), IntTy);
342 const SCEV *SrcStartInt = SE->getPtrToIntExpr(SrcAR->getStart(), IntTy);
343 if (isa<SCEVCouldNotCompute>(SinkStartInt) ||
344 isa<SCEVCouldNotCompute>(SrcStartInt))
345 return false;
346
347 const Loop *InnerLoop = SrcAR->getLoop();
348 // If the start values for both Src and Sink also vary according to an outer
349 // loop, then it's probably better to avoid creating diff checks because
350 // they may not be hoisted. We should instead let llvm::addRuntimeChecks
351 // do the expanded full range overlap checks, which can be hoisted.
352 if (HoistRuntimeChecks && InnerLoop->getParentLoop() &&
353 isa<SCEVAddRecExpr>(SinkStartInt) && isa<SCEVAddRecExpr>(SrcStartInt)) {
354 auto *SrcStartAR = cast<SCEVAddRecExpr>(SrcStartInt);
355 auto *SinkStartAR = cast<SCEVAddRecExpr>(SinkStartInt);
356 const Loop *StartARLoop = SrcStartAR->getLoop();
357 if (StartARLoop == SinkStartAR->getLoop() &&
358 StartARLoop == InnerLoop->getParentLoop() &&
359 // If the diff check would already be loop invariant (due to the
360 // recurrences being the same), then we prefer to keep the diff checks
361 // because they are cheaper.
362 SrcStartAR->getStepRecurrence(*SE) !=
363 SinkStartAR->getStepRecurrence(*SE)) {
364 LLVM_DEBUG(dbgs() << "LAA: Not creating diff runtime check, since these "
365 "cannot be hoisted out of the outer loop\n");
366 return false;
367 }
368 }
369
370 LLVM_DEBUG(dbgs() << "LAA: Creating diff runtime check for:\n"
371 << "SrcStart: " << *SrcStartInt << '\n'
372 << "SinkStartInt: " << *SinkStartInt << '\n');
373 DiffChecks.emplace_back(SrcStartInt, SinkStartInt, AllocSize,
374 Src->NeedsFreeze || Sink->NeedsFreeze);
375 return true;
376}
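// Example (sketch, not taken from the source): for
//   for (i = 0; i < n; ++i) A[i] = B[i] + 1;
// with i32 elements, the load of B[i] is the source and the store to A[i] the
// sink, and both strides are the constant 4 == AllocSize. The recorded diff
// check compares the runtime distance between the two start addresses, which
// is cheaper to expand than the full (Start, End) interval-overlap check.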
377
378SmallVector<RuntimePointerCheck, 4> RuntimePointerChecking::generateChecks() {
380
381 for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
382 for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
385
386 if (needsChecking(CGI, CGJ)) {
387 CanUseDiffCheck = CanUseDiffCheck && tryToCreateDiffCheck(CGI, CGJ);
388 Checks.emplace_back(&CGI, &CGJ);
389 }
390 }
391 }
392 return Checks;
393}
394
395void RuntimePointerChecking::generateChecks(
396 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
397 assert(Checks.empty() && "Checks is not empty");
398 groupChecks(DepCands, UseDependencies);
399 Checks = generateChecks();
400}
401
403 const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
404 for (const auto &I : M.Members)
405 for (const auto &J : N.Members)
406 if (needsChecking(I, J))
407 return true;
408 return false;
409}
410
411/// Compare \p I and \p J and return the minimum.
412/// Return nullptr in case we couldn't find an answer.
413static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
414 ScalarEvolution *SE) {
415 std::optional<APInt> Diff = SE->computeConstantDifference(J, I);
416 if (!Diff)
417 return nullptr;
418 return Diff->isNegative() ? J : I;
419}
420
422 unsigned Index, const RuntimePointerChecking &RtCheck) {
423 return addPointer(
424 Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,
425 RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
426 RtCheck.Pointers[Index].NeedsFreeze, *RtCheck.SE);
427}
428
429bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, const SCEV *Start,
430 const SCEV *End, unsigned AS,
431 bool NeedsFreeze,
432 ScalarEvolution &SE) {
433 assert(AddressSpace == AS &&
434 "all pointers in a checking group must be in the same address space");
435
436 // Compare the starts and ends with the known minimum and maximum
437 // of this set. We need to know how we compare against the min/max
438 // of the set in order to be able to emit memchecks.
439 const SCEV *Min0 = getMinFromExprs(Start, Low, &SE);
440 if (!Min0)
441 return false;
442
443 const SCEV *Min1 = getMinFromExprs(End, High, &SE);
444 if (!Min1)
445 return false;
446
447 // Update the low bound expression if we've found a new min value.
448 if (Min0 == Start)
449 Low = Start;
450
451 // Update the high bound expression if we've found a new max value.
452 if (Min1 != End)
453 High = End;
454
455 Members.push_back(Index);
456 this->NeedsFreeze |= NeedsFreeze;
457 return true;
458}
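// Illustrative example: suppose a group currently has Low = %a + 4 and
// High = %a + 64, and addPointer is called for a pointer with Start = %a and
// End = %a + 32. Both differences are compile-time constants, so the pointer
// is accepted, Low is lowered to %a, and High stays %a + 64. If either
// difference were not constant, the function would return false and the
// caller would have to start a new group.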
459
460void RuntimePointerChecking::groupChecks(
461 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
462 // We build the groups from dependency candidates equivalence classes
463 // because:
464 // - We know that pointers in the same equivalence class share
465 // the same underlying object and therefore there is a chance
466 // that we can compare pointers
467 // - We wouldn't be able to merge two pointers for which we need
468 // to emit a memcheck. The classes in DepCands are already
469 // conveniently built such that no two pointers in the same
470 // class need checking against each other.
471
472 // We use the following (greedy) algorithm to construct the groups
473 // For every pointer in the equivalence class:
474 // For each existing group:
475 // - if the difference between this pointer and the min/max bounds
476 // of the group is a constant, then make the pointer part of the
477 // group and update the min/max bounds of that group as required.
478
479 CheckingGroups.clear();
480
481 // If we need to check two pointers to the same underlying object
482 // with a non-constant difference, we shouldn't perform any pointer
483 // grouping with those pointers. This is because we can easily get
484 // into cases where the resulting check would return false, even when
485 // the accesses are safe.
486 //
487 // The following example shows this:
488 // for (i = 0; i < 1000; ++i)
489 // a[5000 + i * m] = a[i] + a[i + 9000]
490 //
491 // Here grouping gives a check of (5000, 5000 + 1000 * m) against
492 // (0, 10000) which is always false. However, if m is 1, there is no
493 // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
494 // us to perform an accurate check in this case.
495 //
496 // The above case requires that we have an UnknownDependence between
497 // accesses to the same underlying object. This cannot happen unless
498 // FoundNonConstantDistanceDependence is set, and therefore UseDependencies
499 // is also false. In this case we will use the fallback path and create
500 // separate checking groups for all pointers.
501
502 // If we don't have the dependency partitions, construct a new
503 // checking pointer group for each pointer. This is also required
504 // for correctness, because in this case we can have checking between
505 // pointers to the same underlying object.
506 if (!UseDependencies) {
507 for (unsigned I = 0; I < Pointers.size(); ++I)
508 CheckingGroups.emplace_back(I, *this);
509 return;
510 }
511
512 unsigned TotalComparisons = 0;
513
515 for (unsigned Index = 0; Index < Pointers.size(); ++Index)
516 PositionMap[Pointers[Index].PointerValue].push_back(Index);
517
518 // We need to keep track of what pointers we've already seen so we
519 // don't process them twice.
521
522 // Go through all equivalence classes, get the "pointer check groups"
523 // and add them to the overall solution. We use the order in which accesses
524 // appear in 'Pointers' to enforce determinism.
525 for (unsigned I = 0; I < Pointers.size(); ++I) {
526 // We've seen this pointer before, and therefore already processed
527 // its equivalence class.
528 if (Seen.count(I))
529 continue;
530
532 Pointers[I].IsWritePtr);
533
535 auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));
536
537 // Because DepCands is constructed by visiting accesses in the order in
538 // which they appear in alias sets (which is deterministic) and the
539 // iteration order within an equivalence class member is only dependent on
540 // the order in which unions and insertions are performed on the
541 // equivalence class, the iteration order is deterministic.
542 for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
543 MI != ME; ++MI) {
544 auto PointerI = PositionMap.find(MI->getPointer());
545 assert(PointerI != PositionMap.end() &&
546 "pointer in equivalence class not found in PositionMap");
547 for (unsigned Pointer : PointerI->second) {
548 bool Merged = false;
549 // Mark this pointer as seen.
550 Seen.insert(Pointer);
551
552 // Go through all the existing sets and see if we can find one
553 // which can include this pointer.
554 for (RuntimeCheckingPtrGroup &Group : Groups) {
555 // Don't perform more than a certain amount of comparisons.
556 // This should limit the cost of grouping the pointers to something
557 // reasonable. If we do end up hitting this threshold, the algorithm
558 // will create separate groups for all remaining pointers.
559 if (TotalComparisons > MemoryCheckMergeThreshold)
560 break;
561
562 TotalComparisons++;
563
564 if (Group.addPointer(Pointer, *this)) {
565 Merged = true;
566 break;
567 }
568 }
569
570 if (!Merged)
571 // We couldn't add this pointer to any existing set or the threshold
572 // for the number of comparisons has been reached. Create a new group
573 // to hold the current pointer.
574 Groups.emplace_back(Pointer, *this);
575 }
576 }
577
578 // We've computed the grouped checks for this partition.
579 // Save the results and continue with the next one.
580 llvm::copy(Groups, std::back_inserter(CheckingGroups));
581 }
582}
583
585 const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
586 unsigned PtrIdx2) {
587 return (PtrToPartition[PtrIdx1] != -1 &&
588 PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
589}
590
591bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
592 const PointerInfo &PointerI = Pointers[I];
593 const PointerInfo &PointerJ = Pointers[J];
594
595 // No need to check if two readonly pointers intersect.
596 if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
597 return false;
598
599 // Only need to check pointers between two different dependency sets.
600 if (PointerI.DependencySetId == PointerJ.DependencySetId)
601 return false;
602
603 // Only need to check pointers in the same alias set.
604 return PointerI.AliasSetId == PointerJ.AliasSetId;
605}
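// Illustration (hypothetical accesses): given a store to %p in dependence set
// 1 / alias set 1, a load from %q in dependence set 2 / alias set 1, and a
// load from %r in alias set 2, only the (%p, %q) pair needs a runtime check:
// %q and %r are both reads, %r lives in a different alias set, and anything
// sharing %p's dependence set is already covered by the dependence analysis.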
606
609 unsigned Depth) const {
610 unsigned N = 0;
611 for (const auto &[Check1, Check2] : Checks) {
612 const auto &First = Check1->Members, &Second = Check2->Members;
613
614 OS.indent(Depth) << "Check " << N++ << ":\n";
615
616 OS.indent(Depth + 2) << "Comparing group (" << Check1 << "):\n";
617 for (unsigned K : First)
618 OS.indent(Depth + 2) << *Pointers[K].PointerValue << "\n";
619
620 OS.indent(Depth + 2) << "Against group (" << Check2 << "):\n";
621 for (unsigned K : Second)
622 OS.indent(Depth + 2) << *Pointers[K].PointerValue << "\n";
623 }
624}
625
627
628 OS.indent(Depth) << "Run-time memory checks:\n";
629 printChecks(OS, Checks, Depth);
630
631 OS.indent(Depth) << "Grouped accesses:\n";
632 for (const auto &CG : CheckingGroups) {
633 OS.indent(Depth + 2) << "Group " << &CG << ":\n";
634 OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
635 << ")\n";
636 for (unsigned Member : CG.Members) {
637 OS.indent(Depth + 6) << "Member: " << *Pointers[Member].Expr << "\n";
638 }
639 }
640}
641
642namespace {
643
644/// Analyses memory accesses in a loop.
645///
646/// Checks whether run time pointer checks are needed and builds sets for data
647/// dependence checking.
648class AccessAnalysis {
649public:
650 /// Read or write access location.
651 typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
652 typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;
653
654 AccessAnalysis(const Loop *TheLoop, AAResults *AA, const LoopInfo *LI,
657 SmallPtrSetImpl<MDNode *> &LoopAliasScopes)
658 : TheLoop(TheLoop), BAA(*AA), AST(BAA), LI(LI), DepCands(DA), PSE(PSE),
659 LoopAliasScopes(LoopAliasScopes) {
660 // We're analyzing dependences across loop iterations.
661 BAA.enableCrossIterationMode();
662 }
663
664 /// Register a load and whether it is only read from.
665 void addLoad(const MemoryLocation &Loc, Type *AccessTy, bool IsReadOnly) {
666 Value *Ptr = const_cast<Value *>(Loc.Ptr);
667 AST.add(adjustLoc(Loc));
668 Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy);
669 if (IsReadOnly)
670 ReadOnlyPtr.insert(Ptr);
671 }
672
673 /// Register a store.
674 void addStore(const MemoryLocation &Loc, Type *AccessTy) {
675 Value *Ptr = const_cast<Value *>(Loc.Ptr);
676 AST.add(adjustLoc(Loc));
677 Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy);
678 }
679
680 /// Check if we can emit a run-time no-alias check for \p Access.
681 ///
682 /// Returns true if we can emit a run-time no alias check for \p Access.
683 /// If we can check this access, this also adds it to a dependence set and
684 /// adds a run-time check for it to \p RtCheck. If \p Assume is true,
685 /// we will attempt to use additional run-time checks in order to get
686 /// the bounds of the pointer.
687 bool createCheckForAccess(RuntimePointerChecking &RtCheck,
688 MemAccessInfo Access, Type *AccessTy,
689 const DenseMap<Value *, const SCEV *> &Strides,
691 Loop *TheLoop, unsigned &RunningDepId,
692 unsigned ASId, bool ShouldCheckStride, bool Assume);
693
694 /// Check whether we can check the pointers at runtime for
695 /// non-intersection.
696 ///
697 /// Returns true if we need no check or if we do and we can generate them
698 /// (i.e. the pointers have computable bounds).
699 bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
700 Loop *TheLoop, const DenseMap<Value *, const SCEV *> &Strides,
701 Value *&UncomputablePtr, bool ShouldCheckWrap = false);
702
703 /// Goes over all memory accesses, checks whether a RT check is needed
704 /// and builds sets of dependent accesses.
705 void buildDependenceSets() {
706 processMemAccesses();
707 }
708
709 /// Initial processing of memory accesses determined that we need to
710 /// perform dependency checking.
711 ///
712 /// Note that this can later be cleared if we retry memcheck analysis without
713 /// dependency checking (i.e. FoundNonConstantDistanceDependence).
714 bool isDependencyCheckNeeded() const { return !CheckDeps.empty(); }
715
716 /// We decided that no dependence analysis would be used. Reset the state.
717 void resetDepChecks(MemoryDepChecker &DepChecker) {
718 CheckDeps.clear();
719 DepChecker.clearDependences();
720 }
721
722 const MemAccessInfoList &getDependenciesToCheck() const { return CheckDeps; }
723
724private:
726
727 /// Adjust the MemoryLocation so that it represents accesses to this
728 /// location across all iterations, rather than a single one.
729 MemoryLocation adjustLoc(MemoryLocation Loc) const {
730 // The accessed location varies within the loop, but remains within the
731 // underlying object.
733 Loc.AATags.Scope = adjustAliasScopeList(Loc.AATags.Scope);
734 Loc.AATags.NoAlias = adjustAliasScopeList(Loc.AATags.NoAlias);
735 return Loc;
736 }
737
738 /// Drop alias scopes that are only valid within a single loop iteration.
739 MDNode *adjustAliasScopeList(MDNode *ScopeList) const {
740 if (!ScopeList)
741 return nullptr;
742
743 // For the sake of simplicity, drop the whole scope list if any scope is
744 // iteration-local.
745 if (any_of(ScopeList->operands(), [&](Metadata *Scope) {
746 return LoopAliasScopes.contains(cast<MDNode>(Scope));
747 }))
748 return nullptr;
749
750 return ScopeList;
751 }
752
753 /// Go over all memory accesses and check whether runtime pointer checks
754 /// are needed and build sets of dependency check candidates.
755 void processMemAccesses();
756
757 /// Map of all accesses. Values are the types used to access memory pointed to
758 /// by the pointer.
759 PtrAccessMap Accesses;
760
761 /// The loop being checked.
762 const Loop *TheLoop;
763
764 /// List of accesses that need a further dependence check.
765 MemAccessInfoList CheckDeps;
766
767 /// Set of pointers that are read only.
768 SmallPtrSet<Value*, 16> ReadOnlyPtr;
769
770 /// Batched alias analysis results.
771 BatchAAResults BAA;
772
773 /// An alias set tracker to partition the access set by underlying object and
774 /// intrinsic property (such as TBAA metadata).
775 AliasSetTracker AST;
776
777 /// The LoopInfo of the loop being checked.
778 const LoopInfo *LI;
779
780 /// Sets of potentially dependent accesses - members of one set share an
781 /// underlying pointer. The set "CheckDeps" identifies which sets really need a
782 /// dependence check.
784
785 /// Initial processing of memory accesses determined that we may need
786 /// to add memchecks. Perform the analysis to determine the necessary checks.
787 ///
788 /// Note that this is different from isDependencyCheckNeeded. When we retry
789 /// memcheck analysis without dependency checking
790 /// (i.e. FoundNonConstantDistanceDependence), isDependencyCheckNeeded is
791 /// cleared while this remains set if we have potentially dependent accesses.
792 bool IsRTCheckAnalysisNeeded = false;
793
794 /// The SCEV predicate containing all the SCEV-related assumptions.
796
798
799 /// Alias scopes that are declared inside the loop, and as such not valid
800 /// across iterations.
801 SmallPtrSetImpl<MDNode *> &LoopAliasScopes;
802};
803
804} // end anonymous namespace
805
806/// Check whether a pointer can participate in a runtime bounds check.
807/// If \p Assume, try harder to prove that we can compute the bounds of \p Ptr
808/// by adding run-time checks (overflow checks) if necessary.
810 const SCEV *PtrScev, Loop *L, bool Assume) {
811 // The bounds for a loop-invariant pointer are trivial.
812 if (PSE.getSE()->isLoopInvariant(PtrScev, L))
813 return true;
814
815 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
816
817 if (!AR && Assume)
818 AR = PSE.getAsAddRec(Ptr);
819
820 if (!AR)
821 return false;
822
823 return AR->isAffine();
824}
825
826/// Check whether a pointer address cannot wrap.
829 Type *AccessTy, Loop *L, bool Assume) {
830 const SCEV *PtrScev = PSE.getSCEV(Ptr);
831 if (PSE.getSE()->isLoopInvariant(PtrScev, L))
832 return true;
833
834 return getPtrStride(PSE, AccessTy, Ptr, L, Strides, Assume).has_value() ||
836}
837
838static void visitPointers(Value *StartPtr, const Loop &InnermostLoop,
839 function_ref<void(Value *)> AddPointer) {
841 SmallVector<Value *> WorkList;
842 WorkList.push_back(StartPtr);
843
844 while (!WorkList.empty()) {
845 Value *Ptr = WorkList.pop_back_val();
846 if (!Visited.insert(Ptr).second)
847 continue;
848 auto *PN = dyn_cast<PHINode>(Ptr);
849 // SCEV does not look through non-header PHIs inside the loop. Such phis
850 // can be analyzed by adding separate accesses for each incoming pointer
851 // value.
852 if (PN && InnermostLoop.contains(PN->getParent()) &&
853 PN->getParent() != InnermostLoop.getHeader()) {
854 for (const Use &Inc : PN->incoming_values())
855 WorkList.push_back(Inc);
856 } else
857 AddPointer(Ptr);
858 }
859}
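// Example of the non-header PHI case handled above (illustrative IR):
//
//   loop.header:
//     %iv = phi i64 [ 0, %entry ], [ %iv.next, %latch ]
//     br i1 %c, label %then, label %latch
//   then:
//     %p.then = getelementptr i32, ptr %a, i64 %iv
//     br label %latch
//   latch:
//     %p = phi ptr [ %p.then, %then ], [ %b, %loop.header ]
//     %v = load i32, ptr %p
//     ...
//
// SCEV cannot summarize %p directly, so visitPointers walks through the
// non-header phi and registers %p.then and the loop-invariant %b as two
// separate pointer accesses.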
860
861// Walk back through the IR for a pointer, looking for a select like the
862// following:
863//
864// %offset = select i1 %cmp, i64 %a, i64 %b
865// %addr = getelementptr double, double* %base, i64 %offset
866// %ld = load double, double* %addr, align 8
867//
868// We won't be able to form a single SCEVAddRecExpr from this since the
869// address for each loop iteration depends on %cmp. We could potentially
870// produce multiple valid SCEVAddRecExprs, though, and check all of them for
871// memory safety/aliasing if needed.
872//
873// If we encounter some IR we don't yet handle, or something obviously fine
874// like a constant, then we just add the SCEV for that term to the list passed
875// in by the caller. If we have a node that may potentially yield a valid
876// SCEVAddRecExpr then we decompose it into parts and build the SCEV terms
877// ourselves before adding to the list.
878static void findForkedSCEVs(
879 ScalarEvolution *SE, const Loop *L, Value *Ptr,
881 unsigned Depth) {
882 // If our Value is a SCEVAddRecExpr, loop invariant, not an instruction, or
883 // we've exceeded our limit on recursion, just return whatever we have
884 // regardless of whether it can be used for a forked pointer or not, along
885 // with an indication of whether it might be a poison or undef value.
886 const SCEV *Scev = SE->getSCEV(Ptr);
887 if (isa<SCEVAddRecExpr>(Scev) || L->isLoopInvariant(Ptr) ||
888 !isa<Instruction>(Ptr) || Depth == 0) {
889 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
890 return;
891 }
892
893 Depth--;
894
895 auto UndefPoisonCheck = [](PointerIntPair<const SCEV *, 1, bool> S) {
896 return get<1>(S);
897 };
898
899 auto GetBinOpExpr = [&SE](unsigned Opcode, const SCEV *L, const SCEV *R) {
900 switch (Opcode) {
901 case Instruction::Add:
902 return SE->getAddExpr(L, R);
903 case Instruction::Sub:
904 return SE->getMinusSCEV(L, R);
905 default:
906 llvm_unreachable("Unexpected binary operator when walking ForkedPtrs");
907 }
908 };
909
910 Instruction *I = cast<Instruction>(Ptr);
911 unsigned Opcode = I->getOpcode();
912 switch (Opcode) {
913 case Instruction::GetElementPtr: {
914 auto *GEP = cast<GetElementPtrInst>(I);
915 Type *SourceTy = GEP->getSourceElementType();
916 // We only handle base + single offset GEPs here for now.
917 // Not dealing with preexisting gathers yet, so no vectors.
918 if (I->getNumOperands() != 2 || SourceTy->isVectorTy()) {
919 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(GEP));
920 break;
921 }
924 findForkedSCEVs(SE, L, I->getOperand(0), BaseScevs, Depth);
925 findForkedSCEVs(SE, L, I->getOperand(1), OffsetScevs, Depth);
926
927 // See if we need to freeze our fork...
928 bool NeedsFreeze = any_of(BaseScevs, UndefPoisonCheck) ||
929 any_of(OffsetScevs, UndefPoisonCheck);
930
931 // Check that we only have a single fork, on either the base or the offset.
932 // Copy the SCEV across for the one without a fork in order to generate
933 // the full SCEV for both sides of the GEP.
934 if (OffsetScevs.size() == 2 && BaseScevs.size() == 1)
935 BaseScevs.push_back(BaseScevs[0]);
936 else if (BaseScevs.size() == 2 && OffsetScevs.size() == 1)
937 OffsetScevs.push_back(OffsetScevs[0]);
938 else {
939 ScevList.emplace_back(Scev, NeedsFreeze);
940 break;
941 }
942
943 // Find the pointer type we need to extend to.
944 Type *IntPtrTy = SE->getEffectiveSCEVType(
945 SE->getSCEV(GEP->getPointerOperand())->getType());
946
947 // Find the size of the type being pointed to. We only have a single
948 // index term (guarded above) so we don't need to index into arrays or
949 // structures, just get the size of the scalar value.
950 const SCEV *Size = SE->getSizeOfExpr(IntPtrTy, SourceTy);
951
952 // Scale up the offsets by the size of the type, then add to the bases.
953 const SCEV *Scaled1 = SE->getMulExpr(
954 Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[0]), IntPtrTy));
955 const SCEV *Scaled2 = SE->getMulExpr(
956 Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[1]), IntPtrTy));
957 ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[0]), Scaled1),
958 NeedsFreeze);
959 ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[1]), Scaled2),
960 NeedsFreeze);
961 break;
962 }
963 case Instruction::Select: {
965 // A select means we've found a forked pointer, but we currently only
966 // support a single select per pointer so if there's another behind this
967 // then we just bail out and return the generic SCEV.
968 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
969 findForkedSCEVs(SE, L, I->getOperand(2), ChildScevs, Depth);
970 if (ChildScevs.size() == 2) {
971 ScevList.push_back(ChildScevs[0]);
972 ScevList.push_back(ChildScevs[1]);
973 } else
974 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
975 break;
976 }
977 case Instruction::PHI: {
979 // A phi means we've found a forked pointer, but we currently only
980 // support a single phi per pointer so if there's another behind this
981 // then we just bail out and return the generic SCEV.
982 if (I->getNumOperands() == 2) {
983 findForkedSCEVs(SE, L, I->getOperand(0), ChildScevs, Depth);
984 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
985 }
986 if (ChildScevs.size() == 2) {
987 ScevList.push_back(ChildScevs[0]);
988 ScevList.push_back(ChildScevs[1]);
989 } else
990 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
991 break;
992 }
993 case Instruction::Add:
994 case Instruction::Sub: {
997 findForkedSCEVs(SE, L, I->getOperand(0), LScevs, Depth);
998 findForkedSCEVs(SE, L, I->getOperand(1), RScevs, Depth);
999
1000 // See if we need to freeze our fork...
1001 bool NeedsFreeze =
1002 any_of(LScevs, UndefPoisonCheck) || any_of(RScevs, UndefPoisonCheck);
1003
1004 // Check that we only have a single fork, on either the left or right side.
1005 // Copy the SCEV across for the one without a fork in order to generate
1006 // the full SCEV for both sides of the BinOp.
1007 if (LScevs.size() == 2 && RScevs.size() == 1)
1008 RScevs.push_back(RScevs[0]);
1009 else if (RScevs.size() == 2 && LScevs.size() == 1)
1010 LScevs.push_back(LScevs[0]);
1011 else {
1012 ScevList.emplace_back(Scev, NeedsFreeze);
1013 break;
1014 }
1015
1016 ScevList.emplace_back(
1017 GetBinOpExpr(Opcode, get<0>(LScevs[0]), get<0>(RScevs[0])),
1018 NeedsFreeze);
1019 ScevList.emplace_back(
1020 GetBinOpExpr(Opcode, get<0>(LScevs[1]), get<0>(RScevs[1])),
1021 NeedsFreeze);
1022 break;
1023 }
1024 default:
1025 // Just return the current SCEV if we haven't handled the instruction yet.
1026 LLVM_DEBUG(dbgs() << "ForkedPtr unhandled instruction: " << *I << "\n");
1027 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
1028 break;
1029 }
1030}
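// Continuing the select example from the comment above (sketch): with
//   %offset = select i1 %cmp, i64 %a, i64 %b
//   %addr = getelementptr double, ptr %base, i64 %offset
// the recursion forks at the select and the GEP case scales each offset by
// the size of double, producing the two SCEVs (%base + 8 * %a) and
// (%base + 8 * %b). If either operand might be undef or poison, the
// NeedsFreeze flag is set so callers can freeze the value before using it in
// a runtime check.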
1031
1034 const DenseMap<Value *, const SCEV *> &StridesMap, Value *Ptr,
1035 const Loop *L) {
1036 ScalarEvolution *SE = PSE.getSE();
1037 assert(SE->isSCEVable(Ptr->getType()) && "Value is not SCEVable!");
1039 findForkedSCEVs(SE, L, Ptr, Scevs, MaxForkedSCEVDepth);
1040
1041 // For now, we will only accept a forked pointer with two possible SCEVs
1042 // that are either SCEVAddRecExprs or loop invariant.
1043 if (Scevs.size() == 2 &&
1044 (isa<SCEVAddRecExpr>(get<0>(Scevs[0])) ||
1045 SE->isLoopInvariant(get<0>(Scevs[0]), L)) &&
1046 (isa<SCEVAddRecExpr>(get<0>(Scevs[1])) ||
1047 SE->isLoopInvariant(get<0>(Scevs[1]), L))) {
1048 LLVM_DEBUG(dbgs() << "LAA: Found forked pointer: " << *Ptr << "\n");
1049 LLVM_DEBUG(dbgs() << "\t(1) " << *get<0>(Scevs[0]) << "\n");
1050 LLVM_DEBUG(dbgs() << "\t(2) " << *get<0>(Scevs[1]) << "\n");
1051 return Scevs;
1052 }
1053
1054 return {{replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false}};
1055}
1056
1057bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
1058 MemAccessInfo Access, Type *AccessTy,
1059 const DenseMap<Value *, const SCEV *> &StridesMap,
1061 Loop *TheLoop, unsigned &RunningDepId,
1062 unsigned ASId, bool ShouldCheckWrap,
1063 bool Assume) {
1064 Value *Ptr = Access.getPointer();
1065
1067 findForkedPointer(PSE, StridesMap, Ptr, TheLoop);
1068
1069 for (const auto &P : TranslatedPtrs) {
1070 const SCEV *PtrExpr = get<0>(P);
1071 if (!hasComputableBounds(PSE, Ptr, PtrExpr, TheLoop, Assume))
1072 return false;
1073
1074 // When we run after a failing dependency check we have to make sure
1075 // we don't have wrapping pointers.
1076 if (ShouldCheckWrap) {
1077 // Skip wrap checking when translating pointers.
1078 if (TranslatedPtrs.size() > 1)
1079 return false;
1080
1081 if (!isNoWrap(PSE, StridesMap, Ptr, AccessTy, TheLoop, Assume))
1082 return false;
1083 }
1084 // If there's only one option for Ptr, look it up after bounds and wrap
1085 // checking, because assumptions might have been added to PSE.
1086 if (TranslatedPtrs.size() == 1)
1087 TranslatedPtrs[0] = {replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr),
1088 false};
1089 }
1090
1091 for (auto [PtrExpr, NeedsFreeze] : TranslatedPtrs) {
1092 // The id of the dependence set.
1093 unsigned DepId;
1094
1095 if (isDependencyCheckNeeded()) {
1096 Value *Leader = DepCands.getLeaderValue(Access).getPointer();
1097 unsigned &LeaderId = DepSetId[Leader];
1098 if (!LeaderId)
1099 LeaderId = RunningDepId++;
1100 DepId = LeaderId;
1101 } else
1102 // Each access has its own dependence set.
1103 DepId = RunningDepId++;
1104
1105 bool IsWrite = Access.getInt();
1106 RtCheck.insert(TheLoop, Ptr, PtrExpr, AccessTy, IsWrite, DepId, ASId, PSE,
1107 NeedsFreeze);
1108 LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
1109 }
1110
1111 return true;
1112}
1113
1114bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
1115 ScalarEvolution *SE, Loop *TheLoop,
1116 const DenseMap<Value *, const SCEV *> &StridesMap,
1117 Value *&UncomputablePtr, bool ShouldCheckWrap) {
1118 // Find pointers with computable bounds. We are going to use this information
1119 // to place a runtime bound check.
1120 bool CanDoRT = true;
1121
1122 bool MayNeedRTCheck = false;
1123 if (!IsRTCheckAnalysisNeeded) return true;
1124
1125 bool IsDepCheckNeeded = isDependencyCheckNeeded();
1126
1127 // We assign a consecutive id to accesses from different alias sets.
1128 // Accesses between different groups don't need to be checked.
1129 unsigned ASId = 0;
1130 for (const auto &AS : AST) {
1131 int NumReadPtrChecks = 0;
1132 int NumWritePtrChecks = 0;
1133 bool CanDoAliasSetRT = true;
1134 ++ASId;
1135 auto ASPointers = AS.getPointers();
1136
1137 // We assign consecutive ids to accesses from different dependence sets.
1138 // Accesses within the same set don't need a runtime check.
1139 unsigned RunningDepId = 1;
1141
1143
1144 // First, count how many write and read accesses are in the alias set. Also
1145 // collect MemAccessInfos for later.
1147 for (const Value *ConstPtr : ASPointers) {
1148 Value *Ptr = const_cast<Value *>(ConstPtr);
1149 bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
1150 if (IsWrite)
1151 ++NumWritePtrChecks;
1152 else
1153 ++NumReadPtrChecks;
1154 AccessInfos.emplace_back(Ptr, IsWrite);
1155 }
1156
1157 // We do not need runtime checks for this alias set if there are no writes,
1158 // or only a single write and no reads.
1159 if (NumWritePtrChecks == 0 ||
1160 (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
1161 assert((ASPointers.size() <= 1 ||
1162 all_of(ASPointers,
1163 [this](const Value *Ptr) {
1164 MemAccessInfo AccessWrite(const_cast<Value *>(Ptr),
1165 true);
1166 return DepCands.findValue(AccessWrite) == DepCands.end();
1167 })) &&
1168 "Can only skip updating CanDoRT below, if all entries in AS "
1169 "are reads or there is at most 1 entry");
1170 continue;
1171 }
1172
1173 for (auto &Access : AccessInfos) {
1174 for (const auto &AccessTy : Accesses[Access]) {
1175 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1176 DepSetId, TheLoop, RunningDepId, ASId,
1177 ShouldCheckWrap, false)) {
1178 LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
1179 << *Access.getPointer() << '\n');
1180 Retries.emplace_back(Access, AccessTy);
1181 CanDoAliasSetRT = false;
1182 }
1183 }
1184 }
1185
1186 // Note that this function computes CanDoRT and MayNeedRTCheck
1187 // independently. For example CanDoRT=false, MayNeedRTCheck=false means that
1188 // we have a pointer for which we couldn't find the bounds but we don't
1189 // actually need to emit any checks so it does not matter.
1190 //
1191 // We need runtime checks for this alias set, if there are at least 2
1192 // dependence sets (in which case RunningDepId > 2) or if we need to re-try
1193 // any bound checks (because in that case the number of dependence sets is
1194 // incomplete).
1195 bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();
1196
1197 // We need to perform run-time alias checks, but some pointers had bounds
1198 // that couldn't be checked.
1199 if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
1200 // Reset the CanDoAliasSetRT flag and retry all accesses that have failed.
1201 // We know that we need these checks, so we can now be more aggressive
1202 // and add further checks if required (overflow checks).
1203 CanDoAliasSetRT = true;
1204 for (const auto &[Access, AccessTy] : Retries) {
1205 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1206 DepSetId, TheLoop, RunningDepId, ASId,
1207 ShouldCheckWrap, /*Assume=*/true)) {
1208 CanDoAliasSetRT = false;
1209 UncomputablePtr = Access.getPointer();
1210 break;
1211 }
1212 }
1213 }
1214
1215 CanDoRT &= CanDoAliasSetRT;
1216 MayNeedRTCheck |= NeedsAliasSetRTCheck;
1217 ++ASId;
1218 }
1219
1220 // If the pointers that we would use for the bounds comparison have different
1221 // address spaces, assume the values aren't directly comparable, so we can't
1222 // use them for the runtime check. We also have to assume they could
1223 // overlap. In the future there should be metadata for whether address spaces
1224 // are disjoint.
1225 unsigned NumPointers = RtCheck.Pointers.size();
1226 for (unsigned i = 0; i < NumPointers; ++i) {
1227 for (unsigned j = i + 1; j < NumPointers; ++j) {
1228 // Only need to check pointers between two different dependency sets.
1229 if (RtCheck.Pointers[i].DependencySetId ==
1230 RtCheck.Pointers[j].DependencySetId)
1231 continue;
1232 // Only need to check pointers in the same alias set.
1233 if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
1234 continue;
1235
1236 Value *PtrI = RtCheck.Pointers[i].PointerValue;
1237 Value *PtrJ = RtCheck.Pointers[j].PointerValue;
1238
1239 unsigned ASi = PtrI->getType()->getPointerAddressSpace();
1240 unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
1241 if (ASi != ASj) {
1242 LLVM_DEBUG(
1243 dbgs() << "LAA: Runtime check would require comparison between"
1244 " different address spaces\n");
1245 return false;
1246 }
1247 }
1248 }
1249
1250 if (MayNeedRTCheck && CanDoRT)
1251 RtCheck.generateChecks(DepCands, IsDepCheckNeeded);
1252
1253 LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
1254 << " pointer comparisons.\n");
1255
1256 // If we can do run-time checks, but there are no checks, no runtime checks
1257 // are needed. This can happen when all pointers point to the same underlying
1258 // object for example.
1259 RtCheck.Need = CanDoRT ? RtCheck.getNumberOfChecks() != 0 : MayNeedRTCheck;
1260
1261 bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
1262 if (!CanDoRTIfNeeded)
1263 RtCheck.reset();
1264 return CanDoRTIfNeeded;
1265}
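// Small illustration (hypothetical loop): in
//   for (i = 0; i < n; ++i) { A[i] = ...; ... = B[i]; }
// where A and B may alias but have distinct underlying objects, the write and
// the read land in different dependence sets of the same alias set, so
// RunningDepId reaches 3 and a check between the two pointer groups is
// generated. If all accesses were reads, or everything provably came from a
// single object, no check would be emitted and RtCheck.Need would stay false.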
1266
1267void AccessAnalysis::processMemAccesses() {
1268 // We process the set twice: first we process read-write pointers, last we
1269 // process read-only pointers. This allows us to skip dependence tests for
1270 // read-only pointers.
1271
1272 LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
1273 LLVM_DEBUG(dbgs() << " AST: "; AST.dump());
1274 LLVM_DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
1275 LLVM_DEBUG({
1276 for (const auto &[A, _] : Accesses)
1277 dbgs() << "\t" << *A.getPointer() << " ("
1278 << (A.getInt() ? "write"
1279 : (ReadOnlyPtr.count(A.getPointer()) ? "read-only"
1280 : "read"))
1281 << ")\n";
1282 });
1283
1284 // The AliasSetTracker has nicely partitioned our pointers by metadata
1285 // compatibility and potential for underlying-object overlap. As a result, we
1286 // only need to check for potential pointer dependencies within each alias
1287 // set.
1288 for (const auto &AS : AST) {
1289 // Note that both the alias-set tracker and the alias sets themselves use
1290 // ordered collections internally, so the iteration order here is
1291 // deterministic.
1292 auto ASPointers = AS.getPointers();
1293
1294 bool SetHasWrite = false;
1295
1296 // Map of pointers to last access encountered.
1297 typedef DenseMap<const Value*, MemAccessInfo> UnderlyingObjToAccessMap;
1298 UnderlyingObjToAccessMap ObjToLastAccess;
1299
1300 // Set of access to check after all writes have been processed.
1301 PtrAccessMap DeferredAccesses;
1302
1303 // Iterate over each alias set twice, once to process read/write pointers,
1304 // and then to process read-only pointers.
1305 for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
1306 bool UseDeferred = SetIteration > 0;
1307 PtrAccessMap &S = UseDeferred ? DeferredAccesses : Accesses;
1308
1309 for (const Value *ConstPtr : ASPointers) {
1310 Value *Ptr = const_cast<Value *>(ConstPtr);
1311
1312 // For a single memory access in AliasSetTracker, Accesses may contain
1313 // both read and write, and they both need to be handled for CheckDeps.
1314 for (const auto &[AC, _] : S) {
1315 if (AC.getPointer() != Ptr)
1316 continue;
1317
1318 bool IsWrite = AC.getInt();
1319
1320 // If we're using the deferred access set, then it contains only
1321 // reads.
1322 bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
1323 if (UseDeferred && !IsReadOnlyPtr)
1324 continue;
1325 // Otherwise, the pointer must be in the PtrAccessSet, either as a
1326 // read or a write.
1327 assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
1328 S.count(MemAccessInfo(Ptr, false))) &&
1329 "Alias-set pointer not in the access set?");
1330
1331 MemAccessInfo Access(Ptr, IsWrite);
1332 DepCands.insert(Access);
1333
1334 // Memorize read-only pointers for later processing and skip them in
1335 // the first round (they need to be checked after we have seen all
1336 // write pointers). Note: we also mark pointers that are not
1337 // consecutive as "read-only" pointers (so that we check
1338 // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
1339 if (!UseDeferred && IsReadOnlyPtr) {
1340 // We only use the pointer keys, the types vector values don't
1341 // matter.
1342 DeferredAccesses.insert({Access, {}});
1343 continue;
1344 }
1345
1346 // If this is a write, check other reads and writes for conflicts. If
1347 // this is a read, only check other writes for conflicts (but only if
1348 // there is no other write to the ptr - this is an optimization to
1349 // catch "a[i] = a[i] + " without having to do a dependence check).
1350 if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
1351 CheckDeps.push_back(Access);
1352 IsRTCheckAnalysisNeeded = true;
1353 }
1354
1355 if (IsWrite)
1356 SetHasWrite = true;
1357
1358 // Create sets of pointers connected by a shared alias set and
1359 // underlying object.
1360 typedef SmallVector<const Value *, 16> ValueVector;
1361 ValueVector TempObjects;
1362
1363 UnderlyingObjects[Ptr] = {};
1364 SmallVector<const Value *, 16> &UOs = UnderlyingObjects[Ptr];
1365 ::getUnderlyingObjects(Ptr, UOs, LI);
1367 << "Underlying objects for pointer " << *Ptr << "\n");
1368 for (const Value *UnderlyingObj : UOs) {
1369 // nullptr never aliases anything; don't join sets for pointers that have
1370 // "null" in their UnderlyingObjects list.
1371 if (isa<ConstantPointerNull>(UnderlyingObj) &&
1373 TheLoop->getHeader()->getParent(),
1374 UnderlyingObj->getType()->getPointerAddressSpace()))
1375 continue;
1376
1377 UnderlyingObjToAccessMap::iterator Prev =
1378 ObjToLastAccess.find(UnderlyingObj);
1379 if (Prev != ObjToLastAccess.end())
1380 DepCands.unionSets(Access, Prev->second);
1381
1382 ObjToLastAccess[UnderlyingObj] = Access;
1383 LLVM_DEBUG(dbgs() << " " << *UnderlyingObj << "\n");
1384 }
1385 }
1386 }
1387 }
1388 }
1389}
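// Example of the two-pass processing (illustrative): for
//   for (i = 0; i < n; ++i) A[i] = B[i] + C[i];
// with possibly-aliasing pointers, the first pass over the alias set handles
// the write to A and defers the read-only pointers B and C. When B and C are
// processed in the second pass, SetHasWrite already reflects every write in
// the set, so a read-only pointer is added to CheckDeps only if some write
// could actually conflict with it.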
1390
1391/// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
1392/// i.e. monotonically increasing/decreasing.
1393static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
1394 PredicatedScalarEvolution &PSE, const Loop *L) {
1395
1396 // FIXME: This should probably only return true for NUW.
1398 return true;
1399
1401 return true;
1402
1403 // Scalar evolution does not propagate the non-wrapping flags to values that
1404 // are derived from a non-wrapping induction variable because non-wrapping
1405 // could be flow-sensitive.
1406 //
1407 // Look through the potentially overflowing instruction to try to prove
1408 // non-wrapping for the *specific* value of Ptr.
1409
1410 // The arithmetic implied by an nusw GEP can't overflow.
1411 const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
1412 if (!GEP || !GEP->hasNoUnsignedSignedWrap())
1413 return false;
1414
1415 // Make sure there is only one non-const index and analyze that.
1416 Value *NonConstIndex = nullptr;
1417 for (Value *Index : GEP->indices())
1418 if (!isa<ConstantInt>(Index)) {
1419 if (NonConstIndex)
1420 return false;
1421 NonConstIndex = Index;
1422 }
1423 if (!NonConstIndex)
1424 // The recurrence is on the pointer, ignore for now.
1425 return false;
1426
1427 // The index in GEP is signed. It is non-wrapping if it's derived from a NSW
1428 // AddRec using a NSW operation.
1429 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
1430 if (OBO->hasNoSignedWrap() &&
1431 // Assume the other operand is constant so that the AddRec can be
1432 // easily found.
1433 isa<ConstantInt>(OBO->getOperand(1))) {
1434 const SCEV *OpScev = PSE.getSCEV(OBO->getOperand(0));
1435
1436 if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
1437 return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
1438 }
1439
1440 return false;
1441}
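// Illustrative case for the GEP reasoning above (sketch): in
//   %idx = add nsw i32 %i, 1
//   %gep = getelementptr inbounds i32, ptr %a, i32 %idx
// the inbounds GEP implies nusw, the only non-constant index is the nsw add
// with a constant second operand, and if SCEV models %i as an AddRec carrying
// the NSW flag on the queried loop, the address computed by %gep is treated
// as non-wrapping.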
1442
1443/// Check whether the access through \p Ptr has a constant stride.
1444std::optional<int64_t>
1446 const Loop *Lp,
1447 const DenseMap<Value *, const SCEV *> &StridesMap,
1448 bool Assume, bool ShouldCheckWrap) {
1449 const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);
1450 if (PSE.getSE()->isLoopInvariant(PtrScev, Lp))
1451 return {0};
1452
1453 Type *Ty = Ptr->getType();
1454 assert(Ty->isPointerTy() && "Unexpected non-ptr");
1455 if (isa<ScalableVectorType>(AccessTy)) {
1456 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
1457 << "\n");
1458 return std::nullopt;
1459 }
1460
1461 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
1462 if (Assume && !AR)
1463 AR = PSE.getAsAddRec(Ptr);
1464
1465 if (!AR) {
1466 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
1467 << " SCEV: " << *PtrScev << "\n");
1468 return std::nullopt;
1469 }
1470
1471 // The access function must stride over the innermost loop.
1472 if (Lp != AR->getLoop()) {
1473 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
1474 << *Ptr << " SCEV: " << *AR << "\n");
1475 return std::nullopt;
1476 }
1477
1478 // Check the step is constant.
1479 const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
1480
1481 // Calculate the pointer stride and check if it is constant.
1482 const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
1483 if (!C) {
1484 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
1485 << " SCEV: " << *AR << "\n");
1486 return std::nullopt;
1487 }
1488
1489 const auto &DL = Lp->getHeader()->getDataLayout();
1490 TypeSize AllocSize = DL.getTypeAllocSize(AccessTy);
1491 int64_t Size = AllocSize.getFixedValue();
1492 const APInt &APStepVal = C->getAPInt();
1493
1494 // Huge step value - give up.
1495 if (APStepVal.getBitWidth() > 64)
1496 return std::nullopt;
1497
1498 int64_t StepVal = APStepVal.getSExtValue();
1499
1500 // Strided access.
1501 int64_t Stride = StepVal / Size;
1502 int64_t Rem = StepVal % Size;
1503 if (Rem)
1504 return std::nullopt;
1505
1506 if (!ShouldCheckWrap)
1507 return Stride;
1508
1509 // The address calculation must not wrap. Otherwise, a dependence could be
1510 // inverted.
1511 if (isNoWrapAddRec(Ptr, AR, PSE, Lp))
1512 return Stride;
1513
1514 // An nusw getelementptr that is an AddRec cannot wrap. If it would wrap,
1515 // the distance between the previously accessed location and the wrapped
1516 // location will be larger than half the pointer index type space. In that
1517 // case, the GEP would be poison and any memory access dependent on it would
1518 // be immediate UB when executed.
1519 if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
1520 GEP && GEP->hasNoUnsignedSignedWrap())
1521 return Stride;
1522
1523 // If the null pointer is undefined, then an access sequence which would
1524 // otherwise access it can be assumed not to unsigned wrap. Note that this
1525 // assumes the object in memory is aligned to the natural alignment.
1526 unsigned AddrSpace = Ty->getPointerAddressSpace();
1527 if (!NullPointerIsDefined(Lp->getHeader()->getParent(), AddrSpace) &&
1528 (Stride == 1 || Stride == -1))
1529 return Stride;
1530
1531 if (Assume) {
1533 LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap:\n"
1534 << "LAA: Pointer: " << *Ptr << "\n"
1535 << "LAA: SCEV: " << *AR << "\n"
1536 << "LAA: Added an overflow assumption\n");
1537 return Stride;
1538 }
1539 LLVM_DEBUG(
1540 dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
1541 << *Ptr << " SCEV: " << *AR << "\n");
1542 return std::nullopt;
1543}
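// Worked examples (hypothetical): with AccessTy = i32 (alloc size 4),
//  * {%a,+,4}<%loop>  gives StepVal = 4 and Stride = 1 (consecutive forward);
//  * {%a,+,-8}<%loop> gives StepVal = -8 and Stride = -2;
//  * {%a,+,6}<%loop>  leaves a remainder of 2, so std::nullopt is returned;
// and a loop-invariant pointer is reported as stride 0 without further checks.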
1544
1545std::optional<int> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA,
1546 Type *ElemTyB, Value *PtrB,
1547 const DataLayout &DL,
1548 ScalarEvolution &SE, bool StrictCheck,
1549 bool CheckType) {
1550 assert(PtrA && PtrB && "Expected non-nullptr pointers.");
1551
1552 // Make sure that A and B are different pointers.
1553 if (PtrA == PtrB)
1554 return 0;
1555
1556 // Make sure that the element types are the same if required.
1557 if (CheckType && ElemTyA != ElemTyB)
1558 return std::nullopt;
1559
1560 unsigned ASA = PtrA->getType()->getPointerAddressSpace();
1561 unsigned ASB = PtrB->getType()->getPointerAddressSpace();
1562
1563 // Check that the address spaces match.
1564 if (ASA != ASB)
1565 return std::nullopt;
1566 unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
1567
1568 APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
1569 const Value *PtrA1 = PtrA->stripAndAccumulateConstantOffsets(
1570 DL, OffsetA, /*AllowNonInbounds=*/true);
1571 const Value *PtrB1 = PtrB->stripAndAccumulateConstantOffsets(
1572 DL, OffsetB, /*AllowNonInbounds=*/true);
1573
1574 int Val;
1575 if (PtrA1 == PtrB1) {
1576 // Retrieve the address space again as pointer stripping now tracks through
1577 // `addrspacecast`.
1578 ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
1579 ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
1580 // Check that the address spaces match and that the pointers are valid.
1581 if (ASA != ASB)
1582 return std::nullopt;
1583
1584 IdxWidth = DL.getIndexSizeInBits(ASA);
1585 OffsetA = OffsetA.sextOrTrunc(IdxWidth);
1586 OffsetB = OffsetB.sextOrTrunc(IdxWidth);
1587
1588 OffsetB -= OffsetA;
1589 Val = OffsetB.getSExtValue();
1590 } else {
1591 // Otherwise compute the distance with SCEV between the base pointers.
1592 const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
1593 const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
1594 std::optional<APInt> Diff =
1595 SE.computeConstantDifference(PtrSCEVB, PtrSCEVA);
1596 if (!Diff)
1597 return std::nullopt;
1598 Val = Diff->getSExtValue();
1599 }
1600 int Size = DL.getTypeStoreSize(ElemTyA);
1601 int Dist = Val / Size;
1602
1603 // Ensure that the calculated distance matches the type-based one after all
1604 // bitcasts have been removed from the provided pointers.
1605 if (!StrictCheck || Dist * Size == Val)
1606 return Dist;
1607 return std::nullopt;
1608}
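// Worked example (illustrative): with i32 elements, PtrB == PtrA + 12 gives
// Val == 12, Size == 4 and a result of 3; PtrB == PtrA + 6 gives Dist == 1,
// but with StrictCheck the call returns std::nullopt because 1 * 4 != 6.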
1609
1610 bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
1611 const DataLayout &DL, ScalarEvolution &SE,
1612 SmallVectorImpl<unsigned> &SortedIndices) {
1613 assert(llvm::all_of(
1614 VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
1615 "Expected list of pointer operands.");
1616 // Walk over the pointers, and map each of them to an offset relative to
1617 // the first pointer in the array.
1618 Value *Ptr0 = VL[0];
1619
1620 using DistOrdPair = std::pair<int64_t, int>;
1621 auto Compare = llvm::less_first();
1622 std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
1623 Offsets.emplace(0, 0);
1624 bool IsConsecutive = true;
1625 for (auto [Idx, Ptr] : drop_begin(enumerate(VL))) {
1626 std::optional<int> Diff = getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
1627 /*StrictCheck=*/true);
1628 if (!Diff)
1629 return false;
1630
1631 // Check if the pointer with the same offset is found.
1632 int64_t Offset = *Diff;
1633 auto [It, IsInserted] = Offsets.emplace(Offset, Idx);
1634 if (!IsInserted)
1635 return false;
1636 // Consecutive order if the inserted element is the last one.
1637 IsConsecutive &= std::next(It) == Offsets.end();
1638 }
1639 SortedIndices.clear();
1640 if (!IsConsecutive) {
1641 // Fill SortedIndices array only if it is non-consecutive.
1642 SortedIndices.resize(VL.size());
1643 for (auto [Idx, Off] : enumerate(Offsets))
1644 SortedIndices[Idx] = Off.second;
1645 }
1646 return true;
1647}
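// Worked example (illustrative): for VL == {P, P+8, P+4} with i32 elements the
// element distances from P are {0, 2, 1}; the pointers are not consecutive in
// program order, so SortedIndices is filled with {0, 2, 1}, i.e. the
// address-ascending order VL[0], VL[2], VL[1].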
1648
1649/// Returns true if the memory operations \p A and \p B are consecutive.
1650 bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
1651 ScalarEvolution &SE, bool CheckType) {
1652 Value *PtrA = getLoadStorePointerOperand(A);
1653 Value *PtrB = getLoadStorePointerOperand(B);
1654 if (!PtrA || !PtrB)
1655 return false;
1656 Type *ElemTyA = getLoadStoreType(A);
1657 Type *ElemTyB = getLoadStoreType(B);
1658 std::optional<int> Diff =
1659 getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
1660 /*StrictCheck=*/true, CheckType);
1661 return Diff && *Diff == 1;
1662}
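// Illustrative use: two i32 loads from P and P + 4 are exactly one element
// apart, so this returns true; loads from P and P + 8 (distance 2) or from
// pointers in different address spaces return false.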
1663
1664 void MemoryDepChecker::addAccess(StoreInst *SI) {
1665 visitPointers(SI->getPointerOperand(), *InnermostLoop,
1666 [this, SI](Value *Ptr) {
1667 Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
1668 InstMap.push_back(SI);
1669 ++AccessIdx;
1670 });
1671}
1672
1673 void MemoryDepChecker::addAccess(LoadInst *LI) {
1674 visitPointers(LI->getPointerOperand(), *InnermostLoop,
1675 [this, LI](Value *Ptr) {
1676 Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
1677 InstMap.push_back(LI);
1678 ++AccessIdx;
1679 });
1680}
1681
1682 MemoryDepChecker::VectorizationSafetyStatus
1683 MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
1684 switch (Type) {
1685 case NoDep:
1686 case Forward:
1687 case ForwardButPreventsForwarding:
1688 return VectorizationSafetyStatus::Safe;
1689
1690 case Unknown:
1691 return VectorizationSafetyStatus::PossiblySafeWithRtChecks;
1692 case BackwardVectorizable:
1693 case Backward:
1694 case BackwardVectorizableButPreventsForwarding:
1695 case IndirectUnsafe:
1696 return VectorizationSafetyStatus::Unsafe;
1697 }
1698 llvm_unreachable("unexpected DepType!");
1699}
1700
1701 bool MemoryDepChecker::Dependence::isBackward() const {
1702 switch (Type) {
1703 case NoDep:
1704 case Forward:
1705 case ForwardButPreventsForwarding:
1706 case Unknown:
1707 case IndirectUnsafe:
1708 return false;
1709
1710 case BackwardVectorizable:
1711 case Backward:
1712 case BackwardVectorizableButPreventsForwarding:
1713 return true;
1714 }
1715 llvm_unreachable("unexpected DepType!");
1716}
1717
1718 bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
1719 return isBackward() || Type == Unknown || Type == IndirectUnsafe;
1720}
1721
1722 bool MemoryDepChecker::Dependence::isForward() const {
1723 switch (Type) {
1724 case Forward:
1725 case ForwardButPreventsForwarding:
1726 return true;
1727
1728 case NoDep:
1729 case Unknown:
1730 case BackwardVectorizable:
1731 case Backward:
1732 case BackwardVectorizableButPreventsForwarding:
1733 case IndirectUnsafe:
1734 return false;
1735 }
1736 llvm_unreachable("unexpected DepType!");
1737}
1738
1739bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
1740 uint64_t TypeByteSize) {
1741 // If loads occur at a distance that is not a multiple of a feasible vector
1742 // factor, store-load forwarding does not take place.
1743 // Positive dependences might cause troubles because vectorizing them might
1744 // prevent store-load forwarding making vectorized code run a lot slower.
1745 // a[i] = a[i-3] ^ a[i-8];
1746 // The stores to a[i:i+1] don't align with the loads from a[i-3:i-2] and
1747 // hence on your typical architecture store-load forwarding does not take
1748 // place. Vectorizing in such cases does not make sense.
1749 // Store-load forwarding distance.
1750
1751 // After this many iterations store-to-load forwarding conflicts should not
1752 // cause any slowdowns.
1753 const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
1754 // Maximum vector factor.
1755 uint64_t MaxVFWithoutSLForwardIssues = std::min(
1756 VectorizerParams::MaxVectorWidth * TypeByteSize, MinDepDistBytes);
1757
1758 // Compute the smallest VF at which the store and load would be misaligned.
1759 for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
1760 VF *= 2) {
1761 // If the number of vector iterations between the store and the load is
1762 // small we could incur conflicts.
1763 if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
1764 MaxVFWithoutSLForwardIssues = (VF >> 1);
1765 break;
1766 }
1767 }
1768
1769 if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
1770 LLVM_DEBUG(
1771 dbgs() << "LAA: Distance " << Distance
1772 << " that could cause a store-load forwarding conflict\n");
1773 return true;
1774 }
1775
1776 if (MaxVFWithoutSLForwardIssues < MinDepDistBytes &&
1777 MaxVFWithoutSLForwardIssues !=
1778 VectorizerParams::MaxVectorWidth * TypeByteSize)
1779 MinDepDistBytes = MaxVFWithoutSLForwardIssues;
1780 return false;
1781}
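// Worked example (illustrative; assumes MinDepDistBytes still holds its
// initial, effectively unbounded value): with TypeByteSize == 4 and
// Distance == 12, the candidate VF == 8 bytes already conflicts
// (12 % 8 != 0 and 12 / 8 == 1 < 32), so MaxVFWithoutSLForwardIssues drops to
// 4, which is below 2 * TypeByteSize, and a forwarding conflict is reported.
// With Distance == 32 the first conflict appears only at VF == 64,
// MaxVFWithoutSLForwardIssues settles at 32, and no conflict is reported.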
1782
1783void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
1784 if (Status < S)
1785 Status = S;
1786}
1787
1788/// Given a dependence-distance \p Dist between two
1789/// memory accesses, that have strides in the same direction whose absolute
1790/// value of the maximum stride is given in \p MaxStride, and that have the same
1791/// type size \p TypeByteSize, in a loop whose maximum backedge taken count is
1792/// \p MaxBTC, check if it is possible to prove statically that the dependence
1793/// distance is larger than the range that the accesses will travel through the
1794/// execution of the loop. If so, return true; false otherwise. This is useful
1795/// for example in loops such as the following (PR31098):
1796/// for (i = 0; i < D; ++i) {
1797/// = out[i];
1798/// out[i+D] =
1799/// }
1800 static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
1801 const SCEV &MaxBTC, const SCEV &Dist,
1802 uint64_t MaxStride,
1803 uint64_t TypeByteSize) {
1804
1805 // If we can prove that
1806 // (**) |Dist| > MaxBTC * Step
1807 // where Step is the absolute stride of the memory accesses in bytes,
1808 // then there is no dependence.
1809 //
1810 // Rationale:
1811 // We basically want to check if the absolute distance (|Dist/Step|)
1812 // is >= the loop iteration count (or > MaxBTC).
1813 // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
1814 // Section 4.2.1); Note, that for vectorization it is sufficient to prove
1815 // that the dependence distance is >= VF; This is checked elsewhere.
1816 // But in some cases we can prune dependence distances early, and
1817 // even before selecting the VF, and without a runtime test, by comparing
1818 // the distance against the loop iteration count. Since the vectorized code
1819 // will be executed only if LoopCount >= VF, proving distance >= LoopCount
1820 // also guarantees that distance >= VF.
1821 //
1822 const uint64_t ByteStride = MaxStride * TypeByteSize;
1823 const SCEV *Step = SE.getConstant(MaxBTC.getType(), ByteStride);
1824 const SCEV *Product = SE.getMulExpr(&MaxBTC, Step);
1825
1826 const SCEV *CastedDist = &Dist;
1827 const SCEV *CastedProduct = Product;
1828 uint64_t DistTypeSizeBits = DL.getTypeSizeInBits(Dist.getType());
1829 uint64_t ProductTypeSizeBits = DL.getTypeSizeInBits(Product->getType());
1830
1831 // The dependence distance can be positive/negative, so we sign extend Dist;
1832 // The multiplication of the absolute stride in bytes and the
1833 // backedgeTakenCount is non-negative, so we zero extend Product.
1834 if (DistTypeSizeBits > ProductTypeSizeBits)
1835 CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
1836 else
1837 CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());
1838
1839 // Is Dist - (MaxBTC * Step) > 0 ?
1840 // (If so, then we have proven (**) because |Dist| >= Dist)
1841 const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
1842 if (SE.isKnownPositive(Minus))
1843 return true;
1844
1845 // Second try: Is -Dist - (MaxBTC * Step) > 0 ?
1846 // (If so, then we have proven (**) because |Dist| >= -1*Dist)
1847 const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
1848 Minus = SE.getMinusSCEV(NegDist, CastedProduct);
1849 return SE.isKnownPositive(Minus);
1850}
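// Worked example (illustrative): for the loop in the comment above with
// 4-byte elements, |Dist| == 4 * D bytes, MaxStride == 1 and MaxBTC == D - 1,
// so MaxBTC * Step == 4 * D - 4 < |Dist| and the accesses are proven
// independent.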
1851
1852/// Check the dependence for two accesses with the same stride \p Stride.
1853/// \p Distance is the positive distance and \p TypeByteSize is type size in
1854/// bytes.
1855///
1856/// \returns true if they are independent.
1857 static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
1858 uint64_t TypeByteSize) {
1859 assert(Stride > 1 && "The stride must be greater than 1");
1860 assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
1861 assert(Distance > 0 && "The distance must be non-zero");
1862
1863 // Skip if the distance is not a multiple of the type byte size.
1864 if (Distance % TypeByteSize)
1865 return false;
1866
1867 uint64_t ScaledDist = Distance / TypeByteSize;
1868
1869 // No dependence if the scaled distance is not a multiple of the stride.
1870 // E.g.
1871 // for (i = 0; i < 1024 ; i += 4)
1872 // A[i+2] = A[i] + 1;
1873 //
1874 // Two accesses in memory (scaled distance is 2, stride is 4):
1875 // | A[0] | | | | A[4] | | | |
1876 // | | | A[2] | | | | A[6] | |
1877 //
1878 // E.g.
1879 // for (i = 0; i < 1024 ; i += 3)
1880 // A[i+4] = A[i] + 1;
1881 //
1882 // Two accesses in memory (scaled distance is 4, stride is 3):
1883 // | A[0] | | | A[3] | | | A[6] | | |
1884 // | | | | | A[4] | | | A[7] | |
1885 return ScaledDist % Stride;
1886}
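// Note (illustrative): in the first example above ScaledDist % Stride ==
// 2 % 4 == 2 and in the second 4 % 3 == 1; both are non-zero, so the
// interleaved accesses never touch the same element and the function returns
// true.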
1887
1888 std::variant<MemoryDepChecker::Dependence::DepType,
1889 MemoryDepChecker::DepDistanceStrideAndSizeInfo>
1890 MemoryDepChecker::getDependenceDistanceStrideAndSize(
1891 const MemAccessInfo &A, Instruction *AInst, const MemAccessInfo &B,
1892 Instruction *BInst) {
1893 const auto &DL = InnermostLoop->getHeader()->getDataLayout();
1894 auto &SE = *PSE.getSE();
1895 const auto &[APtr, AIsWrite] = A;
1896 const auto &[BPtr, BIsWrite] = B;
1897
1898 // Two reads are independent.
1899 if (!AIsWrite && !BIsWrite)
1900 return MemoryDepChecker::Dependence::NoDep;
1901
1902 Type *ATy = getLoadStoreType(AInst);
1903 Type *BTy = getLoadStoreType(BInst);
1904
1905 // We cannot check pointers in different address spaces.
1906 if (APtr->getType()->getPointerAddressSpace() !=
1907 BPtr->getType()->getPointerAddressSpace())
1908 return MemoryDepChecker::Dependence::Unknown;
1909
1910 std::optional<int64_t> StrideAPtr =
1911 getPtrStride(PSE, ATy, APtr, InnermostLoop, SymbolicStrides, true, true);
1912 std::optional<int64_t> StrideBPtr =
1913 getPtrStride(PSE, BTy, BPtr, InnermostLoop, SymbolicStrides, true, true);
1914
1915 const SCEV *Src = PSE.getSCEV(APtr);
1916 const SCEV *Sink = PSE.getSCEV(BPtr);
1917
1918 // If the induction step is negative we have to invert source and sink of the
1919 // dependence when measuring the distance between them. We should not swap
1920 // AIsWrite with BIsWrite, as their uses expect them in program order.
1921 if (StrideAPtr && *StrideAPtr < 0) {
1922 std::swap(Src, Sink);
1923 std::swap(AInst, BInst);
1924 std::swap(ATy, BTy);
1925 std::swap(StrideAPtr, StrideBPtr);
1926 }
1927
1928 const SCEV *Dist = SE.getMinusSCEV(Sink, Src);
1929
1930 LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
1931 << "\n");
1932 LLVM_DEBUG(dbgs() << "LAA: Distance for " << *AInst << " to " << *BInst
1933 << ": " << *Dist << "\n");
1934
1935 // Check if we can prove that Sink only accesses memory after Src's end or
1936 // vice versa. At the moment this is limited to cases where either source or
1937 // sink are loop invariant to avoid compile-time increases. This is not
1938 // required for correctness.
1939 if (SE.isLoopInvariant(Src, InnermostLoop) ||
1940 SE.isLoopInvariant(Sink, InnermostLoop)) {
1941 const auto &[SrcStart_, SrcEnd_] =
1942 getStartAndEndForAccess(InnermostLoop, Src, ATy, PSE, PointerBounds);
1943 const auto &[SinkStart_, SinkEnd_] =
1944 getStartAndEndForAccess(InnermostLoop, Sink, BTy, PSE, PointerBounds);
1945 if (!isa<SCEVCouldNotCompute>(SrcStart_) &&
1946 !isa<SCEVCouldNotCompute>(SrcEnd_) &&
1947 !isa<SCEVCouldNotCompute>(SinkStart_) &&
1948 !isa<SCEVCouldNotCompute>(SinkEnd_)) {
1949 if (!LoopGuards)
1950 LoopGuards.emplace(
1951 ScalarEvolution::LoopGuards::collect(InnermostLoop, SE));
1952 auto SrcEnd = SE.applyLoopGuards(SrcEnd_, *LoopGuards);
1953 auto SinkStart = SE.applyLoopGuards(SinkStart_, *LoopGuards);
1954 if (SE.isKnownPredicate(CmpInst::ICMP_ULE, SrcEnd, SinkStart))
1955 return MemoryDepChecker::Dependence::NoDep;
1956
1957 auto SinkEnd = SE.applyLoopGuards(SinkEnd_, *LoopGuards);
1958 auto SrcStart = SE.applyLoopGuards(SrcStart_, *LoopGuards);
1959 if (SE.isKnownPredicate(CmpInst::ICMP_ULE, SinkEnd, SrcStart))
1960 return MemoryDepChecker::Dependence::NoDep;
1961 }
1962 }
1963
1964 // Need accesses with constant strides and the same direction for further
1965 // dependence analysis. We don't want to vectorize "A[B[i]] += ..." and
1966 // similar code or pointer arithmetic that could wrap in the address space.
1967
1968 // If either Src or Sink are not strided (i.e. not a non-wrapping AddRec) and
1969 // not loop-invariant (stride will be 0 in that case), we cannot analyze the
1970 // dependence further and also cannot generate runtime checks.
1971 if (!StrideAPtr || !StrideBPtr) {
1972 LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
1973 return MemoryDepChecker::Dependence::IndirectUnsafe;
1974 }
1975
1976 int64_t StrideAPtrInt = *StrideAPtr;
1977 int64_t StrideBPtrInt = *StrideBPtr;
1978 LLVM_DEBUG(dbgs() << "LAA: Src induction step: " << StrideAPtrInt
1979 << " Sink induction step: " << StrideBPtrInt << "\n");
1980 // At least Src or Sink are loop invariant and the other is strided or
1981 // invariant. We can generate a runtime check to disambiguate the accesses.
1982 if (!StrideAPtrInt || !StrideBPtrInt)
1983 return MemoryDepChecker::Dependence::Unknown;
1984
1985 // Both Src and Sink have a constant stride, check if they are in the same
1986 // direction.
1987 if ((StrideAPtrInt > 0) != (StrideBPtrInt > 0)) {
1988 LLVM_DEBUG(
1989 dbgs() << "Pointer access with strides in different directions\n");
1990 return MemoryDepChecker::Dependence::Unknown;
1991 }
1992
1993 uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
1994 bool HasSameSize =
1995 DL.getTypeStoreSizeInBits(ATy) == DL.getTypeStoreSizeInBits(BTy);
1996 if (!HasSameSize)
1997 TypeByteSize = 0;
1998
1999 StrideAPtrInt = std::abs(StrideAPtrInt);
2000 StrideBPtrInt = std::abs(StrideBPtrInt);
2001
2002 uint64_t MaxStride = std::max(StrideAPtrInt, StrideBPtrInt);
2003
2004 std::optional<uint64_t> CommonStride;
2005 if (StrideAPtrInt == StrideBPtrInt)
2006 CommonStride = StrideAPtrInt;
2007
2008 // TODO: Historically, we don't retry with runtime checks unless the
2009 // (unscaled) strides are the same. Fix this once the condition for runtime
2010 // checks in isDependent is fixed.
2011 bool ShouldRetryWithRuntimeCheck = CommonStride.has_value();
2012
2013 return DepDistanceStrideAndSizeInfo(Dist, MaxStride, CommonStride,
2014 ShouldRetryWithRuntimeCheck, TypeByteSize,
2015 AIsWrite, BIsWrite);
2016}
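// Worked example (illustrative): for "A[i+1] = A[i] + 1" with i32 elements,
// Src is {%A,+,4}, Sink is {(%A + 4),+,4}, Dist is 4, both strides are 1 (so
// CommonStride == 1), TypeByteSize == 4 and ShouldRetryWithRuntimeCheck is
// true.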
2017
2018 MemoryDepChecker::Dependence::DepType
2019 MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
2020 const MemAccessInfo &B, unsigned BIdx) {
2021 assert(AIdx < BIdx && "Must pass arguments in program order");
2022
2023 // Get the dependence distance, stride, type size and what access writes for
2024 // the dependence between A and B.
2025 auto Res =
2026 getDependenceDistanceStrideAndSize(A, InstMap[AIdx], B, InstMap[BIdx]);
2027 if (std::holds_alternative<Dependence::DepType>(Res))
2028 return std::get<Dependence::DepType>(Res);
2029
2030 auto &[Dist, MaxStride, CommonStride, ShouldRetryWithRuntimeCheck,
2031 TypeByteSize, AIsWrite, BIsWrite] =
2032 std::get<DepDistanceStrideAndSizeInfo>(Res);
2033 bool HasSameSize = TypeByteSize > 0;
2034
2035 if (isa<SCEVCouldNotCompute>(Dist)) {
2036 // TODO: Relax requirement that there is a common unscaled stride to retry
2037 // with non-constant distance dependencies.
2038 FoundNonConstantDistanceDependence |= ShouldRetryWithRuntimeCheck;
2039 LLVM_DEBUG(dbgs() << "LAA: Dependence because of uncomputable distance.\n");
2040 return Dependence::Unknown;
2041 }
2042
2043 ScalarEvolution &SE = *PSE.getSE();
2044 auto &DL = InnermostLoop->getHeader()->getDataLayout();
2045
2046 // If the distance between the accesses is larger than their maximum absolute
2047 // stride multiplied by the symbolic maximum backedge taken count (which is an
2048 // upper bound of the number of iterations), the accesses are independent, i.e.
2049 // they are far enough apart that they won't access the same location
2050 // across all loop iterations.
2051 if (HasSameSize && isSafeDependenceDistance(
2052 DL, SE, *(PSE.getSymbolicMaxBackedgeTakenCount()),
2053 *Dist, MaxStride, TypeByteSize))
2054 return Dependence::NoDep;
2055
2056 const SCEVConstant *ConstDist = dyn_cast<SCEVConstant>(Dist);
2057
2058 // Attempt to prove strided accesses independent.
2059 if (ConstDist) {
2060 uint64_t Distance = ConstDist->getAPInt().abs().getZExtValue();
2061
2062 // If the distance between accesses and their strides are known constants,
2063 // check whether the accesses interlace each other.
2064 if (Distance > 0 && CommonStride && CommonStride > 1 && HasSameSize &&
2065 areStridedAccessesIndependent(Distance, *CommonStride, TypeByteSize)) {
2066 LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
2067 return Dependence::NoDep;
2068 }
2069 } else {
2070 if (!LoopGuards)
2071 LoopGuards.emplace(
2072 ScalarEvolution::LoopGuards::collect(InnermostLoop, SE));
2073 Dist = SE.applyLoopGuards(Dist, *LoopGuards);
2074 }
2075
2076 // Negative distances are not plausible dependencies.
2077 if (SE.isKnownNonPositive(Dist)) {
2078 if (SE.isKnownNonNegative(Dist)) {
2079 if (HasSameSize) {
2080 // Write to the same location with the same size.
2081 return Dependence::Forward;
2082 }
2083 LLVM_DEBUG(dbgs() << "LAA: possibly zero dependence difference but "
2084 "different type sizes\n");
2085 return Dependence::Unknown;
2086 }
2087
2088 bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
2089 // Check if the first access writes to a location that is read in a later
2090 // iteration, where the distance between them is not a multiple of a vector
2091 // factor and relatively small.
2092 //
2093 // NOTE: There is no need to update MaxSafeVectorWidthInBits after call to
2094 // couldPreventStoreLoadForward, even if it changed MinDepDistBytes, since a
2095 // forward dependency will allow vectorization using any width.
2096
2097 if (IsTrueDataDependence && EnableForwardingConflictDetection) {
2098 if (!ConstDist) {
2099 // TODO: FoundNonConstantDistanceDependence is used as a necessary
2100 // condition to consider retrying with runtime checks. Historically, we
2101 // did not set it when strides were different but there is no inherent
2102 // reason to.
2103 FoundNonConstantDistanceDependence |= ShouldRetryWithRuntimeCheck;
2104 return Dependence::Unknown;
2105 }
2106 if (!HasSameSize ||
2107 couldPreventStoreLoadForward(
2108 ConstDist->getAPInt().abs().getZExtValue(), TypeByteSize)) {
2109 LLVM_DEBUG(
2110 dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
2111 return Dependence::ForwardButPreventsForwarding;
2112 }
2113 }
2114
2115 LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
2116 return Dependence::Forward;
2117 }
2118
2119 int64_t MinDistance = SE.getSignedRangeMin(Dist).getSExtValue();
2120 // Below we only handle strictly positive distances.
2121 if (MinDistance <= 0) {
2122 FoundNonConstantDistanceDependence |= ShouldRetryWithRuntimeCheck;
2123 return Dependence::Unknown;
2124 }
2125
2126 if (!ConstDist) {
2127 // Previously this case would be treated as Unknown, possibly setting
2128 // FoundNonConstantDistanceDependence to force re-trying with runtime
2129 // checks. Until the TODO below is addressed, set it here to preserve
2130 // original behavior w.r.t. re-trying with runtime checks.
2131 // TODO: FoundNonConstantDistanceDependence is used as a necessary
2132 // condition to consider retrying with runtime checks. Historically, we
2133 // did not set it when strides were different but there is no inherent
2134 // reason to.
2135 FoundNonConstantDistanceDependence |= ShouldRetryWithRuntimeCheck;
2136 }
2137
2138 if (!HasSameSize) {
2139 LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "
2140 "different type sizes\n");
2141 return Dependence::Unknown;
2142 }
2143
2144 if (!CommonStride)
2145 return Dependence::Unknown;
2146
2147 // Bail out early if passed-in parameters make vectorization not feasible.
2148 unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
2149 VectorizerParams::VectorizationFactor : 1);
2150 unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
2151 VectorizerParams::VectorizationInterleave : 1);
2152 // The minimum number of iterations for a vectorized/unrolled version.
2153 unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
2154
2155 // It's not vectorizable if the distance is smaller than the minimum distance
2156 // needed for a vectorized/unrolled version. Vectorizing one iteration in
2157 // front needs TypeByteSize * Stride. Vectorizing the last iteration needs
2158 // TypeByteSize (no need to add the last gap distance).
2159 //
2160 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
2161 // foo(int *A) {
2162 // int *B = (int *)((char *)A + 14);
2163 // for (i = 0 ; i < 1024 ; i += 2)
2164 // B[i] = A[i] + 1;
2165 // }
2166 //
2167 // Two accesses in memory (stride is 2):
2168 // | A[0] | | A[2] | | A[4] | | A[6] | |
2169 // | B[0] | | B[2] | | B[4] |
2170 //
2171 // The minimum distance needed for all iterations except the last is
2172 // 4 * 2 * (MinNumIter - 1); the last iteration needs another 4 bytes.
2173 // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
2174 //
2175 // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
2176 // 12, which is less than the distance of 14.
2177 //
2178 // If MinNumIter is 4 (say if a user forces the vectorization factor to be 4),
2179 // the minimum distance needed is 28, which is greater than the distance of
2180 // 14. It is not safe to do vectorization.
2181
2182 // We know that Dist is positive, but it may not be constant. Use the signed
2183 // minimum for computations below, as this ensures we compute the closest
2184 // possible dependence distance.
2185 uint64_t MinDistanceNeeded =
2186 TypeByteSize * *CommonStride * (MinNumIter - 1) + TypeByteSize;
2187 if (MinDistanceNeeded > static_cast<uint64_t>(MinDistance)) {
2188 if (!ConstDist) {
2189 // For non-constant distances, we checked the lower bound of the
2190 // dependence distance and the distance may be larger at runtime (and safe
2191 // for vectorization). Classify it as Unknown, so we re-try with runtime
2192 // checks.
2193 return Dependence::Unknown;
2194 }
2195 LLVM_DEBUG(dbgs() << "LAA: Failure because of positive minimum distance "
2196 << MinDistance << '\n');
2197 return Dependence::Backward;
2198 }
2199
2200 // Unsafe if the minimum distance needed is greater than the smallest
2201 // dependence distance.
2202 if (MinDistanceNeeded > MinDepDistBytes) {
2203 LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
2204 << MinDistanceNeeded << " size in bytes\n");
2205 return Dependence::Backward;
2206 }
2207
2208 // Positive distance bigger than max vectorization factor.
2209 // FIXME: Should use max factor instead of max distance in bytes, which could
2210 // not handle different types.
2211 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
2212 // void foo (int *A, char *B) {
2213 // for (unsigned i = 0; i < 1024; i++) {
2214 // A[i+2] = A[i] + 1;
2215 // B[i+2] = B[i] + 1;
2216 // }
2217 // }
2218 //
2219 // This case is currently unsafe according to the max safe distance. If we
2220 // analyze the two accesses on array B, the max safe dependence distance
2221 // is 2. Then we analyze the accesses on array A, the minimum distance needed
2222 // is 8, which is greater than 2, so vectorization is forbidden. But actually
2223 // both A and B could be vectorized by 2 iterations.
2224 MinDepDistBytes =
2225 std::min(static_cast<uint64_t>(MinDistance), MinDepDistBytes);
2226
2227 bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
2228 uint64_t MinDepDistBytesOld = MinDepDistBytes;
2229 if (IsTrueDataDependence && EnableForwardingConflictDetection && ConstDist &&
2230 couldPreventStoreLoadForward(MinDistance, TypeByteSize)) {
2231 // Sanity check that we didn't update MinDepDistBytes when calling
2232 // couldPreventStoreLoadForward
2233 assert(MinDepDistBytes == MinDepDistBytesOld &&
2234 "An update to MinDepDistBytes requires an update to "
2235 "MaxSafeVectorWidthInBits");
2236 (void)MinDepDistBytesOld;
2237 return Dependence::BackwardVectorizableButPreventsForwarding;
2238 }
2239
2240 // An update to MinDepDistBytes requires an update to MaxSafeVectorWidthInBits
2241 // since there is a backwards dependency.
2242 uint64_t MaxVF = MinDepDistBytes / (TypeByteSize * *CommonStride);
2243 LLVM_DEBUG(dbgs() << "LAA: Positive min distance " << MinDistance
2244 << " with max VF = " << MaxVF << '\n');
2245
2246 uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
2247 if (!ConstDist && MaxVFInBits < MaxTargetVectorWidthInBits) {
2248 // For non-constant distances, we checked the lower bound of the dependence
2249 // distance and the distance may be larger at runtime (and safe for
2250 // vectorization). Classify it as Unknown, so we re-try with runtime checks.
2251 return Dependence::Unknown;
2252 }
2253
2254 MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);
2255 return Dependence::BackwardVectorizable;
2256}
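// Worked example (illustrative): if MinDepDistBytes is 16, the common stride
// is 2 and TypeByteSize is 4, then MaxVF == 16 / (4 * 2) == 2 and
// MaxVFInBits == 64, so MaxSafeVectorWidthInBits is capped at 64 bits for
// this backward dependence.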
2257
2258 bool MemoryDepChecker::areDepsSafe(const DepCandidates &AccessSets,
2259 const MemAccessInfoList &CheckDeps) {
2260
2261 MinDepDistBytes = -1;
2262 SmallPtrSet<MemAccessInfo, 8> Visited;
2263 for (MemAccessInfo CurAccess : CheckDeps) {
2264 if (Visited.count(CurAccess))
2265 continue;
2266
2267 // Get the relevant memory access set.
2268 EquivalenceClasses<MemAccessInfo>::iterator I =
2269 AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));
2270
2271 // Check accesses within this set.
2272 EquivalenceClasses<MemAccessInfo>::member_iterator AI =
2273 AccessSets.member_begin(I);
2274 EquivalenceClasses<MemAccessInfo>::member_iterator AE =
2275 AccessSets.member_end();
2276
2277 // Check every access pair.
2278 while (AI != AE) {
2279 Visited.insert(*AI);
2280 bool AIIsWrite = AI->getInt();
2281 // Check loads only against the next equivalence class, but stores also
2282 // against other stores in the same equivalence class - to the same address.
2283 EquivalenceClasses<MemAccessInfo>::member_iterator OI =
2284 (AIIsWrite ? AI : std::next(AI));
2285 while (OI != AE) {
2286 // Check every accessing instruction pair in program order.
2287 for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
2288 I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
2289 // Scan all accesses of another equivalence class, but only the next
2290 // accesses of the same equivalence class.
2291 for (std::vector<unsigned>::iterator
2292 I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
2293 I2E = (OI == AI ? I1E : Accesses[*OI].end());
2294 I2 != I2E; ++I2) {
2295 auto A = std::make_pair(&*AI, *I1);
2296 auto B = std::make_pair(&*OI, *I2);
2297
2298 assert(*I1 != *I2);
2299 if (*I1 > *I2)
2300 std::swap(A, B);
2301
2302 Dependence::DepType Type =
2303 isDependent(*A.first, A.second, *B.first, B.second);
2304 mergeInStatus(Dependence::isSafeForVectorization(Type));
2305
2306 // Gather dependences unless we accumulated MaxDependences
2307 // dependences. In that case return as soon as we find the first
2308 // unsafe dependence. This puts a limit on this quadratic
2309 // algorithm.
2310 if (RecordDependences) {
2311 if (Type != Dependence::NoDep)
2312 Dependences.emplace_back(A.second, B.second, Type);
2313
2314 if (Dependences.size() >= MaxDependences) {
2315 RecordDependences = false;
2316 Dependences.clear();
2318 << "Too many dependences, stopped recording\n");
2319 }
2320 }
2321 if (!RecordDependences && !isSafeForVectorization())
2322 return false;
2323 }
2324 ++OI;
2325 }
2326 ++AI;
2327 }
2328 }
2329
2330 LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
2331 return isSafeForVectorization();
2332}
2333
2334 SmallVector<Instruction *, 4>
2335 MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool IsWrite) const {
2336 MemAccessInfo Access(Ptr, IsWrite);
2337 auto &IndexVector = Accesses.find(Access)->second;
2338
2340 transform(IndexVector,
2341 std::back_inserter(Insts),
2342 [&](unsigned Idx) { return this->InstMap[Idx]; });
2343 return Insts;
2344}
2345
2347 "NoDep",
2348 "Unknown",
2349 "IndirectUnsafe",
2350 "Forward",
2351 "ForwardButPreventsForwarding",
2352 "Backward",
2353 "BackwardVectorizable",
2354 "BackwardVectorizableButPreventsForwarding"};
2355
2356 void MemoryDepChecker::Dependence::print(
2357 raw_ostream &OS, unsigned Depth,
2358 const SmallVectorImpl<Instruction *> &Instrs) const {
2359 OS.indent(Depth) << DepName[Type] << ":\n";
2360 OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
2361 OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
2362}
2363
2364bool LoopAccessInfo::canAnalyzeLoop() {
2365 // We need to have a loop header.
2366 LLVM_DEBUG(dbgs() << "\nLAA: Checking a loop in '"
2367 << TheLoop->getHeader()->getParent()->getName() << "' from "
2368 << TheLoop->getLocStr() << "\n");
2369
2370 // We can only analyze innermost loops.
2371 if (!TheLoop->isInnermost()) {
2372 LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
2373 recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
2374 return false;
2375 }
2376
2377 // We must have a single backedge.
2378 if (TheLoop->getNumBackEdges() != 1) {
2379 LLVM_DEBUG(
2380 dbgs() << "LAA: loop control flow is not understood by analyzer\n");
2381 recordAnalysis("CFGNotUnderstood")
2382 << "loop control flow is not understood by analyzer";
2383 return false;
2384 }
2385
2386 // ScalarEvolution needs to be able to find the symbolic max backedge taken
2387 // count, which is an upper bound on the number of loop iterations. The loop
2388 // may execute fewer iterations, if it exits via an uncountable exit.
2389 const SCEV *ExitCount = PSE->getSymbolicMaxBackedgeTakenCount();
2390 if (isa<SCEVCouldNotCompute>(ExitCount)) {
2391 recordAnalysis("CantComputeNumberOfIterations")
2392 << "could not determine number of loop iterations";
2393 LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
2394 return false;
2395 }
2396
2397 LLVM_DEBUG(dbgs() << "LAA: Found an analyzable loop: "
2398 << TheLoop->getHeader()->getName() << "\n");
2399 return true;
2400}
2401
2402bool LoopAccessInfo::analyzeLoop(AAResults *AA, const LoopInfo *LI,
2403 const TargetLibraryInfo *TLI,
2404 DominatorTree *DT) {
2405 // Holds the Load and Store instructions.
2406 SmallVector<LoadInst *, 16> Loads;
2407 SmallVector<StoreInst *, 16> Stores;
2408 SmallPtrSet<MDNode *, 8> LoopAliasScopes;
2409
2410 // Holds all the different accesses in the loop.
2411 unsigned NumReads = 0;
2412 unsigned NumReadWrites = 0;
2413
2414 bool HasComplexMemInst = false;
2415
2416 // A runtime check is only legal to insert if there are no convergent calls.
2417 HasConvergentOp = false;
2418
2419 PtrRtChecking->Pointers.clear();
2420 PtrRtChecking->Need = false;
2421
2422 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
2423
2424 const bool EnableMemAccessVersioningOfLoop =
2426 !TheLoop->getHeader()->getParent()->hasOptSize();
2427
2428 // Traverse blocks in fixed RPOT order, regardless of their storage in the
2429 // loop info, as it may be arbitrary.
2430 LoopBlocksRPO RPOT(TheLoop);
2431 RPOT.perform(LI);
2432 for (BasicBlock *BB : RPOT) {
2433 // Scan the BB and collect legal loads and stores. Also detect any
2434 // convergent instructions.
2435 for (Instruction &I : *BB) {
2436 if (auto *Call = dyn_cast<CallBase>(&I)) {
2437 if (Call->isConvergent())
2438 HasConvergentOp = true;
2439 }
2440
2441 // With both a non-vectorizable memory instruction and a convergent
2442 // operation found in this loop, there is no reason to continue the search.
2443 if (HasComplexMemInst && HasConvergentOp)
2444 return false;
2445
2446 // Avoid hitting recordAnalysis multiple times.
2447 if (HasComplexMemInst)
2448 continue;
2449
2450 // Record alias scopes defined inside the loop.
2451 if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
2452 for (Metadata *Op : Decl->getScopeList()->operands())
2453 LoopAliasScopes.insert(cast<MDNode>(Op));
2454
2455 // Many math library functions read the rounding mode. We will only
2456 // vectorize a loop if it contains known function calls that don't set
2457 // the flag. Therefore, it is safe to ignore this read from memory.
2458 auto *Call = dyn_cast<CallInst>(&I);
2459 if (Call && getVectorIntrinsicIDForCall(Call, TLI))
2460 continue;
2461
2462 // If this is a load, save it. If this instruction can read from memory
2463 // but is not a load, we only allow it if it's a call to a function with a
2464 // vector mapping and no pointer arguments.
2465 if (I.mayReadFromMemory()) {
2466 auto hasPointerArgs = [](CallBase *CB) {
2467 return any_of(CB->args(), [](Value const *Arg) {
2468 return Arg->getType()->isPointerTy();
2469 });
2470 };
2471
2472 // If the function has an explicit vectorized counterpart, and does not
2473 // take output/input pointers, we can safely assume that it can be
2474 // vectorized.
2475 if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
2476 !hasPointerArgs(Call) && !VFDatabase::getMappings(*Call).empty())
2477 continue;
2478
2479 auto *Ld = dyn_cast<LoadInst>(&I);
2480 if (!Ld) {
2481 recordAnalysis("CantVectorizeInstruction", Ld)
2482 << "instruction cannot be vectorized";
2483 HasComplexMemInst = true;
2484 continue;
2485 }
2486 if (!Ld->isSimple() && !IsAnnotatedParallel) {
2487 recordAnalysis("NonSimpleLoad", Ld)
2488 << "read with atomic ordering or volatile read";
2489 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
2490 HasComplexMemInst = true;
2491 continue;
2492 }
2493 NumLoads++;
2494 Loads.push_back(Ld);
2495 DepChecker->addAccess(Ld);
2496 if (EnableMemAccessVersioningOfLoop)
2497 collectStridedAccess(Ld);
2498 continue;
2499 }
2500
2501 // Save 'store' instructions. Abort if other instructions write to memory.
2502 if (I.mayWriteToMemory()) {
2503 auto *St = dyn_cast<StoreInst>(&I);
2504 if (!St) {
2505 recordAnalysis("CantVectorizeInstruction", St)
2506 << "instruction cannot be vectorized";
2507 HasComplexMemInst = true;
2508 continue;
2509 }
2510 if (!St->isSimple() && !IsAnnotatedParallel) {
2511 recordAnalysis("NonSimpleStore", St)
2512 << "write with atomic ordering or volatile write";
2513 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
2514 HasComplexMemInst = true;
2515 continue;
2516 }
2517 NumStores++;
2518 Stores.push_back(St);
2519 DepChecker->addAccess(St);
2520 if (EnableMemAccessVersioningOfLoop)
2521 collectStridedAccess(St);
2522 }
2523 } // Next instr.
2524 } // Next block.
2525
2526 if (HasComplexMemInst)
2527 return false;
2528
2529 // Now we have two lists that hold the loads and the stores.
2530 // Next, we find the pointers that they use.
2531
2532 // Check if we see any stores. If there are no stores, then we don't
2533 // care if the pointers are *restrict*.
2534 if (!Stores.size()) {
2535 LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
2536 return true;
2537 }
2538
2539 MemoryDepChecker::DepCandidates DependentAccesses;
2540 AccessAnalysis Accesses(TheLoop, AA, LI, DependentAccesses, *PSE,
2541 LoopAliasScopes);
2542
2543 // Holds the analyzed pointers. We don't want to call getUnderlyingObjects
2544 // multiple times on the same object. If the ptr is accessed twice, once
2545 // for read and once for write, it will only appear once (on the write
2546 // list). This is okay, since we are going to check for conflicts between
2547 // writes and between reads and writes, but not between reads and reads.
2548 SmallSet<std::pair<Value *, Type *>, 16> Seen;
2549
2550 // Record uniform store addresses to identify if we have multiple stores
2551 // to the same address.
2552 SmallPtrSet<Value *, 16> UniformStores;
2553
2554 for (StoreInst *ST : Stores) {
2555 Value *Ptr = ST->getPointerOperand();
2556
2557 if (isInvariant(Ptr)) {
2558 // Record store instructions to loop invariant addresses
2559 StoresToInvariantAddresses.push_back(ST);
2560 HasStoreStoreDependenceInvolvingLoopInvariantAddress |=
2561 !UniformStores.insert(Ptr).second;
2562 }
2563
2564 // If we did *not* see this pointer before, insert it to the read-write
2565 // list. At this phase it is only a 'write' list.
2566 Type *AccessTy = getLoadStoreType(ST);
2567 if (Seen.insert({Ptr, AccessTy}).second) {
2568 ++NumReadWrites;
2569
2570 MemoryLocation Loc = MemoryLocation::get(ST);
2571 // The TBAA metadata could have a control dependency on the predication
2572 // condition, so we cannot rely on it when determining whether or not we
2573 // need runtime pointer checks.
2574 if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
2575 Loc.AATags.TBAA = nullptr;
2576
2577 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2578 [&Accesses, AccessTy, Loc](Value *Ptr) {
2579 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2580 Accesses.addStore(NewLoc, AccessTy);
2581 });
2582 }
2583 }
2584
2585 if (IsAnnotatedParallel) {
2586 LLVM_DEBUG(
2587 dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
2588 << "checks.\n");
2589 return true;
2590 }
2591
2592 for (LoadInst *LD : Loads) {
2593 Value *Ptr = LD->getPointerOperand();
2594 // If we did *not* see this pointer before, insert it to the
2595 // read list. If we *did* see it before, then it is already in
2596 // the read-write list. This allows us to vectorize expressions
2597 // such as A[i] += x; Because the address of A[i] is a read-write
2598 // pointer. This only works if the index of A[i] is consecutive.
2599 // If the address of i is unknown (for example A[B[i]]) then we may
2600 // read a few words, modify, and write a few words, and some of the
2601 // words may be written to the same address.
2602 bool IsReadOnlyPtr = false;
2603 Type *AccessTy = getLoadStoreType(LD);
2604 if (Seen.insert({Ptr, AccessTy}).second ||
2605 !getPtrStride(*PSE, LD->getType(), Ptr, TheLoop, SymbolicStrides).value_or(0)) {
2606 ++NumReads;
2607 IsReadOnlyPtr = true;
2608 }
2609
2610 // See if there is an unsafe dependency between a load from a uniform address
2611 // and a store to the same uniform address.
2612 if (UniformStores.count(Ptr)) {
2613 LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
2614 "load and uniform store to the same address!\n");
2615 HasLoadStoreDependenceInvolvingLoopInvariantAddress = true;
2616 }
2617
2618 MemoryLocation Loc = MemoryLocation::get(LD);
2619 // The TBAA metadata could have a control dependency on the predication
2620 // condition, so we cannot rely on it when determining whether or not we
2621 // need runtime pointer checks.
2622 if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
2623 Loc.AATags.TBAA = nullptr;
2624
2625 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2626 [&Accesses, AccessTy, Loc, IsReadOnlyPtr](Value *Ptr) {
2627 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2628 Accesses.addLoad(NewLoc, AccessTy, IsReadOnlyPtr);
2629 });
2630 }
2631
2632 // If we write (or read-write) to a single destination and there are no
2633 // other reads in this loop then it is safe to vectorize.
2634 if (NumReadWrites == 1 && NumReads == 0) {
2635 LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
2636 return true;
2637 }
2638
2639 // Build dependence sets and check whether we need a runtime pointer bounds
2640 // check.
2641 Accesses.buildDependenceSets();
2642
2643 // Find pointers with computable bounds. We are going to use this information
2644 // to place a runtime bound check.
2645 Value *UncomputablePtr = nullptr;
2646 bool CanDoRTIfNeeded =
2647 Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(), TheLoop,
2648 SymbolicStrides, UncomputablePtr, false);
2649 if (!CanDoRTIfNeeded) {
2650 const auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2651 recordAnalysis("CantIdentifyArrayBounds", I)
2652 << "cannot identify array bounds";
2653 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
2654 << "the array bounds.\n");
2655 return false;
2656 }
2657
2658 LLVM_DEBUG(
2659 dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");
2660
2661 bool DepsAreSafe = true;
2662 if (Accesses.isDependencyCheckNeeded()) {
2663 LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
2664 DepsAreSafe = DepChecker->areDepsSafe(DependentAccesses,
2665 Accesses.getDependenciesToCheck());
2666
2667 if (!DepsAreSafe && DepChecker->shouldRetryWithRuntimeCheck()) {
2668 LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");
2669
2670 // Clear the dependency checks. We assume they are not needed.
2671 Accesses.resetDepChecks(*DepChecker);
2672
2673 PtrRtChecking->reset();
2674 PtrRtChecking->Need = true;
2675
2676 auto *SE = PSE->getSE();
2677 UncomputablePtr = nullptr;
2678 CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(
2679 *PtrRtChecking, SE, TheLoop, SymbolicStrides, UncomputablePtr, true);
2680
2681 // Check that we found the bounds for the pointer.
2682 if (!CanDoRTIfNeeded) {
2683 auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2684 recordAnalysis("CantCheckMemDepsAtRunTime", I)
2685 << "cannot check memory dependencies at runtime";
2686 LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
2687 return false;
2688 }
2689 DepsAreSafe = true;
2690 }
2691 }
2692
2693 if (HasConvergentOp) {
2694 recordAnalysis("CantInsertRuntimeCheckWithConvergent")
2695 << "cannot add control dependency to convergent operation";
2696 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
2697 "would be needed with a convergent operation\n");
2698 return false;
2699 }
2700
2701 if (DepsAreSafe) {
2702 LLVM_DEBUG(
2703 dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
2704 << (PtrRtChecking->Need ? "" : " don't")
2705 << " need runtime memory checks.\n");
2706 return true;
2707 }
2708
2709 emitUnsafeDependenceRemark();
2710 return false;
2711}
2712
2713void LoopAccessInfo::emitUnsafeDependenceRemark() {
2714 const auto *Deps = getDepChecker().getDependences();
2715 if (!Deps)
2716 return;
2717 const auto *Found =
2718 llvm::find_if(*Deps, [](const MemoryDepChecker::Dependence &D) {
2719 return MemoryDepChecker::Dependence::isSafeForVectorization(D.Type) !=
2720 MemoryDepChecker::VectorizationSafetyStatus::Safe;
2721 });
2722 if (Found == Deps->end())
2723 return;
2724 MemoryDepChecker::Dependence Dep = *Found;
2725
2726 LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
2727
2728 // Emit remark for first unsafe dependence
2729 bool HasForcedDistribution = false;
2730 std::optional<const MDOperand *> Value =
2731 findStringMetadataForLoop(TheLoop, "llvm.loop.distribute.enable");
2732 if (Value) {
2733 const MDOperand *Op = *Value;
2734 assert(Op && mdconst::hasa<ConstantInt>(*Op) && "invalid metadata");
2735 HasForcedDistribution = mdconst::extract<ConstantInt>(*Op)->getZExtValue();
2736 }
2737
2738 const std::string Info =
2739 HasForcedDistribution
2740 ? "unsafe dependent memory operations in loop."
2741 : "unsafe dependent memory operations in loop. Use "
2742 "#pragma clang loop distribute(enable) to allow loop distribution "
2743 "to attempt to isolate the offending operations into a separate "
2744 "loop";
2745 OptimizationRemarkAnalysis &R =
2746 recordAnalysis("UnsafeDep", Dep.getDestination(getDepChecker())) << Info;
2747
2748 switch (Dep.Type) {
2749 case MemoryDepChecker::Dependence::NoDep:
2750 case MemoryDepChecker::Dependence::Forward:
2751 case MemoryDepChecker::Dependence::BackwardVectorizable:
2752 llvm_unreachable("Unexpected dependence");
2753 case MemoryDepChecker::Dependence::Backward:
2754 R << "\nBackward loop carried data dependence.";
2755 break;
2756 case MemoryDepChecker::Dependence::ForwardButPreventsForwarding:
2757 R << "\nForward loop carried data dependence that prevents "
2758 "store-to-load forwarding.";
2759 break;
2760 case MemoryDepChecker::Dependence::BackwardVectorizableButPreventsForwarding:
2761 R << "\nBackward loop carried data dependence that prevents "
2762 "store-to-load forwarding.";
2763 break;
2764 case MemoryDepChecker::Dependence::IndirectUnsafe:
2765 R << "\nUnsafe indirect dependence.";
2766 break;
2767 case MemoryDepChecker::Dependence::Unknown:
2768 R << "\nUnknown data dependence.";
2769 break;
2770 }
2771
2772 if (Instruction *I = Dep.getSource(getDepChecker())) {
2773 DebugLoc SourceLoc = I->getDebugLoc();
2774 if (auto *DD = dyn_cast_or_null<Instruction>(getPointerOperand(I)))
2775 SourceLoc = DD->getDebugLoc();
2776 if (SourceLoc)
2777 R << " Memory location is the same as accessed at "
2778 << ore::NV("Location", SourceLoc);
2779 }
2780}
2781
2783 DominatorTree *DT) {
2784 assert(TheLoop->contains(BB) && "Unknown block used");
2785
2786 // Blocks that do not dominate the latch need predication.
2787 const BasicBlock *Latch = TheLoop->getLoopLatch();
2788 return !DT->dominates(BB, Latch);
2789}
2790
2792LoopAccessInfo::recordAnalysis(StringRef RemarkName, const Instruction *I) {
2793 assert(!Report && "Multiple reports generated");
2794
2795 const Value *CodeRegion = TheLoop->getHeader();
2796 DebugLoc DL = TheLoop->getStartLoc();
2797
2798 if (I) {
2799 CodeRegion = I->getParent();
2800 // If there is no debug location attached to the instruction, revert back to
2801 // using the loop's.
2802 if (I->getDebugLoc())
2803 DL = I->getDebugLoc();
2804 }
2805
2806 Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName, DL,
2807 CodeRegion);
2808 return *Report;
2809}
2810
2811 bool LoopAccessInfo::isInvariant(Value *V) const {
2812 auto *SE = PSE->getSE();
2813 // TODO: Is this really what we want? Even without FP SCEV, we may want some
2814 // trivially loop-invariant FP values to be considered invariant.
2815 if (!SE->isSCEVable(V->getType()))
2816 return false;
2817 const SCEV *S = SE->getSCEV(V);
2818 return SE->isLoopInvariant(S, TheLoop);
2819}
2820
2821/// Find the operand of the GEP that should be checked for consecutive
2822/// stores. This ignores trailing indices that have no effect on the final
2823/// pointer.
2824static unsigned getGEPInductionOperand(const GetElementPtrInst *Gep) {
2825 const DataLayout &DL = Gep->getDataLayout();
2826 unsigned LastOperand = Gep->getNumOperands() - 1;
2827 TypeSize GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());
2828
2829 // Walk backwards and try to peel off zeros.
2830 while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
2831 // Find the type we're currently indexing into.
2832 gep_type_iterator GEPTI = gep_type_begin(Gep);
2833 std::advance(GEPTI, LastOperand - 2);
2834
2835 // If it's a type with the same allocation size as the result of the GEP we
2836 // can peel off the zero index.
2837 TypeSize ElemSize = GEPTI.isStruct()
2838 ? DL.getTypeAllocSize(GEPTI.getIndexedType())
2839 : GEPTI.getSequentialElementStride(DL);
2840 if (ElemSize != GEPAllocSize)
2841 break;
2842 --LastOperand;
2843 }
2844
2845 return LastOperand;
2846}
2847
2848/// If the argument is a GEP, then returns the operand identified by
2849/// getGEPInductionOperand. However, if there is some other non-loop-invariant
2850/// operand, it returns that instead.
2851 static Value *stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
2852 auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
2853 if (!GEP)
2854 return Ptr;
2855
2856 unsigned InductionOperand = getGEPInductionOperand(GEP);
2857
2858 // Check that all of the gep indices are uniform except for our induction
2859 // operand.
2860 for (unsigned I = 0, E = GEP->getNumOperands(); I != E; ++I)
2861 if (I != InductionOperand &&
2862 !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(I)), Lp))
2863 return Ptr;
2864 return GEP->getOperand(InductionOperand);
2865}
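// Illustrative example (assumes %A is loop-invariant): for
// "gep i32, ptr %A, i64 %mul" with %mul = %i * %Stride, the induction operand
// is operand 1, every other operand is loop-invariant, and the function
// returns %mul instead of the GEP itself.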
2866
2867/// Get the stride of a pointer access in a loop. Looks for symbolic
2868/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
2869 static const SCEV *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
2870 auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
2871 if (!PtrTy || PtrTy->isAggregateType())
2872 return nullptr;
2873
2874 // Try to remove a gep instruction to make the pointer (actually the index at
2875 // this point) easier to analyze. If OrigPtr is equal to Ptr we are analyzing the
2876 // pointer, otherwise, we are analyzing the index.
2877 Value *OrigPtr = Ptr;
2878
2879 // The size of the pointer access.
2880 int64_t PtrAccessSize = 1;
2881
2882 Ptr = stripGetElementPtr(Ptr, SE, Lp);
2883 const SCEV *V = SE->getSCEV(Ptr);
2884
2885 if (Ptr != OrigPtr)
2886 // Strip off casts.
2887 while (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V))
2888 V = C->getOperand();
2889
2890 const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
2891 if (!S)
2892 return nullptr;
2893
2894 // If the pointer is invariant then there is no stride and it makes no
2895 // sense to add it here.
2896 if (Lp != S->getLoop())
2897 return nullptr;
2898
2899 V = S->getStepRecurrence(*SE);
2900 if (!V)
2901 return nullptr;
2902
2903 // Strip off the size of access multiplication if we are still analyzing the
2904 // pointer.
2905 if (OrigPtr == Ptr) {
2906 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
2907 if (M->getOperand(0)->getSCEVType() != scConstant)
2908 return nullptr;
2909
2910 const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();
2911
2912 // Huge step value - give up.
2913 if (APStepVal.getBitWidth() > 64)
2914 return nullptr;
2915
2916 int64_t StepVal = APStepVal.getSExtValue();
2917 if (PtrAccessSize != StepVal)
2918 return nullptr;
2919 V = M->getOperand(1);
2920 }
2921 }
2922
2923 // Note that the restrictions after this loop-invariant check are only
2924 // profitability restrictions.
2925 if (!SE->isLoopInvariant(V, Lp))
2926 return nullptr;
2927
2928 // Look for the loop invariant symbolic value.
2929 if (isa<SCEVUnknown>(V))
2930 return V;
2931
2932 if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(V))
2933 if (isa<SCEVUnknown>(C->getOperand()))
2934 return V;
2935
2936 return nullptr;
2937}
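// Worked example (illustrative): for an access "A[i * Stride]" lowered to
// "gep i32, ptr %A, i64 %mul" with %mul = %i * %Stride, the GEP is stripped to
// %mul, whose SCEV is {0,+,%Stride}<%loop>; the step %Stride is a
// loop-invariant SCEVUnknown and is returned as the symbolic stride to
// version on.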
2938
2939void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
2940 Value *Ptr = getLoadStorePointerOperand(MemAccess);
2941 if (!Ptr)
2942 return;
2943
2944 // Note: getStrideFromPointer is a *profitability* heuristic. We
2945 // could broaden the scope of values returned here - to anything
2946 // which happens to be loop invariant and contributes to the
2947 // computation of an interesting IV - but we chose not to as we
2948 // don't have a cost model here, and broadening the scope exposes
2949 // far too many unprofitable cases.
2950 const SCEV *StrideExpr = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
2951 if (!StrideExpr)
2952 return;
2953
2954 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
2955 "versioning:");
2956 LLVM_DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *StrideExpr << "\n");
2957
2958 if (!SpeculateUnitStride) {
2959 LLVM_DEBUG(dbgs() << " Chose not to due to -laa-speculate-unit-stride\n");
2960 return;
2961 }
2962
2963 // Avoid adding the "Stride == 1" predicate when we know that
2964 // Stride >= Trip-Count. Such a predicate will effectively optimize a single
2965 // or zero iteration loop, as Trip-Count <= Stride == 1.
2966 //
2967 // TODO: We are currently not making a very informed decision on when it is
2968 // beneficial to apply stride versioning. It might make more sense that the
2969 // users of this analysis (such as the vectorizer) will trigger it, based on
2970 // their specific cost considerations; For example, in cases where stride
2971 // versioning does not help resolving memory accesses/dependences, the
2972 // vectorizer should evaluate the cost of the runtime test, and the benefit
2973 // of various possible stride specializations, considering the alternatives
2974 // of using gather/scatters (if available).
2975
2976 const SCEV *MaxBTC = PSE->getSymbolicMaxBackedgeTakenCount();
2977
2978 // Match the types so we can compare the stride and the MaxBTC.
2979 // The Stride can be positive/negative, so we sign extend Stride;
2980 // The backedgeTakenCount is non-negative, so we zero extend MaxBTC.
2981 const DataLayout &DL = TheLoop->getHeader()->getDataLayout();
2982 uint64_t StrideTypeSizeBits = DL.getTypeSizeInBits(StrideExpr->getType());
2983 uint64_t BETypeSizeBits = DL.getTypeSizeInBits(MaxBTC->getType());
2984 const SCEV *CastedStride = StrideExpr;
2985 const SCEV *CastedBECount = MaxBTC;
2986 ScalarEvolution *SE = PSE->getSE();
2987 if (BETypeSizeBits >= StrideTypeSizeBits)
2988 CastedStride = SE->getNoopOrSignExtend(StrideExpr, MaxBTC->getType());
2989 else
2990 CastedBECount = SE->getZeroExtendExpr(MaxBTC, StrideExpr->getType());
2991 const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
2992 // Since TripCount == BackEdgeTakenCount + 1, checking:
2993 // "Stride >= TripCount" is equivalent to checking:
2994 // Stride - MaxBTC > 0
2995 if (SE->isKnownPositive(StrideMinusBETaken)) {
2996 LLVM_DEBUG(
2997 dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
2998 "Stride==1 predicate will imply that the loop executes "
2999 "at most once.\n");
3000 return;
3001 }
3002 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");
3003
3004 // Strip back off the integer cast, and check that our result is a
3005 // SCEVUnknown as we expect.
3006 const SCEV *StrideBase = StrideExpr;
3007 if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(StrideBase))
3008 StrideBase = C->getOperand();
3009 SymbolicStrides[Ptr] = cast<SCEVUnknown>(StrideBase);
3010}
3011
3012 LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
3013 const TargetTransformInfo *TTI,
3014 const TargetLibraryInfo *TLI, AAResults *AA,
3015 DominatorTree *DT, LoopInfo *LI)
3016 : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
3017 PtrRtChecking(nullptr), TheLoop(L) {
3018 unsigned MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
3019 if (TTI) {
3020 TypeSize FixedWidth =
3021 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector);
3022 if (FixedWidth.isNonZero()) {
3023 // Scale the vector width by 2 as rough estimate to also consider
3024 // interleaving.
3025 MaxTargetVectorWidthInBits = FixedWidth.getFixedValue() * 2;
3026 }
3027
3028 TypeSize ScalableWidth =
3029 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_ScalableVector);
3030 if (ScalableWidth.isNonZero())
3031 MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
3032 }
3033 DepChecker = std::make_unique<MemoryDepChecker>(*PSE, L, SymbolicStrides,
3034 MaxTargetVectorWidthInBits);
3035 PtrRtChecking = std::make_unique<RuntimePointerChecking>(*DepChecker, SE);
3036 if (canAnalyzeLoop())
3037 CanVecMem = analyzeLoop(AA, LI, TLI, DT);
3038}
3039
3040 void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
3041 if (CanVecMem) {
3042 OS.indent(Depth) << "Memory dependences are safe";
3043 const MemoryDepChecker &DC = getDepChecker();
3044 if (!DC.isSafeForAnyVectorWidth())
3045 OS << " with a maximum safe vector width of "
3046 << DC.getMaxSafeVectorWidthInBits() << " bits";
3047 if (PtrRtChecking->Need)
3048 OS << " with run-time checks";
3049 OS << "\n";
3050 }
3051
3052 if (HasConvergentOp)
3053 OS.indent(Depth) << "Has convergent operation in loop\n";
3054
3055 if (Report)
3056 OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";
3057
3058 if (auto *Dependences = DepChecker->getDependences()) {
3059 OS.indent(Depth) << "Dependences:\n";
3060 for (const auto &Dep : *Dependences) {
3061 Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
3062 OS << "\n";
3063 }
3064 } else
3065 OS.indent(Depth) << "Too many dependences, not recorded\n";
3066
3067 // List the pairs of accesses that need run-time checks to prove independence.
3068 PtrRtChecking->print(OS, Depth);
3069 OS << "\n";
3070
3071 OS.indent(Depth)
3072 << "Non vectorizable stores to invariant address were "
3073 << (HasStoreStoreDependenceInvolvingLoopInvariantAddress ||
3074 HasLoadStoreDependenceInvolvingLoopInvariantAddress
3075 ? ""
3076 : "not ")
3077 << "found in loop.\n";
3078
3079 OS.indent(Depth) << "SCEV assumptions:\n";
3080 PSE->getPredicate().print(OS, Depth);
3081
3082 OS << "\n";
3083
3084 OS.indent(Depth) << "Expressions re-written:\n";
3085 PSE->print(OS, Depth);
3086}
3087
3088 const LoopAccessInfo &LoopAccessInfoManager::getInfo(Loop &L) {
3089 const auto &[It, Inserted] = LoopAccessInfoMap.insert({&L, nullptr});
3090
3091 if (Inserted)
3092 It->second =
3093 std::make_unique<LoopAccessInfo>(&L, &SE, TTI, TLI, &AA, &DT, &LI);
3094
3095 return *It->second;
3096}
3097 void LoopAccessInfoManager::clear() {
3098 SmallVector<Loop *> ToRemove;
3099 // Collect LoopAccessInfo entries that may keep references to IR outside the
3100 // analyzed loop or SCEVs that may have been modified or invalidated. At the
3101 // moment, that is loops requiring memory or SCEV runtime checks, as those cache
3102 // SCEVs, e.g. for pointer expressions.
3103 for (const auto &[L, LAI] : LoopAccessInfoMap) {
3104 if (LAI->getRuntimePointerChecking()->getChecks().empty() &&
3105 LAI->getPSE().getPredicate().isAlwaysTrue())
3106 continue;
3107 ToRemove.push_back(L);
3108 }
3109
3110 for (Loop *L : ToRemove)
3111 LoopAccessInfoMap.erase(L);
3112}
3113
3114 bool LoopAccessInfoManager::invalidate(
3115 Function &F, const PreservedAnalyses &PA,
3116 FunctionAnalysisManager::Invalidator &Inv) {
3117 // Check whether our analysis is preserved.
3118 auto PAC = PA.getChecker<LoopAccessAnalysis>();
3119 if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
3120 // If not, give up now.
3121 return true;
3122
3123 // Check whether the analyses we depend on became invalid for any reason.
3124 // Skip checking TargetLibraryAnalysis as it is immutable and can't become
3125 // invalid.
3126 return Inv.invalidate<AAManager>(F, PA) ||
3127 Inv.invalidate<ScalarEvolutionAnalysis>(F, PA) ||
3128 Inv.invalidate<LoopAnalysis>(F, PA) ||
3129 Inv.invalidate<DominatorTreeAnalysis>(F, PA);
3130}
3131
3132 LoopAccessInfoManager LoopAccessAnalysis::run(Function &F,
3133 FunctionAnalysisManager &FAM) {
3134 auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(F);
3135 auto &AA = FAM.getResult<AAManager>(F);
3136 auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
3137 auto &LI = FAM.getResult<LoopAnalysis>(F);
3138 auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
3139 auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
3140 return LoopAccessInfoManager(SE, AA, DT, LI, &TTI, &TLI);
3141}
3142
3143AnalysisKey LoopAccessAnalysis::Key;
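The listing ends with the definition of LoopAccessAnalysis::Key, which makes the analysis available through the new pass manager. As a rough usage sketch, and not code from this file, a function pass could fetch the LoopAccessInfoManager from the FunctionAnalysisManager and query the per-loop results; the pass name and the choice to walk only top-level loops are assumptions made for the example.

// Hypothetical printer pass built on the analysis implemented above.
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/raw_ostream.h"

namespace {
struct PrintLoopAccessInfoPass
    : llvm::PassInfoMixin<PrintLoopAccessInfoPass> {
  llvm::PreservedAnalyses run(llvm::Function &F,
                              llvm::FunctionAnalysisManager &FAM) {
    // LoopAccessAnalysis::run (above) hands back a LoopAccessInfoManager.
    auto &LAIs = FAM.getResult<llvm::LoopAccessAnalysis>(F);
    auto &LI = FAM.getResult<llvm::LoopAnalysis>(F);
    for (llvm::Loop *L : LI) {
      // getInfo lazily builds and caches the LoopAccessInfo for L.
      const llvm::LoopAccessInfo &LAI = LAIs.getInfo(*L);
      LAI.print(llvm::errs(), /*Depth=*/2);
    }
    return llvm::PreservedAnalyses::all();
  }
};
} // namespace

Because getInfo caches its results per loop, LoopAccessInfoManager::clear and invalidate above exist to drop or invalidate those cached entries when the IR, or the analyses they depend on, change.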