LLVM 20.0.0git
LoopAccessAnalysis.cpp
1//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// The implementation of the loop memory dependence analysis that was
10// originally developed for the loop vectorizer.
11//
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/APInt.h"
16#include "llvm/ADT/DenseMap.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/SetVector.h"
22#include "llvm/ADT/SmallSet.h"
37#include "llvm/IR/BasicBlock.h"
38#include "llvm/IR/Constants.h"
39#include "llvm/IR/DataLayout.h"
40#include "llvm/IR/DebugLoc.h"
43#include "llvm/IR/Dominators.h"
44#include "llvm/IR/Function.h"
46#include "llvm/IR/InstrTypes.h"
47#include "llvm/IR/Instruction.h"
49#include "llvm/IR/Operator.h"
50#include "llvm/IR/PassManager.h"
52#include "llvm/IR/Type.h"
53#include "llvm/IR/Value.h"
54#include "llvm/IR/ValueHandle.h"
57#include "llvm/Support/Debug.h"
60#include <algorithm>
61#include <cassert>
62#include <cstdint>
63#include <iterator>
64#include <utility>
65#include <variant>
66#include <vector>
67
68using namespace llvm;
69using namespace llvm::PatternMatch;
70
71#define DEBUG_TYPE "loop-accesses"
72
74VectorizationFactor("force-vector-width", cl::Hidden,
75 cl::desc("Sets the SIMD width. Zero is autoselect."),
78
80VectorizationInterleave("force-vector-interleave", cl::Hidden,
81 cl::desc("Sets the vectorization interleave count. "
82 "Zero is autoselect."),
86
88 "runtime-memory-check-threshold", cl::Hidden,
89 cl::desc("When performing memory disambiguation checks at runtime do not "
90 "generate more than this number of comparisons (default = 8)."),
93
94/// The maximum number of iterations used when merging memory checks.
96 "memory-check-merge-threshold", cl::Hidden,
97 cl::desc("Maximum number of comparisons done when trying to merge "
98 "runtime memory checks. (default = 100)"),
99 cl::init(100));
100
101/// Maximum SIMD width.
102const unsigned VectorizerParams::MaxVectorWidth = 64;
103
104/// We collect dependences up to this threshold.
106 MaxDependences("max-dependences", cl::Hidden,
107 cl::desc("Maximum number of dependences collected by "
108 "loop-access analysis (default = 100)"),
109 cl::init(100));
110
111/// This enables versioning on the strides of symbolically striding memory
112/// accesses in code like the following.
113/// for (i = 0; i < N; ++i)
114/// A[i * Stride1] += B[i * Stride2] ...
115///
116/// Will be roughly translated to
117/// if (Stride1 == 1 && Stride2 == 1) {
118/// for (i = 0; i < N; i+=4)
119/// A[i:i+3] += ...
120/// } else
121/// ...
123 "enable-mem-access-versioning", cl::init(true), cl::Hidden,
124 cl::desc("Enable symbolic stride memory access versioning"));
125
126/// Enable store-to-load forwarding conflict detection. This option can
127/// be disabled for correctness testing.
129 "store-to-load-forwarding-conflict-detection", cl::Hidden,
130 cl::desc("Enable conflict detection in loop-access analysis"),
131 cl::init(true));
132
134 "max-forked-scev-depth", cl::Hidden,
135 cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"),
136 cl::init(5));
137
139 "laa-speculate-unit-stride", cl::Hidden,
140 cl::desc("Speculate that non-constant strides are unit in LAA"),
141 cl::init(true));
142
144 "hoist-runtime-checks", cl::Hidden,
145 cl::desc(
146 "Hoist inner loop runtime memory checks to outer loop if possible"),
149
151 return ::VectorizationInterleave.getNumOccurrences() > 0;
152}
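
// A quick way to exercise the knobs above is to print LAA's results from
// opt. The flag spellings follow the cl::opt definitions in this file; the
// printer pass name below ("print<access-info>") is the usual new-PM
// spelling but may differ between releases, so treat this as a sketch:
//
//   opt -passes='print<access-info>' -disable-output \
//       -runtime-memory-check-threshold=4 \
//       -memory-check-merge-threshold=50 input.ll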
153
155 const DenseMap<Value *, const SCEV *> &PtrToStride,
156 Value *Ptr) {
157 const SCEV *OrigSCEV = PSE.getSCEV(Ptr);
158
159 // If there is an entry in the map return the SCEV of the pointer with the
160 // symbolic stride replaced by one.
162 if (SI == PtrToStride.end())
163 // For a non-symbolic stride, just return the original expression.
164 return OrigSCEV;
165
166 const SCEV *StrideSCEV = SI->second;
167 // Note: This assert is both overly strong and overly weak. The actual
168 // invariant here is that StrideSCEV should be loop invariant. The only
169 // such invariant strides we happen to speculate right now are unknowns
170 // and thus this is a reasonable proxy of the actual invariant.
171 assert(isa<SCEVUnknown>(StrideSCEV) && "shouldn't be in map");
172
173 ScalarEvolution *SE = PSE.getSE();
174 const auto *CT = SE->getOne(StrideSCEV->getType());
175 PSE.addPredicate(*SE->getEqualPredicate(StrideSCEV, CT));
176 auto *Expr = PSE.getSCEV(Ptr);
177
178 LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
179 << " by: " << *Expr << "\n");
180 return Expr;
181}
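
// As an illustration of the rewrite above (names and exact SCEV printing are
// approximate): for a C loop such as
//
//   for (i = 0; i < n; i++)
//     A[i * Stride] += 1;
//
// the pointer's SCEV is roughly {%A,+,(4 * %Stride)}<%loop>. If %Stride was
// collected as a symbolic stride, adding the predicate "%Stride == 1" lets
// PSE re-fold the pointer to {%A,+,4}<%loop>, which the remaining analysis
// can treat as a simple unit-stride access.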
182
184 unsigned Index, RuntimePointerChecking &RtCheck)
185 : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),
186 AddressSpace(RtCheck.Pointers[Index]
187 .PointerValue->getType()
189 NeedsFreeze(RtCheck.Pointers[Index].NeedsFreeze) {
191}
192
193/// Calculate Start and End points of memory access.
194/// Let's assume A is the first access and B is a memory access on the N-th
195/// loop iteration. Then B is calculated as:
196/// B = A + Step*N .
197/// Step value may be positive or negative.
198/// N is a calculated back-edge taken count:
199/// N = (TripCount > 0) ? RoundDown(TripCount - 1, VF) : 0
200/// Start and End points are calculated in the following way:
201/// Start = UMIN(A, B) ; End = UMAX(A, B) + SizeOfElt,
202/// where SizeOfElt is the size of single memory access in bytes.
203///
204/// There is no conflict when the intervals are disjoint:
205/// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
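///
/// Worked example (for illustration; assumes a 4-byte element type): for an
/// access A[i] with i = 0 .. TC-1, the AddRec is {A,+,4} and the back-edge
/// taken count is TC-1, so ScStart = A and ScEnd = A + 4*(TC-1); after adding
/// SizeOfElt the interval is [A, A + 4*TC). For a negative step (e.g.
/// A[TC-1-i]) Start and End are swapped before the element size is added.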
206static std::pair<const SCEV *, const SCEV *> getStartAndEndForAccess(
207 const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy,
209 DenseMap<std::pair<const SCEV *, Type *>,
210 std::pair<const SCEV *, const SCEV *>> &PointerBounds) {
211 ScalarEvolution *SE = PSE.getSE();
212
213 auto [Iter, Ins] = PointerBounds.insert(
214 {{PtrExpr, AccessTy},
215 {SE->getCouldNotCompute(), SE->getCouldNotCompute()}});
216 if (!Ins)
217 return Iter->second;
218
219 const SCEV *ScStart;
220 const SCEV *ScEnd;
221
222 if (SE->isLoopInvariant(PtrExpr, Lp)) {
223 ScStart = ScEnd = PtrExpr;
224 } else if (auto *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr)) {
225 const SCEV *Ex = PSE.getSymbolicMaxBackedgeTakenCount();
226
227 ScStart = AR->getStart();
228 ScEnd = AR->evaluateAtIteration(Ex, *SE);
229 const SCEV *Step = AR->getStepRecurrence(*SE);
230
231 // For expressions with negative step, the upper bound is ScStart and the
232 // lower bound is ScEnd.
233 if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
234 if (CStep->getValue()->isNegative())
235 std::swap(ScStart, ScEnd);
236 } else {
237 // Fallback case: the step is not constant, but we can still
238 // get the upper and lower bounds of the interval by using min/max
239 // expressions.
240 ScStart = SE->getUMinExpr(ScStart, ScEnd);
241 ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
242 }
243 } else
244 return {SE->getCouldNotCompute(), SE->getCouldNotCompute()};
245
246 assert(SE->isLoopInvariant(ScStart, Lp) && "ScStart needs to be invariant");
247 assert(SE->isLoopInvariant(ScEnd, Lp) && "ScEnd needs to be invariant");
248
249 // Add the size of the pointed element to ScEnd.
250 auto &DL = Lp->getHeader()->getDataLayout();
251 Type *IdxTy = DL.getIndexType(PtrExpr->getType());
252 const SCEV *EltSizeSCEV = SE->getStoreSizeOfExpr(IdxTy, AccessTy);
253 ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);
254
255 Iter->second = {ScStart, ScEnd};
256 return Iter->second;
257}
258
259/// Calculate Start and End points of memory access using
260/// getStartAndEndForAccess.
262 Type *AccessTy, bool WritePtr,
263 unsigned DepSetId, unsigned ASId,
265 bool NeedsFreeze) {
266 const auto &[ScStart, ScEnd] = getStartAndEndForAccess(
267 Lp, PtrExpr, AccessTy, PSE, DC.getPointerBounds());
268 assert(!isa<SCEVCouldNotCompute>(ScStart) &&
269 !isa<SCEVCouldNotCompute>(ScEnd) &&
270 "must be able to compute both start and end expressions");
271 Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, PtrExpr,
272 NeedsFreeze);
273}
274
275bool RuntimePointerChecking::tryToCreateDiffCheck(
276 const RuntimeCheckingPtrGroup &CGI, const RuntimeCheckingPtrGroup &CGJ) {
277 // If either group contains multiple different pointers, bail out.
278 // TODO: Support multiple pointers by using the minimum or maximum pointer,
279 // depending on src & sink.
280 if (CGI.Members.size() != 1 || CGJ.Members.size() != 1)
281 return false;
282
283 PointerInfo *Src = &Pointers[CGI.Members[0]];
284 PointerInfo *Sink = &Pointers[CGJ.Members[0]];
285
286 // If either pointer is read and written, multiple checks may be needed. Bail
287 // out.
288 if (!DC.getOrderForAccess(Src->PointerValue, !Src->IsWritePtr).empty() ||
289 !DC.getOrderForAccess(Sink->PointerValue, !Sink->IsWritePtr).empty())
290 return false;
291
292 ArrayRef<unsigned> AccSrc =
293 DC.getOrderForAccess(Src->PointerValue, Src->IsWritePtr);
294 ArrayRef<unsigned> AccSink =
295 DC.getOrderForAccess(Sink->PointerValue, Sink->IsWritePtr);
296 // If either pointer is accessed multiple times, there may not be a clear
297 // src/sink relation. Bail out for now.
298 if (AccSrc.size() != 1 || AccSink.size() != 1)
299 return false;
300
301 // If the sink is accessed before src, swap src/sink.
302 if (AccSink[0] < AccSrc[0])
303 std::swap(Src, Sink);
304
305 auto *SrcAR = dyn_cast<SCEVAddRecExpr>(Src->Expr);
306 auto *SinkAR = dyn_cast<SCEVAddRecExpr>(Sink->Expr);
307 if (!SrcAR || !SinkAR || SrcAR->getLoop() != DC.getInnermostLoop() ||
308 SinkAR->getLoop() != DC.getInnermostLoop())
309 return false;
310
312 DC.getInstructionsForAccess(Src->PointerValue, Src->IsWritePtr);
314 DC.getInstructionsForAccess(Sink->PointerValue, Sink->IsWritePtr);
315 Type *SrcTy = getLoadStoreType(SrcInsts[0]);
316 Type *DstTy = getLoadStoreType(SinkInsts[0]);
317 if (isa<ScalableVectorType>(SrcTy) || isa<ScalableVectorType>(DstTy))
318 return false;
319
320 const DataLayout &DL =
321 SinkAR->getLoop()->getHeader()->getDataLayout();
322 unsigned AllocSize =
323 std::max(DL.getTypeAllocSize(SrcTy), DL.getTypeAllocSize(DstTy));
324
325 // Only constant steps matching the AllocSize are supported at the moment.
326 // This simplifies the difference computation. Can be extended in the
327 // future.
328 auto *Step = dyn_cast<SCEVConstant>(SinkAR->getStepRecurrence(*SE));
329 if (!Step || Step != SrcAR->getStepRecurrence(*SE) ||
330 Step->getAPInt().abs() != AllocSize)
331 return false;
332
333 IntegerType *IntTy =
334 IntegerType::get(Src->PointerValue->getContext(),
335 DL.getPointerSizeInBits(CGI.AddressSpace));
336
337 // When counting down, the dependence distance needs to be swapped.
338 if (Step->getValue()->isNegative())
339 std::swap(SinkAR, SrcAR);
340
341 const SCEV *SinkStartInt = SE->getPtrToIntExpr(SinkAR->getStart(), IntTy);
342 const SCEV *SrcStartInt = SE->getPtrToIntExpr(SrcAR->getStart(), IntTy);
343 if (isa<SCEVCouldNotCompute>(SinkStartInt) ||
344 isa<SCEVCouldNotCompute>(SrcStartInt))
345 return false;
346
347 const Loop *InnerLoop = SrcAR->getLoop();
348 // If the start values for both Src and Sink also vary according to an outer
349 // loop, then it's probably better to avoid creating diff checks because
350 // they may not be hoisted. We should instead let llvm::addRuntimeChecks
351 // do the expanded full range overlap checks, which can be hoisted.
352 if (HoistRuntimeChecks && InnerLoop->getParentLoop() &&
353 isa<SCEVAddRecExpr>(SinkStartInt) && isa<SCEVAddRecExpr>(SrcStartInt)) {
354 auto *SrcStartAR = cast<SCEVAddRecExpr>(SrcStartInt);
355 auto *SinkStartAR = cast<SCEVAddRecExpr>(SinkStartInt);
356 const Loop *StartARLoop = SrcStartAR->getLoop();
357 if (StartARLoop == SinkStartAR->getLoop() &&
358 StartARLoop == InnerLoop->getParentLoop() &&
359 // If the diff check would already be loop invariant (due to the
360 // recurrences being the same), then we prefer to keep the diff checks
361 // because they are cheaper.
362 SrcStartAR->getStepRecurrence(*SE) !=
363 SinkStartAR->getStepRecurrence(*SE)) {
364 LLVM_DEBUG(dbgs() << "LAA: Not creating diff runtime check, since these "
365 "cannot be hoisted out of the outer loop\n");
366 return false;
367 }
368 }
369
370 LLVM_DEBUG(dbgs() << "LAA: Creating diff runtime check for:\n"
371 << "SrcStart: " << *SrcStartInt << '\n'
372 << "SinkStartInt: " << *SinkStartInt << '\n');
373 DiffChecks.emplace_back(SrcStartInt, SinkStartInt, AllocSize,
374 Src->NeedsFreeze || Sink->NeedsFreeze);
375 return true;
376}
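
// The PointerDiffInfo recorded above is later expanded (by
// llvm::addDiffRuntimeChecks in LoopUtils) into roughly a single unsigned
// comparison per pair, of the shape
//
//   (SinkStartInt - SrcStartInt) u< VF * IC * AllocSize   ==> conflict
//
// possibly after freezing the operands, instead of the interval-overlap
// comparisons a full check would need. This is only the shape of the check;
// the exact expansion is owned by the runtime-check emission code.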
377
378SmallVector<RuntimePointerCheck, 4> RuntimePointerChecking::generateChecks() {
380
381 for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
382 for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
385
386 if (needsChecking(CGI, CGJ)) {
387 CanUseDiffCheck = CanUseDiffCheck && tryToCreateDiffCheck(CGI, CGJ);
388 Checks.push_back(std::make_pair(&CGI, &CGJ));
389 }
390 }
391 }
392 return Checks;
393}
394
395void RuntimePointerChecking::generateChecks(
396 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
397 assert(Checks.empty() && "Checks is not empty");
398 groupChecks(DepCands, UseDependencies);
399 Checks = generateChecks();
400}
401
403 const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
404 for (const auto &I : M.Members)
405 for (const auto &J : N.Members)
406 if (needsChecking(I, J))
407 return true;
408 return false;
409}
410
411/// Compare \p I and \p J and return the minimum.
412/// Return nullptr in case we couldn't find an answer.
413static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
414 ScalarEvolution *SE) {
415 const SCEV *Diff = SE->getMinusSCEV(J, I);
416 const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff);
417
418 if (!C)
419 return nullptr;
420 return C->getValue()->isNegative() ? J : I;
421}
422
424 RuntimePointerChecking &RtCheck) {
425 return addPointer(
426 Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,
427 RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
428 RtCheck.Pointers[Index].NeedsFreeze, *RtCheck.SE);
429}
430
432 const SCEV *End, unsigned AS,
433 bool NeedsFreeze,
434 ScalarEvolution &SE) {
435 assert(AddressSpace == AS &&
436 "all pointers in a checking group must be in the same address space");
437
438 // Compare the starts and ends with the known minimum and maximum
439 // of this set. We need to know how we compare against the min/max
440 // of the set in order to be able to emit memchecks.
441 const SCEV *Min0 = getMinFromExprs(Start, Low, &SE);
442 if (!Min0)
443 return false;
444
445 const SCEV *Min1 = getMinFromExprs(End, High, &SE);
446 if (!Min1)
447 return false;
448
449 // Update the low bound expression if we've found a new min value.
450 if (Min0 == Start)
451 Low = Start;
452
453 // Update the high bound expression if we've found a new max value.
454 if (Min1 != End)
455 High = End;
456
458 this->NeedsFreeze |= NeedsFreeze;
459 return true;
460}
461
462void RuntimePointerChecking::groupChecks(
463 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
464 // We build the groups from dependency candidates equivalence classes
465 // because:
466 // - We know that pointers in the same equivalence class share
467 // the same underlying object and therefore there is a chance
468 // that we can compare pointers
469 // - We wouldn't be able to merge two pointers for which we need
470 // to emit a memcheck. The classes in DepCands are already
471 // conveniently built such that no two pointers in the same
472 // class need checking against each other.
473
474 // We use the following (greedy) algorithm to construct the groups
475 // For every pointer in the equivalence class:
476 // For each existing group:
477 // - if the difference between this pointer and the min/max bounds
478 // of the group is a constant, then make the pointer part of the
479 // group and update the min/max bounds of that group as required.
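  //
  // Worked example (illustrative): for accesses A[i], A[i + 16] and
  // A[i + 32] off the same base, the group bounds grow to
  //   Low  = &A[0]
  //   High = &A[(n - 1) + 32] + sizeof(A[0])
  // so a single [Low, High) interval stands in for all three pointers when
  // comparing against other groups.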
480
481 CheckingGroups.clear();
482
483 // If we need to check two pointers to the same underlying object
484 // with a non-constant difference, we shouldn't perform any pointer
485 // grouping with those pointers. This is because we can easily get
486 // into cases where the resulting check would return false, even when
487 // the accesses are safe.
488 //
489 // The following example shows this:
490 // for (i = 0; i < 1000; ++i)
491 // a[5000 + i * m] = a[i] + a[i + 9000]
492 //
493 // Here grouping gives a check of (5000, 5000 + 1000 * m) against
494 // (0, 10000) which is always false. However, if m is 1, there is no
495 // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
496 // us to perform an accurate check in this case.
497 //
498 // The above case requires that we have an UnknownDependence between
499 // accesses to the same underlying object. This cannot happen unless
500 // FoundNonConstantDistanceDependence is set, and therefore UseDependencies
501 // is also false. In this case we will use the fallback path and create
502 // separate checking groups for all pointers.
503
504 // If we don't have the dependency partitions, construct a new
505 // checking pointer group for each pointer. This is also required
506 // for correctness, because in this case we can have checking between
507 // pointers to the same underlying object.
508 if (!UseDependencies) {
509 for (unsigned I = 0; I < Pointers.size(); ++I)
510 CheckingGroups.push_back(RuntimeCheckingPtrGroup(I, *this));
511 return;
512 }
513
514 unsigned TotalComparisons = 0;
515
517 for (unsigned Index = 0; Index < Pointers.size(); ++Index) {
518 auto [It, _] = PositionMap.insert({Pointers[Index].PointerValue, {}});
519 It->second.push_back(Index);
520 }
521
522 // We need to keep track of what pointers we've already seen so we
523 // don't process them twice.
525
526 // Go through all equivalence classes, get the "pointer check groups"
527 // and add them to the overall solution. We use the order in which accesses
528 // appear in 'Pointers' to enforce determinism.
529 for (unsigned I = 0; I < Pointers.size(); ++I) {
530 // We've seen this pointer before, and therefore already processed
531 // its equivalence class.
532 if (Seen.count(I))
533 continue;
534
535 MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
536 Pointers[I].IsWritePtr);
537
539 auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));
540
541 // Because DepCands is constructed by visiting accesses in the order in
542 // which they appear in alias sets (which is deterministic) and the
543 // iteration order within an equivalence class member is only dependent on
544 // the order in which unions and insertions are performed on the
545 // equivalence class, the iteration order is deterministic.
546 for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
547 MI != ME; ++MI) {
548 auto PointerI = PositionMap.find(MI->getPointer());
549 assert(PointerI != PositionMap.end() &&
550 "pointer in equivalence class not found in PositionMap");
551 for (unsigned Pointer : PointerI->second) {
552 bool Merged = false;
553 // Mark this pointer as seen.
554 Seen.insert(Pointer);
555
556 // Go through all the existing sets and see if we can find one
557 // which can include this pointer.
558 for (RuntimeCheckingPtrGroup &Group : Groups) {
559 // Don't perform more than a certain amount of comparisons.
560 // This should limit the cost of grouping the pointers to something
561 // reasonable. If we do end up hitting this threshold, the algorithm
562 // will create separate groups for all remaining pointers.
563 if (TotalComparisons > MemoryCheckMergeThreshold)
564 break;
565
566 TotalComparisons++;
567
568 if (Group.addPointer(Pointer, *this)) {
569 Merged = true;
570 break;
571 }
572 }
573
574 if (!Merged)
575 // We couldn't add this pointer to any existing set or the threshold
576 // for the number of comparisons has been reached. Create a new group
577 // to hold the current pointer.
578 Groups.push_back(RuntimeCheckingPtrGroup(Pointer, *this));
579 }
580 }
581
582 // We've computed the grouped checks for this partition.
583 // Save the results and continue with the next one.
584 llvm::copy(Groups, std::back_inserter(CheckingGroups));
585 }
586}
587
589 const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
590 unsigned PtrIdx2) {
591 return (PtrToPartition[PtrIdx1] != -1 &&
592 PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
593}
594
595bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
596 const PointerInfo &PointerI = Pointers[I];
597 const PointerInfo &PointerJ = Pointers[J];
598
599 // No need to check if two readonly pointers intersect.
600 if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
601 return false;
602
603 // Only need to check pointers between two different dependency sets.
604 if (PointerI.DependencySetId == PointerJ.DependencySetId)
605 return false;
606
607 // Only need to check pointers in the same alias set.
608 if (PointerI.AliasSetId != PointerJ.AliasSetId)
609 return false;
610
611 return true;
612}
613
616 unsigned Depth) const {
617 unsigned N = 0;
618 for (const auto &[Check1, Check2] : Checks) {
619 const auto &First = Check1->Members, &Second = Check2->Members;
620
621 OS.indent(Depth) << "Check " << N++ << ":\n";
622
623 OS.indent(Depth + 2) << "Comparing group (" << Check1 << "):\n";
624 for (unsigned K : First)
625 OS.indent(Depth + 2) << *Pointers[K].PointerValue << "\n";
626
627 OS.indent(Depth + 2) << "Against group (" << Check2 << "):\n";
628 for (unsigned K : Second)
629 OS.indent(Depth + 2) << *Pointers[K].PointerValue << "\n";
630 }
631}
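
// For a loop needing one check between two groups, the output produced above
// looks roughly like this (group addresses and value names vary per run):
//
//   Check 0:
//     Comparing group (0x55e3f...):
//       %arrayidxA = getelementptr inbounds i32, ptr %a, i64 %iv
//     Against group (0x55e40...):
//       %arrayidxB = getelementptr inbounds i32, ptr %b, i64 %iv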
632
634
635 OS.indent(Depth) << "Run-time memory checks:\n";
636 printChecks(OS, Checks, Depth);
637
638 OS.indent(Depth) << "Grouped accesses:\n";
639 for (const auto &CG : CheckingGroups) {
640 OS.indent(Depth + 2) << "Group " << &CG << ":\n";
641 OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
642 << ")\n";
643 for (unsigned Member : CG.Members) {
644 OS.indent(Depth + 6) << "Member: " << *Pointers[Member].Expr << "\n";
645 }
646 }
647}
648
649namespace {
650
651/// Analyses memory accesses in a loop.
652///
653/// Checks whether run time pointer checks are needed and builds sets for data
654/// dependence checking.
655class AccessAnalysis {
656public:
657 /// Read or write access location.
658 typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
659 typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;
660
661 AccessAnalysis(Loop *TheLoop, AAResults *AA, LoopInfo *LI,
664 SmallPtrSetImpl<MDNode *> &LoopAliasScopes)
665 : TheLoop(TheLoop), BAA(*AA), AST(BAA), LI(LI), DepCands(DA), PSE(PSE),
666 LoopAliasScopes(LoopAliasScopes) {
667 // We're analyzing dependences across loop iterations.
668 BAA.enableCrossIterationMode();
669 }
670
671 /// Register a load and whether it is only read from.
672 void addLoad(MemoryLocation &Loc, Type *AccessTy, bool IsReadOnly) {
673 Value *Ptr = const_cast<Value *>(Loc.Ptr);
674 AST.add(adjustLoc(Loc));
675 Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy);
676 if (IsReadOnly)
677 ReadOnlyPtr.insert(Ptr);
678 }
679
680 /// Register a store.
681 void addStore(MemoryLocation &Loc, Type *AccessTy) {
682 Value *Ptr = const_cast<Value *>(Loc.Ptr);
683 AST.add(adjustLoc(Loc));
684 Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy);
685 }
686
687 /// Check if we can emit a run-time no-alias check for \p Access.
688 ///
689 /// Returns true if we can emit a run-time no alias check for \p Access.
690 /// If we can check this access, this also adds it to a dependence set and
691 /// adds a run-time check for it to \p RtCheck. If \p Assume is true,
692 /// we will attempt to use additional run-time checks in order to get
693 /// the bounds of the pointer.
694 bool createCheckForAccess(RuntimePointerChecking &RtCheck,
695 MemAccessInfo Access, Type *AccessTy,
696 const DenseMap<Value *, const SCEV *> &Strides,
698 Loop *TheLoop, unsigned &RunningDepId,
699 unsigned ASId, bool ShouldCheckStride, bool Assume);
700
701 /// Check whether we can check the pointers at runtime for
702 /// non-intersection.
703 ///
704 /// Returns true if we need no check or if we do and we can generate them
705 /// (i.e. the pointers have computable bounds).
706 bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
707 Loop *TheLoop, const DenseMap<Value *, const SCEV *> &Strides,
708 Value *&UncomputablePtr, bool ShouldCheckWrap = false);
709
710 /// Goes over all memory accesses, checks whether a RT check is needed
711 /// and builds sets of dependent accesses.
712 void buildDependenceSets() {
713 processMemAccesses();
714 }
715
716 /// Initial processing of memory accesses determined that we need to
717 /// perform dependency checking.
718 ///
719 /// Note that this can later be cleared if we retry memcheck analysis without
720 /// dependency checking (i.e. FoundNonConstantDistanceDependence).
721 bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }
722
723 /// We decided that no dependence analysis would be used. Reset the state.
724 void resetDepChecks(MemoryDepChecker &DepChecker) {
725 CheckDeps.clear();
726 DepChecker.clearDependences();
727 }
728
729 MemAccessInfoList &getDependenciesToCheck() { return CheckDeps; }
730
733 return UnderlyingObjects;
734 }
735
736private:
738
739 /// Adjust the MemoryLocation so that it represents accesses to this
740 /// location across all iterations, rather than a single one.
741 MemoryLocation adjustLoc(MemoryLocation Loc) const {
742 // The accessed location varies within the loop, but remains within the
743 // underlying object.
745 Loc.AATags.Scope = adjustAliasScopeList(Loc.AATags.Scope);
746 Loc.AATags.NoAlias = adjustAliasScopeList(Loc.AATags.NoAlias);
747 return Loc;
748 }
749
750 /// Drop alias scopes that are only valid within a single loop iteration.
751 MDNode *adjustAliasScopeList(MDNode *ScopeList) const {
752 if (!ScopeList)
753 return nullptr;
754
755 // For the sake of simplicity, drop the whole scope list if any scope is
756 // iteration-local.
757 if (any_of(ScopeList->operands(), [&](Metadata *Scope) {
758 return LoopAliasScopes.contains(cast<MDNode>(Scope));
759 }))
760 return nullptr;
761
762 return ScopeList;
763 }
764
765 /// Go over all memory accesses and check whether runtime pointer checks
766 /// are needed and build sets of dependency check candidates.
767 void processMemAccesses();
768
769 /// Map of all accesses. Values are the types used to access memory pointed to
770 /// by the pointer.
771 PtrAccessMap Accesses;
772
773 /// The loop being checked.
774 const Loop *TheLoop;
775
776 /// List of accesses that need a further dependence check.
777 MemAccessInfoList CheckDeps;
778
779 /// Set of pointers that are read only.
780 SmallPtrSet<Value*, 16> ReadOnlyPtr;
781
782 /// Batched alias analysis results.
783 BatchAAResults BAA;
784
785 /// An alias set tracker to partition the access set by underlying object and
786 /// intrinsic property (such as TBAA metadata).
787 AliasSetTracker AST;
788
789 /// The LoopInfo of the loop being checked.
790 const LoopInfo *LI;
791
792 /// Sets of potentially dependent accesses - members of one set share an
793 /// underlying pointer. The set "CheckDeps" identifies which sets really need a
794 /// dependence check.
796
797 /// Initial processing of memory accesses determined that we may need
798 /// to add memchecks. Perform the analysis to determine the necessary checks.
799 ///
800 /// Note that this is different from isDependencyCheckNeeded. When we retry
801 /// memcheck analysis without dependency checking
802 /// (i.e. FoundNonConstantDistanceDependence), isDependencyCheckNeeded is
803 /// cleared while this remains set if we have potentially dependent accesses.
804 bool IsRTCheckAnalysisNeeded = false;
805
806 /// The SCEV predicate containing all the SCEV-related assumptions.
808
810
811 /// Alias scopes that are declared inside the loop, and as such not valid
812 /// across iterations.
813 SmallPtrSetImpl<MDNode *> &LoopAliasScopes;
814};
815
816} // end anonymous namespace
817
818/// Check whether a pointer can participate in a runtime bounds check.
819/// If \p Assume, try harder to prove that we can compute the bounds of \p Ptr
820/// by adding run-time checks (overflow checks) if necessary.
822 const SCEV *PtrScev, Loop *L, bool Assume) {
823 // The bounds for a loop-invariant pointer are trivial.
824 if (PSE.getSE()->isLoopInvariant(PtrScev, L))
825 return true;
826
827 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
828
829 if (!AR && Assume)
830 AR = PSE.getAsAddRec(Ptr);
831
832 if (!AR)
833 return false;
834
835 return AR->isAffine();
836}
837
838/// Check whether a pointer address cannot wrap.
840 const DenseMap<Value *, const SCEV *> &Strides, Value *Ptr, Type *AccessTy,
841 Loop *L) {
842 const SCEV *PtrScev = PSE.getSCEV(Ptr);
843 if (PSE.getSE()->isLoopInvariant(PtrScev, L))
844 return true;
845
846 int64_t Stride = getPtrStride(PSE, AccessTy, Ptr, L, Strides).value_or(0);
847 if (Stride == 1 || PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
848 return true;
849
850 return false;
851}
852
853static void visitPointers(Value *StartPtr, const Loop &InnermostLoop,
854 function_ref<void(Value *)> AddPointer) {
856 SmallVector<Value *> WorkList;
857 WorkList.push_back(StartPtr);
858
859 while (!WorkList.empty()) {
860 Value *Ptr = WorkList.pop_back_val();
861 if (!Visited.insert(Ptr).second)
862 continue;
863 auto *PN = dyn_cast<PHINode>(Ptr);
864 // SCEV does not look through non-header PHIs inside the loop. Such phis
865 // can be analyzed by adding separate accesses for each incoming pointer
866 // value.
867 if (PN && InnermostLoop.contains(PN->getParent()) &&
868 PN->getParent() != InnermostLoop.getHeader()) {
869 for (const Use &Inc : PN->incoming_values())
870 WorkList.push_back(Inc);
871 } else
872 AddPointer(Ptr);
873 }
874}
875
876// Walk back through the IR for a pointer, looking for a select like the
877// following:
878//
879// %offset = select i1 %cmp, i64 %a, i64 %b
880// %addr = getelementptr double, double* %base, i64 %offset
881// %ld = load double, double* %addr, align 8
882//
883// We won't be able to form a single SCEVAddRecExpr from this since the
884// address for each loop iteration depends on %cmp. We could potentially
885// produce multiple valid SCEVAddRecExprs, though, and check all of them for
886// memory safety/aliasing if needed.
887//
888// If we encounter some IR we don't yet handle, or something obviously fine
889// like a constant, then we just add the SCEV for that term to the list passed
890// in by the caller. If we have a node that may potentially yield a valid
891// SCEVAddRecExpr then we decompose it into parts and build the SCEV terms
892// ourselves before adding to the list.
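//
// For the select example above, the two SCEVs produced for %addr are roughly
//   (%base + (8 * %a))  and  (%base + (8 * %b))
// (8 being sizeof(double); the exact SCEV form and nowrap flags vary), each
// possibly flagged as needing a freeze, and both are then checked against the
// other accesses in the loop.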
893static void findForkedSCEVs(
894 ScalarEvolution *SE, const Loop *L, Value *Ptr,
896 unsigned Depth) {
897 // If our Value is a SCEVAddRecExpr, loop invariant, not an instruction, or
898 // we've exceeded our limit on recursion, just return whatever we have
899 // regardless of whether it can be used for a forked pointer or not, along
900 // with an indication of whether it might be a poison or undef value.
901 const SCEV *Scev = SE->getSCEV(Ptr);
902 if (isa<SCEVAddRecExpr>(Scev) || L->isLoopInvariant(Ptr) ||
903 !isa<Instruction>(Ptr) || Depth == 0) {
904 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
905 return;
906 }
907
908 Depth--;
909
910 auto UndefPoisonCheck = [](PointerIntPair<const SCEV *, 1, bool> S) {
911 return get<1>(S);
912 };
913
914 auto GetBinOpExpr = [&SE](unsigned Opcode, const SCEV *L, const SCEV *R) {
915 switch (Opcode) {
916 case Instruction::Add:
917 return SE->getAddExpr(L, R);
918 case Instruction::Sub:
919 return SE->getMinusSCEV(L, R);
920 default:
921 llvm_unreachable("Unexpected binary operator when walking ForkedPtrs");
922 }
923 };
924
925 Instruction *I = cast<Instruction>(Ptr);
926 unsigned Opcode = I->getOpcode();
927 switch (Opcode) {
928 case Instruction::GetElementPtr: {
929 GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
930 Type *SourceTy = GEP->getSourceElementType();
931 // We only handle base + single offset GEPs here for now.
932 // Not dealing with preexisting gathers yet, so no vectors.
933 if (I->getNumOperands() != 2 || SourceTy->isVectorTy()) {
934 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(GEP));
935 break;
936 }
939 findForkedSCEVs(SE, L, I->getOperand(0), BaseScevs, Depth);
940 findForkedSCEVs(SE, L, I->getOperand(1), OffsetScevs, Depth);
941
942 // See if we need to freeze our fork...
943 bool NeedsFreeze = any_of(BaseScevs, UndefPoisonCheck) ||
944 any_of(OffsetScevs, UndefPoisonCheck);
945
946 // Check that we only have a single fork, on either the base or the offset.
947 // Copy the SCEV across for the one without a fork in order to generate
948 // the full SCEV for both sides of the GEP.
949 if (OffsetScevs.size() == 2 && BaseScevs.size() == 1)
950 BaseScevs.push_back(BaseScevs[0]);
951 else if (BaseScevs.size() == 2 && OffsetScevs.size() == 1)
952 OffsetScevs.push_back(OffsetScevs[0]);
953 else {
954 ScevList.emplace_back(Scev, NeedsFreeze);
955 break;
956 }
957
958 // Find the pointer type we need to extend to.
959 Type *IntPtrTy = SE->getEffectiveSCEVType(
960 SE->getSCEV(GEP->getPointerOperand())->getType());
961
962 // Find the size of the type being pointed to. We only have a single
963 // index term (guarded above) so we don't need to index into arrays or
964 // structures, just get the size of the scalar value.
965 const SCEV *Size = SE->getSizeOfExpr(IntPtrTy, SourceTy);
966
967 // Scale up the offsets by the size of the type, then add to the bases.
968 const SCEV *Scaled1 = SE->getMulExpr(
969 Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[0]), IntPtrTy));
970 const SCEV *Scaled2 = SE->getMulExpr(
971 Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[1]), IntPtrTy));
972 ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[0]), Scaled1),
973 NeedsFreeze);
974 ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[1]), Scaled2),
975 NeedsFreeze);
976 break;
977 }
978 case Instruction::Select: {
980 // A select means we've found a forked pointer, but we currently only
981 // support a single select per pointer so if there's another behind this
982 // then we just bail out and return the generic SCEV.
983 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
984 findForkedSCEVs(SE, L, I->getOperand(2), ChildScevs, Depth);
985 if (ChildScevs.size() == 2) {
986 ScevList.push_back(ChildScevs[0]);
987 ScevList.push_back(ChildScevs[1]);
988 } else
989 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
990 break;
991 }
992 case Instruction::PHI: {
994 // A phi means we've found a forked pointer, but we currently only
995 // support a single phi per pointer so if there's another behind this
996 // then we just bail out and return the generic SCEV.
997 if (I->getNumOperands() == 2) {
998 findForkedSCEVs(SE, L, I->getOperand(0), ChildScevs, Depth);
999 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
1000 }
1001 if (ChildScevs.size() == 2) {
1002 ScevList.push_back(ChildScevs[0]);
1003 ScevList.push_back(ChildScevs[1]);
1004 } else
1005 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
1006 break;
1007 }
1008 case Instruction::Add:
1009 case Instruction::Sub: {
1012 findForkedSCEVs(SE, L, I->getOperand(0), LScevs, Depth);
1013 findForkedSCEVs(SE, L, I->getOperand(1), RScevs, Depth);
1014
1015 // See if we need to freeze our fork...
1016 bool NeedsFreeze =
1017 any_of(LScevs, UndefPoisonCheck) || any_of(RScevs, UndefPoisonCheck);
1018
1019 // Check that we only have a single fork, on either the left or right side.
1020 // Copy the SCEV across for the one without a fork in order to generate
1021 // the full SCEV for both sides of the BinOp.
1022 if (LScevs.size() == 2 && RScevs.size() == 1)
1023 RScevs.push_back(RScevs[0]);
1024 else if (RScevs.size() == 2 && LScevs.size() == 1)
1025 LScevs.push_back(LScevs[0]);
1026 else {
1027 ScevList.emplace_back(Scev, NeedsFreeze);
1028 break;
1029 }
1030
1031 ScevList.emplace_back(
1032 GetBinOpExpr(Opcode, get<0>(LScevs[0]), get<0>(RScevs[0])),
1033 NeedsFreeze);
1034 ScevList.emplace_back(
1035 GetBinOpExpr(Opcode, get<0>(LScevs[1]), get<0>(RScevs[1])),
1036 NeedsFreeze);
1037 break;
1038 }
1039 default:
1040 // Just return the current SCEV if we haven't handled the instruction yet.
1041 LLVM_DEBUG(dbgs() << "ForkedPtr unhandled instruction: " << *I << "\n");
1042 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
1043 break;
1044 }
1045}
1046
1049 const DenseMap<Value *, const SCEV *> &StridesMap, Value *Ptr,
1050 const Loop *L) {
1051 ScalarEvolution *SE = PSE.getSE();
1052 assert(SE->isSCEVable(Ptr->getType()) && "Value is not SCEVable!");
1054 findForkedSCEVs(SE, L, Ptr, Scevs, MaxForkedSCEVDepth);
1055
1056 // For now, we will only accept a forked pointer with two possible SCEVs
1057 // that are either SCEVAddRecExprs or loop invariant.
1058 if (Scevs.size() == 2 &&
1059 (isa<SCEVAddRecExpr>(get<0>(Scevs[0])) ||
1060 SE->isLoopInvariant(get<0>(Scevs[0]), L)) &&
1061 (isa<SCEVAddRecExpr>(get<0>(Scevs[1])) ||
1062 SE->isLoopInvariant(get<0>(Scevs[1]), L))) {
1063 LLVM_DEBUG(dbgs() << "LAA: Found forked pointer: " << *Ptr << "\n");
1064 LLVM_DEBUG(dbgs() << "\t(1) " << *get<0>(Scevs[0]) << "\n");
1065 LLVM_DEBUG(dbgs() << "\t(2) " << *get<0>(Scevs[1]) << "\n");
1066 return Scevs;
1067 }
1068
1069 return {{replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false}};
1070}
1071
1072bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
1073 MemAccessInfo Access, Type *AccessTy,
1074 const DenseMap<Value *, const SCEV *> &StridesMap,
1076 Loop *TheLoop, unsigned &RunningDepId,
1077 unsigned ASId, bool ShouldCheckWrap,
1078 bool Assume) {
1079 Value *Ptr = Access.getPointer();
1080
1082 findForkedPointer(PSE, StridesMap, Ptr, TheLoop);
1083
1084 for (auto &P : TranslatedPtrs) {
1085 const SCEV *PtrExpr = get<0>(P);
1086 if (!hasComputableBounds(PSE, Ptr, PtrExpr, TheLoop, Assume))
1087 return false;
1088
1089 // When we run after a failing dependency check we have to make sure
1090 // we don't have wrapping pointers.
1091 if (ShouldCheckWrap) {
1092 // Skip wrap checking when translating pointers.
1093 if (TranslatedPtrs.size() > 1)
1094 return false;
1095
1096 if (!isNoWrap(PSE, StridesMap, Ptr, AccessTy, TheLoop)) {
1097 auto *Expr = PSE.getSCEV(Ptr);
1098 if (!Assume || !isa<SCEVAddRecExpr>(Expr))
1099 return false;
1101 }
1102 }
1103 // If there's only one option for Ptr, look it up after bounds and wrap
1104 // checking, because assumptions might have been added to PSE.
1105 if (TranslatedPtrs.size() == 1)
1106 TranslatedPtrs[0] = {replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr),
1107 false};
1108 }
1109
1110 for (auto [PtrExpr, NeedsFreeze] : TranslatedPtrs) {
1111 // The id of the dependence set.
1112 unsigned DepId;
1113
1114 if (isDependencyCheckNeeded()) {
1115 Value *Leader = DepCands.getLeaderValue(Access).getPointer();
1116 unsigned &LeaderId = DepSetId[Leader];
1117 if (!LeaderId)
1118 LeaderId = RunningDepId++;
1119 DepId = LeaderId;
1120 } else
1121 // Each access has its own dependence set.
1122 DepId = RunningDepId++;
1123
1124 bool IsWrite = Access.getInt();
1125 RtCheck.insert(TheLoop, Ptr, PtrExpr, AccessTy, IsWrite, DepId, ASId, PSE,
1126 NeedsFreeze);
1127 LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
1128 }
1129
1130 return true;
1131}
1132
1133bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
1134 ScalarEvolution *SE, Loop *TheLoop,
1135 const DenseMap<Value *, const SCEV *> &StridesMap,
1136 Value *&UncomputablePtr, bool ShouldCheckWrap) {
1137 // Find pointers with computable bounds. We are going to use this information
1138 // to place a runtime bound check.
1139 bool CanDoRT = true;
1140
1141 bool MayNeedRTCheck = false;
1142 if (!IsRTCheckAnalysisNeeded) return true;
1143
1144 bool IsDepCheckNeeded = isDependencyCheckNeeded();
1145
1146 // We assign a consecutive id to accesses from different alias sets.
1147 // Accesses between different groups don't need to be checked.
1148 unsigned ASId = 0;
1149 for (auto &AS : AST) {
1150 int NumReadPtrChecks = 0;
1151 int NumWritePtrChecks = 0;
1152 bool CanDoAliasSetRT = true;
1153 ++ASId;
1154 auto ASPointers = AS.getPointers();
1155
1156 // We assign consecutive ids to accesses from different dependence sets.
1157 // Accesses within the same set don't need a runtime check.
1158 unsigned RunningDepId = 1;
1160
1162
1163 // First, count how many write and read accesses are in the alias set. Also
1164 // collect MemAccessInfos for later.
1166 for (const Value *ConstPtr : ASPointers) {
1167 Value *Ptr = const_cast<Value *>(ConstPtr);
1168 bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
1169 if (IsWrite)
1170 ++NumWritePtrChecks;
1171 else
1172 ++NumReadPtrChecks;
1173 AccessInfos.emplace_back(Ptr, IsWrite);
1174 }
1175
1176 // We do not need runtime checks for this alias set if there are no writes,
1177 // or only a single write and no reads.
1178 if (NumWritePtrChecks == 0 ||
1179 (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
1180 assert((ASPointers.size() <= 1 ||
1181 all_of(ASPointers,
1182 [this](const Value *Ptr) {
1183 MemAccessInfo AccessWrite(const_cast<Value *>(Ptr),
1184 true);
1185 return DepCands.findValue(AccessWrite) == DepCands.end();
1186 })) &&
1187 "Can only skip updating CanDoRT below, if all entries in AS "
1188 "are reads or there is at most 1 entry");
1189 continue;
1190 }
1191
1192 for (auto &Access : AccessInfos) {
1193 for (const auto &AccessTy : Accesses[Access]) {
1194 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1195 DepSetId, TheLoop, RunningDepId, ASId,
1196 ShouldCheckWrap, false)) {
1197 LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
1198 << *Access.getPointer() << '\n');
1199 Retries.push_back({Access, AccessTy});
1200 CanDoAliasSetRT = false;
1201 }
1202 }
1203 }
1204
1205 // Note that this function computes CanDoRT and MayNeedRTCheck
1206 // independently. For example CanDoRT=false, MayNeedRTCheck=false means that
1207 // we have a pointer for which we couldn't find the bounds but we don't
1208 // actually need to emit any checks so it does not matter.
1209 //
1210 // We need runtime checks for this alias set, if there are at least 2
1211 // dependence sets (in which case RunningDepId > 2) or if we need to re-try
1212 // any bound checks (because in that case the number of dependence sets is
1213 // incomplete).
1214 bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();
1215
1216 // We need to perform run-time alias checks, but some pointers had bounds
1217 // that couldn't be checked.
1218 if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
1219 // Reset the CanDoAliasSetRT flag and retry all accesses that have failed.
1220 // We know that we need these checks, so we can now be more aggressive
1221 // and add further checks if required (overflow checks).
1222 CanDoAliasSetRT = true;
1223 for (const auto &[Access, AccessTy] : Retries) {
1224 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1225 DepSetId, TheLoop, RunningDepId, ASId,
1226 ShouldCheckWrap, /*Assume=*/true)) {
1227 CanDoAliasSetRT = false;
1228 UncomputablePtr = Access.getPointer();
1229 break;
1230 }
1231 }
1232 }
1233
1234 CanDoRT &= CanDoAliasSetRT;
1235 MayNeedRTCheck |= NeedsAliasSetRTCheck;
1236 ++ASId;
1237 }
1238
1239 // If the pointers that we would use for the bounds comparison have different
1240 // address spaces, assume the values aren't directly comparable, so we can't
1241 // use them for the runtime check. We also have to assume they could
1242 // overlap. In the future there should be metadata for whether address spaces
1243 // are disjoint.
1244 unsigned NumPointers = RtCheck.Pointers.size();
1245 for (unsigned i = 0; i < NumPointers; ++i) {
1246 for (unsigned j = i + 1; j < NumPointers; ++j) {
1247 // Only need to check pointers between two different dependency sets.
1248 if (RtCheck.Pointers[i].DependencySetId ==
1249 RtCheck.Pointers[j].DependencySetId)
1250 continue;
1251 // Only need to check pointers in the same alias set.
1252 if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
1253 continue;
1254
1255 Value *PtrI = RtCheck.Pointers[i].PointerValue;
1256 Value *PtrJ = RtCheck.Pointers[j].PointerValue;
1257
1258 unsigned ASi = PtrI->getType()->getPointerAddressSpace();
1259 unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
1260 if (ASi != ASj) {
1261 LLVM_DEBUG(
1262 dbgs() << "LAA: Runtime check would require comparison between"
1263 " different address spaces\n");
1264 return false;
1265 }
1266 }
1267 }
1268
1269 if (MayNeedRTCheck && CanDoRT)
1270 RtCheck.generateChecks(DepCands, IsDepCheckNeeded);
1271
1272 LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
1273 << " pointer comparisons.\n");
1274
1275 // If we can do run-time checks, but there are no checks, no runtime checks
1276 // are needed. This can happen when all pointers point to the same underlying
1277 // object for example.
1278 RtCheck.Need = CanDoRT ? RtCheck.getNumberOfChecks() != 0 : MayNeedRTCheck;
1279
1280 bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
1281 if (!CanDoRTIfNeeded)
1282 RtCheck.reset();
1283 return CanDoRTIfNeeded;
1284}
1285
1286void AccessAnalysis::processMemAccesses() {
1287 // We process the set twice: first we process read-write pointers, last we
1288 // process read-only pointers. This allows us to skip dependence tests for
1289 // read-only pointers.
1290
1291 LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
1292 LLVM_DEBUG(dbgs() << " AST: "; AST.dump());
1293 LLVM_DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
1294 LLVM_DEBUG({
1295 for (const auto &[A, _] : Accesses)
1296 dbgs() << "\t" << *A.getPointer() << " ("
1297 << (A.getInt() ? "write"
1298 : (ReadOnlyPtr.count(A.getPointer()) ? "read-only"
1299 : "read"))
1300 << ")\n";
1301 });
1302
1303 // The AliasSetTracker has nicely partitioned our pointers by metadata
1304 // compatibility and potential for underlying-object overlap. As a result, we
1305 // only need to check for potential pointer dependencies within each alias
1306 // set.
1307 for (const auto &AS : AST) {
1308 // Note that both the alias-set tracker and the alias sets themselves use
1309 // ordered collections internally and so the iteration order here is
1310 // deterministic.
1311 auto ASPointers = AS.getPointers();
1312
1313 bool SetHasWrite = false;
1314
1315 // Map of pointers to last access encountered.
1316 typedef DenseMap<const Value*, MemAccessInfo> UnderlyingObjToAccessMap;
1317 UnderlyingObjToAccessMap ObjToLastAccess;
1318
1319 // Set of accesses to check after all writes have been processed.
1320 PtrAccessMap DeferredAccesses;
1321
1322 // Iterate over each alias set twice, once to process read/write pointers,
1323 // and then to process read-only pointers.
1324 for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
1325 bool UseDeferred = SetIteration > 0;
1326 PtrAccessMap &S = UseDeferred ? DeferredAccesses : Accesses;
1327
1328 for (const Value *ConstPtr : ASPointers) {
1329 Value *Ptr = const_cast<Value *>(ConstPtr);
1330
1331 // For a single memory access in AliasSetTracker, Accesses may contain
1332 // both read and write, and they both need to be handled for CheckDeps.
1333 for (const auto &[AC, _] : S) {
1334 if (AC.getPointer() != Ptr)
1335 continue;
1336
1337 bool IsWrite = AC.getInt();
1338
1339 // If we're using the deferred access set, then it contains only
1340 // reads.
1341 bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
1342 if (UseDeferred && !IsReadOnlyPtr)
1343 continue;
1344 // Otherwise, the pointer must be in the PtrAccessSet, either as a
1345 // read or a write.
1346 assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
1347 S.count(MemAccessInfo(Ptr, false))) &&
1348 "Alias-set pointer not in the access set?");
1349
1350 MemAccessInfo Access(Ptr, IsWrite);
1351 DepCands.insert(Access);
1352
1353 // Memorize read-only pointers for later processing and skip them in
1354 // the first round (they need to be checked after we have seen all
1355 // write pointers). Note: we also mark pointers that are not
1356 // consecutive as "read-only" pointers (so that we check
1357 // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
1358 if (!UseDeferred && IsReadOnlyPtr) {
1359 // We only use the pointer keys, the types vector values don't
1360 // matter.
1361 DeferredAccesses.insert({Access, {}});
1362 continue;
1363 }
1364
1365 // If this is a write, check other reads and writes for conflicts. If
1366 // this is a read, only check other writes for conflicts (but only if
1367 // there is no other write to the ptr - this is an optimization to
1368 // catch "a[i] = a[i] + " without having to do a dependence check).
1369 if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
1370 CheckDeps.push_back(Access);
1371 IsRTCheckAnalysisNeeded = true;
1372 }
1373
1374 if (IsWrite)
1375 SetHasWrite = true;
1376
1377 // Create sets of pointers connected by a shared alias set and
1378 // underlying object.
1379 typedef SmallVector<const Value *, 16> ValueVector;
1380 ValueVector TempObjects;
1381
1382 UnderlyingObjects[Ptr] = {};
1383 SmallVector<const Value *, 16> &UOs = UnderlyingObjects[Ptr];
1384 ::getUnderlyingObjects(Ptr, UOs, LI);
1386 << "Underlying objects for pointer " << *Ptr << "\n");
1387 for (const Value *UnderlyingObj : UOs) {
1388 // nullptr never aliases; don't join sets for pointers that have "null"
1389 // in their UnderlyingObjects list.
1390 if (isa<ConstantPointerNull>(UnderlyingObj) &&
1392 TheLoop->getHeader()->getParent(),
1393 UnderlyingObj->getType()->getPointerAddressSpace()))
1394 continue;
1395
1396 UnderlyingObjToAccessMap::iterator Prev =
1397 ObjToLastAccess.find(UnderlyingObj);
1398 if (Prev != ObjToLastAccess.end())
1399 DepCands.unionSets(Access, Prev->second);
1400
1401 ObjToLastAccess[UnderlyingObj] = Access;
1402 LLVM_DEBUG(dbgs() << " " << *UnderlyingObj << "\n");
1403 }
1404 }
1405 }
1406 }
1407 }
1408}
1409
1410/// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
1411/// i.e. monotonically increasing/decreasing.
1412static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
1413 PredicatedScalarEvolution &PSE, const Loop *L) {
1414
1415 // FIXME: This should probably only return true for NUW.
1417 return true;
1418
1420 return true;
1421
1422 // Scalar evolution does not propagate the non-wrapping flags to values that
1423 // are derived from a non-wrapping induction variable because non-wrapping
1424 // could be flow-sensitive.
1425 //
1426 // Look through the potentially overflowing instruction to try to prove
1427 // non-wrapping for the *specific* value of Ptr.
1428
1429 // The arithmetic implied by an inbounds GEP can't overflow.
1430 auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
1431 if (!GEP || !GEP->isInBounds())
1432 return false;
1433
1434 // Make sure there is only one non-const index and analyze that.
1435 Value *NonConstIndex = nullptr;
1436 for (Value *Index : GEP->indices())
1437 if (!isa<ConstantInt>(Index)) {
1438 if (NonConstIndex)
1439 return false;
1440 NonConstIndex = Index;
1441 }
1442 if (!NonConstIndex)
1443 // The recurrence is on the pointer, ignore for now.
1444 return false;
1445
1446 // The index in GEP is signed. It is non-wrapping if it's derived from an NSW
1447 // AddRec using an NSW operation.
1448 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
1449 if (OBO->hasNoSignedWrap() &&
1450 // Assume the other operand is constant so that the AddRec can be
1451 // easily found.
1452 isa<ConstantInt>(OBO->getOperand(1))) {
1453 auto *OpScev = PSE.getSCEV(OBO->getOperand(0));
1454
1455 if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
1456 return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
1457 }
1458
1459 return false;
1460}
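
// Illustrative IR for the pattern accepted above (names made up): with %i an
// induction variable whose SCEV is an NSW AddRec on this loop,
//
//   %idx = mul nsw i64 %i, 4
//   %gep = getelementptr inbounds i32, ptr %p, i64 %idx
//
// has a single non-constant GEP index that is an NSW operation with a
// constant second operand over an NSW AddRec, so the address is treated as
// non-wrapping.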
1461
1462/// Check whether the access through \p Ptr has a constant stride.
1464 Type *AccessTy, Value *Ptr,
1465 const Loop *Lp,
1466 const DenseMap<Value *, const SCEV *> &StridesMap,
1467 bool Assume, bool ShouldCheckWrap) {
1468 Type *Ty = Ptr->getType();
1469 assert(Ty->isPointerTy() && "Unexpected non-ptr");
1470
1471 if (isa<ScalableVectorType>(AccessTy)) {
1472 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
1473 << "\n");
1474 return std::nullopt;
1475 }
1476
1477 const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);
1478
1479 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
1480 if (Assume && !AR)
1481 AR = PSE.getAsAddRec(Ptr);
1482
1483 if (!AR) {
1484 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
1485 << " SCEV: " << *PtrScev << "\n");
1486 return std::nullopt;
1487 }
1488
1489 // The access function must stride over the innermost loop.
1490 if (Lp != AR->getLoop()) {
1491 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
1492 << *Ptr << " SCEV: " << *AR << "\n");
1493 return std::nullopt;
1494 }
1495
1496 // Check the step is constant.
1497 const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
1498
1499 // Calculate the pointer stride and check if it is constant.
1500 const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
1501 if (!C) {
1502 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
1503 << " SCEV: " << *AR << "\n");
1504 return std::nullopt;
1505 }
1506
1507 auto &DL = Lp->getHeader()->getDataLayout();
1508 TypeSize AllocSize = DL.getTypeAllocSize(AccessTy);
1509 int64_t Size = AllocSize.getFixedValue();
1510 const APInt &APStepVal = C->getAPInt();
1511
1512 // Huge step value - give up.
1513 if (APStepVal.getBitWidth() > 64)
1514 return std::nullopt;
1515
1516 int64_t StepVal = APStepVal.getSExtValue();
1517
1518 // Strided access.
1519 int64_t Stride = StepVal / Size;
1520 int64_t Rem = StepVal % Size;
1521 if (Rem)
1522 return std::nullopt;
1523
1524 if (!ShouldCheckWrap)
1525 return Stride;
1526
1527 // The address calculation must not wrap. Otherwise, a dependence could be
1528 // inverted.
1529 if (isNoWrapAddRec(Ptr, AR, PSE, Lp))
1530 return Stride;
1531
1532 // An inbounds getelementptr that is an AddRec with a unit stride
1533 // cannot wrap per definition. If it did, the result would be poison
1534 // and any memory access dependent on it would be immediate UB
1535 // when executed.
1536 if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
1537 GEP && GEP->isInBounds() && (Stride == 1 || Stride == -1))
1538 return Stride;
1539
1540 // If the null pointer is undefined, then an access sequence which would
1541 // otherwise access it can be assumed not to unsigned wrap. Note that this
1542 // assumes the object in memory is aligned to the natural alignment.
1543 unsigned AddrSpace = Ty->getPointerAddressSpace();
1544 if (!NullPointerIsDefined(Lp->getHeader()->getParent(), AddrSpace) &&
1545 (Stride == 1 || Stride == -1))
1546 return Stride;
1547
1548 if (Assume) {
1550 LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap:\n"
1551 << "LAA: Pointer: " << *Ptr << "\n"
1552 << "LAA: SCEV: " << *AR << "\n"
1553 << "LAA: Added an overflow assumption\n");
1554 return Stride;
1555 }
1556 LLVM_DEBUG(
1557 dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
1558 << *Ptr << " SCEV: " << *AR << "\n");
1559 return std::nullopt;
1560}
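
// Worked example for the stride computation above (assuming a typical 64-bit
// data layout where double is 8 bytes): accessing A[3*i] of type double gives
// an AddRec step of 24 bytes and AllocSize 8, so the returned stride is 3;
// A[i] yields 1; a step of 20 bytes is not a multiple of 8 and yields
// std::nullopt.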
1561
1562std::optional<int> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA,
1563 Type *ElemTyB, Value *PtrB,
1564 const DataLayout &DL,
1565 ScalarEvolution &SE, bool StrictCheck,
1566 bool CheckType) {
1567 assert(PtrA && PtrB && "Expected non-nullptr pointers.");
1568
1569 // Make sure that A and B are different pointers.
1570 if (PtrA == PtrB)
1571 return 0;
1572
1573 // Make sure that the element types are the same if required.
1574 if (CheckType && ElemTyA != ElemTyB)
1575 return std::nullopt;
1576
1577 unsigned ASA = PtrA->getType()->getPointerAddressSpace();
1578 unsigned ASB = PtrB->getType()->getPointerAddressSpace();
1579
1580 // Check that the address spaces match.
1581 if (ASA != ASB)
1582 return std::nullopt;
1583 unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
1584
1585 APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
1586 Value *PtrA1 = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
1587 Value *PtrB1 = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);
1588
1589 int Val;
1590 if (PtrA1 == PtrB1) {
1591 // Retrieve the address space again as pointer stripping now tracks through
1592 // `addrspacecast`.
1593 ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
1594 ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
1595 // Check that the address spaces match and that the pointers are valid.
1596 if (ASA != ASB)
1597 return std::nullopt;
1598
1599 IdxWidth = DL.getIndexSizeInBits(ASA);
1600 OffsetA = OffsetA.sextOrTrunc(IdxWidth);
1601 OffsetB = OffsetB.sextOrTrunc(IdxWidth);
1602
1603 OffsetB -= OffsetA;
1604 Val = OffsetB.getSExtValue();
1605 } else {
1606 // Otherwise compute the distance with SCEV between the base pointers.
1607 const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
1608 const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
1609 const auto *Diff =
1610 dyn_cast<SCEVConstant>(SE.getMinusSCEV(PtrSCEVB, PtrSCEVA));
1611 if (!Diff)
1612 return std::nullopt;
1613 Val = Diff->getAPInt().getSExtValue();
1614 }
1615 int Size = DL.getTypeStoreSize(ElemTyA);
1616 int Dist = Val / Size;
1617
1618 // Ensure that the calculated distance matches the type-based one after all
1619 // the bitcasts removal in the provided pointers.
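// E.g. with i32 elements and PtrB at a +12 byte offset from PtrA the result
// is 3; under StrictCheck a +13 byte offset yields std::nullopt because 13
// is not a multiple of the element store size.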
1620 if (!StrictCheck || Dist * Size == Val)
1621 return Dist;
1622 return std::nullopt;
1623}
1624
1625bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
1626 const DataLayout &DL, ScalarEvolution &SE,
1627 SmallVectorImpl<unsigned> &SortedIndices) {
1628 assert(llvm::all_of(
1629 VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
1630 "Expected list of pointer operands.");
1631 // Walk over the pointers, and map each of them to an offset relative to
1632 // the first pointer in the array.
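// E.g. i32 pointers at byte offsets {0, 8, 4} from VL[0] give element
// distances {0, 2, 1}; the accesses are not consecutive and SortedIndices
// becomes {0, 2, 1}, i.e. the memory order is VL[0], VL[2], VL[1].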
1633 Value *Ptr0 = VL[0];
1634
1635 using DistOrdPair = std::pair<int64_t, int>;
1636 auto Compare = llvm::less_first();
1637 std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
1638 Offsets.emplace(0, 0);
1639 bool IsConsecutive = true;
1640 for (auto [Idx, Ptr] : drop_begin(enumerate(VL))) {
1641 std::optional<int> Diff = getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
1642 /*StrictCheck=*/true);
1643 if (!Diff)
1644 return false;
1645
1646 // Check if the pointer with the same offset is found.
1647 int64_t Offset = *Diff;
1648 auto [It, IsInserted] = Offsets.emplace(Offset, Idx);
1649 if (!IsInserted)
1650 return false;
1651 // Consecutive order if the inserted element is the last one.
1652 IsConsecutive &= std::next(It) == Offsets.end();
1653 }
1654 SortedIndices.clear();
1655 if (!IsConsecutive) {
1656 // Fill SortedIndices array only if it is non-consecutive.
1657 SortedIndices.resize(VL.size());
1658 for (auto [Idx, Off] : enumerate(Offsets))
1659 SortedIndices[Idx] = Off.second;
1660 }
1661 return true;
1662}
1663
1664/// Returns true if the memory operations \p A and \p B are consecutive.
1665bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
1666 ScalarEvolution &SE, bool CheckType) {
1667 Value *PtrA = getLoadStorePointerOperand(A);
1668 Value *PtrB = getLoadStorePointerOperand(B);
1669 if (!PtrA || !PtrB)
1670 return false;
1671 Type *ElemTyA = getLoadStoreType(A);
1672 Type *ElemTyB = getLoadStoreType(B);
1673 std::optional<int> Diff =
1674 getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
1675 /*StrictCheck=*/true, CheckType);
1676 return Diff && *Diff == 1;
1677}
1678
1679void MemoryDepChecker::addAccess(StoreInst *SI) {
1680 visitPointers(SI->getPointerOperand(), *InnermostLoop,
1681 [this, SI](Value *Ptr) {
1682 Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
1683 InstMap.push_back(SI);
1684 ++AccessIdx;
1685 });
1686}
1687
1688void MemoryDepChecker::addAccess(LoadInst *LI) {
1689 visitPointers(LI->getPointerOperand(), *InnermostLoop,
1690 [this, LI](Value *Ptr) {
1691 Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
1692 InstMap.push_back(LI);
1693 ++AccessIdx;
1694 });
1695}
1696
1697MemoryDepChecker::VectorizationSafetyStatus
1698MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
1699 switch (Type) {
1700 case NoDep:
1701 case Forward:
1702 case BackwardVectorizable:
1703 return VectorizationSafetyStatus::Safe;
1704
1705 case Unknown:
1706 return VectorizationSafetyStatus::PossiblySafeWithRtChecks;
1707 case ForwardButPreventsForwarding:
1708 case Backward:
1709 case BackwardVectorizableButPreventsForwarding:
1710 case IndirectUnsafe:
1711 return VectorizationSafetyStatus::Unsafe;
1712 }
1713 llvm_unreachable("unexpected DepType!");
1714}
1715
1716bool MemoryDepChecker::Dependence::isBackward() const {
1717 switch (Type) {
1718 case NoDep:
1719 case Forward:
1720 case ForwardButPreventsForwarding:
1721 case Unknown:
1722 case IndirectUnsafe:
1723 return false;
1724
1725 case BackwardVectorizable:
1726 case Backward:
1727 case BackwardVectorizableButPreventsForwarding:
1728 return true;
1729 }
1730 llvm_unreachable("unexpected DepType!");
1731}
1732
1733bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
1734 return isBackward() || Type == Unknown || Type == IndirectUnsafe;
1735}
1736
1737bool MemoryDepChecker::Dependence::isForward() const {
1738 switch (Type) {
1739 case Forward:
1740 case ForwardButPreventsForwarding:
1741 return true;
1742
1743 case NoDep:
1744 case Unknown:
1745 case BackwardVectorizable:
1746 case Backward:
1747 case BackwardVectorizableButPreventsForwarding:
1748 case IndirectUnsafe:
1749 return false;
1750 }
1751 llvm_unreachable("unexpected DepType!");
1752}
1753
1754bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
1755 uint64_t TypeByteSize) {
1756 // If loads occur at a distance that is not a multiple of a feasible vector
1757 // factor, store-load forwarding does not take place.
1758 // Positive dependences might cause troubles because vectorizing them might
1759 // prevent store-load forwarding making vectorized code run a lot slower.
1760 // a[i] = a[i-3] ^ a[i-8];
1761 // The stores to a[i:i+1] don't align with the stores to a[i-3:i-2] and
1762 // hence on your typical architecture store-load forwarding does not take
1763 // place. Vectorizing in such cases does not make sense.
1764 // Store-load forwarding distance.
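// E.g. with TypeByteSize == 4 and Distance == 12, the loop below starts at
// VF == 8 bytes: 12 % 8 != 0 and 12 / 8 == 1 < 32 iterations, so the
// maximum VF without forwarding issues drops to 4 bytes and the distance is
// reported as conflicting.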
1765
1766 // After this many iterations store-to-load forwarding conflicts should not
1767 // cause any slowdowns.
1768 const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
1769 // Maximum vector factor.
1770 uint64_t MaxVFWithoutSLForwardIssues = std::min(
1771 VectorizerParams::MaxVectorWidth * TypeByteSize, MinDepDistBytes);
1772
1773 // Compute the smallest VF at which the store and load would be misaligned.
1774 for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
1775 VF *= 2) {
1776 // If the number of vector iterations between the store and the load is
1777 // small we could incur conflicts.
1778 if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
1779 MaxVFWithoutSLForwardIssues = (VF >> 1);
1780 break;
1781 }
1782 }
1783
1784 if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
1785 LLVM_DEBUG(
1786 dbgs() << "LAA: Distance " << Distance
1787 << " that could cause a store-load forwarding conflict\n");
1788 return true;
1789 }
1790
1791 if (MaxVFWithoutSLForwardIssues < MinDepDistBytes &&
1792 MaxVFWithoutSLForwardIssues !=
1793 VectorizerParams::MaxVectorWidth * TypeByteSize)
1794 MinDepDistBytes = MaxVFWithoutSLForwardIssues;
1795 return false;
1796}
1797
1798void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
1799 if (Status < S)
1800 Status = S;
1801}
1802
1803/// Given a dependence-distance \p Dist between two
1804/// memory accesses, that have strides in the same direction whose absolute
1805/// value of the maximum stride is given in \p MaxStride, and that have the same
1806/// type size \p TypeByteSize, in a loop whose maximum backedge taken count is
1807/// \p MaxBTC, check if it is possible to prove statically that the dependence
1808/// distance is larger than the range that the accesses will travel through the
1809/// execution of the loop. If so, return true; false otherwise. This is useful
1810/// for example in loops such as the following (PR31098):
1811/// for (i = 0; i < D; ++i) {
1812/// = out[i];
1813/// out[i+D] =
1814/// }
1815static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
1816 const SCEV &MaxBTC, const SCEV &Dist,
1817 uint64_t MaxStride,
1818 uint64_t TypeByteSize) {
1819
1820 // If we can prove that
1821 // (**) |Dist| > MaxBTC * Step
1822 // where Step is the absolute stride of the memory accesses in bytes,
1823 // then there is no dependence.
1824 //
1825 // Rationale:
1826 // We basically want to check if the absolute distance (|Dist/Step|)
1827 // is >= the loop iteration count (or > MaxBTC).
1828 // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
1829 // Section 4.2.1); Note, that for vectorization it is sufficient to prove
1830 // that the dependence distance is >= VF; This is checked elsewhere.
1831 // But in some cases we can prune dependence distances early, and
1832 // even before selecting the VF, and without a runtime test, by comparing
1833 // the distance against the loop iteration count. Since the vectorized code
1834 // will be executed only if LoopCount >= VF, proving distance >= LoopCount
1835 // also guarantees that distance >= VF.
1836 //
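// E.g. with MaxBTC == 99, MaxStride == 1 and TypeByteSize == 4, the accesses
// sweep at most 99 * 4 bytes, so a dependence distance known to be 400 bytes
// (or -400 bytes) proves independence.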
1837 const uint64_t ByteStride = MaxStride * TypeByteSize;
1838 const SCEV *Step = SE.getConstant(MaxBTC.getType(), ByteStride);
1839 const SCEV *Product = SE.getMulExpr(&MaxBTC, Step);
1840
1841 const SCEV *CastedDist = &Dist;
1842 const SCEV *CastedProduct = Product;
1843 uint64_t DistTypeSizeBits = DL.getTypeSizeInBits(Dist.getType());
1844 uint64_t ProductTypeSizeBits = DL.getTypeSizeInBits(Product->getType());
1845
1846 // The dependence distance can be positive/negative, so we sign extend Dist;
1847 // The multiplication of the absolute stride in bytes and the
1848 // backedgeTakenCount is non-negative, so we zero extend Product.
1849 if (DistTypeSizeBits > ProductTypeSizeBits)
1850 CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
1851 else
1852 CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());
1853
1854 // Is Dist - (MaxBTC * Step) > 0 ?
1855 // (If so, then we have proven (**) because |Dist| >= Dist)
1856 const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
1857 if (SE.isKnownPositive(Minus))
1858 return true;
1859
1860 // Second try: Is -Dist - (MaxBTC * Step) > 0 ?
1861 // (If so, then we have proven (**) because |Dist| >= -1*Dist)
1862 const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
1863 Minus = SE.getMinusSCEV(NegDist, CastedProduct);
1864 return SE.isKnownPositive(Minus);
1865}
1866
1867/// Check the dependence for two accesses with the same stride \p Stride.
1868/// \p Distance is the positive distance and \p TypeByteSize is type size in
1869/// bytes.
1870///
1871/// \returns true if they are independent.
1872static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
1873 uint64_t TypeByteSize) {
1874 assert(Stride > 1 && "The stride must be greater than 1");
1875 assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
1876 assert(Distance > 0 && "The distance must be non-zero");
1877
1878 // Skip if the distance is not a multiple of the type byte size.
1879 if (Distance % TypeByteSize)
1880 return false;
1881
1882 uint64_t ScaledDist = Distance / TypeByteSize;
1883
1884 // No dependence if the scaled distance is not a multiple of the stride.
1885 // E.g.
1886 // for (i = 0; i < 1024 ; i += 4)
1887 // A[i+2] = A[i] + 1;
1888 //
1889 // Two accesses in memory (scaled distance is 2, stride is 4):
1890 // | A[0] | | | | A[4] | | | |
1891 // | | | A[2] | | | | A[6] | |
1892 //
1893 // E.g.
1894 // for (i = 0; i < 1024 ; i += 3)
1895 // A[i+4] = A[i] + 1;
1896 //
1897 // Two accesses in memory (scaled distance is 4, stride is 3):
1898 // | A[0] | | | A[3] | | | A[6] | | |
1899 // | | | | | A[4] | | | A[7] | |
1900 return ScaledDist % Stride;
1901}
1902
1903/// Returns true if any of the underlying objects has a loop varying address,
1904/// i.e. may change in \p L.
1905static bool
1906isLoopVariantIndirectAddress(ArrayRef<const Value *> UnderlyingObjects,
1907 ScalarEvolution &SE, const Loop *L) {
1908 return any_of(UnderlyingObjects, [&SE, L](const Value *UO) {
1909 return !SE.isLoopInvariant(SE.getSCEV(const_cast<Value *>(UO)), L);
1910 });
1911}
1912
1913std::variant<MemoryDepChecker::Dependence::DepType,
1914 MemoryDepChecker::DepDistanceStrideAndSizeInfo>
1915MemoryDepChecker::getDependenceDistanceStrideAndSize(
1916 const MemAccessInfo &A, Instruction *AInst,
1917 const MemAccessInfo &B, Instruction *BInst,
1918 const DenseMap<Value *, SmallVector<const Value *, 16>>
1919 &UnderlyingObjects) {
1920 auto &DL = InnermostLoop->getHeader()->getDataLayout();
1921 auto &SE = *PSE.getSE();
1922 auto [APtr, AIsWrite] = A;
1923 auto [BPtr, BIsWrite] = B;
1924
1925 // Two reads are independent.
1926 if (!AIsWrite && !BIsWrite)
1927 return MemoryDepChecker::Dependence::NoDep;
1928
1929 Type *ATy = getLoadStoreType(AInst);
1930 Type *BTy = getLoadStoreType(BInst);
1931
1932 // We cannot check pointers in different address spaces.
1933 if (APtr->getType()->getPointerAddressSpace() !=
1934 BPtr->getType()->getPointerAddressSpace())
1935 return MemoryDepChecker::Dependence::Unknown;
1936
1937 int64_t StrideAPtr =
1938 getPtrStride(PSE, ATy, APtr, InnermostLoop, SymbolicStrides, true)
1939 .value_or(0);
1940 int64_t StrideBPtr =
1941 getPtrStride(PSE, BTy, BPtr, InnermostLoop, SymbolicStrides, true)
1942 .value_or(0);
1943
1944 const SCEV *Src = PSE.getSCEV(APtr);
1945 const SCEV *Sink = PSE.getSCEV(BPtr);
1946
1947 // If the induction step is negative we have to invert source and sink of the
1948 // dependence when measuring the distance between them. We should not swap
1949 // AIsWrite with BIsWrite, as their uses expect them in program order.
1950 if (StrideAPtr < 0) {
1951 std::swap(Src, Sink);
1952 std::swap(AInst, BInst);
1953 }
1954
1955 const SCEV *Dist = SE.getMinusSCEV(Sink, Src);
1956
1957 LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
1958 << "(Induction step: " << StrideAPtr << ")\n");
1959 LLVM_DEBUG(dbgs() << "LAA: Distance for " << *AInst << " to " << *BInst
1960 << ": " << *Dist << "\n");
1961
1962 // Needs accesses where the addresses of the accessed underlying objects do
1963 // not change within the loop.
1964 if (isLoopVariantIndirectAddress(UnderlyingObjects.find(APtr)->second, SE,
1965 InnermostLoop) ||
1966 isLoopVariantIndirectAddress(UnderlyingObjects.find(BPtr)->second, SE,
1967 InnermostLoop))
1968 return MemoryDepChecker::Dependence::IndirectUnsafe;
1969
1970 // Check if we can prove that Sink only accesses memory after Src's end or
1971 // vice versa. At the moment this is limited to cases where either source or
1972 // sink are loop invariant to avoid compile-time increases. This is not
1973 // required for correctness.
1974 if (SE.isLoopInvariant(Src, InnermostLoop) ||
1975 SE.isLoopInvariant(Sink, InnermostLoop)) {
1976 const auto &[SrcStart, SrcEnd] =
1977 getStartAndEndForAccess(InnermostLoop, Src, ATy, PSE, PointerBounds);
1978 const auto &[SinkStart, SinkEnd] =
1979 getStartAndEndForAccess(InnermostLoop, Sink, BTy, PSE, PointerBounds);
1980 if (!isa<SCEVCouldNotCompute>(SrcStart) &&
1981 !isa<SCEVCouldNotCompute>(SrcEnd) &&
1982 !isa<SCEVCouldNotCompute>(SinkStart) &&
1983 !isa<SCEVCouldNotCompute>(SinkEnd)) {
1984 if (SE.isKnownPredicate(CmpInst::ICMP_ULE, SrcEnd, SinkStart))
1985 return MemoryDepChecker::Dependence::NoDep;
1986 if (SE.isKnownPredicate(CmpInst::ICMP_ULE, SinkEnd, SrcStart))
1987 return MemoryDepChecker::Dependence::NoDep;
1988 }
1989 }
1990
1991 // Need accesses with constant strides and the same direction. We don't want
1992 // to vectorize "A[B[i]] += ..." and similar code or pointer arithmetic that
1993 // could wrap in the address space.
1994 if (!StrideAPtr || !StrideBPtr || (StrideAPtr > 0 && StrideBPtr < 0) ||
1995 (StrideAPtr < 0 && StrideBPtr > 0)) {
1996 LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
1997 return MemoryDepChecker::Dependence::Unknown;
1998 }
1999
2000 uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
2001 bool HasSameSize =
2002 DL.getTypeStoreSizeInBits(ATy) == DL.getTypeStoreSizeInBits(BTy);
2003 if (!HasSameSize)
2004 TypeByteSize = 0;
2005 return DepDistanceStrideAndSizeInfo(Dist, std::abs(StrideAPtr),
2006 std::abs(StrideBPtr), TypeByteSize,
2007 AIsWrite, BIsWrite);
2008}
2009
2010MemoryDepChecker::Dependence::DepType MemoryDepChecker::isDependent(
2011 const MemAccessInfo &A, unsigned AIdx, const MemAccessInfo &B,
2012 unsigned BIdx,
2013 const DenseMap<Value *, SmallVector<const Value *, 16>>
2014 &UnderlyingObjects) {
2015 assert(AIdx < BIdx && "Must pass arguments in program order");
2016
2017 // Get the dependence distance, stride, type size and what access writes for
2018 // the dependence between A and B.
2019 auto Res = getDependenceDistanceStrideAndSize(
2020 A, InstMap[AIdx], B, InstMap[BIdx], UnderlyingObjects);
2021 if (std::holds_alternative<Dependence::DepType>(Res))
2022 return std::get<Dependence::DepType>(Res);
2023
2024 auto &[Dist, StrideA, StrideB, TypeByteSize, AIsWrite, BIsWrite] =
2025 std::get<DepDistanceStrideAndSizeInfo>(Res);
2026 bool HasSameSize = TypeByteSize > 0;
2027
2028 std::optional<uint64_t> CommonStride =
2029 StrideA == StrideB ? std::make_optional(StrideA) : std::nullopt;
2030 if (isa<SCEVCouldNotCompute>(Dist)) {
2031 // TODO: Relax requirement that there is a common stride to retry with
2032 // non-constant distance dependencies.
2033 FoundNonConstantDistanceDependence |= CommonStride.has_value();
2034 LLVM_DEBUG(dbgs() << "LAA: Dependence because of uncomputable distance.\n");
2035 return Dependence::Unknown;
2036 }
2037
2038 ScalarEvolution &SE = *PSE.getSE();
2039 auto &DL = InnermostLoop->getHeader()->getDataLayout();
2040 uint64_t MaxStride = std::max(StrideA, StrideB);
2041
2042 // If the distance between the accesses is larger than their maximum absolute
2043 // stride multiplied by the symbolic maximum backedge taken count (which is an
2044 // upper bound of the number of iterations), the accesses are independent, i.e.
2045 // they are far enough apart that accesses won't access the same location
2046 // across all loop iterations.
2047 if (HasSameSize && isSafeDependenceDistance(
2048 DL, SE, *(PSE.getSymbolicMaxBackedgeTakenCount()),
2049 *Dist, MaxStride, TypeByteSize))
2050 return Dependence::NoDep;
2051
2052 const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
2053
2054 // Attempt to prove strided accesses independent.
2055 if (C) {
2056 const APInt &Val = C->getAPInt();
2057 int64_t Distance = Val.getSExtValue();
2058
2059 // If the distance between accesses and their strides are known constants,
2060 // check whether the accesses interlace each other.
2061 if (std::abs(Distance) > 0 && CommonStride && *CommonStride > 1 &&
2062 HasSameSize &&
2063 areStridedAccessesIndependent(std::abs(Distance), *CommonStride,
2064 TypeByteSize)) {
2065 LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
2066 return Dependence::NoDep;
2067 }
2068 } else
2069 Dist = SE.applyLoopGuards(Dist, InnermostLoop);
2070
2071 // Negative distances are not plausible dependencies.
2072 if (SE.isKnownNonPositive(Dist)) {
2073 if (SE.isKnownNonNegative(Dist)) {
2074 if (HasSameSize) {
2075 // Write to the same location with the same size.
2076 return Dependence::Forward;
2077 }
2078 LLVM_DEBUG(dbgs() << "LAA: possibly zero dependence difference but "
2079 "different type sizes\n");
2080 return Dependence::Unknown;
2081 }
2082
2083 bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
2084 // Check if the first access writes to a location that is read in a later
2085 // iteration, where the distance between them is not a multiple of a vector
2086 // factor and relatively small.
2087 //
2088 // NOTE: There is no need to update MaxSafeVectorWidthInBits after call to
2089 // couldPreventStoreLoadForward, even if it changed MinDepDistBytes, since a
2090 // forward dependency will allow vectorization using any width.
2091
2092 if (IsTrueDataDependence && EnableForwardingConflictDetection) {
2093 if (!C) {
2094 // TODO: FoundNonConstantDistanceDependence is used as a necessary
2095 // condition to consider retrying with runtime checks. Historically, we
2096 // did not set it when strides were different but there is no inherent
2097 // reason to.
2098 FoundNonConstantDistanceDependence |= CommonStride.has_value();
2099 return Dependence::Unknown;
2100 }
2101 if (!HasSameSize ||
2102 couldPreventStoreLoadForward(C->getAPInt().abs().getZExtValue(),
2103 TypeByteSize)) {
2104 LLVM_DEBUG(
2105 dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
2106 return Dependence::ForwardButPreventsForwarding;
2107 }
2108 }
2109
2110 LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
2111 return Dependence::Forward;
2112 }
2113
2114 int64_t MinDistance = SE.getSignedRangeMin(Dist).getSExtValue();
2115 // Below we only handle strictly positive distances.
2116 if (MinDistance <= 0) {
2117 FoundNonConstantDistanceDependence |= CommonStride.has_value();
2118 return Dependence::Unknown;
2119 }
2120
2121 if (!isa<SCEVConstant>(Dist)) {
2122 // Previously this case would be treated as Unknown, possibly setting
2123 // FoundNonConstantDistanceDependence to force re-trying with runtime
2124 // checks. Until the TODO below is addressed, set it here to preserve
2125 // original behavior w.r.t. re-trying with runtime checks.
2126 // TODO: FoundNonConstantDistanceDependence is used as a necessary
2127 // condition to consider retrying with runtime checks. Historically, we
2128 // did not set it when strides were different but there is no inherent
2129 // reason to.
2130 FoundNonConstantDistanceDependence |= CommonStride.has_value();
2131 }
2132
2133 if (!HasSameSize) {
2134 LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "
2135 "different type sizes\n");
2136 return Dependence::Unknown;
2137 }
2138
2139 if (!CommonStride)
2140 return Dependence::Unknown;
2141
2142 // Bail out early if passed-in parameters make vectorization not feasible.
2143 unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
2144 VectorizerParams::VectorizationFactor : 1);
2145 unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
2146 VectorizerParams::VectorizationInterleave : 1);
2147 // The minimum number of iterations for a vectorized/unrolled version.
2148 unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
2149
2150 // It's not vectorizable if the distance is smaller than the minimum distance
2151 // needed for a vectorized/unrolled version. Vectorizing one iteration in
2152 // front needs TypeByteSize * Stride. Vectorizing the last iteration needs
2153 // TypeByteSize (no need to add the last gap distance).
2154 //
2155 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
2156 // foo(int *A) {
2157 // int *B = (int *)((char *)A + 14);
2158 // for (i = 0 ; i < 1024 ; i += 2)
2159 // B[i] = A[i] + 1;
2160 // }
2161 //
2162 // Two accesses in memory (stride is 2):
2163 // | A[0] | | A[2] | | A[4] | | A[6] | |
2164 // | B[0] | | B[2] | | B[4] |
2165 //
2166 // MinDistance needed for vectorizing iterations except the last iteration:
2167 // 4 * 2 * (MinNumIter - 1). MinDistance needed for the last iteration: 4.
2168 // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
2169 //
2170 // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
2171 // 12, which is less than the distance of 14.
2172 //
2173 // If MinNumIter is 4 (Say if a user forces the vectorization factor to be 4),
2174 // the minimum distance needed is 28, which is greater than the distance of
2175 // 14. It is not safe to do vectorization.
2176
2177 // We know that Dist is positive, but it may not be constant. Use the signed
2178 // minimum for computations below, as this ensures we compute the closest
2179 // possible dependence distance.
2180 uint64_t MinDistanceNeeded =
2181 TypeByteSize * *CommonStride * (MinNumIter - 1) + TypeByteSize;
2182 if (MinDistanceNeeded > static_cast<uint64_t>(MinDistance)) {
2183 if (!isa<SCEVConstant>(Dist)) {
2184 // For non-constant distances, we checked the lower bound of the
2185 // dependence distance and the distance may be larger at runtime (and safe
2186 // for vectorization). Classify it as Unknown, so we re-try with runtime
2187 // checks.
2188 return Dependence::Unknown;
2189 }
2190 LLVM_DEBUG(dbgs() << "LAA: Failure because of positive minimum distance "
2191 << MinDistance << '\n');
2192 return Dependence::Backward;
2193 }
2194
2195 // Unsafe if the minimum distance needed is greater than the smallest
2196 // dependence distance.
2197 if (MinDistanceNeeded > MinDepDistBytes) {
2198 LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
2199 << MinDistanceNeeded << " size in bytes\n");
2200 return Dependence::Backward;
2201 }
2202
2203 // Positive distance bigger than max vectorization factor.
2204 // FIXME: Should use max factor instead of max distance in bytes, which could
2205 // not handle different types.
2206 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
2207 // void foo (int *A, char *B) {
2208 // for (unsigned i = 0; i < 1024; i++) {
2209 // A[i+2] = A[i] + 1;
2210 // B[i+2] = B[i] + 1;
2211 // }
2212 // }
2213 //
2214 // This case is currently unsafe according to the max safe distance. If we
2215 // analyze the two accesses on array B, the max safe dependence distance
2216 // is 2. Then we analyze the accesses on array A, the minimum distance needed
2217 // is 8, which is greater than 2, so vectorization is forbidden. But actually
2218 // both A and B could be vectorized by 2 iterations.
2219 MinDepDistBytes =
2220 std::min(static_cast<uint64_t>(MinDistance), MinDepDistBytes);
2221
2222 bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
2223 uint64_t MinDepDistBytesOld = MinDepDistBytes;
2224 if (IsTrueDataDependence && EnableForwardingConflictDetection &&
2225 isa<SCEVConstant>(Dist) &&
2226 couldPreventStoreLoadForward(MinDistance, TypeByteSize)) {
2227 // Sanity check that we didn't update MinDepDistBytes when calling
2228 // couldPreventStoreLoadForward
2229 assert(MinDepDistBytes == MinDepDistBytesOld &&
2230 "An update to MinDepDistBytes requires an update to "
2231 "MaxSafeVectorWidthInBits");
2232 (void)MinDepDistBytesOld;
2233 return Dependence::BackwardVectorizableButPreventsForwarding;
2234 }
2235
2236 // An update to MinDepDistBytes requires an update to MaxSafeVectorWidthInBits
2237 // since there is a backwards dependency.
2238 uint64_t MaxVF = MinDepDistBytes / (TypeByteSize * *CommonStride);
2239 LLVM_DEBUG(dbgs() << "LAA: Positive min distance " << MinDistance
2240 << " with max VF = " << MaxVF << '\n');
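// E.g. MinDepDistBytes == 16 with 4-byte elements and a common stride of 1
// gives MaxVF == 4 elements, i.e. a maximum safe vector width of 128 bits.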
2241
2242 uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
2243 if (!isa<SCEVConstant>(Dist) && MaxVFInBits < MaxTargetVectorWidthInBits) {
2244 // For non-constant distances, we checked the lower bound of the dependence
2245 // distance and the distance may be larger at runtime (and safe for
2246 // vectorization). Classify it as Unknown, so we re-try with runtime checks.
2247 return Dependence::Unknown;
2248 }
2249
2250 MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);
2251 return Dependence::BackwardVectorizable;
2252}
2253
2254bool MemoryDepChecker::areDepsSafe(
2255 DepCandidates &AccessSets, MemAccessInfoList &CheckDeps,
2256 const DenseMap<Value *, SmallVector<const Value *, 16>>
2257 &UnderlyingObjects) {
2258
2259 MinDepDistBytes = -1;
2260 SmallPtrSet<MemAccessInfo, 8> Visited;
2261 for (MemAccessInfo CurAccess : CheckDeps) {
2262 if (Visited.count(CurAccess))
2263 continue;
2264
2265 // Get the relevant memory access set.
2266 EquivalenceClasses<MemAccessInfo>::iterator I =
2267 AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));
2268
2269 // Check accesses within this set.
2270 EquivalenceClasses<MemAccessInfo>::member_iterator AI =
2271 AccessSets.member_begin(I);
2272 EquivalenceClasses<MemAccessInfo>::member_iterator AE =
2273 AccessSets.member_end();
2274
2275 // Check every access pair.
2276 while (AI != AE) {
2277 Visited.insert(*AI);
2278 bool AIIsWrite = AI->getInt();
2279 // Check loads only against the next equivalence class, but stores also against
2280 // other stores in the same equivalence class - to the same address.
2281 EquivalenceClasses<MemAccessInfo>::member_iterator OI =
2282 (AIIsWrite ? AI : std::next(AI));
2283 while (OI != AE) {
2284 // Check every accessing instruction pair in program order.
2285 for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
2286 I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
2287 // Scan all accesses of another equivalence class, but only the next
2288 // accesses of the same equivalence class.
2289 for (std::vector<unsigned>::iterator
2290 I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
2291 I2E = (OI == AI ? I1E : Accesses[*OI].end());
2292 I2 != I2E; ++I2) {
2293 auto A = std::make_pair(&*AI, *I1);
2294 auto B = std::make_pair(&*OI, *I2);
2295
2296 assert(*I1 != *I2);
2297 if (*I1 > *I2)
2298 std::swap(A, B);
2299
2300 Dependence::DepType Type = isDependent(*A.first, A.second, *B.first,
2301 B.second, UnderlyingObjects);
2302 mergeInStatus(Dependence::isSafeForVectorization(Type));
2303
2304 // Gather dependences unless we accumulated MaxDependences
2305 // dependences. In that case return as soon as we find the first
2306 // unsafe dependence. This puts a limit on this quadratic
2307 // algorithm.
2308 if (RecordDependences) {
2309 if (Type != Dependence::NoDep)
2310 Dependences.push_back(Dependence(A.second, B.second, Type));
2311
2312 if (Dependences.size() >= MaxDependences) {
2313 RecordDependences = false;
2314 Dependences.clear();
2315 LLVM_DEBUG(dbgs()
2316 << "Too many dependences, stopped recording\n");
2317 }
2318 }
2319 if (!RecordDependences && !isSafeForVectorization())
2320 return false;
2321 }
2322 ++OI;
2323 }
2324 ++AI;
2325 }
2326 }
2327
2328 LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
2329 return isSafeForVectorization();
2330}
2331
2332SmallVector<Instruction *, 4>
2333MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool IsWrite) const {
2334 MemAccessInfo Access(Ptr, IsWrite);
2335 auto &IndexVector = Accesses.find(Access)->second;
2336
2337 SmallVector<Instruction *, 4> Insts;
2338 transform(IndexVector,
2339 std::back_inserter(Insts),
2340 [&](unsigned Idx) { return this->InstMap[Idx]; });
2341 return Insts;
2342}
2343
2344const char *MemoryDepChecker::Dependence::DepName[] = {
2345 "NoDep",
2346 "Unknown",
2347 "IndirectUnsafe",
2348 "Forward",
2349 "ForwardButPreventsForwarding",
2350 "Backward",
2351 "BackwardVectorizable",
2352 "BackwardVectorizableButPreventsForwarding"};
2353
2354void MemoryDepChecker::Dependence::print(
2355 raw_ostream &OS, unsigned Depth,
2356 const SmallVectorImpl<Instruction *> &Instrs) const {
2357 OS.indent(Depth) << DepName[Type] << ":\n";
2358 OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
2359 OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
2360}
2361
2362bool LoopAccessInfo::canAnalyzeLoop() {
2363 // We need to have a loop header.
2364 LLVM_DEBUG(dbgs() << "\nLAA: Checking a loop in '"
2365 << TheLoop->getHeader()->getParent()->getName() << "' from "
2366 << TheLoop->getLocStr() << "\n");
2367
2368 // We can only analyze innermost loops.
2369 if (!TheLoop->isInnermost()) {
2370 LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
2371 recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
2372 return false;
2373 }
2374
2375 // We must have a single backedge.
2376 if (TheLoop->getNumBackEdges() != 1) {
2377 LLVM_DEBUG(
2378 dbgs() << "LAA: loop control flow is not understood by analyzer\n");
2379 recordAnalysis("CFGNotUnderstood")
2380 << "loop control flow is not understood by analyzer";
2381 return false;
2382 }
2383
2384 // ScalarEvolution needs to be able to find the symbolic max backedge taken
2385 // count, which is an upper bound on the number of loop iterations. The loop
2386 // may execute fewer iterations, if it exits via an uncountable exit.
2387 const SCEV *ExitCount = PSE->getSymbolicMaxBackedgeTakenCount();
2388 if (isa<SCEVCouldNotCompute>(ExitCount)) {
2389 recordAnalysis("CantComputeNumberOfIterations")
2390 << "could not determine number of loop iterations";
2391 LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
2392 return false;
2393 }
2394
2395 LLVM_DEBUG(dbgs() << "LAA: Found an analyzable loop: "
2396 << TheLoop->getHeader()->getName() << "\n");
2397 return true;
2398}
2399
2400bool LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
2401 const TargetLibraryInfo *TLI,
2402 DominatorTree *DT) {
2403 // Holds the Load and Store instructions.
2404 SmallVector<LoadInst *, 16> Loads;
2405 SmallVector<StoreInst *, 16> Stores;
2406 SmallPtrSet<MDNode *, 8> LoopAliasScopes;
2407
2408 // Holds all the different accesses in the loop.
2409 unsigned NumReads = 0;
2410 unsigned NumReadWrites = 0;
2411
2412 bool HasComplexMemInst = false;
2413
2414 // A runtime check is only legal to insert if there are no convergent calls.
2415 HasConvergentOp = false;
2416
2417 PtrRtChecking->Pointers.clear();
2418 PtrRtChecking->Need = false;
2419
2420 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
2421
2422 const bool EnableMemAccessVersioningOfLoop =
2423 EnableMemAccessVersioning &&
2424 !TheLoop->getHeader()->getParent()->hasOptSize();
2425
2426 // Traverse blocks in fixed RPOT order, regardless of their storage in the
2427 // loop info, as it may be arbitrary.
2428 LoopBlocksRPO RPOT(TheLoop);
2429 RPOT.perform(LI);
2430 for (BasicBlock *BB : RPOT) {
2431 // Scan the BB and collect legal loads and stores. Also detect any
2432 // convergent instructions.
2433 for (Instruction &I : *BB) {
2434 if (auto *Call = dyn_cast<CallBase>(&I)) {
2435 if (Call->isConvergent())
2436 HasConvergentOp = true;
2437 }
2438
2439 // If both a non-vectorizable memory instruction and a convergent operation
2440 // have been found in this loop, there is no reason to continue the search.
2441 if (HasComplexMemInst && HasConvergentOp)
2442 return false;
2443
2444 // Avoid hitting recordAnalysis multiple times.
2445 if (HasComplexMemInst)
2446 continue;
2447
2448 // Record alias scopes defined inside the loop.
2449 if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
2450 for (Metadata *Op : Decl->getScopeList()->operands())
2451 LoopAliasScopes.insert(cast<MDNode>(Op));
2452
2453 // Many math library functions read the rounding mode. We will only
2454 // vectorize a loop if it contains known function calls that don't set
2455 // the flag. Therefore, it is safe to ignore this read from memory.
2456 auto *Call = dyn_cast<CallInst>(&I);
2457 if (Call && getVectorIntrinsicIDForCall(Call, TLI))
2458 continue;
2459
2460 // If this is a load, save it. If this instruction can read from memory
2461 // but is not a load, then we quit. Notice that we don't handle function
2462 // calls that read or write.
2463 if (I.mayReadFromMemory()) {
2464 // If the function has an explicit vectorized counterpart, we can safely
2465 // assume that it can be vectorized.
2466 if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
2467 !VFDatabase::getMappings(*Call).empty())
2468 continue;
2469
2470 auto *Ld = dyn_cast<LoadInst>(&I);
2471 if (!Ld) {
2472 recordAnalysis("CantVectorizeInstruction", Ld)
2473 << "instruction cannot be vectorized";
2474 HasComplexMemInst = true;
2475 continue;
2476 }
2477 if (!Ld->isSimple() && !IsAnnotatedParallel) {
2478 recordAnalysis("NonSimpleLoad", Ld)
2479 << "read with atomic ordering or volatile read";
2480 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
2481 HasComplexMemInst = true;
2482 continue;
2483 }
2484 NumLoads++;
2485 Loads.push_back(Ld);
2486 DepChecker->addAccess(Ld);
2487 if (EnableMemAccessVersioningOfLoop)
2488 collectStridedAccess(Ld);
2489 continue;
2490 }
2491
2492 // Save 'store' instructions. Abort if other instructions write to memory.
2493 if (I.mayWriteToMemory()) {
2494 auto *St = dyn_cast<StoreInst>(&I);
2495 if (!St) {
2496 recordAnalysis("CantVectorizeInstruction", St)
2497 << "instruction cannot be vectorized";
2498 HasComplexMemInst = true;
2499 continue;
2500 }
2501 if (!St->isSimple() && !IsAnnotatedParallel) {
2502 recordAnalysis("NonSimpleStore", St)
2503 << "write with atomic ordering or volatile write";
2504 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
2505 HasComplexMemInst = true;
2506 continue;
2507 }
2508 NumStores++;
2509 Stores.push_back(St);
2510 DepChecker->addAccess(St);
2511 if (EnableMemAccessVersioningOfLoop)
2512 collectStridedAccess(St);
2513 }
2514 } // Next instr.
2515 } // Next block.
2516
2517 if (HasComplexMemInst)
2518 return false;
2519
2520 // Now we have two lists that hold the loads and the stores.
2521 // Next, we find the pointers that they use.
2522
2523 // Check if we see any stores. If there are no stores, then we don't
2524 // care if the pointers are *restrict*.
2525 if (!Stores.size()) {
2526 LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
2527 return true;
2528 }
2529
2530 MemoryDepChecker::DepCandidates DependentAccesses;
2531 AccessAnalysis Accesses(TheLoop, AA, LI, DependentAccesses, *PSE,
2532 LoopAliasScopes);
2533
2534 // Holds the analyzed pointers. We don't want to call getUnderlyingObjects
2535 // multiple times on the same object. If the ptr is accessed twice, once
2536 // for read and once for write, it will only appear once (on the write
2537 // list). This is okay, since we are going to check for conflicts between
2538 // writes and between reads and writes, but not between reads and reads.
2539 SmallSet<std::pair<Value *, Type *>, 16> Seen;
2540
2541 // Record uniform store addresses to identify if we have multiple stores
2542 // to the same address.
2543 SmallPtrSet<Value *, 16> UniformStores;
2544
2545 for (StoreInst *ST : Stores) {
2546 Value *Ptr = ST->getPointerOperand();
2547
2548 if (isInvariant(Ptr)) {
2549 // Record store instructions to loop invariant addresses
2550 StoresToInvariantAddresses.push_back(ST);
2551 HasStoreStoreDependenceInvolvingLoopInvariantAddress |=
2552 !UniformStores.insert(Ptr).second;
2553 }
2554
2555 // If we did *not* see this pointer before, insert it to the read-write
2556 // list. At this phase it is only a 'write' list.
2557 Type *AccessTy = getLoadStoreType(ST);
2558 if (Seen.insert({Ptr, AccessTy}).second) {
2559 ++NumReadWrites;
2560
2562 // The TBAA metadata could have a control dependency on the predication
2563 // condition, so we cannot rely on it when determining whether or not we
2564 // need runtime pointer checks.
2565 if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
2566 Loc.AATags.TBAA = nullptr;
2567
2568 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2569 [&Accesses, AccessTy, Loc](Value *Ptr) {
2570 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2571 Accesses.addStore(NewLoc, AccessTy);
2572 });
2573 }
2574 }
2575
2576 if (IsAnnotatedParallel) {
2577 LLVM_DEBUG(
2578 dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
2579 << "checks.\n");
2580 return true;
2581 }
2582
2583 for (LoadInst *LD : Loads) {
2584 Value *Ptr = LD->getPointerOperand();
2585 // If we did *not* see this pointer before, insert it to the
2586 // read list. If we *did* see it before, then it is already in
2587 // the read-write list. This allows us to vectorize expressions
2588 // such as A[i] += x, because the address of A[i] is a read-write
2589 // pointer. This only works if the index of A[i] is consecutive.
2590 // If the address of i is unknown (for example A[B[i]]) then we may
2591 // read a few words, modify, and write a few words, and some of the
2592 // words may be written to the same address.
2593 bool IsReadOnlyPtr = false;
2594 Type *AccessTy = getLoadStoreType(LD);
2595 if (Seen.insert({Ptr, AccessTy}).second ||
2596 !getPtrStride(*PSE, LD->getType(), Ptr, TheLoop, SymbolicStrides).value_or(0)) {
2597 ++NumReads;
2598 IsReadOnlyPtr = true;
2599 }
2600
2601 // See if there is an unsafe dependency between a load to a uniform address and
2602 // store to the same uniform address.
2603 if (UniformStores.count(Ptr)) {
2604 LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
2605 "load and uniform store to the same address!\n");
2606 HasLoadStoreDependenceInvolvingLoopInvariantAddress = true;
2607 }
2608
2609 MemoryLocation Loc = MemoryLocation::get(LD);
2610 // The TBAA metadata could have a control dependency on the predication
2611 // condition, so we cannot rely on it when determining whether or not we
2612 // need runtime pointer checks.
2613 if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
2614 Loc.AATags.TBAA = nullptr;
2615
2616 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2617 [&Accesses, AccessTy, Loc, IsReadOnlyPtr](Value *Ptr) {
2618 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2619 Accesses.addLoad(NewLoc, AccessTy, IsReadOnlyPtr);
2620 });
2621 }
2622
2623 // If we write (or read-write) to a single destination and there are no
2624 // other reads in this loop then it is safe to vectorize.
2625 if (NumReadWrites == 1 && NumReads == 0) {
2626 LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
2627 return true;
2628 }
2629
2630 // Build dependence sets and check whether we need a runtime pointer bounds
2631 // check.
2632 Accesses.buildDependenceSets();
2633
2634 // Find pointers with computable bounds. We are going to use this information
2635 // to place a runtime bound check.
2636 Value *UncomputablePtr = nullptr;
2637 bool CanDoRTIfNeeded =
2638 Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(), TheLoop,
2639 SymbolicStrides, UncomputablePtr, false);
2640 if (!CanDoRTIfNeeded) {
2641 auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2642 recordAnalysis("CantIdentifyArrayBounds", I)
2643 << "cannot identify array bounds";
2644 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
2645 << "the array bounds.\n");
2646 return false;
2647 }
2648
2649 LLVM_DEBUG(
2650 dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");
2651
2652 bool DepsAreSafe = true;
2653 if (Accesses.isDependencyCheckNeeded()) {
2654 LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
2655 DepsAreSafe = DepChecker->areDepsSafe(DependentAccesses,
2656 Accesses.getDependenciesToCheck(),
2657 Accesses.getUnderlyingObjects());
2658
2659 if (!DepsAreSafe && DepChecker->shouldRetryWithRuntimeCheck()) {
2660 LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");
2661
2662 // Clear the dependency checks. We assume they are not needed.
2663 Accesses.resetDepChecks(*DepChecker);
2664
2665 PtrRtChecking->reset();
2666 PtrRtChecking->Need = true;
2667
2668 auto *SE = PSE->getSE();
2669 UncomputablePtr = nullptr;
2670 CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(
2671 *PtrRtChecking, SE, TheLoop, SymbolicStrides, UncomputablePtr, true);
2672
2673 // Check that we found the bounds for the pointer.
2674 if (!CanDoRTIfNeeded) {
2675 auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2676 recordAnalysis("CantCheckMemDepsAtRunTime", I)
2677 << "cannot check memory dependencies at runtime";
2678 LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
2679 return false;
2680 }
2681 DepsAreSafe = true;
2682 }
2683 }
2684
2685 if (HasConvergentOp) {
2686 recordAnalysis("CantInsertRuntimeCheckWithConvergent")
2687 << "cannot add control dependency to convergent operation";
2688 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
2689 "would be needed with a convergent operation\n");
2690 return false;
2691 }
2692
2693 if (DepsAreSafe) {
2694 LLVM_DEBUG(
2695 dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
2696 << (PtrRtChecking->Need ? "" : " don't")
2697 << " need runtime memory checks.\n");
2698 return true;
2699 }
2700
2701 emitUnsafeDependenceRemark();
2702 return false;
2703}
2704
2705void LoopAccessInfo::emitUnsafeDependenceRemark() {
2706 const auto *Deps = getDepChecker().getDependences();
2707 if (!Deps)
2708 return;
2709 const auto *Found =
2710 llvm::find_if(*Deps, [](const MemoryDepChecker::Dependence &D) {
2711 return MemoryDepChecker::Dependence::isSafeForVectorization(D.Type) !=
2712 MemoryDepChecker::VectorizationSafetyStatus::Safe;
2713 });
2714 if (Found == Deps->end())
2715 return;
2716 MemoryDepChecker::Dependence Dep = *Found;
2717
2718 LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
2719
2720 // Emit remark for first unsafe dependence
2721 bool HasForcedDistribution = false;
2722 std::optional<const MDOperand *> Value =
2723 findStringMetadataForLoop(TheLoop, "llvm.loop.distribute.enable");
2724 if (Value) {
2725 const MDOperand *Op = *Value;
2726 assert(Op && mdconst::hasa<ConstantInt>(*Op) && "invalid metadata");
2727 HasForcedDistribution = mdconst::extract<ConstantInt>(*Op)->getZExtValue();
2728 }
2729
2730 const std::string Info =
2731 HasForcedDistribution
2732 ? "unsafe dependent memory operations in loop."
2733 : "unsafe dependent memory operations in loop. Use "
2734 "#pragma clang loop distribute(enable) to allow loop distribution "
2735 "to attempt to isolate the offending operations into a separate "
2736 "loop";
2738 recordAnalysis("UnsafeDep", Dep.getDestination(getDepChecker())) << Info;
2739
2740 switch (Dep.Type) {
2741 case MemoryDepChecker::Dependence::NoDep:
2742 case MemoryDepChecker::Dependence::Forward:
2743 case MemoryDepChecker::Dependence::BackwardVectorizable:
2744 llvm_unreachable("Unexpected dependence");
2745 case MemoryDepChecker::Dependence::Backward:
2746 R << "\nBackward loop carried data dependence.";
2747 break;
2748 case MemoryDepChecker::Dependence::ForwardButPreventsForwarding:
2749 R << "\nForward loop carried data dependence that prevents "
2750 "store-to-load forwarding.";
2751 break;
2752 case MemoryDepChecker::Dependence::BackwardVectorizableButPreventsForwarding:
2753 R << "\nBackward loop carried data dependence that prevents "
2754 "store-to-load forwarding.";
2755 break;
2756 case MemoryDepChecker::Dependence::IndirectUnsafe:
2757 R << "\nUnsafe indirect dependence.";
2758 break;
2759 case MemoryDepChecker::Dependence::Unknown:
2760 R << "\nUnknown data dependence.";
2761 break;
2762 }
2763
2764 if (Instruction *I = Dep.getSource(getDepChecker())) {
2765 DebugLoc SourceLoc = I->getDebugLoc();
2766 if (auto *DD = dyn_cast_or_null<Instruction>(getPointerOperand(I)))
2767 SourceLoc = DD->getDebugLoc();
2768 if (SourceLoc)
2769 R << " Memory location is the same as accessed at "
2770 << ore::NV("Location", SourceLoc);
2771 }
2772}
2773
2774bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
2775 DominatorTree *DT) {
2776 assert(TheLoop->contains(BB) && "Unknown block used");
2777
2778 // Blocks that do not dominate the latch need predication.
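// E.g. a conditionally executed block inside the loop body does not dominate
// the latch, so its memory accesses must be predicated.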
2779 BasicBlock* Latch = TheLoop->getLoopLatch();
2780 return !DT->dominates(BB, Latch);
2781}
2782
2783OptimizationRemarkAnalysis &LoopAccessInfo::recordAnalysis(StringRef RemarkName,
2784 Instruction *I) {
2785 assert(!Report && "Multiple reports generated");
2786
2787 Value *CodeRegion = TheLoop->getHeader();
2788 DebugLoc DL = TheLoop->getStartLoc();
2789
2790 if (I) {
2791 CodeRegion = I->getParent();
2792 // If there is no debug location attached to the instruction, revert back to
2793 // using the loop's.
2794 if (I->getDebugLoc())
2795 DL = I->getDebugLoc();
2796 }
2797
2798 Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName, DL,
2799 CodeRegion);
2800 return *Report;
2801}
2802
2803bool LoopAccessInfo::isInvariant(Value *V) const {
2804 auto *SE = PSE->getSE();
2805 // TODO: Is this really what we want? Even without FP SCEV, we may want some
2806 // trivially loop-invariant FP values to be considered invariant.
2807 if (!SE->isSCEVable(V->getType()))
2808 return false;
2809 const SCEV *S = SE->getSCEV(V);
2810 return SE->isLoopInvariant(S, TheLoop);
2811}
2812
2813/// Find the operand of the GEP that should be checked for consecutive
2814/// stores. This ignores trailing indices that have no effect on the final
2815/// pointer.
2816static unsigned getGEPInductionOperand(const GetElementPtrInst *Gep) {
2817 const DataLayout &DL = Gep->getDataLayout();
2818 unsigned LastOperand = Gep->getNumOperands() - 1;
2819 TypeSize GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());
2820
2821 // Walk backwards and try to peel off zeros.
2822 while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
2823 // Find the type we're currently indexing into.
2824 gep_type_iterator GEPTI = gep_type_begin(Gep);
2825 std::advance(GEPTI, LastOperand - 2);
2826
2827 // If it's a type with the same allocation size as the result of the GEP we
2828 // can peel off the zero index.
2829 TypeSize ElemSize = GEPTI.isStruct()
2830 ? DL.getTypeAllocSize(GEPTI.getIndexedType())
2831 : GEPTI.getSequentialElementStride(DL);
2832 if (ElemSize != GEPAllocSize)
2833 break;
2834 --LastOperand;
2835 }
2836
2837 return LastOperand;
2838}
2839
2840/// If the argument is a GEP, then returns the operand identified by
2841/// getGEPInductionOperand. However, if there is some other non-loop-invariant
2842/// operand, it returns that instead.
2843static Value *stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
2844 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
2845 if (!GEP)
2846 return Ptr;
2847
2848 unsigned InductionOperand = getGEPInductionOperand(GEP);
2849
2850 // Check that all of the gep indices are uniform except for our induction
2851 // operand.
2852 for (unsigned I = 0, E = GEP->getNumOperands(); I != E; ++I)
2853 if (I != InductionOperand &&
2854 !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(I)), Lp))
2855 return Ptr;
2856 return GEP->getOperand(InductionOperand);
2857}
2858
2859/// Get the stride of a pointer access in a loop. Looks for symbolic
2860/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
2861static const SCEV *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
2862 auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
2863 if (!PtrTy || PtrTy->isAggregateType())
2864 return nullptr;
2865
2866 // Try to remove a gep instruction to make the pointer (actually index at this
2867 // point) easier to analyze. If OrigPtr is equal to Ptr we are analyzing the
2868 // pointer, otherwise, we are analyzing the index.
2869 Value *OrigPtr = Ptr;
2870
2871 // The size of the pointer access.
2872 int64_t PtrAccessSize = 1;
2873
2874 Ptr = stripGetElementPtr(Ptr, SE, Lp);
2875 const SCEV *V = SE->getSCEV(Ptr);
2876
2877 if (Ptr != OrigPtr)
2878 // Strip off casts.
2879 while (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V))
2880 V = C->getOperand();
2881
2882 const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
2883 if (!S)
2884 return nullptr;
2885
2886 // If the pointer is invariant then there is no stride and it makes no
2887 // sense to add it here.
2888 if (Lp != S->getLoop())
2889 return nullptr;
2890
2891 V = S->getStepRecurrence(*SE);
2892 if (!V)
2893 return nullptr;
2894
2895 // Strip off the size of access multiplication if we are still analyzing the
2896 // pointer.
2897 if (OrigPtr == Ptr) {
2898 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
2899 if (M->getOperand(0)->getSCEVType() != scConstant)
2900 return nullptr;
2901
2902 const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();
2903
2904 // Huge step value - give up.
2905 if (APStepVal.getBitWidth() > 64)
2906 return nullptr;
2907
2908 int64_t StepVal = APStepVal.getSExtValue();
2909 if (PtrAccessSize != StepVal)
2910 return nullptr;
2911 V = M->getOperand(1);
2912 }
2913 }
2914
2915 // Note that the restrictions after this loop invariant check are only
2916 // profitability restrictions.
2917 if (!SE->isLoopInvariant(V, Lp))
2918 return nullptr;
2919
2920 // Look for the loop invariant symbolic value.
2921 if (isa<SCEVUnknown>(V))
2922 return V;
2923
2924 if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(V))
2925 if (isa<SCEVUnknown>(C->getOperand()))
2926 return V;
2927
2928 return nullptr;
2929}
2930
2931void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
2932 Value *Ptr = getLoadStorePointerOperand(MemAccess);
2933 if (!Ptr)
2934 return;
2935
2936 // Note: getStrideFromPointer is a *profitability* heuristic. We
2937 // could broaden the scope of values returned here - to anything
2938 // which happens to be loop invariant and contributes to the
2939 // computation of an interesting IV - but we chose not to as we
2940 // don't have a cost model here, and broadening the scope exposes
2941 // far too many unprofitable cases.
2942 const SCEV *StrideExpr = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
2943 if (!StrideExpr)
2944 return;
2945
2946 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
2947 "versioning:");
2948 LLVM_DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *StrideExpr << "\n");
2949
2950 if (!SpeculateUnitStride) {
2951 LLVM_DEBUG(dbgs() << " Chose not to due to -laa-speculate-unit-stride\n");
2952 return;
2953 }
2954
2955 // Avoid adding the "Stride == 1" predicate when we know that
2956 // Stride >= Trip-Count. Such a predicate will effectively optimize a single
2957 // or zero iteration loop, as Trip-Count <= Stride == 1.
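// E.g. if MaxBTC is known to be 7 (trip count 8) and the stride is known to
// be at least 8, Stride - MaxBTC is positive and versioning on Stride == 1
// is skipped below.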
2958 //
2959 // TODO: We are currently not making a very informed decision on when it is
2960 // beneficial to apply stride versioning. It might make more sense that the
2961 // users of this analysis (such as the vectorizer) will trigger it, based on
2962 // their specific cost considerations; For example, in cases where stride
2963 // versioning does not help resolving memory accesses/dependences, the
2964 // vectorizer should evaluate the cost of the runtime test, and the benefit
2965 // of various possible stride specializations, considering the alternatives
2966 // of using gather/scatters (if available).
2967
2968 const SCEV *MaxBTC = PSE->getSymbolicMaxBackedgeTakenCount();
2969
2970 // Match the types so we can compare the stride and the MaxBTC.
2971 // The Stride can be positive/negative, so we sign extend Stride;
2972 // The backedgeTakenCount is non-negative, so we zero extend MaxBTC.
2973 const DataLayout &DL = TheLoop->getHeader()->getDataLayout();
2974 uint64_t StrideTypeSizeBits = DL.getTypeSizeInBits(StrideExpr->getType());
2975 uint64_t BETypeSizeBits = DL.getTypeSizeInBits(MaxBTC->getType());
2976 const SCEV *CastedStride = StrideExpr;
2977 const SCEV *CastedBECount = MaxBTC;
2978 ScalarEvolution *SE = PSE->getSE();
2979 if (BETypeSizeBits >= StrideTypeSizeBits)
2980 CastedStride = SE->getNoopOrSignExtend(StrideExpr, MaxBTC->getType());
2981 else
2982 CastedBECount = SE->getZeroExtendExpr(MaxBTC, StrideExpr->getType());
2983 const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
2984 // Since TripCount == BackEdgeTakenCount + 1, checking:
2985 // "Stride >= TripCount" is equivalent to checking:
2986 // Stride - MaxBTC > 0
2987 if (SE->isKnownPositive(StrideMinusBETaken)) {
2988 LLVM_DEBUG(
2989 dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
2990 "Stride==1 predicate will imply that the loop executes "
2991 "at most once.\n");
2992 return;
2993 }
2994 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");
2995
2996 // Strip back off the integer cast, and check that our result is a
2997 // SCEVUnknown as we expect.
2998 const SCEV *StrideBase = StrideExpr;
2999 if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(StrideBase))
3000 StrideBase = C->getOperand();
3001 SymbolicStrides[Ptr] = cast<SCEVUnknown>(StrideBase);
3002}
3003
3004LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
3005 const TargetTransformInfo *TTI,
3006 const TargetLibraryInfo *TLI, AAResults *AA,
3007 DominatorTree *DT, LoopInfo *LI)
3008 : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
3009 PtrRtChecking(nullptr), TheLoop(L) {
3010 unsigned MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
3011 if (TTI) {
3012 TypeSize FixedWidth =
3013 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector);
3014 if (FixedWidth.isNonZero()) {
3015 // Scale the vector width by 2 as a rough estimate to also consider
3016 // interleaving.
3017 MaxTargetVectorWidthInBits = FixedWidth.getFixedValue() * 2;
3018 }
3019
3020 TypeSize ScalableWidth =
3021 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_ScalableVector);
3022 if (ScalableWidth.isNonZero())
3023 MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
3024 }
3025 DepChecker = std::make_unique<MemoryDepChecker>(*PSE, L, SymbolicStrides,
3026 MaxTargetVectorWidthInBits);
3027 PtrRtChecking = std::make_unique<RuntimePointerChecking>(*DepChecker, SE);
3028 if (canAnalyzeLoop())
3029 CanVecMem = analyzeLoop(AA, LI, TLI, DT);
3030}
3031
3032void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
3033 if (CanVecMem) {
3034 OS.indent(Depth) << "Memory dependences are safe";
3035 const MemoryDepChecker &DC = getDepChecker();
3036 if (!DC.isSafeForAnyVectorWidth())
3037 OS << " with a maximum safe vector width of "
3038 << DC.getMaxSafeVectorWidthInBits() << " bits";
3039 if (PtrRtChecking->Need)
3040 OS << " with run-time checks";
3041 OS << "\n";
3042 }
3043
3044 if (HasConvergentOp)
3045 OS.indent(Depth) << "Has convergent operation in loop\n";
3046
3047 if (Report)
3048 OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";
3049
3050 if (auto *Dependences = DepChecker->getDependences()) {
3051 OS.indent(Depth) << "Dependences:\n";
3052 for (const auto &Dep : *Dependences) {
3053 Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
3054 OS << "\n";
3055 }
3056 } else
3057 OS.indent(Depth) << "Too many dependences, not recorded\n";
3058
3059 // List the pairs of accesses that need run-time checks to prove independence.
3060 PtrRtChecking->print(OS, Depth);
3061 OS << "\n";
3062
3063 OS.indent(Depth)
3064 << "Non vectorizable stores to invariant address were "
3065 << (HasStoreStoreDependenceInvolvingLoopInvariantAddress ||
3066 HasLoadStoreDependenceInvolvingLoopInvariantAddress
3067 ? ""
3068 : "not ")
3069 << "found in loop.\n";
3070
3071 OS.indent(Depth) << "SCEV assumptions:\n";
3072 PSE->getPredicate().print(OS, Depth);
3073
3074 OS << "\n";
3075
3076 OS.indent(Depth) << "Expressions re-written:\n";
3077 PSE->print(OS, Depth);
3078}
3079
3080const LoopAccessInfo &LoopAccessInfoManager::getInfo(Loop &L) {
3081 auto [It, Inserted] = LoopAccessInfoMap.insert({&L, nullptr});
3082
3083 if (Inserted)
3084 It->second =
3085 std::make_unique<LoopAccessInfo>(&L, &SE, TTI, TLI, &AA, &DT, &LI);
3086
3087 return *It->second;
3088}
3089void LoopAccessInfoManager::clear() {
3090 SmallVector<Loop *> ToRemove;
3091 // Collect LoopAccessInfo entries that may keep references to IR outside the
3092 // analyzed loop or SCEVs that may have been modified or invalidated. At the
3093 // moment, that is loops requiring memory or SCEV runtime checks, as those cache
3094 // SCEVs, e.g. for pointer expressions.
3095 for (const auto &[L, LAI] : LoopAccessInfoMap) {
3096 if (LAI->getRuntimePointerChecking()->getChecks().empty() &&
3097 LAI->getPSE().getPredicate().isAlwaysTrue())
3098 continue;
3099 ToRemove.push_back(L);
3100 }
3101
3102 for (Loop *L : ToRemove)
3103 LoopAccessInfoMap.erase(L);
3104}
3105
3106bool LoopAccessInfoManager::invalidate(
3107 Function &F, const PreservedAnalyses &PA,
3108 FunctionAnalysisManager::Invalidator &Inv) {
3109 // Check whether our analysis is preserved.
3110 auto PAC = PA.getChecker<LoopAccessAnalysis>();
3111 if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
3112 // If not, give up now.
3113 return true;
3114
3115 // Check whether the analyses we depend on became invalid for any reason.
3116 // Skip checking TargetLibraryAnalysis as it is immutable and can't become
3117 // invalid.
3118 return Inv.invalidate<AAManager>(F, PA) ||
3119 Inv.invalidate<ScalarEvolutionAnalysis>(F, PA) ||
3120 Inv.invalidate<LoopAnalysis>(F, PA) ||
3121 Inv.invalidate<DominatorTreeAnalysis>(F, PA);
3122}
3123
3124LoopAccessInfoManager LoopAccessAnalysis::run(Function &F,
3125 FunctionAnalysisManager &FAM) {
3126 auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(F);
3127 auto &AA = FAM.getResult<AAManager>(F);
3128 auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
3129 auto &LI = FAM.getResult<LoopAnalysis>(F);
3130 auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
3131 auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
3132 return LoopAccessInfoManager(SE, AA, DT, LI, &TTI, &TLI);
3133}
3134
3135AnalysisKey LoopAccessAnalysis::Key;
static void visitPointers(Value *StartPtr, const Loop &InnermostLoop, function_ref< void(Value *)> AddPointer)
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR, PredicatedScalarEvolution &PSE, const Loop *L)
Return true if an AddRec pointer Ptr is unsigned non-wrapping, i.e.
static Value * stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp)
If the argument is a GEP, then returns the operand identified by getGEPInductionOperand.
static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride, uint64_t TypeByteSize)
Check the dependence for two accesses with the same stride Stride.
static const SCEV * getMinFromExprs(const SCEV *I, const SCEV *J, ScalarEvolution *SE)
Compare I and J and return the minimum.
static cl::opt< unsigned > MaxForkedSCEVDepth("max-forked-scev-depth", cl::Hidden, cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"), cl::init(5))
static cl::opt< bool > SpeculateUnitStride("laa-speculate-unit-stride", cl::Hidden, cl::desc("Speculate that non-constant strides are unit in LAA"), cl::init(true))
static SmallVector< PointerIntPair< const SCEV *, 1, bool > > findForkedPointer(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &StridesMap, Value *Ptr, const Loop *L)
static cl::opt< bool > EnableMemAccessVersioning("enable-mem-access-versioning", cl::init(true), cl::Hidden, cl::desc("Enable symbolic stride memory access versioning"))
This enables versioning on the strides of symbolically striding memory accesses in code like the foll...
This header provides classes for managing per-loop analyses.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file provides utility analysis objects describing memory locations.
uint64_t High
#define P(N)
FunctionAnalysisManager FAM
This header defines various interfaces for pass management in LLVM.
This file defines the PointerIntPair class.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckType(MVT::SimpleValueType VT, SDValue N, const TargetLowering *TLI, const DataLayout &DL)
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
This pass exposes codegen information to IR-level passes.
static const X86InstrFMA3Group Groups[]
A manager for alias analyses.
Class for arbitrary precision integers.
Definition: APInt.h:78
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1448
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition: APInt.cpp:1010
int64_t getSExtValue() const
Get sign extended value.
Definition: APInt.h:1522
This templated class represents "all analyses that operate over <a particular IR unit>" (e....
Definition: Analysis.h:49
API to communicate dependencies between analyses during invalidation.
Definition: PassManager.h:292
bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA)
Trigger the invalidation of some other analysis pass if not already handled and return whether it was...
Definition: PassManager.h:310
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:405
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:209
const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
Definition: BasicBlock.cpp:294
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
@ ICMP_ULE
unsigned less or equal
Definition: InstrTypes.h:783
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
A debug info location.
Definition: DebugLoc.h:33
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
iterator end()
Definition: DenseMap.h:84
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:220
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:279
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
EquivalenceClasses - This represents a collection of equivalence classes and supports three efficient...
iterator findValue(const ElemTy &V) const
findValue - Return an iterator to the specified value.
iterator insert(const ElemTy &Data)
insert - Insert a new value into the union/find set, ignoring the request if the value already exists...
member_iterator member_end() const
typename std::set< ECValue, ECValueComparator >::const_iterator iterator
iterator* - Provides a way to iterate over all values in the set.
member_iterator member_begin(iterator I) const
member_iterator unionSets(const ElemTy &V1, const ElemTy &V2)
union - Merge the two equivalence sets for the specified values, inserting them if they do not alread...
const ElemTy & getLeaderValue(const ElemTy &V) const
getLeaderValue - Return the leader for the specified value that is in the set.
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
Definition: Function.h:698
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:915
Type * getResultElementType() const
Definition: Instructions.h:976
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:294
const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
Definition: Instruction.cpp:74
Class to represent integer types.
Definition: DerivedTypes.h:40
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:278
An instruction for reading from memory.
Definition: Instructions.h:174
Value * getPointerOperand()
Definition: Instructions.h:253
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
This analysis provides dependence information for the memory accesses of a loop.
Result run(Function &F, FunctionAnalysisManager &AM)
bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
const LoopAccessInfo & getInfo(Loop &L)
Drive the analysis of memory accesses in the loop.
const MemoryDepChecker & getDepChecker() const
the Memory Dependence Checker which can determine the loop-independent and loop-carried dependences b...
bool isInvariant(Value *V) const
Returns true if value V is loop invariant.
void print(raw_ostream &OS, unsigned Depth=0) const
Print the information about the memory accesses in the loop.
static bool blockNeedsPredication(BasicBlock *BB, Loop *TheLoop, DominatorTree *DT)
Return true if the block BB needs to be predicated in order for the loop to be vectorized.
LoopAccessInfo(Loop *L, ScalarEvolution *SE, const TargetTransformInfo *TTI, const TargetLibraryInfo *TLI, AAResults *AA, DominatorTree *DT, LoopInfo *LI)
Analysis pass that exposes the LoopInfo for a function.
Definition: LoopInfo.h:571
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
unsigned getNumBackEdges() const
Calculate the number of back edges to the loop header.
BlockT * getHeader() const
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
Definition: LoopIterator.h:172
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:44
std::string getLocStr() const
Return a string containing the debug location of the loop (file name + line number if present,...
Definition: LoopInfo.cpp:667
bool isAnnotatedParallel() const
Returns true if the loop is annotated parallel.
Definition: LoopInfo.cpp:565
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
Definition: LoopInfo.cpp:632
Metadata node.
Definition: Metadata.h:1067
ArrayRef< MDOperand > operands() const
Definition: Metadata.h:1426
Tracking metadata reference owned by Metadata.
Definition: Metadata.h:889
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
Checks memory dependences among accesses to the same underlying object to determine whether there vec...
ArrayRef< unsigned > getOrderForAccess(Value *Ptr, bool IsWrite) const
Return the program order indices for the access location (Ptr, IsWrite).
bool areDepsSafe(DepCandidates &AccessSets, MemAccessInfoList &CheckDeps, const DenseMap< Value *, SmallVector< const Value *, 16 > > &UnderlyingObjects)
Check whether the dependencies between the accesses are safe.
bool isSafeForAnyVectorWidth() const
Return true if the number of elements that are safe to operate on simultaneously is not bounded.
const SmallVectorImpl< Instruction * > & getMemoryInstructions() const
The vector of memory access instructions.
const Loop * getInnermostLoop() const
uint64_t getMaxSafeVectorWidthInBits() const
Return the number of elements that are safe to operate on simultaneously, multiplied by the size of t...
bool isSafeForVectorization() const
No memory dependence was encountered that would inhibit vectorization.
const SmallVectorImpl< Dependence > * getDependences() const
Returns the memory dependences.
DenseMap< std::pair< const SCEV *, Type * >, std::pair< const SCEV *, const SCEV * > > & getPointerBounds()
SmallVector< Instruction *, 4 > getInstructionsForAccess(Value *Ptr, bool isWrite) const
Find the set of instructions that read or write via Ptr.
VectorizationSafetyStatus
Type to keep track of the status of the dependence check.
bool shouldRetryWithRuntimeCheck() const
In same cases when the dependency check fails we can still vectorize the loop with a dynamic array ac...
void addAccess(StoreInst *SI)
Register the location (instructions are given increasing numbers) of a write access.
PointerIntPair< Value *, 1, bool > MemAccessInfo
Representation for a specific memory location.
static MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
LocationSize Size
The maximum size of the location, in address-units, or UnknownSize if the size is not known.
AAMDNodes AATags
The metadata nodes which describes the aliasing of the location (each member is null if that kind of ...
const Value * Ptr
The address of the start of the location.
Root of the metadata hierarchy.
Definition: Metadata.h:62
Diagnostic information for optimization analysis remarks.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
void addPredicate(const SCEVPredicate &Pred)
Adds a new predicate.
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
const SCEVPredicate & getPredicate() const
bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Returns true if we've proved that V doesn't wrap by means of a SCEV predicate.
void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Proves that V doesn't overflow by adding SCEV predicate.
void print(raw_ostream &OS, unsigned Depth) const
Print the SCEV mappings done by the Predicated Scalar Evolution.
const SCEVAddRecExpr * getAsAddRec(Value *V)
Attempts to produce an AddRecExpr for V by adding additional SCEV predicates.
const SCEV * getSymbolicMaxBackedgeTakenCount()
Get the (predicated) symbolic max backedge count for the analyzed loop.
const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:111
PreservedAnalysisChecker getChecker() const
Build a checker for this PreservedAnalyses and the specified analysis type.
Definition: Analysis.h:264
Holds information about the memory runtime legality checks to verify that a group of pointers do not ...
bool Need
This flag indicates if we need to add the runtime check.
void reset()
Reset the state of the pointer runtime information.
unsigned getNumberOfChecks() const
Returns the number of run-time checks required according to needsChecking.
void printChecks(raw_ostream &OS, const SmallVectorImpl< RuntimePointerCheck > &Checks, unsigned Depth=0) const
Print Checks.
bool needsChecking(const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const
Decide if we need to add a check between two groups of pointers, according to needsChecking.
void print(raw_ostream &OS, unsigned Depth=0) const
Print the list run-time memory checks necessary.
SmallVector< RuntimeCheckingPtrGroup, 2 > CheckingGroups
Holds a partitioning of pointers into "check groups".
void generateChecks(MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies)
Generate the checks and store it.
static bool arePointersInSamePartition(const SmallVectorImpl< int > &PtrToPartition, unsigned PtrIdx1, unsigned PtrIdx2)
Check if pointers are in the same partition.
SmallVector< PointerInfo, 2 > Pointers
Information about the pointers that may require checking.
void insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr, Type *AccessTy, bool WritePtr, unsigned DepSetId, unsigned ASId, PredicatedScalarEvolution &PSE, bool NeedsFreeze)
Insert a pointer and calculate the start and end SCEVs.
This node represents a polynomial recurrence on the trip count of the specified loop.
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
This class represents a constant integer value.
This is the base class for unary integral cast operator classes.
This node represents multiplication of some number of SCEVs.
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
virtual void print(raw_ostream &OS, unsigned Depth=0) const =0
Prints a textual representation of this predicate with an indentation of Depth.
This class represents an analyzed expression in the program.
Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
The main scalar evolution driver.
bool isKnownNonNegative(const SCEV *S)
Test if the given expression is known to be non-negative.
const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
bool isKnownNonPositive(const SCEV *S)
Test if the given expression is known to be non-positive.
const SCEV * getUMaxExpr(const SCEV *LHS, const SCEV *RHS)
const SCEVPredicate * getEqualPredicate(const SCEV *LHS, const SCEV *RHS)
const SCEV * getConstant(ConstantInt *V)
const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
const SCEV * getPtrToIntExpr(const SCEV *Op, Type *Ty)
bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
bool isKnownPositive(const SCEV *S)
Test if the given expression is known to be positive.
bool isKnownPredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
const SCEV * getUMinExpr(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
APInt getSignedRangeMin(const SCEV *S)
Determine the min of the signed range for a particular SCEV.
const SCEV * getStoreSizeOfExpr(Type *IntTy, Type *StoreTy)
Return an expression for the store size of StoreTy that is type IntTy.
const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
const SCEV * getCouldNotCompute()
const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
const SCEV * getSizeOfExpr(Type *IntTy, TypeSize Size)
Return an expression for a TypeSize.
const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
const SCEV * getTruncateOrSignExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:323
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:412
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:344
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:479
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:166
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
void resize(size_type N)
Definition: SmallVector.h:651
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
An instruction for storing to memory.
Definition: Instructions.h:290
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
TypeSize getRegisterBitWidth(RegisterKind K) const
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:265
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:255
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
Value * getOperand(unsigned i) const
Definition: User.h:169
unsigned getNumOperands() const
Definition: User.h:191
static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)
Retrieve all the VFInfo instances associated to the CallInst CI.
Definition: VectorUtils.h:71
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
Definition: Value.h:736
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:202
constexpr bool isNonZero() const
Definition: TypeSize.h:158
An efficient, type-erasing, non-owning reference to a callable.
TypeSize getSequentialElementStride(const DataLayout &DL) const
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
raw_ostream & indent(unsigned NumSpaces)
indent - Insert 'NumSpaces' spaces.
friend const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:236
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
Definition: PatternMatch.h:612
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:463
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
std::optional< int > getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB, Value *PtrB, const DataLayout &DL, ScalarEvolution &SE, bool StrictCheck=false, bool CheckType=true)
Returns the distance between the pointers PtrA and PtrB iff they are compatible and it is possible to...
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
@ Offset
Definition: DWP.cpp:480
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are are tuples (A,...
Definition: STLExtras.h:2400
unsigned getPointerAddressSpace(const Type *T)
Definition: SPIRVUtils.h:126
std::optional< const MDOperand * > findStringMetadataForLoop(const Loop *TheLoop, StringRef Name)
Find string metadata for loop.
Definition: LoopInfo.cpp:1065
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
AddressSpace
Definition: NVPTXBaseInfo.h:21
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
Definition: STLExtras.h:1928
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Definition: Function.cpp:2102
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool isPointerTy(const Type *T)
Definition: SPIRVUtils.h:120
std::optional< int64_t > getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr, const Loop *Lp, const DenseMap< Value *, const SCEV * > &StridesMap=DenseMap< Value *, const SCEV * >(), bool Assume=false, bool ShouldCheckWrap=true)
If the pointer has a constant stride return it in units of the access type size.
bool sortPtrAccesses(ArrayRef< Value * > VL, Type *ElemTy, const DataLayout &DL, ScalarEvolution &SE, SmallVectorImpl< unsigned > &SortedIndices)
Attempt to sort the pointers in VL and return the sorted indices in SortedIndices,...
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
const SCEV * replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &PtrToStride, Value *Ptr)
Return the SCEV corresponding to a pointer with the symbolic stride replaced with constant one,...
bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL, ScalarEvolution &SE, bool CheckType=true)
Returns true if the memory operations A and B are consecutive.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
OutputIt copy(R &&Range, OutputIt Out)
Definition: STLExtras.h:1824
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1749
gep_type_iterator gep_type_begin(const User *GEP)
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
Type * getLoadStoreType(Value *I)
A helper function that returns the type of a load or store instruction.
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
IR Values for the lower and upper bounds of a pointer evolution.
Definition: LoopUtils.cpp:1798
MDNode * Scope
The tag for alias scope specification (used with noalias).
Definition: Metadata.h:783
MDNode * TBAA
The tag for type-based alias analysis.
Definition: Metadata.h:777
MDNode * NoAlias
The tag specifying the noalias scope.
Definition: Metadata.h:786
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition: Analysis.h:28
Dependece between memory access instructions.
Instruction * getDestination(const MemoryDepChecker &DepChecker) const
Return the destination instruction of the dependence.
DepType Type
The type of the dependence.
bool isPossiblyBackward() const
May be a lexically backward dependence type (includes Unknown).
Instruction * getSource(const MemoryDepChecker &DepChecker) const
Return the source instruction of the dependence.
bool isForward() const
Lexically forward dependence.
bool isBackward() const
Lexically backward dependence.
void print(raw_ostream &OS, unsigned Depth, const SmallVectorImpl< Instruction * > &Instrs) const
Print the dependence.
DepType
The type of the dependence.
static const char * DepName[]
String version of the types.
static VectorizationSafetyStatus isSafeForVectorization(DepType Type)
Dependence types that don't prevent vectorization.
unsigned AddressSpace
Address space of the involved pointers.
bool addPointer(unsigned Index, RuntimePointerChecking &RtCheck)
Tries to add the pointer recorded in RtCheck at index Index to this pointer checking group.
bool NeedsFreeze
Whether the pointer needs to be frozen after expansion, e.g.
const SCEV * High
The SCEV expression which represents the upper bound of all the pointers in this group.
SmallVector< unsigned, 2 > Members
Indices of all the pointers that constitute this grouping.
RuntimeCheckingPtrGroup(unsigned Index, RuntimePointerChecking &RtCheck)
Create a new pointer checking group containing a single pointer, with index Index in RtCheck.
const SCEV * Low
The SCEV expression which represents the lower bound of all the pointers in this group.
bool IsWritePtr
Holds the information if this pointer is used for writing to memory.
unsigned DependencySetId
Holds the id of the set of pointers that could be dependent because of a shared underlying object.
unsigned AliasSetId
Holds the id of the disjoint alias set to which this pointer belongs.
static const unsigned MaxVectorWidth
Maximum SIMD width.
static unsigned VectorizationFactor
VF as overridden by the user.
static unsigned RuntimeMemoryCheckThreshold
\When performing memory disambiguation checks at runtime do not make more than this number of compari...
static bool isInterleaveForced()
True if force-vector-interleave was specified by the user.
static unsigned VectorizationInterleave
Interleave factor as overridden by the user.
Function object to check whether the first component of a container supported by std::get (like std::...
Definition: STLExtras.h:1450