1//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// The implementation of the loop memory dependence analysis that was
10// originally developed for the loop vectorizer.
11//
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/APInt.h"
16#include "llvm/ADT/DenseMap.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/SetVector.h"
22#include "llvm/ADT/SmallSet.h"
37#include "llvm/IR/BasicBlock.h"
38#include "llvm/IR/Constants.h"
39#include "llvm/IR/DataLayout.h"
40#include "llvm/IR/DebugLoc.h"
43#include "llvm/IR/Dominators.h"
44#include "llvm/IR/Function.h"
46#include "llvm/IR/InstrTypes.h"
47#include "llvm/IR/Instruction.h"
49#include "llvm/IR/Operator.h"
50#include "llvm/IR/PassManager.h"
52#include "llvm/IR/Type.h"
53#include "llvm/IR/Value.h"
54#include "llvm/IR/ValueHandle.h"
57#include "llvm/Support/Debug.h"
60#include <algorithm>
61#include <cassert>
62#include <cstdint>
63#include <iterator>
64#include <utility>
65#include <variant>
66#include <vector>
67
68using namespace llvm;
69using namespace llvm::PatternMatch;
70
71#define DEBUG_TYPE "loop-accesses"
72
73static cl::opt<unsigned, true>
74VectorizationFactor("force-vector-width", cl::Hidden,
75 cl::desc("Sets the SIMD width. Zero is autoselect."),
76 cl::location(VectorizerParams::VectorizationFactor));
77unsigned VectorizerParams::VectorizationFactor;
 78
79static cl::opt<unsigned, true>
80VectorizationInterleave("force-vector-interleave", cl::Hidden,
81 cl::desc("Sets the vectorization interleave count. "
82 "Zero is autoselect."),
83 cl::location(
84 VectorizerParams::VectorizationInterleave));
85unsigned VectorizerParams::VectorizationInterleave;
 86
87static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
 88 "runtime-memory-check-threshold", cl::Hidden,
 89 cl::desc("When performing memory disambiguation checks at runtime do not "
 90 "generate more than this number of comparisons (default = 8)."),
 91 cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));
92unsigned VectorizerParams::RuntimeMemoryCheckThreshold;
 93
94/// The maximum iterations used to merge memory checks
95static cl::opt<unsigned> MemoryCheckMergeThreshold(
 96 "memory-check-merge-threshold", cl::Hidden,
97 cl::desc("Maximum number of comparisons done when trying to merge "
98 "runtime memory checks. (default = 100)"),
99 cl::init(100));
100
101/// Maximum SIMD width.
102const unsigned VectorizerParams::MaxVectorWidth = 64;
103
104/// We collect dependences up to this threshold.
105static cl::opt<unsigned>
 106 MaxDependences("max-dependences", cl::Hidden,
107 cl::desc("Maximum number of dependences collected by "
108 "loop-access analysis (default = 100)"),
109 cl::init(100));
110
111/// This enables versioning on the strides of symbolically striding memory
112/// accesses in code like the following.
113/// for (i = 0; i < N; ++i)
114/// A[i * Stride1] += B[i * Stride2] ...
115///
116/// Will be roughly translated to
117/// if (Stride1 == 1 && Stride2 == 1) {
118/// for (i = 0; i < N; i+=4)
119/// A[i:i+3] += ...
120/// } else
121/// ...
122static cl::opt<bool> EnableMemAccessVersioning(
 123 "enable-mem-access-versioning", cl::init(true), cl::Hidden,
124 cl::desc("Enable symbolic stride memory access versioning"));
125
126/// Enable store-to-load forwarding conflict detection. This option can
127/// be disabled for correctness testing.
128static cl::opt<bool> EnableForwardingConflictDetection(
 129 "store-to-load-forwarding-conflict-detection", cl::Hidden,
130 cl::desc("Enable conflict detection in loop-access analysis"),
131 cl::init(true));
132
133static cl::opt<unsigned> MaxForkedSCEVDepth(
 134 "max-forked-scev-depth", cl::Hidden,
135 cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"),
136 cl::init(5));
137
138static cl::opt<bool> SpeculateUnitStride(
 139 "laa-speculate-unit-stride", cl::Hidden,
140 cl::desc("Speculate that non-constant strides are unit in LAA"),
141 cl::init(true));
142
143static cl::opt<bool, true> HoistRuntimeChecks(
 144 "hoist-runtime-checks", cl::Hidden,
 145 cl::desc(
 146 "Hoist inner loop runtime memory checks to outer loop if possible"),
 147 cl::location(VectorizerParams::HoistRuntimeChecks), cl::init(true));
148bool VectorizerParams::HoistRuntimeChecks;
 149
150bool VectorizerParams::isInterleaveForced() {
 151 return ::VectorizationInterleave.getNumOccurrences() > 0;
152}
153
154const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
 155 const DenseMap<Value *, const SCEV *> &PtrToStride,
156 Value *Ptr) {
157 const SCEV *OrigSCEV = PSE.getSCEV(Ptr);
158
159 // If there is an entry in the map return the SCEV of the pointer with the
160 // symbolic stride replaced by one.
 161 DenseMap<Value *, const SCEV *>::const_iterator SI = PtrToStride.find(Ptr);
 162 if (SI == PtrToStride.end())
163 // For a non-symbolic stride, just return the original expression.
164 return OrigSCEV;
165
166 const SCEV *StrideSCEV = SI->second;
167 // Note: This assert is both overly strong and overly weak. The actual
168 // invariant here is that StrideSCEV should be loop invariant. The only
169 // such invariant strides we happen to speculate right now are unknowns
170 // and thus this is a reasonable proxy of the actual invariant.
171 assert(isa<SCEVUnknown>(StrideSCEV) && "shouldn't be in map");
172
173 ScalarEvolution *SE = PSE.getSE();
174 const SCEV *CT = SE->getOne(StrideSCEV->getType());
175 PSE.addPredicate(*SE->getEqualPredicate(StrideSCEV, CT));
176 const SCEV *Expr = PSE.getSCEV(Ptr);
177
178 LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
179 << " by: " << *Expr << "\n");
180 return Expr;
181}
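// For example (illustrative): if the SCEV of Ptr is {%A,+,(4 * %Stride)}<%loop>
// and %Stride has an entry in PtrToStride, the predicate "%Stride == 1" is
// added to PSE and the returned expression simplifies to {%A,+,4}<%loop>.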
182
183RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
 184 unsigned Index, const RuntimePointerChecking &RtCheck)
 185 : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),
 186 AddressSpace(RtCheck.Pointers[Index]
 187 .PointerValue->getType()
 188 ->getPointerAddressSpace()),
 189 NeedsFreeze(RtCheck.Pointers[Index].NeedsFreeze) {
 190 Members.push_back(Index);
191}
192
193/// Calculate Start and End points of memory access.
194/// Let's assume A is the first access and B is a memory access on the N-th
195/// loop iteration. Then B is calculated as:
196/// B = A + Step*N .
197/// Step value may be positive or negative.
198/// N is a calculated back-edge taken count:
199/// N = (TripCount > 0) ? RoundDown(TripCount - 1, VF) : 0
200/// Start and End points are calculated in the following way:
201/// Start = UMIN(A, B) ; End = UMAX(A, B) + SizeOfElt,
202/// where SizeOfElt is the size of single memory access in bytes.
203///
204/// There is no conflict when the intervals are disjoint:
205/// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
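/// A worked example (illustrative): with A = %p, Step = 4, a back-edge taken
/// count of 99 and a 4-byte element, B = %p + 396, so Start = %p and
/// End = %p + 400.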
206static std::pair<const SCEV *, const SCEV *> getStartAndEndForAccess(
207 const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy,
 208 PredicatedScalarEvolution &PSE,
 209 DenseMap<std::pair<const SCEV *, Type *>,
210 std::pair<const SCEV *, const SCEV *>> &PointerBounds) {
211 ScalarEvolution *SE = PSE.getSE();
212
213 auto [Iter, Ins] = PointerBounds.insert(
214 {{PtrExpr, AccessTy},
215 {SE->getCouldNotCompute(), SE->getCouldNotCompute()}});
216 if (!Ins)
217 return Iter->second;
218
219 const SCEV *ScStart;
220 const SCEV *ScEnd;
221
222 if (SE->isLoopInvariant(PtrExpr, Lp)) {
223 ScStart = ScEnd = PtrExpr;
224 } else if (auto *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr)) {
225 const SCEV *Ex = PSE.getSymbolicMaxBackedgeTakenCount();
226
227 ScStart = AR->getStart();
228 ScEnd = AR->evaluateAtIteration(Ex, *SE);
229 const SCEV *Step = AR->getStepRecurrence(*SE);
230
231 // For expressions with negative step, the upper bound is ScStart and the
232 // lower bound is ScEnd.
233 if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
234 if (CStep->getValue()->isNegative())
235 std::swap(ScStart, ScEnd);
236 } else {
237 // Fallback case: the step is not constant, but we can still
238 // get the upper and lower bounds of the interval by using min/max
239 // expressions.
240 ScStart = SE->getUMinExpr(ScStart, ScEnd);
241 ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
242 }
243 } else
244 return {SE->getCouldNotCompute(), SE->getCouldNotCompute()};
245
246 assert(SE->isLoopInvariant(ScStart, Lp) && "ScStart needs to be invariant");
 247 assert(SE->isLoopInvariant(ScEnd, Lp) && "ScEnd needs to be invariant");
248
249 // Add the size of the pointed element to ScEnd.
250 auto &DL = Lp->getHeader()->getDataLayout();
251 Type *IdxTy = DL.getIndexType(PtrExpr->getType());
252 const SCEV *EltSizeSCEV = SE->getStoreSizeOfExpr(IdxTy, AccessTy);
253 ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);
254
255 Iter->second = {ScStart, ScEnd};
256 return Iter->second;
257}
258
259/// Calculate Start and End points of memory access using
260/// getStartAndEndForAccess.
261void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
 262 Type *AccessTy, bool WritePtr,
 263 unsigned DepSetId, unsigned ASId,
 264 PredicatedScalarEvolution &PSE,
 265 bool NeedsFreeze) {
266 const auto &[ScStart, ScEnd] = getStartAndEndForAccess(
267 Lp, PtrExpr, AccessTy, PSE, DC.getPointerBounds());
268 assert(!isa<SCEVCouldNotCompute>(ScStart) &&
269 !isa<SCEVCouldNotCompute>(ScEnd) &&
270 "must be able to compute both start and end expressions");
271 Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, PtrExpr,
272 NeedsFreeze);
273}
274
275bool RuntimePointerChecking::tryToCreateDiffCheck(
276 const RuntimeCheckingPtrGroup &CGI, const RuntimeCheckingPtrGroup &CGJ) {
277 // If either group contains multiple different pointers, bail out.
278 // TODO: Support multiple pointers by using the minimum or maximum pointer,
279 // depending on src & sink.
280 if (CGI.Members.size() != 1 || CGJ.Members.size() != 1)
281 return false;
282
283 const PointerInfo *Src = &Pointers[CGI.Members[0]];
284 const PointerInfo *Sink = &Pointers[CGJ.Members[0]];
285
286 // If either pointer is read and written, multiple checks may be needed. Bail
287 // out.
288 if (!DC.getOrderForAccess(Src->PointerValue, !Src->IsWritePtr).empty() ||
289 !DC.getOrderForAccess(Sink->PointerValue, !Sink->IsWritePtr).empty())
290 return false;
291
292 ArrayRef<unsigned> AccSrc =
293 DC.getOrderForAccess(Src->PointerValue, Src->IsWritePtr);
294 ArrayRef<unsigned> AccSink =
295 DC.getOrderForAccess(Sink->PointerValue, Sink->IsWritePtr);
296 // If either pointer is accessed multiple times, there may not be a clear
297 // src/sink relation. Bail out for now.
298 if (AccSrc.size() != 1 || AccSink.size() != 1)
299 return false;
300
301 // If the sink is accessed before src, swap src/sink.
302 if (AccSink[0] < AccSrc[0])
303 std::swap(Src, Sink);
304
305 auto *SrcAR = dyn_cast<SCEVAddRecExpr>(Src->Expr);
306 auto *SinkAR = dyn_cast<SCEVAddRecExpr>(Sink->Expr);
307 if (!SrcAR || !SinkAR || SrcAR->getLoop() != DC.getInnermostLoop() ||
308 SinkAR->getLoop() != DC.getInnermostLoop())
309 return false;
310
 311 SmallVector<Instruction *, 4> SrcInsts =
 312 DC.getInstructionsForAccess(Src->PointerValue, Src->IsWritePtr);
 313 SmallVector<Instruction *, 4> SinkInsts =
 314 DC.getInstructionsForAccess(Sink->PointerValue, Sink->IsWritePtr);
315 Type *SrcTy = getLoadStoreType(SrcInsts[0]);
316 Type *DstTy = getLoadStoreType(SinkInsts[0]);
317 if (isa<ScalableVectorType>(SrcTy) || isa<ScalableVectorType>(DstTy))
318 return false;
319
320 const DataLayout &DL =
321 SinkAR->getLoop()->getHeader()->getDataLayout();
322 unsigned AllocSize =
323 std::max(DL.getTypeAllocSize(SrcTy), DL.getTypeAllocSize(DstTy));
324
 325 // Only constant steps matching the AllocSize are supported at the
326 // moment. This simplifies the difference computation. Can be extended in the
327 // future.
328 auto *Step = dyn_cast<SCEVConstant>(SinkAR->getStepRecurrence(*SE));
329 if (!Step || Step != SrcAR->getStepRecurrence(*SE) ||
330 Step->getAPInt().abs() != AllocSize)
331 return false;
332
333 IntegerType *IntTy =
334 IntegerType::get(Src->PointerValue->getContext(),
335 DL.getPointerSizeInBits(CGI.AddressSpace));
336
337 // When counting down, the dependence distance needs to be swapped.
338 if (Step->getValue()->isNegative())
339 std::swap(SinkAR, SrcAR);
340
341 const SCEV *SinkStartInt = SE->getPtrToIntExpr(SinkAR->getStart(), IntTy);
342 const SCEV *SrcStartInt = SE->getPtrToIntExpr(SrcAR->getStart(), IntTy);
343 if (isa<SCEVCouldNotCompute>(SinkStartInt) ||
344 isa<SCEVCouldNotCompute>(SrcStartInt))
345 return false;
346
347 const Loop *InnerLoop = SrcAR->getLoop();
348 // If the start values for both Src and Sink also vary according to an outer
349 // loop, then it's probably better to avoid creating diff checks because
350 // they may not be hoisted. We should instead let llvm::addRuntimeChecks
351 // do the expanded full range overlap checks, which can be hoisted.
352 if (HoistRuntimeChecks && InnerLoop->getParentLoop() &&
353 isa<SCEVAddRecExpr>(SinkStartInt) && isa<SCEVAddRecExpr>(SrcStartInt)) {
354 auto *SrcStartAR = cast<SCEVAddRecExpr>(SrcStartInt);
355 auto *SinkStartAR = cast<SCEVAddRecExpr>(SinkStartInt);
356 const Loop *StartARLoop = SrcStartAR->getLoop();
357 if (StartARLoop == SinkStartAR->getLoop() &&
358 StartARLoop == InnerLoop->getParentLoop() &&
359 // If the diff check would already be loop invariant (due to the
360 // recurrences being the same), then we prefer to keep the diff checks
361 // because they are cheaper.
362 SrcStartAR->getStepRecurrence(*SE) !=
363 SinkStartAR->getStepRecurrence(*SE)) {
364 LLVM_DEBUG(dbgs() << "LAA: Not creating diff runtime check, since these "
365 "cannot be hoisted out of the outer loop\n");
366 return false;
367 }
368 }
369
370 LLVM_DEBUG(dbgs() << "LAA: Creating diff runtime check for:\n"
371 << "SrcStart: " << *SrcStartInt << '\n'
372 << "SinkStartInt: " << *SinkStartInt << '\n');
373 DiffChecks.emplace_back(SrcStartInt, SinkStartInt, AllocSize,
374 Src->NeedsFreeze || Sink->NeedsFreeze);
375 return true;
376}
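// Illustrative sketch: for a src access A[i] and a sink access A[i + C] with
// a common constant step equal to the 4-byte element size, a single entry
// (SrcStartInt, SinkStartInt, AllocSize = 4) is recorded here; the client can
// later guard the loop with one comparison on the start difference instead of
// expanding the full (Start, End) interval-overlap checks.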
377
378SmallVector<RuntimePointerCheck, 4> RuntimePointerChecking::generateChecks() {
 379 SmallVector<RuntimePointerCheck, 4> Checks;
 380
381 for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
382 for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
 383 const RuntimeCheckingPtrGroup &CGI = CheckingGroups[I];
 384 const RuntimeCheckingPtrGroup &CGJ = CheckingGroups[J];
 385
386 if (needsChecking(CGI, CGJ)) {
387 CanUseDiffCheck = CanUseDiffCheck && tryToCreateDiffCheck(CGI, CGJ);
388 Checks.emplace_back(&CGI, &CGJ);
389 }
390 }
391 }
392 return Checks;
393}
394
395void RuntimePointerChecking::generateChecks(
396 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
397 assert(Checks.empty() && "Checks is not empty");
398 groupChecks(DepCands, UseDependencies);
399 Checks = generateChecks();
400}
401
402bool RuntimePointerChecking::needsChecking(
 403 const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
404 for (const auto &I : M.Members)
405 for (const auto &J : N.Members)
406 if (needsChecking(I, J))
407 return true;
408 return false;
409}
410
411/// Compare \p I and \p J and return the minimum.
412/// Return nullptr in case we couldn't find an answer.
413static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
414 ScalarEvolution *SE) {
415 const SCEV *Diff = SE->getMinusSCEV(J, I);
416 const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff);
417
418 if (!C)
419 return nullptr;
420 return C->getValue()->isNegative() ? J : I;
421}
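// For example (illustrative): with I = %base and J = (%base + 16), the
// difference J - I is the non-negative constant 16, so I is returned; if the
// difference does not fold to a SCEVConstant, nullptr is returned.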
422
423bool RuntimeCheckingPtrGroup::addPointer(
 424 unsigned Index, const RuntimePointerChecking &RtCheck) {
425 return addPointer(
426 Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,
427 RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
428 RtCheck.Pointers[Index].NeedsFreeze, *RtCheck.SE);
429}
430
431bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, const SCEV *Start,
 432 const SCEV *End, unsigned AS,
433 bool NeedsFreeze,
434 ScalarEvolution &SE) {
435 assert(AddressSpace == AS &&
436 "all pointers in a checking group must be in the same address space");
437
438 // Compare the starts and ends with the known minimum and maximum
439 // of this set. We need to know how we compare against the min/max
440 // of the set in order to be able to emit memchecks.
441 const SCEV *Min0 = getMinFromExprs(Start, Low, &SE);
442 if (!Min0)
443 return false;
444
445 const SCEV *Min1 = getMinFromExprs(End, High, &SE);
446 if (!Min1)
447 return false;
448
449 // Update the low bound expression if we've found a new min value.
450 if (Min0 == Start)
451 Low = Start;
452
453 // Update the high bound expression if we've found a new max value.
454 if (Min1 != End)
455 High = End;
 456
 457 Members.push_back(Index);
458 this->NeedsFreeze |= NeedsFreeze;
459 return true;
460}
461
462void RuntimePointerChecking::groupChecks(
463 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
464 // We build the groups from dependency candidates equivalence classes
465 // because:
466 // - We know that pointers in the same equivalence class share
467 // the same underlying object and therefore there is a chance
468 // that we can compare pointers
469 // - We wouldn't be able to merge two pointers for which we need
470 // to emit a memcheck. The classes in DepCands are already
471 // conveniently built such that no two pointers in the same
472 // class need checking against each other.
473
474 // We use the following (greedy) algorithm to construct the groups
475 // For every pointer in the equivalence class:
476 // For each existing group:
477 // - if the difference between this pointer and the min/max bounds
478 // of the group is a constant, then make the pointer part of the
479 // group and update the min/max bounds of that group as required.
480
481 CheckingGroups.clear();
482
483 // If we need to check two pointers to the same underlying object
484 // with a non-constant difference, we shouldn't perform any pointer
485 // grouping with those pointers. This is because we can easily get
486 // into cases where the resulting check would return false, even when
487 // the accesses are safe.
488 //
489 // The following example shows this:
490 // for (i = 0; i < 1000; ++i)
491 // a[5000 + i * m] = a[i] + a[i + 9000]
492 //
493 // Here grouping gives a check of (5000, 5000 + 1000 * m) against
494 // (0, 10000) which is always false. However, if m is 1, there is no
495 // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
496 // us to perform an accurate check in this case.
497 //
498 // The above case requires that we have an UnknownDependence between
499 // accesses to the same underlying object. This cannot happen unless
500 // FoundNonConstantDistanceDependence is set, and therefore UseDependencies
501 // is also false. In this case we will use the fallback path and create
502 // separate checking groups for all pointers.
503
504 // If we don't have the dependency partitions, construct a new
505 // checking pointer group for each pointer. This is also required
506 // for correctness, because in this case we can have checking between
507 // pointers to the same underlying object.
508 if (!UseDependencies) {
509 for (unsigned I = 0; I < Pointers.size(); ++I)
510 CheckingGroups.emplace_back(I, *this);
511 return;
512 }
513
514 unsigned TotalComparisons = 0;
515
 516 DenseMap<Value *, SmallVector<unsigned>> PositionMap;
 517 for (unsigned Index = 0; Index < Pointers.size(); ++Index) {
518 auto [It, _] = PositionMap.insert({Pointers[Index].PointerValue, {}});
519 It->second.push_back(Index);
520 }
521
522 // We need to keep track of what pointers we've already seen so we
523 // don't process them twice.
 524 SmallSet<unsigned, 2> Seen;
 525
526 // Go through all equivalence classes, get the "pointer check groups"
527 // and add them to the overall solution. We use the order in which accesses
528 // appear in 'Pointers' to enforce determinism.
529 for (unsigned I = 0; I < Pointers.size(); ++I) {
530 // We've seen this pointer before, and therefore already processed
531 // its equivalence class.
532 if (Seen.count(I))
533 continue;
534
535 MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
536 Pointers[I].IsWritePtr);
537
 538 SmallVector<RuntimeCheckingPtrGroup, 2> Groups;
 539 auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));
540
541 // Because DepCands is constructed by visiting accesses in the order in
542 // which they appear in alias sets (which is deterministic) and the
543 // iteration order within an equivalence class member is only dependent on
544 // the order in which unions and insertions are performed on the
545 // equivalence class, the iteration order is deterministic.
546 for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
547 MI != ME; ++MI) {
548 auto PointerI = PositionMap.find(MI->getPointer());
549 assert(PointerI != PositionMap.end() &&
550 "pointer in equivalence class not found in PositionMap");
551 for (unsigned Pointer : PointerI->second) {
552 bool Merged = false;
553 // Mark this pointer as seen.
554 Seen.insert(Pointer);
555
556 // Go through all the existing sets and see if we can find one
557 // which can include this pointer.
558 for (RuntimeCheckingPtrGroup &Group : Groups) {
559 // Don't perform more than a certain amount of comparisons.
560 // This should limit the cost of grouping the pointers to something
561 // reasonable. If we do end up hitting this threshold, the algorithm
562 // will create separate groups for all remaining pointers.
563 if (TotalComparisons > MemoryCheckMergeThreshold)
564 break;
565
566 TotalComparisons++;
567
568 if (Group.addPointer(Pointer, *this)) {
569 Merged = true;
570 break;
571 }
572 }
573
574 if (!Merged)
575 // We couldn't add this pointer to any existing set or the threshold
576 // for the number of comparisons has been reached. Create a new group
577 // to hold the current pointer.
578 Groups.emplace_back(Pointer, *this);
579 }
580 }
581
582 // We've computed the grouped checks for this partition.
583 // Save the results and continue with the next one.
584 llvm::copy(Groups, std::back_inserter(CheckingGroups));
585 }
586}
587
589 const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
590 unsigned PtrIdx2) {
591 return (PtrToPartition[PtrIdx1] != -1 &&
592 PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
593}
594
595bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
596 const PointerInfo &PointerI = Pointers[I];
597 const PointerInfo &PointerJ = Pointers[J];
598
599 // No need to check if two readonly pointers intersect.
600 if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
601 return false;
602
603 // Only need to check pointers between two different dependency sets.
604 if (PointerI.DependencySetId == PointerJ.DependencySetId)
605 return false;
606
607 // Only need to check pointers in the same alias set.
608 return PointerI.AliasSetId == PointerJ.AliasSetId;
609}
610
613 unsigned Depth) const {
614 unsigned N = 0;
615 for (const auto &[Check1, Check2] : Checks) {
616 const auto &First = Check1->Members, &Second = Check2->Members;
617
618 OS.indent(Depth) << "Check " << N++ << ":\n";
619
620 OS.indent(Depth + 2) << "Comparing group (" << Check1 << "):\n";
621 for (unsigned K : First)
622 OS.indent(Depth + 2) << *Pointers[K].PointerValue << "\n";
623
624 OS.indent(Depth + 2) << "Against group (" << Check2 << "):\n";
625 for (unsigned K : Second)
626 OS.indent(Depth + 2) << *Pointers[K].PointerValue << "\n";
627 }
628}
629
631
632 OS.indent(Depth) << "Run-time memory checks:\n";
633 printChecks(OS, Checks, Depth);
634
635 OS.indent(Depth) << "Grouped accesses:\n";
636 for (const auto &CG : CheckingGroups) {
637 OS.indent(Depth + 2) << "Group " << &CG << ":\n";
638 OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
639 << ")\n";
640 for (unsigned Member : CG.Members) {
641 OS.indent(Depth + 6) << "Member: " << *Pointers[Member].Expr << "\n";
642 }
643 }
644}
645
646namespace {
647
648/// Analyses memory accesses in a loop.
649///
650/// Checks whether run time pointer checks are needed and builds sets for data
651/// dependence checking.
652class AccessAnalysis {
653public:
654 /// Read or write access location.
655 typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
656 typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;
657
658 AccessAnalysis(const Loop *TheLoop, AAResults *AA, const LoopInfo *LI,
661 SmallPtrSetImpl<MDNode *> &LoopAliasScopes)
662 : TheLoop(TheLoop), BAA(*AA), AST(BAA), LI(LI), DepCands(DA), PSE(PSE),
663 LoopAliasScopes(LoopAliasScopes) {
664 // We're analyzing dependences across loop iterations.
665 BAA.enableCrossIterationMode();
666 }
667
668 /// Register a load and whether it is only read from.
669 void addLoad(const MemoryLocation &Loc, Type *AccessTy, bool IsReadOnly) {
670 Value *Ptr = const_cast<Value *>(Loc.Ptr);
671 AST.add(adjustLoc(Loc));
672 Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy);
673 if (IsReadOnly)
674 ReadOnlyPtr.insert(Ptr);
675 }
676
677 /// Register a store.
678 void addStore(const MemoryLocation &Loc, Type *AccessTy) {
679 Value *Ptr = const_cast<Value *>(Loc.Ptr);
680 AST.add(adjustLoc(Loc));
681 Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy);
682 }
683
684 /// Check if we can emit a run-time no-alias check for \p Access.
685 ///
 686 /// Returns true if we can emit a run-time no-alias check for \p Access.
 687 /// If we can check this access, this also adds it to a dependence set and
 688 /// adds a run-time check for it to \p RtCheck. If \p Assume is true,
689 /// we will attempt to use additional run-time checks in order to get
690 /// the bounds of the pointer.
691 bool createCheckForAccess(RuntimePointerChecking &RtCheck,
692 MemAccessInfo Access, Type *AccessTy,
693 const DenseMap<Value *, const SCEV *> &Strides,
695 Loop *TheLoop, unsigned &RunningDepId,
696 unsigned ASId, bool ShouldCheckStride, bool Assume);
697
698 /// Check whether we can check the pointers at runtime for
699 /// non-intersection.
700 ///
701 /// Returns true if we need no check or if we do and we can generate them
702 /// (i.e. the pointers have computable bounds).
703 bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
704 Loop *TheLoop, const DenseMap<Value *, const SCEV *> &Strides,
705 Value *&UncomputablePtr, bool ShouldCheckWrap = false);
706
707 /// Goes over all memory accesses, checks whether a RT check is needed
708 /// and builds sets of dependent accesses.
709 void buildDependenceSets() {
710 processMemAccesses();
711 }
712
713 /// Initial processing of memory accesses determined that we need to
714 /// perform dependency checking.
715 ///
716 /// Note that this can later be cleared if we retry memcheck analysis without
717 /// dependency checking (i.e. FoundNonConstantDistanceDependence).
718 bool isDependencyCheckNeeded() const { return !CheckDeps.empty(); }
719
720 /// We decided that no dependence analysis would be used. Reset the state.
721 void resetDepChecks(MemoryDepChecker &DepChecker) {
722 CheckDeps.clear();
723 DepChecker.clearDependences();
724 }
725
726 const MemAccessInfoList &getDependenciesToCheck() const { return CheckDeps; }
727
728private:
730
731 /// Adjust the MemoryLocation so that it represents accesses to this
732 /// location across all iterations, rather than a single one.
733 MemoryLocation adjustLoc(MemoryLocation Loc) const {
734 // The accessed location varies within the loop, but remains within the
735 // underlying object.
737 Loc.AATags.Scope = adjustAliasScopeList(Loc.AATags.Scope);
738 Loc.AATags.NoAlias = adjustAliasScopeList(Loc.AATags.NoAlias);
739 return Loc;
740 }
741
742 /// Drop alias scopes that are only valid within a single loop iteration.
743 MDNode *adjustAliasScopeList(MDNode *ScopeList) const {
744 if (!ScopeList)
745 return nullptr;
746
747 // For the sake of simplicity, drop the whole scope list if any scope is
748 // iteration-local.
749 if (any_of(ScopeList->operands(), [&](Metadata *Scope) {
750 return LoopAliasScopes.contains(cast<MDNode>(Scope));
751 }))
752 return nullptr;
753
754 return ScopeList;
755 }
756
757 /// Go over all memory access and check whether runtime pointer checks
758 /// are needed and build sets of dependency check candidates.
759 void processMemAccesses();
760
761 /// Map of all accesses. Values are the types used to access memory pointed to
762 /// by the pointer.
763 PtrAccessMap Accesses;
764
765 /// The loop being checked.
766 const Loop *TheLoop;
767
768 /// List of accesses that need a further dependence check.
769 MemAccessInfoList CheckDeps;
770
771 /// Set of pointers that are read only.
772 SmallPtrSet<Value*, 16> ReadOnlyPtr;
773
774 /// Batched alias analysis results.
775 BatchAAResults BAA;
776
777 /// An alias set tracker to partition the access set by underlying object and
 778 /// intrinsic property (such as TBAA metadata).
779 AliasSetTracker AST;
780
781 /// The LoopInfo of the loop being checked.
782 const LoopInfo *LI;
783
784 /// Sets of potentially dependent accesses - members of one set share an
785/// underlying pointer. The set "CheckDeps" identifies which sets really need a
786 /// dependence check.
788
789 /// Initial processing of memory accesses determined that we may need
790 /// to add memchecks. Perform the analysis to determine the necessary checks.
791 ///
792/// Note that this is different from isDependencyCheckNeeded. When we retry
793 /// memcheck analysis without dependency checking
794 /// (i.e. FoundNonConstantDistanceDependence), isDependencyCheckNeeded is
795 /// cleared while this remains set if we have potentially dependent accesses.
796 bool IsRTCheckAnalysisNeeded = false;
797
798 /// The SCEV predicate containing all the SCEV-related assumptions.
800
802
803 /// Alias scopes that are declared inside the loop, and as such not valid
804 /// across iterations.
805 SmallPtrSetImpl<MDNode *> &LoopAliasScopes;
806};
807
808} // end anonymous namespace
809
810/// Check whether a pointer can participate in a runtime bounds check.
811/// If \p Assume, try harder to prove that we can compute the bounds of \p Ptr
812/// by adding run-time checks (overflow checks) if necessary.
813static bool hasComputableBounds(PredicatedScalarEvolution &PSE, Value *Ptr,
 814 const SCEV *PtrScev, Loop *L, bool Assume) {
 815 // The bounds for a loop-invariant pointer are trivial.
816 if (PSE.getSE()->isLoopInvariant(PtrScev, L))
817 return true;
818
819 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
820
821 if (!AR && Assume)
822 AR = PSE.getAsAddRec(Ptr);
823
824 if (!AR)
825 return false;
826
827 return AR->isAffine();
828}
829
830/// Check whether a pointer address cannot wrap.
831static bool isNoWrap(PredicatedScalarEvolution &PSE,
 832 const DenseMap<Value *, const SCEV *> &Strides, Value *Ptr, Type *AccessTy,
833 Loop *L) {
834 const SCEV *PtrScev = PSE.getSCEV(Ptr);
835 if (PSE.getSE()->isLoopInvariant(PtrScev, L))
836 return true;
837
838 int64_t Stride = getPtrStride(PSE, AccessTy, Ptr, L, Strides).value_or(0);
839 return Stride == 1 ||
 840 PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
 841}
842
843static void visitPointers(Value *StartPtr, const Loop &InnermostLoop,
844 function_ref<void(Value *)> AddPointer) {
 845 SmallPtrSet<Value *, 8> Visited;
 846 SmallVector<Value *> WorkList;
847 WorkList.push_back(StartPtr);
848
849 while (!WorkList.empty()) {
850 Value *Ptr = WorkList.pop_back_val();
851 if (!Visited.insert(Ptr).second)
852 continue;
853 auto *PN = dyn_cast<PHINode>(Ptr);
854 // SCEV does not look through non-header PHIs inside the loop. Such phis
855 // can be analyzed by adding separate accesses for each incoming pointer
856 // value.
857 if (PN && InnermostLoop.contains(PN->getParent()) &&
858 PN->getParent() != InnermostLoop.getHeader()) {
859 for (const Use &Inc : PN->incoming_values())
860 WorkList.push_back(Inc);
861 } else
862 AddPointer(Ptr);
863 }
864}
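// For example (illustrative): for an if/else in the loop body merged by a
// non-header phi
//   %ptr = phi ptr [ %a, %then ], [ %b, %else ]
// visitPointers calls AddPointer on %a and %b rather than on %ptr itself.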
865
866// Walk back through the IR for a pointer, looking for a select like the
867// following:
868//
869// %offset = select i1 %cmp, i64 %a, i64 %b
870// %addr = getelementptr double, double* %base, i64 %offset
871// %ld = load double, double* %addr, align 8
872//
873// We won't be able to form a single SCEVAddRecExpr from this since the
874// address for each loop iteration depends on %cmp. We could potentially
875// produce multiple valid SCEVAddRecExprs, though, and check all of them for
876// memory safety/aliasing if needed.
877//
878// If we encounter some IR we don't yet handle, or something obviously fine
879// like a constant, then we just add the SCEV for that term to the list passed
880// in by the caller. If we have a node that may potentially yield a valid
881// SCEVAddRecExpr then we decompose it into parts and build the SCEV terms
882// ourselves before adding to the list.
883static void findForkedSCEVs(
884 ScalarEvolution *SE, const Loop *L, Value *Ptr,
886 unsigned Depth) {
887 // If our Value is a SCEVAddRecExpr, loop invariant, not an instruction, or
888 // we've exceeded our limit on recursion, just return whatever we have
889 // regardless of whether it can be used for a forked pointer or not, along
890 // with an indication of whether it might be a poison or undef value.
891 const SCEV *Scev = SE->getSCEV(Ptr);
892 if (isa<SCEVAddRecExpr>(Scev) || L->isLoopInvariant(Ptr) ||
893 !isa<Instruction>(Ptr) || Depth == 0) {
894 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
895 return;
896 }
897
898 Depth--;
899
900 auto UndefPoisonCheck = [](PointerIntPair<const SCEV *, 1, bool> S) {
901 return get<1>(S);
902 };
903
904 auto GetBinOpExpr = [&SE](unsigned Opcode, const SCEV *L, const SCEV *R) {
905 switch (Opcode) {
906 case Instruction::Add:
907 return SE->getAddExpr(L, R);
908 case Instruction::Sub:
909 return SE->getMinusSCEV(L, R);
910 default:
911 llvm_unreachable("Unexpected binary operator when walking ForkedPtrs");
912 }
913 };
914
915 Instruction *I = cast<Instruction>(Ptr);
916 unsigned Opcode = I->getOpcode();
917 switch (Opcode) {
918 case Instruction::GetElementPtr: {
919 auto *GEP = cast<GetElementPtrInst>(I);
920 Type *SourceTy = GEP->getSourceElementType();
921 // We only handle base + single offset GEPs here for now.
922 // Not dealing with preexisting gathers yet, so no vectors.
923 if (I->getNumOperands() != 2 || SourceTy->isVectorTy()) {
924 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(GEP));
925 break;
926 }
929 findForkedSCEVs(SE, L, I->getOperand(0), BaseScevs, Depth);
930 findForkedSCEVs(SE, L, I->getOperand(1), OffsetScevs, Depth);
931
932 // See if we need to freeze our fork...
933 bool NeedsFreeze = any_of(BaseScevs, UndefPoisonCheck) ||
934 any_of(OffsetScevs, UndefPoisonCheck);
935
936 // Check that we only have a single fork, on either the base or the offset.
937 // Copy the SCEV across for the one without a fork in order to generate
938 // the full SCEV for both sides of the GEP.
939 if (OffsetScevs.size() == 2 && BaseScevs.size() == 1)
940 BaseScevs.push_back(BaseScevs[0]);
941 else if (BaseScevs.size() == 2 && OffsetScevs.size() == 1)
942 OffsetScevs.push_back(OffsetScevs[0]);
943 else {
944 ScevList.emplace_back(Scev, NeedsFreeze);
945 break;
946 }
947
948 // Find the pointer type we need to extend to.
949 Type *IntPtrTy = SE->getEffectiveSCEVType(
950 SE->getSCEV(GEP->getPointerOperand())->getType());
951
952 // Find the size of the type being pointed to. We only have a single
953 // index term (guarded above) so we don't need to index into arrays or
954 // structures, just get the size of the scalar value.
955 const SCEV *Size = SE->getSizeOfExpr(IntPtrTy, SourceTy);
956
957 // Scale up the offsets by the size of the type, then add to the bases.
958 const SCEV *Scaled1 = SE->getMulExpr(
959 Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[0]), IntPtrTy));
960 const SCEV *Scaled2 = SE->getMulExpr(
961 Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[1]), IntPtrTy));
962 ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[0]), Scaled1),
963 NeedsFreeze);
964 ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[1]), Scaled2),
965 NeedsFreeze);
966 break;
967 }
968 case Instruction::Select: {
970 // A select means we've found a forked pointer, but we currently only
971 // support a single select per pointer so if there's another behind this
972 // then we just bail out and return the generic SCEV.
973 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
974 findForkedSCEVs(SE, L, I->getOperand(2), ChildScevs, Depth);
975 if (ChildScevs.size() == 2) {
976 ScevList.push_back(ChildScevs[0]);
977 ScevList.push_back(ChildScevs[1]);
978 } else
979 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
980 break;
981 }
982 case Instruction::PHI: {
984 // A phi means we've found a forked pointer, but we currently only
985 // support a single phi per pointer so if there's another behind this
986 // then we just bail out and return the generic SCEV.
987 if (I->getNumOperands() == 2) {
988 findForkedSCEVs(SE, L, I->getOperand(0), ChildScevs, Depth);
989 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
990 }
991 if (ChildScevs.size() == 2) {
992 ScevList.push_back(ChildScevs[0]);
993 ScevList.push_back(ChildScevs[1]);
994 } else
995 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
996 break;
997 }
998 case Instruction::Add:
999 case Instruction::Sub: {
1002 findForkedSCEVs(SE, L, I->getOperand(0), LScevs, Depth);
1003 findForkedSCEVs(SE, L, I->getOperand(1), RScevs, Depth);
1004
1005 // See if we need to freeze our fork...
1006 bool NeedsFreeze =
1007 any_of(LScevs, UndefPoisonCheck) || any_of(RScevs, UndefPoisonCheck);
1008
1009 // Check that we only have a single fork, on either the left or right side.
1010 // Copy the SCEV across for the one without a fork in order to generate
1011 // the full SCEV for both sides of the BinOp.
1012 if (LScevs.size() == 2 && RScevs.size() == 1)
1013 RScevs.push_back(RScevs[0]);
1014 else if (RScevs.size() == 2 && LScevs.size() == 1)
1015 LScevs.push_back(LScevs[0]);
1016 else {
1017 ScevList.emplace_back(Scev, NeedsFreeze);
1018 break;
1019 }
1020
1021 ScevList.emplace_back(
1022 GetBinOpExpr(Opcode, get<0>(LScevs[0]), get<0>(RScevs[0])),
1023 NeedsFreeze);
1024 ScevList.emplace_back(
1025 GetBinOpExpr(Opcode, get<0>(LScevs[1]), get<0>(RScevs[1])),
1026 NeedsFreeze);
1027 break;
1028 }
1029 default:
1030 // Just return the current SCEV if we haven't handled the instruction yet.
1031 LLVM_DEBUG(dbgs() << "ForkedPtr unhandled instruction: " << *I << "\n");
1032 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
1033 break;
1034 }
1035}
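// For example (illustrative): for
//   %offset = select i1 %cmp, i64 %a, i64 %b
//   %addr = getelementptr double, ptr %base, i64 %offset
// the GetElementPtr case above yields the two SCEVs (%base + 8 * %a) and
// (%base + 8 * %b), each marked NeedsFreeze if any of the contributing values
// may be undef or poison.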
1036
1039 const DenseMap<Value *, const SCEV *> &StridesMap, Value *Ptr,
1040 const Loop *L) {
1041 ScalarEvolution *SE = PSE.getSE();
1042 assert(SE->isSCEVable(Ptr->getType()) && "Value is not SCEVable!");
 1043 SmallVector<PointerIntPair<const SCEV *, 1, bool>> Scevs;
 1044 findForkedSCEVs(SE, L, Ptr, Scevs, MaxForkedSCEVDepth);
1045
1046 // For now, we will only accept a forked pointer with two possible SCEVs
1047 // that are either SCEVAddRecExprs or loop invariant.
1048 if (Scevs.size() == 2 &&
1049 (isa<SCEVAddRecExpr>(get<0>(Scevs[0])) ||
1050 SE->isLoopInvariant(get<0>(Scevs[0]), L)) &&
1051 (isa<SCEVAddRecExpr>(get<0>(Scevs[1])) ||
1052 SE->isLoopInvariant(get<0>(Scevs[1]), L))) {
1053 LLVM_DEBUG(dbgs() << "LAA: Found forked pointer: " << *Ptr << "\n");
1054 LLVM_DEBUG(dbgs() << "\t(1) " << *get<0>(Scevs[0]) << "\n");
1055 LLVM_DEBUG(dbgs() << "\t(2) " << *get<0>(Scevs[1]) << "\n");
1056 return Scevs;
1057 }
1058
1059 return {{replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false}};
1060}
1061
1062bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
1063 MemAccessInfo Access, Type *AccessTy,
1064 const DenseMap<Value *, const SCEV *> &StridesMap,
1066 Loop *TheLoop, unsigned &RunningDepId,
1067 unsigned ASId, bool ShouldCheckWrap,
1068 bool Assume) {
1069 Value *Ptr = Access.getPointer();
1070
1072 findForkedPointer(PSE, StridesMap, Ptr, TheLoop);
1073
1074 for (const auto &P : TranslatedPtrs) {
1075 const SCEV *PtrExpr = get<0>(P);
1076 if (!hasComputableBounds(PSE, Ptr, PtrExpr, TheLoop, Assume))
1077 return false;
1078
1079 // When we run after a failing dependency check we have to make sure
1080 // we don't have wrapping pointers.
1081 if (ShouldCheckWrap) {
1082 // Skip wrap checking when translating pointers.
1083 if (TranslatedPtrs.size() > 1)
1084 return false;
1085
1086 if (!isNoWrap(PSE, StridesMap, Ptr, AccessTy, TheLoop)) {
1087 const SCEV *Expr = PSE.getSCEV(Ptr);
1088 if (!Assume || !isa<SCEVAddRecExpr>(Expr))
1089 return false;
 1090 PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
 1091 }
1092 }
1093 // If there's only one option for Ptr, look it up after bounds and wrap
1094 // checking, because assumptions might have been added to PSE.
1095 if (TranslatedPtrs.size() == 1)
1096 TranslatedPtrs[0] = {replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr),
1097 false};
1098 }
1099
1100 for (auto [PtrExpr, NeedsFreeze] : TranslatedPtrs) {
1101 // The id of the dependence set.
1102 unsigned DepId;
1103
1104 if (isDependencyCheckNeeded()) {
1105 Value *Leader = DepCands.getLeaderValue(Access).getPointer();
1106 unsigned &LeaderId = DepSetId[Leader];
1107 if (!LeaderId)
1108 LeaderId = RunningDepId++;
1109 DepId = LeaderId;
1110 } else
1111 // Each access has its own dependence set.
1112 DepId = RunningDepId++;
1113
1114 bool IsWrite = Access.getInt();
1115 RtCheck.insert(TheLoop, Ptr, PtrExpr, AccessTy, IsWrite, DepId, ASId, PSE,
1116 NeedsFreeze);
1117 LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
1118 }
1119
1120 return true;
1121}
1122
1123bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
1124 ScalarEvolution *SE, Loop *TheLoop,
1125 const DenseMap<Value *, const SCEV *> &StridesMap,
1126 Value *&UncomputablePtr, bool ShouldCheckWrap) {
1127 // Find pointers with computable bounds. We are going to use this information
1128 // to place a runtime bound check.
1129 bool CanDoRT = true;
1130
1131 bool MayNeedRTCheck = false;
1132 if (!IsRTCheckAnalysisNeeded) return true;
1133
1134 bool IsDepCheckNeeded = isDependencyCheckNeeded();
1135
 1136 // We assign a consecutive id to accesses from different alias sets.
 1137 // Accesses between different groups don't need to be checked.
1138 unsigned ASId = 0;
1139 for (const auto &AS : AST) {
1140 int NumReadPtrChecks = 0;
1141 int NumWritePtrChecks = 0;
1142 bool CanDoAliasSetRT = true;
1143 ++ASId;
1144 auto ASPointers = AS.getPointers();
1145
 1146 // We assign consecutive ids to accesses from different dependence sets.
1147 // Accesses within the same set don't need a runtime check.
1148 unsigned RunningDepId = 1;
1150
1152
1153 // First, count how many write and read accesses are in the alias set. Also
1154 // collect MemAccessInfos for later.
1156 for (const Value *ConstPtr : ASPointers) {
1157 Value *Ptr = const_cast<Value *>(ConstPtr);
1158 bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
1159 if (IsWrite)
1160 ++NumWritePtrChecks;
1161 else
1162 ++NumReadPtrChecks;
1163 AccessInfos.emplace_back(Ptr, IsWrite);
1164 }
1165
1166 // We do not need runtime checks for this alias set, if there are no writes
1167 // or a single write and no reads.
1168 if (NumWritePtrChecks == 0 ||
1169 (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
1170 assert((ASPointers.size() <= 1 ||
1171 all_of(ASPointers,
1172 [this](const Value *Ptr) {
1173 MemAccessInfo AccessWrite(const_cast<Value *>(Ptr),
1174 true);
1175 return DepCands.findValue(AccessWrite) == DepCands.end();
1176 })) &&
1177 "Can only skip updating CanDoRT below, if all entries in AS "
1178 "are reads or there is at most 1 entry");
1179 continue;
1180 }
1181
1182 for (auto &Access : AccessInfos) {
1183 for (const auto &AccessTy : Accesses[Access]) {
1184 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1185 DepSetId, TheLoop, RunningDepId, ASId,
1186 ShouldCheckWrap, false)) {
1187 LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
1188 << *Access.getPointer() << '\n');
1189 Retries.emplace_back(Access, AccessTy);
1190 CanDoAliasSetRT = false;
1191 }
1192 }
1193 }
1194
1195 // Note that this function computes CanDoRT and MayNeedRTCheck
1196 // independently. For example CanDoRT=false, MayNeedRTCheck=false means that
1197 // we have a pointer for which we couldn't find the bounds but we don't
1198 // actually need to emit any checks so it does not matter.
1199 //
1200 // We need runtime checks for this alias set, if there are at least 2
1201 // dependence sets (in which case RunningDepId > 2) or if we need to re-try
1202 // any bound checks (because in that case the number of dependence sets is
1203 // incomplete).
1204 bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();
1205
1206 // We need to perform run-time alias checks, but some pointers had bounds
1207 // that couldn't be checked.
1208 if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
1209 // Reset the CanDoSetRt flag and retry all accesses that have failed.
1210 // We know that we need these checks, so we can now be more aggressive
1211 // and add further checks if required (overflow checks).
1212 CanDoAliasSetRT = true;
1213 for (const auto &[Access, AccessTy] : Retries) {
1214 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1215 DepSetId, TheLoop, RunningDepId, ASId,
1216 ShouldCheckWrap, /*Assume=*/true)) {
1217 CanDoAliasSetRT = false;
1218 UncomputablePtr = Access.getPointer();
1219 break;
1220 }
1221 }
1222 }
1223
1224 CanDoRT &= CanDoAliasSetRT;
1225 MayNeedRTCheck |= NeedsAliasSetRTCheck;
1226 ++ASId;
1227 }
1228
1229 // If the pointers that we would use for the bounds comparison have different
1230 // address spaces, assume the values aren't directly comparable, so we can't
1231 // use them for the runtime check. We also have to assume they could
1232 // overlap. In the future there should be metadata for whether address spaces
1233 // are disjoint.
1234 unsigned NumPointers = RtCheck.Pointers.size();
1235 for (unsigned i = 0; i < NumPointers; ++i) {
1236 for (unsigned j = i + 1; j < NumPointers; ++j) {
1237 // Only need to check pointers between two different dependency sets.
1238 if (RtCheck.Pointers[i].DependencySetId ==
1239 RtCheck.Pointers[j].DependencySetId)
1240 continue;
1241 // Only need to check pointers in the same alias set.
1242 if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
1243 continue;
1244
1245 Value *PtrI = RtCheck.Pointers[i].PointerValue;
1246 Value *PtrJ = RtCheck.Pointers[j].PointerValue;
1247
1248 unsigned ASi = PtrI->getType()->getPointerAddressSpace();
1249 unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
1250 if (ASi != ASj) {
1251 LLVM_DEBUG(
1252 dbgs() << "LAA: Runtime check would require comparison between"
1253 " different address spaces\n");
1254 return false;
1255 }
1256 }
1257 }
1258
1259 if (MayNeedRTCheck && CanDoRT)
1260 RtCheck.generateChecks(DepCands, IsDepCheckNeeded);
1261
1262 LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
1263 << " pointer comparisons.\n");
1264
1265 // If we can do run-time checks, but there are no checks, no runtime checks
1266 // are needed. This can happen when all pointers point to the same underlying
1267 // object for example.
1268 RtCheck.Need = CanDoRT ? RtCheck.getNumberOfChecks() != 0 : MayNeedRTCheck;
1269
1270 bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
1271 if (!CanDoRTIfNeeded)
1272 RtCheck.reset();
1273 return CanDoRTIfNeeded;
1274}
1275
1276void AccessAnalysis::processMemAccesses() {
1277 // We process the set twice: first we process read-write pointers, last we
1278 // process read-only pointers. This allows us to skip dependence tests for
1279 // read-only pointers.
1280
1281 LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
1282 LLVM_DEBUG(dbgs() << " AST: "; AST.dump());
1283 LLVM_DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
1284 LLVM_DEBUG({
1285 for (const auto &[A, _] : Accesses)
1286 dbgs() << "\t" << *A.getPointer() << " ("
1287 << (A.getInt() ? "write"
1288 : (ReadOnlyPtr.count(A.getPointer()) ? "read-only"
1289 : "read"))
1290 << ")\n";
1291 });
1292
1293 // The AliasSetTracker has nicely partitioned our pointers by metadata
1294 // compatibility and potential for underlying-object overlap. As a result, we
1295 // only need to check for potential pointer dependencies within each alias
1296 // set.
1297 for (const auto &AS : AST) {
 1298 // Note that both the alias-set tracker and the alias sets themselves use
1299 // ordered collections internally and so the iteration order here is
1300 // deterministic.
1301 auto ASPointers = AS.getPointers();
1302
1303 bool SetHasWrite = false;
1304
1305 // Map of pointers to last access encountered.
1306 typedef DenseMap<const Value*, MemAccessInfo> UnderlyingObjToAccessMap;
1307 UnderlyingObjToAccessMap ObjToLastAccess;
1308
1309 // Set of access to check after all writes have been processed.
1310 PtrAccessMap DeferredAccesses;
1311
1312 // Iterate over each alias set twice, once to process read/write pointers,
1313 // and then to process read-only pointers.
1314 for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
1315 bool UseDeferred = SetIteration > 0;
1316 PtrAccessMap &S = UseDeferred ? DeferredAccesses : Accesses;
1317
1318 for (const Value *ConstPtr : ASPointers) {
1319 Value *Ptr = const_cast<Value *>(ConstPtr);
1320
1321 // For a single memory access in AliasSetTracker, Accesses may contain
1322 // both read and write, and they both need to be handled for CheckDeps.
1323 for (const auto &[AC, _] : S) {
1324 if (AC.getPointer() != Ptr)
1325 continue;
1326
1327 bool IsWrite = AC.getInt();
1328
1329 // If we're using the deferred access set, then it contains only
1330 // reads.
1331 bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
1332 if (UseDeferred && !IsReadOnlyPtr)
1333 continue;
1334 // Otherwise, the pointer must be in the PtrAccessSet, either as a
1335 // read or a write.
1336 assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
1337 S.count(MemAccessInfo(Ptr, false))) &&
1338 "Alias-set pointer not in the access set?");
1339
1340 MemAccessInfo Access(Ptr, IsWrite);
1341 DepCands.insert(Access);
1342
1343 // Memorize read-only pointers for later processing and skip them in
1344 // the first round (they need to be checked after we have seen all
 1345 // write pointers). Note: we also mark pointers that are not
1346 // consecutive as "read-only" pointers (so that we check
1347 // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
1348 if (!UseDeferred && IsReadOnlyPtr) {
1349 // We only use the pointer keys, the types vector values don't
1350 // matter.
1351 DeferredAccesses.insert({Access, {}});
1352 continue;
1353 }
1354
1355 // If this is a write - check other reads and writes for conflicts. If
1356 // this is a read only check other writes for conflicts (but only if
1357 // there is no other write to the ptr - this is an optimization to
1358 // catch "a[i] = a[i] + " without having to do a dependence check).
1359 if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
1360 CheckDeps.push_back(Access);
1361 IsRTCheckAnalysisNeeded = true;
1362 }
1363
1364 if (IsWrite)
1365 SetHasWrite = true;
1366
1367 // Create sets of pointers connected by a shared alias set and
1368 // underlying object.
1369 typedef SmallVector<const Value *, 16> ValueVector;
1370 ValueVector TempObjects;
1371
1372 UnderlyingObjects[Ptr] = {};
1373 SmallVector<const Value *, 16> &UOs = UnderlyingObjects[Ptr];
1374 ::getUnderlyingObjects(Ptr, UOs, LI);
1376 << "Underlying objects for pointer " << *Ptr << "\n");
1377 for (const Value *UnderlyingObj : UOs) {
 1378 // nullptr never aliases; don't join sets for pointers that have "null"
1379 // in their UnderlyingObjects list.
1380 if (isa<ConstantPointerNull>(UnderlyingObj) &&
1382 TheLoop->getHeader()->getParent(),
1383 UnderlyingObj->getType()->getPointerAddressSpace()))
1384 continue;
1385
1386 UnderlyingObjToAccessMap::iterator Prev =
1387 ObjToLastAccess.find(UnderlyingObj);
1388 if (Prev != ObjToLastAccess.end())
1389 DepCands.unionSets(Access, Prev->second);
1390
1391 ObjToLastAccess[UnderlyingObj] = Access;
1392 LLVM_DEBUG(dbgs() << " " << *UnderlyingObj << "\n");
1393 }
1394 }
1395 }
1396 }
1397 }
1398}
1399
1400/// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
1401/// i.e. monotonically increasing/decreasing.
1402static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
1403 PredicatedScalarEvolution &PSE, const Loop *L) {
1404
1405 // FIXME: This should probably only return true for NUW.
 1406 if (AR->getNoWrapFlags(SCEV::NoWrapMask))
 1407 return true;
1408
 1409 if (PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
 1410 return true;
1411
1412 // Scalar evolution does not propagate the non-wrapping flags to values that
1413 // are derived from a non-wrapping induction variable because non-wrapping
1414 // could be flow-sensitive.
1415 //
1416 // Look through the potentially overflowing instruction to try to prove
1417 // non-wrapping for the *specific* value of Ptr.
1418
1419 // The arithmetic implied by an inbounds GEP can't overflow.
1420 const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
1421 if (!GEP || !GEP->isInBounds())
1422 return false;
1423
1424 // Make sure there is only one non-const index and analyze that.
1425 Value *NonConstIndex = nullptr;
1426 for (Value *Index : GEP->indices())
1427 if (!isa<ConstantInt>(Index)) {
1428 if (NonConstIndex)
1429 return false;
1430 NonConstIndex = Index;
1431 }
1432 if (!NonConstIndex)
1433 // The recurrence is on the pointer, ignore for now.
1434 return false;
1435
1436 // The index in GEP is signed. It is non-wrapping if it's derived from a NSW
1437 // AddRec using a NSW operation.
1438 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
1439 if (OBO->hasNoSignedWrap() &&
 1440 // Assume a constant for the other operand so that the AddRec can be
1441 // easily found.
1442 isa<ConstantInt>(OBO->getOperand(1))) {
1443 const SCEV *OpScev = PSE.getSCEV(OBO->getOperand(0));
1444
1445 if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
1446 return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
1447 }
1448
1449 return false;
1450}
1451
1452/// Check whether the access through \p Ptr has a constant stride.
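/// For example (illustrative): for a 4-byte access type, a pointer SCEV of
/// {%p,+,4} yields stride 1, {%p,+,-4} yields stride -1, and {%p,+,6} yields
/// std::nullopt because the step is not a multiple of the access size.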
1453std::optional<int64_t>
1454llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr,
 1455 const Loop *Lp,
1456 const DenseMap<Value *, const SCEV *> &StridesMap,
1457 bool Assume, bool ShouldCheckWrap) {
1458 const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);
1459 if (PSE.getSE()->isLoopInvariant(PtrScev, Lp))
1460 return {0};
1461
1462 Type *Ty = Ptr->getType();
1463 assert(Ty->isPointerTy() && "Unexpected non-ptr");
1464 if (isa<ScalableVectorType>(AccessTy)) {
1465 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
1466 << "\n");
1467 return std::nullopt;
1468 }
1469
1470 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
1471 if (Assume && !AR)
1472 AR = PSE.getAsAddRec(Ptr);
1473
1474 if (!AR) {
1475 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
1476 << " SCEV: " << *PtrScev << "\n");
1477 return std::nullopt;
1478 }
1479
1480 // The access function must stride over the innermost loop.
1481 if (Lp != AR->getLoop()) {
1482 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
1483 << *Ptr << " SCEV: " << *AR << "\n");
1484 return std::nullopt;
1485 }
1486
1487 // Check the step is constant.
1488 const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
1489
1490 // Calculate the pointer stride and check if it is constant.
1491 const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
1492 if (!C) {
1493 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
1494 << " SCEV: " << *AR << "\n");
1495 return std::nullopt;
1496 }
1497
1498 const auto &DL = Lp->getHeader()->getDataLayout();
1499 TypeSize AllocSize = DL.getTypeAllocSize(AccessTy);
1500 int64_t Size = AllocSize.getFixedValue();
1501 const APInt &APStepVal = C->getAPInt();
1502
1503 // Huge step value - give up.
1504 if (APStepVal.getBitWidth() > 64)
1505 return std::nullopt;
1506
1507 int64_t StepVal = APStepVal.getSExtValue();
1508
1509 // Strided access.
1510 int64_t Stride = StepVal / Size;
1511 int64_t Rem = StepVal % Size;
1512 if (Rem)
1513 return std::nullopt;
1514
1515 if (!ShouldCheckWrap)
1516 return Stride;
1517
1518 // The address calculation must not wrap. Otherwise, a dependence could be
1519 // inverted.
1520 if (isNoWrapAddRec(Ptr, AR, PSE, Lp))
1521 return Stride;
1522
 1523 // An inbounds getelementptr that is an AddRec with a unit stride
1524 // cannot wrap per definition. If it did, the result would be poison
1525 // and any memory access dependent on it would be immediate UB
1526 // when executed.
1527 if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
1528 GEP && GEP->isInBounds() && (Stride == 1 || Stride == -1))
1529 return Stride;
1530
 1531 // If the null pointer is undefined, then an access sequence which would
1532 // otherwise access it can be assumed not to unsigned wrap. Note that this
1533 // assumes the object in memory is aligned to the natural alignment.
1534 unsigned AddrSpace = Ty->getPointerAddressSpace();
1535 if (!NullPointerIsDefined(Lp->getHeader()->getParent(), AddrSpace) &&
1536 (Stride == 1 || Stride == -1))
1537 return Stride;
1538
1539 if (Assume) {
 1540 PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
 1541 LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap:\n"
1542 << "LAA: Pointer: " << *Ptr << "\n"
1543 << "LAA: SCEV: " << *AR << "\n"
1544 << "LAA: Added an overflow assumption\n");
1545 return Stride;
1546 }
1547 LLVM_DEBUG(
1548 dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
1549 << *Ptr << " SCEV: " << *AR << "\n");
1550 return std::nullopt;
1551}
1552
1553std::optional<int> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA,
1554 Type *ElemTyB, Value *PtrB,
1555 const DataLayout &DL,
1556 ScalarEvolution &SE, bool StrictCheck,
1557 bool CheckType) {
1558 assert(PtrA && PtrB && "Expected non-nullptr pointers.");
1559
1560 // Make sure that A and B are different pointers.
1561 if (PtrA == PtrB)
1562 return 0;
1563
1564 // Make sure that the element types are the same if required.
1565 if (CheckType && ElemTyA != ElemTyB)
1566 return std::nullopt;
1567
1568 unsigned ASA = PtrA->getType()->getPointerAddressSpace();
1569 unsigned ASB = PtrB->getType()->getPointerAddressSpace();
1570
1571 // Check that the address spaces match.
1572 if (ASA != ASB)
1573 return std::nullopt;
1574 unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
1575
1576 APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
 1577 const Value *PtrA1 =
 1578 PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
 1579 const Value *PtrB1 =
 1580 PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);
 1581
1582 int Val;
1583 if (PtrA1 == PtrB1) {
1584 // Retrieve the address space again as pointer stripping now tracks through
1585 // `addrspacecast`.
1586 ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
1587 ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
1588 // Check that the address spaces match and that the pointers are valid.
1589 if (ASA != ASB)
1590 return std::nullopt;
1591
1592 IdxWidth = DL.getIndexSizeInBits(ASA);
1593 OffsetA = OffsetA.sextOrTrunc(IdxWidth);
1594 OffsetB = OffsetB.sextOrTrunc(IdxWidth);
1595
1596 OffsetB -= OffsetA;
1597 Val = OffsetB.getSExtValue();
1598 } else {
1599 // Otherwise compute the distance with SCEV between the base pointers.
1600 const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
1601 const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
1602 const auto *Diff =
1603 dyn_cast<SCEVConstant>(SE.getMinusSCEV(PtrSCEVB, PtrSCEVA));
1604 if (!Diff)
1605 return std::nullopt;
1606 Val = Diff->getAPInt().getSExtValue();
1607 }
1608 int Size = DL.getTypeStoreSize(ElemTyA);
1609 int Dist = Val / Size;
1610
1611 // Ensure that the calculated distance matches the type-based one after
1612 // stripping bitcasts from the provided pointers.
1613 if (!StrictCheck || Dist * Size == Val)
1614 return Dist;
1615 return std::nullopt;
1616}
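// Editor's note (illustrative sketch, not part of the upstream source): given
// hypothetical pointers P and P + 12 bytes with ElemTy = i32, the constant
// SCEV difference is Val = 12 and Size = 4, so the function returns Dist = 3
// elements. With StrictCheck enabled, a byte difference of 10 would be
// rejected because Dist * Size (2 * 4) != 10.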
1617
1618 bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
1619 const DataLayout &DL, ScalarEvolution &SE,
1620 SmallVectorImpl<unsigned> &SortedIndices) {
1621 assert(llvm::all_of(
1622 VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
1623 "Expected list of pointer operands.");
1624 // Walk over the pointers, and map each of them to an offset relative to the
1625 // first pointer in the array.
1626 Value *Ptr0 = VL[0];
1627
1628 using DistOrdPair = std::pair<int64_t, int>;
1629 auto Compare = llvm::less_first();
1630 std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
1631 Offsets.emplace(0, 0);
1632 bool IsConsecutive = true;
1633 for (auto [Idx, Ptr] : drop_begin(enumerate(VL))) {
1634 std::optional<int> Diff = getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
1635 /*StrictCheck=*/true);
1636 if (!Diff)
1637 return false;
1638
1639 // Check if the pointer with the same offset is found.
1640 int64_t Offset = *Diff;
1641 auto [It, IsInserted] = Offsets.emplace(Offset, Idx);
1642 if (!IsInserted)
1643 return false;
1644 // Consecutive order if the inserted element is the last one.
1645 IsConsecutive &= std::next(It) == Offsets.end();
1646 }
1647 SortedIndices.clear();
1648 if (!IsConsecutive) {
1649 // Fill SortedIndices array only if it is non-consecutive.
1650 SortedIndices.resize(VL.size());
1651 for (auto [Idx, Off] : enumerate(Offsets))
1652 SortedIndices[Idx] = Off.second;
1653 }
1654 return true;
1655}
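// Editor's note (illustrative sketch, not part of the upstream source): for
// i32 pointers at byte offsets {0, 8, 4} from VL[0], the element offsets are
// {0, 2, 1}. The second insertion (offset 1) does not land at the end of the
// ordered set, so IsConsecutive becomes false and SortedIndices is filled with
// {0, 2, 1}, i.e. VL[0], VL[2], VL[1] is the increasing memory order.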
1656
1657/// Returns true if the memory operations \p A and \p B are consecutive.
1658 bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
1659 ScalarEvolution &SE, bool CheckType) {
1660 Value *PtrA = getLoadStorePointerOperand(A);
1661 Value *PtrB = getLoadStorePointerOperand(B);
1662 if (!PtrA || !PtrB)
1663 return false;
1664 Type *ElemTyA = getLoadStoreType(A);
1665 Type *ElemTyB = getLoadStoreType(B);
1666 std::optional<int> Diff =
1667 getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
1668 /*StrictCheck=*/true, CheckType);
1669 return Diff && *Diff == 1;
1670}
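// Editor's note (illustrative sketch, not part of the upstream source): two
// i32 loads from %p and %p + 4 bytes have a pointer difference of exactly one
// element, so isConsecutiveAccess returns true; a gap of 8 bytes (Diff == 2)
// or a mismatched element type with CheckType set makes it return false.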
1671
1672 void MemoryDepChecker::addAccess(StoreInst *SI) {
1673 visitPointers(SI->getPointerOperand(), *InnermostLoop,
1674 [this, SI](Value *Ptr) {
1675 Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
1676 InstMap.push_back(SI);
1677 ++AccessIdx;
1678 });
1679}
1680
1681 void MemoryDepChecker::addAccess(LoadInst *LI) {
1682 visitPointers(LI->getPointerOperand(), *InnermostLoop,
1683 [this, LI](Value *Ptr) {
1684 Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
1685 InstMap.push_back(LI);
1686 ++AccessIdx;
1687 });
1688}
1689
1690 MemoryDepChecker::VectorizationSafetyStatus
1691 MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
1692 switch (Type) {
1693 case NoDep:
1694 case Forward:
1695 case BackwardVectorizable:
1696 return VectorizationSafetyStatus::Safe;
1697
1698 case Unknown:
1699 return VectorizationSafetyStatus::PossiblySafeWithRtChecks;
1700 case ForwardButPreventsForwarding:
1701 case Backward:
1702 case BackwardVectorizableButPreventsForwarding:
1703 case IndirectUnsafe:
1704 return VectorizationSafetyStatus::Unsafe;
1705 }
1706 llvm_unreachable("unexpected DepType!");
1707}
1708
1709 bool MemoryDepChecker::Dependence::isBackward() const {
1710 switch (Type) {
1711 case NoDep:
1712 case Forward:
1713 case ForwardButPreventsForwarding:
1714 case Unknown:
1715 case IndirectUnsafe:
1716 return false;
1717
1718 case BackwardVectorizable:
1719 case Backward:
1720 case BackwardVectorizableButPreventsForwarding:
1721 return true;
1722 }
1723 llvm_unreachable("unexpected DepType!");
1724}
1725
1726 bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
1727 return isBackward() || Type == Unknown || Type == IndirectUnsafe;
1728}
1729
1730 bool MemoryDepChecker::Dependence::isForward() const {
1731 switch (Type) {
1732 case Forward:
1733 case ForwardButPreventsForwarding:
1734 return true;
1735
1736 case NoDep:
1737 case Unknown:
1738 case BackwardVectorizable:
1739 case Backward:
1740 case BackwardVectorizableButPreventsForwarding:
1741 case IndirectUnsafe:
1742 return false;
1743 }
1744 llvm_unreachable("unexpected DepType!");
1745}
1746
1747bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
1748 uint64_t TypeByteSize) {
1749 // If loads occur at a distance that is not a multiple of a feasible vector
1750 // factor, store-to-load forwarding does not take place.
1751 // Positive dependences might cause trouble because vectorizing them might
1752 // prevent store-to-load forwarding, making vectorized code run a lot slower.
1753 // E.g. a[i] = a[i-3] ^ a[i-8];
1754 // The stores to a[i:i+1] don't align with the stores to a[i-3:i-2] and
1755 // hence on a typical architecture store-to-load forwarding does not take
1756 // place. Vectorizing such cases does not make sense.
1757 // Store-load forwarding distance.
1758
1759 // After this many iterations store-to-load forwarding conflicts should not
1760 // cause any slowdowns.
1761 const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
1762 // Maximum vector factor.
1763 uint64_t MaxVFWithoutSLForwardIssues = std::min(
1764 VectorizerParams::MaxVectorWidth * TypeByteSize, MinDepDistBytes);
1765
1766 // Compute the smallest VF at which the store and load would be misaligned.
1767 for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
1768 VF *= 2) {
1769 // If the number of vector iterations between the store and the load is
1770 // small, we could incur conflicts.
1771 if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
1772 MaxVFWithoutSLForwardIssues = (VF >> 1);
1773 break;
1774 }
1775 }
1776
1777 if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
1778 LLVM_DEBUG(
1779 dbgs() << "LAA: Distance " << Distance
1780 << " that could cause a store-load forwarding conflict\n");
1781 return true;
1782 }
1783
1784 if (MaxVFWithoutSLForwardIssues < MinDepDistBytes &&
1785 MaxVFWithoutSLForwardIssues !=
1786 VectorizerParams::MaxVectorWidth * TypeByteSize)
1787 MinDepDistBytes = MaxVFWithoutSLForwardIssues;
1788 return false;
1789}
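// Editor's note (worked example, not part of the upstream source): with
// TypeByteSize = 4 and Distance = 12 bytes, the forwarding threshold is
// 8 * 4 = 32 iterations. The first candidate VF is 8 bytes: 12 % 8 != 0 and
// 12 / 8 = 1 < 32, so MaxVFWithoutSLForwardIssues drops to 4 bytes, which is
// below 2 * TypeByteSize, and the function reports a potential store-to-load
// forwarding conflict.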
1790
1791void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
1792 if (Status < S)
1793 Status = S;
1794}
1795
1796/// Given a dependence-distance \p Dist between two
1797/// memory accesses that have strides in the same direction, whose absolute
1798/// maximum stride is given in \p MaxStride, and that have the same type size
1799/// \p TypeByteSize, in a loop whose maximum backedge taken count is
1800/// \p MaxBTC, check if it is possible to prove statically that the dependence
1801/// distance is larger than the range that the accesses will travel through the
1802/// execution of the loop. If so, return true; false otherwise. This is useful
1803/// for example in loops such as the following (PR31098):
1804/// for (i = 0; i < D; ++i) {
1805/// = out[i];
1806/// out[i+D] =
1807/// }
1808 static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
1809 const SCEV &MaxBTC, const SCEV &Dist,
1810 uint64_t MaxStride,
1811 uint64_t TypeByteSize) {
1812
1813 // If we can prove that
1814 // (**) |Dist| > MaxBTC * Step
1815 // where Step is the absolute stride of the memory accesses in bytes,
1816 // then there is no dependence.
1817 //
1818 // Rationale:
1819 // We basically want to check if the absolute distance (|Dist/Step|)
1820 // is >= the loop iteration count (or > MaxBTC).
1821 // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
1822 // Section 4.2.1); Note, that for vectorization it is sufficient to prove
1823 // that the dependence distance is >= VF; This is checked elsewhere.
1824 // But in some cases we can prune dependence distances early, and
1825 // even before selecting the VF, and without a runtime test, by comparing
1826 // the distance against the loop iteration count. Since the vectorized code
1827 // will be executed only if LoopCount >= VF, proving distance >= LoopCount
1828 // also guarantees that distance >= VF.
1829 //
1830 const uint64_t ByteStride = MaxStride * TypeByteSize;
1831 const SCEV *Step = SE.getConstant(MaxBTC.getType(), ByteStride);
1832 const SCEV *Product = SE.getMulExpr(&MaxBTC, Step);
1833
1834 const SCEV *CastedDist = &Dist;
1835 const SCEV *CastedProduct = Product;
1836 uint64_t DistTypeSizeBits = DL.getTypeSizeInBits(Dist.getType());
1837 uint64_t ProductTypeSizeBits = DL.getTypeSizeInBits(Product->getType());
1838
1839 // The dependence distance can be positive/negative, so we sign extend Dist;
1840 // The multiplication of the absolute stride in bytes and the
1841 // backedgeTakenCount is non-negative, so we zero extend Product.
1842 if (DistTypeSizeBits > ProductTypeSizeBits)
1843 CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
1844 else
1845 CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());
1846
1847 // Is Dist - (MaxBTC * Step) > 0 ?
1848 // (If so, then we have proven (**) because |Dist| >= Dist)
1849 const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
1850 if (SE.isKnownPositive(Minus))
1851 return true;
1852
1853 // Second try: Is -Dist - (MaxBTC * Step) > 0 ?
1854 // (If so, then we have proven (**) because |Dist| >= -1*Dist)
1855 const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
1856 Minus = SE.getMinusSCEV(NegDist, CastedProduct);
1857 return SE.isKnownPositive(Minus);
1858}
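// Editor's note (worked example, not part of the upstream source): for the
// PR31098-style loop above with D = 512 i32 elements, the dependence distance
// is 512 * 4 = 2048 bytes, MaxBTC = 511, MaxStride = 1 and TypeByteSize = 4,
// so MaxBTC * Step = 2044 and Dist - 2044 = 4 is known positive; the accesses
// are proven independent without a runtime check.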
1859
1860/// Check the dependence for two accesses with the same stride \p Stride.
1861/// \p Distance is the positive distance and \p TypeByteSize is type size in
1862/// bytes.
1863///
1864/// \returns true if they are independent.
1865 static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
1866 uint64_t TypeByteSize) {
1867 assert(Stride > 1 && "The stride must be greater than 1");
1868 assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
1869 assert(Distance > 0 && "The distance must be non-zero");
1870
1871 // Skip if the distance is not a multiple of the type byte size.
1872 if (Distance % TypeByteSize)
1873 return false;
1874
1875 uint64_t ScaledDist = Distance / TypeByteSize;
1876
1877 // No dependence if the scaled distance is not a multiple of the stride.
1878 // E.g.
1879 // for (i = 0; i < 1024 ; i += 4)
1880 // A[i+2] = A[i] + 1;
1881 //
1882 // Two accesses in memory (scaled distance is 2, stride is 4):
1883 // | A[0] | | | | A[4] | | | |
1884 // | | | A[2] | | | | A[6] | |
1885 //
1886 // E.g.
1887 // for (i = 0; i < 1024 ; i += 3)
1888 // A[i+4] = A[i] + 1;
1889 //
1890 // Two accesses in memory (scaled distance is 4, stride is 3):
1891 // | A[0] | | | A[3] | | | A[6] | | |
1892 // | | | | | A[4] | | | A[7] | |
1893 return ScaledDist % Stride;
1894}
1895
1896 std::variant<MemoryDepChecker::Dependence::DepType,
1897 MemoryDepChecker::DepDistanceStrideAndSizeInfo>
1898 MemoryDepChecker::getDependenceDistanceStrideAndSize(
1899 const MemAccessInfo &A, Instruction *AInst, const MemAccessInfo &B,
1900 Instruction *BInst) {
1901 const auto &DL = InnermostLoop->getHeader()->getDataLayout();
1902 auto &SE = *PSE.getSE();
1903 const auto &[APtr, AIsWrite] = A;
1904 const auto &[BPtr, BIsWrite] = B;
1905
1906 // Two reads are independent.
1907 if (!AIsWrite && !BIsWrite)
1908 return MemoryDepChecker::Dependence::NoDep;
1909
1910 Type *ATy = getLoadStoreType(AInst);
1911 Type *BTy = getLoadStoreType(BInst);
1912
1913 // We cannot check pointers in different address spaces.
1914 if (APtr->getType()->getPointerAddressSpace() !=
1915 BPtr->getType()->getPointerAddressSpace())
1916 return MemoryDepChecker::Dependence::Unknown;
1917
1918 std::optional<int64_t> StrideAPtr =
1919 getPtrStride(PSE, ATy, APtr, InnermostLoop, SymbolicStrides, true, true);
1920 std::optional<int64_t> StrideBPtr =
1921 getPtrStride(PSE, BTy, BPtr, InnermostLoop, SymbolicStrides, true, true);
1922
1923 const SCEV *Src = PSE.getSCEV(APtr);
1924 const SCEV *Sink = PSE.getSCEV(BPtr);
1925
1926 // If the induction step is negative we have to invert source and sink of the
1927 // dependence when measuring the distance between them. We should not swap
1928 // AIsWrite with BIsWrite, as their uses expect them in program order.
1929 if (StrideAPtr && *StrideAPtr < 0) {
1930 std::swap(Src, Sink);
1931 std::swap(AInst, BInst);
1932 std::swap(StrideAPtr, StrideBPtr);
1933 }
1934
1935 const SCEV *Dist = SE.getMinusSCEV(Sink, Src);
1936
1937 LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
1938 << "\n");
1939 LLVM_DEBUG(dbgs() << "LAA: Distance for " << *AInst << " to " << *BInst
1940 << ": " << *Dist << "\n");
1941
1942 // Check if we can prove that Sink only accesses memory after Src's end or
1943 // vice versa. At the moment this is limited to cases where either source or
1944 // sink are loop invariant to avoid compile-time increases. This is not
1945 // required for correctness.
1946 if (SE.isLoopInvariant(Src, InnermostLoop) ||
1947 SE.isLoopInvariant(Sink, InnermostLoop)) {
1948 const auto &[SrcStart, SrcEnd] =
1949 getStartAndEndForAccess(InnermostLoop, Src, ATy, PSE, PointerBounds);
1950 const auto &[SinkStart, SinkEnd] =
1951 getStartAndEndForAccess(InnermostLoop, Sink, BTy, PSE, PointerBounds);
1952 if (!isa<SCEVCouldNotCompute>(SrcStart) &&
1953 !isa<SCEVCouldNotCompute>(SrcEnd) &&
1954 !isa<SCEVCouldNotCompute>(SinkStart) &&
1955 !isa<SCEVCouldNotCompute>(SinkEnd)) {
1956 if (SE.isKnownPredicate(CmpInst::ICMP_ULE, SrcEnd, SinkStart))
1957 return MemoryDepChecker::Dependence::NoDep;
1958 if (SE.isKnownPredicate(CmpInst::ICMP_ULE, SinkEnd, SrcStart))
1959 return MemoryDepChecker::Dependence::NoDep;
1960 }
1961 }
1962
1963 // Need accesses with constant strides and the same direction for further
1964 // dependence analysis. We don't want to vectorize "A[B[i]] += ..." and
1965 // similar code or pointer arithmetic that could wrap in the address space.
1966
1967 // If either Src or Sink are not strided (i.e. not a non-wrapping AddRec) and
1968 // not loop-invariant (stride will be 0 in that case), we cannot analyze the
1969 // dependence further and also cannot generate runtime checks.
1970 if (!StrideAPtr || !StrideBPtr) {
1971 LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
1972 return MemoryDepChecker::Dependence::IndirectUnsafe;
1973 }
1974
1975 int64_t StrideAPtrInt = *StrideAPtr;
1976 int64_t StrideBPtrInt = *StrideBPtr;
1977 LLVM_DEBUG(dbgs() << "LAA: Src induction step: " << StrideAPtrInt
1978 << " Sink induction step: " << StrideBPtrInt << "\n");
1979 // At least one of Src and Sink is loop invariant and the other is strided
1980 // or invariant. We can generate a runtime check to disambiguate the accesses.
1981 if (StrideAPtrInt == 0 || StrideBPtrInt == 0)
1982 return MemoryDepChecker::Dependence::Unknown;
1983
1984 // Both Src and Sink have a constant stride, check if they are in the same
1985 // direction.
1986 if ((StrideAPtrInt > 0 && StrideBPtrInt < 0) ||
1987 (StrideAPtrInt < 0 && StrideBPtrInt > 0)) {
1988 LLVM_DEBUG(
1989 dbgs() << "Pointer access with strides in different directions\n");
1990 return MemoryDepChecker::Dependence::Unknown;
1991 }
1992
1993 uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
1994 bool HasSameSize =
1995 DL.getTypeStoreSizeInBits(ATy) == DL.getTypeStoreSizeInBits(BTy);
1996 if (!HasSameSize)
1997 TypeByteSize = 0;
1998 return DepDistanceStrideAndSizeInfo(Dist, std::abs(StrideAPtrInt),
1999 std::abs(StrideBPtrInt), TypeByteSize,
2000 AIsWrite, BIsWrite);
2001}
2002
2003 MemoryDepChecker::Dependence::DepType
2004 MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
2005 const MemAccessInfo &B, unsigned BIdx) {
2006 assert(AIdx < BIdx && "Must pass arguments in program order");
2007
2008 // Get the dependence distance, stride, type size and what access writes for
2009 // the dependence between A and B.
2010 auto Res =
2011 getDependenceDistanceStrideAndSize(A, InstMap[AIdx], B, InstMap[BIdx]);
2012 if (std::holds_alternative<Dependence::DepType>(Res))
2013 return std::get<Dependence::DepType>(Res);
2014
2015 auto &[Dist, StrideA, StrideB, TypeByteSize, AIsWrite, BIsWrite] =
2016 std::get<DepDistanceStrideAndSizeInfo>(Res);
2017 bool HasSameSize = TypeByteSize > 0;
2018
2019 std::optional<uint64_t> CommonStride =
2020 StrideA == StrideB ? std::make_optional(StrideA) : std::nullopt;
2021 if (isa<SCEVCouldNotCompute>(Dist)) {
2022 // TODO: Relax requirement that there is a common stride to retry with
2023 // non-constant distance dependencies.
2024 FoundNonConstantDistanceDependence |= CommonStride.has_value();
2025 LLVM_DEBUG(dbgs() << "LAA: Dependence because of uncomputable distance.\n");
2026 return Dependence::Unknown;
2027 }
2028
2029 ScalarEvolution &SE = *PSE.getSE();
2030 auto &DL = InnermostLoop->getHeader()->getDataLayout();
2031 uint64_t MaxStride = std::max(StrideA, StrideB);
2032
2033 // If the distance between the accesses is larger than their maximum absolute
2034 // stride multiplied by the symbolic maximum backedge taken count (which is an
2035 // upper bound of the number of iterations), the accesses are independent, i.e.
2036 // they are far enough apart that they won't access the same location
2037 // across all loop iterations.
2038 if (HasSameSize && isSafeDependenceDistance(
2039 DL, SE, *(PSE.getSymbolicMaxBackedgeTakenCount()),
2040 *Dist, MaxStride, TypeByteSize))
2041 return Dependence::NoDep;
2042
2043 const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
2044
2045 // Attempt to prove strided accesses independent.
2046 if (C) {
2047 const APInt &Val = C->getAPInt();
2048 int64_t Distance = Val.getSExtValue();
2049
2050 // If the distance between accesses and their strides are known constants,
2051 // check whether the accesses interlace each other.
2052 if (std::abs(Distance) > 0 && CommonStride && *CommonStride > 1 &&
2053 HasSameSize &&
2054 areStridedAccessesIndependent(std::abs(Distance), *CommonStride,
2055 TypeByteSize)) {
2056 LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
2057 return Dependence::NoDep;
2058 }
2059 } else
2060 Dist = SE.applyLoopGuards(Dist, InnermostLoop);
2061
2062 // Negative distances are not plausible dependencies.
2063 if (SE.isKnownNonPositive(Dist)) {
2064 if (SE.isKnownNonNegative(Dist)) {
2065 if (HasSameSize) {
2066 // Write to the same location with the same size.
2067 return Dependence::Forward;
2068 }
2069 LLVM_DEBUG(dbgs() << "LAA: possibly zero dependence difference but "
2070 "different type sizes\n");
2071 return Dependence::Unknown;
2072 }
2073
2074 bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
2075 // Check if the first access writes to a location that is read in a later
2076 // iteration, where the distance between them is not a multiple of a vector
2077 // factor and relatively small.
2078 //
2079 // NOTE: There is no need to update MaxSafeVectorWidthInBits after call to
2080 // couldPreventStoreLoadForward, even if it changed MinDepDistBytes, since a
2081 // forward dependency will allow vectorization using any width.
2082
2083 if (IsTrueDataDependence && EnableForwardingConflictDetection) {
2084 if (!C) {
2085 // TODO: FoundNonConstantDistanceDependence is used as a necessary
2086 // condition to consider retrying with runtime checks. Historically, we
2087 // did not set it when strides were different but there is no inherent
2088 // reason to.
2089 FoundNonConstantDistanceDependence |= CommonStride.has_value();
2090 return Dependence::Unknown;
2091 }
2092 if (!HasSameSize ||
2093 couldPreventStoreLoadForward(C->getAPInt().abs().getZExtValue(),
2094 TypeByteSize)) {
2095 LLVM_DEBUG(
2096 dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
2097 return Dependence::ForwardButPreventsForwarding;
2098 }
2099 }
2100
2101 LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
2102 return Dependence::Forward;
2103 }
2104
2105 int64_t MinDistance = SE.getSignedRangeMin(Dist).getSExtValue();
2106 // Below we only handle strictly positive distances.
2107 if (MinDistance <= 0) {
2108 FoundNonConstantDistanceDependence |= CommonStride.has_value();
2109 return Dependence::Unknown;
2110 }
2111
2112 if (!isa<SCEVConstant>(Dist)) {
2113 // Previously this case would be treated as Unknown, possibly setting
2114 // FoundNonConstantDistanceDependence to force re-trying with runtime
2115 // checks. Until the TODO below is addressed, set it here to preserve
2116 // original behavior w.r.t. re-trying with runtime checks.
2117 // TODO: FoundNonConstantDistanceDependence is used as a necessary
2118 // condition to consider retrying with runtime checks. Historically, we
2119 // did not set it when strides were different but there is no inherent
2120 // reason to.
2121 FoundNonConstantDistanceDependence |= CommonStride.has_value();
2122 }
2123
2124 if (!HasSameSize) {
2125 LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "
2126 "different type sizes\n");
2127 return Dependence::Unknown;
2128 }
2129
2130 if (!CommonStride)
2131 return Dependence::Unknown;
2132
2133 // Bail out early if passed-in parameters make vectorization not feasible.
2134 unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
2135 VectorizerParams::VectorizationFactor : 1);
2136 unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
2137 VectorizerParams::VectorizationInterleave : 1);
2138 // The minimum number of iterations for a vectorized/unrolled version.
2139 unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
2140
2141 // It's not vectorizable if the distance is smaller than the minimum distance
2142 // needed for a vectorized/unrolled version. Vectorizing one iteration in
2143 // front needs TypeByteSize * Stride. Vectorizing the last iteration needs
2144 // TypeByteSize (no need to add the last gap distance).
2145 //
2146 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
2147 // foo(int *A) {
2148 // int *B = (int *)((char *)A + 14);
2149 // for (i = 0 ; i < 1024 ; i += 2)
2150 // B[i] = A[i] + 1;
2151 // }
2152 //
2153 // Two accesses in memory (stride is 2):
2154 // | A[0] | | A[2] | | A[4] | | A[6] | |
2155 // | B[0] | | B[2] | | B[4] |
2156 //
2157 // MinDistance needed for vectorizing all iterations except the last one:
2158 // 4 * 2 * (MinNumIter - 1). MinDistance needed for the last iteration: 4.
2159 // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
2160 //
2161 // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
2162 // 12, which is less than distance.
2163 //
2164 // If MinNumIter is 4 (Say if a user forces the vectorization factor to be 4),
2165 // the minimum distance needed is 28, which is greater than distance. It is
2166 // not safe to do vectorization.
2167
2168 // We know that Dist is positive, but it may not be constant. Use the signed
2169 // minimum for computations below, as this ensures we compute the closest
2170 // possible dependence distance.
2171 uint64_t MinDistanceNeeded =
2172 TypeByteSize * *CommonStride * (MinNumIter - 1) + TypeByteSize;
2173 if (MinDistanceNeeded > static_cast<uint64_t>(MinDistance)) {
2174 if (!isa<SCEVConstant>(Dist)) {
2175 // For non-constant distances, we checked the lower bound of the
2176 // dependence distance and the distance may be larger at runtime (and safe
2177 // for vectorization). Classify it as Unknown, so we re-try with runtime
2178 // checks.
2179 return Dependence::Unknown;
2180 }
2181 LLVM_DEBUG(dbgs() << "LAA: Failure because of positive minimum distance "
2182 << MinDistance << '\n');
2183 return Dependence::Backward;
2184 }
2185
2186 // Unsafe if the minimum distance needed is greater than smallest dependence
2187 // distance.
2188 if (MinDistanceNeeded > MinDepDistBytes) {
2189 LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
2190 << MinDistanceNeeded << " size in bytes\n");
2191 return Dependence::Backward;
2192 }
2193
2194 // Positive distance bigger than max vectorization factor.
2195 // FIXME: Should use max factor instead of max distance in bytes, which could
2196 // not handle different types.
2197 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
2198 // void foo (int *A, char *B) {
2199 // for (unsigned i = 0; i < 1024; i++) {
2200 // A[i+2] = A[i] + 1;
2201 // B[i+2] = B[i] + 1;
2202 // }
2203 // }
2204 //
2205 // This case is currently unsafe according to the max safe distance. If we
2206 // analyze the two accesses on array B, the max safe dependence distance
2207 // is 2. Then we analyze the accesses on array A, the minimum distance needed
2208 // is 8, which exceeds 2 and forbids vectorization. But actually
2209 // both A and B could be vectorized by 2 iterations.
2210 MinDepDistBytes =
2211 std::min(static_cast<uint64_t>(MinDistance), MinDepDistBytes);
2212
2213 bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
2214 uint64_t MinDepDistBytesOld = MinDepDistBytes;
2215 if (IsTrueDataDependence && EnableForwardingConflictDetection &&
2216 isa<SCEVConstant>(Dist) &&
2217 couldPreventStoreLoadForward(MinDistance, TypeByteSize)) {
2218 // Sanity check that we didn't update MinDepDistBytes when calling
2219 // couldPreventStoreLoadForward
2220 assert(MinDepDistBytes == MinDepDistBytesOld &&
2221 "An update to MinDepDistBytes requires an update to "
2222 "MaxSafeVectorWidthInBits");
2223 (void)MinDepDistBytesOld;
2224 return Dependence::BackwardVectorizableButPreventsForwarding;
2225 }
2226
2227 // An update to MinDepDistBytes requires an update to MaxSafeVectorWidthInBits
2228 // since there is a backwards dependency.
2229 uint64_t MaxVF = MinDepDistBytes / (TypeByteSize * *CommonStride);
2230 LLVM_DEBUG(dbgs() << "LAA: Positive min distance " << MinDistance
2231 << " with max VF = " << MaxVF << '\n');
2232
2233 uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
2234 if (!isa<SCEVConstant>(Dist) && MaxVFInBits < MaxTargetVectorWidthInBits) {
2235 // For non-constant distances, we checked the lower bound of the dependence
2236 // distance and the distance may be larger at runtime (and safe for
2237 // vectorization). Classify it as Unknown, so we re-try with runtime checks.
2238 return Dependence::Unknown;
2239 }
2240
2241 MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);
2242 return Dependence::BackwardVectorizable;
2243}
2244
2245 bool MemoryDepChecker::areDepsSafe(const DepCandidates &AccessSets,
2246 const MemAccessInfoList &CheckDeps) {
2247
2248 MinDepDistBytes = -1;
2249 SmallPtrSet<MemAccessInfo, 8> Visited;
2250 for (MemAccessInfo CurAccess : CheckDeps) {
2251 if (Visited.count(CurAccess))
2252 continue;
2253
2254 // Get the relevant memory access set.
2255 EquivalenceClasses<MemAccessInfo>::iterator I =
2256 AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));
2257
2258 // Check accesses within this set.
2259 EquivalenceClasses<MemAccessInfo>::member_iterator AI =
2260 AccessSets.member_begin(I);
2261 EquivalenceClasses<MemAccessInfo>::member_iterator AE =
2262 AccessSets.member_end();
2263
2264 // Check every access pair.
2265 while (AI != AE) {
2266 Visited.insert(*AI);
2267 bool AIIsWrite = AI->getInt();
2268 // Check loads only against the next equivalence class, but stores also
2269 // against other stores in the same equivalence class - to the same address.
2270 EquivalenceClasses<MemAccessInfo>::member_iterator OI =
2271 (AIIsWrite ? AI : std::next(AI));
2272 while (OI != AE) {
2273 // Check every accessing instruction pair in program order.
2274 for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
2275 I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
2276 // Scan all accesses of another equivalence class, but only the next
2277 // accesses of the same equivalence class.
2278 for (std::vector<unsigned>::iterator
2279 I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
2280 I2E = (OI == AI ? I1E : Accesses[*OI].end());
2281 I2 != I2E; ++I2) {
2282 auto A = std::make_pair(&*AI, *I1);
2283 auto B = std::make_pair(&*OI, *I2);
2284
2285 assert(*I1 != *I2);
2286 if (*I1 > *I2)
2287 std::swap(A, B);
2288
2289 Dependence::DepType Type =
2290 isDependent(*A.first, A.second, *B.first, B.second);
2291 mergeInStatus(Dependence::isSafeForVectorization(Type));
2292
2293 // Gather dependences unless we accumulated MaxDependences
2294 // dependences. In that case return as soon as we find the first
2295 // unsafe dependence. This puts a limit on this quadratic
2296 // algorithm.
2297 if (RecordDependences) {
2298 if (Type != Dependence::NoDep)
2299 Dependences.emplace_back(A.second, B.second, Type);
2300
2301 if (Dependences.size() >= MaxDependences) {
2302 RecordDependences = false;
2303 Dependences.clear();
2305 << "Too many dependences, stopped recording\n");
2306 }
2307 }
2308 if (!RecordDependences && !isSafeForVectorization())
2309 return false;
2310 }
2311 ++OI;
2312 }
2313 ++AI;
2314 }
2315 }
2316
2317 LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
2318 return isSafeForVectorization();
2319}
2320
2321 SmallVector<Instruction *, 4>
2322 MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool IsWrite) const {
2323 MemAccessInfo Access(Ptr, IsWrite);
2324 auto &IndexVector = Accesses.find(Access)->second;
2325
2326 SmallVector<Instruction *, 4> Insts;
2327 transform(IndexVector,
2328 std::back_inserter(Insts),
2329 [&](unsigned Idx) { return this->InstMap[Idx]; });
2330 return Insts;
2331}
2332
2334 "NoDep",
2335 "Unknown",
2336 "IndirectUnsafe",
2337 "Forward",
2338 "ForwardButPreventsForwarding",
2339 "Backward",
2340 "BackwardVectorizable",
2341 "BackwardVectorizableButPreventsForwarding"};
2342
2343 void MemoryDepChecker::Dependence::print(
2344 raw_ostream &OS, unsigned Depth,
2345 const SmallVectorImpl<Instruction *> &Instrs) const {
2346 OS.indent(Depth) << DepName[Type] << ":\n";
2347 OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
2348 OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
2349}
2350
2351bool LoopAccessInfo::canAnalyzeLoop() {
2352 // We need to have a loop header.
2353 LLVM_DEBUG(dbgs() << "\nLAA: Checking a loop in '"
2354 << TheLoop->getHeader()->getParent()->getName() << "' from "
2355 << TheLoop->getLocStr() << "\n");
2356
2357 // We can only analyze innermost loops.
2358 if (!TheLoop->isInnermost()) {
2359 LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
2360 recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
2361 return false;
2362 }
2363
2364 // We must have a single backedge.
2365 if (TheLoop->getNumBackEdges() != 1) {
2366 LLVM_DEBUG(
2367 dbgs() << "LAA: loop control flow is not understood by analyzer\n");
2368 recordAnalysis("CFGNotUnderstood")
2369 << "loop control flow is not understood by analyzer";
2370 return false;
2371 }
2372
2373 // ScalarEvolution needs to be able to find the symbolic max backedge taken
2374 // count, which is an upper bound on the number of loop iterations. The loop
2375 // may execute fewer iterations, if it exits via an uncountable exit.
2376 const SCEV *ExitCount = PSE->getSymbolicMaxBackedgeTakenCount();
2377 if (isa<SCEVCouldNotCompute>(ExitCount)) {
2378 recordAnalysis("CantComputeNumberOfIterations")
2379 << "could not determine number of loop iterations";
2380 LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
2381 return false;
2382 }
2383
2384 LLVM_DEBUG(dbgs() << "LAA: Found an analyzable loop: "
2385 << TheLoop->getHeader()->getName() << "\n");
2386 return true;
2387}
2388
2389bool LoopAccessInfo::analyzeLoop(AAResults *AA, const LoopInfo *LI,
2390 const TargetLibraryInfo *TLI,
2391 DominatorTree *DT) {
2392 // Holds the Load and Store instructions.
2393 SmallVector<LoadInst *, 16> Loads;
2394 SmallVector<StoreInst *, 16> Stores;
2395 SmallPtrSet<MDNode *, 8> LoopAliasScopes;
2396
2397 // Holds all the different accesses in the loop.
2398 unsigned NumReads = 0;
2399 unsigned NumReadWrites = 0;
2400
2401 bool HasComplexMemInst = false;
2402
2403 // A runtime check is only legal to insert if there are no convergent calls.
2404 HasConvergentOp = false;
2405
2406 PtrRtChecking->Pointers.clear();
2407 PtrRtChecking->Need = false;
2408
2409 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
2410
2411 const bool EnableMemAccessVersioningOfLoop =
2412 EnableMemAccessVersioning &&
2413 !TheLoop->getHeader()->getParent()->hasOptSize();
2414
2415 // Traverse blocks in fixed RPOT order, regardless of their storage in the
2416 // loop info, as it may be arbitrary.
2417 LoopBlocksRPO RPOT(TheLoop);
2418 RPOT.perform(LI);
2419 for (BasicBlock *BB : RPOT) {
2420 // Scan the BB and collect legal loads and stores. Also detect any
2421 // convergent instructions.
2422 for (Instruction &I : *BB) {
2423 if (auto *Call = dyn_cast<CallBase>(&I)) {
2424 if (Call->isConvergent())
2425 HasConvergentOp = true;
2426 }
2427
2428 // If the loop contains both a non-vectorizable memory instruction and a
2429 // convergent operation, there is no reason to continue the search.
2430 if (HasComplexMemInst && HasConvergentOp)
2431 return false;
2432
2433 // Avoid hitting recordAnalysis multiple times.
2434 if (HasComplexMemInst)
2435 continue;
2436
2437 // Record alias scopes defined inside the loop.
2438 if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
2439 for (Metadata *Op : Decl->getScopeList()->operands())
2440 LoopAliasScopes.insert(cast<MDNode>(Op));
2441
2442 // Many math library functions read the rounding mode. We will only
2443 // vectorize a loop if it contains known function calls that don't set
2444 // the flag. Therefore, it is safe to ignore this read from memory.
2445 auto *Call = dyn_cast<CallInst>(&I);
2446 if (Call && getVectorIntrinsicIDForCall(Call, TLI))
2447 continue;
2448
2449 // If this is a load, save it. If this instruction can read from memory
2450 // but is not a load, then we quit. Notice that we don't handle function
2451 // calls that read or write.
2452 if (I.mayReadFromMemory()) {
2453 // If the function has an explicit vectorized counterpart, we can safely
2454 // assume that it can be vectorized.
2455 if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
2456 !VFDatabase::getMappings(*Call).empty())
2457 continue;
2458
2459 auto *Ld = dyn_cast<LoadInst>(&I);
2460 if (!Ld) {
2461 recordAnalysis("CantVectorizeInstruction", Ld)
2462 << "instruction cannot be vectorized";
2463 HasComplexMemInst = true;
2464 continue;
2465 }
2466 if (!Ld->isSimple() && !IsAnnotatedParallel) {
2467 recordAnalysis("NonSimpleLoad", Ld)
2468 << "read with atomic ordering or volatile read";
2469 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
2470 HasComplexMemInst = true;
2471 continue;
2472 }
2473 NumLoads++;
2474 Loads.push_back(Ld);
2475 DepChecker->addAccess(Ld);
2476 if (EnableMemAccessVersioningOfLoop)
2477 collectStridedAccess(Ld);
2478 continue;
2479 }
2480
2481 // Save 'store' instructions. Abort if other instructions write to memory.
2482 if (I.mayWriteToMemory()) {
2483 auto *St = dyn_cast<StoreInst>(&I);
2484 if (!St) {
2485 recordAnalysis("CantVectorizeInstruction", St)
2486 << "instruction cannot be vectorized";
2487 HasComplexMemInst = true;
2488 continue;
2489 }
2490 if (!St->isSimple() && !IsAnnotatedParallel) {
2491 recordAnalysis("NonSimpleStore", St)
2492 << "write with atomic ordering or volatile write";
2493 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
2494 HasComplexMemInst = true;
2495 continue;
2496 }
2497 NumStores++;
2498 Stores.push_back(St);
2499 DepChecker->addAccess(St);
2500 if (EnableMemAccessVersioningOfLoop)
2501 collectStridedAccess(St);
2502 }
2503 } // Next instr.
2504 } // Next block.
2505
2506 if (HasComplexMemInst)
2507 return false;
2508
2509 // Now we have two lists that hold the loads and the stores.
2510 // Next, we find the pointers that they use.
2511
2512 // Check if we see any stores. If there are no stores, then we don't
2513 // care if the pointers are *restrict*.
2514 if (!Stores.size()) {
2515 LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
2516 return true;
2517 }
2518
2519 MemoryDepChecker::DepCandidates DependentAccesses;
2520 AccessAnalysis Accesses(TheLoop, AA, LI, DependentAccesses, *PSE,
2521 LoopAliasScopes);
2522
2523 // Holds the analyzed pointers. We don't want to call getUnderlyingObjects
2524 // multiple times on the same object. If the ptr is accessed twice, once
2525 // for read and once for write, it will only appear once (on the write
2526 // list). This is okay, since we are going to check for conflicts between
2527 // writes and between reads and writes, but not between reads and reads.
2528 SmallSet<std::pair<Value *, Type *>, 16> Seen;
2529
2530 // Record uniform store addresses to identify if we have multiple stores
2531 // to the same address.
2532 SmallPtrSet<Value *, 16> UniformStores;
2533
2534 for (StoreInst *ST : Stores) {
2535 Value *Ptr = ST->getPointerOperand();
2536
2537 if (isInvariant(Ptr)) {
2538 // Record store instructions to loop invariant addresses
2539 StoresToInvariantAddresses.push_back(ST);
2540 HasStoreStoreDependenceInvolvingLoopInvariantAddress |=
2541 !UniformStores.insert(Ptr).second;
2542 }
2543
2544 // If we did *not* see this pointer before, insert it to the read-write
2545 // list. At this phase it is only a 'write' list.
2546 Type *AccessTy = getLoadStoreType(ST);
2547 if (Seen.insert({Ptr, AccessTy}).second) {
2548 ++NumReadWrites;
2549
2550 MemoryLocation Loc = MemoryLocation::get(ST);
2551 // The TBAA metadata could have a control dependency on the predication
2552 // condition, so we cannot rely on it when determining whether or not we
2553 // need runtime pointer checks.
2554 if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
2555 Loc.AATags.TBAA = nullptr;
2556
2557 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2558 [&Accesses, AccessTy, Loc](Value *Ptr) {
2559 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2560 Accesses.addStore(NewLoc, AccessTy);
2561 });
2562 }
2563 }
2564
2565 if (IsAnnotatedParallel) {
2566 LLVM_DEBUG(
2567 dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
2568 << "checks.\n");
2569 return true;
2570 }
2571
2572 for (LoadInst *LD : Loads) {
2573 Value *Ptr = LD->getPointerOperand();
2574 // If we did *not* see this pointer before, insert it to the
2575 // read list. If we *did* see it before, then it is already in
2576 // the read-write list. This allows us to vectorize expressions
2577 // such as A[i] += x; Because the address of A[i] is a read-write
2578 // pointer. This only works if the index of A[i] is consecutive.
2579 // If the address of i is unknown (for example A[B[i]]) then we may
2580 // read a few words, modify, and write a few words, and some of the
2581 // words may be written to the same address.
2582 bool IsReadOnlyPtr = false;
2583 Type *AccessTy = getLoadStoreType(LD);
2584 if (Seen.insert({Ptr, AccessTy}).second ||
2585 !getPtrStride(*PSE, LD->getType(), Ptr, TheLoop, SymbolicStrides).value_or(0)) {
2586 ++NumReads;
2587 IsReadOnlyPtr = true;
2588 }
2589
2590 // See if there is an unsafe dependency between a load to a uniform address and
2591 // store to the same uniform address.
2592 if (UniformStores.count(Ptr)) {
2593 LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
2594 "load and uniform store to the same address!\n");
2595 HasLoadStoreDependenceInvolvingLoopInvariantAddress = true;
2596 }
2597
2598 MemoryLocation Loc = MemoryLocation::get(LD);
2599 // The TBAA metadata could have a control dependency on the predication
2600 // condition, so we cannot rely on it when determining whether or not we
2601 // need runtime pointer checks.
2602 if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
2603 Loc.AATags.TBAA = nullptr;
2604
2605 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2606 [&Accesses, AccessTy, Loc, IsReadOnlyPtr](Value *Ptr) {
2607 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2608 Accesses.addLoad(NewLoc, AccessTy, IsReadOnlyPtr);
2609 });
2610 }
2611
2612 // If we write (or read-write) to a single destination and there are no
2613 // other reads in this loop then it is safe to vectorize.
2614 if (NumReadWrites == 1 && NumReads == 0) {
2615 LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
2616 return true;
2617 }
2618
2619 // Build dependence sets and check whether we need a runtime pointer bounds
2620 // check.
2621 Accesses.buildDependenceSets();
2622
2623 // Find pointers with computable bounds. We are going to use this information
2624 // to place a runtime bound check.
2625 Value *UncomputablePtr = nullptr;
2626 bool CanDoRTIfNeeded =
2627 Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(), TheLoop,
2628 SymbolicStrides, UncomputablePtr, false);
2629 if (!CanDoRTIfNeeded) {
2630 const auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2631 recordAnalysis("CantIdentifyArrayBounds", I)
2632 << "cannot identify array bounds";
2633 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
2634 << "the array bounds.\n");
2635 return false;
2636 }
2637
2638 LLVM_DEBUG(
2639 dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");
2640
2641 bool DepsAreSafe = true;
2642 if (Accesses.isDependencyCheckNeeded()) {
2643 LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
2644 DepsAreSafe = DepChecker->areDepsSafe(DependentAccesses,
2645 Accesses.getDependenciesToCheck());
2646
2647 if (!DepsAreSafe && DepChecker->shouldRetryWithRuntimeCheck()) {
2648 LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");
2649
2650 // Clear the dependency checks. We assume they are not needed.
2651 Accesses.resetDepChecks(*DepChecker);
2652
2653 PtrRtChecking->reset();
2654 PtrRtChecking->Need = true;
2655
2656 auto *SE = PSE->getSE();
2657 UncomputablePtr = nullptr;
2658 CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(
2659 *PtrRtChecking, SE, TheLoop, SymbolicStrides, UncomputablePtr, true);
2660
2661 // Check that we found the bounds for the pointer.
2662 if (!CanDoRTIfNeeded) {
2663 auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2664 recordAnalysis("CantCheckMemDepsAtRunTime", I)
2665 << "cannot check memory dependencies at runtime";
2666 LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
2667 return false;
2668 }
2669 DepsAreSafe = true;
2670 }
2671 }
2672
2673 if (HasConvergentOp) {
2674 recordAnalysis("CantInsertRuntimeCheckWithConvergent")
2675 << "cannot add control dependency to convergent operation";
2676 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
2677 "would be needed with a convergent operation\n");
2678 return false;
2679 }
2680
2681 if (DepsAreSafe) {
2682 LLVM_DEBUG(
2683 dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
2684 << (PtrRtChecking->Need ? "" : " don't")
2685 << " need runtime memory checks.\n");
2686 return true;
2687 }
2688
2689 emitUnsafeDependenceRemark();
2690 return false;
2691}
2692
2693void LoopAccessInfo::emitUnsafeDependenceRemark() {
2694 const auto *Deps = getDepChecker().getDependences();
2695 if (!Deps)
2696 return;
2697 const auto *Found =
2698 llvm::find_if(*Deps, [](const MemoryDepChecker::Dependence &D) {
2699 return MemoryDepChecker::Dependence::isSafeForVectorization(D.Type) !=
2700 MemoryDepChecker::VectorizationSafetyStatus::Safe;
2701 });
2702 if (Found == Deps->end())
2703 return;
2704 MemoryDepChecker::Dependence Dep = *Found;
2705
2706 LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
2707
2708 // Emit remark for first unsafe dependence
2709 bool HasForcedDistribution = false;
2710 std::optional<const MDOperand *> Value =
2711 findStringMetadataForLoop(TheLoop, "llvm.loop.distribute.enable");
2712 if (Value) {
2713 const MDOperand *Op = *Value;
2714 assert(Op && mdconst::hasa<ConstantInt>(*Op) && "invalid metadata");
2715 HasForcedDistribution = mdconst::extract<ConstantInt>(*Op)->getZExtValue();
2716 }
2717
2718 const std::string Info =
2719 HasForcedDistribution
2720 ? "unsafe dependent memory operations in loop."
2721 : "unsafe dependent memory operations in loop. Use "
2722 "#pragma clang loop distribute(enable) to allow loop distribution "
2723 "to attempt to isolate the offending operations into a separate "
2724 "loop";
2726 recordAnalysis("UnsafeDep", Dep.getDestination(getDepChecker())) << Info;
2727
2728 switch (Dep.Type) {
2729 case MemoryDepChecker::Dependence::NoDep:
2730 case MemoryDepChecker::Dependence::Forward:
2731 case MemoryDepChecker::Dependence::BackwardVectorizable:
2732 llvm_unreachable("Unexpected dependence");
2733 case MemoryDepChecker::Dependence::Backward:
2734 R << "\nBackward loop carried data dependence.";
2735 break;
2736 case MemoryDepChecker::Dependence::ForwardButPreventsForwarding:
2737 R << "\nForward loop carried data dependence that prevents "
2738 "store-to-load forwarding.";
2739 break;
2740 case MemoryDepChecker::Dependence::BackwardVectorizableButPreventsForwarding:
2741 R << "\nBackward loop carried data dependence that prevents "
2742 "store-to-load forwarding.";
2743 break;
2744 case MemoryDepChecker::Dependence::IndirectUnsafe:
2745 R << "\nUnsafe indirect dependence.";
2746 break;
2747 case MemoryDepChecker::Dependence::Unknown:
2748 R << "\nUnknown data dependence.";
2749 break;
2750 }
2751
2752 if (Instruction *I = Dep.getSource(getDepChecker())) {
2753 DebugLoc SourceLoc = I->getDebugLoc();
2754 if (auto *DD = dyn_cast_or_null<Instruction>(getPointerOperand(I)))
2755 SourceLoc = DD->getDebugLoc();
2756 if (SourceLoc)
2757 R << " Memory location is the same as accessed at "
2758 << ore::NV("Location", SourceLoc);
2759 }
2760}
2761
2762 bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
2763 DominatorTree *DT) {
2764 assert(TheLoop->contains(BB) && "Unknown block used");
2765
2766 // Blocks that do not dominate the latch need predication.
2767 const BasicBlock *Latch = TheLoop->getLoopLatch();
2768 return !DT->dominates(BB, Latch);
2769}
2770
2771 OptimizationRemarkAnalysis &
2772 LoopAccessInfo::recordAnalysis(StringRef RemarkName, const Instruction *I) {
2773 assert(!Report && "Multiple reports generated");
2774
2775 const Value *CodeRegion = TheLoop->getHeader();
2776 DebugLoc DL = TheLoop->getStartLoc();
2777
2778 if (I) {
2779 CodeRegion = I->getParent();
2780 // If there is no debug location attached to the instruction, revert back to
2781 // using the loop's.
2782 if (I->getDebugLoc())
2783 DL = I->getDebugLoc();
2784 }
2785
2786 Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName, DL,
2787 CodeRegion);
2788 return *Report;
2789}
2790
2791 bool LoopAccessInfo::isInvariant(Value *V) const {
2792 auto *SE = PSE->getSE();
2793 // TODO: Is this really what we want? Even without FP SCEV, we may want some
2794 // trivially loop-invariant FP values to be considered invariant.
2795 if (!SE->isSCEVable(V->getType()))
2796 return false;
2797 const SCEV *S = SE->getSCEV(V);
2798 return SE->isLoopInvariant(S, TheLoop);
2799}
2800
2801/// Find the operand of the GEP that should be checked for consecutive
2802/// stores. This ignores trailing indices that have no effect on the final
2803/// pointer.
2804static unsigned getGEPInductionOperand(const GetElementPtrInst *Gep) {
2805 const DataLayout &DL = Gep->getDataLayout();
2806 unsigned LastOperand = Gep->getNumOperands() - 1;
2807 TypeSize GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());
2808
2809 // Walk backwards and try to peel off zeros.
2810 while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
2811 // Find the type we're currently indexing into.
2812 gep_type_iterator GEPTI = gep_type_begin(Gep);
2813 std::advance(GEPTI, LastOperand - 2);
2814
2815 // If it's a type with the same allocation size as the result of the GEP we
2816 // can peel off the zero index.
2817 TypeSize ElemSize = GEPTI.isStruct()
2818 ? DL.getTypeAllocSize(GEPTI.getIndexedType())
2819 : GEPTI.getSequentialElementStride(DL);
2820 if (ElemSize != GEPAllocSize)
2821 break;
2822 --LastOperand;
2823 }
2824
2825 return LastOperand;
2826}
2827
2828/// If the argument is a GEP, then returns the operand identified by
2829/// getGEPInductionOperand. However, if there is some other non-loop-invariant
2830/// operand, it returns that instead.
2831 static Value *stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
2832 auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
2833 if (!GEP)
2834 return Ptr;
2835
2836 unsigned InductionOperand = getGEPInductionOperand(GEP);
2837
2838 // Check that all of the gep indices are uniform except for our induction
2839 // operand.
2840 for (unsigned I = 0, E = GEP->getNumOperands(); I != E; ++I)
2841 if (I != InductionOperand &&
2842 !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(I)), Lp))
2843 return Ptr;
2844 return GEP->getOperand(InductionOperand);
2845}
2846
2847/// Get the stride of a pointer access in a loop. Looks for symbolic
2848/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
2849 static const SCEV *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
2850 auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
2851 if (!PtrTy || PtrTy->isAggregateType())
2852 return nullptr;
2853
2854 // Try to remove a gep instruction to make the pointer (actually the index at
2855 // this point) easier to analyze. If OrigPtr is equal to Ptr we are analyzing
2856 // the pointer, otherwise we are analyzing the index.
2857 Value *OrigPtr = Ptr;
2858
2859 // The size of the pointer access.
2860 int64_t PtrAccessSize = 1;
2861
2862 Ptr = stripGetElementPtr(Ptr, SE, Lp);
2863 const SCEV *V = SE->getSCEV(Ptr);
2864
2865 if (Ptr != OrigPtr)
2866 // Strip off casts.
2867 while (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V))
2868 V = C->getOperand();
2869
2870 const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
2871 if (!S)
2872 return nullptr;
2873
2874 // If the pointer is invariant then there is no stride and it makes no
2875 // sense to add it here.
2876 if (Lp != S->getLoop())
2877 return nullptr;
2878
2879 V = S->getStepRecurrence(*SE);
2880 if (!V)
2881 return nullptr;
2882
2883 // Strip off the size of access multiplication if we are still analyzing the
2884 // pointer.
2885 if (OrigPtr == Ptr) {
2886 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
2887 if (M->getOperand(0)->getSCEVType() != scConstant)
2888 return nullptr;
2889
2890 const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();
2891
2892 // Huge step value - give up.
2893 if (APStepVal.getBitWidth() > 64)
2894 return nullptr;
2895
2896 int64_t StepVal = APStepVal.getSExtValue();
2897 if (PtrAccessSize != StepVal)
2898 return nullptr;
2899 V = M->getOperand(1);
2900 }
2901 }
2902
2903 // Note that the restrictions after this loop invariant check are only
2904 // profitability restrictions.
2905 if (!SE->isLoopInvariant(V, Lp))
2906 return nullptr;
2907
2908 // Look for the loop invariant symbolic value.
2909 if (isa<SCEVUnknown>(V))
2910 return V;
2911
2912 if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(V))
2913 if (isa<SCEVUnknown>(C->getOperand()))
2914 return V;
2915
2916 return nullptr;
2917}
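// Editor's note (illustrative sketch, not part of the upstream source): for an
// access like A[i * Stride], where Stride is a loop-invariant i64 argument,
// the function strips the GEP and analyzes the index i * Stride, whose SCEV is
// {0,+,Stride}<%loop>. The step is the loop-invariant SCEVUnknown for Stride,
// which is what gets returned and later drives versioning on "Stride == 1".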
2918
2919void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
2920 Value *Ptr = getLoadStorePointerOperand(MemAccess);
2921 if (!Ptr)
2922 return;
2923
2924 // Note: getStrideFromPointer is a *profitability* heuristic. We
2925 // could broaden the scope of values returned here - to anything
2926 // which happens to be loop invariant and contributes to the
2927 // computation of an interesting IV - but we chose not to as we
2928 // don't have a cost model here, and broadening the scope exposes
2929 // far too many unprofitable cases.
2930 const SCEV *StrideExpr = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
2931 if (!StrideExpr)
2932 return;
2933
2934 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
2935 "versioning:");
2936 LLVM_DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *StrideExpr << "\n");
2937
2938 if (!SpeculateUnitStride) {
2939 LLVM_DEBUG(dbgs() << " Chose not to due to -laa-speculate-unit-stride\n");
2940 return;
2941 }
2942
2943 // Avoid adding the "Stride == 1" predicate when we know that
2944 // Stride >= Trip-Count. Such a predicate will effectively optimize a single
2945 // or zero iteration loop, as Trip-Count <= Stride == 1.
2946 //
2947 // TODO: We are currently not making a very informed decision on when it is
2948 // beneficial to apply stride versioning. It might make more sense that the
2949 // users of this analysis (such as the vectorizer) will trigger it, based on
2950 // their specific cost considerations; For example, in cases where stride
2951 // versioning does not help resolving memory accesses/dependences, the
2952 // vectorizer should evaluate the cost of the runtime test, and the benefit
2953 // of various possible stride specializations, considering the alternatives
2954 // of using gather/scatters (if available).
2955
2956 const SCEV *MaxBTC = PSE->getSymbolicMaxBackedgeTakenCount();
2957
2958 // Match the types so we can compare the stride and the MaxBTC.
2959 // The Stride can be positive/negative, so we sign extend Stride;
2960 // The backedgeTakenCount is non-negative, so we zero extend MaxBTC.
2961 const DataLayout &DL = TheLoop->getHeader()->getDataLayout();
2962 uint64_t StrideTypeSizeBits = DL.getTypeSizeInBits(StrideExpr->getType());
2963 uint64_t BETypeSizeBits = DL.getTypeSizeInBits(MaxBTC->getType());
2964 const SCEV *CastedStride = StrideExpr;
2965 const SCEV *CastedBECount = MaxBTC;
2966 ScalarEvolution *SE = PSE->getSE();
2967 if (BETypeSizeBits >= StrideTypeSizeBits)
2968 CastedStride = SE->getNoopOrSignExtend(StrideExpr, MaxBTC->getType());
2969 else
2970 CastedBECount = SE->getZeroExtendExpr(MaxBTC, StrideExpr->getType());
2971 const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
2972 // Since TripCount == BackEdgeTakenCount + 1, checking:
2973 // "Stride >= TripCount" is equivalent to checking:
2974 // Stride - MaxBTC > 0
2975 if (SE->isKnownPositive(StrideMinusBETaken)) {
2976 LLVM_DEBUG(
2977 dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
2978 "Stride==1 predicate will imply that the loop executes "
2979 "at most once.\n");
2980 return;
2981 }
2982 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");
2983
2984 // Strip back off the integer cast, and check that our result is a
2985 // SCEVUnknown as we expect.
2986 const SCEV *StrideBase = StrideExpr;
2987 if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(StrideBase))
2988 StrideBase = C->getOperand();
2989 SymbolicStrides[Ptr] = cast<SCEVUnknown>(StrideBase);
2990}
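// Editor's note (worked example, not part of the upstream source): if the
// symbolic stride above is a hypothetical i64 argument %n and the loop's
// symbolic maximum backedge-taken count is 99, the check computes %n - 99.
// Only when that value is not provably positive (i.e. %n might be < 100) is
// the "%n == 1" predicate worth adding; otherwise the versioned loop could
// execute at most one iteration.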
2991
2992 LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
2993 const TargetTransformInfo *TTI,
2994 const TargetLibraryInfo *TLI, AAResults *AA,
2995 DominatorTree *DT, LoopInfo *LI)
2996 : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
2997 PtrRtChecking(nullptr), TheLoop(L) {
2998 unsigned MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
2999 if (TTI) {
3000 TypeSize FixedWidth =
3001 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector);
3002 if (FixedWidth.isNonZero()) {
3003 // Scale the vector width by 2 as rough estimate to also consider
3004 // interleaving.
3005 MaxTargetVectorWidthInBits = FixedWidth.getFixedValue() * 2;
3006 }
3007
3008 TypeSize ScalableWidth =
3009 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_ScalableVector);
3010 if (ScalableWidth.isNonZero())
3011 MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
3012 }
3013 DepChecker = std::make_unique<MemoryDepChecker>(*PSE, L, SymbolicStrides,
3014 MaxTargetVectorWidthInBits);
3015 PtrRtChecking = std::make_unique<RuntimePointerChecking>(*DepChecker, SE);
3016 if (canAnalyzeLoop())
3017 CanVecMem = analyzeLoop(AA, LI, TLI, DT);
3018}
3019
3020 void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
3021 if (CanVecMem) {
3022 OS.indent(Depth) << "Memory dependences are safe";
3023 const MemoryDepChecker &DC = getDepChecker();
3024 if (!DC.isSafeForAnyVectorWidth())
3025 OS << " with a maximum safe vector width of "
3026 << DC.getMaxSafeVectorWidthInBits() << " bits";
3027 if (PtrRtChecking->Need)
3028 OS << " with run-time checks";
3029 OS << "\n";
3030 }
3031
3032 if (HasConvergentOp)
3033 OS.indent(Depth) << "Has convergent operation in loop\n";
3034
3035 if (Report)
3036 OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";
3037
3038 if (auto *Dependences = DepChecker->getDependences()) {
3039 OS.indent(Depth) << "Dependences:\n";
3040 for (const auto &Dep : *Dependences) {
3041 Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
3042 OS << "\n";
3043 }
3044 } else
3045 OS.indent(Depth) << "Too many dependences, not recorded\n";
3046
3047 // List the pairs of accesses that need run-time checks to prove independence.
3048 PtrRtChecking->print(OS, Depth);
3049 OS << "\n";
3050
3051 OS.indent(Depth)
3052 << "Non vectorizable stores to invariant address were "
3053 << (HasStoreStoreDependenceInvolvingLoopInvariantAddress ||
3054 HasLoadStoreDependenceInvolvingLoopInvariantAddress
3055 ? ""
3056 : "not ")
3057 << "found in loop.\n";
3058
3059 OS.indent(Depth) << "SCEV assumptions:\n";
3060 PSE->getPredicate().print(OS, Depth);
3061
3062 OS << "\n";
3063
3064 OS.indent(Depth) << "Expressions re-written:\n";
3065 PSE->print(OS, Depth);
3066}
3067
3068 const LoopAccessInfo &LoopAccessInfoManager::getInfo(Loop &L) {
3069 const auto &[It, Inserted] = LoopAccessInfoMap.insert({&L, nullptr});
3070
3071 if (Inserted)
3072 It->second =
3073 std::make_unique<LoopAccessInfo>(&L, &SE, TTI, TLI, &AA, &DT, &LI);
3074
3075 return *It->second;
3076}
3077 void LoopAccessInfoManager::clear() {
3078 SmallVector<Loop *> ToRemove;
3079 // Collect LoopAccessInfo entries that may keep references to IR outside the
3080 // analyzed loop or SCEVs that may have been modified or invalidated. At the
3081 // moment, that is loops requiring memory or SCEV runtime checks, as those cache
3082 // SCEVs, e.g. for pointer expressions.
3083 for (const auto &[L, LAI] : LoopAccessInfoMap) {
3084 if (LAI->getRuntimePointerChecking()->getChecks().empty() &&
3085 LAI->getPSE().getPredicate().isAlwaysTrue())
3086 continue;
3087 ToRemove.push_back(L);
3088 }
3089
3090 for (Loop *L : ToRemove)
3091 LoopAccessInfoMap.erase(L);
3092}
3093
3094 bool LoopAccessInfoManager::invalidate(
3095 Function &F, const PreservedAnalyses &PA,
3096 FunctionAnalysisManager::Invalidator &Inv) {
3097 // Check whether our analysis is preserved.
3098 auto PAC = PA.getChecker<LoopAccessAnalysis>();
3099 if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
3100 // If not, give up now.
3101 return true;
3102
3103 // Check whether the analyses we depend on became invalid for any reason.
3104 // Skip checking TargetLibraryAnalysis as it is immutable and can't become
3105 // invalid.
3106 return Inv.invalidate<AAManager>(F, PA) ||
3107 Inv.invalidate<ScalarEvolutionAnalysis>(F, PA) ||
3108 Inv.invalidate<LoopAnalysis>(F, PA) ||
3109 Inv.invalidate<DominatorTreeAnalysis>(F, PA);
3110 }
3111
3112 LoopAccessInfoManager LoopAccessAnalysis::run(Function &F,
3113 FunctionAnalysisManager &FAM) {
3114 auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(F);
3115 auto &AA = FAM.getResult<AAManager>(F);
3116 auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
3117 auto &LI = FAM.getResult<LoopAnalysis>(F);
3118 auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
3119 auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
3120 return LoopAccessInfoManager(SE, AA, DT, LI, &TTI, &TLI);
3121}
3122
3123AnalysisKey LoopAccessAnalysis::Key;
Return true if an AddRec pointer Ptr is unsigned non-wrapping, i.e.
static Value * stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp)
If the argument is a GEP, then returns the operand identified by getGEPInductionOperand.
static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride, uint64_t TypeByteSize)
Check the dependence for two accesses with the same stride Stride.
static const SCEV * getMinFromExprs(const SCEV *I, const SCEV *J, ScalarEvolution *SE)
Compare I and J and return the minimum.
static cl::opt< unsigned > MaxForkedSCEVDepth("max-forked-scev-depth", cl::Hidden, cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"), cl::init(5))
static cl::opt< bool > SpeculateUnitStride("laa-speculate-unit-stride", cl::Hidden, cl::desc("Speculate that non-constant strides are unit in LAA"), cl::init(true))
static SmallVector< PointerIntPair< const SCEV *, 1, bool > > findForkedPointer(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &StridesMap, Value *Ptr, const Loop *L)
static cl::opt< bool > EnableMemAccessVersioning("enable-mem-access-versioning", cl::init(true), cl::Hidden, cl::desc("Enable symbolic stride memory access versioning"))
This enables versioning on the strides of symbolically striding memory accesses in code like the foll...
This header provides classes for managing per-loop analyses.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file provides utility analysis objects describing memory locations.
uint64_t High
#define P(N)
FunctionAnalysisManager FAM
This header defines various interfaces for pass management in LLVM.
This file defines the PointerIntPair class.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckType(MVT::SimpleValueType VT, SDValue N, const TargetLowering *TLI, const DataLayout &DL)
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
This pass exposes codegen information to IR-level passes.
static const X86InstrFMA3Group Groups[]
A manager for alias analyses.
Class for arbitrary precision integers.
Definition: APInt.h:78
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1448
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition: APInt.cpp:1010
int64_t getSExtValue() const
Get sign extended value.
Definition: APInt.h:1522
This templated class represents "all analyses that operate over <a particular IR unit>" (e....
Definition: Analysis.h:49
API to communicate dependencies between analyses during invalidation.
Definition: PassManager.h:292
bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA)
Trigger the invalidation of some other analysis pass if not already handled and return whether it was...
Definition: PassManager.h:310
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:405
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:219
const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
Definition: BasicBlock.cpp:296
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
@ ICMP_ULE
unsigned less or equal
Definition: InstrTypes.h:783
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
A debug info location.
Definition: DebugLoc.h:33
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
iterator end()
Definition: DenseMap.h:84
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:211
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:279
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
EquivalenceClasses - This represents a collection of equivalence classes and supports three efficient...
iterator findValue(const ElemTy &V) const
findValue - Return an iterator to the specified value.
iterator insert(const ElemTy &Data)
insert - Insert a new value into the union/find set, ignoring the request if the value already exists...
member_iterator member_end() const
typename std::set< ECValue, ECValueComparator >::const_iterator iterator
iterator* - Provides a way to iterate over all values in the set.
member_iterator member_begin(iterator I) const
member_iterator unionSets(const ElemTy &V1, const ElemTy &V2)
union - Merge the two equivalence sets for the specified values, inserting them if they do not alread...
const ElemTy & getLeaderValue(const ElemTy &V) const
getLeaderValue - Return the leader for the specified value that is in the set.
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
Definition: Function.h:705
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:915
Type * getResultElementType() const
Definition: Instructions.h:976
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:294
const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
Definition: Instruction.cpp:74
Class to represent integer types.
Definition: DerivedTypes.h:40
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:266
An instruction for reading from memory.
Definition: Instructions.h:174
Value * getPointerOperand()
Definition: Instructions.h:253
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
This analysis provides dependence information for the memory accesses of a loop.
Result run(Function &F, FunctionAnalysisManager &AM)
bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
const LoopAccessInfo & getInfo(Loop &L)
Drive the analysis of memory accesses in the loop.
const MemoryDepChecker & getDepChecker() const
the Memory Dependence Checker which can determine the loop-independent and loop-carried dependences b...
bool isInvariant(Value *V) const
Returns true if value V is loop invariant.
void print(raw_ostream &OS, unsigned Depth=0) const
Print the information about the memory accesses in the loop.
static bool blockNeedsPredication(BasicBlock *BB, Loop *TheLoop, DominatorTree *DT)
Return true if the block BB needs to be predicated in order for the loop to be vectorized.
LoopAccessInfo(Loop *L, ScalarEvolution *SE, const TargetTransformInfo *TTI, const TargetLibraryInfo *TLI, AAResults *AA, DominatorTree *DT, LoopInfo *LI)
Analysis pass that exposes the LoopInfo for a function.
Definition: LoopInfo.h:571
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
unsigned getNumBackEdges() const
Calculate the number of back edges to the loop header.
BlockT * getHeader() const
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
Definition: LoopIterator.h:172
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:44
std::string getLocStr() const
Return a string containing the debug location of the loop (file name + line number if present,...
Definition: LoopInfo.cpp:667
bool isAnnotatedParallel() const
Returns true if the loop is annotated parallel.
Definition: LoopInfo.cpp:565
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
Definition: LoopInfo.cpp:632
Metadata node.
Definition: Metadata.h:1069
ArrayRef< MDOperand > operands() const
Definition: Metadata.h:1428
Tracking metadata reference owned by Metadata.
Definition: Metadata.h:891
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
Checks memory dependences among accesses to the same underlying object to determine whether there vec...
ArrayRef< unsigned > getOrderForAccess(Value *Ptr, bool IsWrite) const
Return the program order indices for the access location (Ptr, IsWrite).
bool isSafeForAnyVectorWidth() const
Return true if the number of elements that are safe to operate on simultaneously is not bounded.
bool areDepsSafe(const DepCandidates &AccessSets, const MemAccessInfoList &CheckDeps)
Check whether the dependencies between the accesses are safe.
const SmallVectorImpl< Instruction * > & getMemoryInstructions() const
The vector of memory access instructions.
const Loop * getInnermostLoop() const
uint64_t getMaxSafeVectorWidthInBits() const
Return the number of elements that are safe to operate on simultaneously, multiplied by the size of t...
bool isSafeForVectorization() const
No memory dependence was encountered that would inhibit vectorization.
const SmallVectorImpl< Dependence > * getDependences() const
Returns the memory dependences.
DenseMap< std::pair< const SCEV *, Type * >, std::pair< const SCEV *, const SCEV * > > & getPointerBounds()
SmallVector< Instruction *, 4 > getInstructionsForAccess(Value *Ptr, bool isWrite) const
Find the set of instructions that read or write via Ptr.
VectorizationSafetyStatus
Type to keep track of the status of the dependence check.
bool shouldRetryWithRuntimeCheck() const
In same cases when the dependency check fails we can still vectorize the loop with a dynamic array ac...
void addAccess(StoreInst *SI)
Register the location (instructions are given increasing numbers) of a write access.
PointerIntPair< Value *, 1, bool > MemAccessInfo
Representation for a specific memory location.
static MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
LocationSize Size
The maximum size of the location, in address-units, or UnknownSize if the size is not known.
AAMDNodes AATags
The metadata nodes which describes the aliasing of the location (each member is null if that kind of ...
const Value * Ptr
The address of the start of the location.
Root of the metadata hierarchy.
Definition: Metadata.h:62
Diagnostic information for optimization analysis remarks.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
void addPredicate(const SCEVPredicate &Pred)
Adds a new predicate.
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
const SCEVPredicate & getPredicate() const
bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Returns true if we've proved that V doesn't wrap by means of a SCEV predicate.
void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Proves that V doesn't overflow by adding SCEV predicate.
void print(raw_ostream &OS, unsigned Depth) const
Print the SCEV mappings done by the Predicated Scalar Evolution.
const SCEVAddRecExpr * getAsAddRec(Value *V)
Attempts to produce an AddRecExpr for V by adding additional SCEV predicates.
const SCEV * getSymbolicMaxBackedgeTakenCount()
Get the (predicated) symbolic max backedge count for the analyzed loop.
const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:111
PreservedAnalysisChecker getChecker() const
Build a checker for this PreservedAnalyses and the specified analysis type.
Definition: Analysis.h:264
Holds information about the memory runtime legality checks to verify that a group of pointers do not ...
bool Need
This flag indicates if we need to add the runtime check.
void reset()
Reset the state of the pointer runtime information.
unsigned getNumberOfChecks() const
Returns the number of run-time checks required according to needsChecking.
void printChecks(raw_ostream &OS, const SmallVectorImpl< RuntimePointerCheck > &Checks, unsigned Depth=0) const
Print Checks.
bool needsChecking(const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const
Decide if we need to add a check between two groups of pointers, according to needsChecking.
void print(raw_ostream &OS, unsigned Depth=0) const
Print the list run-time memory checks necessary.
SmallVector< RuntimeCheckingPtrGroup, 2 > CheckingGroups
Holds a partitioning of pointers into "check groups".
void generateChecks(MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies)
Generate the checks and store it.
static bool arePointersInSamePartition(const SmallVectorImpl< int > &PtrToPartition, unsigned PtrIdx1, unsigned PtrIdx2)
Check if pointers are in the same partition.
SmallVector< PointerInfo, 2 > Pointers
Information about the pointers that may require checking.
void insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr, Type *AccessTy, bool WritePtr, unsigned DepSetId, unsigned ASId, PredicatedScalarEvolution &PSE, bool NeedsFreeze)
Insert a pointer and calculate the start and end SCEVs.
This node represents a polynomial recurrence on the trip count of the specified loop.
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
This class represents a constant integer value.
This is the base class for unary integral cast operator classes.
This node represents multiplication of some number of SCEVs.
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
virtual void print(raw_ostream &OS, unsigned Depth=0) const =0
Prints a textual representation of this predicate with an indentation of Depth.
This class represents an analyzed expression in the program.
Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
The main scalar evolution driver.
bool isKnownNonNegative(const SCEV *S)
Test if the given expression is known to be non-negative.
const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
bool isKnownNonPositive(const SCEV *S)
Test if the given expression is known to be non-positive.
const SCEV * getUMaxExpr(const SCEV *LHS, const SCEV *RHS)
const SCEVPredicate * getEqualPredicate(const SCEV *LHS, const SCEV *RHS)
const SCEV * getConstant(ConstantInt *V)
const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
const SCEV * getPtrToIntExpr(const SCEV *Op, Type *Ty)
bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
bool isKnownPositive(const SCEV *S)
Test if the given expression is known to be positive.
bool isKnownPredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
const SCEV * getUMinExpr(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
APInt getSignedRangeMin(const SCEV *S)
Determine the min of the signed range for a particular SCEV.
const SCEV * getStoreSizeOfExpr(Type *IntTy, Type *StoreTy)
Return an expression for the store size of StoreTy that is type IntTy.
const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
const SCEV * getCouldNotCompute()
const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
const SCEV * getSizeOfExpr(Type *IntTy, TypeSize Size)
Return an expression for a TypeSize.
const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
const SCEV * getTruncateOrSignExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:347
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:436
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:368
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:503
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:166
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
bool empty() const
Definition: SmallVector.h:95
size_t size() const
Definition: SmallVector.h:92
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:587
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:951
void resize(size_type N)
Definition: SmallVector.h:652
void push_back(const T &Elt)
Definition: SmallVector.h:427
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1210
An instruction for storing to memory.
Definition: Instructions.h:290
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
TypeSize getRegisterBitWidth(RegisterKind K) const
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:261
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:251
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
Value * getOperand(unsigned i) const
Definition: User.h:169
unsigned getNumOperands() const
Definition: User.h:191
static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)
Retrieve all the VFInfo instances associated to the CallInst CI.
Definition: VectorUtils.h:71
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
Definition: Value.h:736
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:202
constexpr bool isNonZero() const
Definition: TypeSize.h:158
An efficient, type-erasing, non-owning reference to a callable.
TypeSize getSequentialElementStride(const DataLayout &DL) const
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
raw_ostream & indent(unsigned NumSpaces)
indent - Insert 'NumSpaces' spaces.
friend const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:236
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
Definition: PatternMatch.h:612
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:463
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
std::optional< int > getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB, Value *PtrB, const DataLayout &DL, ScalarEvolution &SE, bool StrictCheck=false, bool CheckType=true)
Returns the distance between the pointers PtrA and PtrB iff they are compatible and it is possible to...
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
@ Offset
Definition: DWP.cpp:480
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition: STLExtras.h:2406
unsigned getPointerAddressSpace(const Type *T)
Definition: SPIRVUtils.h:126
std::optional< const MDOperand * > findStringMetadataForLoop(const Loop *TheLoop, StringRef Name)
Find string metadata for loop.
Definition: LoopInfo.cpp:1065
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
AddressSpace
Definition: NVPTXBaseInfo.h:21
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
Definition: STLExtras.h:1928
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Definition: Function.cpp:2132
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool isPointerTy(const Type *T)
Definition: SPIRVUtils.h:120
std::optional< int64_t > getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr, const Loop *Lp, const DenseMap< Value *, const SCEV * > &StridesMap=DenseMap< Value *, const SCEV * >(), bool Assume=false, bool ShouldCheckWrap=true)
If the pointer has a constant stride return it in units of the access type size.
bool sortPtrAccesses(ArrayRef< Value * > VL, Type *ElemTy, const DataLayout &DL, ScalarEvolution &SE, SmallVectorImpl< unsigned > &SortedIndices)
Attempt to sort the pointers in VL and return the sorted indices in SortedIndices,...
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
const SCEV * replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &PtrToStride, Value *Ptr)
Return the SCEV corresponding to a pointer with the symbolic stride replaced with constant one,...
bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL, ScalarEvolution &SE, bool CheckType=true)
Returns true if the memory operations A and B are consecutive.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
OutputIt copy(R &&Range, OutputIt Out)
Definition: STLExtras.h:1824
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1749
gep_type_iterator gep_type_begin(const User *GEP)
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
IR Values for the lower and upper bounds of a pointer evolution.
Definition: LoopUtils.cpp:1798
MDNode * Scope
The tag for alias scope specification (used with noalias).
Definition: Metadata.h:783
MDNode * TBAA
The tag for type-based alias analysis.
Definition: Metadata.h:777
MDNode * NoAlias
The tag specifying the noalias scope.
Definition: Metadata.h:786
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition: Analysis.h:28
Dependece between memory access instructions.
Instruction * getDestination(const MemoryDepChecker &DepChecker) const
Return the destination instruction of the dependence.
DepType Type
The type of the dependence.
bool isPossiblyBackward() const
May be a lexically backward dependence type (includes Unknown).
Instruction * getSource(const MemoryDepChecker &DepChecker) const
Return the source instruction of the dependence.
bool isForward() const
Lexically forward dependence.
bool isBackward() const
Lexically backward dependence.
void print(raw_ostream &OS, unsigned Depth, const SmallVectorImpl< Instruction * > &Instrs) const
Print the dependence.
DepType
The type of the dependence.
static const char * DepName[]
String version of the types.
static VectorizationSafetyStatus isSafeForVectorization(DepType Type)
Dependence types that don't prevent vectorization.
unsigned AddressSpace
Address space of the involved pointers.
bool addPointer(unsigned Index, const RuntimePointerChecking &RtCheck)
Tries to add the pointer recorded in RtCheck at index Index to this pointer checking group.
bool NeedsFreeze
Whether the pointer needs to be frozen after expansion, e.g.
RuntimeCheckingPtrGroup(unsigned Index, const RuntimePointerChecking &RtCheck)
Create a new pointer checking group containing a single pointer, with index Index in RtCheck.
const SCEV * High
The SCEV expression which represents the upper bound of all the pointers in this group.
SmallVector< unsigned, 2 > Members
Indices of all the pointers that constitute this grouping.
const SCEV * Low
The SCEV expression which represents the lower bound of all the pointers in this group.
bool IsWritePtr
Holds the information if this pointer is used for writing to memory.
unsigned DependencySetId
Holds the id of the set of pointers that could be dependent because of a shared underlying object.
unsigned AliasSetId
Holds the id of the disjoint alias set to which this pointer belongs.
static const unsigned MaxVectorWidth
Maximum SIMD width.
static unsigned VectorizationFactor
VF as overridden by the user.
static unsigned RuntimeMemoryCheckThreshold
\When performing memory disambiguation checks at runtime do not make more than this number of compari...
static bool isInterleaveForced()
True if force-vector-interleave was specified by the user.
static unsigned VectorizationInterleave
Interleave factor as overridden by the user.
Function object to check whether the first component of a container supported by std::get (like std::...
Definition: STLExtras.h:1450