1//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// The implementation of the loop memory dependence analysis that was
10// originally developed for the loop vectorizer.
11//
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/APInt.h"
16#include "llvm/ADT/DenseMap.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/SetVector.h"
22#include "llvm/ADT/SmallSet.h"
36#include "llvm/IR/BasicBlock.h"
37#include "llvm/IR/Constants.h"
38#include "llvm/IR/DataLayout.h"
39#include "llvm/IR/DebugLoc.h"
42#include "llvm/IR/Dominators.h"
43#include "llvm/IR/Function.h"
45#include "llvm/IR/InstrTypes.h"
46#include "llvm/IR/Instruction.h"
48#include "llvm/IR/Operator.h"
49#include "llvm/IR/PassManager.h"
51#include "llvm/IR/Type.h"
52#include "llvm/IR/Value.h"
53#include "llvm/IR/ValueHandle.h"
56#include "llvm/Support/Debug.h"
59#include <algorithm>
60#include <cassert>
61#include <cstdint>
62#include <iterator>
63#include <utility>
64#include <variant>
65#include <vector>
66
67using namespace llvm;
68using namespace llvm::PatternMatch;
69
70#define DEBUG_TYPE "loop-accesses"
71
72static cl::opt<unsigned, true>
73VectorizationFactor("force-vector-width", cl::Hidden,
74 cl::desc("Sets the SIMD width. Zero is autoselect."),
77
78static cl::opt<unsigned, true>
79VectorizationInterleave("force-vector-interleave", cl::Hidden,
80 cl::desc("Sets the vectorization interleave count. "
81 "Zero is autoselect."),
85
86static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
87 "runtime-memory-check-threshold", cl::Hidden,
88 cl::desc("When performing memory disambiguation checks at runtime do not "
89 "generate more than this number of comparisons (default = 8)."),
92
93/// The maximum number of iterations used to merge memory checks.
94static cl::opt<unsigned> MemoryCheckMergeThreshold(
95 "memory-check-merge-threshold", cl::Hidden,
96 cl::desc("Maximum number of comparisons done when trying to merge "
97 "runtime memory checks. (default = 100)"),
98 cl::init(100));
99
100/// Maximum SIMD width.
101const unsigned VectorizerParams::MaxVectorWidth = 64;
102
103/// We collect dependences up to this threshold.
104static cl::opt<unsigned>
105 MaxDependences("max-dependences", cl::Hidden,
106 cl::desc("Maximum number of dependences collected by "
107 "loop-access analysis (default = 100)"),
108 cl::init(100));
109
110/// This enables versioning on the strides of symbolically striding memory
111/// accesses in code like the following.
112/// for (i = 0; i < N; ++i)
113/// A[i * Stride1] += B[i * Stride2] ...
114///
115/// Will be roughly translated to
116/// if (Stride1 == 1 && Stride2 == 1) {
117/// for (i = 0; i < N; i+=4)
118/// A[i:i+3] += ...
119/// } else
120/// ...
121static cl::opt<bool> EnableMemAccessVersioning(
122 "enable-mem-access-versioning", cl::init(true), cl::Hidden,
123 cl::desc("Enable symbolic stride memory access versioning"));
124
125/// Enable store-to-load forwarding conflict detection. This option can
126/// be disabled for correctness testing.
127static cl::opt<bool> EnableForwardingConflictDetection(
128 "store-to-load-forwarding-conflict-detection", cl::Hidden,
129 cl::desc("Enable conflict detection in loop-access analysis"),
130 cl::init(true));
131
132static cl::opt<unsigned> MaxForkedSCEVDepth(
133 "max-forked-scev-depth", cl::Hidden,
134 cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"),
135 cl::init(5));
136
137static cl::opt<bool> SpeculateUnitStride(
138 "laa-speculate-unit-stride", cl::Hidden,
139 cl::desc("Speculate that non-constant strides are unit in LAA"),
140 cl::init(true));
141
142static cl::opt<bool, true> HoistRuntimeChecks(
143 "hoist-runtime-checks", cl::Hidden,
144 cl::desc(
145 "Hoist inner loop runtime memory checks to outer loop if possible"),
148
149bool VectorizerParams::isInterleaveForced() {
150 return ::VectorizationInterleave.getNumOccurrences() > 0;
151}
152
153const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
154 const DenseMap<Value *, const SCEV *> &PtrToStride,
155 Value *Ptr) {
156 const SCEV *OrigSCEV = PSE.getSCEV(Ptr);
157
158 // If there is an entry in the map return the SCEV of the pointer with the
159 // symbolic stride replaced by one.
160 DenseMap<Value *, const SCEV *>::const_iterator SI = PtrToStride.find(Ptr);
161 if (SI == PtrToStride.end())
162 // For a non-symbolic stride, just return the original expression.
163 return OrigSCEV;
164
165 const SCEV *StrideSCEV = SI->second;
166 // Note: This assert is both overly strong and overly weak. The actual
167 // invariant here is that StrideSCEV should be loop invariant. The only
168 // such invariant strides we happen to speculate right now are unknowns
169 // and thus this is a reasonable proxy of the actual invariant.
170 assert(isa<SCEVUnknown>(StrideSCEV) && "shouldn't be in map");
171
172 ScalarEvolution *SE = PSE.getSE();
173 const auto *CT = SE->getOne(StrideSCEV->getType());
174 PSE.addPredicate(*SE->getEqualPredicate(StrideSCEV, CT));
175 auto *Expr = PSE.getSCEV(Ptr);
176
177 LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
178 << " by: " << *Expr << "\n");
179 return Expr;
180}
181
182RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
183 unsigned Index, RuntimePointerChecking &RtCheck)
184 : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),
185 AddressSpace(RtCheck.Pointers[Index]
186 .PointerValue->getType()
187 ->getPointerAddressSpace()),
188 NeedsFreeze(RtCheck.Pointers[Index].NeedsFreeze) {
189 Members.push_back(Index);
190}
191
192/// Calculate Start and End points of a memory access.
193/// Let's assume A is the first access and B is a memory access on the N-th
194/// iteration. Then B is calculated as:
195/// B = A + Step*N .
196/// Step value may be positive or negative.
197/// N is a calculated back-edge taken count:
198/// N = (TripCount > 0) ? RoundDown(TripCount - 1, VF) : 0
199/// Start and End points are calculated in the following way:
200/// Start = UMIN(A, B) ; End = UMAX(A, B) + SizeOfElt,
201/// where SizeOfElt is the size of single memory access in bytes.
202///
203/// There is no conflict when the intervals are disjoint:
204/// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
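/// As an illustrative example (values chosen arbitrarily): with A = %p,
/// Step = -4, N = 9 and a 4-byte access, B = %p - 36, so
/// Start = UMIN(%p, %p - 36) = %p - 36 and End = UMAX(%p, %p - 36) + 4 = %p + 4.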
205void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
206 Type *AccessTy, bool WritePtr,
207 unsigned DepSetId, unsigned ASId,
208 PredicatedScalarEvolution &PSE,
209 bool NeedsFreeze) {
210 ScalarEvolution *SE = PSE.getSE();
211
212 const SCEV *ScStart;
213 const SCEV *ScEnd;
214
215 if (SE->isLoopInvariant(PtrExpr, Lp)) {
216 ScStart = ScEnd = PtrExpr;
217 } else {
218 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr);
219 assert(AR && "Invalid addrec expression");
220 const SCEV *Ex = PSE.getBackedgeTakenCount();
221
222 ScStart = AR->getStart();
223 ScEnd = AR->evaluateAtIteration(Ex, *SE);
224 const SCEV *Step = AR->getStepRecurrence(*SE);
225
226 // For expressions with negative step, the upper bound is ScStart and the
227 // lower bound is ScEnd.
228 if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
229 if (CStep->getValue()->isNegative())
230 std::swap(ScStart, ScEnd);
231 } else {
232 // Fallback case: the step is not constant, but we can still
233 // get the upper and lower bounds of the interval by using min/max
234 // expressions.
235 ScStart = SE->getUMinExpr(ScStart, ScEnd);
236 ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
237 }
238 }
239 assert(SE->isLoopInvariant(ScStart, Lp) && "ScStart needs to be invariant");
240 assert(SE->isLoopInvariant(ScEnd, Lp) && "ScEnd needs to be invariant");
241
242 // Add the size of the pointed element to ScEnd.
243 auto &DL = Lp->getHeader()->getModule()->getDataLayout();
244 Type *IdxTy = DL.getIndexType(Ptr->getType());
245 const SCEV *EltSizeSCEV = SE->getStoreSizeOfExpr(IdxTy, AccessTy);
246 ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);
247
248 Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, PtrExpr,
249 NeedsFreeze);
250}
251
252void RuntimePointerChecking::tryToCreateDiffCheck(
253 const RuntimeCheckingPtrGroup &CGI, const RuntimeCheckingPtrGroup &CGJ) {
254 if (!CanUseDiffCheck)
255 return;
256
257 // If either group contains multiple different pointers, bail out.
258 // TODO: Support multiple pointers by using the minimum or maximum pointer,
259 // depending on src & sink.
260 if (CGI.Members.size() != 1 || CGJ.Members.size() != 1) {
261 CanUseDiffCheck = false;
262 return;
263 }
264
265 PointerInfo *Src = &Pointers[CGI.Members[0]];
266 PointerInfo *Sink = &Pointers[CGJ.Members[0]];
267
268 // If either pointer is read and written, multiple checks may be needed. Bail
269 // out.
270 if (!DC.getOrderForAccess(Src->PointerValue, !Src->IsWritePtr).empty() ||
271 !DC.getOrderForAccess(Sink->PointerValue, !Sink->IsWritePtr).empty()) {
272 CanUseDiffCheck = false;
273 return;
274 }
275
276 ArrayRef<unsigned> AccSrc =
277 DC.getOrderForAccess(Src->PointerValue, Src->IsWritePtr);
278 ArrayRef<unsigned> AccSink =
279 DC.getOrderForAccess(Sink->PointerValue, Sink->IsWritePtr);
280 // If either pointer is accessed multiple times, there may not be a clear
281 // src/sink relation. Bail out for now.
282 if (AccSrc.size() != 1 || AccSink.size() != 1) {
283 CanUseDiffCheck = false;
284 return;
285 }
286 // If the sink is accessed before src, swap src/sink.
287 if (AccSink[0] < AccSrc[0])
288 std::swap(Src, Sink);
289
290 auto *SrcAR = dyn_cast<SCEVAddRecExpr>(Src->Expr);
291 auto *SinkAR = dyn_cast<SCEVAddRecExpr>(Sink->Expr);
292 if (!SrcAR || !SinkAR || SrcAR->getLoop() != DC.getInnermostLoop() ||
293 SinkAR->getLoop() != DC.getInnermostLoop()) {
294 CanUseDiffCheck = false;
295 return;
296 }
297
298 SmallVector<Instruction *, 4> SrcInsts =
299 DC.getInstructionsForAccess(Src->PointerValue, Src->IsWritePtr);
300 SmallVector<Instruction *, 4> SinkInsts =
301 DC.getInstructionsForAccess(Sink->PointerValue, Sink->IsWritePtr);
302 Type *SrcTy = getLoadStoreType(SrcInsts[0]);
303 Type *DstTy = getLoadStoreType(SinkInsts[0]);
304 if (isa<ScalableVectorType>(SrcTy) || isa<ScalableVectorType>(DstTy)) {
305 CanUseDiffCheck = false;
306 return;
307 }
308 const DataLayout &DL =
309 SinkAR->getLoop()->getHeader()->getModule()->getDataLayout();
310 unsigned AllocSize =
311 std::max(DL.getTypeAllocSize(SrcTy), DL.getTypeAllocSize(DstTy));
312
313 // Only constant steps matching the AllocSize are supported at the
314 // moment. This simplifies the difference computation. Can be extended in the
315 // future.
316 auto *Step = dyn_cast<SCEVConstant>(SinkAR->getStepRecurrence(*SE));
317 if (!Step || Step != SrcAR->getStepRecurrence(*SE) ||
318 Step->getAPInt().abs() != AllocSize) {
319 CanUseDiffCheck = false;
320 return;
321 }
322
323 IntegerType *IntTy =
324 IntegerType::get(Src->PointerValue->getContext(),
325 DL.getPointerSizeInBits(CGI.AddressSpace));
326
327 // When counting down, the dependence distance needs to be swapped.
328 if (Step->getValue()->isNegative())
329 std::swap(SinkAR, SrcAR);
330
331 const SCEV *SinkStartInt = SE->getPtrToIntExpr(SinkAR->getStart(), IntTy);
332 const SCEV *SrcStartInt = SE->getPtrToIntExpr(SrcAR->getStart(), IntTy);
333 if (isa<SCEVCouldNotCompute>(SinkStartInt) ||
334 isa<SCEVCouldNotCompute>(SrcStartInt)) {
335 CanUseDiffCheck = false;
336 return;
337 }
338
339 const Loop *InnerLoop = SrcAR->getLoop();
340 // If the start values for both Src and Sink also vary according to an outer
341 // loop, then it's probably better to avoid creating diff checks because
342 // they may not be hoisted. We should instead let llvm::addRuntimeChecks
343 // do the expanded full range overlap checks, which can be hoisted.
344 if (HoistRuntimeChecks && InnerLoop->getParentLoop() &&
345 isa<SCEVAddRecExpr>(SinkStartInt) && isa<SCEVAddRecExpr>(SrcStartInt)) {
346 auto *SrcStartAR = cast<SCEVAddRecExpr>(SrcStartInt);
347 auto *SinkStartAR = cast<SCEVAddRecExpr>(SinkStartInt);
348 const Loop *StartARLoop = SrcStartAR->getLoop();
349 if (StartARLoop == SinkStartAR->getLoop() &&
350 StartARLoop == InnerLoop->getParentLoop()) {
351 LLVM_DEBUG(dbgs() << "LAA: Not creating diff runtime check, since these "
352 "cannot be hoisted out of the outer loop\n");
353 CanUseDiffCheck = false;
354 return;
355 }
356 }
357
358 LLVM_DEBUG(dbgs() << "LAA: Creating diff runtime check for:\n"
359 << "SrcStart: " << *SrcStartInt << '\n'
360 << "SinkStartInt: " << *SinkStartInt << '\n');
361 DiffChecks.emplace_back(SrcStartInt, SinkStartInt, AllocSize,
362 Src->NeedsFreeze || Sink->NeedsFreeze);
363}
364
365SmallVector<RuntimePointerCheck, 4> RuntimePointerChecking::generateChecks() {
366 SmallVector<RuntimePointerCheck, 4> Checks;
367
368 for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
369 for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
370 const RuntimeCheckingPtrGroup &CGI = CheckingGroups[I];
371 const RuntimeCheckingPtrGroup &CGJ = CheckingGroups[J];
372
373 if (needsChecking(CGI, CGJ)) {
374 tryToCreateDiffCheck(CGI, CGJ);
375 Checks.push_back(std::make_pair(&CGI, &CGJ));
376 }
377 }
378 }
379 return Checks;
380}
381
382void RuntimePointerChecking::generateChecks(
383 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
384 assert(Checks.empty() && "Checks is not empty");
385 groupChecks(DepCands, UseDependencies);
386 Checks = generateChecks();
387}
388
389bool RuntimePointerChecking::needsChecking(
390 const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
391 for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I)
392 for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J)
393 if (needsChecking(M.Members[I], N.Members[J]))
394 return true;
395 return false;
396}
397
398/// Compare \p I and \p J and return the minimum.
399/// Return nullptr in case we couldn't find an answer.
400static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
401 ScalarEvolution *SE) {
402 const SCEV *Diff = SE->getMinusSCEV(J, I);
403 const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff);
404
405 if (!C)
406 return nullptr;
407 if (C->getValue()->isNegative())
408 return J;
409 return I;
410}
411
412bool RuntimeCheckingPtrGroup::addPointer(unsigned Index,
413 RuntimePointerChecking &RtCheck) {
414 return addPointer(
415 Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,
416 RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
417 RtCheck.Pointers[Index].NeedsFreeze, *RtCheck.SE);
418}
419
420bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, const SCEV *Start,
421 const SCEV *End, unsigned AS,
422 bool NeedsFreeze,
423 ScalarEvolution &SE) {
424 assert(AddressSpace == AS &&
425 "all pointers in a checking group must be in the same address space");
426
427 // Compare the starts and ends with the known minimum and maximum
428 // of this set. We need to know how we compare against the min/max
429 // of the set in order to be able to emit memchecks.
430 const SCEV *Min0 = getMinFromExprs(Start, Low, &SE);
431 if (!Min0)
432 return false;
433
434 const SCEV *Min1 = getMinFromExprs(End, High, &SE);
435 if (!Min1)
436 return false;
437
438 // Update the low bound expression if we've found a new min value.
439 if (Min0 == Start)
440 Low = Start;
441
442 // Update the high bound expression if we've found a new max value.
443 if (Min1 != End)
444 High = End;
445
446 Members.push_back(Index);
447 this->NeedsFreeze |= NeedsFreeze;
448 return true;
449}
450
451void RuntimePointerChecking::groupChecks(
452 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
453 // We build the groups from dependency candidates equivalence classes
454 // because:
455 // - We know that pointers in the same equivalence class share
456 // the same underlying object and therefore there is a chance
457 // that we can compare pointers
458 // - We wouldn't be able to merge two pointers for which we need
459 // to emit a memcheck. The classes in DepCands are already
460 // conveniently built such that no two pointers in the same
461 // class need checking against each other.
462
463 // We use the following (greedy) algorithm to construct the groups
464 // For every pointer in the equivalence class:
465 // For each existing group:
466 // - if the difference between this pointer and the min/max bounds
467 // of the group is a constant, then make the pointer part of the
468 // group and update the min/max bounds of that group as required.
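 // As an illustrative example: for pointers %a, %a + 4 and %a + 256 in one
 // equivalence class, each difference against the group's Low/High bounds is
 // a compile-time constant, so all three can be merged into a single group
 // with widened bounds, whereas a pointer such as %a + 4 * %n (with %n
 // unknown at compile time) cannot be merged and ends up in its own group.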
469
470 CheckingGroups.clear();
471
472 // If we need to check two pointers to the same underlying object
473 // with a non-constant difference, we shouldn't perform any pointer
474 // grouping with those pointers. This is because we can easily get
475 // into cases where the resulting check would return false, even when
476 // the accesses are safe.
477 //
478 // The following example shows this:
479 // for (i = 0; i < 1000; ++i)
480 // a[5000 + i * m] = a[i] + a[i + 9000]
481 //
482 // Here grouping gives a check of (5000, 5000 + 1000 * m) against
483 // (0, 10000) which is always false. However, if m is 1, there is no
484 // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
485 // us to perform an accurate check in this case.
486 //
487 // The above case requires that we have an UnknownDependence between
488 // accesses to the same underlying object. This cannot happen unless
489 // FoundNonConstantDistanceDependence is set, and therefore UseDependencies
490 // is also false. In this case we will use the fallback path and create
491 // separate checking groups for all pointers.
492
493 // If we don't have the dependency partitions, construct a new
494 // checking pointer group for each pointer. This is also required
495 // for correctness, because in this case we can have checking between
496 // pointers to the same underlying object.
497 if (!UseDependencies) {
498 for (unsigned I = 0; I < Pointers.size(); ++I)
499 CheckingGroups.push_back(RuntimeCheckingPtrGroup(I, *this));
500 return;
501 }
502
503 unsigned TotalComparisons = 0;
504
505 DenseMap<Value *, SmallVector<unsigned>> PositionMap;
506 for (unsigned Index = 0; Index < Pointers.size(); ++Index) {
507 auto Iter = PositionMap.insert({Pointers[Index].PointerValue, {}});
508 Iter.first->second.push_back(Index);
509 }
510
511 // We need to keep track of what pointers we've already seen so we
512 // don't process them twice.
513 SmallSet<unsigned, 2> Seen;
514
515 // Go through all equivalence classes, get the "pointer check groups"
516 // and add them to the overall solution. We use the order in which accesses
517 // appear in 'Pointers' to enforce determinism.
518 for (unsigned I = 0; I < Pointers.size(); ++I) {
519 // We've seen this pointer before, and therefore already processed
520 // its equivalence class.
521 if (Seen.count(I))
522 continue;
523
524 MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
525 Pointers[I].IsWritePtr);
526
527 SmallVector<RuntimeCheckingPtrGroup, 2> Groups;
528 auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));
529
530 // Because DepCands is constructed by visiting accesses in the order in
531 // which they appear in alias sets (which is deterministic) and the
532 // iteration order within an equivalence class member is only dependent on
533 // the order in which unions and insertions are performed on the
534 // equivalence class, the iteration order is deterministic.
535 for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
536 MI != ME; ++MI) {
537 auto PointerI = PositionMap.find(MI->getPointer());
538 assert(PointerI != PositionMap.end() &&
539 "pointer in equivalence class not found in PositionMap");
540 for (unsigned Pointer : PointerI->second) {
541 bool Merged = false;
542 // Mark this pointer as seen.
543 Seen.insert(Pointer);
544
545 // Go through all the existing sets and see if we can find one
546 // which can include this pointer.
547 for (RuntimeCheckingPtrGroup &Group : Groups) {
548 // Don't perform more than a certain amount of comparisons.
549 // This should limit the cost of grouping the pointers to something
550 // reasonable. If we do end up hitting this threshold, the algorithm
551 // will create separate groups for all remaining pointers.
552 if (TotalComparisons > MemoryCheckMergeThreshold)
553 break;
554
555 TotalComparisons++;
556
557 if (Group.addPointer(Pointer, *this)) {
558 Merged = true;
559 break;
560 }
561 }
562
563 if (!Merged)
564 // We couldn't add this pointer to any existing set or the threshold
565 // for the number of comparisons has been reached. Create a new group
566 // to hold the current pointer.
567 Groups.push_back(RuntimeCheckingPtrGroup(Pointer, *this));
568 }
569 }
570
571 // We've computed the grouped checks for this partition.
572 // Save the results and continue with the next one.
573 llvm::copy(Groups, std::back_inserter(CheckingGroups));
574 }
575}
576
577bool RuntimePointerChecking::arePointersInSamePartition(
578 const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
579 unsigned PtrIdx2) {
580 return (PtrToPartition[PtrIdx1] != -1 &&
581 PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
582}
583
584bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
585 const PointerInfo &PointerI = Pointers[I];
586 const PointerInfo &PointerJ = Pointers[J];
587
588 // No need to check if two readonly pointers intersect.
589 if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
590 return false;
591
592 // Only need to check pointers between two different dependency sets.
593 if (PointerI.DependencySetId == PointerJ.DependencySetId)
594 return false;
595
596 // Only need to check pointers in the same alias set.
597 if (PointerI.AliasSetId != PointerJ.AliasSetId)
598 return false;
599
600 return true;
601}
602
603void RuntimePointerChecking::printChecks(
604 raw_ostream &OS, const SmallVectorImpl<RuntimePointerCheck> &Checks,
605 unsigned Depth) const {
606 unsigned N = 0;
607 for (const auto &Check : Checks) {
608 const auto &First = Check.first->Members, &Second = Check.second->Members;
609
610 OS.indent(Depth) << "Check " << N++ << ":\n";
611
612 OS.indent(Depth + 2) << "Comparing group (" << Check.first << "):\n";
613 for (unsigned K = 0; K < First.size(); ++K)
614 OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n";
615
616 OS.indent(Depth + 2) << "Against group (" << Check.second << "):\n";
617 for (unsigned K = 0; K < Second.size(); ++K)
618 OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n";
619 }
620}
621
622void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {
623
624 OS.indent(Depth) << "Run-time memory checks:\n";
625 printChecks(OS, Checks, Depth);
626
627 OS.indent(Depth) << "Grouped accesses:\n";
628 for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
629 const auto &CG = CheckingGroups[I];
630
631 OS.indent(Depth + 2) << "Group " << &CG << ":\n";
632 OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
633 << ")\n";
634 for (unsigned J = 0; J < CG.Members.size(); ++J) {
635 OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr
636 << "\n";
637 }
638 }
639}
640
641namespace {
642
643/// Analyses memory accesses in a loop.
644///
645/// Checks whether run time pointer checks are needed and builds sets for data
646/// dependence checking.
647class AccessAnalysis {
648public:
649 /// Read or write access location.
650 typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
651 typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;
652
653 AccessAnalysis(Loop *TheLoop, AAResults *AA, LoopInfo *LI,
654 MemoryDepChecker::DepCandidates &DA,
655 PredicatedScalarEvolution &PSE)
656 : TheLoop(TheLoop), BAA(*AA), AST(BAA), LI(LI), DepCands(DA), PSE(PSE) {
657 // We're analyzing dependences across loop iterations.
658 BAA.enableCrossIterationMode();
659 }
660
661 /// Register a load and whether it is only read from.
662 void addLoad(MemoryLocation &Loc, Type *AccessTy, bool IsReadOnly) {
663 Value *Ptr = const_cast<Value*>(Loc.Ptr);
665 Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy);
666 if (IsReadOnly)
667 ReadOnlyPtr.insert(Ptr);
668 }
669
670 /// Register a store.
671 void addStore(MemoryLocation &Loc, Type *AccessTy) {
672 Value *Ptr = const_cast<Value*>(Loc.Ptr);
674 Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy);
675 }
676
677 /// Check if we can emit a run-time no-alias check for \p Access.
678 ///
679 /// Returns true if we can emit a run-time no alias check for \p Access.
680 /// If we can check this access, this also adds it to a dependence set and
681/// adds a run-time check for it to \p RtCheck. If \p Assume is true,
682 /// we will attempt to use additional run-time checks in order to get
683 /// the bounds of the pointer.
684 bool createCheckForAccess(RuntimePointerChecking &RtCheck,
685 MemAccessInfo Access, Type *AccessTy,
686 const DenseMap<Value *, const SCEV *> &Strides,
687 DenseMap<Value *, unsigned> &DepSetId,
688 Loop *TheLoop, unsigned &RunningDepId,
689 unsigned ASId, bool ShouldCheckStride, bool Assume);
690
691 /// Check whether we can check the pointers at runtime for
692 /// non-intersection.
693 ///
694 /// Returns true if we need no check or if we do and we can generate them
695 /// (i.e. the pointers have computable bounds).
696 bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
697 Loop *TheLoop, const DenseMap<Value *, const SCEV *> &Strides,
698 Value *&UncomputablePtr, bool ShouldCheckWrap = false);
699
700 /// Goes over all memory accesses, checks whether a RT check is needed
701 /// and builds sets of dependent accesses.
702 void buildDependenceSets() {
703 processMemAccesses();
704 }
705
706 /// Initial processing of memory accesses determined that we need to
707 /// perform dependency checking.
708 ///
709 /// Note that this can later be cleared if we retry memcheck analysis without
710 /// dependency checking (i.e. FoundNonConstantDistanceDependence).
711 bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }
712
713 /// We decided that no dependence analysis would be used. Reset the state.
714 void resetDepChecks(MemoryDepChecker &DepChecker) {
715 CheckDeps.clear();
716 DepChecker.clearDependences();
717 }
718
719 MemAccessInfoList &getDependenciesToCheck() { return CheckDeps; }
720
721 const DenseMap<Value *, SmallVector<const Value *, 16>> &
722 getUnderlyingObjects() {
723 return UnderlyingObjects;
724 }
725
726private:
727 typedef MapVector<MemAccessInfo, SmallSetVector<Type *, 1>> PtrAccessMap;
728
729 /// Go over all memory accesses and check whether runtime pointer checks
730 /// are needed and build sets of dependency check candidates.
731 void processMemAccesses();
732
733 /// Map of all accesses. Values are the types used to access memory pointed to
734 /// by the pointer.
735 PtrAccessMap Accesses;
736
737 /// The loop being checked.
738 const Loop *TheLoop;
739
740 /// List of accesses that need a further dependence check.
741 MemAccessInfoList CheckDeps;
742
743 /// Set of pointers that are read only.
744 SmallPtrSet<Value*, 16> ReadOnlyPtr;
745
746 /// Batched alias analysis results.
747 BatchAAResults BAA;
748
749 /// An alias set tracker to partition the access set by underlying object and
750 /// intrinsic property (such as TBAA metadata).
751 AliasSetTracker AST;
752
753 LoopInfo *LI;
754
755 /// Sets of potentially dependent accesses - members of one set share an
756/// underlying pointer. The set "CheckDeps" identifies which sets really need a
757 /// dependence check.
758 MemoryDepChecker::DepCandidates &DepCands;
759
760 /// Initial processing of memory accesses determined that we may need
761 /// to add memchecks. Perform the analysis to determine the necessary checks.
762 ///
763/// Note that this is different from isDependencyCheckNeeded. When we retry
764 /// memcheck analysis without dependency checking
765 /// (i.e. FoundNonConstantDistanceDependence), isDependencyCheckNeeded is
766 /// cleared while this remains set if we have potentially dependent accesses.
767 bool IsRTCheckAnalysisNeeded = false;
768
769 /// The SCEV predicate containing all the SCEV-related assumptions.
770 PredicatedScalarEvolution &PSE;
771
772 DenseMap<Value *, SmallVector<const Value *, 16>> UnderlyingObjects;
773};
774
775} // end anonymous namespace
776
777/// Check whether a pointer can participate in a runtime bounds check.
778/// If \p Assume, try harder to prove that we can compute the bounds of \p Ptr
779/// by adding run-time checks (overflow checks) if necessary.
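/// For example, a loop-invariant pointer trivially has computable bounds, as
/// does an affine AddRec such as {%base,+,4}<%loop>; a pointer whose SCEV is
/// neither only qualifies if \p Assume lets PSE rewrite it into an affine
/// AddRec by adding run-time predicates.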
780static bool hasComputableBounds(PredicatedScalarEvolution &PSE, Value *Ptr,
781 const SCEV *PtrScev, Loop *L, bool Assume) {
782 // The bounds for loop-invariant pointer is trivial.
783 if (PSE.getSE()->isLoopInvariant(PtrScev, L))
784 return true;
785
786 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
787
788 if (!AR && Assume)
789 AR = PSE.getAsAddRec(Ptr);
790
791 if (!AR)
792 return false;
793
794 return AR->isAffine();
795}
796
797/// Check whether a pointer address cannot wrap.
798static bool isNoWrap(PredicatedScalarEvolution &PSE,
799 const DenseMap<Value *, const SCEV *> &Strides, Value *Ptr, Type *AccessTy,
800 Loop *L) {
801 const SCEV *PtrScev = PSE.getSCEV(Ptr);
802 if (PSE.getSE()->isLoopInvariant(PtrScev, L))
803 return true;
804
805 int64_t Stride = getPtrStride(PSE, AccessTy, Ptr, L, Strides).value_or(0);
806 if (Stride == 1 || PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
807 return true;
808
809 return false;
810}
811
812static void visitPointers(Value *StartPtr, const Loop &InnermostLoop,
813 function_ref<void(Value *)> AddPointer) {
814 SmallPtrSet<Value *, 8> Visited;
815 SmallVector<Value *> WorkList;
816 WorkList.push_back(StartPtr);
817
818 while (!WorkList.empty()) {
819 Value *Ptr = WorkList.pop_back_val();
820 if (!Visited.insert(Ptr).second)
821 continue;
822 auto *PN = dyn_cast<PHINode>(Ptr);
823 // SCEV does not look through non-header PHIs inside the loop. Such phis
824 // can be analyzed by adding separate accesses for each incoming pointer
825 // value.
826 if (PN && InnermostLoop.contains(PN->getParent()) &&
827 PN->getParent() != InnermostLoop.getHeader()) {
828 for (const Use &Inc : PN->incoming_values())
829 WorkList.push_back(Inc);
830 } else
831 AddPointer(Ptr);
832 }
833}
834
835// Walk back through the IR for a pointer, looking for a select like the
836// following:
837//
838// %offset = select i1 %cmp, i64 %a, i64 %b
839// %addr = getelementptr double, double* %base, i64 %offset
840// %ld = load double, double* %addr, align 8
841//
842// We won't be able to form a single SCEVAddRecExpr from this since the
843// address for each loop iteration depends on %cmp. We could potentially
844// produce multiple valid SCEVAddRecExprs, though, and check all of them for
845// memory safety/aliasing if needed.
846//
847// If we encounter some IR we don't yet handle, or something obviously fine
848// like a constant, then we just add the SCEV for that term to the list passed
849// in by the caller. If we have a node that may potentially yield a valid
850// SCEVAddRecExpr then we decompose it into parts and build the SCEV terms
851// ourselves before adding to the list.
852static void findForkedSCEVs(
853 ScalarEvolution *SE, const Loop *L, Value *Ptr,
854 SmallVectorImpl<PointerIntPair<const SCEV *, 1, bool>> &ScevList,
855 unsigned Depth) {
856 // If our Value is a SCEVAddRecExpr, loop invariant, not an instruction, or
857 // we've exceeded our limit on recursion, just return whatever we have
858 // regardless of whether it can be used for a forked pointer or not, along
859 // with an indication of whether it might be a poison or undef value.
860 const SCEV *Scev = SE->getSCEV(Ptr);
861 if (isa<SCEVAddRecExpr>(Scev) || L->isLoopInvariant(Ptr) ||
862 !isa<Instruction>(Ptr) || Depth == 0) {
863 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
864 return;
865 }
866
867 Depth--;
868
869 auto UndefPoisonCheck = [](PointerIntPair<const SCEV *, 1, bool> S) {
870 return get<1>(S);
871 };
872
873 auto GetBinOpExpr = [&SE](unsigned Opcode, const SCEV *L, const SCEV *R) {
874 switch (Opcode) {
875 case Instruction::Add:
876 return SE->getAddExpr(L, R);
877 case Instruction::Sub:
878 return SE->getMinusSCEV(L, R);
879 default:
880 llvm_unreachable("Unexpected binary operator when walking ForkedPtrs");
881 }
882 };
883
884 Instruction *I = cast<Instruction>(Ptr);
885 unsigned Opcode = I->getOpcode();
886 switch (Opcode) {
887 case Instruction::GetElementPtr: {
888 GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
889 Type *SourceTy = GEP->getSourceElementType();
890 // We only handle base + single offset GEPs here for now.
891 // Not dealing with preexisting gathers yet, so no vectors.
892 if (I->getNumOperands() != 2 || SourceTy->isVectorTy()) {
893 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(GEP));
894 break;
895 }
896 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> BaseScevs;
897 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> OffsetScevs;
898 findForkedSCEVs(SE, L, I->getOperand(0), BaseScevs, Depth);
899 findForkedSCEVs(SE, L, I->getOperand(1), OffsetScevs, Depth);
900
901 // See if we need to freeze our fork...
902 bool NeedsFreeze = any_of(BaseScevs, UndefPoisonCheck) ||
903 any_of(OffsetScevs, UndefPoisonCheck);
904
905 // Check that we only have a single fork, on either the base or the offset.
906 // Copy the SCEV across for the one without a fork in order to generate
907 // the full SCEV for both sides of the GEP.
908 if (OffsetScevs.size() == 2 && BaseScevs.size() == 1)
909 BaseScevs.push_back(BaseScevs[0]);
910 else if (BaseScevs.size() == 2 && OffsetScevs.size() == 1)
911 OffsetScevs.push_back(OffsetScevs[0]);
912 else {
913 ScevList.emplace_back(Scev, NeedsFreeze);
914 break;
915 }
916
917 // Find the pointer type we need to extend to.
918 Type *IntPtrTy = SE->getEffectiveSCEVType(
919 SE->getSCEV(GEP->getPointerOperand())->getType());
920
921 // Find the size of the type being pointed to. We only have a single
922 // index term (guarded above) so we don't need to index into arrays or
923 // structures, just get the size of the scalar value.
924 const SCEV *Size = SE->getSizeOfExpr(IntPtrTy, SourceTy);
925
926 // Scale up the offsets by the size of the type, then add to the bases.
927 const SCEV *Scaled1 = SE->getMulExpr(
928 Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[0]), IntPtrTy));
929 const SCEV *Scaled2 = SE->getMulExpr(
930 Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[1]), IntPtrTy));
931 ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[0]), Scaled1),
932 NeedsFreeze);
933 ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[1]), Scaled2),
934 NeedsFreeze);
935 break;
936 }
937 case Instruction::Select: {
938 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
939 // A select means we've found a forked pointer, but we currently only
940 // support a single select per pointer so if there's another behind this
941 // then we just bail out and return the generic SCEV.
942 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
943 findForkedSCEVs(SE, L, I->getOperand(2), ChildScevs, Depth);
944 if (ChildScevs.size() == 2) {
945 ScevList.push_back(ChildScevs[0]);
946 ScevList.push_back(ChildScevs[1]);
947 } else
948 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
949 break;
950 }
951 case Instruction::PHI: {
952 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
953 // A phi means we've found a forked pointer, but we currently only
954 // support a single phi per pointer so if there's another behind this
955 // then we just bail out and return the generic SCEV.
956 if (I->getNumOperands() == 2) {
957 findForkedSCEVs(SE, L, I->getOperand(0), ChildScevs, Depth);
958 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
959 }
960 if (ChildScevs.size() == 2) {
961 ScevList.push_back(ChildScevs[0]);
962 ScevList.push_back(ChildScevs[1]);
963 } else
964 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
965 break;
966 }
967 case Instruction::Add:
968 case Instruction::Sub: {
969 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> LScevs;
970 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> RScevs;
971 findForkedSCEVs(SE, L, I->getOperand(0), LScevs, Depth);
972 findForkedSCEVs(SE, L, I->getOperand(1), RScevs, Depth);
973
974 // See if we need to freeze our fork...
975 bool NeedsFreeze =
976 any_of(LScevs, UndefPoisonCheck) || any_of(RScevs, UndefPoisonCheck);
977
978 // Check that we only have a single fork, on either the left or right side.
979 // Copy the SCEV across for the one without a fork in order to generate
980 // the full SCEV for both sides of the BinOp.
981 if (LScevs.size() == 2 && RScevs.size() == 1)
982 RScevs.push_back(RScevs[0]);
983 else if (RScevs.size() == 2 && LScevs.size() == 1)
984 LScevs.push_back(LScevs[0]);
985 else {
986 ScevList.emplace_back(Scev, NeedsFreeze);
987 break;
988 }
989
990 ScevList.emplace_back(
991 GetBinOpExpr(Opcode, get<0>(LScevs[0]), get<0>(RScevs[0])),
992 NeedsFreeze);
993 ScevList.emplace_back(
994 GetBinOpExpr(Opcode, get<0>(LScevs[1]), get<0>(RScevs[1])),
995 NeedsFreeze);
996 break;
997 }
998 default:
999 // Just return the current SCEV if we haven't handled the instruction yet.
1000 LLVM_DEBUG(dbgs() << "ForkedPtr unhandled instruction: " << *I << "\n");
1001 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
1002 break;
1003 }
1004}
1005
1006static SmallVector<PointerIntPair<const SCEV *, 1, bool>>
1007findForkedPointer(PredicatedScalarEvolution &PSE,
1008 const DenseMap<Value *, const SCEV *> &StridesMap, Value *Ptr,
1009 const Loop *L) {
1010 ScalarEvolution *SE = PSE.getSE();
1011 assert(SE->isSCEVable(Ptr->getType()) && "Value is not SCEVable!");
1012 SmallVector<PointerIntPair<const SCEV *, 1, bool>> Scevs;
1013 findForkedSCEVs(SE, L, Ptr, Scevs, MaxForkedSCEVDepth);
1014
1015 // For now, we will only accept a forked pointer with two possible SCEVs
1016 // that are either SCEVAddRecExprs or loop invariant.
1017 if (Scevs.size() == 2 &&
1018 (isa<SCEVAddRecExpr>(get<0>(Scevs[0])) ||
1019 SE->isLoopInvariant(get<0>(Scevs[0]), L)) &&
1020 (isa<SCEVAddRecExpr>(get<0>(Scevs[1])) ||
1021 SE->isLoopInvariant(get<0>(Scevs[1]), L))) {
1022 LLVM_DEBUG(dbgs() << "LAA: Found forked pointer: " << *Ptr << "\n");
1023 LLVM_DEBUG(dbgs() << "\t(1) " << *get<0>(Scevs[0]) << "\n");
1024 LLVM_DEBUG(dbgs() << "\t(2) " << *get<0>(Scevs[1]) << "\n");
1025 return Scevs;
1026 }
1027
1028 return {{replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false}};
1029}
1030
1031bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
1032 MemAccessInfo Access, Type *AccessTy,
1033 const DenseMap<Value *, const SCEV *> &StridesMap,
1034 DenseMap<Value *, unsigned> &DepSetId,
1035 Loop *TheLoop, unsigned &RunningDepId,
1036 unsigned ASId, bool ShouldCheckWrap,
1037 bool Assume) {
1038 Value *Ptr = Access.getPointer();
1039
1040 SmallVector<PointerIntPair<const SCEV *, 1, bool>> TranslatedPtrs =
1041 findForkedPointer(PSE, StridesMap, Ptr, TheLoop);
1042
1043 for (auto &P : TranslatedPtrs) {
1044 const SCEV *PtrExpr = get<0>(P);
1045 if (!hasComputableBounds(PSE, Ptr, PtrExpr, TheLoop, Assume))
1046 return false;
1047
1048 // When we run after a failing dependency check we have to make sure
1049 // we don't have wrapping pointers.
1050 if (ShouldCheckWrap) {
1051 // Skip wrap checking when translating pointers.
1052 if (TranslatedPtrs.size() > 1)
1053 return false;
1054
1055 if (!isNoWrap(PSE, StridesMap, Ptr, AccessTy, TheLoop)) {
1056 auto *Expr = PSE.getSCEV(Ptr);
1057 if (!Assume || !isa<SCEVAddRecExpr>(Expr))
1058 return false;
1059 PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
1060 }
1061 }
1062 // If there's only one option for Ptr, look it up after bounds and wrap
1063 // checking, because assumptions might have been added to PSE.
1064 if (TranslatedPtrs.size() == 1)
1065 TranslatedPtrs[0] = {replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr),
1066 false};
1067 }
1068
1069 for (auto [PtrExpr, NeedsFreeze] : TranslatedPtrs) {
1070 // The id of the dependence set.
1071 unsigned DepId;
1072
1073 if (isDependencyCheckNeeded()) {
1074 Value *Leader = DepCands.getLeaderValue(Access).getPointer();
1075 unsigned &LeaderId = DepSetId[Leader];
1076 if (!LeaderId)
1077 LeaderId = RunningDepId++;
1078 DepId = LeaderId;
1079 } else
1080 // Each access has its own dependence set.
1081 DepId = RunningDepId++;
1082
1083 bool IsWrite = Access.getInt();
1084 RtCheck.insert(TheLoop, Ptr, PtrExpr, AccessTy, IsWrite, DepId, ASId, PSE,
1085 NeedsFreeze);
1086 LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
1087 }
1088
1089 return true;
1090}
1091
1092bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
1093 ScalarEvolution *SE, Loop *TheLoop,
1094 const DenseMap<Value *, const SCEV *> &StridesMap,
1095 Value *&UncomputablePtr, bool ShouldCheckWrap) {
1096 // Find pointers with computable bounds. We are going to use this information
1097 // to place a runtime bound check.
1098 bool CanDoRT = true;
1099
1100 bool MayNeedRTCheck = false;
1101 if (!IsRTCheckAnalysisNeeded) return true;
1102
1103 bool IsDepCheckNeeded = isDependencyCheckNeeded();
1104
1105 // We assign a consecutive id to accesses from different alias sets.
1106 // Accesses between different groups don't need to be checked.
1107 unsigned ASId = 0;
1108 for (auto &AS : AST) {
1109 int NumReadPtrChecks = 0;
1110 int NumWritePtrChecks = 0;
1111 bool CanDoAliasSetRT = true;
1112 ++ASId;
1113
1114 // We assign consecutive ids to accesses from different dependence sets.
1115 // Accesses within the same set don't need a runtime check.
1116 unsigned RunningDepId = 1;
1117 DenseMap<Value *, unsigned> DepSetId;
1118
1119 SmallVector<std::pair<MemAccessInfo, Type *>, 4> Retries;
1120
1121 // First, count how many write and read accesses are in the alias set. Also
1122 // collect MemAccessInfos for later.
1123 SmallVector<MemAccessInfo, 4> AccessInfos;
1124 for (const auto &A : AS) {
1125 Value *Ptr = A.getValue();
1126 bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
1127 if (IsWrite)
1128 ++NumWritePtrChecks;
1129 else
1130 ++NumReadPtrChecks;
1131 AccessInfos.emplace_back(Ptr, IsWrite);
1132 }
1133
1134 // We do not need runtime checks for this alias set, if there are no writes
1135 // or a single write and no reads.
1136 if (NumWritePtrChecks == 0 ||
1137 (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
1138 assert((AS.size() <= 1 ||
1139 all_of(AS,
1140 [this](auto AC) {
1141 MemAccessInfo AccessWrite(AC.getValue(), true);
1142 return DepCands.findValue(AccessWrite) == DepCands.end();
1143 })) &&
1144 "Can only skip updating CanDoRT below, if all entries in AS "
1145 "are reads or there is at most 1 entry");
1146 continue;
1147 }
1148
1149 for (auto &Access : AccessInfos) {
1150 for (const auto &AccessTy : Accesses[Access]) {
1151 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1152 DepSetId, TheLoop, RunningDepId, ASId,
1153 ShouldCheckWrap, false)) {
1154 LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
1155 << *Access.getPointer() << '\n');
1156 Retries.push_back({Access, AccessTy});
1157 CanDoAliasSetRT = false;
1158 }
1159 }
1160 }
1161
1162 // Note that this function computes CanDoRT and MayNeedRTCheck
1163 // independently. For example CanDoRT=false, MayNeedRTCheck=false means that
1164 // we have a pointer for which we couldn't find the bounds but we don't
1165 // actually need to emit any checks so it does not matter.
1166 //
1167 // We need runtime checks for this alias set, if there are at least 2
1168 // dependence sets (in which case RunningDepId > 2) or if we need to re-try
1169 // any bound checks (because in that case the number of dependence sets is
1170 // incomplete).
1171 bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();
1172
1173 // We need to perform run-time alias checks, but some pointers had bounds
1174 // that couldn't be checked.
1175 if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
1176 // Reset the CanDoSetRt flag and retry all accesses that have failed.
1177 // We know that we need these checks, so we can now be more aggressive
1178 // and add further checks if required (overflow checks).
1179 CanDoAliasSetRT = true;
1180 for (auto Retry : Retries) {
1181 MemAccessInfo Access = Retry.first;
1182 Type *AccessTy = Retry.second;
1183 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1184 DepSetId, TheLoop, RunningDepId, ASId,
1185 ShouldCheckWrap, /*Assume=*/true)) {
1186 CanDoAliasSetRT = false;
1187 UncomputablePtr = Access.getPointer();
1188 break;
1189 }
1190 }
1191 }
1192
1193 CanDoRT &= CanDoAliasSetRT;
1194 MayNeedRTCheck |= NeedsAliasSetRTCheck;
1195 ++ASId;
1196 }
1197
1198 // If the pointers that we would use for the bounds comparison have different
1199 // address spaces, assume the values aren't directly comparable, so we can't
1200 // use them for the runtime check. We also have to assume they could
1201 // overlap. In the future there should be metadata for whether address spaces
1202 // are disjoint.
1203 unsigned NumPointers = RtCheck.Pointers.size();
1204 for (unsigned i = 0; i < NumPointers; ++i) {
1205 for (unsigned j = i + 1; j < NumPointers; ++j) {
1206 // Only need to check pointers between two different dependency sets.
1207 if (RtCheck.Pointers[i].DependencySetId ==
1208 RtCheck.Pointers[j].DependencySetId)
1209 continue;
1210 // Only need to check pointers in the same alias set.
1211 if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
1212 continue;
1213
1214 Value *PtrI = RtCheck.Pointers[i].PointerValue;
1215 Value *PtrJ = RtCheck.Pointers[j].PointerValue;
1216
1217 unsigned ASi = PtrI->getType()->getPointerAddressSpace();
1218 unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
1219 if (ASi != ASj) {
1220 LLVM_DEBUG(
1221 dbgs() << "LAA: Runtime check would require comparison between"
1222 " different address spaces\n");
1223 return false;
1224 }
1225 }
1226 }
1227
1228 if (MayNeedRTCheck && CanDoRT)
1229 RtCheck.generateChecks(DepCands, IsDepCheckNeeded);
1230
1231 LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
1232 << " pointer comparisons.\n");
1233
1234 // If we can do run-time checks, but there are no checks, no runtime checks
1235 // are needed. This can happen when all pointers point to the same underlying
1236 // object for example.
1237 RtCheck.Need = CanDoRT ? RtCheck.getNumberOfChecks() != 0 : MayNeedRTCheck;
1238
1239 bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
1240 if (!CanDoRTIfNeeded)
1241 RtCheck.reset();
1242 return CanDoRTIfNeeded;
1243}
1244
1245void AccessAnalysis::processMemAccesses() {
1246 // We process the set twice: first we process read-write pointers, last we
1247 // process read-only pointers. This allows us to skip dependence tests for
1248 // read-only pointers.
1249
1250 LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
1251 LLVM_DEBUG(dbgs() << " AST: "; AST.dump());
1252 LLVM_DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
1253 LLVM_DEBUG({
1254 for (auto A : Accesses)
1255 dbgs() << "\t" << *A.first.getPointer() << " ("
1256 << (A.first.getInt()
1257 ? "write"
1258 : (ReadOnlyPtr.count(A.first.getPointer()) ? "read-only"
1259 : "read"))
1260 << ")\n";
1261 });
1262
1263 // The AliasSetTracker has nicely partitioned our pointers by metadata
1264 // compatibility and potential for underlying-object overlap. As a result, we
1265 // only need to check for potential pointer dependencies within each alias
1266 // set.
1267 for (const auto &AS : AST) {
1268 // Note that both the alias-set tracker and the alias sets themselves use
1269 // linked lists internally and so the iteration order here is deterministic
1270 // (matching the original instruction order within each set).
1271
1272 bool SetHasWrite = false;
1273
1274 // Map of pointers to last access encountered.
1275 typedef DenseMap<const Value*, MemAccessInfo> UnderlyingObjToAccessMap;
1276 UnderlyingObjToAccessMap ObjToLastAccess;
1277
1278 // Set of access to check after all writes have been processed.
1279 PtrAccessMap DeferredAccesses;
1280
1281 // Iterate over each alias set twice, once to process read/write pointers,
1282 // and then to process read-only pointers.
1283 for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
1284 bool UseDeferred = SetIteration > 0;
1285 PtrAccessMap &S = UseDeferred ? DeferredAccesses : Accesses;
1286
1287 for (const auto &AV : AS) {
1288 Value *Ptr = AV.getValue();
1289
1290 // For a single memory access in AliasSetTracker, Accesses may contain
1291 // both read and write, and they both need to be handled for CheckDeps.
1292 for (const auto &AC : S) {
1293 if (AC.first.getPointer() != Ptr)
1294 continue;
1295
1296 bool IsWrite = AC.first.getInt();
1297
1298 // If we're using the deferred access set, then it contains only
1299 // reads.
1300 bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
1301 if (UseDeferred && !IsReadOnlyPtr)
1302 continue;
1303 // Otherwise, the pointer must be in the PtrAccessSet, either as a
1304 // read or a write.
1305 assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
1306 S.count(MemAccessInfo(Ptr, false))) &&
1307 "Alias-set pointer not in the access set?");
1308
1309 MemAccessInfo Access(Ptr, IsWrite);
1310 DepCands.insert(Access);
1311
1312 // Memorize read-only pointers for later processing and skip them in
1313 // the first round (they need to be checked after we have seen all
1314 // write pointers). Note: we also mark pointers that are not
1315 // consecutive as "read-only" pointers (so that we check
1316 // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
1317 if (!UseDeferred && IsReadOnlyPtr) {
1318 // We only use the pointer keys, the types vector values don't
1319 // matter.
1320 DeferredAccesses.insert({Access, {}});
1321 continue;
1322 }
1323
1324 // If this is a write - check other reads and writes for conflicts. If
1325 // this is a read only check other writes for conflicts (but only if
1326 // there is no other write to the ptr - this is an optimization to
1327 // catch "a[i] = a[i] + " without having to do a dependence check).
1328 if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
1329 CheckDeps.push_back(Access);
1330 IsRTCheckAnalysisNeeded = true;
1331 }
1332
1333 if (IsWrite)
1334 SetHasWrite = true;
1335
1336 // Create sets of pointers connected by a shared alias set and
1337 // underlying object.
1338 typedef SmallVector<const Value *, 16> ValueVector;
1339 ValueVector TempObjects;
1340
1341 UnderlyingObjects[Ptr] = {};
1342 SmallVector<const Value *, 16> &UOs = UnderlyingObjects[Ptr];
1343 ::getUnderlyingObjects(Ptr, UOs, LI);
1344 LLVM_DEBUG(dbgs()
1345 << "Underlying objects for pointer " << *Ptr << "\n");
1346 for (const Value *UnderlyingObj : UOs) {
1347 // nullptr never aliases; don't join sets for pointers that have "null"
1348 // in their UnderlyingObjects list.
1349 if (isa<ConstantPointerNull>(UnderlyingObj) &&
1350 !NullPointerIsDefined(
1351 TheLoop->getHeader()->getParent(),
1352 UnderlyingObj->getType()->getPointerAddressSpace()))
1353 continue;
1354
1355 UnderlyingObjToAccessMap::iterator Prev =
1356 ObjToLastAccess.find(UnderlyingObj);
1357 if (Prev != ObjToLastAccess.end())
1358 DepCands.unionSets(Access, Prev->second);
1359
1360 ObjToLastAccess[UnderlyingObj] = Access;
1361 LLVM_DEBUG(dbgs() << " " << *UnderlyingObj << "\n");
1362 }
1363 }
1364 }
1365 }
1366 }
1367}
1368
1369/// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
1370/// i.e. monotonically increasing/decreasing.
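/// Illustrative IR (assumed, not from this file): with %iv a NSW induction
/// variable of the loop,
///   %idx = add nsw i64 %iv, 1
///   %gep = getelementptr inbounds i32, ptr %a, i64 %idx
/// the inbounds GEP whose non-constant index is an NSW add over an NSW AddRec
/// lets this routine conclude that the address does not wrap.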
1371static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
1372 PredicatedScalarEvolution &PSE, const Loop *L) {
1373
1374 // FIXME: This should probably only return true for NUW.
1375 if (AR->getNoWrapFlags(SCEV::NoWrapMask))
1376 return true;
1377
1378 if (PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
1379 return true;
1380
1381 // Scalar evolution does not propagate the non-wrapping flags to values that
1382 // are derived from a non-wrapping induction variable because non-wrapping
1383 // could be flow-sensitive.
1384 //
1385 // Look through the potentially overflowing instruction to try to prove
1386 // non-wrapping for the *specific* value of Ptr.
1387
1388 // The arithmetic implied by an inbounds GEP can't overflow.
1389 auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
1390 if (!GEP || !GEP->isInBounds())
1391 return false;
1392
1393 // Make sure there is only one non-const index and analyze that.
1394 Value *NonConstIndex = nullptr;
1395 for (Value *Index : GEP->indices())
1396 if (!isa<ConstantInt>(Index)) {
1397 if (NonConstIndex)
1398 return false;
1399 NonConstIndex = Index;
1400 }
1401 if (!NonConstIndex)
1402 // The recurrence is on the pointer, ignore for now.
1403 return false;
1404
1405 // The index in GEP is signed. It is non-wrapping if it's derived from a NSW
1406 // AddRec using a NSW operation.
1407 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
1408 if (OBO->hasNoSignedWrap() &&
1409 // Assume the other operand is constant so that the AddRec can be
1410 // easily found.
1411 isa<ConstantInt>(OBO->getOperand(1))) {
1412 auto *OpScev = PSE.getSCEV(OBO->getOperand(0));
1413
1414 if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
1415 return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
1416 }
1417
1418 return false;
1419}
1420
1421/// Check whether the access through \p Ptr has a constant stride.
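/// For example (illustrative): an i32 access whose pointer SCEV is
/// {%a,+,4}<%loop> has stride 1 and {%a,+,8}<%loop> has stride 2, while a
/// non-constant or non-affine step yields std::nullopt.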
1422std::optional<int64_t> llvm::getPtrStride(PredicatedScalarEvolution &PSE,
1423 Type *AccessTy, Value *Ptr,
1424 const Loop *Lp,
1425 const DenseMap<Value *, const SCEV *> &StridesMap,
1426 bool Assume, bool ShouldCheckWrap) {
1427 Type *Ty = Ptr->getType();
1428 assert(Ty->isPointerTy() && "Unexpected non-ptr");
1429
1430 if (isa<ScalableVectorType>(AccessTy)) {
1431 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
1432 << "\n");
1433 return std::nullopt;
1434 }
1435
1436 const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);
1437
1438 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
1439 if (Assume && !AR)
1440 AR = PSE.getAsAddRec(Ptr);
1441
1442 if (!AR) {
1443 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
1444 << " SCEV: " << *PtrScev << "\n");
1445 return std::nullopt;
1446 }
1447
1448 // The access function must stride over the innermost loop.
1449 if (Lp != AR->getLoop()) {
1450 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
1451 << *Ptr << " SCEV: " << *AR << "\n");
1452 return std::nullopt;
1453 }
1454
1455 // Check the step is constant.
1456 const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
1457
1458 // Calculate the pointer stride and check if it is constant.
1459 const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
1460 if (!C) {
1461 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
1462 << " SCEV: " << *AR << "\n");
1463 return std::nullopt;
1464 }
1465
1466 auto &DL = Lp->getHeader()->getModule()->getDataLayout();
1467 TypeSize AllocSize = DL.getTypeAllocSize(AccessTy);
1468 int64_t Size = AllocSize.getFixedValue();
1469 const APInt &APStepVal = C->getAPInt();
1470
1471 // Huge step value - give up.
1472 if (APStepVal.getBitWidth() > 64)
1473 return std::nullopt;
1474
1475 int64_t StepVal = APStepVal.getSExtValue();
1476
1477 // Strided access.
1478 int64_t Stride = StepVal / Size;
1479 int64_t Rem = StepVal % Size;
1480 if (Rem)
1481 return std::nullopt;
1482
1483 if (!ShouldCheckWrap)
1484 return Stride;
1485
1486 // The address calculation must not wrap. Otherwise, a dependence could be
1487 // inverted.
1488 if (isNoWrapAddRec(Ptr, AR, PSE, Lp))
1489 return Stride;
1490
1491 // An inbounds getelementptr that is an AddRec with a unit stride
1492 // cannot wrap per definition. If it did, the result would be poison
1493 // and any memory access dependent on it would be immediate UB
1494 // when executed.
1495 if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
1496 GEP && GEP->isInBounds() && (Stride == 1 || Stride == -1))
1497 return Stride;
1498
1499 // If the null pointer is undefined, then an access sequence which would
1500 // otherwise access it can be assumed not to unsigned wrap. Note that this
1501 // assumes the object in memory is aligned to the natural alignment.
1502 unsigned AddrSpace = Ty->getPointerAddressSpace();
1503 if (!NullPointerIsDefined(Lp->getHeader()->getParent(), AddrSpace) &&
1504 (Stride == 1 || Stride == -1))
1505 return Stride;
1506
1507 if (Assume) {
1508 PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
1509 LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap:\n"
1510 << "LAA: Pointer: " << *Ptr << "\n"
1511 << "LAA: SCEV: " << *AR << "\n"
1512 << "LAA: Added an overflow assumption\n");
1513 return Stride;
1514 }
1515 LLVM_DEBUG(
1516 dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
1517 << *Ptr << " SCEV: " << *AR << "\n");
1518 return std::nullopt;
1519}
1520
1521std::optional<int> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA,
1522 Type *ElemTyB, Value *PtrB,
1523 const DataLayout &DL,
1524 ScalarEvolution &SE, bool StrictCheck,
1525 bool CheckType) {
1526 assert(PtrA && PtrB && "Expected non-nullptr pointers.");
1527
1528 // Make sure that A and B are different pointers.
1529 if (PtrA == PtrB)
1530 return 0;
1531
1532 // Make sure that the element types are the same if required.
1533 if (CheckType && ElemTyA != ElemTyB)
1534 return std::nullopt;
1535
1536 unsigned ASA = PtrA->getType()->getPointerAddressSpace();
1537 unsigned ASB = PtrB->getType()->getPointerAddressSpace();
1538
1539 // Check that the address spaces match.
1540 if (ASA != ASB)
1541 return std::nullopt;
1542 unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
1543
1544 APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
1545 Value *PtrA1 = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
1546 Value *PtrB1 = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);
1547
1548 int Val;
1549 if (PtrA1 == PtrB1) {
1550 // Retrieve the address space again as pointer stripping now tracks through
1551 // `addrspacecast`.
1552 ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
1553 ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
1554 // Check that the address spaces match and that the pointers are valid.
1555 if (ASA != ASB)
1556 return std::nullopt;
1557
1558 IdxWidth = DL.getIndexSizeInBits(ASA);
1559 OffsetA = OffsetA.sextOrTrunc(IdxWidth);
1560 OffsetB = OffsetB.sextOrTrunc(IdxWidth);
1561
1562 OffsetB -= OffsetA;
1563 Val = OffsetB.getSExtValue();
1564 } else {
1565 // Otherwise compute the distance with SCEV between the base pointers.
1566 const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
1567 const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
1568 const auto *Diff =
1569 dyn_cast<SCEVConstant>(SE.getMinusSCEV(PtrSCEVB, PtrSCEVA));
1570 if (!Diff)
1571 return std::nullopt;
1572 Val = Diff->getAPInt().getSExtValue();
1573 }
1574 int Size = DL.getTypeStoreSize(ElemTyA);
1575 int Dist = Val / Size;
1576
1577 // Ensure that the calculated distance matches the type-based one after all
1578 // the bitcasts removal in the provided pointers.
1579 if (!StrictCheck || Dist * Size == Val)
1580 return Dist;
1581 return std::nullopt;
1582}
1583
1584bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
1585 const DataLayout &DL, ScalarEvolution &SE,
1586 SmallVectorImpl<unsigned> &SortedIndices) {
1587 assert(llvm::all_of(
1588 VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
1589 "Expected list of pointer operands.");
1590 // Walk over the pointers, and map each of them to an offset relative to
1591 // first pointer in the array.
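 // For example (illustrative): for VL = {%a + 8, %a, %a + 4} (byte offsets)
 // with i32 elements, the offsets relative to VL[0] are {0, -2, -1}, so the
 // pointers are not consecutive as given and SortedIndices becomes {1, 2, 0}.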
1592 Value *Ptr0 = VL[0];
1593
1594 using DistOrdPair = std::pair<int64_t, int>;
1595 auto Compare = llvm::less_first();
1596 std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
1597 Offsets.emplace(0, 0);
1598 int Cnt = 1;
1599 bool IsConsecutive = true;
1600 for (auto *Ptr : VL.drop_front()) {
1601 std::optional<int> Diff = getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
1602 /*StrictCheck=*/true);
1603 if (!Diff)
1604 return false;
1605
1606 // Check if the pointer with the same offset is found.
1607 int64_t Offset = *Diff;
1608 auto Res = Offsets.emplace(Offset, Cnt);
1609 if (!Res.second)
1610 return false;
1611 // Consecutive order if the inserted element is the last one.
1612 IsConsecutive = IsConsecutive && std::next(Res.first) == Offsets.end();
1613 ++Cnt;
1614 }
1615 SortedIndices.clear();
1616 if (!IsConsecutive) {
1617 // Fill SortedIndices array only if it is non-consecutive.
1618 SortedIndices.resize(VL.size());
1619 Cnt = 0;
1620 for (const std::pair<int64_t, int> &Pair : Offsets) {
1621 SortedIndices[Cnt] = Pair.second;
1622 ++Cnt;
1623 }
1624 }
1625 return true;
1626}
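// A small sketch of the ordering above, with assumed offsets: for four i32
// pointers whose element distances from VL[0] are {0, 2, 1, 3}, the std::set
// holds the (offset, index) pairs (0,0), (1,2), (2,1), (3,3). The insertion of
// (1,2) does not land at the end of the set, so IsConsecutive becomes false
// and SortedIndices is filled with {0, 2, 1, 3}, i.e. the order in which the
// pointers would have to be visited to walk memory consecutively.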
1627
1628/// Returns true if the memory operations \p A and \p B are consecutive.
1629bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
1630 ScalarEvolution &SE, bool CheckType) {
1631 Value *PtrA = getLoadStorePointerOperand(A);
1632 Value *PtrB = getLoadStorePointerOperand(B);
1633 if (!PtrA || !PtrB)
1634 return false;
1635 Type *ElemTyA = getLoadStoreType(A);
1636 Type *ElemTyB = getLoadStoreType(B);
1637 std::optional<int> Diff =
1638 getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
1639 /*StrictCheck=*/true, CheckType);
1640 return Diff && *Diff == 1;
1641}
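// For illustration (hypothetical IR): for two loads
//   %a = load i32, ptr %p
//   %b = load i32, ptr %q   ; with %q = getelementptr inbounds i32, ptr %p, i64 1
// the element distance is 1, so isConsecutiveAccess(%a, %b, ...) returns true,
// while swapping the arguments gives a distance of -1 and returns false.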
1642
1643void MemoryDepChecker::addAccess(StoreInst *SI) {
1644 visitPointers(SI->getPointerOperand(), *InnermostLoop,
1645 [this, SI](Value *Ptr) {
1646 Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
1647 InstMap.push_back(SI);
1648 ++AccessIdx;
1649 });
1650}
1651
1652void MemoryDepChecker::addAccess(LoadInst *LI) {
1653 visitPointers(LI->getPointerOperand(), *InnermostLoop,
1654 [this, LI](Value *Ptr) {
1655 Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
1656 InstMap.push_back(LI);
1657 ++AccessIdx;
1658 });
1659}
1660
1661MemoryDepChecker::VectorizationSafetyStatus
1662MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
1663 switch (Type) {
1664 case NoDep:
1665 case Forward:
1666 case BackwardVectorizable:
1667 return VectorizationSafetyStatus::Safe;
1668
1669 case Unknown:
1670 return VectorizationSafetyStatus::PossiblySafeWithRtChecks;
1671 case ForwardButPreventsForwarding:
1672 case Backward:
1673 case BackwardVectorizableButPreventsForwarding:
1674 case IndirectUnsafe:
1675 return VectorizationSafetyStatus::Unsafe;
1676 }
1677 llvm_unreachable("unexpected DepType!");
1678}
1679
1680bool MemoryDepChecker::Dependence::isBackward() const {
1681 switch (Type) {
1682 case NoDep:
1683 case Forward:
1684 case ForwardButPreventsForwarding:
1685 case Unknown:
1686 case IndirectUnsafe:
1687 return false;
1688
1689 case BackwardVectorizable:
1690 case Backward:
1691 case BackwardVectorizableButPreventsForwarding:
1692 return true;
1693 }
1694 llvm_unreachable("unexpected DepType!");
1695}
1696
1697bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
1698 return isBackward() || Type == Unknown;
1699}
1700
1701bool MemoryDepChecker::Dependence::isForward() const {
1702 switch (Type) {
1703 case Forward:
1704 case ForwardButPreventsForwarding:
1705 return true;
1706
1707 case NoDep:
1708 case Unknown:
1709 case BackwardVectorizable:
1710 case Backward:
1711 case BackwardVectorizableButPreventsForwarding:
1712 case IndirectUnsafe:
1713 return false;
1714 }
1715 llvm_unreachable("unexpected DepType!");
1716}
1717
1718bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
1719 uint64_t TypeByteSize) {
1720 // If loads occur at a distance that is not a multiple of a feasible vector
1721 // factor, store-load forwarding does not take place.
1722 // Positive dependences might cause trouble because vectorizing them might
1723 // prevent store-load forwarding, making vectorized code run a lot slower.
1724 // a[i] = a[i-3] ^ a[i-8];
1725 // The stores to a[i:i+1] don't align with the loads from a[i-3:i-2] and
1726 // hence on a typical architecture store-load forwarding does not take
1727 // place. Vectorizing in such cases does not make sense.
1728 // Store-load forwarding distance.
1729
1730 // After this many iterations store-to-load forwarding conflicts should not
1731 // cause any slowdowns.
1732 const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
1733 // Maximum vector factor.
1734 uint64_t MaxVFWithoutSLForwardIssues = std::min(
1735 VectorizerParams::MaxVectorWidth * TypeByteSize, MinDepDistBytes);
1736
1737 // Compute the smallest VF at which the store and load would be misaligned.
1738 for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
1739 VF *= 2) {
1740 // If the number of vector iterations between the store and the load is
1741 // small, we could incur conflicts.
1742 if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
1743 MaxVFWithoutSLForwardIssues = (VF >> 1);
1744 break;
1745 }
1746 }
1747
1748 if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
1749 LLVM_DEBUG(
1750 dbgs() << "LAA: Distance " << Distance
1751 << " that could cause a store-load forwarding conflict\n");
1752 return true;
1753 }
1754
1755 if (MaxVFWithoutSLForwardIssues < MinDepDistBytes &&
1756 MaxVFWithoutSLForwardIssues !=
1757 VectorizerParams::MaxVectorWidth * TypeByteSize)
1758 MinDepDistBytes = MaxVFWithoutSLForwardIssues;
1759 return false;
1760}
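// A worked example with assumed values: for Distance = 24 bytes and
// TypeByteSize = 4, NumItersForStoreLoadThroughMemory is 32. VF = 8 divides
// the distance evenly, but VF = 16 does not (24 % 16 != 0) and 24 / 16 < 32,
// so MaxVFWithoutSLForwardIssues is clamped to 8 bytes. The function then
// returns false, but MinDepDistBytes is lowered to 8, i.e. at most two
// 4-byte elements per vector before forwarding stalls become a concern.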
1761
1762void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
1763 if (Status < S)
1764 Status = S;
1765}
1766
1767/// Given a dependence-distance \p Dist between two
1768/// memory accesses, that have the same stride whose absolute value is given
1769/// in \p Stride, and that have the same type size \p TypeByteSize,
1770/// in a loop whose backedge-taken count is \p BackedgeTakenCount, check if it is
1771/// possible to prove statically that the dependence distance is larger
1772/// than the range that the accesses will travel through the execution of
1773/// the loop. If so, return true; false otherwise. This is useful for
1774/// example in loops such as the following (PR31098):
1775/// for (i = 0; i < D; ++i) {
1776/// = out[i];
1777/// out[i+D] =
1778/// }
1779static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
1780 const SCEV &BackedgeTakenCount,
1781 const SCEV &Dist, uint64_t Stride,
1782 uint64_t TypeByteSize) {
1783
1784 // If we can prove that
1785 // (**) |Dist| > BackedgeTakenCount * Step
1786 // where Step is the absolute stride of the memory accesses in bytes,
1787 // then there is no dependence.
1788 //
1789 // Rationale:
1790 // We basically want to check if the absolute distance (|Dist/Step|)
1791 // is >= the loop iteration count (or > BackedgeTakenCount).
1792 // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
1793 // Section 4.2.1); Note that for vectorization it is sufficient to prove
1794 // that the dependence distance is >= VF; This is checked elsewhere.
1795 // But in some cases we can prune dependence distances early, and
1796 // even before selecting the VF, and without a runtime test, by comparing
1797 // the distance against the loop iteration count. Since the vectorized code
1798 // will be executed only if LoopCount >= VF, proving distance >= LoopCount
1799 // also guarantees that distance >= VF.
1800 //
1801 const uint64_t ByteStride = Stride * TypeByteSize;
1802 const SCEV *Step = SE.getConstant(BackedgeTakenCount.getType(), ByteStride);
1803 const SCEV *Product = SE.getMulExpr(&BackedgeTakenCount, Step);
1804
1805 const SCEV *CastedDist = &Dist;
1806 const SCEV *CastedProduct = Product;
1807 uint64_t DistTypeSizeBits = DL.getTypeSizeInBits(Dist.getType());
1808 uint64_t ProductTypeSizeBits = DL.getTypeSizeInBits(Product->getType());
1809
1810 // The dependence distance can be positive/negative, so we sign extend Dist;
1811 // The multiplication of the absolute stride in bytes and the
1812 // backedgeTakenCount is non-negative, so we zero extend Product.
1813 if (DistTypeSizeBits > ProductTypeSizeBits)
1814 CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
1815 else
1816 CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());
1817
1818 // Is Dist - (BackedgeTakenCount * Step) > 0 ?
1819 // (If so, then we have proven (**) because |Dist| >= Dist)
1820 const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
1821 if (SE.isKnownPositive(Minus))
1822 return true;
1823
1824 // Second try: Is -Dist - (BackedgeTakenCount * Step) > 0 ?
1825 // (If so, then we have proven (**) because |Dist| >= -1*Dist)
1826 const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
1827 Minus = SE.getMinusSCEV(NegDist, CastedProduct);
1828 if (SE.isKnownPositive(Minus))
1829 return true;
1830
1831 return false;
1832}
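// Assumed numbers to illustrate the check above: in the PR31098-style loop
// with D = 8 and 4-byte elements, the backedge-taken count is 7, Stride = 1
// and TypeByteSize = 4, so Product = 7 * 4 = 28 bytes. The dependence
// distance between out[i] and out[i+8] is 32 bytes, and 32 - 28 > 0, so the
// two accesses can never overlap within one execution of the loop and the
// dependence is discarded.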
1833
1834/// Check the dependence for two accesses with the same stride \p Stride.
1835/// \p Distance is the positive distance and \p TypeByteSize is type size in
1836/// bytes.
1837///
1838/// \returns true if they are independent.
1839static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
1840 uint64_t TypeByteSize) {
1841 assert(Stride > 1 && "The stride must be greater than 1");
1842 assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
1843 assert(Distance > 0 && "The distance must be non-zero");
1844
1845 // Skip if the distance is not a multiple of the type byte size.
1846 if (Distance % TypeByteSize)
1847 return false;
1848
1849 uint64_t ScaledDist = Distance / TypeByteSize;
1850
1851 // No dependence if the scaled distance is not a multiple of the stride.
1852 // E.g.
1853 // for (i = 0; i < 1024 ; i += 4)
1854 // A[i+2] = A[i] + 1;
1855 //
1856 // Two accesses in memory (scaled distance is 2, stride is 4):
1857 // | A[0] | | | | A[4] | | | |
1858 // | | | A[2] | | | | A[6] | |
1859 //
1860 // E.g.
1861 // for (i = 0; i < 1024 ; i += 3)
1862 // A[i+4] = A[i] + 1;
1863 //
1864 // Two accesses in memory (scaled distance is 4, stride is 3):
1865 // | A[0] | | | A[3] | | | A[6] | | |
1866 // | | | | | A[4] | | | A[7] | |
1867 return ScaledDist % Stride;
1868}
1869
1870/// Returns true if any of the underlying objects has a loop varying address,
1871/// i.e. may change in \p L.
1872static bool
1873isLoopVariantIndirectAddress(ArrayRef<const Value *> UnderlyingObjects,
1874 ScalarEvolution &SE, const Loop *L) {
1875 return any_of(UnderlyingObjects, [&SE, L](const Value *UO) {
1876 return !SE.isLoopInvariant(SE.getSCEV(const_cast<Value *>(UO)), L);
1877 });
1878}
1879
1880 // Get the dependence distance, stride, type size and whether it is a write for
1881 // the dependence between A and B. Returns a DepType, if we can prove there's
1882 // no dependence or the analysis fails. Outlined to a helper to limit the scope
1883// of various temporary variables, like A/BPtr, StrideA/BPtr and others.
1884// Returns either the dependence result, if it could already be determined, or a
1885// tuple with (Distance, Stride, TypeSize, AIsWrite, BIsWrite).
1886static std::variant<MemoryDepChecker::Dependence::DepType,
1887 std::tuple<const SCEV *, uint64_t, uint64_t, bool, bool>>
1888getDependenceDistanceStrideAndSize(
1889 const AccessAnalysis::MemAccessInfo &A, Instruction *AInst,
1890 const AccessAnalysis::MemAccessInfo &B, Instruction *BInst,
1891 const DenseMap<Value *, const SCEV *> &Strides,
1892 const DenseMap<Value *, SmallVector<const Value *, 16>> &UnderlyingObjects,
1893 PredicatedScalarEvolution &PSE, const Loop *InnermostLoop) {
1894 auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
1895 auto &SE = *PSE.getSE();
1896 auto [APtr, AIsWrite] = A;
1897 auto [BPtr, BIsWrite] = B;
1898
1899 // Two reads are independent.
1900 if (!AIsWrite && !BIsWrite)
1901 return MemoryDepChecker::Dependence::NoDep;
1902
1903 Type *ATy = getLoadStoreType(AInst);
1904 Type *BTy = getLoadStoreType(BInst);
1905
1906 // We cannot check pointers in different address spaces.
1907 if (APtr->getType()->getPointerAddressSpace() !=
1908 BPtr->getType()->getPointerAddressSpace())
1909 return MemoryDepChecker::Dependence::Unknown;
1910
1911 int64_t StrideAPtr =
1912 getPtrStride(PSE, ATy, APtr, InnermostLoop, Strides, true).value_or(0);
1913 int64_t StrideBPtr =
1914 getPtrStride(PSE, BTy, BPtr, InnermostLoop, Strides, true).value_or(0);
1915
1916 const SCEV *Src = PSE.getSCEV(APtr);
1917 const SCEV *Sink = PSE.getSCEV(BPtr);
1918
1919 // If the induction step is negative we have to invert source and sink of
1920 // the dependence.
1921 if (StrideAPtr < 0) {
1922 std::swap(APtr, BPtr);
1923 std::swap(ATy, BTy);
1924 std::swap(Src, Sink);
1925 std::swap(AIsWrite, BIsWrite);
1926 std::swap(AInst, BInst);
1927 std::swap(StrideAPtr, StrideBPtr);
1928 }
1929
1930 const SCEV *Dist = SE.getMinusSCEV(Sink, Src);
1931
1932 LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
1933 << "(Induction step: " << StrideAPtr << ")\n");
1934 LLVM_DEBUG(dbgs() << "LAA: Distance for " << *AInst << " to " << *BInst
1935 << ": " << *Dist << "\n");
1936
1937 // Needs accesses where the addresses of the accessed underlying objects do
1938 // not change within the loop.
1939 if (isLoopVariantIndirectAddress(UnderlyingObjects.find(APtr)->second, SE,
1940 InnermostLoop) ||
1941 isLoopVariantIndirectAddress(UnderlyingObjects.find(BPtr)->second, SE,
1942 InnermostLoop))
1943 return MemoryDepChecker::Dependence::IndirectUnsafe;
1944
1945 // Need accesses with constant stride. We don't want to vectorize
1946 // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap
1947 // in the address space.
1948 if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr) {
1949 LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
1950 return MemoryDepChecker::Dependence::Unknown;
1951 }
1952
1953 uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
1954 bool HasSameSize =
1955 DL.getTypeStoreSizeInBits(ATy) == DL.getTypeStoreSizeInBits(BTy);
1956 if (!HasSameSize)
1957 TypeByteSize = 0;
1958 uint64_t Stride = std::abs(StrideAPtr);
1959 return std::make_tuple(Dist, Stride, TypeByteSize, AIsWrite, BIsWrite);
1960}
1961
1962MemoryDepChecker::Dependence::DepType MemoryDepChecker::isDependent(
1963 const MemAccessInfo &A, unsigned AIdx, const MemAccessInfo &B,
1964 unsigned BIdx, const DenseMap<Value *, const SCEV *> &Strides,
1965 const DenseMap<Value *, SmallVector<const Value *, 16>>
1966 &UnderlyingObjects) {
1967 assert(AIdx < BIdx && "Must pass arguments in program order");
1968
1969 // Get the dependence distance, stride, type size and what access writes for
1970 // the dependence between A and B.
1971 auto Res = getDependenceDistanceStrideAndSize(
1972 A, InstMap[AIdx], B, InstMap[BIdx], Strides, UnderlyingObjects, PSE,
1973 InnermostLoop);
1974 if (std::holds_alternative<Dependence::DepType>(Res))
1975 return std::get<Dependence::DepType>(Res);
1976
1977 const auto &[Dist, Stride, TypeByteSize, AIsWrite, BIsWrite] =
1978 std::get<std::tuple<const SCEV *, uint64_t, uint64_t, bool, bool>>(Res);
1979 bool HasSameSize = TypeByteSize > 0;
1980
1981 ScalarEvolution &SE = *PSE.getSE();
1982 auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
1983 if (!isa<SCEVCouldNotCompute>(Dist) && HasSameSize &&
1984 isSafeDependenceDistance(DL, SE, *(PSE.getBackedgeTakenCount()), *Dist,
1985 Stride, TypeByteSize))
1986 return Dependence::NoDep;
1987
1988 const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
1989 if (!C) {
1990 LLVM_DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
1991 FoundNonConstantDistanceDependence = true;
1992 return Dependence::Unknown;
1993 }
1994
1995 const APInt &Val = C->getAPInt();
1996 int64_t Distance = Val.getSExtValue();
1997
1998 // Attempt to prove strided accesses independent.
1999 if (std::abs(Distance) > 0 && Stride > 1 && HasSameSize &&
2000 areStridedAccessesIndependent(std::abs(Distance), Stride, TypeByteSize)) {
2001 LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
2002 return Dependence::NoDep;
2003 }
2004
2005 // Negative distances are not plausible dependencies.
2006 if (Val.isNegative()) {
2007 bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
2008 // There is no need to update MaxSafeVectorWidthInBits after call to
2009 // couldPreventStoreLoadForward, even if it changed MinDepDistBytes,
2010 // since a forward dependency will allow vectorization using any width.
2011 if (IsTrueDataDependence && EnableForwardingConflictDetection &&
2012 (!HasSameSize || couldPreventStoreLoadForward(Val.abs().getZExtValue(),
2013 TypeByteSize))) {
2014 LLVM_DEBUG(dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
2015 return Dependence::ForwardButPreventsForwarding;
2016 }
2017
2018 LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
2019 return Dependence::Forward;
2020 }
2021
2022 // Write to the same location with the same size.
2023 if (Val == 0) {
2024 if (HasSameSize)
2025 return Dependence::Forward;
2026 LLVM_DEBUG(
2027 dbgs() << "LAA: Zero dependence difference but different type sizes\n");
2028 return Dependence::Unknown;
2029 }
2030
2031 assert(Val.isStrictlyPositive() && "Expect a positive value");
2032
2033 if (!HasSameSize) {
2034 LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "
2035 "different type sizes\n");
2036 return Dependence::Unknown;
2037 }
2038
2039 // Bail out early if passed-in parameters make vectorization not feasible.
2040 unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
2041 VectorizerParams::VectorizationFactor : 1);
2042 unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
2043 VectorizerParams::VectorizationInterleave : 1);
2044 // The minimum number of iterations for a vectorized/unrolled version.
2045 unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
2046
2047 // It's not vectorizable if the distance is smaller than the minimum distance
2048 // needed for a vectorized/unrolled version. Vectorizing one iteration in
2049 // front needs TypeByteSize * Stride. Vectorizing the last iteration needs
2050 // TypeByteSize (no need to add the last gap distance).
2051 //
2052 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
2053 // foo(int *A) {
2054 // int *B = (int *)((char *)A + 14);
2055 // for (i = 0 ; i < 1024 ; i += 2)
2056 // B[i] = A[i] + 1;
2057 // }
2058 //
2059 // Two accesses in memory (stride is 2):
2060 // | A[0] | | A[2] | | A[4] | | A[6] | |
2061 // | B[0] | | B[2] | | B[4] |
2062 //
2063 // Distance needed for vectorizing iterations except the last iteration:
2064 // 4 * 2 * (MinNumIter - 1). Distance needed for the last iteration: 4.
2065 // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
2066 //
2067 // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
2068 // 12, which is less than the distance.
2069 //
2070 // If MinNumIter is 4 (say, if a user forces the vectorization factor to be 4),
2071 // the minimum distance needed is 28, which is greater than the distance. It is
2072 // not safe to do vectorization.
2073 uint64_t MinDistanceNeeded =
2074 TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
2075 if (MinDistanceNeeded > static_cast<uint64_t>(Distance)) {
2076 LLVM_DEBUG(dbgs() << "LAA: Failure because of positive distance "
2077 << Distance << '\n');
2078 return Dependence::Backward;
2079 }
2080
2081 // Unsafe if the minimum distance needed is greater than the smallest
2082 // dependence distance.
2083 if (MinDistanceNeeded > MinDepDistBytes) {
2084 LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
2085 << MinDistanceNeeded << " size in bytes\n");
2086 return Dependence::Backward;
2087 }
2088
2089 // Positive distance bigger than max vectorization factor.
2090 // FIXME: Should use max factor instead of max distance in bytes, which could
2091 // not handle different types.
2092 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
2093 // void foo (int *A, char *B) {
2094 // for (unsigned i = 0; i < 1024; i++) {
2095 // A[i+2] = A[i] + 1;
2096 // B[i+2] = B[i] + 1;
2097 // }
2098 // }
2099 //
2100 // This case is currently unsafe according to the max safe distance. If we
2101 // analyze the two accesses on array B, the max safe dependence distance
2102 // is 2. Then we analyze the accesses on array A; the minimum distance needed
2103 // is 8, which is greater than 2, so vectorization is forbidden. But actually
2104 // both A and B could be vectorized by 2 iterations.
2105 MinDepDistBytes =
2106 std::min(static_cast<uint64_t>(Distance), MinDepDistBytes);
2107
2108 bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
2109 uint64_t MinDepDistBytesOld = MinDepDistBytes;
2110 if (IsTrueDataDependence && EnableForwardingConflictDetection &&
2111 couldPreventStoreLoadForward(Distance, TypeByteSize)) {
2112 // Sanity check that we didn't update MinDepDistBytes when calling
2113 // couldPreventStoreLoadForward
2114 assert(MinDepDistBytes == MinDepDistBytesOld &&
2115 "An update to MinDepDistBytes requires an update to "
2116 "MaxSafeVectorWidthInBits");
2117 (void)MinDepDistBytesOld;
2118 return Dependence::BackwardVectorizableButPreventsForwarding;
2119 }
2120
2121 // An update to MinDepDistBytes requires an update to MaxSafeVectorWidthInBits
2122 // since there is a backwards dependency.
2123 uint64_t MaxVF = MinDepDistBytes / (TypeByteSize * Stride);
2124 LLVM_DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue()
2125 << " with max VF = " << MaxVF << '\n');
2126 uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
2127 MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);
2128 return Dependence::BackwardVectorizable;
2129}
2130
2131bool MemoryDepChecker::areDepsSafe(
2132 DepCandidates &AccessSets, MemAccessInfoList &CheckDeps,
2133 const DenseMap<Value *, const SCEV *> &Strides,
2134 const DenseMap<Value *, SmallVector<const Value *, 16>>
2135 &UnderlyingObjects) {
2136
2137 MinDepDistBytes = -1;
2138 SmallPtrSet<MemAccessInfo, 8> Visited;
2139 for (MemAccessInfo CurAccess : CheckDeps) {
2140 if (Visited.count(CurAccess))
2141 continue;
2142
2143 // Get the relevant memory access set.
2144 EquivalenceClasses<MemAccessInfo>::iterator I =
2145 AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));
2146
2147 // Check accesses within this set.
2148 EquivalenceClasses<MemAccessInfo>::member_iterator AI =
2149 AccessSets.member_begin(I);
2150 EquivalenceClasses<MemAccessInfo>::member_iterator AE =
2151 AccessSets.member_end();
2152
2153 // Check every access pair.
2154 while (AI != AE) {
2155 Visited.insert(*AI);
2156 bool AIIsWrite = AI->getInt();
2157 // Check loads only against subsequent members of the equivalence class, but
2158 // stores also against other stores in the same equivalence class - to the same address.
2159 EquivalenceClasses<MemAccessInfo>::member_iterator OI =
2160 (AIIsWrite ? AI : std::next(AI));
2161 while (OI != AE) {
2162 // Check every accessing instruction pair in program order.
2163 for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
2164 I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
2165 // Scan all accesses of another equivalence class, but only the next
2166 // accesses of the same equivalence class.
2167 for (std::vector<unsigned>::iterator
2168 I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
2169 I2E = (OI == AI ? I1E : Accesses[*OI].end());
2170 I2 != I2E; ++I2) {
2171 auto A = std::make_pair(&*AI, *I1);
2172 auto B = std::make_pair(&*OI, *I2);
2173
2174 assert(*I1 != *I2);
2175 if (*I1 > *I2)
2176 std::swap(A, B);
2177
2178 Dependence::DepType Type =
2179 isDependent(*A.first, A.second, *B.first, B.second, Strides,
2180 UnderlyingObjects);
2181 mergeInStatus(Dependence::isSafeForVectorization(Type));
2182
2183 // Gather dependences unless we accumulated MaxDependences
2184 // dependences. In that case return as soon as we find the first
2185 // unsafe dependence. This puts a limit on this quadratic
2186 // algorithm.
2187 if (RecordDependences) {
2188 if (Type != Dependence::NoDep)
2189 Dependences.push_back(Dependence(A.second, B.second, Type));
2190
2191 if (Dependences.size() >= MaxDependences) {
2192 RecordDependences = false;
2193 Dependences.clear();
2194 LLVM_DEBUG(dbgs()
2195 << "Too many dependences, stopped recording\n");
2196 }
2197 }
2198 if (!RecordDependences && !isSafeForVectorization())
2199 return false;
2200 }
2201 ++OI;
2202 }
2203 AI++;
2204 }
2205 }
2206
2207 LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
2208 return isSafeForVectorization();
2209}
2210
2211SmallVector<Instruction *, 4>
2212MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const {
2213 MemAccessInfo Access(Ptr, isWrite);
2214 auto &IndexVector = Accesses.find(Access)->second;
2215
2216 SmallVector<Instruction *, 4> Insts;
2217 transform(IndexVector,
2218 std::back_inserter(Insts),
2219 [&](unsigned Idx) { return this->InstMap[Idx]; });
2220 return Insts;
2221}
2222
2223const char *MemoryDepChecker::Dependence::DepName[] = {
2224 "NoDep",
2225 "Unknown",
2226 "IndirectUnsafe",
2227 "Forward",
2228 "ForwardButPreventsForwarding",
2229 "Backward",
2230 "BackwardVectorizable",
2231 "BackwardVectorizableButPreventsForwarding"};
2232
2233void MemoryDepChecker::Dependence::print(
2234 raw_ostream &OS, unsigned Depth,
2235 const SmallVectorImpl<Instruction *> &Instrs) const {
2236 OS.indent(Depth) << DepName[Type] << ":\n";
2237 OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
2238 OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
2239}
2240
2241bool LoopAccessInfo::canAnalyzeLoop() {
2242 // We need to have a loop header.
2243 LLVM_DEBUG(dbgs() << "LAA: Found a loop in "
2244 << TheLoop->getHeader()->getParent()->getName() << ": "
2245 << TheLoop->getHeader()->getName() << '\n');
2246
2247 // We can only analyze innermost loops.
2248 if (!TheLoop->isInnermost()) {
2249 LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
2250 recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
2251 return false;
2252 }
2253
2254 // We must have a single backedge.
2255 if (TheLoop->getNumBackEdges() != 1) {
2256 LLVM_DEBUG(
2257 dbgs() << "LAA: loop control flow is not understood by analyzer\n");
2258 recordAnalysis("CFGNotUnderstood")
2259 << "loop control flow is not understood by analyzer";
2260 return false;
2261 }
2262
2263 // ScalarEvolution needs to be able to find the exit count.
2264 const SCEV *ExitCount = PSE->getBackedgeTakenCount();
2265 if (isa<SCEVCouldNotCompute>(ExitCount)) {
2266 recordAnalysis("CantComputeNumberOfIterations")
2267 << "could not determine number of loop iterations";
2268 LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
2269 return false;
2270 }
2271
2272 return true;
2273}
2274
2275void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
2276 const TargetLibraryInfo *TLI,
2277 DominatorTree *DT) {
2278 // Holds the Load and Store instructions.
2279 SmallVector<LoadInst *, 16> Loads;
2280 SmallVector<StoreInst *, 16> Stores;
2281
2282 // Holds all the different accesses in the loop.
2283 unsigned NumReads = 0;
2284 unsigned NumReadWrites = 0;
2285
2286 bool HasComplexMemInst = false;
2287
2288 // A runtime check is only legal to insert if there are no convergent calls.
2289 HasConvergentOp = false;
2290
2291 PtrRtChecking->Pointers.clear();
2292 PtrRtChecking->Need = false;
2293
2294 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
2295
2296 const bool EnableMemAccessVersioningOfLoop =
2297 EnableMemAccessVersioning &&
2298 !TheLoop->getHeader()->getParent()->hasOptSize();
2299
2300 // Traverse blocks in fixed RPOT order, regardless of their storage in the
2301 // loop info, as it may be arbitrary.
2302 LoopBlocksRPO RPOT(TheLoop);
2303 RPOT.perform(LI);
2304 for (BasicBlock *BB : RPOT) {
2305 // Scan the BB and collect legal loads and stores. Also detect any
2306 // convergent instructions.
2307 for (Instruction &I : *BB) {
2308 if (auto *Call = dyn_cast<CallBase>(&I)) {
2309 if (Call->isConvergent())
2310 HasConvergentOp = true;
2311 }
2312
2313 // If both a non-vectorizable memory instruction and a convergent operation
2314 // have been found in this loop, there is no reason to continue the search.
2315 if (HasComplexMemInst && HasConvergentOp) {
2316 CanVecMem = false;
2317 return;
2318 }
2319
2320 // Avoid hitting recordAnalysis multiple times.
2321 if (HasComplexMemInst)
2322 continue;
2323
2324 // Many math library functions read the rounding mode. We will only
2325 // vectorize a loop if it contains known function calls that don't set
2326 // the flag. Therefore, it is safe to ignore this read from memory.
2327 auto *Call = dyn_cast<CallInst>(&I);
2328 if (Call && getVectorIntrinsicIDForCall(Call, TLI))
2329 continue;
2330
2331 // If this is a load, save it. If this instruction can read from memory
2332 // but is not a load, then we quit. Notice that we don't handle function
2333 // calls that read or write.
2334 if (I.mayReadFromMemory()) {
2335 // If the function has an explicit vectorized counterpart, we can safely
2336 // assume that it can be vectorized.
2337 if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
2338 !VFDatabase::getMappings(*Call).empty())
2339 continue;
2340
2341 auto *Ld = dyn_cast<LoadInst>(&I);
2342 if (!Ld) {
2343 recordAnalysis("CantVectorizeInstruction", Ld)
2344 << "instruction cannot be vectorized";
2345 HasComplexMemInst = true;
2346 continue;
2347 }
2348 if (!Ld->isSimple() && !IsAnnotatedParallel) {
2349 recordAnalysis("NonSimpleLoad", Ld)
2350 << "read with atomic ordering or volatile read";
2351 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
2352 HasComplexMemInst = true;
2353 continue;
2354 }
2355 NumLoads++;
2356 Loads.push_back(Ld);
2357 DepChecker->addAccess(Ld);
2358 if (EnableMemAccessVersioningOfLoop)
2359 collectStridedAccess(Ld);
2360 continue;
2361 }
2362
2363 // Save 'store' instructions. Abort if other instructions write to memory.
2364 if (I.mayWriteToMemory()) {
2365 auto *St = dyn_cast<StoreInst>(&I);
2366 if (!St) {
2367 recordAnalysis("CantVectorizeInstruction", St)
2368 << "instruction cannot be vectorized";
2369 HasComplexMemInst = true;
2370 continue;
2371 }
2372 if (!St->isSimple() && !IsAnnotatedParallel) {
2373 recordAnalysis("NonSimpleStore", St)
2374 << "write with atomic ordering or volatile write";
2375 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
2376 HasComplexMemInst = true;
2377 continue;
2378 }
2379 NumStores++;
2380 Stores.push_back(St);
2381 DepChecker->addAccess(St);
2382 if (EnableMemAccessVersioningOfLoop)
2383 collectStridedAccess(St);
2384 }
2385 } // Next instr.
2386 } // Next block.
2387
2388 if (HasComplexMemInst) {
2389 CanVecMem = false;
2390 return;
2391 }
2392
2393 // Now we have two lists that hold the loads and the stores.
2394 // Next, we find the pointers that they use.
2395
2396 // Check if we see any stores. If there are no stores, then we don't
2397 // care if the pointers are *restrict*.
2398 if (!Stores.size()) {
2399 LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
2400 CanVecMem = true;
2401 return;
2402 }
2403
2404 MemoryDepChecker::DepCandidates DependentAccesses;
2405 AccessAnalysis Accesses(TheLoop, AA, LI, DependentAccesses, *PSE);
2406
2407 // Holds the analyzed pointers. We don't want to call getUnderlyingObjects
2408 // multiple times on the same object. If the ptr is accessed twice, once
2409 // for read and once for write, it will only appear once (on the write
2410 // list). This is okay, since we are going to check for conflicts between
2411 // writes and between reads and writes, but not between reads and reads.
2412 SmallSet<std::pair<Value *, Type *>, 16> Seen;
2413
2414 // Record uniform store addresses to identify if we have multiple stores
2415 // to the same address.
2416 SmallPtrSet<Value *, 16> UniformStores;
2417
2418 for (StoreInst *ST : Stores) {
2419 Value *Ptr = ST->getPointerOperand();
2420
2421 if (isInvariant(Ptr)) {
2422 // Record store instructions to loop invariant addresses
2423 StoresToInvariantAddresses.push_back(ST);
2424 HasDependenceInvolvingLoopInvariantAddress |=
2425 !UniformStores.insert(Ptr).second;
2426 }
2427
2428 // If we did *not* see this pointer before, insert it to the read-write
2429 // list. At this phase it is only a 'write' list.
2430 Type *AccessTy = getLoadStoreType(ST);
2431 if (Seen.insert({Ptr, AccessTy}).second) {
2432 ++NumReadWrites;
2433
2434 MemoryLocation Loc = MemoryLocation::get(ST);
2435 // The TBAA metadata could have a control dependency on the predication
2436 // condition, so we cannot rely on it when determining whether or not we
2437 // need runtime pointer checks.
2438 if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
2439 Loc.AATags.TBAA = nullptr;
2440
2441 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2442 [&Accesses, AccessTy, Loc](Value *Ptr) {
2443 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2444 Accesses.addStore(NewLoc, AccessTy);
2445 });
2446 }
2447 }
2448
2449 if (IsAnnotatedParallel) {
2450 LLVM_DEBUG(
2451 dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
2452 << "checks.\n");
2453 CanVecMem = true;
2454 return;
2455 }
2456
2457 for (LoadInst *LD : Loads) {
2458 Value *Ptr = LD->getPointerOperand();
2459 // If we did *not* see this pointer before, insert it to the
2460 // read list. If we *did* see it before, then it is already in
2461 // the read-write list. This allows us to vectorize expressions
2462 // such as A[i] += x; Because the address of A[i] is a read-write
2463 // pointer. This only works if the index of A[i] is consecutive.
2464 // If the address of i is unknown (for example A[B[i]]) then we may
2465 // read a few words, modify, and write a few words, and some of the
2466 // words may be written to the same address.
2467 bool IsReadOnlyPtr = false;
2468 Type *AccessTy = getLoadStoreType(LD);
2469 if (Seen.insert({Ptr, AccessTy}).second ||
2470 !getPtrStride(*PSE, LD->getType(), Ptr, TheLoop, SymbolicStrides).value_or(0)) {
2471 ++NumReads;
2472 IsReadOnlyPtr = true;
2473 }
2474
2475 // See if there is an unsafe dependency between a load from a uniform address
2476 // and a store to the same uniform address.
2477 if (UniformStores.count(Ptr)) {
2478 LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
2479 "load and uniform store to the same address!\n");
2480 HasDependenceInvolvingLoopInvariantAddress = true;
2481 }
2482
2483 MemoryLocation Loc = MemoryLocation::get(LD);
2484 // The TBAA metadata could have a control dependency on the predication
2485 // condition, so we cannot rely on it when determining whether or not we
2486 // need runtime pointer checks.
2487 if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
2488 Loc.AATags.TBAA = nullptr;
2489
2490 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2491 [&Accesses, AccessTy, Loc, IsReadOnlyPtr](Value *Ptr) {
2492 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2493 Accesses.addLoad(NewLoc, AccessTy, IsReadOnlyPtr);
2494 });
2495 }
2496
2497 // If we write (or read-write) to a single destination and there are no
2498 // other reads in this loop then it is safe to vectorize.
2499 if (NumReadWrites == 1 && NumReads == 0) {
2500 LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
2501 CanVecMem = true;
2502 return;
2503 }
2504
2505 // Build dependence sets and check whether we need a runtime pointer bounds
2506 // check.
2507 Accesses.buildDependenceSets();
2508
2509 // Find pointers with computable bounds. We are going to use this information
2510 // to place a runtime bound check.
2511 Value *UncomputablePtr = nullptr;
2512 bool CanDoRTIfNeeded =
2513 Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(), TheLoop,
2514 SymbolicStrides, UncomputablePtr, false);
2515 if (!CanDoRTIfNeeded) {
2516 auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2517 recordAnalysis("CantIdentifyArrayBounds", I)
2518 << "cannot identify array bounds";
2519 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
2520 << "the array bounds.\n");
2521 CanVecMem = false;
2522 return;
2523 }
2524
2525 LLVM_DEBUG(
2526 dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");
2527
2528 CanVecMem = true;
2529 if (Accesses.isDependencyCheckNeeded()) {
2530 LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
2531 CanVecMem = DepChecker->areDepsSafe(
2532 DependentAccesses, Accesses.getDependenciesToCheck(), SymbolicStrides,
2533 Accesses.getUnderlyingObjects());
2534
2535 if (!CanVecMem && DepChecker->shouldRetryWithRuntimeCheck()) {
2536 LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");
2537
2538 // Clear the dependency checks. We assume they are not needed.
2539 Accesses.resetDepChecks(*DepChecker);
2540
2541 PtrRtChecking->reset();
2542 PtrRtChecking->Need = true;
2543
2544 auto *SE = PSE->getSE();
2545 UncomputablePtr = nullptr;
2546 CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(
2547 *PtrRtChecking, SE, TheLoop, SymbolicStrides, UncomputablePtr, true);
2548
2549 // Check that we found the bounds for the pointer.
2550 if (!CanDoRTIfNeeded) {
2551 auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2552 recordAnalysis("CantCheckMemDepsAtRunTime", I)
2553 << "cannot check memory dependencies at runtime";
2554 LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
2555 CanVecMem = false;
2556 return;
2557 }
2558
2559 CanVecMem = true;
2560 }
2561 }
2562
2563 if (HasConvergentOp) {
2564 recordAnalysis("CantInsertRuntimeCheckWithConvergent")
2565 << "cannot add control dependency to convergent operation";
2566 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
2567 "would be needed with a convergent operation\n");
2568 CanVecMem = false;
2569 return;
2570 }
2571
2572 if (CanVecMem)
2573 LLVM_DEBUG(
2574 dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
2575 << (PtrRtChecking->Need ? "" : " don't")
2576 << " need runtime memory checks.\n");
2577 else
2578 emitUnsafeDependenceRemark();
2579}
2580
2581void LoopAccessInfo::emitUnsafeDependenceRemark() {
2582 auto Deps = getDepChecker().getDependences();
2583 if (!Deps)
2584 return;
2585 auto Found = llvm::find_if(*Deps, [](const MemoryDepChecker::Dependence &D) {
2586 return MemoryDepChecker::Dependence::isSafeForVectorization(D.Type) !=
2587 MemoryDepChecker::VectorizationSafetyStatus::Safe;
2588 });
2589 if (Found == Deps->end())
2590 return;
2591 MemoryDepChecker::Dependence Dep = *Found;
2592
2593 LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
2594
2595 // Emit remark for first unsafe dependence
2596 bool HasForcedDistribution = false;
2597 std::optional<const MDOperand *> Value =
2598 findStringMetadataForLoop(TheLoop, "llvm.loop.distribute.enable");
2599 if (Value) {
2600 const MDOperand *Op = *Value;
2601 assert(Op && mdconst::hasa<ConstantInt>(*Op) && "invalid metadata");
2602 HasForcedDistribution = mdconst::extract<ConstantInt>(*Op)->getZExtValue();
2603 }
2604
2605 const std::string Info =
2606 HasForcedDistribution
2607 ? "unsafe dependent memory operations in loop."
2608 : "unsafe dependent memory operations in loop. Use "
2609 "#pragma clang loop distribute(enable) to allow loop distribution "
2610 "to attempt to isolate the offending operations into a separate "
2611 "loop";
2612 OptimizationRemarkAnalysis &R =
2613 recordAnalysis("UnsafeDep", Dep.getDestination(*this)) << Info;
2614
2615 switch (Dep.Type) {
2616 case MemoryDepChecker::Dependence::NoDep:
2617 case MemoryDepChecker::Dependence::Forward:
2618 case MemoryDepChecker::Dependence::BackwardVectorizable:
2619 llvm_unreachable("Unexpected dependence");
2620 case MemoryDepChecker::Dependence::Backward:
2621 R << "\nBackward loop carried data dependence.";
2622 break;
2623 case MemoryDepChecker::Dependence::ForwardButPreventsForwarding:
2624 R << "\nForward loop carried data dependence that prevents "
2625 "store-to-load forwarding.";
2626 break;
2627 case MemoryDepChecker::Dependence::BackwardVectorizableButPreventsForwarding:
2628 R << "\nBackward loop carried data dependence that prevents "
2629 "store-to-load forwarding.";
2630 break;
2631 case MemoryDepChecker::Dependence::IndirectUnsafe:
2632 R << "\nUnsafe indirect dependence.";
2633 break;
2634 case MemoryDepChecker::Dependence::Unknown:
2635 R << "\nUnknown data dependence.";
2636 break;
2637 }
2638
2639 if (Instruction *I = Dep.getSource(*this)) {
2640 DebugLoc SourceLoc = I->getDebugLoc();
2641 if (auto *DD = dyn_cast_or_null<Instruction>(getPointerOperand(I)))
2642 SourceLoc = DD->getDebugLoc();
2643 if (SourceLoc)
2644 R << " Memory location is the same as accessed at "
2645 << ore::NV("Location", SourceLoc);
2646 }
2647}
2648
2649bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
2650 DominatorTree *DT) {
2651 assert(TheLoop->contains(BB) && "Unknown block used");
2652
2653 // Blocks that do not dominate the latch need predication.
2654 BasicBlock* Latch = TheLoop->getLoopLatch();
2655 return !DT->dominates(BB, Latch);
2656}
2657
2658OptimizationRemarkAnalysis &LoopAccessInfo::recordAnalysis(StringRef RemarkName,
2659 Instruction *I) {
2660 assert(!Report && "Multiple reports generated");
2661
2662 Value *CodeRegion = TheLoop->getHeader();
2663 DebugLoc DL = TheLoop->getStartLoc();
2664
2665 if (I) {
2666 CodeRegion = I->getParent();
2667 // If there is no debug location attached to the instruction, fall back to
2668 // using the loop's start location.
2669 if (I->getDebugLoc())
2670 DL = I->getDebugLoc();
2671 }
2672
2673 Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName, DL,
2674 CodeRegion);
2675 return *Report;
2676}
2677
2678bool LoopAccessInfo::isInvariant(Value *V) const {
2679 auto *SE = PSE->getSE();
2680 // TODO: Is this really what we want? Even without FP SCEV, we may want some
2681 // trivially loop-invariant FP values to be considered invariant.
2682 if (!SE->isSCEVable(V->getType()))
2683 return false;
2684 const SCEV *S = SE->getSCEV(V);
2685 return SE->isLoopInvariant(S, TheLoop);
2686}
2687
2688/// Find the operand of the GEP that should be checked for consecutive
2689/// stores. This ignores trailing indices that have no effect on the final
2690/// pointer.
2691static unsigned getGEPInductionOperand(const GetElementPtrInst *Gep) {
2692 const DataLayout &DL = Gep->getModule()->getDataLayout();
2693 unsigned LastOperand = Gep->getNumOperands() - 1;
2694 TypeSize GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());
2695
2696 // Walk backwards and try to peel off zeros.
2697 while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
2698 // Find the type we're currently indexing into.
2699 gep_type_iterator GEPTI = gep_type_begin(Gep);
2700 std::advance(GEPTI, LastOperand - 2);
2701
2702 // If it's a type with the same allocation size as the result of the GEP we
2703 // can peel off the zero index.
2704 if (DL.getTypeAllocSize(GEPTI.getIndexedType()) != GEPAllocSize)
2705 break;
2706 --LastOperand;
2707 }
2708
2709 return LastOperand;
2710}
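// Illustrative (hypothetical) cases for the peeling loop above: given
//   %struct.S = type { i32 }
//   %p = getelementptr %struct.S, ptr %A, i64 %i, i32 0
// the struct wrapper has the same allocation size as the i32 result, so the
// trailing zero index is peeled and operand 1 (%i) is returned. For
//   %q = getelementptr [4 x i32], ptr %A, i64 %i, i64 0
// the sizes differ (16 vs. 4 bytes), so the zero index is kept and the last
// operand is returned instead.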
2711
2712/// If the argument is a GEP, then returns the operand identified by
2713/// getGEPInductionOperand. However, if there is some other non-loop-invariant
2714/// operand, it returns that instead.
2715static Value *stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
2716 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
2717 if (!GEP)
2718 return Ptr;
2719
2720 unsigned InductionOperand = getGEPInductionOperand(GEP);
2721
2722 // Check that all of the gep indices are uniform except for our induction
2723 // operand.
2724 for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i)
2725 if (i != InductionOperand &&
2726 !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(i)), Lp))
2727 return Ptr;
2728 return GEP->getOperand(InductionOperand);
2729}
2730
2731/// If a value has only one user that is a CastInst, return it.
2732static Value *getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) {
2733 Value *UniqueCast = nullptr;
2734 for (User *U : Ptr->users()) {
2735 CastInst *CI = dyn_cast<CastInst>(U);
2736 if (CI && CI->getType() == Ty) {
2737 if (!UniqueCast)
2738 UniqueCast = CI;
2739 else
2740 return nullptr;
2741 }
2742 }
2743 return UniqueCast;
2744}
2745
2746/// Get the stride of a pointer access in a loop. Looks for symbolic
2747/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
2748static const SCEV *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
2749 auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
2750 if (!PtrTy || PtrTy->isAggregateType())
2751 return nullptr;
2752
2753 // Try to remove a gep instruction to make the pointer (actually the index at
2754 // this point) easier to analyze. If OrigPtr is equal to Ptr we are analyzing
2755 // the pointer, otherwise we are analyzing the index.
2756 Value *OrigPtr = Ptr;
2757
2758 // The size of the pointer access.
2759 int64_t PtrAccessSize = 1;
2760
2761 Ptr = stripGetElementPtr(Ptr, SE, Lp);
2762 const SCEV *V = SE->getSCEV(Ptr);
2763
2764 if (Ptr != OrigPtr)
2765 // Strip off casts.
2766 while (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V))
2767 V = C->getOperand();
2768
2769 const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
2770 if (!S)
2771 return nullptr;
2772
2773 // If the pointer is invariant then there is no stride and it makes no
2774 // sense to add it here.
2775 if (Lp != S->getLoop())
2776 return nullptr;
2777
2778 V = S->getStepRecurrence(*SE);
2779 if (!V)
2780 return nullptr;
2781
2782 // Strip off the size of access multiplication if we are still analyzing the
2783 // pointer.
2784 if (OrigPtr == Ptr) {
2785 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
2786 if (M->getOperand(0)->getSCEVType() != scConstant)
2787 return nullptr;
2788
2789 const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();
2790
2791 // Huge step value - give up.
2792 if (APStepVal.getBitWidth() > 64)
2793 return nullptr;
2794
2795 int64_t StepVal = APStepVal.getSExtValue();
2796 if (PtrAccessSize != StepVal)
2797 return nullptr;
2798 V = M->getOperand(1);
2799 }
2800 }
2801
2802 // Note that the restrictions after this loop-invariant check are only
2803 // profitability restrictions.
2804 if (!SE->isLoopInvariant(V, Lp))
2805 return nullptr;
2806
2807 // Look for the loop invariant symbolic value.
2808 const SCEVUnknown *U = dyn_cast<SCEVUnknown>(V);
2809 if (!U) {
2810 const auto *C = dyn_cast<SCEVIntegralCastExpr>(V);
2811 if (!C)
2812 return nullptr;
2813 U = dyn_cast<SCEVUnknown>(C->getOperand());
2814 if (!U)
2815 return nullptr;
2816
2817 // Match legacy behavior - this is not needed for correctness
2818 if (!getUniqueCastUse(U->getValue(), Lp, V->getType()))
2819 return nullptr;
2820 }
2821
2822 return V;
2823}
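// A sketch of the pattern this matches, using invented names: for
//   %idx = mul i64 %i, %Stride        ; %i is the loop's canonical IV
//   %gep = getelementptr i32, ptr %A, i64 %idx
// stripGetElementPtr returns %idx, its SCEV is the add-recurrence
// {0,+,%Stride}<%loop>, and the step recurrence %Stride is a loop-invariant
// SCEVUnknown, so it is returned as the symbolic stride to version on.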
2824
2825void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
2826 Value *Ptr = getLoadStorePointerOperand(MemAccess);
2827 if (!Ptr)
2828 return;
2829
2830 // Note: getStrideFromPointer is a *profitability* heuristic. We
2831 // could broaden the scope of values returned here - to anything
2832 // which happens to be loop invariant and contributes to the
2833 // computation of an interesting IV - but we chose not to as we
2834 // don't have a cost model here, and broadening the scope exposes
2835 // far too many unprofitable cases.
2836 const SCEV *StrideExpr = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
2837 if (!StrideExpr)
2838 return;
2839
2840 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
2841 "versioning:");
2842 LLVM_DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *StrideExpr << "\n");
2843
2844 if (!SpeculateUnitStride) {
2845 LLVM_DEBUG(dbgs() << " Chose not to due to -laa-speculate-unit-stride\n");
2846 return;
2847 }
2848
2849 // Avoid adding the "Stride == 1" predicate when we know that
2850 // Stride >= Trip-Count. Such a predicate will effectively optimize a single
2851 // or zero iteration loop, as Trip-Count <= Stride == 1.
2852 //
2853 // TODO: We are currently not making a very informed decision on when it is
2854 // beneficial to apply stride versioning. It might make more sense that the
2855 // users of this analysis (such as the vectorizer) will trigger it, based on
2856 // their specific cost considerations; For example, in cases where stride
2857 // versioning does not help resolving memory accesses/dependences, the
2858 // vectorizer should evaluate the cost of the runtime test, and the benefit
2859 // of various possible stride specializations, considering the alternatives
2860 // of using gather/scatters (if available).
2861
2862 const SCEV *BETakenCount = PSE->getBackedgeTakenCount();
2863
2864 // Match the types so we can compare the stride and the BETakenCount.
2865 // The Stride can be positive/negative, so we sign extend Stride;
2866 // The backedgeTakenCount is non-negative, so we zero extend BETakenCount.
2867 const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout();
2868 uint64_t StrideTypeSizeBits = DL.getTypeSizeInBits(StrideExpr->getType());
2869 uint64_t BETypeSizeBits = DL.getTypeSizeInBits(BETakenCount->getType());
2870 const SCEV *CastedStride = StrideExpr;
2871 const SCEV *CastedBECount = BETakenCount;
2872 ScalarEvolution *SE = PSE->getSE();
2873 if (BETypeSizeBits >= StrideTypeSizeBits)
2874 CastedStride = SE->getNoopOrSignExtend(StrideExpr, BETakenCount->getType());
2875 else
2876 CastedBECount = SE->getZeroExtendExpr(BETakenCount, StrideExpr->getType());
2877 const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
2878 // Since TripCount == BackEdgeTakenCount + 1, checking:
2879 // "Stride >= TripCount" is equivalent to checking:
2880 // Stride - BETakenCount > 0
2881 if (SE->isKnownPositive(StrideMinusBETaken)) {
2882 LLVM_DEBUG(
2883 dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
2884 "Stride==1 predicate will imply that the loop executes "
2885 "at most once.\n");
2886 return;
2887 }
2888 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");
2889
2890 // Strip back off the integer cast, and check that our result is a
2891 // SCEVUnknown as we expect.
2892 const SCEV *StrideBase = StrideExpr;
2893 if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(StrideBase))
2894 StrideBase = C->getOperand();
2895 SymbolicStrides[Ptr] = cast<SCEVUnknown>(StrideBase);
2896}
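// Numeric sketch of the guard above (values assumed for illustration): if the
// backedge-taken count is the constant 3 (trip count 4) and SE can prove the
// symbolic stride is at least 4, then Stride - BETakenCount > 0 holds, the
// "Stride == 1" predicate could only be true for a loop that runs at most
// once, and no versioned "Stride == 1" copy is worth creating.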
2897
2898LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
2899 const TargetLibraryInfo *TLI, AAResults *AA,
2900 DominatorTree *DT, LoopInfo *LI)
2901 : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
2902 PtrRtChecking(nullptr),
2903 DepChecker(std::make_unique<MemoryDepChecker>(*PSE, L)), TheLoop(L) {
2904 PtrRtChecking = std::make_unique<RuntimePointerChecking>(*DepChecker, SE);
2905 if (canAnalyzeLoop()) {
2906 analyzeLoop(AA, LI, TLI, DT);
2907 }
2908}
2909
2910void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
2911 if (CanVecMem) {
2912 OS.indent(Depth) << "Memory dependences are safe";
2913 const MemoryDepChecker &DC = getDepChecker();
2914 if (!DC.isSafeForAnyVectorWidth())
2915 OS << " with a maximum safe vector width of "
2916 << DC.getMaxSafeVectorWidthInBits() << " bits";
2917 if (PtrRtChecking->Need)
2918 OS << " with run-time checks";
2919 OS << "\n";
2920 }
2921
2922 if (HasConvergentOp)
2923 OS.indent(Depth) << "Has convergent operation in loop\n";
2924
2925 if (Report)
2926 OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";
2927
2928 if (auto *Dependences = DepChecker->getDependences()) {
2929 OS.indent(Depth) << "Dependences:\n";
2930 for (const auto &Dep : *Dependences) {
2931 Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
2932 OS << "\n";
2933 }
2934 } else
2935 OS.indent(Depth) << "Too many dependences, not recorded\n";
2936
2937 // List the pairs of accesses that need run-time checks to prove independence.
2938 PtrRtChecking->print(OS, Depth);
2939 OS << "\n";
2940
2941 OS.indent(Depth) << "Non vectorizable stores to invariant address were "
2942 << (HasDependenceInvolvingLoopInvariantAddress ? "" : "not ")
2943 << "found in loop.\n";
2944
2945 OS.indent(Depth) << "SCEV assumptions:\n";
2946 PSE->getPredicate().print(OS, Depth);
2947
2948 OS << "\n";
2949
2950 OS.indent(Depth) << "Expressions re-written:\n";
2951 PSE->print(OS, Depth);
2952}
2953
2954const LoopAccessInfo &LoopAccessInfoManager::getInfo(Loop &L) {
2955 auto I = LoopAccessInfoMap.insert({&L, nullptr});
2956
2957 if (I.second)
2958 I.first->second =
2959 std::make_unique<LoopAccessInfo>(&L, &SE, TLI, &AA, &DT, &LI);
2960
2961 return *I.first->second;
2962}
2963
2964bool LoopAccessInfoManager::invalidate(
2965 Function &F, const PreservedAnalyses &PA,
2966 FunctionAnalysisManager::Invalidator &Inv) {
2967 // Check whether our analysis is preserved.
2968 auto PAC = PA.getChecker<LoopAccessAnalysis>();
2969 if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
2970 // If not, give up now.
2971 return true;
2972
2973 // Check whether the analyses we depend on became invalid for any reason.
2974 // Skip checking TargetLibraryAnalysis as it is immutable and can't become
2975 // invalid.
2976 return Inv.invalidate<AAManager>(F, PA) ||
2977 Inv.invalidate<ScalarEvolutionAnalysis>(F, PA) ||
2978 Inv.invalidate<LoopAnalysis>(F, PA) ||
2979 Inv.invalidate<DominatorTreeAnalysis>(F, PA);
2980}
2981
2982LoopAccessInfoManager LoopAccessAnalysis::run(Function &F,
2983 FunctionAnalysisManager &FAM) {
2984 auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(F);
2985 auto &AA = FAM.getResult<AAManager>(F);
2986 auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
2987 auto &LI = FAM.getResult<LoopAnalysis>(F);
2988 auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
2989 return LoopAccessInfoManager(SE, AA, DT, LI, &TLI);
2990}
2991
2992AnalysisKey LoopAccessAnalysis::Key;
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckType(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N, const TargetLowering *TLI, const DataLayout &DL)
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
static const X86InstrFMA3Group Groups[]
static constexpr uint32_t Opcode
Definition: aarch32.h:200
A manager for alias analyses.
Class for arbitrary precision integers.
Definition: APInt.h:76
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1485
APInt abs() const
Get the absolute value.
Definition: APInt.h:1730
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1433
bool isNegative() const
Determine sign of this APInt.
Definition: APInt.h:307
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition: APInt.cpp:1010
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
Definition: APInt.h:334
int64_t getSExtValue() const
Get sign extended value.
Definition: APInt.h:1507
This templated class represents "all analyses that operate over <a particular IR unit>" (e....
Definition: PassManager.h:110
API to communicate dependencies between analyses during invalidation.
Definition: PassManager.h:690
bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA)
Trigger the invalidation of some other analysis pass if not already handled and return whether it was...
Definition: PassManager.h:708
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:649
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:803
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition: ArrayRef.h:204
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:205
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Definition: BasicBlock.cpp:328
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:451
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
A debug info location.
Definition: DebugLoc.h:33
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
iterator end()
Definition: DenseMap.h:84
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:220
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:278
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:165
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:123
EquivalenceClasses - This represents a collection of equivalence classes and supports three efficient...
iterator findValue(const ElemTy &V) const
findValue - Return an iterator to the specified value.
iterator insert(const ElemTy &Data)
insert - Insert a new value into the union/find set, ignoring the request if the value already exists...
member_iterator member_end() const
typename std::set< ECValue, ECValueComparator >::const_iterator iterator
iterator* - Provides a way to iterate over all values in the set.
member_iterator member_begin(iterator I) const
member_iterator unionSets(const ElemTy &V1, const ElemTy &V2)
union - Merge the two equivalence sets for the specified values, inserting them if they do not alread...
const ElemTy & getLeaderValue(const ElemTy &V) const
getLeaderValue - Return the leader for the specified value that is in the set.
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
Definition: Function.h:677
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:940
Type * getResultElementType() const
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:290
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr if the function does not...
Definition: Instruction.cpp:71
Class to represent integer types.
Definition: DerivedTypes.h:40
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:285
An instruction for reading from memory.
Definition: Instructions.h:177
Value * getPointerOperand()
Definition: Instructions.h:264
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
This analysis provides dependence information for the memory accesses of a loop.
Result run(Function &F, FunctionAnalysisManager &AM)
bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
const LoopAccessInfo & getInfo(Loop &L)
Drive the analysis of memory accesses in the loop.
const MemoryDepChecker & getDepChecker() const
Returns the Memory Dependence Checker, which can determine the loop-independent and loop-carried dependences b...
bool isInvariant(Value *V) const
Returns true if value V is loop invariant.
void print(raw_ostream &OS, unsigned Depth=0) const
Print the information about the memory accesses in the loop.
LoopAccessInfo(Loop *L, ScalarEvolution *SE, const TargetLibraryInfo *TLI, AAResults *AA, DominatorTree *DT, LoopInfo *LI)
static bool blockNeedsPredication(BasicBlock *BB, Loop *TheLoop, DominatorTree *DT)
Return true if the block BB needs to be predicated in order for the loop to be vectorized.
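A hedged usage sketch tying the entries above together: a hypothetical new-PM function pass (LAAQueryPass is not part of LLVM) asks the analysis manager for the LoopAccessInfoManager and then queries the per-loop results.

  #include "llvm/Analysis/LoopAccessAnalysis.h"
  #include "llvm/Analysis/LoopInfo.h"
  #include "llvm/IR/PassManager.h"
  #include "llvm/Support/Debug.h"
  using namespace llvm;

  // Hypothetical pass, shown only to illustrate the query pattern.
  struct LAAQueryPass : PassInfoMixin<LAAQueryPass> {
    PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM) {
      LoopAccessInfoManager &LAIs = FAM.getResult<LoopAccessAnalysis>(F);
      LoopInfo &LI = FAM.getResult<LoopAnalysis>(F);
      for (Loop *L : LI.getLoopsInPreorder()) {
        if (!L->isInnermost())
          continue; // LAA analyzes innermost loops
        const LoopAccessInfo &LAI = LAIs.getInfo(*L);
        if (!LAI.canVectorizeMemory())
          continue;
        // Number of run-time checks that would have to be emitted, and the
        // widest vector the recorded dependences allow.
        unsigned NumChecks =
            LAI.getRuntimePointerChecking()->getNumberOfChecks();
        uint64_t MaxBits = LAI.getDepChecker().getMaxSafeVectorWidthInBits();
        dbgs() << L->getHeader()->getName() << ": " << NumChecks
               << " checks, max safe width " << MaxBits << " bits\n";
      }
      return PreservedAnalyses::all();
    }
  };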
Analysis pass that exposes the LoopInfo for a function.
Definition: LoopInfo.h:566
bool contains(const LoopT *L) const
Return true if the specified loop is contained within this loop.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
unsigned getNumBackEdges() const
Calculate the number of back edges to the loop header.
BlockT * getHeader() const
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
Definition: LoopIterator.h:172
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:44
bool isAnnotatedParallel() const
Returns true if the loop is annotated parallel.
Definition: LoopInfo.cpp:564
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
Definition: LoopInfo.cpp:631
Tracking metadata reference owned by Metadata.
Definition: Metadata.h:859
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
Checks memory dependences among accesses to the same underlying object to determine whether there vec...
ArrayRef< unsigned > getOrderForAccess(Value *Ptr, bool IsWrite) const
Return the program order indices for the access location (Ptr, IsWrite).
bool isSafeForAnyVectorWidth() const
Return true if the number of elements that are safe to operate on simultaneously is not bounded.
bool areDepsSafe(DepCandidates &AccessSets, MemAccessInfoList &CheckDeps, const DenseMap< Value *, const SCEV * > &Strides, const DenseMap< Value *, SmallVector< const Value *, 16 > > &UnderlyingObjects)
Check whether the dependencies between the accesses are safe.
const Loop * getInnermostLoop() const
uint64_t getMaxSafeVectorWidthInBits() const
Return the number of elements that are safe to operate on simultaneously, multiplied by the size of t...
bool isSafeForVectorization() const
No memory dependence was encountered that would inhibit vectorization.
SmallVector< Instruction *, 4 > getInstructionsForAccess(Value *Ptr, bool isWrite) const
Find the set of instructions that read or write via Ptr.
VectorizationSafetyStatus
Type to keep track of the status of the dependence check.
void addAccess(StoreInst *SI)
Register the location (instructions are given increasing numbers) of a write access.
PointerIntPair< Value *, 1, bool > MemAccessInfo
Representation for a specific memory location.
static MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
AAMDNodes AATags
The metadata nodes which describes the aliasing of the location (each member is null if that kind of ...
const Value * Ptr
The address of the start of the location.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.h:275
Diagnostic information for optimization analysis remarks.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
void addPredicate(const SCEVPredicate &Pred)
Adds a new predicate.
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
const SCEVPredicate & getPredicate() const
bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Returns true if we've proved that V doesn't wrap by means of a SCEV predicate.
void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Proves that V doesn't overflow by adding SCEV predicate.
void print(raw_ostream &OS, unsigned Depth) const
Print the SCEV mappings done by the Predicated Scalar Evolution.
const SCEVAddRecExpr * getAsAddRec(Value *V)
Attempts to produce an AddRecExpr for V by adding additional SCEV predicates.
const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
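A small illustrative helper (the name getPointerAddRec is made up here) showing the usual PSE pattern in this file: ask for the plain SCEV first, and only fall back to the predicated AddRec form, which may register run-time predicates, when that fails.

  #include "llvm/Analysis/ScalarEvolution.h"
  #include "llvm/Analysis/ScalarEvolutionExpressions.h"
  #include "llvm/IR/Value.h"
  using namespace llvm;

  // Illustrative only: view Ptr as an affine AddRec in the analyzed loop,
  // possibly at the cost of extra SCEV predicates.
  static const SCEVAddRecExpr *getPointerAddRec(PredicatedScalarEvolution &PSE,
                                                Value *Ptr) {
    const SCEV *PtrScev = PSE.getSCEV(Ptr);
    const auto *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
    if (!AR)
      // May add predicates (see addPredicate) that must be checked at run
      // time before the predicated form may be trusted.
      AR = PSE.getAsAddRec(Ptr);
    return AR && AR->isAffine() ? AR : nullptr;
  }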
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:172
PreservedAnalysisChecker getChecker() const
Build a checker for this PreservedAnalyses and the specified analysis type.
Definition: PassManager.h:330
Holds information about the memory runtime legality checks to verify that a group of pointers do not ...
bool Need
This flag indicates if we need to add the runtime check.
void reset()
Reset the state of the pointer runtime information.
unsigned getNumberOfChecks() const
Returns the number of run-time checks required according to needsChecking.
void printChecks(raw_ostream &OS, const SmallVectorImpl< RuntimePointerCheck > &Checks, unsigned Depth=0) const
Print Checks.
bool needsChecking(const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const
Decide if we need to add a check between two groups of pointers, according to needsChecking.
void print(raw_ostream &OS, unsigned Depth=0) const
Print the list of run-time memory checks necessary.
SmallVector< RuntimeCheckingPtrGroup, 2 > CheckingGroups
Holds a partitioning of pointers into "check groups".
void generateChecks(MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies)
Generate the checks and store it.
static bool arePointersInSamePartition(const SmallVectorImpl< int > &PtrToPartition, unsigned PtrIdx1, unsigned PtrIdx2)
Check if pointers are in the same partition.
SmallVector< PointerInfo, 2 > Pointers
Information about the pointers that may require checking.
void insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr, Type *AccessTy, bool WritePtr, unsigned DepSetId, unsigned ASId, PredicatedScalarEvolution &PSE, bool NeedsFreeze)
Insert a pointer and calculate the start and end SCEVs.
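Sketching the intent for an affine pointer {Base,+,Step} with a positive Step (negative steps swap the bounds, and the implementation applies further clamping and freezing):

  \[ \mathrm{Low} = \mathrm{Base}, \qquad
     \mathrm{High} = \mathrm{Base} + \mathrm{BTC}\cdot\mathrm{Step} + \mathrm{EltSize}, \]

where BTC is the backedge-taken count and EltSize the size of the accessed type. Two checking groups A and B are then known disjoint at run time when High_A <= Low_B or High_B <= Low_A; a run-time check is emitted whenever this cannot be proven statically.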
This node represents a polynomial recurrence on the trip count of the specified loop.
const SCEV * evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const
Return the value of this chain of recurrences at the specified iteration number.
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
This class represents a constant integer value.
This is the base class for unary integral cast operator classes.
This node represents multiplication of some number of SCEVs.
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
virtual void print(raw_ostream &OS, unsigned Depth=0) const =0
Prints a textual representation of this predicate with an indentation of Depth.
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...
This class represents an analyzed expression in the program.
Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
The main scalar evolution driver.
const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
const SCEV * getUMaxExpr(const SCEV *LHS, const SCEV *RHS)
const SCEVPredicate * getEqualPredicate(const SCEV *LHS, const SCEV *RHS)
const SCEV * getConstant(ConstantInt *V)
const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
const SCEV * getPtrToIntExpr(const SCEV *Op, Type *Ty)
bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
bool isKnownPositive(const SCEV *S)
Test if the given expression is known to be positive.
const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
const SCEV * getUMinExpr(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
const SCEV * getStoreSizeOfExpr(Type *IntTy, Type *StoreTy)
Return an expression for the store size of StoreTy that is type IntTy.
const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
const SCEV * getSizeOfExpr(Type *IntTy, TypeSize Size)
Return an expression for a TypeSize.
const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
const SCEV * getTruncateOrSignExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:384
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:366
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:451
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:166
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:941
void resize(size_type N)
Definition: SmallVector.h:642
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
An instruction for storing to memory.
Definition: Instructions.h:301
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:265
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:255
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
Value * getOperand(unsigned i) const
Definition: User.h:169
unsigned getNumOperands() const
Definition: User.h:191
static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)
Retrieve all the VFInfo instances associated to the CallInst CI.
Definition: VectorUtils.h:245
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
Definition: Value.h:733
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:189
An efficient, type-erasing, non-owning reference to a callable.
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
raw_ostream & indent(unsigned NumSpaces)
indent - Insert 'NumSpaces' spaces.
friend const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:237
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
Definition: PatternMatch.h:545
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:445
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:465
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
std::optional< int > getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB, Value *PtrB, const DataLayout &DL, ScalarEvolution &SE, bool StrictCheck=false, bool CheckType=true)
Returns the distance between the pointers PtrA and PtrB iff they are compatible and it is possible to...
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1726
Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
std::optional< const MDOperand * > findStringMetadataForLoop(const Loop *TheLoop, StringRef Name)
Find string metadata for loop.
Definition: LoopInfo.cpp:1052
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
Definition: STLExtras.h:1932
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1733
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Definition: Function.cpp:2007
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
std::optional< int64_t > getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr, const Loop *Lp, const DenseMap< Value *, const SCEV * > &StridesMap=DenseMap< Value *, const SCEV * >(), bool Assume=false, bool ShouldCheckWrap=true)
If the pointer has a constant stride return it in units of the access type size.
bool sortPtrAccesses(ArrayRef< Value * > VL, Type *ElemTy, const DataLayout &DL, ScalarEvolution &SE, SmallVectorImpl< unsigned > &SortedIndices)
Attempt to sort the pointers in VL and return the sorted indices in SortedIndices,...
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
const SCEV * replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &PtrToStride, Value *Ptr)
Return the SCEV corresponding to a pointer with the symbolic stride replaced with constant one,...
bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL, ScalarEvolution &SE, bool CheckType=true)
Returns true if the memory operations A and B are consecutive.
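A hedged, end-to-end sketch of driving the two free functions above from hand-built IR. Everything here (the module, the function "f", the stand-alone ScalarEvolution construction) is illustrative scaffolding in the style of LLVM's unit tests, not part of LAA itself.

  #include "llvm/Analysis/AssumptionCache.h"
  #include "llvm/Analysis/LoopAccessAnalysis.h"
  #include "llvm/Analysis/LoopInfo.h"
  #include "llvm/Analysis/ScalarEvolution.h"
  #include "llvm/Analysis/TargetLibraryInfo.h"
  #include "llvm/IR/Dominators.h"
  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/Module.h"
  #include "llvm/Support/raw_ostream.h"
  #include <optional>
  using namespace llvm;

  int main() {
    LLVMContext Ctx;
    Module M("m", Ctx);
    IRBuilder<> B(Ctx);

    // void @f(ptr %p): load %p[0] and %p[1] as i32.
    auto *FnTy = FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false);
    Function *Fn = Function::Create(FnTy, Function::ExternalLinkage, "f", M);
    BasicBlock *BB = BasicBlock::Create(Ctx, "entry", Fn);
    B.SetInsertPoint(BB);
    Value *P0 = Fn->getArg(0);
    Value *P1 = B.CreateConstGEP1_32(B.getInt32Ty(), P0, 1);
    LoadInst *L0 = B.CreateLoad(B.getInt32Ty(), P0, "l0");
    LoadInst *L1 = B.CreateLoad(B.getInt32Ty(), P1, "l1");
    B.CreateRetVoid();

    // Stand-alone ScalarEvolution, assembled the way the unit tests do it.
    TargetLibraryInfoImpl TLII;
    TargetLibraryInfo TLI(TLII);
    AssumptionCache AC(*Fn);
    DominatorTree DT(*Fn);
    LoopInfo LI(DT);
    ScalarEvolution SE(*Fn, TLI, AC, DT, LI);
    const DataLayout &DL = M.getDataLayout();

    // Expect: consecutive accesses, one element apart.
    bool Consec = isConsecutiveAccess(L0, L1, DL, SE);
    std::optional<int> Diff =
        getPointersDiff(B.getInt32Ty(), P0, B.getInt32Ty(), P1, DL, SE);
    outs() << "consecutive: " << (Consec ? "yes" : "no")
           << " diff: " << (Diff ? *Diff : -1) << "\n";
    return 0;
  }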
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
OutputIt copy(R &&Range, OutputIt Out)
Definition: STLExtras.h:1828
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1753
gep_type_iterator gep_type_begin(const User *GEP)
Type * getLoadStoreType(Value *I)
A helper function that returns the type of a load or store instruction.
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
MDNode * TBAA
The tag for type-based alias analysis.
Definition: Metadata.h:755
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition: PassManager.h:89
Dependence between memory access instructions.
DepType Type
The type of the dependence.
bool isPossiblyBackward() const
May be a lexically backward dependence type (includes Unknown).
bool isForward() const
Lexically forward dependence.
bool isBackward() const
Lexically backward dependence.
void print(raw_ostream &OS, unsigned Depth, const SmallVectorImpl< Instruction * > &Instrs) const
Print the dependence.
Instruction * getDestination(const LoopAccessInfo &LAI) const
Return the destination instruction of the dependence.
Instruction * getSource(const LoopAccessInfo &LAI) const
Return the source instruction of the dependence.
DepType
The type of the dependence.
static const char * DepName[]
String version of the types.
static VectorizationSafetyStatus isSafeForVectorization(DepType Type)
Dependence types that don't prevent vectorization.
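An illustrative helper (reportBackwardDeps is not an LAA API) showing how a client might walk the recorded dependences with the accessors listed here.

  #include "llvm/Analysis/LoopAccessAnalysis.h"
  #include "llvm/Support/Debug.h"
  using namespace llvm;

  // Illustrative only: print every possibly-backward dependence recorded by
  // the checker. Recording stops once too many dependences are found, in
  // which case getDependences() returns nullptr.
  static void reportBackwardDeps(const LoopAccessInfo &LAI) {
    const MemoryDepChecker &DepChecker = LAI.getDepChecker();
    const auto *Deps = DepChecker.getDependences();
    if (!Deps)
      return;
    for (const MemoryDepChecker::Dependence &Dep : *Deps)
      if (Dep.isPossiblyBackward())
        dbgs() << "possibly backward: " << *Dep.getSource(LAI) << "  ->  "
               << *Dep.getDestination(LAI) << "\n";
  }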
unsigned AddressSpace
Address space of the involved pointers.
bool addPointer(unsigned Index, RuntimePointerChecking &RtCheck)
Tries to add the pointer recorded in RtCheck at index Index to this pointer checking group.
bool NeedsFreeze
Whether the pointer needs to be frozen after expansion, e.g.
const SCEV * High
The SCEV expression which represents the upper bound of all the pointers in this group.
SmallVector< unsigned, 2 > Members
Indices of all the pointers that constitute this grouping.
RuntimeCheckingPtrGroup(unsigned Index, RuntimePointerChecking &RtCheck)
Create a new pointer checking group containing a single pointer, with index Index in RtCheck.
const SCEV * Low
The SCEV expression which represents the lower bound of all the pointers in this group.
bool IsWritePtr
Holds whether this pointer is used for writing to memory.
unsigned DependencySetId
Holds the id of the set of pointers that could be dependent because of a shared underlying object.
unsigned AliasSetId
Holds the id of the disjoint alias set to which this pointer belongs.
static const unsigned MaxVectorWidth
Maximum SIMD width.
static unsigned VectorizationFactor
VF as overridden by the user.
static unsigned RuntimeMemoryCheckThreshold
When performing memory disambiguation checks at runtime do not make more than this number of compari...
static bool isInterleaveForced()
True if force-vector-interleave was specified by the user.
static unsigned VectorizationInterleave
Interleave factor as overridden by the user.
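Because these are ordinary cl::opt flags, they can be driven from opt when inspecting a loop; a typical invocation (print<access-info> is the registered LAA printer pass, the input file name is arbitrary) might look like:

  opt -passes='print<access-info>' -disable-output \
      -runtime-memory-check-threshold=16 -max-dependences=200 input.ll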
Function object to check whether the first component of a container supported by std::get (like std::...
Definition: STLExtras.h:1454